author    Ujjwal Sharma <usharma1998@gmail.com>  2019-03-15 18:35:06 +0530
committer Refael Ackermann <refack@gmail.com>    2019-03-28 16:36:18 -0400
commit    f579e1194046c50f2e6bb54348d48c8e7d1a53cf (patch)
tree      9125787c758358365f74f9fd9673c14f57e67870
parent    2c73868b0471fbd4038f500d076df056cbf697fe (diff)
download  android-node-v8-f579e1194046c50f2e6bb54348d48c8e7d1a53cf.tar.gz
          android-node-v8-f579e1194046c50f2e6bb54348d48c8e7d1a53cf.tar.bz2
          android-node-v8-f579e1194046c50f2e6bb54348d48c8e7d1a53cf.zip
deps: update V8 to 7.4.288.13
PR-URL: https://github.com/nodejs/node/pull/26685
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Refael Ackermann <refack@gmail.com>
-rw-r--r--  deps/v8/.gitignore | 1
-rw-r--r--  deps/v8/AUTHORS | 3
-rw-r--r--  deps/v8/BUILD.gn | 397
-rw-r--r--  deps/v8/ChangeLog | 1455
-rw-r--r--  deps/v8/DEPS | 35
-rw-r--r--  deps/v8/base/trace_event/common/trace_event_common.h | 2
-rw-r--r--  deps/v8/gni/v8.gni | 48
-rw-r--r--  deps/v8/include/libplatform/libplatform.h | 9
-rw-r--r--  deps/v8/include/libplatform/v8-tracing.h | 14
-rw-r--r--  deps/v8/include/v8-internal.h | 41
-rw-r--r--  deps/v8/include/v8-platform.h | 18
-rw-r--r--  deps/v8/include/v8-profiler.h | 111
-rw-r--r--  deps/v8/include/v8-util.h | 18
-rw-r--r--  deps/v8/include/v8-version.h | 6
-rw-r--r--  deps/v8/include/v8.h | 713
-rw-r--r--  deps/v8/infra/OWNERS | 1
-rw-r--r--  deps/v8/infra/config/OWNERS | 1
-rw-r--r--  deps/v8/infra/config/PRESUBMIT.py | 29
-rw-r--r--  deps/v8/infra/config/cq.cfg | 158
-rw-r--r--  deps/v8/infra/mb/mb_config.pyl | 51
-rw-r--r--  deps/v8/infra/testing/OWNERS | 3
-rw-r--r--  deps/v8/infra/testing/PRESUBMIT.py | 4
-rw-r--r--  deps/v8/infra/testing/builders.pyl | 72
-rw-r--r--  deps/v8/src/DEPS | 2
-rw-r--r--  deps/v8/src/accessors.cc | 4
-rw-r--r--  deps/v8/src/allocation.cc | 2
-rw-r--r--  deps/v8/src/api-arguments-inl.h | 11
-rw-r--r--  deps/v8/src/api-arguments.cc | 14
-rw-r--r--  deps/v8/src/api-arguments.h | 2
-rw-r--r--  deps/v8/src/api-inl.h | 4
-rw-r--r--  deps/v8/src/api-natives.cc | 5
-rw-r--r--  deps/v8/src/api.cc | 565
-rw-r--r--  deps/v8/src/api.h | 47
-rw-r--r--  deps/v8/src/arguments-inl.h | 2
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 26
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 34
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 20
-rw-r--r--  deps/v8/src/arm/constants-arm.h | 16
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 1
-rw-r--r--  deps/v8/src/arm/interface-descriptors-arm.cc | 15
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 81
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 41
-rw-r--r--  deps/v8/src/arm/register-arm.h | 12
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 3
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h | 26
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc | 43
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h | 29
-rw-r--r--  deps/v8/src/arm64/cpu-arm64.cc | 8
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc | 11
-rw-r--r--  deps/v8/src/arm64/frame-constants-arm64.h | 8
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc | 15
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc | 227
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h | 39
-rw-r--r--  deps/v8/src/arm64/register-arm64.h | 11
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc | 9
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h | 4
-rw-r--r--  deps/v8/src/asan.h | 10
-rw-r--r--  deps/v8/src/asmjs/asm-js.cc | 1
-rw-r--r--  deps/v8/src/asmjs/asm-parser.cc | 2
-rw-r--r--  deps/v8/src/asmjs/asm-parser.h | 1
-rw-r--r--  deps/v8/src/asmjs/asm-scanner.cc | 2
-rw-r--r--  deps/v8/src/assembler.cc | 15
-rw-r--r--  deps/v8/src/assembler.h | 9
-rw-r--r--  deps/v8/src/assert-scope.cc | 2
-rw-r--r--  deps/v8/src/assert-scope.h | 33
-rw-r--r--  deps/v8/src/ast/ast-traversal-visitor.h | 4
-rw-r--r--  deps/v8/src/ast/ast-value-factory.cc | 1
-rw-r--r--  deps/v8/src/ast/ast-value-factory.h | 3
-rw-r--r--  deps/v8/src/ast/ast.cc | 28
-rw-r--r--  deps/v8/src/ast/ast.h | 137
-rw-r--r--  deps/v8/src/ast/modules.cc | 47
-rw-r--r--  deps/v8/src/ast/modules.h | 7
-rw-r--r--  deps/v8/src/ast/prettyprinter.cc | 9
-rw-r--r--  deps/v8/src/ast/prettyprinter.h | 1
-rw-r--r--  deps/v8/src/ast/scopes.cc | 811
-rw-r--r--  deps/v8/src/ast/scopes.h | 165
-rw-r--r--  deps/v8/src/ast/variables.h | 8
-rw-r--r--  deps/v8/src/bailout-reason.h | 1
-rw-r--r--  deps/v8/src/base/division-by-constant.cc | 22
-rw-r--r--  deps/v8/src/base/division-by-constant.h | 33
-rw-r--r--  deps/v8/src/base/ieee754.cc | 335
-rw-r--r--  deps/v8/src/base/ieee754.h | 8
-rw-r--r--  deps/v8/src/base/macros.h | 2
-rw-r--r--  deps/v8/src/base/platform/mutex.cc | 61
-rw-r--r--  deps/v8/src/base/platform/mutex.h | 130
-rw-r--r--  deps/v8/src/base/platform/platform-fuchsia.cc | 23
-rw-r--r--  deps/v8/src/base/platform/platform-posix.cc | 4
-rw-r--r--  deps/v8/src/base/platform/platform-posix.h | 2
-rw-r--r--  deps/v8/src/base/platform/platform-win32.cc | 9
-rw-r--r--  deps/v8/src/base/platform/time.cc | 4
-rw-r--r--  deps/v8/src/base/small-vector.h | 16
-rw-r--r--  deps/v8/src/base/timezone-cache.h | 16
-rw-r--r--  deps/v8/src/basic-block-profiler.cc | 2
-rw-r--r--  deps/v8/src/bootstrapper.cc | 336
-rw-r--r--  deps/v8/src/bootstrapper.h | 11
-rw-r--r--  deps/v8/src/builtins/arguments.tq | 2
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc | 356
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc | 858
-rw-r--r--  deps/v8/src/builtins/array-copywithin.tq | 2
-rw-r--r--  deps/v8/src/builtins/array-every.tq | 151
-rw-r--r--  deps/v8/src/builtins/array-filter.tq | 150
-rw-r--r--  deps/v8/src/builtins/array-find.tq | 158
-rw-r--r--  deps/v8/src/builtins/array-findindex.tq | 161
-rw-r--r--  deps/v8/src/builtins/array-foreach.tq | 72
-rw-r--r--  deps/v8/src/builtins/array-join.tq | 158
-rw-r--r--  deps/v8/src/builtins/array-lastindexof.tq | 4
-rw-r--r--  deps/v8/src/builtins/array-map.tq | 281
-rw-r--r--  deps/v8/src/builtins/array-of.tq | 6
-rw-r--r--  deps/v8/src/builtins/array-reduce-right.tq | 183
-rw-r--r--  deps/v8/src/builtins/array-reduce.tq | 182
-rw-r--r--  deps/v8/src/builtins/array-reverse.tq | 63
-rw-r--r--  deps/v8/src/builtins/array-slice.tq | 14
-rw-r--r--  deps/v8/src/builtins/array-some.tq | 150
-rw-r--r--  deps/v8/src/builtins/array-splice.tq | 10
-rw-r--r--  deps/v8/src/builtins/array-unshift.tq | 6
-rw-r--r--  deps/v8/src/builtins/array.tq | 35
-rw-r--r--  deps/v8/src/builtins/base.tq | 929
-rw-r--r--  deps/v8/src/builtins/builtins-arguments-gen.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc | 1150
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.h | 36
-rw-r--r--  deps/v8/src/builtins/builtins-array.cc | 44
-rw-r--r--  deps/v8/src/builtins/builtins-arraybuffer.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-bigint.cc | 32
-rw-r--r--  deps/v8/src/builtins/builtins-boolean.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.cc | 253
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.h | 15
-rw-r--r--  deps/v8/src/builtins/builtins-callsite.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc | 94
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-date.cc | 69
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h | 363
-rw-r--r--  deps/v8/src/builtins/builtins-descriptors.h | 3
-rw-r--r--  deps/v8/src/builtins/builtins-error.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-handler-gen.cc | 52
-rw-r--r--  deps/v8/src/builtins/builtins-ic-gen.cc | 3
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc | 61
-rw-r--r--  deps/v8/src/builtins/builtins-intl.cc | 36
-rw-r--r--  deps/v8/src/builtins/builtins-microtask-queue-gen.cc | 103
-rw-r--r--  deps/v8/src/builtins/builtins-number.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc | 32
-rw-r--r--  deps/v8/src/builtins/builtins-object.cc | 7
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.cc | 125
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.h | 11
-rw-r--r--  deps/v8/src/builtins/builtins-promise.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc | 31
-rw-r--r--  deps/v8/src/builtins/builtins-reflect.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc | 226
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.h | 4
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc | 25
-rw-r--r--  deps/v8/src/builtins/builtins-string.cc | 112
-rw-r--r--  deps/v8/src/builtins/builtins-symbol-gen.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-symbol.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-trace.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.cc | 1049
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.h | 51
-rw-r--r--  deps/v8/src/builtins/builtins-wasm-gen.cc | 7
-rw-r--r--  deps/v8/src/builtins/builtins-weak-refs.cc | 108
-rw-r--r--  deps/v8/src/builtins/collections.tq | 2
-rw-r--r--  deps/v8/src/builtins/constants-table-builder.cc | 6
-rw-r--r--  deps/v8/src/builtins/data-view.tq | 29
-rw-r--r--  deps/v8/src/builtins/frames.tq | 29
-rw-r--r--  deps/v8/src/builtins/growable-fixed-array-gen.cc | 9
-rw-r--r--  deps/v8/src/builtins/growable-fixed-array.tq | 45
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc | 512
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc | 113
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc | 113
-rw-r--r--  deps/v8/src/builtins/object-fromentries.tq | 10
-rw-r--r--  deps/v8/src/builtins/object.tq | 12
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc | 386
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc | 380
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc | 39
-rw-r--r--  deps/v8/src/builtins/string-endswith.tq | 85
-rw-r--r--  deps/v8/src/builtins/string-startswith.tq | 71
-rw-r--r--  deps/v8/src/builtins/typed-array-createtypedarray.tq | 371
-rw-r--r--  deps/v8/src/builtins/typed-array-filter.tq | 79
-rw-r--r--  deps/v8/src/builtins/typed-array-foreach.tq | 50
-rw-r--r--  deps/v8/src/builtins/typed-array-reduce.tq | 59
-rw-r--r--  deps/v8/src/builtins/typed-array-reduceright.tq | 60
-rw-r--r--  deps/v8/src/builtins/typed-array-slice.tq | 107
-rw-r--r--  deps/v8/src/builtins/typed-array-subarray.tq | 63
-rw-r--r--  deps/v8/src/builtins/typed-array.tq | 457
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc | 964
-rw-r--r--  deps/v8/src/code-comments.cc | 4
-rw-r--r--  deps/v8/src/code-desc.cc | 73
-rw-r--r--  deps/v8/src/code-desc.h | 83
-rw-r--r--  deps/v8/src/code-factory.cc | 1
-rw-r--r--  deps/v8/src/code-reference.cc | 23
-rw-r--r--  deps/v8/src/code-reference.h | 4
-rw-r--r--  deps/v8/src/code-stub-assembler.cc | 858
-rw-r--r--  deps/v8/src/code-stub-assembler.h | 272
-rw-r--r--  deps/v8/src/code-tracer.h | 1
-rw-r--r--  deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc | 1
-rw-r--r--  deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc | 13
-rw-r--r--  deps/v8/src/compiler.cc | 100
-rw-r--r--  deps/v8/src/compiler.h | 6
-rw-r--r--  deps/v8/src/compiler/OWNERS | 1
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 17
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 7
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 436
-rw-r--r--  deps/v8/src/compiler/access-info.h | 62
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc | 35
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 80
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc | 7
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc | 24
-rw-r--r--  deps/v8/src/compiler/backend/code-generator-impl.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc | 64
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.h | 8
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc | 53
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc | 7
-rw-r--r--  deps/v8/src/compiler/backend/instruction.h | 24
-rw-r--r--  deps/v8/src/compiler/backend/live-range-separator.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc | 23
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc | 23
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc | 5
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 58
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc | 11
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc | 19
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.cc | 892
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.h | 210
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc | 56
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-codes-s390.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc | 1
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc | 31
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc | 89
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc | 1
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc | 35
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 21
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 4
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 48
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 54
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 2
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.cc | 132
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.h | 32
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 64
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h | 3
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc | 15
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 2
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 3
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc | 1
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 587
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 2
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc | 118
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h | 48
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.cc | 7
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 19
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 1129
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 20
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 16
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 2
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 78
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 11
-rw-r--r--  deps/v8/src/compiler/linkage.h | 6
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 67
-rw-r--r--  deps/v8/src/compiler/load-elimination.h | 9
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 8
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 2
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 17
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.h | 1
-rw-r--r--  deps/v8/src/compiler/node-cache.cc | 10
-rw-r--r--  deps/v8/src/compiler/node-cache.h | 14
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 2
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 17
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 1
-rw-r--r--  deps/v8/src/compiler/node.cc | 29
-rw-r--r--  deps/v8/src/compiler/node.h | 57
-rw-r--r--  deps/v8/src/compiler/opcodes.cc | 2
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 3
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 53
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 6
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 6
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 2
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 46
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 30
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 5
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.cc | 623
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.h | 173
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 68
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 182
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 63
-rw-r--r--  deps/v8/src/compiler/type-cache.cc | 2
-rw-r--r--  deps/v8/src/compiler/type-narrowing-reducer.cc | 1
-rw-r--r--  deps/v8/src/compiler/typed-optimization.cc | 5
-rw-r--r--  deps/v8/src/compiler/typer.cc | 7
-rw-r--r--  deps/v8/src/compiler/typer.h | 2
-rw-r--r--  deps/v8/src/compiler/types.cc | 14
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 18
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 214
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 22
-rw-r--r--  deps/v8/src/constant-pool.h | 8
-rw-r--r--  deps/v8/src/contexts-inl.h | 22
-rw-r--r--  deps/v8/src/contexts.cc | 35
-rw-r--r--  deps/v8/src/contexts.h | 24
-rw-r--r--  deps/v8/src/conversions-inl.h | 8
-rw-r--r--  deps/v8/src/conversions.h | 5
-rw-r--r--  deps/v8/src/counters.h | 18
-rw-r--r--  deps/v8/src/cpu-features.h | 2
-rw-r--r--  deps/v8/src/d8.cc | 69
-rw-r--r--  deps/v8/src/d8.h | 5
-rw-r--r--  deps/v8/src/date.cc | 21
-rw-r--r--  deps/v8/src/date.h | 5
-rw-r--r--  deps/v8/src/debug/arm64/debug-arm64.cc | 3
-rw-r--r--  deps/v8/src/debug/debug-coverage.cc | 57
-rw-r--r--  deps/v8/src/debug/debug-coverage.h | 4
-rw-r--r--  deps/v8/src/debug/debug-evaluate.cc | 11
-rw-r--r--  deps/v8/src/debug/debug-frames.cc | 2
-rw-r--r--  deps/v8/src/debug/debug-interface.h | 34
-rw-r--r--  deps/v8/src/debug/debug-scopes.cc | 80
-rw-r--r--  deps/v8/src/debug/debug-stack-trace-iterator.cc | 7
-rw-r--r--  deps/v8/src/debug/debug-type-profile.cc | 6
-rw-r--r--  deps/v8/src/debug/debug-type-profile.h | 2
-rw-r--r--  deps/v8/src/debug/debug.cc | 27
-rw-r--r--  deps/v8/src/debug/debug.h | 15
-rw-r--r--  deps/v8/src/debug/interface-types.h | 24
-rw-r--r--  deps/v8/src/debug/liveedit.cc | 5
-rw-r--r--  deps/v8/src/debug/x64/debug-x64.cc | 10
-rw-r--r--  deps/v8/src/deoptimizer.cc | 8
-rw-r--r--  deps/v8/src/deoptimizer.h | 9
-rw-r--r--  deps/v8/src/disasm.h | 2
-rw-r--r--  deps/v8/src/disassembler.cc | 8
-rw-r--r--  deps/v8/src/eh-frame.cc | 2
-rw-r--r--  deps/v8/src/eh-frame.h | 2
-rw-r--r--  deps/v8/src/elements.cc | 26
-rw-r--r--  deps/v8/src/execution.cc | 43
-rw-r--r--  deps/v8/src/external-reference.cc | 43
-rw-r--r--  deps/v8/src/external-reference.h | 37
-rw-r--r--  deps/v8/src/feedback-vector-inl.h | 13
-rw-r--r--  deps/v8/src/feedback-vector.cc | 52
-rw-r--r--  deps/v8/src/feedback-vector.h | 31
-rw-r--r--  deps/v8/src/field-index-inl.h | 16
-rw-r--r--  deps/v8/src/field-index.h | 8
-rw-r--r--  deps/v8/src/flag-definitions.h | 75
-rw-r--r--  deps/v8/src/flags.cc | 6
-rw-r--r--  deps/v8/src/flush-instruction-cache.cc | 27
-rw-r--r--  deps/v8/src/flush-instruction-cache.h | 23
-rw-r--r--  deps/v8/src/frames.cc | 110
-rw-r--r--  deps/v8/src/frames.h | 21
-rw-r--r--  deps/v8/src/function-kind.h | 190
-rw-r--r--  deps/v8/src/gdb-jit.cc | 22
-rw-r--r--  deps/v8/src/global-handles.cc | 666
-rw-r--r--  deps/v8/src/global-handles.h | 107
-rw-r--r--  deps/v8/src/globals.h | 232
-rw-r--r--  deps/v8/src/handler-table.cc | 9
-rw-r--r--  deps/v8/src/handler-table.h | 2
-rw-r--r--  deps/v8/src/handles.cc | 7
-rw-r--r--  deps/v8/src/hash-seed-inl.h | 50
-rw-r--r--  deps/v8/src/heap-symbols.h | 13
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker-inl.h | 7
-rw-r--r--  deps/v8/src/heap/code-stats.cc | 1
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 55
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h | 4
-rw-r--r--  deps/v8/src/heap/factory-inl.h | 3
-rw-r--r--  deps/v8/src/heap/factory.cc | 324
-rw-r--r--  deps/v8/src/heap/factory.h | 26
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.cc | 52
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.h | 61
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 27
-rw-r--r--  deps/v8/src/heap/gc-tracer.h | 8
-rw-r--r--  deps/v8/src/heap/heap-controller.cc | 6
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 153
-rw-r--r--  deps/v8/src/heap/heap-write-barrier-inl.h | 111
-rw-r--r--  deps/v8/src/heap/heap.cc | 626
-rw-r--r--  deps/v8/src/heap/heap.h | 195
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.cc | 59
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.h | 24
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 337
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 65
-rw-r--r--  deps/v8/src/heap/item-parallel-job.cc | 31
-rw-r--r--  deps/v8/src/heap/item-parallel-job.h | 24
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 51
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 244
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 82
-rw-r--r--  deps/v8/src/heap/marking.cc | 96
-rw-r--r--  deps/v8/src/heap/marking.h | 179
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc | 2
-rw-r--r--  deps/v8/src/heap/object-stats.cc | 1
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 31
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 42
-rw-r--r--  deps/v8/src/heap/read-only-heap.cc | 28
-rw-r--r--  deps/v8/src/heap/read-only-heap.h | 48
-rw-r--r--  deps/v8/src/heap/remembered-set.h | 8
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h | 133
-rw-r--r--  deps/v8/src/heap/scavenger.cc | 54
-rw-r--r--  deps/v8/src/heap/scavenger.h | 22
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 61
-rw-r--r--  deps/v8/src/heap/spaces-inl.h | 31
-rw-r--r--  deps/v8/src/heap/spaces.cc | 252
-rw-r--r--  deps/v8/src/heap/spaces.h | 174
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 53
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 46
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 64
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 34
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 1
-rw-r--r--  deps/v8/src/ia32/frame-constants-ia32.h | 30
-rw-r--r--  deps/v8/src/ia32/interface-descriptors-ia32.cc | 15
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 117
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 20
-rw-r--r--  deps/v8/src/ic/accessor-assembler.cc | 893
-rw-r--r--  deps/v8/src/ic/accessor-assembler.h | 46
-rw-r--r--  deps/v8/src/ic/handler-configuration.h | 4
-rw-r--r--  deps/v8/src/ic/ic.cc | 491
-rw-r--r--  deps/v8/src/ic/ic.h | 44
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.cc | 94
-rw-r--r--  deps/v8/src/ic/stub-cache.cc | 9
-rw-r--r--  deps/v8/src/ic/stub-cache.h | 4
-rw-r--r--  deps/v8/src/identity-map.h | 2
-rw-r--r--  deps/v8/src/inspector/BUILD.gn | 32
-rw-r--r--  deps/v8/src/inspector/DEPS | 2
-rw-r--r--  deps/v8/src/inspector/OWNERS | 3
-rw-r--r--  deps/v8/src/inspector/custom-preview.cc | 2
-rw-r--r--  deps/v8/src/inspector/injected-script.cc | 2
-rw-r--r--  deps/v8/src/inspector/inspected-context.cc | 1
-rw-r--r--  deps/v8/src/inspector/inspector_protocol_config.json | 2
-rw-r--r--  deps/v8/src/inspector/js_protocol.json | 3144
-rw-r--r--  deps/v8/src/inspector/string-16.cc | 51
-rw-r--r--  deps/v8/src/inspector/string-util.cc | 22
-rw-r--r--  deps/v8/src/inspector/string-util.h | 37
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-debugger.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-inspector-session-impl.cc | 55
-rw-r--r--  deps/v8/src/inspector/v8-inspector-session-impl.h | 3
-rw-r--r--  deps/v8/src/inspector/v8-profiler-agent-impl.cc | 16
-rw-r--r--  deps/v8/src/inspector/v8-runtime-agent-impl.cc | 2
-rw-r--r--  deps/v8/src/inspector/value-mirror.cc | 73
-rw-r--r--  deps/v8/src/interface-descriptors.h | 29
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.cc | 109
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.h | 26
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.cc | 181
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.h | 15
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.cc | 88
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.h | 3
-rw-r--r--  deps/v8/src/interpreter/bytecode-label.cc | 9
-rw-r--r--  deps/v8/src/interpreter/bytecode-label.h | 83
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-optimizer.h | 3
-rw-r--r--  deps/v8/src/interpreter/bytecode-register.h | 2
-rw-r--r--  deps/v8/src/interpreter/bytecodes.h | 2
-rw-r--r--  deps/v8/src/interpreter/control-flow-builders.cc | 4
-rw-r--r--  deps/v8/src/interpreter/control-flow-builders.h | 3
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.cc | 30
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.cc | 123
-rw-r--r--  deps/v8/src/isolate-data.h | 10
-rw-r--r--  deps/v8/src/isolate-inl.h | 78
-rw-r--r--  deps/v8/src/isolate.cc | 596
-rw-r--r--  deps/v8/src/isolate.h | 318
-rw-r--r--  deps/v8/src/json-parser.cc | 7
-rw-r--r--  deps/v8/src/json-parser.h | 4
-rw-r--r--  deps/v8/src/json-stringifier.cc | 130
-rw-r--r--  deps/v8/src/keys.cc | 133
-rw-r--r--  deps/v8/src/keys.h | 4
-rw-r--r--  deps/v8/src/label.h | 2
-rw-r--r--  deps/v8/src/layout-descriptor-inl.h | 2
-rw-r--r--  deps/v8/src/layout-descriptor.h | 2
-rw-r--r--  deps/v8/src/libplatform/default-platform.cc | 10
-rw-r--r--  deps/v8/src/libplatform/tracing/tracing-controller.cc | 69
-rw-r--r--  deps/v8/src/libsampler/OWNERS | 1
-rw-r--r--  deps/v8/src/libsampler/sampler.cc | 2
-rw-r--r--  deps/v8/src/libsampler/sampler.h | 11
-rw-r--r--  deps/v8/src/log.cc | 21
-rw-r--r--  deps/v8/src/lookup.cc | 7
-rw-r--r--  deps/v8/src/map-updater.cc | 161
-rw-r--r--  deps/v8/src/map-updater.h | 46
-rw-r--r--  deps/v8/src/memcopy.h | 65
-rw-r--r--  deps/v8/src/message-template.h | 22
-rw-r--r--  deps/v8/src/messages.cc | 35
-rw-r--r--  deps/v8/src/messages.h | 16
-rw-r--r--  deps/v8/src/microtask-queue.cc | 73
-rw-r--r--  deps/v8/src/microtask-queue.h | 40
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h | 30
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 44
-rw-r--r--  deps/v8/src/mips/assembler-mips.h | 22
-rw-r--r--  deps/v8/src/mips/interface-descriptors-mips.cc | 8
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 14
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 71
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 3
-rw-r--r--  deps/v8/src/mips64/assembler-mips64-inl.h | 22
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.cc | 44
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.h | 22
-rw-r--r--  deps/v8/src/mips64/interface-descriptors-mips64.cc | 8
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.cc | 14
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.h | 89
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.cc | 3
-rw-r--r--  deps/v8/src/objects-body-descriptors-inl.h | 59
-rw-r--r--  deps/v8/src/objects-debug.cc | 134
-rw-r--r--  deps/v8/src/objects-definitions.h | 34
-rw-r--r--  deps/v8/src/objects-inl.h | 379
-rw-r--r--  deps/v8/src/objects-printer.cc | 177
-rw-r--r--  deps/v8/src/objects.cc | 11749
-rw-r--r--  deps/v8/src/objects.h | 71
-rw-r--r--  deps/v8/src/objects/allocation-site-inl.h | 60
-rw-r--r--  deps/v8/src/objects/api-callbacks-inl.h | 5
-rw-r--r--  deps/v8/src/objects/api-callbacks.h | 27
-rw-r--r--  deps/v8/src/objects/arguments-inl.h | 3
-rw-r--r--  deps/v8/src/objects/arguments.h | 21
-rw-r--r--  deps/v8/src/objects/bigint.cc | 82
-rw-r--r--  deps/v8/src/objects/bigint.h | 10
-rw-r--r--  deps/v8/src/objects/cell-inl.h | 3
-rw-r--r--  deps/v8/src/objects/cell.h | 11
-rw-r--r--  deps/v8/src/objects/code-inl.h | 154
-rw-r--r--  deps/v8/src/objects/code.cc | 1080
-rw-r--r--  deps/v8/src/objects/code.h | 119
-rw-r--r--  deps/v8/src/objects/compilation-cache-inl.h | 5
-rw-r--r--  deps/v8/src/objects/compilation-cache.h | 3
-rw-r--r--  deps/v8/src/objects/data-handler.h | 23
-rw-r--r--  deps/v8/src/objects/debug-objects-inl.h | 4
-rw-r--r--  deps/v8/src/objects/debug-objects.h | 2
-rw-r--r--  deps/v8/src/objects/descriptor-array-inl.h | 8
-rw-r--r--  deps/v8/src/objects/descriptor-array.h | 2
-rw-r--r--  deps/v8/src/objects/dictionary-inl.h | 13
-rw-r--r--  deps/v8/src/objects/dictionary.h | 19
-rw-r--r--  deps/v8/src/objects/embedder-data-array-inl.h | 6
-rw-r--r--  deps/v8/src/objects/embedder-data-array.cc | 2
-rw-r--r--  deps/v8/src/objects/embedder-data-slot-inl.h | 6
-rw-r--r--  deps/v8/src/objects/embedder-data-slot.h | 4
-rw-r--r--  deps/v8/src/objects/feedback-cell-inl.h | 2
-rw-r--r--  deps/v8/src/objects/fixed-array-inl.h | 46
-rw-r--r--  deps/v8/src/objects/fixed-array.h | 47
-rw-r--r--  deps/v8/src/objects/foreign-inl.h | 5
-rw-r--r--  deps/v8/src/objects/foreign.h | 20
-rw-r--r--  deps/v8/src/objects/frame-array-inl.h | 5
-rw-r--r--  deps/v8/src/objects/frame-array.h | 11
-rw-r--r--  deps/v8/src/objects/free-space-inl.h | 21
-rw-r--r--  deps/v8/src/objects/hash-table-inl.h | 35
-rw-r--r--  deps/v8/src/objects/hash-table.h | 30
-rw-r--r--  deps/v8/src/objects/heap-number-inl.h | 15
-rw-r--r--  deps/v8/src/objects/heap-object-inl.h | 7
-rw-r--r--  deps/v8/src/objects/heap-object.h | 9
-rw-r--r--  deps/v8/src/objects/instance-type-inl.h | 6
-rw-r--r--  deps/v8/src/objects/instance-type.h | 59
-rw-r--r--  deps/v8/src/objects/intl-objects.cc | 74
-rw-r--r--  deps/v8/src/objects/intl-objects.h | 31
-rw-r--r--  deps/v8/src/objects/js-array-buffer-inl.h | 28
-rw-r--r--  deps/v8/src/objects/js-array-buffer.cc | 10
-rw-r--r--  deps/v8/src/objects/js-array-buffer.h | 10
-rw-r--r--  deps/v8/src/objects/js-array-inl.h | 4
-rw-r--r--  deps/v8/src/objects/js-array.h | 8
-rw-r--r--  deps/v8/src/objects/js-break-iterator-inl.h | 4
-rw-r--r--  deps/v8/src/objects/js-break-iterator.cc | 9
-rw-r--r--  deps/v8/src/objects/js-break-iterator.h | 4
-rw-r--r--  deps/v8/src/objects/js-collator-inl.h | 4
-rw-r--r--  deps/v8/src/objects/js-collator.cc | 13
-rw-r--r--  deps/v8/src/objects/js-collator.h | 4
-rw-r--r--  deps/v8/src/objects/js-collection-inl.h | 6
-rw-r--r--  deps/v8/src/objects/js-collection.h | 11
-rw-r--r--  deps/v8/src/objects/js-date-time-format-inl.h | 28
-rw-r--r--  deps/v8/src/objects/js-date-time-format.cc | 689
-rw-r--r--  deps/v8/src/objects/js-date-time-format.h | 28
-rw-r--r--  deps/v8/src/objects/js-generator.h | 12
-rw-r--r--  deps/v8/src/objects/js-list-format-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-list-format.cc | 12
-rw-r--r--  deps/v8/src/objects/js-list-format.h | 2
-rw-r--r--  deps/v8/src/objects/js-locale-inl.h | 4
-rw-r--r--  deps/v8/src/objects/js-locale.cc | 238
-rw-r--r--  deps/v8/src/objects/js-number-format-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-number-format.cc | 129
-rw-r--r--  deps/v8/src/objects/js-number-format.h | 13
-rw-r--r--  deps/v8/src/objects/js-objects-inl.h | 102
-rw-r--r--  deps/v8/src/objects/js-objects.cc | 5804
-rw-r--r--  deps/v8/src/objects/js-objects.h | 120
-rw-r--r--  deps/v8/src/objects/js-plural-rules-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-plural-rules.cc | 9
-rw-r--r--  deps/v8/src/objects/js-plural-rules.h | 2
-rw-r--r--  deps/v8/src/objects/js-promise.h | 12
-rw-r--r--  deps/v8/src/objects/js-proxy.h | 29
-rw-r--r--  deps/v8/src/objects/js-regexp-inl.h | 14
-rw-r--r--  deps/v8/src/objects/js-regexp.h | 2
-rw-r--r--  deps/v8/src/objects/js-relative-time-format-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.cc | 15
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.h | 2
-rw-r--r--  deps/v8/src/objects/js-segment-iterator-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-segment-iterator.cc | 9
-rw-r--r--  deps/v8/src/objects/js-segmenter-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-segmenter.cc | 9
-rw-r--r--  deps/v8/src/objects/js-segmenter.h | 2
-rw-r--r--  deps/v8/src/objects/js-weak-refs-inl.h | 294
-rw-r--r--  deps/v8/src/objects/js-weak-refs.h | 172
-rw-r--r--  deps/v8/src/objects/literal-objects-inl.h | 22
-rw-r--r--  deps/v8/src/objects/literal-objects.cc | 6
-rw-r--r--  deps/v8/src/objects/literal-objects.h | 16
-rw-r--r--  deps/v8/src/objects/map-inl.h | 99
-rw-r--r--  deps/v8/src/objects/map.cc | 2700
-rw-r--r--  deps/v8/src/objects/map.h | 115
-rw-r--r--  deps/v8/src/objects/maybe-object-inl.h | 3
-rw-r--r--  deps/v8/src/objects/microtask-inl.h | 2
-rw-r--r--  deps/v8/src/objects/microtask.h | 22
-rw-r--r--  deps/v8/src/objects/module.cc | 1
-rw-r--r--  deps/v8/src/objects/module.h | 16
-rw-r--r--  deps/v8/src/objects/name-inl.h | 18
-rw-r--r--  deps/v8/src/objects/object-macros.h | 54
-rw-r--r--  deps/v8/src/objects/oddball-inl.h | 22
-rw-r--r--  deps/v8/src/objects/ordered-hash-table-inl.h | 1
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.cc | 14
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.h | 26
-rw-r--r--  deps/v8/src/objects/promise-inl.h | 6
-rw-r--r--  deps/v8/src/objects/promise.h | 33
-rw-r--r--  deps/v8/src/objects/property-array-inl.h | 1
-rw-r--r--  deps/v8/src/objects/property-cell-inl.h | 4
-rw-r--r--  deps/v8/src/objects/property-descriptor-object.h | 2
-rw-r--r--  deps/v8/src/objects/prototype-info-inl.h | 5
-rw-r--r--  deps/v8/src/objects/scope-info.cc | 349
-rw-r--r--  deps/v8/src/objects/scope-info.h | 7
-rw-r--r--  deps/v8/src/objects/shared-function-info-inl.h | 41
-rw-r--r--  deps/v8/src/objects/shared-function-info.h | 133
-rw-r--r--  deps/v8/src/objects/slots-atomic-inl.h | 4
-rw-r--r--  deps/v8/src/objects/slots-inl.h | 27
-rw-r--r--  deps/v8/src/objects/slots.h | 8
-rw-r--r--  deps/v8/src/objects/smi.h | 9
-rw-r--r--  deps/v8/src/objects/stack-frame-info-inl.h | 28
-rw-r--r--  deps/v8/src/objects/stack-frame-info.cc | 83
-rw-r--r--  deps/v8/src/objects/stack-frame-info.h | 69
-rw-r--r--  deps/v8/src/objects/string-comparator.cc | 74
-rw-r--r--  deps/v8/src/objects/string-comparator.h | 109
-rw-r--r--  deps/v8/src/objects/string-inl.h | 44
-rw-r--r--  deps/v8/src/objects/string-table-inl.h | 4
-rw-r--r--  deps/v8/src/objects/string-table.h | 8
-rw-r--r--  deps/v8/src/objects/string.cc | 1526
-rw-r--r--  deps/v8/src/objects/string.h | 7
-rw-r--r--  deps/v8/src/objects/struct-inl.h | 13
-rw-r--r--  deps/v8/src/objects/struct.h | 41
-rw-r--r--  deps/v8/src/objects/template-objects-inl.h | 37
-rw-r--r--  deps/v8/src/objects/template-objects.cc | 52
-rw-r--r--  deps/v8/src/objects/template-objects.h | 38
-rw-r--r--  deps/v8/src/objects/templates-inl.h | 3
-rw-r--r--  deps/v8/src/objects/templates.h | 16
-rw-r--r--  deps/v8/src/optimized-compilation-info.cc | 10
-rw-r--r--  deps/v8/src/optimized-compilation-info.h | 10
-rw-r--r--  deps/v8/src/ostreams.cc | 43
-rw-r--r--  deps/v8/src/ostreams.h | 26
-rw-r--r--  deps/v8/src/parsing/expression-scope.h | 92
-rw-r--r--  deps/v8/src/parsing/keywords-gen.h | 64
-rw-r--r--  deps/v8/src/parsing/keywords.txt | 2
-rw-r--r--  deps/v8/src/parsing/parse-info.cc | 10
-rw-r--r--  deps/v8/src/parsing/parse-info.h | 64
-rw-r--r--  deps/v8/src/parsing/parser-base.h | 545
-rw-r--r--  deps/v8/src/parsing/parser.cc | 287
-rw-r--r--  deps/v8/src/parsing/parser.h | 79
-rw-r--r--  deps/v8/src/parsing/parsing.cc | 1
-rw-r--r--  deps/v8/src/parsing/pattern-rewriter.cc | 218
-rw-r--r--  deps/v8/src/parsing/preparse-data-impl.h | 14
-rw-r--r--  deps/v8/src/parsing/preparse-data.cc | 183
-rw-r--r--  deps/v8/src/parsing/preparse-data.h | 54
-rw-r--r--  deps/v8/src/parsing/preparser.cc | 48
-rw-r--r--  deps/v8/src/parsing/preparser.h | 126
-rw-r--r--  deps/v8/src/parsing/rewriter.cc | 1
-rw-r--r--  deps/v8/src/parsing/scanner-character-streams.cc | 89
-rw-r--r--  deps/v8/src/parsing/scanner-inl.h | 35
-rw-r--r--  deps/v8/src/parsing/scanner.cc | 126
-rw-r--r--  deps/v8/src/parsing/scanner.h | 31
-rw-r--r--  deps/v8/src/parsing/token.cc | 3
-rw-r--r--  deps/v8/src/parsing/token.h | 2
-rw-r--r--  deps/v8/src/pending-compilation-error-handler.cc | 9
-rw-r--r--  deps/v8/src/perf-jit.cc | 4
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h | 23
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc | 33
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h | 19
-rw-r--r--  deps/v8/src/ppc/constants-ppc.h | 6
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc | 15
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc | 75
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h | 46
-rw-r--r--  deps/v8/src/ppc/register-ppc.h | 4
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc | 4
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.h | 2
-rw-r--r--  deps/v8/src/profiler/allocation-tracker.cc | 2
-rw-r--r--  deps/v8/src/profiler/cpu-profiler.cc | 27
-rw-r--r--  deps/v8/src/profiler/cpu-profiler.h | 4
-rw-r--r--  deps/v8/src/profiler/heap-profiler.cc | 32
-rw-r--r--  deps/v8/src/profiler/heap-profiler.h | 13
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.cc | 277
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.h | 43
-rw-r--r--  deps/v8/src/profiler/profile-generator.cc | 26
-rw-r--r--  deps/v8/src/profiler/profile-generator.h | 14
-rw-r--r--  deps/v8/src/profiler/profiler-listener.h | 2
-rw-r--r--  deps/v8/src/profiler/sampling-heap-profiler.cc | 6
-rw-r--r--  deps/v8/src/profiler/tick-sample.cc | 13
-rw-r--r--  deps/v8/src/property-descriptor.cc | 1
-rw-r--r--  deps/v8/src/property-details.h | 58
-rw-r--r--  deps/v8/src/prototype-inl.h | 1
-rw-r--r--  deps/v8/src/ptr-compr-inl.h | 77
-rw-r--r--  deps/v8/src/ptr-compr.h | 2
-rw-r--r--  deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc | 7
-rw-r--r--  deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h | 5
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc | 51
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h | 20
-rw-r--r--  deps/v8/src/regexp/bytecodes-irregexp.h | 4
-rw-r--r--  deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc | 54
-rw-r--r--  deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h | 35
-rw-r--r--  deps/v8/src/regexp/interpreter-irregexp.cc | 4
-rw-r--r--  deps/v8/src/regexp/interpreter-irregexp.h | 4
-rw-r--r--  deps/v8/src/regexp/jsregexp.cc | 258
-rw-r--r--  deps/v8/src/regexp/jsregexp.h | 11
-rw-r--r--  deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc | 8
-rw-r--r--  deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h | 4
-rw-r--r--  deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc | 9
-rw-r--r--  deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h | 4
-rw-r--r--  deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc | 8
-rw-r--r--  deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h | 3
-rw-r--r--  deps/v8/src/regexp/regexp-ast.h | 6
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h | 4
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc | 10
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler-irregexp.h | 19
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler.cc | 34
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler.h | 22
-rw-r--r--  deps/v8/src/regexp/regexp-parser.cc | 9
-rw-r--r--  deps/v8/src/regexp/regexp-utils.cc | 6
-rw-r--r--  deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc | 6
-rw-r--r--  deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h | 2
-rw-r--r--  deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc | 286
-rw-r--r--  deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h | 48
-rw-r--r--  deps/v8/src/register-configuration.cc | 4
-rw-r--r--  deps/v8/src/reloc-info.h | 27
-rw-r--r--  deps/v8/src/roots-inl.h | 42
-rw-r--r--  deps/v8/src/roots.cc | 20
-rw-r--r--  deps/v8/src/roots.h | 23
-rw-r--r--  deps/v8/src/runtime/runtime-array.cc | 14
-rw-r--r--  deps/v8/src/runtime/runtime-classes.cc | 70
-rw-r--r--  deps/v8/src/runtime/runtime-collections.cc | 1
-rw-r--r--  deps/v8/src/runtime/runtime-debug.cc | 9
-rw-r--r--  deps/v8/src/runtime/runtime-forin.cc | 1
-rw-r--r--  deps/v8/src/runtime/runtime-function.cc | 1
-rw-r--r--  deps/v8/src/runtime/runtime-internal.cc | 41
-rw-r--r--  deps/v8/src/runtime/runtime-numbers.cc | 1
-rw-r--r--  deps/v8/src/runtime/runtime-object.cc | 69
-rw-r--r--  deps/v8/src/runtime/runtime-operators.cc | 1
-rw-r--r--  deps/v8/src/runtime/runtime-promise.cc | 13
-rw-r--r--  deps/v8/src/runtime/runtime-proxy.cc | 8
-rw-r--r--  deps/v8/src/runtime/runtime-regexp.cc | 56
-rw-r--r--  deps/v8/src/runtime/runtime-scopes.cc | 11
-rw-r--r--  deps/v8/src/runtime/runtime-strings.cc | 304
-rw-r--r--  deps/v8/src/runtime/runtime-symbol.cc | 1
-rw-r--r--  deps/v8/src/runtime/runtime-test.cc | 85
-rw-r--r--  deps/v8/src/runtime/runtime-wasm.cc | 91
-rw-r--r--  deps/v8/src/runtime/runtime-weak-refs.cc | 8
-rw-r--r--  deps/v8/src/runtime/runtime.cc | 2
-rw-r--r--  deps/v8/src/runtime/runtime.h | 48
-rw-r--r--  deps/v8/src/s390/assembler-s390-inl.h | 24
-rw-r--r--  deps/v8/src/s390/assembler-s390.cc | 33
-rw-r--r--  deps/v8/src/s390/assembler-s390.h | 64
-rw-r--r--  deps/v8/src/s390/constants-s390.h | 41
-rw-r--r--  deps/v8/src/s390/interface-descriptors-s390.cc | 15
-rw-r--r--  deps/v8/src/s390/macro-assembler-s390.cc | 72
-rw-r--r--  deps/v8/src/s390/macro-assembler-s390.h | 14
-rw-r--r--  deps/v8/src/s390/register-s390.h | 4
-rw-r--r--  deps/v8/src/s390/simulator-s390.cc | 59
-rw-r--r--  deps/v8/src/safepoint-table.h | 3
-rw-r--r--  deps/v8/src/snapshot/OWNERS | 1
-rw-r--r--  deps/v8/src/snapshot/code-serializer.cc | 59
-rw-r--r--  deps/v8/src/snapshot/code-serializer.h | 31
-rw-r--r--  deps/v8/src/snapshot/deserializer-allocator.cc | 26
-rw-r--r--  deps/v8/src/snapshot/deserializer-allocator.h | 9
-rw-r--r--  deps/v8/src/snapshot/deserializer.cc | 609
-rw-r--r--  deps/v8/src/snapshot/deserializer.h | 44
-rw-r--r--  deps/v8/src/snapshot/embedded-data.cc | 30
-rw-r--r--  deps/v8/src/snapshot/embedded-data.h | 23
-rw-r--r--  deps/v8/src/snapshot/embedded-file-writer.cc | 38
-rw-r--r--  deps/v8/src/snapshot/embedded-file-writer.h | 35
-rw-r--r--  deps/v8/src/snapshot/object-deserializer.cc | 6
-rw-r--r--  deps/v8/src/snapshot/partial-deserializer.cc | 1
-rw-r--r--  deps/v8/src/snapshot/partial-serializer.cc | 29
-rw-r--r--  deps/v8/src/snapshot/partial-serializer.h | 6
-rw-r--r--  deps/v8/src/snapshot/read-only-deserializer.cc | 24
-rw-r--r--  deps/v8/src/snapshot/read-only-deserializer.h | 6
-rw-r--r--  deps/v8/src/snapshot/read-only-serializer.cc | 28
-rw-r--r--  deps/v8/src/snapshot/read-only-serializer.h | 8
-rw-r--r--  deps/v8/src/snapshot/references.h | 2
-rw-r--r--  deps/v8/src/snapshot/roots-serializer.cc | 4
-rw-r--r--  deps/v8/src/snapshot/serializer-common.h | 280
-rw-r--r--  deps/v8/src/snapshot/serializer.cc | 266
-rw-r--r--  deps/v8/src/snapshot/serializer.h | 36
-rw-r--r--  deps/v8/src/snapshot/snapshot-common.cc | 11
-rw-r--r--  deps/v8/src/snapshot/snapshot-source-sink.h | 2
-rw-r--r--  deps/v8/src/snapshot/startup-deserializer.cc | 11
-rw-r--r--  deps/v8/src/snapshot/startup-deserializer.h | 7
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.cc | 82
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.h | 12
-rw-r--r--  deps/v8/src/source-position-table.cc | 5
-rw-r--r--  deps/v8/src/source-position-table.h | 6
-rw-r--r--  deps/v8/src/string-builder-inl.h | 9
-rw-r--r--  deps/v8/src/string-builder.cc | 19
-rw-r--r--  deps/v8/src/string-case.cc | 41
-rw-r--r--  deps/v8/src/string-stream.cc | 10
-rw-r--r--  deps/v8/src/string-stream.h | 33
-rw-r--r--  deps/v8/src/thread-id.cc | 14
-rw-r--r--  deps/v8/src/thread-id.h | 31
-rw-r--r--  deps/v8/src/thread-local-top.cc | 30
-rw-r--r--  deps/v8/src/thread-local-top.h | 122
-rw-r--r--  deps/v8/src/torque/ast.h | 79
-rw-r--r--  deps/v8/src/torque/contextual.h | 4
-rw-r--r--  deps/v8/src/torque/csa-generator.cc | 136
-rw-r--r--  deps/v8/src/torque/csa-generator.h | 8
-rw-r--r--  deps/v8/src/torque/declarable.cc | 15
-rw-r--r--  deps/v8/src/torque/declarable.h | 58
-rw-r--r--  deps/v8/src/torque/declaration-visitor.cc | 408
-rw-r--r--  deps/v8/src/torque/declaration-visitor.h | 14
-rw-r--r--  deps/v8/src/torque/declarations.cc | 96
-rw-r--r--  deps/v8/src/torque/declarations.h | 18
-rw-r--r--  deps/v8/src/torque/earley-parser.cc | 50
-rw-r--r--  deps/v8/src/torque/earley-parser.h | 53
-rw-r--r--  deps/v8/src/torque/global-context.h | 23
-rw-r--r--  deps/v8/src/torque/implementation-visitor.cc | 594
-rw-r--r--  deps/v8/src/torque/implementation-visitor.h | 118
-rw-r--r--  deps/v8/src/torque/instructions.h | 28
-rw-r--r--  deps/v8/src/torque/ls/globals.h | 58
-rw-r--r--  deps/v8/src/torque/ls/json-parser.cc | 195
-rw-r--r--  deps/v8/src/torque/ls/json-parser.h | 23
-rw-r--r--  deps/v8/src/torque/ls/json.cc | 69
-rw-r--r--  deps/v8/src/torque/ls/json.h | 123
-rw-r--r--  deps/v8/src/torque/ls/message-handler.cc | 224
-rw-r--r--  deps/v8/src/torque/ls/message-handler.h | 27
-rw-r--r--  deps/v8/src/torque/ls/message-macros.h | 59
-rw-r--r--  deps/v8/src/torque/ls/message-pipe.h | 24
-rw-r--r--  deps/v8/src/torque/ls/message.h | 291
-rw-r--r--  deps/v8/src/torque/ls/torque-language-server.cc | 52
-rw-r--r--  deps/v8/src/torque/server-data.cc | 30
-rw-r--r--  deps/v8/src/torque/server-data.h | 46
-rw-r--r--  deps/v8/src/torque/source-positions.h | 55
-rw-r--r--  deps/v8/src/torque/torque-compiler.cc | 97
-rw-r--r--  deps/v8/src/torque/torque-compiler.h | 33
-rw-r--r--  deps/v8/src/torque/torque-parser.cc | 244
-rw-r--r--  deps/v8/src/torque/torque.cc | 63
-rw-r--r--  deps/v8/src/torque/type-oracle.h | 51
-rw-r--r--  deps/v8/src/torque/types.cc | 187
-rw-r--r--  deps/v8/src/torque/types.h | 128
-rw-r--r--  deps/v8/src/torque/utils.cc | 52
-rw-r--r--  deps/v8/src/torque/utils.h | 48
-rw-r--r--  deps/v8/src/tracing/trace-event.h | 25
-rw-r--r--  deps/v8/src/tracing/traced-value.cc | 1
-rw-r--r--  deps/v8/src/transitions.cc | 18
-rw-r--r--  deps/v8/src/transitions.h | 4
-rw-r--r--  deps/v8/src/trap-handler/handler-outside.cc | 14
-rw-r--r--  deps/v8/src/turbo-assembler.cc | 2
-rw-r--r--  deps/v8/src/unicode-inl.h | 47
-rw-r--r--  deps/v8/src/unicode.cc | 57
-rw-r--r--  deps/v8/src/unicode.h | 4
-rw-r--r--  deps/v8/src/unoptimized-compilation-info.cc | 19
-rw-r--r--  deps/v8/src/unoptimized-compilation-info.h | 6
-rw-r--r--  deps/v8/src/utils.h | 121
-rw-r--r--  deps/v8/src/v8.cc | 18
-rw-r--r--  deps/v8/src/v8memory.h | 14
-rw-r--r--  deps/v8/src/value-serializer.cc | 6
-rw-r--r--  deps/v8/src/vector.h | 29
-rw-r--r--  deps/v8/src/visitors.cc | 8
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 16
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 2
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 2
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc | 14
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h | 14
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc | 108
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 4
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 4
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 38
-rw-r--r--  deps/v8/src/wasm/compilation-environment.h | 16
-rw-r--r--  deps/v8/src/wasm/decoder.h | 6
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h | 269
-rw-r--r--  deps/v8/src/wasm/function-compiler.cc | 18
-rw-r--r--  deps/v8/src/wasm/function-compiler.h | 11
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.cc | 51
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.cc | 38
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.h | 51
-rw-r--r--  deps/v8/src/wasm/local-decl-encoder.cc | 4
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc | 1
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc | 697
-rw-r--r--  deps/v8/src/wasm/module-compiler.h | 7
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc | 80
-rw-r--r--  deps/v8/src/wasm/module-instantiate.cc | 296
-rw-r--r--  deps/v8/src/wasm/module-instantiate.h | 7
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.cc | 49
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.h | 6
-rw-r--r--  deps/v8/src/wasm/value-type.h | 3
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc | 388
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h | 116
-rw-r--r--  deps/v8/src/wasm/wasm-constants.h | 6
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc | 81
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc | 237
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h | 48
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc | 3
-rw-r--r--  deps/v8/src/wasm/wasm-feature-flags.h | 4
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc | 595
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.h | 24
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc | 75
-rw-r--r--  deps/v8/src/wasm/wasm-limits.h | 8
-rw-r--r--  deps/v8/src/wasm/wasm-memory.cc | 52
-rw-r--r--  deps/v8/src/wasm/wasm-memory.h | 10
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.h | 1
-rw-r--r--  deps/v8/src/wasm/wasm-module.h | 5
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h | 18
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc | 247
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h | 126
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.cc | 22
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h | 40
-rw-r--r--  deps/v8/src/wasm/wasm-result.h | 11
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc | 59
-rw-r--r--  deps/v8/src/wasm/wasm-tier.h | 4
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 89
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 165
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 161
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 74
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 3
-rw-r--r--  deps/v8/src/x64/frame-constants-x64.h | 7
-rw-r--r--  deps/v8/src/x64/interface-descriptors-x64.cc | 15
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 474
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 57
-rw-r--r--  deps/v8/src/zone/accounting-allocator.cc | 195
-rw-r--r--  deps/v8/src/zone/accounting-allocator.h | 67
-rw-r--r--  deps/v8/src/zone/zone-chunk-list.h | 5
-rw-r--r--  deps/v8/src/zone/zone-list-inl.h | 4
-rw-r--r--  deps/v8/src/zone/zone-segment.h | 24
-rw-r--r--  deps/v8/src/zone/zone-splay-tree.h | 38
-rw-r--r--  deps/v8/src/zone/zone.cc | 21
-rw-r--r--  deps/v8/src/zone/zone.h | 42
-rw-r--r--  deps/v8/test/BUILD.gn | 21
-rw-r--r--  deps/v8/test/OWNERS | 3
-rw-r--r--  deps/v8/test/benchmarks/benchmarks.status | 10
-rw-r--r--  deps/v8/test/benchmarks/testcfg.py | 22
-rw-r--r--  deps/v8/test/cctest/BUILD.gn | 24
-rw-r--r--  deps/v8/test/cctest/OWNERS | 1
-rw-r--r--  deps/v8/test/cctest/assembler-helper-arm.cc | 6
-rw-r--r--  deps/v8/test/cctest/assembler-helper-arm.h | 4
-rw-r--r--  deps/v8/test/cctest/cctest.cc | 45
-rw-r--r--  deps/v8/test/cctest/cctest.h | 14
-rw-r--r--  deps/v8/test/cctest/cctest.status | 116
-rw-r--r--  deps/v8/test/cctest/compiler/codegen-tester.cc | 136
-rw-r--r--  deps/v8/test/cctest/compiler/codegen-tester.h | 3
-rw-r--r--  deps/v8/test/cctest/compiler/function-tester.cc | 22
-rw-r--r--  deps/v8/test/cctest/compiler/serializer-tester.cc | 219
-rw-r--r--  deps/v8/test/cctest/compiler/serializer-tester.h | 42
-rw-r--r--  deps/v8/test/cctest/compiler/test-branch-combine.cc | 141
-rw-r--r--  deps/v8/test/cctest/compiler/test-code-generator.cc | 33
-rw-r--r--  deps/v8/test/cctest/compiler/test-js-constant-cache.cc | 7
-rw-r--r--  deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc | 139
-rw-r--r--  deps/v8/test/cctest/compiler/test-multiple-return.cc | 2
-rw-r--r--  deps/v8/test/cctest/compiler/test-representation-change.cc | 14
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc | 4
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-load-store.cc | 126
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-machops.cc | 1446
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-native-calls.cc | 11
-rw-r--r--  deps/v8/test/cctest/compiler/value-helper.h | 11
-rw-r--r--  deps/v8/test/cctest/heap/heap-utils.cc | 17
-rw-r--r--  deps/v8/test/cctest/heap/heap-utils.h | 12
-rw-r--r--  deps/v8/test/cctest/heap/test-alloc.cc | 18
-rw-r--r--  deps/v8/test/cctest/heap/test-array-buffer-tracker.cc | 3
-rw-r--r--  deps/v8/test/cctest/heap/test-compaction.cc | 20
-rw-r--r--  deps/v8/test/cctest/heap/test-embedder-tracing.cc | 348
-rw-r--r--  deps/v8/test/cctest/heap/test-external-string-tracker.cc | 5
-rw-r--r--  deps/v8/test/cctest/heap/test-heap.cc | 405
-rw-r--r--  deps/v8/test/cctest/heap/test-incremental-marking.cc | 6
-rw-r--r--  deps/v8/test/cctest/heap/test-invalidated-slots.cc | 21
-rw-r--r--  deps/v8/test/cctest/heap/test-mark-compact.cc | 15
-rw-r--r--  deps/v8/test/cctest/heap/test-page-promotion.cc | 5
-rw-r--r--  deps/v8/test/cctest/heap/test-spaces.cc | 172
-rw-r--r--  deps/v8/test/cctest/heap/test-weak-references.cc | 35
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc | 5
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden | 110
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden | 8
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden | 10
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden | 2
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden | 24
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden | 4
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden | 40
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden | 14
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden | 13
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden | 126
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden | 8
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden | 126
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden | 24
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden | 30
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden | 42
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden | 66
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden | 20
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden | 8
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden | 42
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden | 42
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden | 136
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden | 54
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden | 4
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden | 7
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden | 4506
-rw-r--r--  deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc | 46
-rw-r--r--  deps/v8/test/cctest/interpreter/interpreter-tester.cc | 1
-rw-r--r--  deps/v8/test/cctest/interpreter/test-bytecode-generator.cc | 3
-rw-r--r--  deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc | 1
-rw-r--r--  deps/v8/test/cctest/interpreter/test-interpreter.cc | 157
-rw-r--r--  deps/v8/test/cctest/interpreter/test-source-positions.cc | 1
-rw-r--r--  deps/v8/test/cctest/libplatform/test-tracing.cc | 54
-rw-r--r--  deps/v8/test/cctest/libsampler/test-sampler.cc | 4
-rw-r--r--  deps/v8/test/cctest/parsing/test-preparser.cc | 28
-rw-r--r--  deps/v8/test/cctest/parsing/test-scanner-streams.cc | 18
-rw-r--r--  deps/v8/test/cctest/scope-test-helper.h | 14
-rw-r--r--  deps/v8/test/cctest/test-allocation.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-api-interceptors.cc | 198
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 1252
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc | 88
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm64.cc | 82
-rw-r--r--  deps/v8/test/cctest/test-assembler-ia32.cc | 28
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips.cc | 11
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips64.cc | 12
-rw-r--r--  deps/v8/test/cctest/test-assembler-ppc.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-code-layout.cc | 26
-rw-r--r--  deps/v8/test/cctest/test-code-stub-assembler.cc | 20
-rw-r--r--  deps/v8/test/cctest/test-compiler.cc | 28
-rw-r--r--  deps/v8/test/cctest/test-cpu-profiler.cc | 233
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 111
-rw-r--r--  deps/v8/test/cctest/test-decls.cc | 55
-rw-r--r--  deps/v8/test/cctest/test-dictionary.cc | 5
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm64.cc | 10
-rw-r--r--  deps/v8/test/cctest/test-disasm-x64.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-elements-kind.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-factory.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-feedback-vector.cc | 64
-rw-r--r--  deps/v8/test/cctest/test-field-type-tracking.cc | 112
-rw-r--r--  deps/v8/test/cctest/test-func-name-inference.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-global-handles.cc | 80
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-icache.cc | 16
-rw-r--r--  deps/v8/test/cctest/test-inobject-slack-tracking.cc | 150
-rw-r--r--  deps/v8/test/cctest/test-intl.cc | 9
-rw-r--r--  deps/v8/test/cctest/test-js-weak-refs.cc | 609
-rw-r--r--  deps/v8/test/cctest/test-lockers.cc | 27
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-arm.cc | 87
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-x64.cc | 168
-rw-r--r--  deps/v8/test/cctest/test-mementos.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-object.cc | 159
-rw-r--r--  deps/v8/test/cctest/test-parsing.cc | 133
-rw-r--r--  deps/v8/test/cctest/test-profile-generator.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 52
-rw-r--r--  deps/v8/test/cctest/test-representation.cc | 69
-rw-r--r--  deps/v8/test/cctest/test-roots.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc | 127
-rw-r--r--  deps/v8/test/cctest/test-strings.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-thread-termination.cc | 108
-rw-r--r--  deps/v8/test/cctest/test-trace-event.cc | 20
-rw-r--r--  deps/v8/test/cctest/test-transitions.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-types.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-unboxed-doubles.cc | 13
-rw-r--r--  deps/v8/test/cctest/test-unscopables-hidden-prototype.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-unwinder.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-weakmaps.cc | 38
-rw-r--r--  deps/v8/test/cctest/test-weaksets.cc | 5
-rw-r--r--  deps/v8/test/cctest/testcfg.py | 15
-rw-r--r--  deps/v8/test/cctest/torque/test-torque.cc | 32
-rw-r--r--  deps/v8/test/cctest/unicode-helpers.cc | 8
-rw-r--r--  deps/v8/test/cctest/wasm/test-c-wasm-entry.cc | 10
-rw-r--r--  deps/v8/test/cctest/wasm/test-jump-table-assembler.cc | 31
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-64.cc | 328
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc | 16
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc | 78
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc | 74
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc | 220
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc | 126
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-js.cc | 105
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-module.cc | 3
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-simd.cc | 118
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm.cc | 310
-rw-r--r--  deps/v8/test/cctest/wasm/test-streaming-compilation.cc | 6
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc | 3
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc | 16
-rw-r--r--  deps/v8/test/cctest/wasm/wasm-run-utils.cc | 57
-rw-r--r--  deps/v8/test/cctest/wasm/wasm-run-utils.h | 58
-rw-r--r--  deps/v8/test/common/assembler-tester.h | 3
-rw-r--r--  deps/v8/test/common/types-fuzz.h (renamed from deps/v8/test/cctest/types-fuzz.h) | 12
-rw-r--r--  deps/v8/test/common/wasm/wasm-macro-gen.h | 37
-rw-r--r--  deps/v8/test/common/wasm/wasm-module-runner.cc | 5
-rw-r--r--  deps/v8/test/debugger/debug/debug-evaluate-dead-function-fails.js | 35
-rw-r--r--  deps/v8/test/debugger/debug/debug-evaluate-modify-catch-block-scope.js | 2
-rw-r--r--  deps/v8/test/debugger/debug/debug-optimize.js | 2
-rw-r--r--  deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js | 106
-rw-r--r--  deps/v8/test/debugger/debug/es6/debug-stepin-default-parameters.js | 2
-rw-r--r--  deps/v8/test/debugger/debug/regress-3225.js | 2
-rw-r--r--  deps/v8/test/debugger/debug/regress/regress-1170187.js | 8
-rw-r--r--  deps/v8/test/debugger/debug/regress/regress-crbug-323936.js | 5
-rw-r--r--  deps/v8/test/debugger/debug/wasm/frame-inspection.js | 1
-rw-r--r--  deps/v8/test/debugger/debugger.status | 19
-rw-r--r--  deps/v8/test/debugger/regress/regress-crbug-840288.js | 1
-rw-r--r--  deps/v8/test/debugger/testcfg.py | 24
-rw-r--r--  deps/v8/test/fuzzer/fuzzer.status | 10
-rw-r--r--  deps/v8/test/fuzzer/multi-return.cc | 2
-rw-r--r--  deps/v8/test/fuzzer/testcfg.py | 39
-rw-r--r--  deps/v8/test/fuzzer/wasm-fuzzer-common.cc | 1
-rw-r--r--  deps/v8/test/inspector/BUILD.gn | 1
-rw-r--r--  deps/v8/test/inspector/cpu-profiler/console-profile-wasm-expected.txt | 4
-rw-r--r--  deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js | 80
-rw-r--r--  deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module-expected.txt | 2
-rw-r--r--  deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module.js | 5
-rw-r--r--  deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values-expected.txt | 354
-rw-r--r--  deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values.js | 131
-rw-r--r--  deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt | 6
-rw-r--r--  deps/v8/test/inspector/debugger/get-possible-breakpoints.js | 2
-rw-r--r--  deps/v8/test/inspector/debugger/get-properties-paused-expected.txt | 89
-rw-r--r--  deps/v8/test/inspector/debugger/get-properties-paused.js | 109
-rw-r--r--  deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt | 36
-rw-r--r--  deps/v8/test/inspector/debugger/object-preview-internal-properties.js | 10
-rw-r--r--  deps/v8/test/inspector/debugger/set-async-call-stack-depth-expected.txt | 40
-rw-r--r--  deps/v8/test/inspector/debugger/set-async-call-stack-depth.js | 53
-rw-r--r--  deps/v8/test/inspector/debugger/set-breakpoint-url-regex-expected.txt | 30
-rw-r--r--  deps/v8/test/inspector/debugger/set-breakpoint-url-regex.js | 57
-rw-r--r--  deps/v8/test/inspector/debugger/set-variable-value-expected.txt | 260
-rw-r--r--  deps/v8/test/inspector/debugger/set-variable-value.js | 90
-rw-r--r--  deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js | 1
-rw-r--r--  deps/v8/test/inspector/debugger/step-snapshot-expected.txt | 6
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js | 1
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-imports.js | 1
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-reset-context-group.js | 1
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-scope-info.js | 1
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-scripts.js | 1
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-set-breakpoint.js | 1
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-source.js | 1
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-stack.js | 1
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js | 1
-rw-r--r--  deps/v8/test/inspector/debugger/wasm-stepping.js | 1
-rw-r--r--  deps/v8/test/inspector/inspector.status | 17
-rw-r--r--  deps/v8/test/inspector/isolate-data.cc | 26
-rw-r--r--  deps/v8/test/inspector/runtime/exceptionthrown-on-connect-expected.txt | 46
-rw-r--r--  deps/v8/test/inspector/runtime/exceptionthrown-on-connect.js | 25
-rw-r--r--  deps/v8/test/inspector/runtime/release-object-expected.txt | 157
-rw-r--r--  deps/v8/test/inspector/runtime/release-object.js | 79
-rw-r--r--  deps/v8/test/inspector/testcfg.py | 30
-rw-r--r--  deps/v8/test/intl/assert.js | 63
-rw-r--r--  deps/v8/test/intl/bigint/tolocalestring.js | 61
-rw-r--r--  deps/v8/test/intl/break-iterator/subclass.js | 2
-rw-r--r--  deps/v8/test/intl/date-format/check-hc-option.js | 6
-rw-r--r--  deps/v8/test/intl/date-format/constructor-date-style-order.js | 108
-rw-r--r--  deps/v8/test/intl/date-format/constructor-date-time-style-order.js | 109
-rw-r--r--  deps/v8/test/intl/date-format/constructor-date-time-style.js | 33
-rw-r--r--  deps/v8/test/intl/date-format/constructor-no-style-order.js | 114
-rw-r--r--  deps/v8/test/intl/date-format/constructor-time-style-order.js | 108
-rw-r--r--  deps/v8/test/intl/date-format/property-override-date-style.js | 54
-rw-r--r--  deps/v8/test/intl/date-format/property-override-date-time-style.js | 59
-rw-r--r--  deps/v8/test/intl/date-format/property-override-time-style.js | 54
-rw-r--r--  deps/v8/test/intl/intl.status | 10
-rw-r--r--  deps/v8/test/intl/list-format/constructor.js | 2
-rw-r--r--  deps/v8/test/intl/list-format/format-en.js | 2
-rw-r--r--  deps/v8/test/intl/list-format/format-to-parts.js | 2
-rw-r--r--  deps/v8/test/intl/list-format/format.js | 2
-rw-r--r--  deps/v8/test/intl/list-format/formatToParts-zh.js | 2
-rw-r--r--  deps/v8/test/intl/list-format/resolved-options.js | 2
-rw-r--r--  deps/v8/test/intl/list-format/supported-locale.js | 1
-rw-r--r--  deps/v8/test/intl/regress-7770.js | 8
-rw-r--r--  deps/v8/test/intl/regress-8030.js | 2
-rw-r--r--  deps/v8/test/intl/regress-8031.js | 2
-rw-r--r--  deps/v8/test/intl/regress-930304.js | 5
-rw-r--r--  deps/v8/test/intl/relative-time-format/constructor.js | 2
-rw-r--r--  deps/v8/test/intl/relative-time-format/default-locale-fr-CA.js | 1
-rw-r--r--  deps/v8/test/intl/relative-time-format/default-locale-pt-BR.js | 1
-rw-r--r--  deps/v8/test/intl/relative-time-format/format-en.js | 2
-rw-r--r--  deps/v8/test/intl/relative-time-format/format-to-parts-en.js | 2
-rw-r--r--  deps/v8/test/intl/relative-time-format/format-to-parts-plural.js | 2
-rw-r--r--  deps/v8/test/intl/relative-time-format/format-to-parts.js | 2
-rw-r--r--  deps/v8/test/intl/relative-time-format/format.js | 2
-rw-r--r--  deps/v8/test/intl/relative-time-format/resolved-options-nu.js | 2
-rw-r--r--  deps/v8/test/intl/relative-time-format/resolved-options.js | 2
-rw-r--r--  deps/v8/test/intl/relative-time-format/supported-locale.js | 1
-rw-r--r--deps/v8/test/intl/testcfg.py25
-rw-r--r--deps/v8/test/js-perf-test/ArrayInOperator/run.js245
-rw-r--r--deps/v8/test/js-perf-test/Intl/constructor.js32
-rw-r--r--deps/v8/test/js-perf-test/Intl/run.js19
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json93
-rw-r--r--deps/v8/test/js-perf-test/ObjectFreeze/run.js20
-rw-r--r--deps/v8/test/js-perf-test/ObjectFreeze/tagged-template.js65
-rw-r--r--deps/v8/test/js-perf-test/Proxies/proxies.js9
-rw-r--r--deps/v8/test/js-perf-test/Strings/string-normalize.js38
-rw-r--r--deps/v8/test/js-perf-test/Strings/string-toLocaleCase.js35
-rw-r--r--deps/v8/test/js-perf-test/TypedArrays/filter-nospecies.js55
-rw-r--r--deps/v8/test/message/asm-function-undefined.out2
-rw-r--r--deps/v8/test/message/asm-table-undefined.out2
-rw-r--r--deps/v8/test/message/fail/computed-prop-fni.js9
-rw-r--r--deps/v8/test/message/fail/computed-prop-fni.out6
-rw-r--r--deps/v8/test/message/fail/default-parameter-tdz-arrow.out4
-rw-r--r--deps/v8/test/message/fail/default-parameter-tdz.out4
-rw-r--r--deps/v8/test/message/fail/destructuring-object-private-name.js13
-rw-r--r--deps/v8/test/message/fail/destructuring-object-private-name.out4
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-ellipsis.js27
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-ellipsis.out18
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-max-display-depth.js21
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-max-display-depth.out16
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-proxy.js28
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-proxy.out18
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-substructure.js9
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-substructure.out10
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular.js8
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular.out10
-rw-r--r--deps/v8/test/message/fail/list-format-style-narrow.js3
-rw-r--r--deps/v8/test/message/fail/list-format-style-narrow.out4
-rw-r--r--deps/v8/test/message/fail/wasm-function-name.js1
-rw-r--r--deps/v8/test/message/fail/wasm-function-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-module-and-function-name.js1
-rw-r--r--deps/v8/test/message/fail/wasm-module-and-function-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-module-name.js1
-rw-r--r--deps/v8/test/message/fail/wasm-module-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-no-name.js1
-rw-r--r--deps/v8/test/message/fail/wasm-no-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-trap.js1
-rw-r--r--deps/v8/test/message/fail/wasm-trap.out2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationgroup1.js (renamed from deps/v8/test/message/fail/weak-refs-weakfactory1.js)2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationgroup1.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationgroup2.js (renamed from deps/v8/test/message/fail/weak-refs-weakfactory2.js)2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationgroup2.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-makecell1.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-makecell2.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-register1.js (renamed from deps/v8/test/message/fail/weak-refs-makecell1.js)4
-rw-r--r--deps/v8/test/message/fail/weak-refs-register1.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-register2.js (renamed from deps/v8/test/message/fail/weak-refs-makecell2.js)4
-rw-r--r--deps/v8/test/message/fail/weak-refs-register2.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-weakfactory1.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-weakfactory2.out6
-rw-r--r--deps/v8/test/message/message.status10
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.js30
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.out9
-rw-r--r--deps/v8/test/message/regress/fail/regress-900383.out2
-rw-r--r--deps/v8/test/message/testcfg.py17
-rw-r--r--deps/v8/test/message/wasm-finish-compilation.js1
-rw-r--r--deps/v8/test/message/wasm-function-name-async.js1
-rw-r--r--deps/v8/test/message/wasm-function-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-function-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-async.js1
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-module-name-async.js1
-rw-r--r--deps/v8/test/message/wasm-module-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-module-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-no-name-async.js1
-rw-r--r--deps/v8/test/message/wasm-no-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-no-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-trace-memory.js1
-rw-r--r--deps/v8/test/mjsunit/arguments.js11
-rw-r--r--deps/v8/test/mjsunit/array-constructor-feedback.js34
-rw-r--r--deps/v8/test/mjsunit/array-push5.js1
-rw-r--r--deps/v8/test/mjsunit/array-reduce.js7
-rw-r--r--deps/v8/test/mjsunit/code-coverage-ad-hoc.js16
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-opt.js2
-rw-r--r--deps/v8/test/mjsunit/code-coverage-precise.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/accessor-exceptions1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/accessor-exceptions2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/alloc-object-huge.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/alloc-object.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/arguments-object.js171
-rw-r--r--deps/v8/test/mjsunit/compiler/array-buffer-is-view.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/array-constructor.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/array-every.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/array-find.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/array-findindex.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/array-is-array.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/array-length.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/array-push-1.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/array-push-2.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/array-push-3.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/array-slice-clone.js35
-rw-r--r--deps/v8/test/mjsunit/compiler/array-some.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/array-species.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/array-subclass.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/assignment-deopt.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/boolean-protototype.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/capture-context.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/compare-map-elim.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/compare-map-elim2.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/compare-objeq-elim.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-proto-change.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/constant-fold-tostring.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/constructor-inlining.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/context-sensitivity.js27
-rw-r--r--deps/v8/test/mjsunit/compiler/count-deopt.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-constant.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-get.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-neutered.js32
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-nonconstant.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-set.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-code6.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-loops-neg.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-loops.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-string-add-warm.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors3.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors4.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors5.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors6.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors7.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-args.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-builtins.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-iterator-prototype-next.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-every.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-filter.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-find.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-findindex.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-foreach.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-includes.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-indexof.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-lastindexof.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-map.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-pop.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-reduce.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-slice.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-some.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-push.js43
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-bool.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-bool2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-closure.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-during-eval-lookup.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-eager-and-lazy.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-eager-var-mutation-ite.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-eager-with-freeze.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-followed-by-gc.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-inlined-from-call.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-lazy-freeze.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-lazy-shape-mutation.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-lazy-var-mutation.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-many-lazy.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-now-lazy.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-simple-eager.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-simple-lazy.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-simple-try-catch.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-soft-simple.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-tonumber-binop.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-tonumber-compare.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-tonumber-shift.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-twice-on-call.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-twice.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/deoptimize-lazy-weak.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/division-by-constant.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/dont-constant-fold-deopting-checks.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/dont-flush-code-marked-for-opt.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/double-array-to-global.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/eager-deopt-simple.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-11.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-12.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-13.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-15.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-16.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-17.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-18.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-3.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-4.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-5.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-6.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-7.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-8.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-9.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js19
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-array.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-cycle.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-3.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-4.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-5.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-6.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-framestate-use-at-branchpoint.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-materialize.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-phi-type-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-replacement.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-representation.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-rest-parameters.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-type-none-in-object-state.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-typeguard.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/eval-introduced-closure.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/feedback-after-throw.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/for-in-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/for-in-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/for-in-3.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/for-in-4.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/for-in-5.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/function-apply.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/function-bind.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/function-caller.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/global-delete.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/global-var-delete.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/increment-typefeedback.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-accessors1.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-accessors2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-arguments.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-arity-mismatch.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-closures.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-compare.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-construct.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-context-deopt.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-context-slots.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-exception-1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-exception-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-function-apply.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-global-access.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-literals.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-omit-arguments-deopt.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-omit-arguments-object.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-omit-arguments.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-param.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-surplus-arguments-deopt.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-surplus-arguments-object.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-surplus-arguments.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-throw.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-two.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-array-pop-getter1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-array-pop-getter2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-call-mapcheck.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-call.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/instance-of-overridden-has-instance.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof-opt1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof-opt2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof-opt3.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof2.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof3.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/int64.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/integral32-add-sub.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/lazy-const-lookup.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/lazy-deopt-async-function-resolve.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/lazy-deopt-in-literal.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/literals-optimized.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination-global.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination-osr.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination-params.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/materialize-dictionary-properties.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/materialize-mutable-heap-number.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/math-ceil.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/math-floor-global.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/math-floor-local.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/math-imul.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/math-max.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/math-min.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/math-mul.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/math-round.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/math-sign.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/math-trunc.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/minus-zero.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/mul-div-52bit.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/multiply-add.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/multiply-sub.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/native-context-specialization-string-concat.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/new-cons-string.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/number-abs.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/number-add.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/number-ceil.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/number-comparison-truncations.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/number-constructor-deopt.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/number-divide.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/number-floor.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/number-isfinite-inl.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/number-isfinite.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/number-isinteger-inl.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/number-isinteger.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/number-isnan.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/number-issafeinteger.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/number-max.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/number-min.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/number-modulus.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/number-multiply.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/number-round.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/number-subtract.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/number-toboolean.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/number-trunc.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/object-constructor.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/object-create.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/object-getprototypeof.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/object-is.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/object-isprototypeof.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/opt-next-call.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/optimize-bitnot.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-closures.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-float32array-length.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-float64array-length.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-for-in.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-function-calls.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-instanceof-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-instanceof-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-int32array-length.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-with.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-sar.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/phi-representations.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/pic.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/polymorphic-symbols.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-capability-default-closures.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-constructor.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-prototype-catch-subclass.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-prototype-catch.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-prototype-finally-subclass.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-prototype-finally.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-prototype-then.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-resolve.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-species.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/property-calls.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/property-refs.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/property-static.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/property-stores.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/proto-chain-constant.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/proto-chain-load.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/receiver-conversion.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/recursive-deopt.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/redundancy-elimination.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/reflect-apply.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/reflect-construct.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/reflect-get.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/reflect-getprototypeof.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/reflect-has.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-106351.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1085.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1394.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-177883.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-3218915.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-411262.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4207.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-3.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-4.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-5.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-6.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4413-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4470-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-491578.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5074.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5100.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5129.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5158.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5278.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5320.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5538.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-600593.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-621147.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-621423.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-626986.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-628403.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-633497.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-638132.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-639210.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-644048.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-664117.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-664490.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-665680.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-668760.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-671574.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-675704.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-700883.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-7121.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-713367.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-714483.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-715651.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-726554.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-731495.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-733181.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-736567.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-739902.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-758096.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-758983.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-761892.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-762057.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-772420.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-772872.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-773954.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-780658.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-786521.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-788539.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-793863.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-796041.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-797596.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-799263.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-801097.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-817225.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-8380.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-841117.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-884052.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-888923.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-890620.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-895799.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-905555-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-905555.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-910838.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-913232.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-924151.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-932392.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-934175.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-944062-1.js25
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-944062-2.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-arguments.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-compare-negate.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-const.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-540593.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-dead-throw-inlining.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-escape-analysis-indirect.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-f64-w32-change.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-funarguments.js19
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-funcaller.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-gvn.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-inline-callfunctionstub.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-intoverflow.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-lazy-deopt.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-lbranch-double.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-loadfield.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-math-sign-nan-type.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-number-is-hole-nan.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-or.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-rep-change.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-shared-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-shift-left.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-shift-right-logical.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-shift-right.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-stacktrace-methods.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-store-holey-double-array.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-toint32.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-truncate-number-or-undefined-to-float64.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-v8-5573.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-v8-5756.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-v8-6077.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-v8-6631.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-variable-liveness-let.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-variable-liveness.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regresss-933331.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/rest-parameters.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/rotate.js34
-rw-r--r--deps/v8/test/mjsunit/compiler/shift-shr.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/simple-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/simple-inlining.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/smi-stores-opt.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/spread-call.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/store-elimination.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/stress-deopt-count-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/stress-deopt-count-2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/strict-equal-number.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/strict-equal-receiver.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/strict-equal-symbol.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/strict-recompile.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/string-add-try-catch.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/string-comparison-opt.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/string-concat-deopt.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/string-concat-try-catch.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/string-concat-yield.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/string-from-code-point.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/string-length.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/string-slice.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/switch-bailout.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/symbol-protototype.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/try-binop.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/try-catch-deopt.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/try-context.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/try-deopt.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/try-finally-deopt.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/turbo-number-feedback.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/type-speculative-safe-integer-add.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/typed-array-constructor.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/typedarray-prototype-tostringtag.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/uint32.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/uint8-clamped-array.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/unsigned-min-max.js2
-rw-r--r--deps/v8/test/mjsunit/es6/for-each-in-catch.js226
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-ownkeys-clone.js25
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-ownkeys.js12
-rw-r--r--deps/v8/test/mjsunit/es6/string-endswith.js10
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-sort.js25
-rw-r--r--deps/v8/test/mjsunit/es8/object-entries.js24
-rw-r--r--deps/v8/test/mjsunit/es8/object-get-own-property-descriptors.js16
-rw-r--r--deps/v8/test/mjsunit/es8/object-values.js15
-rw-r--r--deps/v8/test/mjsunit/for-of-in-catch-duplicate-decl.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/array-flat-species.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/array-flat.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/array-flatMap-species.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/array-flatMap.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/as-int-n.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-8808.js15
-rw-r--r--deps/v8/test/mjsunit/harmony/symbol-description.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/basics.js166
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-cells.js)49
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-cleared-weakcell.js)22
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-weakcell.js)23
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-called-twice.js39
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-clears-factory-pointer.js49
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup4.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalization-group-keeps-holdings-alive.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/weak-factory-keeps-weak-cells-alive.js)21
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-and-weakref.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/weakcell-and-weakref.js)19
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-scheduled-for-cleanup-multiple-times.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/factory-scheduled-for-cleanup-multiple-times.js)38
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/iterating-weak-cells.js)30
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-weak-factories.js)24
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js39
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/clear-after-cleanup.js)30
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/clear-before-cleanup.js)19
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js40
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup1.js)24
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup2.js)20
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup3.js)27
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js50
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/clear-when-cleanup-already-scheduled.js)14
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js7
-rw-r--r--deps/v8/test/mjsunit/integrity-level-map-update.js166
-rw-r--r--deps/v8/test/mjsunit/keyed-has-ic-module-export.js9
-rw-r--r--deps/v8/test/mjsunit/keyed-has-ic-module-import.js70
-rw-r--r--deps/v8/test/mjsunit/keyed-has-ic.js402
-rw-r--r--deps/v8/test/mjsunit/messages.js7
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js109
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status189
-rw-r--r--deps/v8/test/mjsunit/object-freeze.js109
-rw-r--r--deps/v8/test/mjsunit/object-get-own-property-names.js10
-rw-r--r--deps/v8/test/mjsunit/object-keys.js25
-rw-r--r--deps/v8/test/mjsunit/object-prevent-extensions.js87
-rw-r--r--deps/v8/test/mjsunit/object-seal.js102
-rw-r--r--deps/v8/test/mjsunit/optimized-includes-polymorph.js117
-rw-r--r--deps/v8/test/mjsunit/optimized-reduce.js24
-rw-r--r--deps/v8/test/mjsunit/optimized-reduceright.js24
-rw-r--r--deps/v8/test/mjsunit/parallel-optimize-disabled.js2
-rw-r--r--deps/v8/test/mjsunit/regress-930045.js35
-rw-r--r--deps/v8/test/mjsunit/regress-932101.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3218530.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3255.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5888.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5911.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-687.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7254.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-813440.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-863810.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8913.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8947.js49
-rw-r--r--deps/v8/test/mjsunit/regress/regress-912162.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-917755.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-926036.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-930486.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-932953.js59
-rw-r--r--deps/v8/test/mjsunit/regress/regress-933179.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-933776.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-936077.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-940361.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-715455.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-772056.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-913222.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-926819.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-926856.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-930948-base.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-930948.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-931664.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-932034.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-933214.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-934138.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-934166.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-935932.js90
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-936302.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-937618.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-937649.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-941743.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5848.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-8799.js11
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/loop-stack-check.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-02256.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5531.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5800.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5860.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5884.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-6054.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-6164.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-644682.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-648079.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-651961.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-654377.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-663994.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-667745.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-684858.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-688876.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-689450.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-6931.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-699485.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-702460.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7033.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7035.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7049.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-708714.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-709684.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-710844.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-711203.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-715216b.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-722445.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-724846.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-724851.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-724972.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-727222.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-727560.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-729991.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-734246.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-734345.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7353.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7364.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-736584.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7366.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-737069.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-739768.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7422.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7499.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7508.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-752423.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7565.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-757217.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7579.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7582.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-763439.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-763697.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-766003.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-769637.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-771243.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-772332.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-775366.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7785.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-778917.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-782280.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-784050.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7914.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-791810.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-793551.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-797846.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-800756.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-801785.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-801850.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-802244.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-803427.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-803788.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8059.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808012.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808848.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808980.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8094.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8095.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-812005.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-817380.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-819869.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-820802.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-824681.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-827806.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-831463.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-834619.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-834624.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-834693.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-836141.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-837417.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-840757.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-842501.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-843563.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8505.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8533.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-854011.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-854050.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-864509.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-875556.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8846.js27
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8896.js23
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-894307.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-894374.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-905815.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-910824.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-913804.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-916869.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917412.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917588.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917588b.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-918149.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-918284.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-918917.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-919308.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-919533.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922432.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922670.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922933.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-924843.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-924905.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-925671.js12
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-935138.js19
-rw-r--r--deps/v8/test/mjsunit/string-external-cached.js2
-rw-r--r--deps/v8/test/mjsunit/string-externalize.js16
-rw-r--r--deps/v8/test/mjsunit/switch.js10
-rw-r--r--deps/v8/test/mjsunit/testcfg.py29
-rw-r--r--deps/v8/test/mjsunit/tools/compiler-trace-flags.js1
-rw-r--r--deps/v8/test/mjsunit/ubsan-fuzzerbugs.js19
-rw-r--r--deps/v8/test/mjsunit/wasm/adapter-frame.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/add-getters.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/anyfunc.js162
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref-globals.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref.js61
-rw-r--r--deps/v8/test/mjsunit/wasm/async-compile.js22
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics-stress.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics64-stress.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/bigint.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/bounds-check-64bit.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory.js45
-rw-r--r--deps/v8/test/mjsunit/wasm/calls.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/code-space-exhaustion.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/compare-exchange-stress.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-limits.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-management.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/data-segments.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/disallow-codegen.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/divrem-trap.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/empirical_max_memory.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/errors.js190
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-anyref.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-export.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-import.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-rethrow.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-shared.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-simd.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/export-global.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/export-mutable-global.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/export-table.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi-error.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/float-constant-folding.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/function-names.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/function-prototype.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/futex.js38
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-buffer.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-frame.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-stress.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/globals.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/graceful_shutdown.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-detaching.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-call.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/huge-memory.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/import-function.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/import-memory.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/import-mutable-global.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/import-table.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-calls.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-sig-mismatch.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-tables.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/instance-gc.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-module-basic.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-run-basic.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter-mixed.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/js-api.js371
-rw-r--r--deps/v8/test/mjsunit/wasm/large-offset.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/lazy-compilation.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/liftoff-trap-handler.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/liftoff.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/loop-rotation.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/many-parameters.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-external-call.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-instance-validation.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-size.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/memory.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/memory_1gb_oob.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/memory_2gb_oob.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/memory_4gb_oob.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/multi-value.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/names.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/origin-trial-flags.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/parallel_compilation.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/params.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/print-code.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/receiver.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/serialize-lazy-module.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory.js15
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js25
-rw-r--r--deps/v8/test/mjsunit/wasm/stackwalk.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/start-function.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-error-position.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/table-copy.js22
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/table-init.js128
-rw-r--r--deps/v8/test/mjsunit/wasm/table-limits.js42
-rw-r--r--deps/v8/test/mjsunit/wasm/table.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/tier-up-testing-flag.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-handler-fallback.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-location.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/unicode-validation.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/unicode.js7
-rw-r--r--deps/v8/test/mjsunit/wasm/unreachable-validation.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/unreachable.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-common.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-api-overloading.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js511
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js632
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-interpreter.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-module.js1
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc4
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.status5
-rw-r--r--deps/v8/test/mkgrokdump/testcfg.py11
-rw-r--r--deps/v8/test/mozilla/mozilla.status5
-rw-r--r--deps/v8/test/mozilla/testcfg.py90
-rw-r--r--deps/v8/test/preparser/preparser.status5
-rw-r--r--deps/v8/test/preparser/testcfg.py24
-rw-r--r--deps/v8/test/test262/local-tests/test/built-ins/TypedArray/prototype/set/typedarray-arg-src-byteoffset-internal.js45
-rw-r--r--deps/v8/test/test262/local-tests/test/built-ins/TypedArray/prototype/set/typedarray-arg-target-byteoffset-internal.js44
-rw-r--r--deps/v8/test/test262/local-tests/test/intl402/NumberFormat/fraction-digit-options-read-once.js18
-rw-r--r--deps/v8/test/test262/local-tests/test/intl402/NumberFormat/prototype/formatToParts/default-parameter.js30
-rw-r--r--deps/v8/test/test262/test262.status173
-rw-r--r--deps/v8/test/test262/testcfg.py72
-rw-r--r--deps/v8/test/torque/test-torque.tq108
-rw-r--r--deps/v8/test/unittests/BUILD.gn6
-rw-r--r--deps/v8/test/unittests/background-compile-task-unittest.cc1
-rw-r--r--deps/v8/test/unittests/base/functional-unittest.cc3
-rw-r--r--deps/v8/test/unittests/base/template-utils-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc61
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc107
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/js-operator-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/load-elimination-unittest.cc42
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc98
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-unittest.cc7
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc109
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc95
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h52
-rw-r--r--deps/v8/test/unittests/compiler/opcodes-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc27
-rw-r--r--deps/v8/test/unittests/eh-frame-writer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/bitmap-test-utils.h35
-rw-r--r--deps/v8/test/unittests/heap/bitmap-unittest.cc137
-rw-r--r--deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc72
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc18
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc12
-rw-r--r--deps/v8/test/unittests/heap/marking-unittest.cc63
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc36
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc162
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc3
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc15
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc20
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-utils.h2
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc15
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc32
-rw-r--r--deps/v8/test/unittests/microtask-queue-unittest.cc325
-rw-r--r--deps/v8/test/unittests/parser/ast-value-unittest.cc3
-rw-r--r--deps/v8/test/unittests/testcfg.py25
-rw-r--r--deps/v8/test/unittests/torque/ls-json-unittest.cc103
-rw-r--r--deps/v8/test/unittests/torque/ls-message-unittest.cc117
-rw-r--r--deps/v8/test/unittests/torque/torque-utils-unittest.cc30
-rw-r--r--deps/v8/test/unittests/unicode-unittest.cc6
-rw-r--r--deps/v8/test/unittests/unittests.status11
-rw-r--r--deps/v8/test/unittests/utils-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc2615
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc37
-rw-r--r--deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc69
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc6
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc18
-rw-r--r--deps/v8/test/unittests/zone/segmentpool-unittest.cc32
-rw-r--r--deps/v8/test/wasm-js/testcfg.py45
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status14
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py19
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status9
-rw-r--r--deps/v8/test/webkit/JSON-stringify-replacer-expected.txt16
-rw-r--r--deps/v8/test/webkit/class-syntax-name-expected.txt4
-rw-r--r--deps/v8/test/webkit/run-json-stringify-expected.txt8
-rw-r--r--deps/v8/test/webkit/testcfg.py27
-rw-r--r--deps/v8/test/webkit/webkit.status5
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py11
-rw-r--r--deps/v8/third_party/inspector_protocol/README.md5
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/check_protocol_compatibility.py5
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/code_generator.py30
-rw-r--r--deps/v8/third_party/inspector_protocol/inspector_protocol.gni4
-rw-r--r--deps/v8/third_party/inspector_protocol/inspector_protocol.gypi2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Allocator_h.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Array_h.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/CBOR_cpp.template827
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/CBOR_h.template425
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template59
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template43
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Forward_h.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template13
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Maybe_h.template8
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Object_cpp.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Object_h.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Parser_h.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Protocol_cpp.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template17
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_cpp.template285
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_h.template55
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template311
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template149
-rw-r--r--deps/v8/third_party/inspector_protocol/pdl.py3
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/roll.py162
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/Exported_h.template18
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/Imported_h.template45
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template41
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template10
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq1511
-rw-r--r--deps/v8/tools/OWNERS2
-rw-r--r--deps/v8/tools/PRESUBMIT.py2
-rwxr-xr-xdeps/v8/tools/android-run.py7
-rwxr-xr-xdeps/v8/tools/avg.py172
-rwxr-xr-xdeps/v8/tools/bigint-tester.py5
-rwxr-xr-xdeps/v8/tools/callstats.py45
-rw-r--r--deps/v8/tools/clusterfuzz/OWNERS5
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt46
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/test_d8_1.py7
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/test_d8_2.py7
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/test_d8_3.py7
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie.py134
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie_test.py19
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_config.py6
-rw-r--r--deps/v8/tools/clusterfuzz/v8_sanity_checks.js22
-rw-r--r--deps/v8/tools/concatenate-files.py7
-rwxr-xr-xdeps/v8/tools/deprecation_stats.py3
-rwxr-xr-xdeps/v8/tools/dev/gen-tags.py4
-rwxr-xr-xdeps/v8/tools/dev/gm.py9
-rwxr-xr-xdeps/v8/tools/dev/v8gen.py21
-rw-r--r--deps/v8/tools/dump-cpp.py11
-rwxr-xr-xdeps/v8/tools/eval_gc_nvp.py6
-rwxr-xr-xdeps/v8/tools/find-commit-for-patch.py3
-rw-r--r--deps/v8/tools/find_depot_tools.py5
-rwxr-xr-xdeps/v8/tools/gc-nvp-to-csv.py11
-rwxr-xr-xdeps/v8/tools/gc-nvp-trace-processor.py21
-rwxr-xr-xdeps/v8/tools/gcmole/parallel.py9
-rwxr-xr-xdeps/v8/tools/gcmole/run-gcmole.py7
-rw-r--r--deps/v8/tools/gdb-v8-support.py5
-rw-r--r--deps/v8/tools/gdbinit16
-rw-r--r--deps/v8/tools/gen-inlining-tests.py4
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py29
-rwxr-xr-xdeps/v8/tools/generate-builtins-tests.py3
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py5
-rwxr-xr-xdeps/v8/tools/get_landmines.py44
-rwxr-xr-xdeps/v8/tools/grokdump.py307
-rwxr-xr-xdeps/v8/tools/ignition/bytecode_dispatches_report.py23
-rwxr-xr-xdeps/v8/tools/ignition/linux_perf_bytecode_annotate.py13
-rwxr-xr-xdeps/v8/tools/js2c.py9
-rwxr-xr-xdeps/v8/tools/ll_prof.py77
-rw-r--r--deps/v8/tools/lldb_commands.py3
-rwxr-xr-xdeps/v8/tools/locs.py58
-rwxr-xr-xdeps/v8/tools/mb/mb.py9
-rwxr-xr-xdeps/v8/tools/mb/mb_unittest.py2
-rwxr-xr-xdeps/v8/tools/node/fetch_deps.py9
-rwxr-xr-xdeps/v8/tools/node/node_common.py11
-rwxr-xr-xdeps/v8/tools/node/update_node.py19
-rwxr-xr-xdeps/v8/tools/perf-compare.py5
-rw-r--r--deps/v8/tools/predictable_wrapper.py23
-rwxr-xr-xdeps/v8/tools/release/auto_push.py9
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py9
-rwxr-xr-xdeps/v8/tools/release/auto_tag.py19
-rwxr-xr-xdeps/v8/tools/release/check_clusterfuzz.py4
-rw-r--r--deps/v8/tools/release/common_includes.py45
-rwxr-xr-xdeps/v8/tools/release/create_release.py13
-rwxr-xr-xdeps/v8/tools/release/merge_to_branch.py17
-rwxr-xr-xdeps/v8/tools/release/mergeinfo.py35
-rwxr-xr-xdeps/v8/tools/release/push_to_candidates.py9
-rwxr-xr-xdeps/v8/tools/release/roll_merge.py21
-rwxr-xr-xdeps/v8/tools/release/script_test.py5
-rwxr-xr-xdeps/v8/tools/release/search_related_commits.py21
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py9
-rwxr-xr-xdeps/v8/tools/run-clang-tidy.py25
-rwxr-xr-xdeps/v8/tools/run_perf.py19
-rwxr-xr-xdeps/v8/tools/sanitizers/sancov_formatter.py16
-rwxr-xr-xdeps/v8/tools/sanitizers/sancov_merger.py2
-rwxr-xr-xdeps/v8/tools/sanitizers/sanitize_pcs.py7
-rwxr-xr-xdeps/v8/tools/stats-viewer.py17
-rw-r--r--deps/v8/tools/testrunner/OWNERS5
-rw-r--r--deps/v8/tools/testrunner/PRESUBMIT.py16
-rw-r--r--deps/v8/tools/testrunner/base_runner.py77
-rw-r--r--deps/v8/tools/testrunner/local/android.py2
-rw-r--r--deps/v8/tools/testrunner/local/command.py30
-rw-r--r--deps/v8/tools/testrunner/local/fake_testsuite/testcfg.py22
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/pool.py21
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py7
-rwxr-xr-xdeps/v8/tools/testrunner/local/statusfile_unittest.py1
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py182
-rwxr-xr-xdeps/v8/tools/testrunner/local/testsuite_unittest.py28
-rw-r--r--deps/v8/tools/testrunner/local/utils.py4
-rw-r--r--deps/v8/tools/testrunner/local/variants.py2
-rw-r--r--deps/v8/tools/testrunner/local/verbose.py18
-rwxr-xr-xdeps/v8/tools/testrunner/num_fuzzer.py15
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py13
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py42
-rw-r--r--deps/v8/tools/testrunner/testproc/base.py14
-rw-r--r--deps/v8/tools/testrunner/testproc/combiner.py19
-rw-r--r--deps/v8/tools/testrunner/testproc/execution.py4
-rw-r--r--deps/v8/tools/testrunner/testproc/expectation.py3
-rw-r--r--deps/v8/tools/testrunner/testproc/fuzzer.py17
-rw-r--r--deps/v8/tools/testrunner/testproc/loader.py35
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py123
-rw-r--r--deps/v8/tools/testrunner/testproc/rerun.py4
-rw-r--r--deps/v8/tools/testrunner/testproc/seed.py21
-rw-r--r--deps/v8/tools/testrunner/testproc/shard.py20
-rwxr-xr-xdeps/v8/tools/testrunner/testproc/shard_unittest.py54
-rw-r--r--deps/v8/tools/testrunner/testproc/sigproc.py7
-rw-r--r--deps/v8/tools/testrunner/testproc/variant.py11
-rwxr-xr-xdeps/v8/tools/testrunner/testproc/variant_unittest.py172
-rw-r--r--deps/v8/tools/testrunner/utils/dump_build_config_gyp.py5
-rwxr-xr-xdeps/v8/tools/torque/format-torque.py67
-rwxr-xr-xdeps/v8/tools/torque/make-torque-parser.py2
-rw-r--r--deps/v8/tools/torque/vscode-torque/.npmrc1
-rw-r--r--deps/v8/tools/torque/vscode-torque/README.md33
-rw-r--r--deps/v8/tools/torque/vscode-torque/out/extension.js99
-rw-r--r--deps/v8/tools/torque/vscode-torque/package.json96
-rw-r--r--deps/v8/tools/torque/vscode-torque/src/extension.ts104
-rw-r--r--deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json203
-rw-r--r--deps/v8/tools/torque/vscode-torque/tsconfig.json17
-rw-r--r--deps/v8/tools/torque/vscode-torque/tslint.json11
-rwxr-xr-xdeps/v8/tools/trace-maps-processor.py5
-rwxr-xr-xdeps/v8/tools/try_perf.py13
-rw-r--r--deps/v8/tools/turbolizer-perf.py5
-rwxr-xr-xdeps/v8/tools/turbolizer/deploy.sh2
-rw-r--r--deps/v8/tools/turbolizer/package-lock.json20
-rw-r--r--deps/v8/tools/turbolizer/package.json2
-rw-r--r--deps/v8/tools/turbolizer/src/schedule-view.ts1
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.css4
-rw-r--r--deps/v8/tools/ubsan/blacklist.txt11
-rwxr-xr-xdeps/v8/tools/unittests/run_perf_test.py7
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py68
-rw-r--r--deps/v8/tools/unittests/testdata/d8_mocked1.py7
-rw-r--r--deps/v8/tools/unittests/testdata/d8_mocked2.py13
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results1.json139
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results2.json94
-rw-r--r--deps/v8/tools/unittests/testdata/predictable_mocked.py9
-rw-r--r--deps/v8/tools/unittests/testdata/results_processor.py7
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py5
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py15
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json1
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py5
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py13
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json1
-rwxr-xr-xdeps/v8/tools/update-object-macros-undef.py1
-rwxr-xr-xdeps/v8/tools/v8_presubmit.py59
-rw-r--r--deps/v8/tools/v8heapconst.py657
-rw-r--r--deps/v8/tools/whitespace.txt6
2265 files changed, 75537 insertions, 52489 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 6cf6ab4e91..7fc0f66b37 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -23,6 +23,7 @@
*~
.#*
.*.sw?
+.ccls-cache
.cpplint-cache
.cproject
.d8_history
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index ecf0e5d1fb..a32b13c669 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -52,6 +52,7 @@ Andreas Anyuru <andreas.anyuru@gmail.com>
Andrew Paprocki <andrew@ishiboo.com>
Andrei Kashcha <anvaka@gmail.com>
Anna Henningsen <anna@addaleax.net>
+Anton Bikineev <ant.bikineev@gmail.com>
Bangfu Tao <bangfu.tao@samsung.com>
Daniel Shelton <d1.shelton@samsung.com>
Ben Coe <bencoe@gmail.com>
@@ -84,6 +85,7 @@ Geoffrey Garside <ggarside@gmail.com>
Gergely Nagy <ngg@ngg.hu>
Gus Caplan <me@gus.host>
Gwang Yoon Hwang <ryumiel@company100.net>
+Hannu Trey <hannu.trey@gmail.com>
Henrique Ferreiro <henrique.ferreiro@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
Honggyu Kim <honggyu.kp@gmail.com>
@@ -164,6 +166,7 @@ Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Tobias Burnus <burnus@net-b.de>
Tobias Nießen <tniessen@tnie.de>
Ujjwal Sharma <usharma1998@gmail.com>
+Vadim Gorbachev <bmsdave@gmail.com>
Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 16e0b60ca7..fddd525297 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -98,10 +98,6 @@ declare_args() {
v8_enable_pointer_compression = false
v8_enable_31bit_smis_on_64bit_arch = false
- # The interpreted regexp engine exists as a platform-independent
- # alternative in which the regular expression is compiled to bytecode.
- v8_interpreted_regexp = false
-
# Sets -dOBJECT_PRINT.
v8_enable_object_print = ""
@@ -175,13 +171,11 @@ declare_args() {
# setting the "check_v8_header_includes" gclient variable to run a
# specific hook).
v8_check_header_includes = false
-
- # We reuse the snapshot toolchain for building torque and other generators to
- # avoid building v8_libbase on the host more than once. On mips with big
- # endian, the snapshot toolchain is the target toolchain and, hence, can't be
- # used.
}
+# We reuse the snapshot toolchain for building torque and other generators to
+# avoid building v8_libbase on the host more than once. On mips with big endian,
+# the snapshot toolchain is the target toolchain and, hence, can't be used.
v8_generator_toolchain = v8_snapshot_toolchain
if (host_cpu == "x64" &&
(v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
@@ -226,28 +220,29 @@ v8_toolset_for_shell = "host"
###############################################################################
# Configurations
#
-config("internal_config") {
+
+config("internal_config_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
include_dirs = [
".",
"$target_gen_dir",
]
-
- configs = [ "//build/config/compiler:wexit_time_destructors" ]
-
- if (is_component_build) {
- defines = [ "BUILDING_V8_SHARED" ]
- }
}
-config("internal_config_base") {
+config("internal_config") {
+ defines = []
visibility = [ ":*" ] # Only targets in this file can depend on this.
- include_dirs = [
- ".",
- "$target_gen_dir",
+ configs = [
+ "//build/config/compiler:wexit_time_destructors",
+ ":internal_config_base",
+ ":v8_header_features",
]
+
+ if (is_component_build) {
+ defines += [ "BUILDING_V8_SHARED" ]
+ }
}
# This config should be applied to code using the libplatform.
@@ -278,18 +273,10 @@ config("libsampler_config") {
# itself.
config("external_config") {
defines = []
+ configs = [ ":v8_header_features" ]
if (is_component_build) {
defines += [ "USING_V8_SHARED" ]
}
- if (v8_enable_v8_checks) {
- defines += [ "V8_ENABLE_CHECKS" ] # Used in "include/v8.h".
- }
- if (v8_deprecation_warnings) {
- defines += [ "V8_DEPRECATION_WARNINGS" ]
- }
- if (v8_imminent_deprecation_warnings) {
- defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ]
- }
include_dirs = [
"include",
"$target_gen_dir/include",
@@ -304,11 +291,39 @@ config("external_startup_data") {
}
}
+# Put defines that are used in public headers here; public headers are
+# defined in "v8_headers" and are included by embedders of V8.
+config("v8_header_features") {
+ visibility = [ ":*" ]
+
+ defines = []
+
+ if (v8_enable_v8_checks) {
+ defines += [ "V8_ENABLE_CHECKS" ] # Used in "include/v8.h".
+ }
+ if (v8_enable_pointer_compression) {
+ defines += [ "V8_COMPRESS_POINTERS" ]
+ }
+ if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) {
+ defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
+ }
+ if (v8_deprecation_warnings) {
+ defines += [ "V8_DEPRECATION_WARNINGS" ]
+ }
+ if (v8_imminent_deprecation_warnings) {
+ defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ]
+ }
+}
+
+# Put defines here that are only used in our internal files and NEVER in
+# external headers that embedders (such as chromium and node) might include.
config("features") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
defines = []
+ configs = [ ":v8_header_features" ]
+
if (v8_embedder_string != "") {
defines += [ "V8_EMBEDDER_STRING=\"$v8_embedder_string\"" ]
}
@@ -340,12 +355,6 @@ config("features") {
if (v8_enable_minor_mc) {
defines += [ "ENABLE_MINOR_MC" ]
}
- if (v8_enable_pointer_compression) {
- defines += [ "V8_COMPRESS_POINTERS" ]
- }
- if (v8_enable_31bit_smis_on_64bit_arch) {
- defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
- }
if (v8_enable_object_print) {
defines += [ "OBJECT_PRINT" ]
}
@@ -368,18 +377,6 @@ config("features") {
defines += [ "V8_ENABLE_ALLOCATION_TIMEOUT" ]
defines += [ "V8_ENABLE_FORCE_SLOW_PATH" ]
}
- if (v8_enable_v8_checks) {
- defines += [ "V8_ENABLE_CHECKS" ]
- }
- if (v8_interpreted_regexp || v8_enable_lite_mode) {
- defines += [ "V8_INTERPRETED_REGEXP" ]
- }
- if (v8_deprecation_warnings) {
- defines += [ "V8_DEPRECATION_WARNINGS" ]
- }
- if (v8_imminent_deprecation_warnings) {
- defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ]
- }
if (v8_enable_i18n_support) {
defines += [ "V8_INTL_SUPPORT" ]
}
@@ -600,16 +597,23 @@ config("toolchain") {
}
if (v8_no_inline) {
- cflags += [
- "-fno-inline-functions",
- "-fno-inline",
- ]
+ if (is_win) {
+ cflags += [ "/Ob0" ]
+ } else {
+ cflags += [
+ "-fno-inline-functions",
+ "-fno-inline",
+ ]
+ }
}
if (is_clang) {
cflags += [
"-Wmissing-field-initializers",
+ # TODO(thakis): Remove once enabled globally, https://crbug.com/926235
+ "-Wextra-semi",
+
# TODO(hans): Remove once http://crbug.com/428099 is resolved.
"-Winconsistent-missing-override",
]
@@ -659,6 +663,37 @@ config("toolchain") {
"-Wno-return-type",
]
}
+
+ # Chromium uses a hand-picked subset of UBSan coverage. We want everything.
+ if (is_ubsan) {
+ cflags += [ "-fsanitize=undefined" ]
+ }
+}
+
+config("default_optimization") {
+ if (is_debug && !v8_optimized_debug) {
+ configs = [ "//build/config/compiler:no_optimize" ]
+ } else {
+ # TODO(crbug.com/621335) Rework this so that we don't have the confusion
+ # between "optimize_speed" and "optimize_max".
+ if (((is_posix && !is_android) || is_fuchsia) && !using_sanitizer) {
+ configs = [ "//build/config/compiler:optimize_speed" ]
+ } else {
+ configs = [ "//build/config/compiler:optimize_max" ]
+ }
+ }
+}
+
+# For code that is only run a few times during the build, C++ optimizations
+# are a waste of time.
+config("unoptimized_initializer") {
+ configs = [ ":internal_config" ]
+ if (using_sanitizer) {
+ # Some sanitizers rely on optimizations.
+ configs += [ ":default_optimization" ]
+ } else {
+ configs += [ "//build/config/compiler:no_optimize" ]
+ }
}
# Configs for code coverage with gcov. Separate configs for cflags and ldflags
@@ -830,7 +865,9 @@ action("postmortem-metadata") {
"src/objects/js-regexp-string-iterator-inl.h",
"src/objects/js-regexp-string-iterator.h",
"src/objects/map.h",
+ "src/objects/map.cc",
"src/objects/map-inl.h",
+ "src/objects/js-objects.cc",
"src/objects/name.h",
"src/objects/name-inl.h",
"src/objects/oddball-inl.h",
@@ -840,7 +877,10 @@ action("postmortem-metadata") {
"src/objects/script-inl.h",
"src/objects/shared-function-info.h",
"src/objects/shared-function-info-inl.h",
+ "src/objects/string.cc",
"src/objects/string.h",
+ "src/objects/string-comparator.cc",
+ "src/objects/string-comparator.h",
"src/objects/string-inl.h",
"src/objects/struct.h",
"src/objects/struct-inl.h",
@@ -856,42 +896,79 @@ action("postmortem-metadata") {
torque_files = [
"src/builtins/base.tq",
+ "src/builtins/growable-fixed-array.tq",
"src/builtins/frames.tq",
"src/builtins/arguments.tq",
"src/builtins/array.tq",
"src/builtins/array-copywithin.tq",
+ "src/builtins/array-every.tq",
"src/builtins/array-filter.tq",
+ "src/builtins/array-find.tq",
+ "src/builtins/array-findindex.tq",
"src/builtins/array-foreach.tq",
"src/builtins/array-join.tq",
"src/builtins/array-lastindexof.tq",
"src/builtins/array-of.tq",
+ "src/builtins/array-map.tq",
+ "src/builtins/array-reduce.tq",
+ "src/builtins/array-reduce-right.tq",
"src/builtins/array-reverse.tq",
"src/builtins/array-slice.tq",
+ "src/builtins/array-some.tq",
"src/builtins/array-splice.tq",
"src/builtins/array-unshift.tq",
"src/builtins/collections.tq",
"src/builtins/data-view.tq",
"src/builtins/extras-utils.tq",
- "src/builtins/object.tq",
"src/builtins/object-fromentries.tq",
"src/builtins/iterator.tq",
+ "src/builtins/string-endswith.tq",
+ "src/builtins/string-startswith.tq",
"src/builtins/typed-array.tq",
"src/builtins/typed-array-createtypedarray.tq",
+ "src/builtins/typed-array-filter.tq",
+ "src/builtins/typed-array-foreach.tq",
+ "src/builtins/typed-array-reduce.tq",
+ "src/builtins/typed-array-reduceright.tq",
+ "src/builtins/typed-array-slice.tq",
+ "src/builtins/typed-array-subarray.tq",
"test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq",
]
torque_namespaces = [
- "base",
"arguments",
"array",
+ "array-copywithin",
+ "array-filter",
+ "array-find",
+ "array-findindex",
+ "array-foreach",
+ "array-join",
+ "array-map",
+ "array-of",
+ "array-reverse",
+ "array-slice",
+ "array-splice",
+ "array-unshift",
+ "array-lastindexof",
+ "base",
"collections",
- "iterator",
- "object",
- "typed-array",
"data-view",
"extras-utils",
+ "growable-fixed-array",
+ "iterator",
+ "object",
+ "string",
"test",
+ "typed-array",
+ "typed-array-createtypedarray",
+ "typed-array-filter",
+ "typed-array-foreach",
+ "typed-array-reduce",
+ "typed-array-reduceright",
+ "typed-array-slice",
+ "typed-array-subarray",
]
action("run_torque") {
@@ -911,6 +988,7 @@ action("run_torque") {
outputs = [
"$target_gen_dir/torque-generated/builtin-definitions-from-dsl.h",
+ "$target_gen_dir/torque-generated/class-definitions-from-dsl.h",
]
foreach(namespace, torque_namespaces) {
outputs += [
@@ -954,7 +1032,8 @@ v8_source_set("torque_generated_initializers") {
]
}
- configs = [ ":internal_config" ]
+ remove_configs = [ v8_path_prefix + ":default_optimization" ]
+ configs = [ ":unoptimized_initializer" ]
}
action("generate_bytecode_builtins_list") {
@@ -1123,6 +1202,7 @@ action("v8_dump_build_config") {
"is_android=$is_android",
"is_asan=$is_asan",
"is_cfi=$is_cfi",
+ "is_clang=$is_clang",
"is_component_build=$is_component_build",
"is_debug=$v8_enable_debugging_features",
"is_gcov_coverage=$is_gcov_coverage",
@@ -1263,23 +1343,17 @@ if (v8_use_snapshot && v8_use_external_startup_data) {
if (v8_use_multi_snapshots) {
deps += [ ":asm_to_inline_asm_trusted" ]
sources += [ "$target_gen_dir/embedded_trusted.cc" ]
-
- if (use_jumbo_build == true) {
- jumbo_excluded_sources = [ "$target_gen_dir/embedded_trusted.cc" ]
- }
+ jumbo_excluded_sources = [ "$target_gen_dir/embedded_trusted.cc" ]
}
} else if (v8_enable_embedded_builtins) {
sources += [ "$target_gen_dir/embedded.S" ]
if (v8_use_multi_snapshots) {
sources += [ "$target_gen_dir/embedded_trusted.S" ]
-
- if (use_jumbo_build == true) {
- jumbo_excluded_sources = [
- # Duplicated symbols with embedded.S
- "$target_gen_dir/embedded_trusted.S",
- ]
- }
+ jumbo_excluded_sources = [
+ # Duplicated symbols with embedded.S
+ "$target_gen_dir/embedded_trusted.S",
+ ]
}
} else {
sources += [ "src/snapshot/embedded-empty.cc" ]
@@ -1297,6 +1371,9 @@ v8_source_set("v8_initializers") {
deps = [
":torque_generated_initializers",
+ ]
+
+ public_deps = [
":v8_base",
]
@@ -1376,18 +1453,16 @@ v8_source_set("v8_initializers") {
"src/interpreter/interpreter-intrinsics-generator.h",
]
- if (use_jumbo_build == true) {
- jumbo_excluded_sources = [
- # TODO(mostynb@vewd.com): don't exclude these http://crbug.com/752428
- "src/builtins/builtins-async-iterator-gen.cc",
- "src/builtins/builtins-async-generator-gen.cc",
+ jumbo_excluded_sources = [
+ # TODO(mostynb@vewd.com): don't exclude these http://crbug.com/752428
+ "src/builtins/builtins-async-iterator-gen.cc",
+ "src/builtins/builtins-async-generator-gen.cc",
- # These source files take an unusually large amount of time to
- # compile. Build them separately to avoid bottlenecks.
- "src/builtins/builtins-regexp-gen.cc",
- "src/code-stub-assembler.cc",
- ]
- }
+ # These source files take an unusually large amount of time to
+ # compile. Build them separately to avoid bottlenecks.
+ "src/builtins/builtins-regexp-gen.cc",
+ "src/code-stub-assembler.cc",
+ ]
if (v8_current_cpu == "x86") {
sources += [
@@ -1435,7 +1510,8 @@ v8_source_set("v8_initializers") {
sources -= [ "src/builtins/builtins-intl-gen.cc" ]
}
- configs = [ ":internal_config" ]
+ remove_configs = [ v8_path_prefix + ":default_optimization" ]
+ configs = [ ":unoptimized_initializer" ]
}
v8_source_set("v8_init") {
@@ -1474,6 +1550,7 @@ v8_header_set("v8_version") {
# can depend upon to get basic v8 types.
v8_header_set("v8_headers") {
configs = [ ":internal_config" ]
+ public_configs = [ ":v8_header_features" ]
sources = [
"include/v8-internal.h",
@@ -1481,19 +1558,33 @@ v8_header_set("v8_headers") {
"include/v8config.h",
]
- if (is_linux || is_mac) {
- sources += [ "include/v8-wasm-trap-handler-posix.h" ]
- }
-
- if (is_win) {
- sources += [ "include/v8-wasm-trap-handler-win.h" ]
- }
+ sources += [
+ # The following headers cannot be platform-specific. The include validation
+ # of `gn gen $dir --check` requires all header files to be available on all
+ # platforms.
+ "include/v8-wasm-trap-handler-posix.h",
+ "include/v8-wasm-trap-handler-win.h",
+ ]
deps = [
":v8_version",
]
}
+# This is split out to share basic headers with Torque.
+v8_header_set("v8_shared_internal_headers") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ configs = [ ":internal_config" ]
+
+ sources = [
+ "src/globals.h",
+ ]
+
+ deps = [
+ ":v8_headers",
+ ]
+}
+
v8_source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -1543,7 +1634,6 @@ v8_source_set("v8_base") {
"src/asmjs/asm-scanner.h",
"src/asmjs/asm-types.cc",
"src/asmjs/asm-types.h",
- "src/assembler-arch-inl.h",
"src/assembler-arch.h",
"src/assembler-inl.h",
"src/assembler.cc",
@@ -1632,6 +1722,8 @@ v8_source_set("v8_base") {
"src/checks.h",
"src/code-comments.cc",
"src/code-comments.h",
+ "src/code-desc.cc",
+ "src/code-desc.h",
"src/code-events.h",
"src/code-factory.cc",
"src/code-factory.h",
@@ -1958,22 +2050,25 @@ v8_source_set("v8_base") {
"src/flag-definitions.h",
"src/flags.cc",
"src/flags.h",
+ "src/flush-instruction-cache.cc",
+ "src/flush-instruction-cache.h",
"src/frame-constants.h",
"src/frames-inl.h",
"src/frames.cc",
"src/frames.h",
+ "src/function-kind.h",
"src/futex-emulation.cc",
"src/futex-emulation.h",
"src/gdb-jit.cc",
"src/gdb-jit.h",
"src/global-handles.cc",
"src/global-handles.h",
- "src/globals.h",
"src/handler-table.cc",
"src/handler-table.h",
"src/handles-inl.h",
"src/handles.cc",
"src/handles.h",
+ "src/hash-seed-inl.h",
"src/heap-symbols.h",
"src/heap/array-buffer-collector.cc",
"src/heap/array-buffer-collector.h",
@@ -2025,6 +2120,8 @@ v8_source_set("v8_base") {
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
+ "src/heap/read-only-heap.cc",
+ "src/heap/read-only-heap.h",
"src/heap/remembered-set.h",
"src/heap/scavenge-job.cc",
"src/heap/scavenge-job.h",
@@ -2163,6 +2260,8 @@ v8_source_set("v8_base") {
"src/objects-printer.cc",
"src/objects.cc",
"src/objects.h",
+ "src/objects/allocation-site-inl.h",
+ "src/objects/allocation-site.h",
"src/objects/api-callbacks-inl.h",
"src/objects/api-callbacks.h",
"src/objects/arguments-inl.h",
@@ -2173,9 +2272,11 @@ v8_source_set("v8_base") {
"src/objects/cell-inl.h",
"src/objects/cell.h",
"src/objects/code-inl.h",
+ "src/objects/code.cc",
"src/objects/code.h",
"src/objects/compilation-cache-inl.h",
"src/objects/compilation-cache.h",
+ "src/objects/data-handler.h",
"src/objects/debug-objects-inl.h",
"src/objects/debug-objects.cc",
"src/objects/debug-objects.h",
@@ -2232,6 +2333,7 @@ v8_source_set("v8_base") {
"src/objects/js-number-format.cc",
"src/objects/js-number-format.h",
"src/objects/js-objects-inl.h",
+ "src/objects/js-objects.cc",
"src/objects/js-objects.h",
"src/objects/js-plural-rules-inl.h",
"src/objects/js-plural-rules.cc",
@@ -2261,6 +2363,7 @@ v8_source_set("v8_base") {
"src/objects/managed.cc",
"src/objects/managed.h",
"src/objects/map-inl.h",
+ "src/objects/map.cc",
"src/objects/map.h",
"src/objects/maybe-object-inl.h",
"src/objects/maybe-object.h",
@@ -2299,13 +2402,18 @@ v8_source_set("v8_base") {
"src/objects/slots-inl.h",
"src/objects/slots.h",
"src/objects/stack-frame-info-inl.h",
+ "src/objects/stack-frame-info.cc",
"src/objects/stack-frame-info.h",
+ "src/objects/string-comparator.cc",
+ "src/objects/string-comparator.h",
"src/objects/string-inl.h",
"src/objects/string-table-inl.h",
"src/objects/string-table.h",
+ "src/objects/string.cc",
"src/objects/string.h",
"src/objects/struct-inl.h",
"src/objects/struct.h",
+ "src/objects/template-objects-inl.h",
"src/objects/template-objects.cc",
"src/objects/template-objects.h",
"src/objects/templates-inl.h",
@@ -2326,7 +2434,6 @@ v8_source_set("v8_base") {
"src/parsing/parser.h",
"src/parsing/parsing.cc",
"src/parsing/parsing.h",
- "src/parsing/pattern-rewriter.cc",
"src/parsing/preparse-data-impl.h",
"src/parsing/preparse-data.cc",
"src/parsing/preparse-data.h",
@@ -2521,6 +2628,8 @@ v8_source_set("v8_base") {
"src/third_party/utf8-decoder/utf8-decoder.h",
"src/thread-id.cc",
"src/thread-id.h",
+ "src/thread-local-top.cc",
+ "src/thread-local-top.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
"src/tracing/traced-value.cc",
@@ -2650,6 +2759,7 @@ v8_source_set("v8_base") {
"src/zone/zone-list-inl.h",
"src/zone/zone-segment.cc",
"src/zone/zone-segment.h",
+ "src/zone/zone-splay-tree.h",
"src/zone/zone.cc",
"src/zone/zone.h",
]
@@ -2661,20 +2771,18 @@ v8_source_set("v8_base") {
sources += check_header_includes_sources
}
- if (use_jumbo_build == true) {
- jumbo_excluded_sources = [
- # TODO(mostynb@vewd.com): don't exclude these http://crbug.com/752428
- "src/profiler/heap-snapshot-generator.cc", # Macro clash in mman-linux.h
+ jumbo_excluded_sources = [
+ # TODO(mostynb@vewd.com): don't exclude these http://crbug.com/752428
+ "src/profiler/heap-snapshot-generator.cc", # Macro clash in mman-linux.h
- # These source files take an unusually large amount of time to
- # compile. Build them separately to avoid bottlenecks.
- "src/api.cc",
- "src/elements.cc",
- "src/heap/heap.cc",
- "src/objects.cc",
- "src/parsing/parser.cc",
- ]
- }
+ # These source files take an unusually large amount of time to
+ # compile. Build them separately to avoid bottlenecks.
+ "src/api.cc",
+ "src/elements.cc",
+ "src/heap/heap.cc",
+ "src/objects.cc",
+ "src/parsing/parser.cc",
+ ]
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
@@ -2817,13 +2925,11 @@ v8_source_set("v8_base") {
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
]
- if (use_jumbo_build) {
- jumbo_excluded_sources += [
- # TODO(mostynb@vewd.com): fix this code so it doesn't need
- # to be excluded, see the comments inside.
- "src/arm64/instructions-arm64-constants.cc",
- ]
- }
+ jumbo_excluded_sources += [
+ # TODO(mostynb@vewd.com): fix this code so it doesn't need
+ # to be excluded, see the comments inside.
+ "src/arm64/instructions-arm64-constants.cc",
+ ]
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/compiler/backend/mips/code-generator-mips.cc",
@@ -2938,18 +3044,21 @@ v8_source_set("v8_base") {
defines = []
deps = [
- ":generate_bytecode_builtins_list",
- ":run_torque",
":v8_headers",
":v8_libbase",
":v8_libsampler",
+ ":v8_shared_internal_headers",
+ ":v8_version",
"src/inspector:inspector",
]
+ public_deps = [
+ ":generate_bytecode_builtins_list",
+ ":run_torque",
+ ]
+
if (v8_enable_i18n_support) {
- public_deps = [
- "//third_party/icu",
- ]
+ public_deps += [ "//third_party/icu" ]
if (is_win) {
deps += [ "//third_party/icu:icudata" ]
}
@@ -3033,8 +3142,12 @@ v8_source_set("torque_base") {
"src/torque/implementation-visitor.h",
"src/torque/instructions.cc",
"src/torque/instructions.h",
+ "src/torque/server-data.cc",
+ "src/torque/server-data.h",
"src/torque/source-positions.cc",
"src/torque/source-positions.h",
+ "src/torque/torque-compiler.cc",
+ "src/torque/torque-compiler.h",
"src/torque/torque-parser.cc",
"src/torque/torque-parser.h",
"src/torque/type-oracle.cc",
@@ -3047,6 +3160,31 @@ v8_source_set("torque_base") {
deps = [
":v8_libbase",
+ ":v8_shared_internal_headers",
+ ]
+
+ configs = [ ":internal_config" ]
+ if (is_win && is_asan) {
+ remove_configs = [ "//build/config/sanitizers:default_sanitizer_flags" ]
+ }
+}
+
+v8_source_set("torque_ls_base") {
+ sources = [
+ "src/torque/ls/globals.h",
+ "src/torque/ls/json-parser.cc",
+ "src/torque/ls/json-parser.h",
+ "src/torque/ls/json.cc",
+ "src/torque/ls/json.h",
+ "src/torque/ls/message-handler.cc",
+ "src/torque/ls/message-handler.h",
+ "src/torque/ls/message-macros.h",
+ "src/torque/ls/message-pipe.h",
+ "src/torque/ls/message.h",
+ ]
+
+ deps = [
+ ":torque_base",
]
configs = [ ":internal_config" ]
@@ -3135,6 +3273,10 @@ v8_component("v8_libbase") {
public_configs = [ ":libbase_config" ]
+ deps = [
+ ":v8_headers",
+ ]
+
public_deps = []
data = []
@@ -3282,6 +3424,7 @@ v8_component("v8_libplatform") {
public_configs = [ ":libplatform_config" ]
deps = [
+ ":v8_headers",
":v8_libbase",
]
}
@@ -3292,7 +3435,7 @@ v8_source_set("v8_libsampler") {
"src/libsampler/sampler.h",
]
- configs = [ ":internal_config_base" ]
+ configs = [ ":internal_config" ]
public_configs = [ ":libsampler_config" ]
@@ -3422,6 +3565,25 @@ if (current_toolchain == v8_snapshot_toolchain) {
}
}
+v8_executable("torque-language-server") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ sources = [
+ "src/torque/ls/torque-language-server.cc",
+ ]
+
+ deps = [
+ ":torque_base",
+ ":torque_ls_base",
+ "//build/win:default_exe_manifest",
+ ]
+
+ configs = [ ":internal_config" ]
+ if (is_win && is_asan) {
+ remove_configs = [ "//build/config/sanitizers:default_sanitizer_flags" ]
+ }
+}
+
###############################################################################
# Public targets
#
@@ -3542,7 +3704,9 @@ if (is_component_build) {
public_deps = [
":torque_base",
+ ":torque_ls_base",
":v8_base",
+ ":v8_headers",
":v8_maybe_snapshot",
]
@@ -3569,6 +3733,7 @@ if (is_component_build) {
public_deps = [
":torque_base",
+ ":torque_ls_base",
":v8_base",
":v8_maybe_snapshot",
]
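The v8_header_features config introduced above centralizes every define that V8's public headers test (V8_ENABLE_CHECKS, V8_COMPRESS_POINTERS, the deprecation warnings, and so on), and v8_headers now exports it via public_configs. As a minimal sketch of what that buys an embedder — assuming a hypothetical out-of-tree target, where the "embedder_shell" name, "shell.cc" source, and "//deps/v8" path are illustrative and not part of this patch — GN applies a dependency's public_configs to its direct dependents, so the embedder inherits the same feature defines without restating them:

    # Hypothetical embedder BUILD.gn fragment, not part of this patch.
    # Depending on ":v8_headers" pulls in its public_configs, so the
    # V8_* defines from ":v8_header_features" are set here automatically
    # and cannot drift from the flags V8 itself was built with.
    executable("embedder_shell") {
      sources = [ "shell.cc" ]
      deps = [ "//deps/v8:v8_headers" ]
    }

This is also the rationale for moving those defines out of the internal "features" config: a flag that public headers may test has to reach every includer, not only targets defined in this file.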
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 62b3ace776..23725637e6 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1458 @@
+2019-03-05: Version 7.4.288
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-05: Version 7.4.287
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-05: Version 7.4.286
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-05: Version 7.4.285
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-05: Version 7.4.284
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-05: Version 7.4.283
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-05: Version 7.4.282
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.281
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.280
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.279
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.278
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.277
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.276
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.275
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.274
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.273
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.272
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-04: Version 7.4.271
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-03: Version 7.4.270
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-02: Version 7.4.269
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-01: Version 7.4.268
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-01: Version 7.4.267
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-01: Version 7.4.266
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-01: Version 7.4.265
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-01: Version 7.4.264
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-01: Version 7.4.263
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-01: Version 7.4.262
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-01: Version 7.4.261
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-01: Version 7.4.260
+
+ Performance and stability improvements on all platforms.
+
+
+2019-03-01: Version 7.4.259
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.258
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.257
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.256
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.255
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.254
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.253
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.252
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.251
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.250
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.249
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.248
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.247
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-28: Version 7.4.246
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.245
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.244
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.243
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.242
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.241
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.240
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.239
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.238
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.237
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.236
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.235
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.234
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.233
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.232
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-27: Version 7.4.231
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-26: Version 7.4.230
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-26: Version 7.4.229
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-26: Version 7.4.228
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-26: Version 7.4.227
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-26: Version 7.4.226
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-26: Version 7.4.225
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-26: Version 7.4.224
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-26: Version 7.4.223
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-26: Version 7.4.222
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.221
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.220
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.219
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.218
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.217
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.216
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.215
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.214
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.213
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.212
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-25: Version 7.4.211
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-23: Version 7.4.210
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-23: Version 7.4.209
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.208
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.207
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.206
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.205
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.204
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.203
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.202
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.201
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.200
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.199
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-22: Version 7.4.198
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-21: Version 7.4.197
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-18: Version 7.4.196
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-18: Version 7.4.195
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-18: Version 7.4.194
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-18: Version 7.4.193
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-18: Version 7.4.192
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-18: Version 7.4.191
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-18: Version 7.4.190
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-18: Version 7.4.189
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-18: Version 7.4.188
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-18: Version 7.4.187
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.186
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.185
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.184
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.183
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.182
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.181
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.180
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.179
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.178
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.177
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-15: Version 7.4.176
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-14: Version 7.4.175
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-14: Version 7.4.174
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-14: Version 7.4.173
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-14: Version 7.4.172
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-14: Version 7.4.171
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-14: Version 7.4.170
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-14: Version 7.4.169
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-14: Version 7.4.168
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-14: Version 7.4.167
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.166
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.165
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.164
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.163
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.162
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.161
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.160
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.159
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.158
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.157
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.156
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.155
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-13: Version 7.4.154
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.153
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.152
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.151
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.150
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.149
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.148
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.147
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.146
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.145
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.144
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.143
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.142
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-12: Version 7.4.141
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-11: Version 7.4.140
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-11: Version 7.4.139
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-11: Version 7.4.138
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-11: Version 7.4.137
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-11: Version 7.4.136
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-11: Version 7.4.135
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-11: Version 7.4.134
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-11: Version 7.4.133
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-11: Version 7.4.132
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-11: Version 7.4.131
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-09: Version 7.4.130
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-09: Version 7.4.129
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-09: Version 7.4.128
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-09: Version 7.4.127
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-09: Version 7.4.126
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-09: Version 7.4.125
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-09: Version 7.4.124
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.123
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.122
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.121
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.120
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.119
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.118
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.117
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.116
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.115
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.114
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.113
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.112
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-08: Version 7.4.111
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-07: Version 7.4.110
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-07: Version 7.4.109
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-07: Version 7.4.108
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-07: Version 7.4.107
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-07: Version 7.4.106
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-07: Version 7.4.105
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-07: Version 7.4.104
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-07: Version 7.4.103
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-07: Version 7.4.102
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-07: Version 7.4.101
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.100
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.99
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.98
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.97
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.96
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.95
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.94
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.93
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.92
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.91
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.90
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.89
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.88
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.87
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-06: Version 7.4.86
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-05: Version 7.4.85
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-05: Version 7.4.84
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-05: Version 7.4.83
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-05: Version 7.4.82
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-05: Version 7.4.81
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-05: Version 7.4.80
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-05: Version 7.4.79
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-05: Version 7.4.78
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.77
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.76
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.75
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.74
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.73
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.72
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.71
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.70
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.69
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.68
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.67
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.66
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.65
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.64
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.63
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.62
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.61
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-04: Version 7.4.60
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-03: Version 7.4.59
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-03: Version 7.4.58
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-02: Version 7.4.57
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-02: Version 7.4.56
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-01: Version 7.4.55
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-01: Version 7.4.54
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-01: Version 7.4.53
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-01: Version 7.4.52
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-01: Version 7.4.51
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-01: Version 7.4.50
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-01: Version 7.4.49
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-01: Version 7.4.48
+
+ Performance and stability improvements on all platforms.
+
+
+2019-02-01: Version 7.4.47
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-31: Version 7.4.46
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-31: Version 7.4.45
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-31: Version 7.4.44
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-31: Version 7.4.43
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-31: Version 7.4.42
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.41
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.40
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.39
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.38
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.37
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.36
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.35
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.34
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.33
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.32
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-30: Version 7.4.31
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.30
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.29
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.28
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.27
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.26
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.25
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.24
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.23
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.22
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.21
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.20
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.19
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.18
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.17
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.16
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.15
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.14
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-29: Version 7.4.13
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-28: Version 7.4.12
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-28: Version 7.4.11
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-28: Version 7.4.10
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-28: Version 7.4.9
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-28: Version 7.4.8
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-28: Version 7.4.7
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-28: Version 7.4.6
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-27: Version 7.4.5
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-27: Version 7.4.4
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-26: Version 7.4.3
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-26: Version 7.4.2
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-26: Version 7.4.1
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-23: Version 7.3.495
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-23: Version 7.3.494
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-23: Version 7.3.493
+
+ Performance and stability improvements on all platforms.
+
+
2019-01-23: Version 7.3.492

Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index ec6045a90a..1837bd96bc 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -16,27 +16,35 @@ vars = {
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + 'f2ca77c3aa839107f36fed20dac81fe8b71b060e',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '80892bfe019dc854c6acdbfbb7304cca63986d4f',
'v8/third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'f7971436824dd8eeb9b0cf19dabc3e32b369a904',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'cf661acb705fccc302901a1f8a251ad43ce2dd62',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '07e7295d964399ee7bee16a3ac7ca5a053b2cf0a',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '8c67416ccb4da42d817e7081ff83a2193b1aabe7',
'v8/third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'a959e4f0cb643003f2d75d179cede449979e3e77',
'v8/buildtools':
- Var('chromium_url') + '/chromium/buildtools.git' + '@' + '2f02e1f363b1af2715536f38e239853f04ec1497',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '3e50219fc4503f461b2176a9976891b28d80f9ab',
+ 'v8/buildtools/clang_format/script':
+ Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917',
+ 'v8/buildtools/third_party/libc++/trunk':
+ Var('chromium_url') + '/chromium/llvm-project/libcxx.git' + '@' + '22d3f6dd25e5efc59124ba1c00b8f98b14be4201',
+ 'v8/buildtools/third_party/libc++abi/trunk':
+ Var('chromium_url') + '/chromium/llvm-project/libcxxabi.git' + '@' + '0d529660e32d77d9111912d73f2c74fc5fa2a858',
+ 'v8/buildtools/third_party/libunwind/trunk':
+ Var('chromium_url') + '/external/llvm.org/libunwind.git' + '@' + '69d9b84cca8354117b9fe9705a4430d789ee599b',
'v8/base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'e31a1706337ccb9a658b37d29a018c81695c6518',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '936ba8a963284a6b3737cf2f0474a7131073abee',
'v8/third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '4e2cea441bfd43f0863d14f57b1e1844260b9884',
'condition': 'checkout_android',
},
'v8/third_party/android_tools': {
- 'url': Var('chromium_url') + '/android_tools.git' + '@' + 'e958d6ea74442d4e0849bb8a018d215a0e78981d',
+ 'url': Var('chromium_url') + '/android_tools.git' + '@' + '347a7c8078a009e98995985b7ab6ec6b35696dea',
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'dd2de388fc4e3e8fa97a97515ec35c5b3834b753',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'ccc29087522abefc852d1294595ae6db7e86d649',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -44,23 +52,23 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/fuchsia-sdk': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '5157be49c92d031a74192ee993f32a2a28c8b1c3',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '8e8db13b538ecb251e5ce9d5c781fc142f9752fd',
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '9518a57428ae0a7ed450c1361768e84a2a38af5a',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'efecb0bfa687cf87836494f5d62868485c00fb66',
'v8/third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + 'd50a88f50782ba29076061b94c7b9d08a6c7e424',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '7a61cf37d6a0163f0ec02d495289a1d038e62457',
'v8/test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '4f1155c566a222238fd86f179c6635ecb4c289bb',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '565d7d5b7dd808d9267006b83ac4ea9c48f782cc',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
'v8/third_party/qemu-linux-x64': {
@@ -84,7 +92,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '3a16568a56486d7d032b8ec7b8dae892413a9a7a',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '257c91cc44b07bd06ff03dde809ccbc46a22bec6',
'v8/tools/luci-go': {
'packages': [
{
@@ -104,11 +112,10 @@ deps = {
'dep_type': 'cipd',
},
'v8/test/wasm-js/data':
- Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'b42efa9b07c5544079c31f6088a66bead617559c',
+ Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '44dfa15cb87b1e9bef36e033ad5d2cdc4c2008fd',
}
recursedeps = [
- 'v8/buildtools',
'v8/third_party/android_tools',
]
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index f9ef03f5ba..6837eb95d0 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -984,8 +984,6 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, context, \
TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
- INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context)
// Macro to specify that two trace IDs are identical. For example,
// TRACE_LINK_IDS(
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 1ed8e0382a..2a691dfa60 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -53,6 +53,9 @@ declare_args() {
# Enable monolithic static library for embedders.
v8_monolithic = false
+
+ # Expose symbols for dynamic linking.
+ v8_expose_symbols = false
}
if (v8_use_external_startup_data == "") {
@@ -78,7 +81,7 @@ if (v8_enable_backtrace == "") {
# subdirectories.
v8_path_prefix = get_path_info("../", "abspath")
-v8_inspector_js_protocol = v8_path_prefix + "/src/inspector/js_protocol.json"
+v8_inspector_js_protocol = v8_path_prefix + "/src/inspector/js_protocol.pdl"
###############################################################################
# Templates
@@ -91,20 +94,8 @@ v8_add_configs = [
v8_path_prefix + ":toolchain",
]
-if (is_debug && !v8_optimized_debug) {
- v8_remove_configs += [ "//build/config/compiler:default_optimization" ]
- v8_add_configs += [ "//build/config/compiler:no_optimize" ]
-} else {
- v8_remove_configs += [ "//build/config/compiler:default_optimization" ]
-
- # TODO(crbug.com/621335) Rework this so that we don't have the confusion
- # between "optimize_speed" and "optimize_max".
- if (((is_posix && !is_android) || is_fuchsia) && !using_sanitizer) {
- v8_add_configs += [ "//build/config/compiler:optimize_speed" ]
- } else {
- v8_add_configs += [ "//build/config/compiler:optimize_max" ]
- }
-}
+v8_remove_configs += [ "//build/config/compiler:default_optimization" ]
+v8_add_configs += [ v8_path_prefix + ":default_optimization" ]
if (v8_code_coverage && !is_clang) {
v8_add_configs += [
@@ -113,7 +104,8 @@ if (v8_code_coverage && !is_clang) {
]
}
-if ((is_posix || is_fuchsia) && (v8_enable_backtrace || v8_monolithic)) {
+if ((is_posix || is_fuchsia) &&
+ (v8_enable_backtrace || v8_monolithic || v8_expose_symbols)) {
v8_remove_configs += [ "//build/config/gcc:symbol_visibility_hidden" ]
v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ]
}
@@ -132,15 +124,11 @@ if (!build_with_chromium && is_clang) {
template("v8_source_set") {
if (defined(invoker.split_count) && invoker.split_count > 1 &&
defined(v8_static_library) && v8_static_library && is_win) {
- link_target_type = "split_static_library"
+ link_target_type = "jumbo_split_static_library"
} else if (defined(v8_static_library) && v8_static_library) {
- link_target_type = "static_library"
+ link_target_type = "jumbo_static_library"
} else {
- if (use_jumbo_build) {
- link_target_type = "jumbo_source_set"
- } else {
- link_target_type = "source_set"
- }
+ link_target_type = "jumbo_source_set"
}
target(link_target_type, target_name) {
forward_variables_from(invoker,
@@ -149,21 +137,21 @@ template("v8_source_set") {
"configs",
"remove_configs",
])
+ configs -= v8_remove_configs
+ configs += v8_add_configs
if (defined(invoker.remove_configs)) {
configs -= invoker.remove_configs
}
configs += invoker.configs
- configs -= v8_remove_configs
- configs += v8_add_configs
}
}
template("v8_header_set") {
jumbo_source_set(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
- configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
+ configs += invoker.configs
}
}
@@ -175,12 +163,12 @@ template("v8_executable") {
"configs",
"remove_configs",
])
+ configs -= v8_remove_configs
+ configs += v8_add_configs
if (defined(invoker.remove_configs)) {
configs -= invoker.remove_configs
}
configs += invoker.configs
- configs -= v8_remove_configs
- configs += v8_add_configs
if (is_linux) {
# For enabling ASLR.
ldflags = [ "-pie" ]
@@ -203,9 +191,9 @@ template("v8_executable") {
template("v8_component") {
component(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
- configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
+ configs += invoker.configs
}
}
@@ -213,9 +201,9 @@ template("v8_static_library") {
static_library(target_name) {
complete_static_lib = true
forward_variables_from(invoker, "*", [ "configs" ])
- configs += invoker.configs
configs -= v8_remove_configs
configs -= [ "//build/config/compiler:thin_archive" ]
configs += v8_add_configs
+ configs += invoker.configs
}
}
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index 13c0db9a85..6908aeaa88 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -41,15 +41,6 @@ V8_PLATFORM_EXPORT std::unique_ptr<v8::Platform> NewDefaultPlatform(
InProcessStackDumping::kDisabled,
std::unique_ptr<v8::TracingController> tracing_controller = {});
-V8_PLATFORM_EXPORT V8_DEPRECATED(
- "Use NewDefaultPlatform instead",
- v8::Platform* CreateDefaultPlatform(
- int thread_pool_size = 0,
- IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
- InProcessStackDumping in_process_stack_dumping =
- InProcessStackDumping::kDisabled,
- v8::TracingController* tracing_controller = nullptr));
-
/**
* Pumps the message loop for the given isolate.
*
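With the deprecated CreateDefaultPlatform() removed above, embedders own the platform through the std::unique_ptr returned by NewDefaultPlatform(). A minimal initialization sketch under that assumption; the surrounding v8::V8 calls are the usual embedder boilerplate, not part of this patch:

    #include <memory>
    #include "libplatform/libplatform.h"
    #include "v8.h"

    int main() {
      // The unique_ptr replaces the raw v8::Platform* of the removed API.
      std::unique_ptr<v8::Platform> platform =
          v8::platform::NewDefaultPlatform();
      v8::V8::InitializePlatform(platform.get());
      v8::V8::Initialize();
      // ... create isolates and contexts, run scripts ...
      v8::V8::Dispose();
      v8::V8::ShutdownPlatform();
      return 0;
    }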
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index 250d5fbdb9..bc249cb9ec 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -5,6 +5,7 @@
#ifndef V8_LIBPLATFORM_V8_TRACING_H_
#define V8_LIBPLATFORM_V8_TRACING_H_
+#include <atomic>
#include <fstream>
#include <memory>
#include <unordered_set>
@@ -221,12 +222,10 @@ class V8_PLATFORM_EXPORT TraceConfig {
class V8_PLATFORM_EXPORT TracingController
: public V8_PLATFORM_NON_EXPORTED_BASE(v8::TracingController) {
public:
- enum Mode { DISABLED = 0, RECORDING_MODE };
-
- // The pointer returned from GetCategoryGroupEnabledInternal() points to a
- // value with zero or more of the following bits. Used in this class only.
- // The TRACE_EVENT macros should only use the value as a bool.
- // These values must be in sync with macro values in TraceEvent.h in Blink.
+ // The pointer returned from GetCategoryGroupEnabled() points to a value with
+ // zero or more of the following bits. Used in this class only. The
+ // TRACE_EVENT macros should only use the value as a bool. These values must
+ // be in sync with macro values in TraceEvent.h in Blink.
enum CategoryGroupEnabledFlags {
// Category group enabled for the recording mode.
ENABLED_FOR_RECORDING = 1 << 0,
@@ -273,7 +272,6 @@ class V8_PLATFORM_EXPORT TracingController
virtual int64_t CurrentCpuTimestampMicroseconds();
private:
- const uint8_t* GetCategoryGroupEnabledInternal(const char* category_group);
void UpdateCategoryGroupEnabledFlag(size_t category_index);
void UpdateCategoryGroupEnabledFlags();
@@ -281,7 +279,7 @@ class V8_PLATFORM_EXPORT TracingController
std::unique_ptr<TraceConfig> trace_config_;
std::unique_ptr<base::Mutex> mutex_;
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
- Mode mode_ = DISABLED;
+ std::atomic_bool recording_{false};
// Disallow copy and assign
TracingController(const TracingController&) = delete;
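Replacing the Mode enum with std::atomic_bool lets the hot enabled-category path read the recording state without taking mutex_. A standalone sketch of that pattern, using an illustrative Recorder type rather than V8's actual class:

    #include <atomic>

    class Recorder {
     public:
      // Writers publish the state change...
      void Start() { recording_.store(true, std::memory_order_release); }
      void Stop() { recording_.store(false, std::memory_order_release); }
      // ...and readers observe it lock-free on every trace macro hit.
      bool IsRecording() const {
        return recording_.load(std::memory_order_acquire);
      }

     private:
      std::atomic_bool recording_{false};
    };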
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 7f9c27ebb9..bb69bb915d 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -31,7 +31,7 @@ static const Address kNullAddress = 0;
const int kApiSystemPointerSize = sizeof(void*);
const int kApiTaggedSize = kApiSystemPointerSize;
const int kApiDoubleSize = sizeof(double);
-const int kApiIntSize = sizeof(int);
+const int kApiInt32Size = sizeof(int32_t);
const int kApiInt64Size = sizeof(int64_t);
// Tag information for HeapObject.
@@ -88,16 +88,16 @@ struct SmiTagging<8> {
}
};
-#if defined(V8_COMPRESS_POINTERS)
+#ifdef V8_COMPRESS_POINTERS
static_assert(
kApiSystemPointerSize == kApiInt64Size,
"Pointer compression can be enabled only for 64-bit architectures");
#endif
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
-typedef SmiTagging<kApiIntSize> PlatformSmiTagging;
+#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
+typedef SmiTagging<kApiInt32Size> PlatformSmiTagging;
#else
-typedef SmiTagging<kApiSystemPointerSize> PlatformSmiTagging;
+typedef SmiTagging<kApiTaggedSize> PlatformSmiTagging;
#endif
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
@@ -122,15 +122,13 @@ class Internals {
// These values match non-compiler-dependent values defined within
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
- static const int kMapInstanceTypeOffset = 1 * kApiTaggedSize + kApiIntSize;
- static const int kStringResourceOffset = 1 * kApiTaggedSize + 2 * kApiIntSize;
+ static const int kMapInstanceTypeOffset = 1 * kApiTaggedSize + kApiInt32Size;
+ static const int kStringResourceOffset =
+ 1 * kApiTaggedSize + 2 * kApiInt32Size;
static const int kOddballKindOffset = 4 * kApiTaggedSize + kApiDoubleSize;
static const int kForeignAddressOffset = kApiTaggedSize;
static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
- static const int kJSObjectHeaderSizeForEmbedderFields =
- (kJSObjectHeaderSize + kApiSystemPointerSize - 1) &
- -kApiSystemPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataSlotSize =
@@ -148,7 +146,7 @@ class Internals {
static const int kIsolateEmbedderDataOffset = 0;
static const int kExternalMemoryOffset =
- kNumIsolateDataSlots * kApiTaggedSize;
+ kNumIsolateDataSlots * kApiSystemPointerSize;
static const int kExternalMemoryLimitOffset =
kExternalMemoryOffset + kApiInt64Size;
static const int kExternalMemoryAtLastMarkCompactOffset =
@@ -163,8 +161,8 @@ class Internals {
static const int kFalseValueRootIndex = 8;
static const int kEmptyStringRootIndex = 9;
- static const int kNodeClassIdOffset = 1 * kApiTaggedSize;
- static const int kNodeFlagsOffset = 1 * kApiTaggedSize + 3;
+ static const int kNodeClassIdOffset = 1 * kApiSystemPointerSize;
+ static const int kNodeFlagsOffset = 1 * kApiSystemPointerSize + 3;
static const int kNodeStateMask = 0x7;
static const int kNodeStateIsWeakValue = 2;
static const int kNodeStateIsPendingValue = 3;
@@ -172,9 +170,9 @@ class Internals {
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsActiveShift = 4;
- static const int kFirstNonstringType = 0x80;
- static const int kOddballType = 0x83;
- static const int kForeignType = 0x87;
+ static const int kFirstNonstringType = 0x40;
+ static const int kOddballType = 0x43;
+ static const int kForeignType = 0x47;
static const int kJSSpecialApiObjectType = 0x410;
static const int kJSApiObjectType = 0x420;
static const int kJSObjectType = 0x421;
@@ -182,6 +180,12 @@ class Internals {
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
+ // Constants used by PropertyCallbackInfo to check if we should throw when an
+ // error occurs.
+ static const int kThrowOnError = 0;
+ static const int kDontThrow = 1;
+ static const int kInferShouldThrowMode = 2;
+
// Soft limit for AdjustAmountofExternalAllocatedMemory. Trigger an
// incremental GC once the external memory reaches this limit.
static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;
@@ -367,6 +371,11 @@ V8_INLINE void PerformCastCheck(T* data) {
// that's guaranteed to never be in ReadOnlySpace.
V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
+// Returns whether we need to throw when an error occurs, inferring the
+// language mode from the current context and the closure. Returns true if
+// the language mode is strict.
+V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
+
} // namespace internal
} // namespace v8
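The new kThrowOnError/kDontThrow/kInferShouldThrowMode constants back PropertyCallbackInfo<T>::ShouldThrowOnError(), which interceptors can consult before swallowing a failure. A hedged sketch of a named-property setter; StoreProperty is a hypothetical embedder helper, not a V8 API:

    #include "v8.h"

    bool StoreProperty(v8::Local<v8::Name>, v8::Local<v8::Value>) {
      return false;  // hypothetical: pretend the store was rejected
    }

    void NamedSetter(v8::Local<v8::Name> name, v8::Local<v8::Value> value,
                     const v8::PropertyCallbackInfo<v8::Value>& info) {
      if (StoreProperty(name, value)) return;
      // In strict mode (or when inference says so), the failure must throw.
      if (info.ShouldThrowOnError()) {
        v8::Isolate* isolate = info.GetIsolate();
        isolate->ThrowException(v8::Exception::TypeError(
            v8::String::NewFromUtf8(isolate, "property is read-only",
                                    v8::NewStringType::kNormal)
                .ToLocalChecked()));
      }
    }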
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index fc008979f6..556407d876 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -71,6 +71,17 @@ class TaskRunner {
double delay_in_seconds) = 0;
/**
+ * Schedules a task to be invoked by this TaskRunner. The task is scheduled
+ * after the given number of seconds |delay_in_seconds|. The TaskRunner
+ * implementation takes ownership of |task|. The |task| cannot be nested
+ * within other task executions.
+ *
+ * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
+ */
+ virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) {}
+
+ /**
* Schedules an idle task to be invoked by this TaskRunner. The task is
* scheduled when the embedder is idle. Requires that
* |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
@@ -90,6 +101,11 @@ class TaskRunner {
*/
virtual bool NonNestableTasksEnabled() const { return false; }
+ /**
+ * Returns true if non-nestable delayed tasks are enabled for this TaskRunner.
+ */
+ virtual bool NonNestableDelayedTasksEnabled() const { return false; }
+
TaskRunner() = default;
virtual ~TaskRunner() = default;
@@ -430,7 +446,7 @@ class Platform {
* since epoch. Useful for implementing |CurrentClockTimeMillis| if
* nothing special needed.
*/
- static double SystemClockTimeMillis();
+ V8_EXPORT static double SystemClockTimeMillis();
};
} // namespace v8
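Because the new PostNonNestableDelayedTask() defaults to an empty stub, callers should gate on the matching capability query. A sketch, assuming the embedder already holds the platform and isolate:

    #include <memory>
    #include "v8-platform.h"
    #include "v8.h"

    void PostCleanup(v8::Platform* platform, v8::Isolate* isolate,
                     std::unique_ptr<v8::Task> task) {
      std::shared_ptr<v8::TaskRunner> runner =
          platform->GetForegroundTaskRunner(isolate);
      if (runner->NonNestableDelayedTasksEnabled()) {
        // Runs after ~2s and never nested inside another task's execution.
        runner->PostNonNestableDelayedTask(std::move(task), 2.0);
      } else {
        runner->PostDelayedTask(std::move(task), 2.0);
      }
    }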
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 94d3fcfcf6..3adce79be5 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -746,33 +746,6 @@ class V8_EXPORT HeapProfiler {
kSamplingForceGC = 1 << 0,
};
- typedef std::unordered_set<const v8::PersistentBase<v8::Value>*>
- RetainerChildren;
- typedef std::vector<std::pair<v8::RetainedObjectInfo*, RetainerChildren>>
- RetainerGroups;
- typedef std::vector<std::pair<const v8::PersistentBase<v8::Value>*,
- const v8::PersistentBase<v8::Value>*>>
- RetainerEdges;
-
- struct RetainerInfos {
- RetainerGroups groups;
- RetainerEdges edges;
- };
-
- /**
- * Callback function invoked to retrieve all RetainerInfos from the embedder.
- */
- typedef RetainerInfos (*GetRetainerInfosCallback)(v8::Isolate* isolate);
-
- /**
- * Callback function invoked for obtaining RetainedObjectInfo for
- * the given JavaScript wrapper object. It is prohibited to enter V8
- * while the callback is running: only getters on the handle and
- * GetPointerFromInternalField on the objects are allowed.
- */
- typedef RetainedObjectInfo* (*WrapperInfoCallback)(uint16_t class_id,
- Local<Value> wrapper);
-
/**
* Callback function invoked during heap snapshot generation to retrieve
* the embedder object graph. The callback should use graph->AddEdge(..) to
@@ -925,16 +898,6 @@ class V8_EXPORT HeapProfiler {
*/
void DeleteAllHeapSnapshots();
- /** Binds a callback to embedder's class ID. */
- V8_DEPRECATED(
- "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
- void SetWrapperClassInfoProvider(uint16_t class_id,
- WrapperInfoCallback callback));
-
- V8_DEPRECATED(
- "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
- void SetGetRetainerInfosCallback(GetRetainerInfosCallback callback));
-
V8_DEPRECATED(
"Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
void SetBuildEmbedderGraphCallback(
@@ -959,80 +922,6 @@ class V8_EXPORT HeapProfiler {
};
/**
- * Interface for providing information about embedder's objects
- * held by global handles. This information is reported in two ways:
- *
- * 1. When calling AddObjectGroup, an embedder may pass
- * RetainedObjectInfo instance describing the group. To collect
- * this information while taking a heap snapshot, V8 calls GC
- * prologue and epilogue callbacks.
- *
- * 2. When a heap snapshot is collected, V8 additionally
- * requests RetainedObjectInfos for persistent handles that
- * were not previously reported via AddObjectGroup.
- *
- * Thus, if an embedder wants to provide information about native
- * objects for heap snapshots, it can do it in a GC prologue
- * handler, and / or by assigning wrapper class ids in the following way:
- *
- * 1. Bind a callback to class id by calling SetWrapperClassInfoProvider.
- * 2. Call SetWrapperClassId on certain persistent handles.
- *
- * V8 takes ownership of RetainedObjectInfo instances passed to it and
- * keeps them alive only during snapshot collection. Afterwards, they
- * are freed by calling the Dispose class function.
- */
-class V8_EXPORT RetainedObjectInfo { // NOLINT
- public:
- /** Called by V8 when it no longer needs an instance. */
- virtual void Dispose() = 0;
-
- /** Returns whether two instances are equivalent. */
- virtual bool IsEquivalent(RetainedObjectInfo* other) = 0;
-
- /**
- * Returns hash value for the instance. Equivalent instances
- * must have the same hash value.
- */
- virtual intptr_t GetHash() = 0;
-
- /**
- * Returns human-readable label. It must be a null-terminated UTF-8
- * encoded string. V8 copies its contents during a call to GetLabel.
- */
- virtual const char* GetLabel() = 0;
-
- /**
- * Returns human-readable group label. It must be a null-terminated UTF-8
- * encoded string. V8 copies its contents during a call to GetGroupLabel.
- * Heap snapshot generator will collect all the group names, create
- * top level entries with these names and attach the objects to the
- * corresponding top level group objects. There is a default
- * implementation which is required because embedders don't have their
- * own implementation yet.
- */
- virtual const char* GetGroupLabel() { return GetLabel(); }
-
- /**
- * Returns element count in case if a global handle retains
- * a subgraph by holding one of its nodes.
- */
- virtual intptr_t GetElementCount() { return -1; }
-
- /** Returns embedder's object size in bytes. */
- virtual intptr_t GetSizeInBytes() { return -1; }
-
- protected:
- RetainedObjectInfo() = default;
- virtual ~RetainedObjectInfo() = default;
-
- private:
- RetainedObjectInfo(const RetainedObjectInfo&);
- RetainedObjectInfo& operator=(const RetainedObjectInfo&);
-};
-
-
-/**
* A struct for exporting HeapStats data from V8, using "push" model.
* See HeapProfiler::GetHeapStats.
*/
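With RetainedObjectInfo and the retainer-info callbacks gone, the embedder graph API named in the deprecation messages is the remaining way to describe native retainers in heap snapshots. A hedged sketch; MyNode and GetWrapper are illustrative stand-ins for embedder bookkeeping:

    #include <memory>
    #include "v8-profiler.h"
    #include "v8.h"

    class MyNode : public v8::EmbedderGraph::Node {
     public:
      const char* Name() override { return "MyNativeObject"; }
      size_t SizeInBytes() override { return 128; }  // illustrative size
    };

    v8::Local<v8::Object> GetWrapper(v8::Isolate*);  // hypothetical helper

    void BuildEmbedderGraph(v8::Isolate* isolate, v8::EmbedderGraph* graph,
                            void* data) {
      // The native node retains the JS wrapper: add a native -> V8 edge.
      v8::EmbedderGraph::Node* native =
          graph->AddNode(std::make_unique<MyNode>());
      graph->AddEdge(native, graph->V8Node(GetWrapper(isolate)));
    }

    // During isolate setup:
    // isolate->GetHeapProfiler()->AddBuildEmbedderGraphCallback(
    //     BuildEmbedderGraph, nullptr);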
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 7f12ead16b..466b99fd6b 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -198,12 +198,9 @@ class PersistentValueMapBase {
* Call V8::RegisterExternallyReferencedObject with the map value for given
* key.
*/
- void RegisterExternallyReferencedObject(K& key) {
- assert(Contains(key));
- V8::RegisterExternallyReferencedObject(
- reinterpret_cast<internal::Address*>(FromVal(Traits::Get(&impl_, key))),
- reinterpret_cast<internal::Isolate*>(GetIsolate()));
- }
+ V8_DEPRECATE_SOON(
+ "Used TracedGlobal and EmbedderHeapTracer::RegisterEmbedderReference",
+ inline void RegisterExternallyReferencedObject(K& key));
/**
* Return value for key and remove it from the map.
@@ -355,6 +352,15 @@ class PersistentValueMapBase {
const char* label_;
};
+template <typename K, typename V, typename Traits>
+inline void
+PersistentValueMapBase<K, V, Traits>::RegisterExternallyReferencedObject(
+ K& key) {
+ assert(Contains(key));
+ V8::RegisterExternallyReferencedObject(
+ reinterpret_cast<internal::Address*>(FromVal(Traits::Get(&impl_, key))),
+ reinterpret_cast<internal::Isolate*>(GetIsolate()));
+}
template <typename K, typename V, typename Traits>
class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index abf640228f..402da028c3 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 7
-#define V8_MINOR_VERSION 3
-#define V8_BUILD_NUMBER 492
-#define V8_PATCH_LEVEL 25
+#define V8_MINOR_VERSION 4
+#define V8_BUILD_NUMBER 288
+#define V8_PATCH_LEVEL 13
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index c5d9fc3a97..6ecc48af33 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -54,6 +54,7 @@ class Integer;
class Isolate;
template <class T>
class Maybe;
+class MicrotaskQueue;
class Name;
class Number;
class NumberObject;
@@ -92,6 +93,8 @@ template <class T, class M = NonCopyablePersistentTraits<T> >
class Persistent;
template <class T>
class Global;
+template <class T>
+class TracedGlobal;
template<class K, class V, class T> class PersistentValueMap;
template <class K, class V, class T>
class PersistentValueMapBase;
@@ -117,6 +120,7 @@ class Heap;
class HeapObject;
class Isolate;
class LocalEmbedderHeapTracer;
+class MicrotaskQueue;
class NeverReadOnlySpaceObject;
struct ScriptStreamingData;
template<typename T> class CustomArguments;
@@ -275,6 +279,7 @@ class Local {
V8_INLINE static Local<T> New(Isolate* isolate, Local<T> that);
V8_INLINE static Local<T> New(Isolate* isolate,
const PersistentBase<T>& that);
+ V8_INLINE static Local<T> New(Isolate* isolate, const TracedGlobal<T>& that);
private:
friend class Utils;
@@ -303,6 +308,8 @@ class Local {
template<class F1, class F2> friend class PersistentValueVector;
template <class F>
friend class ReturnValue;
+ template <class F>
+ friend class TracedGlobal;
explicit V8_INLINE Local(T* that) : val_(that) {}
V8_INLINE static Local<T> New(Isolate* isolate, T* that);
@@ -542,7 +549,9 @@ template <class T> class PersistentBase {
* is alive. Only allowed when the embedder is asked to trace its heap by
* EmbedderHeapTracer.
*/
- V8_INLINE void RegisterExternalReference(Isolate* isolate) const;
+ V8_DEPRECATE_SOON(
+ "Used TracedGlobal and EmbedderHeapTracer::RegisterEmbedderReference",
+ V8_INLINE void RegisterExternalReference(Isolate* isolate) const);
/**
* Marks the reference to this object independent. Garbage collector is free
@@ -550,9 +559,10 @@ template <class T> class PersistentBase {
* independent handle should not assume that it will be preceded by a global
* GC prologue callback or followed by a global GC epilogue callback.
*/
- V8_DEPRECATE_SOON(
- "Objects are always considered independent. "
- "Use MarkActive to avoid collecting otherwise dead weak handles.",
+ V8_DEPRECATED(
+ "Weak objects are always considered independent. "
+ "Use TracedGlobal when trying to use EmbedderHeapTracer. "
+ "Use a strong handle when trying to keep an object alive.",
V8_INLINE void MarkIndependent());
/**
@@ -562,22 +572,19 @@ template <class T> class PersistentBase {
*
* This bit is cleared after the each garbage collection pass.
*/
- V8_INLINE void MarkActive();
+ V8_DEPRECATE_SOON("Use TracedGlobal.", V8_INLINE void MarkActive());
- V8_DEPRECATE_SOON("See MarkIndependent.",
- V8_INLINE bool IsIndependent() const);
+ V8_DEPRECATED("See MarkIndependent.", V8_INLINE bool IsIndependent() const);
/** Checks if the handle holds the only reference to an object. */
- V8_DEPRECATE_SOON(
- "Garbage collection internal state should not be relied on.",
- V8_INLINE bool IsNearDeath() const);
+ V8_DEPRECATED("Garbage collection internal state should not be relied on.",
+ V8_INLINE bool IsNearDeath() const);
/** Returns true if the handle's reference is weak. */
V8_INLINE bool IsWeak() const;
/**
- * Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
- * description in v8-profiler.h for details.
+ * Assigns a wrapper class ID to the handle.
*/
V8_INLINE void SetWrapperClassId(uint16_t class_id);
@@ -758,6 +765,7 @@ class Global : public PersistentBase<T> {
* A Global with no storage cell.
*/
V8_INLINE Global() : PersistentBase<T>(nullptr) {}
+
/**
* Construct a Global from a Local.
* When the Local is non-empty, a new storage cell is created
@@ -768,6 +776,7 @@ class Global : public PersistentBase<T> {
: PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
TYPE_CHECK(T, S);
}
+
/**
* Construct a Global from a PersistentBase.
* When the Persistent is non-empty, a new storage cell is created
@@ -778,26 +787,20 @@ class Global : public PersistentBase<T> {
: PersistentBase<T>(PersistentBase<T>::New(isolate, that.val_)) {
TYPE_CHECK(T, S);
}
+
/**
* Move constructor.
*/
- V8_INLINE Global(Global&& other) : PersistentBase<T>(other.val_) {
- other.val_ = nullptr;
- }
+ V8_INLINE Global(Global&& other);
+
V8_INLINE ~Global() { this->Reset(); }
+
/**
* Move via assignment.
*/
template <class S>
- V8_INLINE Global& operator=(Global<S>&& rhs) { // NOLINT
- TYPE_CHECK(T, S);
- if (this != &rhs) {
- this->Reset();
- this->val_ = rhs.val_;
- rhs.val_ = nullptr;
- }
- return *this;
- }
+ V8_INLINE Global& operator=(Global<S>&& rhs);
+
/**
* Pass allows returning uniques from functions, etc.
*/
@@ -822,6 +825,151 @@ class Global : public PersistentBase<T> {
template <class T>
using UniquePersistent = Global<T>;
+/**
+ * A traced handle with move semantics, similar to std::unique_ptr. The handle
+ * is to be used together with |v8::EmbedderHeapTracer| and specifies edges from
+ * the embedder into V8's heap.
+ *
+ * The exact semantics are:
+ * - Tracing garbage collections use |v8::EmbedderHeapTracer|.
+ * - Non-tracing garbage collections consult
+ *   |v8::EmbedderHeapTracer::IsRootForNonTracingGC()| to decide whether the
+ *   handle should be treated as a root.
+ */
+template <typename T>
+class V8_EXPORT TracedGlobal {
+ public:
+ /**
+ * An empty TracedGlobal without storage cell.
+ */
+ TracedGlobal() = default;
+ ~TracedGlobal() { Reset(); }
+
+ /**
+ * Construct a TracedGlobal from a Local.
+ *
+ * When the Local is non-empty, a new storage cell is created
+ * pointing to the same object.
+ */
+ template <class S>
+ TracedGlobal(Isolate* isolate, Local<S> that)
+ : val_(New(isolate, *that, &val_)) {
+ TYPE_CHECK(T, S);
+ }
+
+ /**
+ * Move constructor initializing TracedGlobal from an existing one.
+ */
+ V8_INLINE TracedGlobal(TracedGlobal&& other);
+
+ /**
+ * Move assignment operator initializing TracedGlobal from an existing one.
+ */
+ template <class S>
+ V8_INLINE TracedGlobal& operator=(TracedGlobal<S>&& rhs);
+
+ /**
+ * TracedGlobal only supports move semantics and forbids copying.
+ */
+ TracedGlobal(const TracedGlobal&) = delete;
+ void operator=(const TracedGlobal&) = delete;
+
+ /**
+ * Returns true if this TracedGlobal is empty, i.e., has not been assigned an
+ * object.
+ */
+ bool IsEmpty() const { return val_ == nullptr; }
+
+ /**
+ * If non-empty, destroy the underlying storage cell. |IsEmpty| will return
+ * true after this call.
+ */
+ V8_INLINE void Reset();
+
+ /**
+ * If non-empty, destroy the underlying storage cell and create a new one
+ * with the contents of |other| if |other| is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
+
+ /**
+ * Construct a Local<T> from this handle.
+ */
+ Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
+
+ template <class S>
+ V8_INLINE TracedGlobal<S>& As() const {
+ return reinterpret_cast<TracedGlobal<S>&>(
+ const_cast<TracedGlobal<T>&>(*this));
+ }
+
+ template <class S>
+ V8_INLINE bool operator==(const TracedGlobal<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator==(const Local<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const TracedGlobal<S>& that) const {
+ return !operator==(that);
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const Local<S>& that) const {
+ return !operator==(that);
+ }
+
+ /**
+ * Assigns a wrapper class ID to the handle.
+ */
+ V8_INLINE void SetWrapperClassId(uint16_t class_id);
+
+ /**
+ * Returns the class ID previously assigned to this handle or 0 if no class ID
+ * was previously assigned.
+ */
+ V8_INLINE uint16_t WrapperClassId() const;
+
+ /**
+ * Adds a finalization callback to the handle. The type of this callback is
+ * similar to WeakCallbackType::kInternalFields, i.e., it will pass the
+ * parameter and the first two internal fields of the object.
+ *
+ * The callback is expected to reset the handle; no further V8 API may be
+ * called from within it. In case additional work
+ * involving V8 needs to be done, a second callback can be scheduled using
+ * WeakCallbackInfo<void>::SetSecondPassCallback.
+ */
+ V8_INLINE void SetFinalizationCallback(
+ void* parameter, WeakCallbackInfo<void>::Callback callback);
+
+ private:
+ V8_INLINE static T* New(Isolate* isolate, T* that, T** slot);
+
+ T* operator*() const { return this->val_; }
+
+ T* val_ = nullptr;
+
+ friend class EmbedderHeapTracer;
+ template <typename F>
+ friend class Local;
+ friend class Object;
+ template <typename F>
+ friend class ReturnValue;
+};
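A usage sketch for the new handle, assuming an EmbedderHeapTracer has been attached via Isolate::SetEmbedderHeapTracer; the wrapper management is illustrative, not prescribed by this patch:

    #include "v8.h"

    v8::TracedGlobal<v8::Object> wrapper;

    void CreateWrapper(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);
      wrapper.Reset(isolate, v8::Object::New(isolate));
      wrapper.SetWrapperClassId(1);
      // The finalizer must reset the handle and call no other V8 API.
      wrapper.SetFinalizationCallback(
          &wrapper, [](const v8::WeakCallbackInfo<void>& info) {
            static_cast<v8::TracedGlobal<v8::Object>*>(info.GetParameter())
                ->Reset();
          });
    }

    v8::Local<v8::Object> LookupWrapper(v8::Isolate* isolate) {
      // Reconstruct a Local on demand; check IsEmpty() first in real code.
      return wrapper.Get(isolate);
    }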
/**
* A stack-allocated class that governs a number of local handles.
@@ -1861,10 +2009,6 @@ class V8_EXPORT JSON {
/**
* Value serialization compatible with the HTML structured clone algorithm.
* The format is backward-compatible (i.e. safe to store to disk).
- *
- * WARNING: This API is under development, and changes (including incompatible
- * changes to the API or wire format) may occur without notice until this
- * warning is removed.
*/
class V8_EXPORT ValueSerializer {
public:
@@ -1985,10 +2129,6 @@ class V8_EXPORT ValueSerializer {
/**
* Deserializes values from data written with ValueSerializer, or a compatible
* implementation.
- *
- * WARNING: This API is under development, and changes (including incompatible
- * changes to the API or wire format) may occur without notice until this
- * warning is removed.
*/
class V8_EXPORT ValueDeserializer {
public:
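Dropping the under-development warnings commits the wire format to backward compatibility, so persisting serialized values becomes reasonable. A round-trip sketch with error handling elided; std::free pairs with the default allocator behind Release():

    #include <cstdlib>
    #include <utility>
    #include "v8.h"

    v8::Local<v8::Value> RoundTrip(v8::Isolate* isolate,
                                   v8::Local<v8::Context> context,
                                   v8::Local<v8::Value> value) {
      v8::ValueSerializer serializer(isolate);
      serializer.WriteHeader();
      serializer.WriteValue(context, value).Check();
      std::pair<uint8_t*, size_t> buffer = serializer.Release();

      v8::ValueDeserializer deserializer(isolate, buffer.first,
                                         buffer.second);
      deserializer.ReadHeader(context).Check();
      v8::Local<v8::Value> copy =
          deserializer.ReadValue(context).ToLocalChecked();
      std::free(buffer.first);
      return copy;
    }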
@@ -2416,9 +2556,9 @@ class V8_EXPORT Value : public Data {
bool BooleanValue(Isolate* isolate) const;
- V8_DEPRECATE_SOON("BooleanValue can never throw. Use Isolate version.",
- V8_WARN_UNUSED_RESULT Maybe<bool> BooleanValue(
- Local<Context> context) const);
+ V8_DEPRECATED("BooleanValue can never throw. Use Isolate version.",
+ V8_WARN_UNUSED_RESULT Maybe<bool> BooleanValue(
+ Local<Context> context) const);
V8_WARN_UNUSED_RESULT Maybe<double> NumberValue(Local<Context> context) const;
V8_WARN_UNUSED_RESULT Maybe<int64_t> IntegerValue(
Local<Context> context) const;
@@ -2615,10 +2755,6 @@ class V8_EXPORT String : public Name {
public:
virtual ~ExternalStringResourceBase() = default;
- V8_DEPRECATED("Use IsCacheable().", virtual bool IsCompressible() const) {
- return false;
- }
-
/**
* If a string is cacheable, the value returned by
* ExternalStringResource::data() may be cached, otherwise it is not
@@ -3381,7 +3517,6 @@ class V8_EXPORT Object : public Value {
* array returned by this method contains the same values as would
* be enumerated by a for-in statement over this object.
*/
- V8_DEPRECATED("Use maybe version", Local<Array> GetPropertyNames());
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
Local<Context> context);
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
@@ -3394,7 +3529,6 @@ class V8_EXPORT Object : public Value {
* the returned array doesn't contain the names of properties from
* prototype objects.
*/
- V8_DEPRECATED("Use maybe version", Local<Array> GetOwnPropertyNames());
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
Local<Context> context);
@@ -3450,12 +3584,17 @@ class V8_EXPORT Object : public Value {
/** Gets the number of internal fields for this Object. */
int InternalFieldCount();
- /** Same as above, but works for Persistents */
+ /** Same as above, but works for PersistentBase. */
V8_INLINE static int InternalFieldCount(
const PersistentBase<Object>& object) {
return object.val_->InternalFieldCount();
}
+ /** Same as above, but works for TracedGlobal. */
+ V8_INLINE static int InternalFieldCount(const TracedGlobal<Object>& object) {
+ return object.val_->InternalFieldCount();
+ }
+
/** Gets the value from an internal field. */
V8_INLINE Local<Value> GetInternalField(int index);
@@ -3469,12 +3608,18 @@ class V8_EXPORT Object : public Value {
*/
V8_INLINE void* GetAlignedPointerFromInternalField(int index);
- /** Same as above, but works for Persistents */
+ /** Same as above, but works for PersistentBase. */
V8_INLINE static void* GetAlignedPointerFromInternalField(
const PersistentBase<Object>& object, int index) {
return object.val_->GetAlignedPointerFromInternalField(index);
}
+ /** Same as above, but works for TracedGlobal. */
+ V8_INLINE static void* GetAlignedPointerFromInternalField(
+ const TracedGlobal<Object>& object, int index) {
+ return object.val_->GetAlignedPointerFromInternalField(index);
+ }
+
/**
* Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
* a field, GetAlignedPointerFromInternalField must be used, everything else
@@ -3493,8 +3638,6 @@ class V8_EXPORT Object : public Value {
Local<Name> key);
V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
uint32_t index);
- V8_DEPRECATED("Use maybe version",
- bool HasRealNamedProperty(Local<String> key));
/**
* Use HasRealNamedProperty() if you want to check if an object has an own
* property without causing side effects, i.e., without calling interceptors.
@@ -3510,12 +3653,8 @@ class V8_EXPORT Object : public Value {
*/
V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedProperty(Local<Context> context,
Local<Name> key);
- V8_DEPRECATED("Use maybe version",
- bool HasRealIndexedProperty(uint32_t index));
V8_WARN_UNUSED_RESULT Maybe<bool> HasRealIndexedProperty(
Local<Context> context, uint32_t index);
- V8_DEPRECATED("Use maybe version",
- bool HasRealNamedCallbackProperty(Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedCallbackProperty(
Local<Context> context, Local<Name> key);
@@ -3761,6 +3900,8 @@ class ReturnValue {
template <typename S>
V8_INLINE void Set(const Global<S>& handle);
template <typename S>
+ V8_INLINE void Set(const TracedGlobal<S>& handle);
+ template <typename S>
V8_INLINE void Set(const Local<S> handle);
// Fast primitive setters
V8_INLINE void Set(bool value);
@@ -3984,11 +4125,6 @@ class V8_EXPORT Function : public Object {
Local<Value> data = Local<Value>(), int length = 0,
ConstructorBehavior behavior = ConstructorBehavior::kAllow,
SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
- static V8_DEPRECATED("Use maybe version",
- Local<Function> New(Isolate* isolate,
- FunctionCallback callback,
- Local<Value> data = Local<Value>(),
- int length = 0));
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
Local<Context> context, int argc, Local<Value> argv[]) const;
@@ -4007,9 +4143,6 @@ class V8_EXPORT Function : public Object {
Local<Context> context, int argc, Local<Value> argv[],
SideEffectType side_effect_type = SideEffectType::kHasSideEffect) const;
- V8_DEPRECATED("Use maybe version",
- Local<Value> Call(Local<Value> recv, int argc,
- Local<Value> argv[]));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Call(Local<Context> context,
Local<Value> recv, int argc,
Local<Value> argv[]);
@@ -4194,14 +4327,6 @@ class V8_EXPORT PropertyDescriptor {
// GenericDescriptor
PropertyDescriptor();
- // DataDescriptor (implicit / DEPRECATED)
- // Templatized such that the explicit constructor is chosen first.
- // TODO(clemensh): Remove after 7.3 branch.
- template <std::nullptr_t = nullptr>
- V8_DEPRECATED(
- "Use explicit constructor",
- PropertyDescriptor(Local<Value> value)); // NOLINT(runtime/explicit)
-
// DataDescriptor
explicit PropertyDescriptor(Local<Value> value);
@@ -4242,11 +4367,6 @@ class V8_EXPORT PropertyDescriptor {
PrivateData* private_;
};
-// TODO(clemensh): Remove after 7.3 branch.
-template <std::nullptr_t>
-PropertyDescriptor::PropertyDescriptor(Local<Value> value)
- : PropertyDescriptor(value) {}
-
/**
* An instance of the built-in Proxy constructor (ECMA-262, 6th Edition,
* 26.2.1).
@@ -4336,27 +4456,6 @@ class V8_EXPORT CompiledWasmModule {
// An instance of WebAssembly.Module.
class V8_EXPORT WasmModuleObject : public Object {
public:
- // TODO(clemensh): Remove after 7.3 branch.
- typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> SerializedModule;
-
- /**
- * A unowned reference to a byte buffer.
- * TODO(clemensh): Remove after 7.3 branch.
- */
- struct BufferReference {
- const uint8_t* start;
- size_t size;
- BufferReference(const uint8_t* start, size_t size)
- : start(start), size(size) {}
-
- // Implicit conversion to and from MemorySpan<const uint8_t>.
- BufferReference(MemorySpan<const uint8_t> span) // NOLINT(runtime/explicit)
- : start(span.data()), size(span.size()) {}
- operator MemorySpan<const uint8_t>() const {
- return MemorySpan<const uint8_t>{start, size};
- }
- };
-
/**
* An opaque, native heap object for transferring wasm modules. It
* supports move semantics, and does not support copy semantics.
@@ -4399,25 +4498,12 @@ class V8_EXPORT WasmModuleObject : public Object {
Isolate* isolate, const TransferrableModule&);
/**
- * Get the wasm-encoded bytes that were used to compile this module.
- */
- V8_DEPRECATED("Use CompiledWasmModule::GetWireBytesRef()",
- BufferReference GetWasmWireBytesRef());
-
- /**
* Get the compiled module for this module object. The compiled module can be
* shared by several module objects.
*/
CompiledWasmModule GetCompiledModule();
/**
- * Serialize the compiled module. The serialized data does not include the
- * uncompiled bytes.
- */
- V8_DEPRECATED("Use CompiledWasmModule::Serialize()",
- SerializedModule Serialize());
-
- /**
* If possible, deserialize the module, otherwise compile it from the provided
* uncompiled bytes.
*/
@@ -5079,7 +5165,8 @@ class V8_EXPORT SharedArrayBuffer : public Object {
allocation_length_(0),
allocation_mode_(Allocator::AllocationMode::kNormal),
deleter_(nullptr),
- deleter_data_(nullptr) {}
+ deleter_data_(nullptr),
+ is_growable_(false) {}
void* AllocationBase() const { return allocation_base_; }
size_t AllocationLength() const { return allocation_length_; }
@@ -5091,12 +5178,13 @@ class V8_EXPORT SharedArrayBuffer : public Object {
size_t ByteLength() const { return byte_length_; }
DeleterCallback Deleter() const { return deleter_; }
void* DeleterData() const { return deleter_data_; }
+ bool IsGrowable() const { return is_growable_; }
private:
Contents(void* data, size_t byte_length, void* allocation_base,
size_t allocation_length,
Allocator::AllocationMode allocation_mode, DeleterCallback deleter,
- void* deleter_data);
+ void* deleter_data, bool is_growable);
void* data_;
size_t byte_length_;
@@ -5105,6 +5193,7 @@ class V8_EXPORT SharedArrayBuffer : public Object {
Allocator::AllocationMode allocation_mode_;
DeleterCallback deleter_;
void* deleter_data_;
+ bool is_growable_;
friend class SharedArrayBuffer;
};
@@ -5133,6 +5222,14 @@ class V8_EXPORT SharedArrayBuffer : public Object {
ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized);
/**
+ * Create a new SharedArrayBuffer over an existing memory block. Propagate
+ * flags to indicate whether the underlying buffer can be grown.
+ */
+ static Local<SharedArrayBuffer> New(
+ Isolate* isolate, const SharedArrayBuffer::Contents&,
+ ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized);
+
+ /**
* Returns true if SharedArrayBuffer is externalized, that is, does not
* own its memory block.
*/
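The Contents struct now records whether the backing store is growable, and the new overload re-wraps an externalized block without losing that flag. A hedged sketch, assuming sab is an existing externalizable SharedArrayBuffer:

    #include "v8.h"

    void Alias(v8::Isolate* isolate, v8::Local<v8::SharedArrayBuffer> sab) {
      // Take over the backing store, then expose it via a second JS object.
      v8::SharedArrayBuffer::Contents contents = sab->Externalize();
      v8::Local<v8::SharedArrayBuffer> alias =
          v8::SharedArrayBuffer::New(isolate, contents);
      bool growable = contents.IsGrowable();  // carried by the new overload
      (void)alias;
      (void)growable;
    }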
@@ -5193,6 +5290,21 @@ class V8_EXPORT Date : public Object {
V8_INLINE static Date* Cast(Value* obj);
/**
+ * Time zone redetection indicator for
+ * DateTimeConfigurationChangeNotification.
+ *
+ * kSkip indicates V8 that the notification should not trigger redetecting
+ * host time zone. kRedetect indicates V8 that host time zone should be
+ * redetected, and used to set the default time zone.
+ *
+ * The host time zone detection may require file system access or similar
+ * operations unlikely to be available inside a sandbox. If v8 is run inside a
+ * sandbox, the host time zone has to be detected outside the sandbox before
+ * calling DateTimeConfigurationChangeNotification function.
+ */
+ enum class TimeZoneDetection { kSkip, kRedetect };
+
+ /**
* Notification that the embedder has changed the time zone,
* daylight savings time, or other date / time configuration
* parameters. V8 keeps a cache of various values used for
@@ -5204,7 +5316,11 @@ class V8_EXPORT Date : public Object {
* This API should not be called more than needed as it will
* negatively impact the performance of date operations.
*/
- static void DateTimeConfigurationChangeNotification(Isolate* isolate);
+ V8_DEPRECATE_SOON(
+ "Use Isolate::DateTimeConfigurationChangeNotification",
+ static void DateTimeConfigurationChangeNotification(
+ Isolate* isolate,
+ TimeZoneDetection time_zone_detection = TimeZoneDetection::kSkip));
private:
static void CheckCast(Value* obj);
@@ -5829,7 +5945,6 @@ class V8_EXPORT FunctionTemplate : public Template {
SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
/** Returns the unique function instance in the current execution context.*/
- V8_DEPRECATED("Use maybe version", Local<Function> GetFunction());
V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
Local<Context> context);
@@ -5904,7 +6019,8 @@ class V8_EXPORT FunctionTemplate : public Template {
* function template. By default, instances of a function template
* are not ignored.
*/
- void SetHiddenPrototype(bool value);
+ V8_DEPRECATED("This feature is incompatible with ES6+.",
+ void SetHiddenPrototype(bool value));
/**
* Sets the ReadOnly flag in the attributes of the 'prototype' property
@@ -6126,7 +6242,6 @@ class V8_EXPORT ObjectTemplate : public Template {
size_t index);
/** Creates a new instance of this template.*/
- V8_DEPRECATED("Use maybe version", Local<Object> NewInstance());
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(Local<Context> context);
/**
@@ -6346,20 +6461,6 @@ class V8_EXPORT AccessorSignature : public Data {
// --- Extensions ---
-V8_DEPRECATED("Implementation detail", class)
-V8_EXPORT ExternalOneByteStringResourceImpl
- : public String::ExternalOneByteStringResource {
- public:
- ExternalOneByteStringResourceImpl() : data_(nullptr), length_(0) {}
- ExternalOneByteStringResourceImpl(const char* data, size_t length)
- : data_(data), length_(length) {}
- const char* data() const override { return data_; }
- size_t length() const override { return length_; }
-
- private:
- const char* data_;
- size_t length_;
-};
/**
* Ignore
@@ -6381,8 +6482,8 @@ class V8_EXPORT Extension { // NOLINT
const String::ExternalOneByteStringResource* source() const {
return source_;
}
- int dependency_count() { return dep_count_; }
- const char** dependencies() { return deps_; }
+ int dependency_count() const { return dep_count_; }
+ const char** dependencies() const { return deps_; }
void set_auto_enable(bool value) { auto_enable_ = value; }
bool auto_enable() { return auto_enable_; }
@@ -6399,9 +6500,11 @@ class V8_EXPORT Extension { // NOLINT
bool auto_enable_;
};
+V8_DEPRECATED(
+ "Use unique_ptr version or stop using extension (http://crbug.com/334679).",
+ void V8_EXPORT RegisterExtension(Extension* extension));
-void V8_EXPORT RegisterExtension(Extension* extension);
-
+void V8_EXPORT RegisterExtension(std::unique_ptr<Extension>);
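
For embedders that still use extensions despite the deprecation, a minimal sketch of the new ownership-transferring overload might look as follows; MyExtension, its source string, and the callback are illustrative, not part of this patch:

#include <memory>
#include "v8.h"

// Sketch: a trivial native extension registered via the unique_ptr overload.
class MyExtension : public v8::Extension {
 public:
  MyExtension() : v8::Extension("v8/my-ext", "native function hello();") {}
  v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
      v8::Isolate* isolate, v8::Local<v8::String> name) override {
    return v8::FunctionTemplate::New(isolate, Hello);
  }

 private:
  static void Hello(const v8::FunctionCallbackInfo<v8::Value>& info) {
    info.GetReturnValue().Set(42);
  }
};

void InstallExtensions() {
  // The unique_ptr overload takes ownership; the deprecated raw-pointer
  // overload leaves ownership with the caller.
  v8::RegisterExtension(std::make_unique<MyExtension>());
}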
// --- Statics ---
@@ -6461,8 +6564,14 @@ class V8_EXPORT ResourceConstraints {
void set_code_range_size(size_t limit_in_mb) {
code_range_size_ = limit_in_mb;
}
- size_t max_zone_pool_size() const { return max_zone_pool_size_; }
- void set_max_zone_pool_size(size_t bytes) { max_zone_pool_size_ = bytes; }
+ V8_DEPRECATE_SOON("Zone does not pool memory any more.",
+ size_t max_zone_pool_size() const) {
+ return max_zone_pool_size_;
+ }
+ V8_DEPRECATE_SOON("Zone does not pool memory any more.",
+ void set_max_zone_pool_size(size_t bytes)) {
+ max_zone_pool_size_ = bytes;
+ }
private:
// max_semi_space_size_ is in KB
@@ -6636,6 +6745,7 @@ typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
// --- Microtasks Callbacks ---
typedef void (*MicrotasksCompletedCallback)(Isolate*);
+typedef void (*MicrotasksCompletedCallbackWithData)(Isolate*, void*);
typedef void (*MicrotaskCallback)(void* data);
@@ -6648,6 +6758,80 @@ typedef void (*MicrotaskCallback)(void* data);
*/
enum class MicrotasksPolicy { kExplicit, kScoped, kAuto };
+/**
+ * Represents the microtask queue, where microtasks are stored and processed.
+ * https://html.spec.whatwg.org/multipage/webappapis.html#microtask-queue
+ * https://html.spec.whatwg.org/multipage/webappapis.html#enqueuejob(queuename,-job,-arguments)
+ * https://html.spec.whatwg.org/multipage/webappapis.html#perform-a-microtask-checkpoint
+ *
+ * A MicrotaskQueue instance may be associated with multiple Contexts by
+ * passing it to Context::New(), and detached again by Context::DetachGlobal().
+ * The embedder must keep the MicrotaskQueue instance alive until all
+ * associated Contexts are gone or detached.
+ *
+ * Use the same instance of MicrotaskQueue for all Contexts that may access each
+ * other synchronously. E.g. for Web embedding, use the same instance for all
+ * origins that share the same URL scheme and eTLD+1.
+ */
+class V8_EXPORT MicrotaskQueue {
+ public:
+ /**
+ * Creates an empty MicrotaskQueue instance.
+ */
+ static std::unique_ptr<MicrotaskQueue> New();
+
+ virtual ~MicrotaskQueue() = default;
+
+ /**
+ * Enqueues the microtask function onto the queue.
+ */
+ virtual void EnqueueMicrotask(Isolate* isolate,
+ Local<Function> microtask) = 0;
+
+ /**
+ * Enqueues the callback onto the queue.
+ */
+ virtual void EnqueueMicrotask(v8::Isolate* isolate,
+ MicrotaskCallback callback,
+ void* data = nullptr) = 0;
+
+ /**
+ * Adds a callback to notify the embedder after microtasks were run. The
+ * callback is triggered by an explicit RunMicrotasks call or by automatic
+ * microtasks execution (see Isolate::SetMicrotasksPolicy).
+ *
+ * The callback will be triggered even if running microtasks was attempted
+ * but the microtask queue was empty and no microtask was actually
+ * executed.
+ *
+ * Executing scripts inside the callback will not re-trigger microtasks and
+ * the callback.
+ */
+ virtual void AddMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data = nullptr) = 0;
+
+ /**
+ * Removes callback that was installed by AddMicrotasksCompletedCallback.
+ */
+ virtual void RemoveMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data = nullptr) = 0;
+
+ /**
+ * Runs microtasks if no microtask is running on this MicrotaskQueue instance.
+ */
+ virtual void PerformCheckpoint(Isolate* isolate) = 0;
+
+ /**
+ * Returns true if a microtask is running on this MicrotaskQueue instance.
+ */
+ virtual bool IsRunningMicrotasks() const = 0;
+
+ private:
+ friend class internal::MicrotaskQueue;
+ MicrotaskQueue() = default;
+ MicrotaskQueue(const MicrotaskQueue&) = delete;
+ MicrotaskQueue& operator=(const MicrotaskQueue&) = delete;
+};
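
Pulled together, a rough embedding sketch under the stated contract: one queue for a group of synchronously-reachable contexts. Locals and lifetimes are illustrative; a real embedder keeps the queue alive as long as its contexts:

#include <memory>
#include "v8.h"

// Sketch: one MicrotaskQueue shared by two same-origin contexts, drained
// explicitly by the embedder once per event-loop turn.
void RunTurn(v8::Isolate* isolate) {
  std::unique_ptr<v8::MicrotaskQueue> queue = v8::MicrotaskQueue::New();
  v8::HandleScope scope(isolate);
  v8::Local<v8::Context> a = v8::Context::New(
      isolate, nullptr, v8::MaybeLocal<v8::ObjectTemplate>(),
      v8::MaybeLocal<v8::Value>(), v8::DeserializeInternalFieldsCallback(),
      queue.get());
  v8::Local<v8::Context> b = v8::Context::New(
      isolate, nullptr, v8::MaybeLocal<v8::ObjectTemplate>(),
      v8::MaybeLocal<v8::Value>(), v8::DeserializeInternalFieldsCallback(),
      queue.get());
  (void)a;
  (void)b;
  // Plain-callback flavor; the Local<Function> overload works the same way.
  queue->EnqueueMicrotask(isolate, [](void*) { /* work */ }, nullptr);
  queue->PerformCheckpoint(isolate);  // no-op if already running microtasks
}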
/**
* This scope is used to control microtasks when kScopeMicrotasksInvocation
@@ -6663,6 +6847,7 @@ class V8_EXPORT MicrotasksScope {
enum Type { kRunMicrotasks, kDoNotRunMicrotasks };
MicrotasksScope(Isolate* isolate, Type type);
+ MicrotasksScope(Isolate* isolate, MicrotaskQueue* microtask_queue, Type type);
~MicrotasksScope();
/**
@@ -6686,6 +6871,7 @@ class V8_EXPORT MicrotasksScope {
private:
internal::Isolate* const isolate_;
+ internal::MicrotaskQueue* const microtask_queue_;
bool run_;
};
@@ -6874,8 +7060,6 @@ class V8_EXPORT HeapCodeStatistics {
friend class Isolate;
};
-class RetainedObjectInfo;
-
/**
* A JIT code event is issued each time code is added, moved or removed.
*
@@ -6956,7 +7140,7 @@ struct JitCodeEvent {
* See documentation https://developers.google.com/web/tools/chrome-devtools/
* profile/evaluate-performance/rail
*/
-enum RAILMode {
+enum RAILMode : unsigned {
// Response performance mode: In this mode very low virtual machine latency
// is provided. V8 will try to avoid JavaScript execution interruptions.
// Throughput may be throttled.
@@ -7037,9 +7221,24 @@ class V8_EXPORT EmbedderHeapTracer {
kEmpty,
};
+ /**
+ * Interface for iterating through TracedGlobal handles.
+ */
+ class V8_EXPORT TracedGlobalHandleVisitor {
+ public:
+ virtual ~TracedGlobalHandleVisitor() = default;
+ virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& value) = 0;
+ };
+
virtual ~EmbedderHeapTracer() = default;
/**
+ * Iterates all TracedGlobal handles created for the v8::Isolate the tracer is
+ * attached to.
+ */
+ void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor);
+
+ /**
* Called by v8 to register internal fields of found wrappers.
*
* The embedder is expected to store them somewhere and trace reachable
@@ -7048,6 +7247,8 @@ class V8_EXPORT EmbedderHeapTracer {
virtual void RegisterV8References(
const std::vector<std::pair<void*, void*> >& embedder_fields) = 0;
+ void RegisterEmbedderReference(const TracedGlobal<v8::Value>& ref);
+
/**
* Called at the beginning of a GC cycle.
*/
@@ -7084,15 +7285,6 @@ class V8_EXPORT EmbedderHeapTracer {
*/
virtual void EnterFinalPause(EmbedderStackState stack_state) = 0;
- /**
- * Called when tracing is aborted.
- *
- * The embedder is expected to throw away all intermediate data and reset to
- * the initial state.
- */
- V8_DEPRECATED("Obsolete as V8 will not abort tracing anymore.",
- virtual void AbortTracing()) {}
-
/*
* Called by the embedder to request immediate finalization of the currently
* running tracing phase that has been started with TracePrologue and not
@@ -7104,6 +7296,17 @@ class V8_EXPORT EmbedderHeapTracer {
*/
void FinalizeTracing();
+ /**
+ * Returns true if the TracedGlobal handle should be considered a root for
+ * the currently running non-tracing garbage collection and false otherwise.
+ *
+ * The default implementation keeps all TracedGlobal references as roots.
+ */
+ virtual bool IsRootForNonTracingGC(
+ const v8::TracedGlobal<v8::Value>& handle) {
+ return true;
+ }
+
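
A tracer overriding the new hook might look roughly like this, with the pure-virtual hooks stubbed out; the wrapper-class-id convention is a hypothetical embedder-side scheme, not part of this patch:

#include <utility>
#include <vector>
#include "v8.h"

// Sketch: let non-tracing GCs (e.g. scavenges) reclaim wrappers that the
// embedder has not tagged as live.
class MyTracer : public v8::EmbedderHeapTracer {
 public:
  // Pure-virtual hooks, stubbed for brevity.
  void RegisterV8References(
      const std::vector<std::pair<void*, void*>>& fields) override {}
  void TracePrologue() override {}
  bool AdvanceTracing(double deadline_in_ms) override { return true; }
  bool IsTracingDone() override { return true; }
  void TraceEpilogue() override {}
  void EnterFinalPause(EmbedderStackState stack_state) override {}

  // New in this version: only tagged wrappers stay roots for non-tracing
  // collections; everything else may be reclaimed early.
  bool IsRootForNonTracingGC(
      const v8::TracedGlobal<v8::Value>& handle) override {
    return handle.WrapperClassId() == kLiveWrapperClassId;
  }

 private:
  static const uint16_t kLiveWrapperClassId = 1;
};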
/*
* Called by the embedder to immediately perform a full garbage collection.
*
@@ -7314,6 +7517,7 @@ class V8_EXPORT Isolate {
class V8_EXPORT SuppressMicrotaskExecutionScope {
public:
explicit SuppressMicrotaskExecutionScope(Isolate* isolate);
+ explicit SuppressMicrotaskExecutionScope(MicrotaskQueue* microtask_queue);
~SuppressMicrotaskExecutionScope();
// Prevent copying of Scope objects.
@@ -7324,6 +7528,7 @@ class V8_EXPORT Isolate {
private:
internal::Isolate* const isolate_;
+ internal::MicrotaskQueue* const microtask_queue_;
};
/**
@@ -7434,6 +7639,7 @@ class V8_EXPORT Isolate {
kRegExpMatchIsTrueishOnNonJSRegExp = 72,
kRegExpMatchIsFalseishOnJSRegExp = 73,
kDateGetTimezoneOffset = 74,
+ kStringNormalize = 75,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
@@ -7987,18 +8193,18 @@ class V8_EXPORT Isolate {
void SetPromiseRejectCallback(PromiseRejectCallback callback);
/**
- * Runs the Microtask Work Queue until empty
+ * Runs the default MicrotaskQueue until it is empty.
* Any exceptions thrown by microtask callbacks are swallowed.
*/
void RunMicrotasks();
/**
- * Enqueues the callback to the Microtask Work Queue
+ * Enqueues the callback to the default MicrotaskQueue
*/
void EnqueueMicrotask(Local<Function> microtask);
/**
- * Enqueues the callback to the Microtask Work Queue
+ * Enqueues the callback to the default MicrotaskQueue
*/
void EnqueueMicrotask(MicrotaskCallback callback, void* data = nullptr);
@@ -8014,14 +8220,15 @@ class V8_EXPORT Isolate {
/**
* Adds a callback to notify the host application after
- * microtasks were run. The callback is triggered by explicit RunMicrotasks
- * call or automatic microtasks execution (see SetAutorunMicrotasks).
+ * microtasks were run on the default MicrotaskQueue. The callback is
+ * triggered by an explicit RunMicrotasks call or by automatic microtasks
+ * execution (see SetMicrotasksPolicy).
*
* The callback will be triggered even if running microtasks was attempted
* but the microtask queue was empty and no microtask was actually
* executed.
*
- * Executing scriptsinside the callback will not re-trigger microtasks and
+ * Executing scripts inside the callback will not re-trigger microtasks and
* the callback.
*/
void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
@@ -8244,10 +8451,6 @@ class V8_EXPORT Isolate {
void SetWasmModuleCallback(ExtensionCallback callback);
void SetWasmInstanceCallback(ExtensionCallback callback);
- V8_DEPRECATED(
- "The callback set in SetWasmStreamingCallback is used now",
- void SetWasmCompileStreamingCallback(ApiImplementationCallback callback));
-
void SetWasmStreamingCallback(WasmStreamingCallback callback);
void SetWasmThreadsEnabledCallback(WasmThreadsEnabledCallback callback);
@@ -8321,7 +8524,7 @@ class V8_EXPORT Isolate {
* garbage collection but is free to visit an arbitrary superset of these
* objects.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use VisitHandlesWithClassIds",
void VisitHandlesForPartialDependence(PersistentHandleVisitor* visitor));
@@ -8345,6 +8548,45 @@ class V8_EXPORT Isolate {
*/
void SetAllowAtomicsWait(bool allow);
+ /**
+ * Time zone redetection indicator for
+ * DateTimeConfigurationChangeNotification.
+ *
+ * kSkip indicates to V8 that the notification should not trigger redetecting
+ * the host time zone. kRedetect indicates to V8 that the host time zone should
+ * be redetected and used to set the default time zone.
+ *
+ * Host time zone detection may require file system access or similar
+ * operations that are unlikely to be available inside a sandbox. If V8 runs
+ * inside a sandbox, the host time zone has to be detected outside the sandbox
+ * before calling the DateTimeConfigurationChangeNotification function.
+ */
+ enum class TimeZoneDetection { kSkip, kRedetect };
+
+ /**
+ * Notification that the embedder has changed the time zone, daylight savings
+ * time or other date / time configuration parameters. V8 keeps a cache of
+ * various values used for date / time computation. This notification will
+ * reset those cached values for the current context so that date / time
+ * configuration changes are reflected.
+ *
+ * This API should not be called more than needed as it will negatively impact
+ * the performance of date operations.
+ */
+ void DateTimeConfigurationChangeNotification(
+ TimeZoneDetection time_zone_detection = TimeZoneDetection::kSkip);
+
+ /**
+ * Notification that the embedder has changed the locale. V8 keeps a cache of
+ * various values used for locale computation. This notification will reset
+ * those cached values for the current context so that locale configuration
+ * changes are reflected.
+ *
+ * This API should not be called more than needed as it will negatively impact
+ * the performance of locale operations.
+ */
+ void LocaleConfigurationChangeNotification();
+
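
A short sketch of an embedder reacting to host configuration changes; whether redetection is safe (i.e. the process is not sandboxed) is an assumption supplied by the caller:

#include "v8.h"

// Sketch: refresh V8's date/time and locale caches after the host
// configuration changed.
void OnHostConfigChanged(v8::Isolate* isolate, bool sandboxed) {
  isolate->DateTimeConfigurationChangeNotification(
      sandboxed ? v8::Isolate::TimeZoneDetection::kSkip
                : v8::Isolate::TimeZoneDetection::kRedetect);
  isolate->LocaleConfigurationChangeNotification();
}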
Isolate() = delete;
~Isolate() = delete;
Isolate(const Isolate&) = delete;
@@ -8548,13 +8790,6 @@ class V8_EXPORT V8 {
#endif // V8_OS_POSIX
/**
- * Enable the default signal handler rather than using one provided by the
- * embedder.
- */
- V8_DEPRECATED("Use EnableWebAssemblyTrapHandler",
- static bool RegisterDefaultSignalHandler());
-
- /**
* Activate trap-based bounds checking for WebAssembly.
*
* \param use_v8_signal_handler Whether V8 should install its own signal
@@ -8567,13 +8802,24 @@ class V8_EXPORT V8 {
static internal::Address* GlobalizeReference(internal::Isolate* isolate,
internal::Address* handle);
- static internal::Address* CopyPersistent(internal::Address* handle);
+ static internal::Address* GlobalizeTracedReference(internal::Isolate* isolate,
+ internal::Address* handle,
+ internal::Address* slot);
+ static void MoveGlobalReference(internal::Address** from,
+ internal::Address** to);
+ static void MoveTracedGlobalReference(internal::Address** from,
+ internal::Address** to);
+ static internal::Address* CopyGlobalReference(internal::Address* from);
static void DisposeGlobal(internal::Address* global_handle);
+ static void DisposeTracedGlobal(internal::Address* global_handle);
static void MakeWeak(internal::Address* location, void* data,
WeakCallbackInfo<void>::Callback weak_callback,
WeakCallbackType type);
static void MakeWeak(internal::Address** location_addr);
static void* ClearWeak(internal::Address* location);
+ static void SetFinalizationCallbackTraced(
+ internal::Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback callback);
static void AnnotateStrongRetainer(internal::Address* location,
const char* label);
static Value* Eternalize(Isolate* isolate, Value* handle);
@@ -8587,12 +8833,16 @@ class V8_EXPORT V8 {
static void FromJustIsNothing();
static void ToLocalEmpty();
static void InternalFieldOutOfBounds(int index);
+ template <class T>
+ friend class Global;
template <class T> friend class Local;
template <class T>
friend class MaybeLocal;
template <class T>
friend class Maybe;
template <class T>
+ friend class TracedGlobal;
+ template <class T>
friend class WeakCallbackInfo;
template <class T> friend class Eternal;
template <class T> friend class PersistentBase;
@@ -9048,7 +9298,8 @@ class V8_EXPORT Context {
MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
MaybeLocal<Value> global_object = MaybeLocal<Value>(),
DeserializeInternalFieldsCallback internal_fields_deserializer =
- DeserializeInternalFieldsCallback());
+ DeserializeInternalFieldsCallback(),
+ MicrotaskQueue* microtask_queue = nullptr);
/**
* Create a new context from a (non-default) context snapshot. There
@@ -9068,13 +9319,13 @@ class V8_EXPORT Context {
*
* \param global_object See v8::Context::New.
*/
-
static MaybeLocal<Context> FromSnapshot(
Isolate* isolate, size_t context_snapshot_index,
DeserializeInternalFieldsCallback embedder_fields_deserializer =
DeserializeInternalFieldsCallback(),
ExtensionConfiguration* extensions = nullptr,
- MaybeLocal<Value> global_object = MaybeLocal<Value>());
+ MaybeLocal<Value> global_object = MaybeLocal<Value>(),
+ MicrotaskQueue* microtask_queue = nullptr);
/**
* Returns a global object that isn't backed by an actual context.
@@ -9452,6 +9703,10 @@ Local<T> Local<T>::New(Isolate* isolate, const PersistentBase<T>& that) {
return New(isolate, that.val_);
}
+template <class T>
+Local<T> Local<T>::New(Isolate* isolate, const TracedGlobal<T>& that) {
+ return New(isolate, that.val_);
+}
template <class T>
Local<T> Local<T>::New(Isolate* isolate, T* that) {
@@ -9514,7 +9769,7 @@ void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
this->Reset();
if (that.IsEmpty()) return;
internal::Address* p = reinterpret_cast<internal::Address*>(that.val_);
- this->val_ = reinterpret_cast<T*>(V8::CopyPersistent(p));
+ this->val_ = reinterpret_cast<T*>(V8::CopyGlobalReference(p));
M::Copy(that, this);
}
@@ -9647,6 +9902,109 @@ uint16_t PersistentBase<T>::WrapperClassId() const {
return *reinterpret_cast<uint16_t*>(addr);
}
+template <class T>
+Global<T>::Global(Global&& other) : PersistentBase<T>(other.val_) {
+ if (other.val_ != nullptr) {
+ V8::MoveGlobalReference(reinterpret_cast<internal::Address**>(&other.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ other.val_ = nullptr;
+ }
+}
+
+template <class T>
+template <class S>
+Global<T>& Global<T>::operator=(Global<S>&& rhs) {
+ TYPE_CHECK(T, S);
+ if (this != &rhs) {
+ this->Reset();
+ if (rhs.val_ != nullptr) {
+ this->val_ = rhs.val_;
+ V8::MoveGlobalReference(
+ reinterpret_cast<internal::Address**>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ rhs.val_ = nullptr;
+ }
+ }
+ return *this;
+}
+
+template <class T>
+T* TracedGlobal<T>::New(Isolate* isolate, T* that, T** slot) {
+ if (that == nullptr) return nullptr;
+ internal::Address* p = reinterpret_cast<internal::Address*>(that);
+ return reinterpret_cast<T*>(V8::GlobalizeTracedReference(
+ reinterpret_cast<internal::Isolate*>(isolate), p,
+ reinterpret_cast<internal::Address*>(slot)));
+}
+
+template <class T>
+void TracedGlobal<T>::Reset() {
+ if (IsEmpty()) return;
+ V8::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(val_));
+ val_ = nullptr;
+}
+
+template <class T>
+template <class S>
+void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
+ TYPE_CHECK(T, S);
+ Reset();
+ if (other.IsEmpty()) return;
+ this->val_ = New(isolate, other.val_, &val_);
+}
+
+template <class T>
+TracedGlobal<T>::TracedGlobal(TracedGlobal&& other) : val_(other.val_) {
+ if (other.val_ != nullptr) {
+ V8::MoveTracedGlobalReference(
+ reinterpret_cast<internal::Address**>(&other.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ other.val_ = nullptr;
+ }
+}
+
+template <class T>
+template <class S>
+TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) {
+ TYPE_CHECK(T, S);
+ if (this != &rhs) {
+ this->Reset();
+ if (rhs.val_ != nullptr) {
+ this->val_ = rhs.val_;
+ V8::MoveTracedGlobalReference(
+ reinterpret_cast<internal::Address**>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ rhs.val_ = nullptr;
+ }
+ }
+ return *this;
+}
+
+template <class T>
+void TracedGlobal<T>::SetWrapperClassId(uint16_t class_id) {
+ typedef internal::Internals I;
+ if (IsEmpty()) return;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ *reinterpret_cast<uint16_t*>(addr) = class_id;
+}
+
+template <class T>
+uint16_t TracedGlobal<T>::WrapperClassId() const {
+ typedef internal::Internals I;
+ if (IsEmpty()) return 0;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ return *reinterpret_cast<uint16_t*>(addr);
+}
+
+template <class T>
+void TracedGlobal<T>::SetFinalizationCallback(
+ void* parameter, typename WeakCallbackInfo<void>::Callback callback) {
+ V8::SetFinalizationCallbackTraced(
+ reinterpret_cast<internal::Address*>(this->val_), parameter, callback);
+}
+
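
Taken together, these templates give the embedder a GC-traced wrapper handle; a minimal sketch follows. The class id and callback body are illustrative, and since the destructor resets the handle in this version, real code stores the TracedGlobal in the long-lived wrapper object:

#include "v8.h"

// Sketch: wrap a JS object in a TracedGlobal with a finalization callback.
void Wrap(v8::Isolate* isolate, v8::Local<v8::Object> object) {
  v8::TracedGlobal<v8::Object> traced;
  traced.Reset(isolate, object);
  traced.SetWrapperClassId(1);  // visible to TracedGlobalHandleVisitor
  traced.SetFinalizationCallback(
      nullptr, [](const v8::WeakCallbackInfo<void>& info) {
        // Release embedder-side wrapper state here.
      });
}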
template <typename T>
ReturnValue<T>::ReturnValue(internal::Address* slot) : value_(slot) {}
@@ -9674,6 +10032,17 @@ void ReturnValue<T>::Set(const Global<S>& handle) {
template <typename T>
template <typename S>
+void ReturnValue<T>::Set(const TracedGlobal<S>& handle) {
+ TYPE_CHECK(T, S);
+ if (V8_UNLIKELY(handle.IsEmpty())) {
+ *value_ = GetDefaultValue();
+ } else {
+ *value_ = *reinterpret_cast<internal::Address*>(*handle);
+ }
+}
+
+template <typename T>
+template <typename S>
void ReturnValue<T>::Set(const Local<S> handle) {
TYPE_CHECK(T, S);
if (V8_UNLIKELY(handle.IsEmpty())) {
@@ -9954,8 +10323,7 @@ Local<Value> Object::GetInternalField(int index) {
if (instance_type == I::kJSObjectType ||
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType) {
- int offset = I::kJSObjectHeaderSizeForEmbedderFields +
- (I::kEmbedderDataSlotSize * index);
+ int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
A value = I::ReadTaggedAnyField(obj, offset);
internal::Isolate* isolate =
internal::IsolateFromNeverReadOnlySpaceObject(obj);
@@ -9978,8 +10346,7 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
if (V8_LIKELY(instance_type == I::kJSObjectType ||
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType)) {
- int offset = I::kJSObjectHeaderSizeForEmbedderFields +
- (I::kEmbedderDataSlotSize * index);
+ int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
return I::ReadRawField<void*>(obj, offset);
}
#endif
@@ -10478,10 +10845,14 @@ ReturnValue<T> PropertyCallbackInfo<T>::GetReturnValue() const {
template <typename T>
bool PropertyCallbackInfo<T>::ShouldThrowOnError() const {
typedef internal::Internals I;
- return args_[kShouldThrowOnErrorIndex] != I::IntToSmi(0);
+ if (args_[kShouldThrowOnErrorIndex] !=
+ I::IntToSmi(I::kInferShouldThrowMode)) {
+ return args_[kShouldThrowOnErrorIndex] != I::IntToSmi(I::kDontThrow);
+ }
+ return v8::internal::ShouldThrowOnError(
+ reinterpret_cast<v8::internal::Isolate*>(GetIsolate()));
}
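
The practical effect: when the slot holds kInferShouldThrowMode, the throw/ignore decision falls back to the isolate's current language mode. A hypothetical setter interceptor consuming it (CanStore() is a stand-in for embedder logic):

#include "v8.h"

// Sketch: throw only when the (possibly inferred) mode asks for it.
static bool CanStore(v8::Local<v8::Name> name) { return false; }

void SetterInterceptor(v8::Local<v8::Name> name, v8::Local<v8::Value> value,
                       const v8::PropertyCallbackInfo<v8::Value>& info) {
  if (!CanStore(name)) {
    if (info.ShouldThrowOnError()) {  // strict, or inferred as strict
      v8::Isolate* isolate = info.GetIsolate();
      isolate->ThrowException(v8::Exception::TypeError(
          v8::String::NewFromUtf8(isolate, "property is read-only",
                                  v8::NewStringType::kNormal)
              .ToLocalChecked()));
    }
    return;  // sloppy mode: fail silently
  }
  // ... perform the store and set info.GetReturnValue() to signal it ...
}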
-
Local<Primitive> Undefined(Isolate* isolate) {
typedef internal::Address S;
typedef internal::Internals I;
diff --git a/deps/v8/infra/OWNERS b/deps/v8/infra/OWNERS
index ea6f5446ee..c05d1d3921 100644
--- a/deps/v8/infra/OWNERS
+++ b/deps/v8/infra/OWNERS
@@ -1,3 +1,4 @@
machenbach@chromium.org
sergiyb@chromium.org
tandrii@chromium.org
+tmrts@chromium.org
\ No newline at end of file
diff --git a/deps/v8/infra/config/OWNERS b/deps/v8/infra/config/OWNERS
deleted file mode 100644
index 1d89078df7..0000000000
--- a/deps/v8/infra/config/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-sergiyb@chromium.org
diff --git a/deps/v8/infra/config/PRESUBMIT.py b/deps/v8/infra/config/PRESUBMIT.py
deleted file mode 100644
index 3d20f403f6..0000000000
--- a/deps/v8/infra/config/PRESUBMIT.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2018 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Presubmit script for changes in the infrastructure configs.
-
-See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
-for more details about the presubmit API built into gcl.
-"""
-
-
-def _CommonChecks(input_api, output_api):
- """Checks common to both upload and commit."""
- results = []
- results.extend(
- input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api))
- return results
-
-
-def CheckChangeOnUpload(input_api, output_api):
- results = []
- results.extend(_CommonChecks(input_api, output_api))
- return results
-
-
-def CheckChangeOnCommit(input_api, output_api):
- results = []
- results.extend(_CommonChecks(input_api, output_api))
- return results
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
deleted file mode 100644
index e58723719e..0000000000
--- a/deps/v8/infra/config/cq.cfg
+++ /dev/null
@@ -1,158 +0,0 @@
-# See http://luci-config.appspot.com/schemas/projects/refs:cq.cfg for the
-# documentation of this file format.
-
-version: 1
-cq_status_url: "https://chromium-cq-status.appspot.com"
-git_repo_url: "https://chromium.googlesource.com/v8/v8"
-commit_burst_delay: 60
-max_commit_burst: 1
-
-gerrit {}
-
-verifiers {
- gerrit_cq_ability {
- committer_list: "project-v8-committers"
- dry_run_access_list: "project-v8-tryjob-access"
- }
-
- tree_status {
- tree_status_url: "https://v8-status.appspot.com"
- }
-
- try_job {
- buckets {
- name: "luci.v8.try"
- builders { name: "v8_android_arm_compile_rel" }
- builders { name: "v8_fuchsia_rel_ng" }
- builders { name: "v8_linux64_asan_rel_ng" }
- builders {
- name: "v8_linux64_asan_rel_ng_triggered"
- triggered_by: "v8_linux64_asan_rel_ng"
- }
- builders { name: "v8_linux64_dbg_ng" }
- builders {
- name: "v8_linux64_dbg_ng_triggered"
- triggered_by: "v8_linux64_dbg_ng"
- }
- builders { name: "v8_linux64_gcc_compile_dbg" }
- builders { name: "v8_linux64_header_includes_dbg" }
- builders { name: "v8_linux64_jumbo_compile_rel" }
- builders { name: "v8_linux64_rel_ng" }
- builders {
- name: "v8_linux64_rel_ng_triggered"
- triggered_by: "v8_linux64_rel_ng"
- }
- # TODO(machenbach): Figure out if bot should be removed or if
- # functionality should be revived.
- builders {
- name: "v8_linux64_sanitizer_coverage_rel"
- experiment_percentage: 10
- }
- builders { name: "v8_linux64_shared_compile_rel" }
- builders { name: "v8_linux64_verify_csa_rel_ng" }
- builders {
- name: "v8_linux64_verify_csa_rel_ng_triggered"
- triggered_by: "v8_linux64_verify_csa_rel_ng"
- }
- builders { name: "v8_linux_arm64_rel_ng" }
- builders {
- name: "v8_linux_arm64_rel_ng_triggered"
- triggered_by: "v8_linux_arm64_rel_ng"
- }
- builders { name: "v8_linux_arm_rel_ng" }
- builders {
- name: "v8_linux_arm_rel_ng_triggered"
- triggered_by: "v8_linux_arm_rel_ng"
- }
- builders {
- name: "v8_linux_blink_rel"
- experiment_percentage: 100
- }
- builders { name: "v8_linux_chromium_gn_rel" }
- builders { name: "v8_linux_gcc_compile_rel" }
- builders { name: "v8_linux_nodcheck_rel_ng" }
- builders {
- name: "v8_linux_nodcheck_rel_ng_triggered"
- triggered_by: "v8_linux_nodcheck_rel_ng"
- }
- builders { name: "v8_linux_rel_ng" }
- builders {
- name: "v8_linux_rel_ng_triggered"
- triggered_by: "v8_linux_rel_ng"
- }
- builders { name: "v8_linux_verify_csa_rel_ng" }
- builders {
- name: "v8_linux_verify_csa_rel_ng_triggered"
- triggered_by: "v8_linux_verify_csa_rel_ng"
- }
- builders { name: "v8_mac64_rel_ng" }
- builders {
- name: "v8_mac64_rel_ng_triggered"
- triggered_by: "v8_mac64_rel_ng"
- }
- builders { name: "v8_node_linux64_rel" }
- builders {
- name: "v8_presubmit"
- disable_reuse: true
- }
- builders { name: "v8_win64_msvc_compile_rel" }
- builders { name: "v8_win64_rel_ng" }
- builders {
- name: "v8_win64_rel_ng_triggered"
- triggered_by: "v8_win64_rel_ng"
- }
- builders { name: "v8_win_compile_dbg" }
- builders { name: "v8_win_nosnap_shared_rel_ng" }
- builders {
- name: "v8_win_nosnap_shared_rel_ng_triggered"
- triggered_by: "v8_win_nosnap_shared_rel_ng"
- }
- builders { name: "v8_win_rel_ng" }
- builders {
- name: "v8_win_rel_ng_triggered"
- triggered_by: "v8_win_rel_ng"
- }
- builders {
- name: "v8_linux_noi18n_rel_ng"
- path_regexp: ".*intl.*"
- path_regexp: ".*test262.*"
- }
- }
-
- buckets {
- name: "luci.chromium.try"
- builders {
- name: "linux_chromium_rel_ng"
- path_regexp: "include/.+\\.h"
- path_regexp: "src/api\\.cc"
- path_regexp: "src/message-template\\.h"
- }
- builders {
- name: "linux_chromium_headless_rel"
- path_regexp: "src/inspector/.+"
- path_regexp: "test/inspector/.+"
- }
- builders {
- name: "linux-blink-rel"
- path_regexp: "src/inspector/.+"
- path_regexp: "test/inspector/.+"
- }
- # TODO(machenbach): Uncomment path_regexp after testing, as currently,
- # path_regexp can not be combined with experiment_percentage. See more
- # details at crbug.com/v8/8058.
- builders {
- name: "cast_shell_android"
- #path_regexp: "include/.+\\.h"
- #path_regexp: "src/api\\.cc"
- experiment_percentage: 20
- }
- builders {
- name: "cast_shell_linux"
- #path_regexp: "include/.+\\.h"
- #path_regexp: "src/api\\.cc"
- experiment_percentage: 20
- }
- }
- }
-}
-
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 8a6cba71e0..cdf4b81a41 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -89,6 +89,9 @@
'V8 Linux64 TSAN - builder': 'release_x64_tsan',
'V8 Linux - arm64 - sim - MSAN': 'release_simulate_arm64_msan',
# Misc.
+ 'V8 Linux gcc': 'release_x86_gcc_no_goma',
+ 'V8 Linux64 gcc - debug': 'debug_x64_gcc_no_goma',
+ # TODO(machenbach): Remove after switching.
'V8 Linux gcc 4.8': 'release_x86_gcc',
'V8 Linux64 gcc 4.8 - debug': 'debug_x64_gcc',
# FYI.
@@ -101,12 +104,10 @@
'V8 Fuchsia - debug': 'debug_x64_fuchsia',
'V8 Linux64 - cfi': 'release_x64_cfi',
'V8 Linux64 UBSan': 'release_x64_ubsan',
- 'V8 Linux64 UBSanVptr': 'release_x64_ubsan_vptr',
'V8 Linux - vtunejit': 'debug_x86_vtunejit',
'V8 Linux64 - gcov coverage': 'release_x64_gcc_coverage',
'V8 Linux - predictable': 'release_x86_predictable',
'V8 Linux - full debug': 'full_debug_x86',
- 'V8 Linux - interpreted regexp': 'release_x86_interpreted_regexp',
'V8 Random Deopt Fuzzer - debug': 'debug_x64',
},
'client.v8.clusterfuzz': {
@@ -140,8 +141,6 @@
'V8 Clusterfuzz Linux64 TSAN - release builder': 'release_x64_tsan',
'V8 Clusterfuzz Linux64 UBSan - release builder':
'release_x64_ubsan_recover_edge',
- 'V8 Clusterfuzz Linux64 UBSanVptr - release builder':
- 'release_x64_ubsan_vptr_recover_edge',
},
'client.v8.ports': {
# Arm.
@@ -212,7 +211,8 @@
'release_simulate_arm64_pointer_compression',
'v8_linux64_compile_rel_xg': 'release_x64_test_features_trybot',
'v8_linux64_dbg_ng': 'debug_x64_trybot',
- 'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
+ 'v8_linux64_gc_stress_custom_snapshot_dbg_ng': 'debug_x64_trybot_custom',
+ 'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc_no_goma',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression',
@@ -230,7 +230,6 @@
'v8_linux64_tsan_isolates_rel_ng':
'release_x64_tsan_minimal_symbols',
'v8_linux64_ubsan_rel_ng': 'release_x64_ubsan_minimal_symbols',
- 'v8_linux64_ubsan_vptr_rel_ng': 'release_x64_ubsan_vptr_minimal_symbols',
'v8_odroid_arm_rel_ng': 'release_arm',
# TODO(machenbach): Remove after switching to x64 on infra side.
'v8_win_dbg': 'debug_x86_trybot',
@@ -465,12 +464,6 @@
'release_bot', 'x64', 'ubsan', 'minimal_symbols'],
'release_x64_ubsan_recover_edge': [
'release_bot', 'x64', 'edge', 'ubsan_recover'],
- 'release_x64_ubsan_vptr': [
- 'release_bot', 'x64', 'ubsan_vptr'],
- 'release_x64_ubsan_vptr_minimal_symbols': [
- 'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols'],
- 'release_x64_ubsan_vptr_recover_edge': [
- 'release_bot', 'x64', 'edge', 'ubsan_vptr_recover'],
'release_x64_shared_verify_heap': [
'release_bot', 'x64', 'shared', 'v8_verify_heap'],
'release_x64_verify_csa': [
@@ -491,6 +484,9 @@
'debug_bot', 'x64', 'fuchsia'],
'debug_x64_gcc': [
'debug_bot', 'x64', 'gcc'],
+ 'debug_x64_gcc_no_goma': [
+ 'debug', 'shared', 'v8_enable_slow_dchecks', 'v8_optimized_debug', 'x64',
+ 'gcc', 'no_goma'],
'debug_x64_header_includes': [
'debug_bot', 'x64', 'v8_check_header_includes'],
'debug_x64_jumbo': [
@@ -501,6 +497,8 @@
'debug_bot', 'x64', 'minimal_symbols'],
'debug_x64_trybot': [
'debug_trybot', 'x64'],
+ 'debug_x64_trybot_custom': [
+ 'debug_trybot', 'x64', 'v8_snapshot_custom'],
# Debug configs for x86.
'debug_x86': [
@@ -532,14 +530,15 @@
'release_trybot', 'x86', 'v8_no_enable_embedded_builtins'],
'release_x86_gcc': [
'release_bot', 'x86', 'gcc'],
+ # TODO(machenbach): Re-enable goma once supported.
'release_x86_gcc_minimal_symbols': [
- 'release_bot', 'x86', 'gcc', 'minimal_symbols'],
+ 'release', 'static', 'x86', 'gcc', 'minimal_symbols', 'no_goma'],
+ 'release_x86_gcc_no_goma': [
+ 'release', 'static', 'x86', 'gcc', 'no_goma'],
'release_x86_gcmole': [
'release_bot', 'x86', 'gcmole'],
'release_x86_gcmole_trybot': [
'release_trybot', 'x86', 'gcmole'],
- 'release_x86_interpreted_regexp': [
- 'release_bot', 'x86', 'v8_interpreted_regexp'],
'release_x86_minimal_symbols': [
'release_bot', 'x86', 'minimal_symbols'],
'release_x86_no_i18n_trybot': [
@@ -705,6 +704,10 @@
'gn_args': 'use_custom_libcxx=false',
},
+ 'no_goma': {
+ 'gn_args': 'use_goma=false',
+ },
+
'no_sysroot': {
'gn_args': 'use_sysroot=false',
},
@@ -778,20 +781,6 @@
'gn_args': 'is_ubsan=true is_ubsan_no_recover=false',
},
- 'ubsan_vptr': {
- 'mixins': ['v8_enable_test_features'],
- # TODO(krasin): Remove is_ubsan_no_recover=true when
- # https://llvm.org/bugs/show_bug.cgi?id=25569 is fixed and just use
- # ubsan_vptr instead.
- 'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=true',
- },
-
- 'ubsan_vptr_recover': {
- 'mixins': ['v8_enable_test_features'],
- # Ubsan vptr with recovery.
- 'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=false',
- },
-
'v8_check_header_includes': {
'gn_args': 'v8_check_header_includes=true',
},
@@ -833,10 +822,6 @@
'gn_args': 'v8_optimized_debug=false',
},
- 'v8_interpreted_regexp': {
- 'gn_args': 'v8_interpreted_regexp=true',
- },
-
'v8_optimized_debug': {
# This is the default in gn for debug.
},
diff --git a/deps/v8/infra/testing/OWNERS b/deps/v8/infra/testing/OWNERS
index f0129f758e..c8693c972c 100644
--- a/deps/v8/infra/testing/OWNERS
+++ b/deps/v8/infra/testing/OWNERS
@@ -1,4 +1,5 @@
set noparent
machenbach@chromium.org
-sergiyb@chromium.org
\ No newline at end of file
+sergiyb@chromium.org
+tmrts@chromium.org
\ No newline at end of file
diff --git a/deps/v8/infra/testing/PRESUBMIT.py b/deps/v8/infra/testing/PRESUBMIT.py
index d8047bc244..b8e059724e 100644
--- a/deps/v8/infra/testing/PRESUBMIT.py
+++ b/deps/v8/infra/testing/PRESUBMIT.py
@@ -11,6 +11,10 @@ For simplicity, we check all pyl files on any changes in this folder.
import ast
import os
+try:
+ basestring # Python 2
+except NameError: # Python 3
+ basestring = str
SUPPORTED_BUILDER_SPEC_KEYS = [
'swarming_dimensions',
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index f15358405c..86349a48a4 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -41,8 +41,9 @@
'tests': [
{'name': 'benchmarks', 'variant': 'default'},
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
+ {'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
{'name': 'mozilla', 'variant': 'default'},
- {'name': 'test262_variants', 'variant': 'default', 'shards': 6},
+ {'name': 'test262_variants', 'variant': 'default', 'shards': 7},
],
},
##############################################################################
@@ -70,6 +71,9 @@
],
},
'v8_linux_gcc_rel': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-16.04',
+ },
'tests': [
{'name': 'v8testing'},
],
@@ -103,7 +107,7 @@
},
'v8_linux_nosnap_rel': {
'tests': [
- {'name': 'v8testing', 'variant': 'default', 'shards': 4},
+ {'name': 'v8testing', 'variant': 'default', 'shards': 6},
],
},
'v8_linux_nosnap_dbg': {
@@ -202,6 +206,7 @@
{'name': 'test262'},
{'name': 'v8testing', 'shards': 7},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
+ {'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
'v8_linux_arm_lite_rel_ng_triggered': {
@@ -215,7 +220,8 @@
{'name': 'mozilla', 'shards': 2},
{'name': 'test262', 'shards': 2},
{'name': 'v8testing', 'shards': 8},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 6},
+ {'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
##############################################################################
@@ -254,6 +260,15 @@
{'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
],
},
+ 'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': {
+ 'tests': [
+ {
+ 'name': 'mjsunit',
+ 'test_args': ['--gc-stress', '--no-harness'],
+ 'shards': 3,
+ },
+ ],
+ },
'v8_linux64_fyi_rel_ng_triggered': {
'tests': [
# Stress sampling.
@@ -329,11 +344,6 @@
{'name': 'v8testing', 'shards': 2},
],
},
- 'v8_linux64_ubsan_vptr_rel_ng_triggered': {
- 'tests': [
- {'name': 'v8testing', 'shards': 2},
- ],
- },
'v8_linux64_verify_csa_rel_ng_triggered': {
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -348,6 +358,7 @@
{'name': 'test262', 'shards': 2},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 6},
+ {'name': 'v8testing', 'variant': 'trusted', 'shards': 3},
],
},
'v8_linux_arm64_gc_stress_dbg': {
@@ -362,6 +373,7 @@
{'name': 'test262', 'shards': 2},
{'name': 'v8testing', 'shards': 9},
{'name': 'v8testing', 'variant': 'extra', 'shards': 6},
+ {'name': 'v8testing', 'variant': 'trusted', 'shards': 3},
],
},
'v8_linux64_arm64_pointer_compression_rel_ng_triggered': {
@@ -419,6 +431,7 @@
'os': 'Windows-7-SP1',
},
'tests': [
+ {'name': 'mozilla'},
{'name': 'test262'},
{'name': 'v8testing', 'shards': 2},
],
@@ -462,6 +475,7 @@
'os': 'Windows-7-SP1',
},
'tests': [
+ {'name': 'mozilla'},
{'name': 'test262'},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
@@ -485,7 +499,7 @@
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262'},
+ {'name': 'test262', 'shards': 2},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
],
@@ -669,16 +683,6 @@
},
],
},
- 'V8 Linux - interpreted regexp': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 3600,
- 'priority': 35,
- },
- 'tests': [
- {'name': 'v8testing'},
- ],
- },
'V8 Linux - noi18n - debug': {
'tests': [
{'name': 'mozilla', 'variant': 'default'},
@@ -694,7 +698,7 @@
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
- {'name': 'test262', 'variant': 'default', 'shards': 3},
+ {'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'variant': 'default', 'shards': 3},
],
},
@@ -727,7 +731,19 @@
{'name': 'v8testing'},
],
},
+ 'V8 Linux gcc': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-16.04',
+ },
+ 'tests': [
+ {'name': 'v8testing'},
+ ],
+ },
+ # TODO(machenbach): Remove after switching.
'V8 Linux gcc 4.8': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-16.04',
+ },
'tests': [
{'name': 'v8testing'},
],
@@ -929,11 +945,6 @@
{'name': 'v8testing', 'variant': 'extra'},
],
},
- 'V8 Linux64 UBSanVptr': {
- 'tests': [
- {'name': 'v8testing'},
- ],
- },
'V8 Mac64': {
'swarming_dimensions': {
'cpu': 'x86-64',
@@ -941,7 +952,7 @@
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262'},
+ {'name': 'test262', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
],
@@ -1057,8 +1068,9 @@
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
- {'name': 'test262', 'variant': 'default', 'shards': 5},
+ {'name': 'test262', 'variant': 'default', 'shards': 6},
{'name': 'v8testing', 'variant': 'default', 'shards': 3},
+ {'name': 'v8testing', 'variant': 'trusted', 'shards': 3},
],
},
'V8 Arm': {
@@ -1193,6 +1205,7 @@
{'name': 'test262'},
{'name': 'v8testing', 'shards': 4},
{'name': 'v8testing', 'variant': 'extra'},
+ {'name': 'v8testing', 'variant': 'trusted'},
# Armv8-a.
{
'name': 'mozilla',
@@ -1227,7 +1240,8 @@
{'name': 'mozilla'},
{'name': 'test262'},
{'name': 'v8testing', 'shards': 8},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 4},
+ {'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
# Armv8-a.
{
'name': 'mozilla',
@@ -1284,6 +1298,7 @@
{'name': 'test262'},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra'},
+ {'name': 'v8testing', 'variant': 'trusted'},
],
},
'V8 Linux - arm64 - sim - debug': {
@@ -1297,6 +1312,7 @@
{'name': 'test262', 'shards': 2},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 6},
+ {'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
'V8 Linux - arm64 - sim - gc stress': {
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index f8190e8fd9..74c48a6ddd 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -8,12 +8,14 @@ include_rules = [
"+src/compiler/code-assembler.h",
"+src/compiler/wasm-compiler.h",
"-src/heap",
+ "+src/heap/embedder-tracing.h",
"+src/heap/factory.h",
"+src/heap/factory-inl.h",
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
"+src/heap/heap-write-barrier-inl.h",
"+src/heap/heap-write-barrier.h",
+ "+src/heap/read-only-heap.h",
"-src/inspector",
"-src/interpreter",
"+src/interpreter/bytecode-array-accessor.h",
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index a368bbd81e..bf8db184f4 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -9,6 +9,7 @@
#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
+#include "src/field-index-inl.h"
#include "src/frames-inl.h"
#include "src/heap/factory.h"
#include "src/isolate-inl.h"
@@ -796,7 +797,8 @@ MaybeHandle<JSReceiver> ClearInternalStackTrace(Isolate* isolate,
isolate,
Object::SetProperty(
isolate, error, isolate->factory()->stack_trace_symbol(),
- isolate->factory()->undefined_value(), LanguageMode::kStrict),
+ isolate->factory()->undefined_value(), StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError)),
JSReceiver);
return error;
}
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 4be8fb4084..09d07920b3 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -64,7 +64,7 @@ class PageAllocatorInitializer {
};
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
- GetPageTableInitializer);
+ GetPageTableInitializer)
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
index 1e2e9ed807..7f83708b96 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api-arguments-inl.h
@@ -17,6 +17,17 @@
namespace v8 {
namespace internal {
+void Object::VerifyApiCallResultType() {
+#if DEBUG
+ if (IsSmi()) return;
+ DCHECK(IsHeapObject());
+ if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
+ IsBigInt() || IsUndefined() || IsTrue() || IsFalse() || IsNull())) {
+ FATAL("API call returned invalid object");
+ }
+#endif // DEBUG
+}
+
CustomArgumentsBase::CustomArgumentsBase(Isolate* isolate)
: Relocatable(isolate) {}
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
index b706050b30..76e821cad7 100644
--- a/deps/v8/src/api-arguments.cc
+++ b/deps/v8/src/api-arguments.cc
@@ -9,17 +9,19 @@
namespace v8 {
namespace internal {
-PropertyCallbackArguments::PropertyCallbackArguments(Isolate* isolate,
- Object data, Object self,
- JSObject holder,
- ShouldThrow should_throw)
+PropertyCallbackArguments::PropertyCallbackArguments(
+ Isolate* isolate, Object data, Object self, JSObject holder,
+ Maybe<ShouldThrow> should_throw)
: Super(isolate) {
slot_at(T::kThisIndex).store(self);
slot_at(T::kHolderIndex).store(holder);
slot_at(T::kDataIndex).store(data);
slot_at(T::kIsolateIndex).store(Object(reinterpret_cast<Address>(isolate)));
- slot_at(T::kShouldThrowOnErrorIndex)
- .store(Smi::FromInt(should_throw == kThrowOnError ? 1 : 0));
+ int value = Internals::kInferShouldThrowMode;
+ if (should_throw.IsJust()) {
+ value = should_throw.FromJust();
+ }
+ slot_at(T::kShouldThrowOnErrorIndex).store(Smi::FromInt(value));
// Here the hole is set as default value.
// It cannot escape into js as it's removed in Call below.
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 6b025bdbb3..4f1ea8c85a 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -72,7 +72,7 @@ class PropertyCallbackArguments
static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
PropertyCallbackArguments(Isolate* isolate, Object data, Object self,
- JSObject holder, ShouldThrow should_throw);
+ JSObject holder, Maybe<ShouldThrow> should_throw);
// -------------------------------------------------------------------------
// Accessor Callbacks
diff --git a/deps/v8/src/api-inl.h b/deps/v8/src/api-inl.h
index 024dc88537..9ccb9e4a6a 100644
--- a/deps/v8/src/api-inl.h
+++ b/deps/v8/src/api-inl.h
@@ -94,11 +94,11 @@ MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
MAKE_TO_LOCAL(StackTraceToLocal, FixedArray, StackTrace)
-MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
+MAKE_TO_LOCAL(StackFrameToLocal, StackTraceFrame, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
-MAKE_TO_LOCAL(ToLocal, BigInt, BigInt);
+MAKE_TO_LOCAL(ToLocal, BigInt, BigInt)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
MAKE_TO_LOCAL(ToLocalPrimitive, Object, Primitive)
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index d0088bbf1c..2e34595ab5 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -115,8 +115,9 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
}
#endif
- MAYBE_RETURN_NULL(Object::AddDataProperty(
- &it, value, attributes, kThrowOnError, StoreOrigin::kNamed));
+ MAYBE_RETURN_NULL(Object::AddDataProperty(&it, value, attributes,
+ Just(ShouldThrow::kThrowOnError),
+ StoreOrigin::kNamed));
return value;
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 85306decd7..168522f8c3 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -45,6 +45,7 @@
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/globals.h"
+#include "src/heap/heap-inl.h"
#include "src/icu_util.h"
#include "src/isolate-inl.h"
#include "src/json-parser.h"
@@ -55,6 +56,7 @@
#include "src/objects/api-callbacks.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/embedder-data-slot-inl.h"
+#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-inl.h"
@@ -251,11 +253,9 @@ class InternalEscapableScope : public v8::EscapableHandleScope {
// TODO(jochen): This should be #ifdef DEBUG
#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
-void CheckMicrotasksScopesConsistency(i::Isolate* isolate) {
- auto handle_scope_implementer = isolate->handle_scope_implementer();
- auto* microtask_queue = isolate->default_microtask_queue();
- if (handle_scope_implementer->microtasks_policy() ==
- v8::MicrotasksPolicy::kScoped) {
+void CheckMicrotasksScopesConsistency(i::MicrotaskQueue* microtask_queue) {
+ if (microtask_queue &&
+ microtask_queue->microtasks_policy() == v8::MicrotasksPolicy::kScoped) {
DCHECK(microtask_queue->GetMicrotasksScopeDepth() ||
!microtask_queue->DebugMicrotasksScopeDepthIsZero());
}
@@ -292,15 +292,19 @@ class CallDepthScope {
if (do_callback) isolate_->FireBeforeCallEnteredCallback();
}
~CallDepthScope() {
+ i::MicrotaskQueue* microtask_queue = isolate_->default_microtask_queue();
if (!context_.IsEmpty()) {
i::HandleScopeImplementer* impl = isolate_->handle_scope_implementer();
isolate_->set_context(impl->RestoreContext());
+
+ i::Handle<i::Context> env = Utils::OpenHandle(*context_);
+ microtask_queue = env->native_context()->microtask_queue();
}
if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth();
- if (do_callback) isolate_->FireCallCompletedCallback();
+ if (do_callback) isolate_->FireCallCompletedCallback(microtask_queue);
// TODO(jochen): This should be #ifdef DEBUG
#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
- if (do_callback) CheckMicrotasksScopesConsistency(isolate_);
+ if (do_callback) CheckMicrotasksScopesConsistency(microtask_queue);
#endif
isolate_->set_next_v8_call_is_safe_for_termination(safe_for_termination_);
}
@@ -312,7 +316,7 @@ class CallDepthScope {
handle_scope_implementer->DecrementCallDepth();
bool clear_exception =
handle_scope_implementer->CallDepthIsZero() &&
- isolate_->thread_local_top()->try_catch_handler() == nullptr;
+ isolate_->thread_local_top()->try_catch_handler_ == nullptr;
isolate_->OptionalRescheduleException(clear_exception);
}
@@ -580,7 +584,7 @@ SnapshotCreator::SnapshotCreator(Isolate* isolate,
internal_isolate->set_snapshot_blob(blob);
i::Snapshot::Initialize(internal_isolate);
} else {
- internal_isolate->Init(nullptr);
+ internal_isolate->InitWithoutSnapshot();
}
data_ = data;
}
@@ -895,15 +899,27 @@ void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
RegisteredExtension* RegisteredExtension::first_extension_ = nullptr;
RegisteredExtension::RegisteredExtension(Extension* extension)
- : extension_(extension) { }
+ : legacy_unowned_extension_(extension) {}
+RegisteredExtension::RegisteredExtension(std::unique_ptr<Extension> extension)
+ : extension_(std::move(extension)) {}
-void RegisteredExtension::Register(RegisteredExtension* that) {
- that->next_ = first_extension_;
- first_extension_ = that;
+// static
+void RegisteredExtension::Register(Extension* extension) {
+ RegisteredExtension* new_extension = new RegisteredExtension(extension);
+ new_extension->next_ = first_extension_;
+ first_extension_ = new_extension;
}
+// static
+void RegisteredExtension::Register(std::unique_ptr<Extension> extension) {
+ RegisteredExtension* new_extension =
+ new RegisteredExtension(std::move(extension));
+ new_extension->next_ = first_extension_;
+ first_extension_ = new_extension;
+}
+// static
void RegisteredExtension::UnregisterAll() {
RegisteredExtension* re = first_extension_;
while (re != nullptr) {
@@ -930,11 +946,11 @@ class ExtensionResource : public String::ExternalOneByteStringResource {
};
} // anonymous namespace
-void RegisterExtension(Extension* that) {
- RegisteredExtension* extension = new RegisteredExtension(that);
- RegisteredExtension::Register(extension);
-}
+void RegisterExtension(Extension* that) { RegisteredExtension::Register(that); }
+void RegisterExtension(std::unique_ptr<Extension> extension) {
+ RegisteredExtension::Register(std::move(extension));
+}
Extension::Extension(const char* name,
const char* source,
@@ -964,7 +980,6 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
set_max_semi_space_size_in_kb(
i::Heap::ComputeMaxSemiSpaceSize(physical_memory));
set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory));
- set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSize);
if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
// Reserve no more than 1/8 of the memory for the code range, but at most
@@ -980,12 +995,10 @@ void SetResourceConstraints(i::Isolate* isolate,
size_t semi_space_size = constraints.max_semi_space_size_in_kb();
size_t old_space_size = constraints.max_old_space_size();
size_t code_range_size = constraints.code_range_size();
- size_t max_pool_size = constraints.max_zone_pool_size();
if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) {
isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
code_range_size);
}
- isolate->allocator()->ConfigureSegmentPool(max_pool_size);
if (constraints.stack_limit() != nullptr) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
@@ -1004,11 +1017,33 @@ i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
return result.location();
}
-i::Address* V8::CopyPersistent(i::Address* obj) {
- i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
+i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
+ internal::Address* slot) {
+ LOG_API(isolate, TracedGlobal, New);
+ i::Handle<i::Object> result =
+ isolate->global_handles()->CreateTraced(*obj, slot);
+#ifdef VERIFY_HEAP
+ if (i::FLAG_verify_heap) {
+ i::Object(*obj)->ObjectVerify(isolate);
+ }
+#endif // VERIFY_HEAP
+ return result.location();
+}
+
+i::Address* V8::CopyGlobalReference(i::Address* from) {
+ i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(from);
return result.location();
}
+void V8::MoveGlobalReference(internal::Address** from, internal::Address** to) {
+ i::GlobalHandles::MoveGlobal(from, to);
+}
+
+void V8::MoveTracedGlobalReference(internal::Address** from,
+ internal::Address** to) {
+ i::GlobalHandles::MoveTracedGlobal(from, to);
+}
+
void V8::RegisterExternallyReferencedObject(i::Address* location,
i::Isolate* isolate) {
isolate->heap()->RegisterExternallyReferencedObject(location);
@@ -1036,6 +1071,17 @@ void V8::DisposeGlobal(i::Address* location) {
i::GlobalHandles::Destroy(location);
}
+void V8::DisposeTracedGlobal(internal::Address* location) {
+ i::GlobalHandles::DestroyTraced(location);
+}
+
+void V8::SetFinalizationCallbackTraced(
+ internal::Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback callback) {
+ i::GlobalHandles::SetFinalizationCallbackForTraced(location, parameter,
+ callback);
+}
+
Value* V8::Eternalize(Isolate* v8_isolate, Value* value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::Object object = *Utils::OpenHandle(value);
@@ -2901,8 +2947,8 @@ Local<StackFrame> StackTrace::GetFrame(Isolate* v8_isolate,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope scope(v8_isolate);
auto obj = handle(Utils::OpenHandle(this)->get(index), isolate);
- auto info = i::Handle<i::StackFrameInfo>::cast(obj);
- return scope.Escape(Utils::StackFrameToLocal(info));
+ auto frame = i::Handle<i::StackTraceFrame>::cast(obj);
+ return scope.Escape(Utils::StackFrameToLocal(frame));
}
int StackTrace::GetFrameCount() const {
@@ -2925,29 +2971,26 @@ Local<StackTrace> StackTrace::CurrentStackTrace(
// --- S t a c k F r a m e ---
int StackFrame::GetLineNumber() const {
- int v = Utils::OpenHandle(this)->line_number();
- return v ? v : Message::kNoLineNumberInfo;
+ return i::StackTraceFrame::GetLineNumber(Utils::OpenHandle(this));
}
int StackFrame::GetColumn() const {
- int v = Utils::OpenHandle(this)->column_number();
- return v ? v : Message::kNoLineNumberInfo;
+ return i::StackTraceFrame::GetColumnNumber(Utils::OpenHandle(this));
}
int StackFrame::GetScriptId() const {
- int v = Utils::OpenHandle(this)->script_id();
- return v ? v : Message::kNoScriptIdInfo;
+ return i::StackTraceFrame::GetScriptId(Utils::OpenHandle(this));
}
Local<String> StackFrame::GetScriptName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::StackFrameInfo> self = Utils::OpenHandle(this);
- i::Handle<i::Object> obj(self->script_name(), isolate);
- return obj->IsString()
- ? scope.Escape(Local<String>::Cast(Utils::ToLocal(obj)))
+ i::Handle<i::Object> name =
+ i::StackTraceFrame::GetFileName(Utils::OpenHandle(this));
+ return name->IsString()
+ ? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
: Local<String>();
}
@@ -2955,10 +2998,10 @@ Local<String> StackFrame::GetScriptName() const {
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::StackFrameInfo> self = Utils::OpenHandle(this);
- i::Handle<i::Object> obj(self->script_name_or_source_url(), isolate);
- return obj->IsString()
- ? scope.Escape(Local<String>::Cast(Utils::ToLocal(obj)))
+ i::Handle<i::Object> name =
+ i::StackTraceFrame::GetScriptNameOrSourceUrl(Utils::OpenHandle(this));
+ return name->IsString()
+ ? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
: Local<String>();
}
@@ -2966,21 +3009,24 @@ Local<String> StackFrame::GetScriptNameOrSourceURL() const {
Local<String> StackFrame::GetFunctionName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::StackFrameInfo> self = Utils::OpenHandle(this);
- i::Handle<i::Object> obj(self->function_name(), isolate);
- return obj->IsString()
- ? scope.Escape(Local<String>::Cast(Utils::ToLocal(obj)))
+ i::Handle<i::Object> name =
+ i::StackTraceFrame::GetFunctionName(Utils::OpenHandle(this));
+ return name->IsString()
+ ? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
: Local<String>();
}
-bool StackFrame::IsEval() const { return Utils::OpenHandle(this)->is_eval(); }
+bool StackFrame::IsEval() const {
+ return i::StackTraceFrame::IsEval(Utils::OpenHandle(this));
+}
bool StackFrame::IsConstructor() const {
- return Utils::OpenHandle(this)->is_constructor();
+ return i::StackTraceFrame::IsConstructor(Utils::OpenHandle(this));
}
-bool StackFrame::IsWasm() const { return Utils::OpenHandle(this)->is_wasm(); }
-
+bool StackFrame::IsWasm() const {
+ return i::StackTraceFrame::IsWasm(Utils::OpenHandle(this));
+}
// --- J S O N ---
@@ -3615,6 +3661,11 @@ i::Isolate* i::IsolateFromNeverReadOnlySpaceObject(i::Address obj) {
i::HeapObject::cast(i::Object(obj)));
}
+bool i::ShouldThrowOnError(i::Isolate* isolate) {
+ return i::GetShouldThrow(isolate, Nothing<i::ShouldThrow>()) ==
+ i::ShouldThrow::kThrowOnError;
+}
+
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
Utils::ApiCheck(isolate != nullptr && !isolate->IsDead(),
@@ -3993,8 +4044,8 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception =
i::Runtime::SetObjectProperty(isolate, self, key_obj, value_obj,
- i::LanguageMode::kSloppy,
- i::StoreOrigin::kMaybeKeyed)
+ i::StoreOrigin::kMaybeKeyed,
+ Just(i::ShouldThrow::kDontThrow))
.is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
@@ -4014,7 +4065,7 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
auto self = Utils::OpenHandle(this);
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception = i::Object::SetElement(isolate, self, index, value_obj,
- i::LanguageMode::kSloppy)
+ i::ShouldThrow::kDontThrow)
.is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
@@ -4038,7 +4089,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
Maybe<bool> result = i::JSReceiver::CreateDataProperty(
- isolate, self, key_obj, value_obj, i::kDontThrow);
+ isolate, self, key_obj, value_obj, Just(i::kDontThrow));
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4056,7 +4107,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
i::LookupIterator it(isolate, self, index, self, i::LookupIterator::OWN);
Maybe<bool> result =
- i::JSReceiver::CreateDataProperty(&it, value_obj, i::kDontThrow);
+ i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow));
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4174,7 +4225,7 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
ENTER_V8(isolate, context, Object, DefineOwnProperty, Nothing<bool>(),
i::HandleScope);
Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
- isolate, self, key_obj, &desc, i::kDontThrow);
+ isolate, self, key_obj, &desc, Just(i::kDontThrow));
// Even though we said kDontThrow, there might be accessors that do throw.
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return success;
@@ -4184,7 +4235,7 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
ENTER_V8_NO_SCRIPT(isolate, context, Object, DefineOwnProperty,
Nothing<bool>(), i::HandleScope);
Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
- isolate, self, key_obj, &desc, i::kDontThrow);
+ isolate, self, key_obj, &desc, Just(i::kDontThrow));
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return success;
}
@@ -4200,7 +4251,8 @@ Maybe<bool> v8::Object::DefineProperty(v8::Local<v8::Context> context,
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
- isolate, self, key_obj, &descriptor.get_private()->desc, i::kDontThrow);
+ isolate, self, key_obj, &descriptor.get_private()->desc,
+ Just(i::kDontThrow));
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return success;
}
@@ -4221,7 +4273,7 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
desc.set_value(value_obj);
return i::JSProxy::SetPrivateSymbol(
isolate, i::Handle<i::JSProxy>::cast(self),
- i::Handle<i::Symbol>::cast(key_obj), &desc, i::kDontThrow);
+ i::Handle<i::Symbol>::cast(key_obj), &desc, Just(i::kDontThrow));
}
auto js_object = i::Handle<i::JSObject>::cast(self);
i::LookupIterator it(js_object, key_obj, js_object);
@@ -4386,22 +4438,11 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(
RETURN_ESCAPED(Utils::ToLocal(result));
}
-
-Local<Array> v8::Object::GetPropertyNames() {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(GetPropertyNames(context), Array);
-}
-
MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context) {
return GetOwnPropertyNames(
context, static_cast<v8::PropertyFilter>(ONLY_ENUMERABLE | SKIP_SYMBOLS));
}
-Local<Array> v8::Object::GetOwnPropertyNames() {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyNames(context), Array);
-}
-
MaybeLocal<Array> v8::Object::GetOwnPropertyNames(
Local<Context> context, PropertyFilter filter,
KeyConversionMode key_conversion) {
@@ -4663,12 +4704,6 @@ Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
}
-bool v8::Object::HasRealNamedProperty(Local<String> key) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- return HasRealNamedProperty(context, key).FromMaybe(false);
-}
-
-
Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
uint32_t index) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4683,13 +4718,6 @@ Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
return result;
}
-
-bool v8::Object::HasRealIndexedProperty(uint32_t index) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- return HasRealIndexedProperty(context, index).FromMaybe(false);
-}
-
-
Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
Local<Name> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4705,13 +4733,6 @@ Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
return result;
}
-
-bool v8::Object::HasRealNamedCallbackProperty(Local<String> key) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- return HasRealNamedCallbackProperty(context, key).FromMaybe(false);
-}
-
-
bool v8::Object::HasNamedLookupInterceptor() {
auto self = Utils::OpenHandle(this);
return self->IsJSObject() &&
@@ -4823,7 +4844,8 @@ Local<v8::Object> v8::Object::Clone() {
Local<v8::Context> v8::Object::CreationContext() {
auto self = Utils::OpenHandle(this);
- return Utils::ToLocal(self->GetCreationContext());
+ i::Handle<i::Context> context = self->GetCreationContext();
+ return Utils::ToLocal(context);
}
@@ -4897,14 +4919,6 @@ MaybeLocal<Function> Function::New(Local<Context> context,
return templ->GetFunction(context);
}
-
-Local<Function> Function::New(Isolate* v8_isolate, FunctionCallback callback,
- Local<Value> data, int length) {
- return Function::New(v8_isolate->GetCurrentContext(), callback, data, length,
- ConstructorBehavior::kAllow)
- .FromMaybe(Local<Function>());
-}
-
MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
v8::Local<v8::Value> argv[]) const {
return NewInstanceWithSideEffectType(context, argc, argv,
@@ -4980,14 +4994,6 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
RETURN_ESCAPED(result);
}
-
-Local<v8::Value> Function::Call(v8::Local<v8::Value> recv, int argc,
- v8::Local<v8::Value> argv[]) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Call(context, recv, argc, argv), Value);
-}
-
-
void Function::SetName(v8::Local<v8::String> name) {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) return;
@@ -5147,7 +5153,7 @@ int String::Length() const {
bool String::IsOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- return str->HasOnlyOneByteChars();
+ return str->IsOneByteRepresentation();
}
@@ -5260,7 +5266,7 @@ class ContainsOnlyOneByteHelper {
bool String::ContainsOnlyOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (str->HasOnlyOneByteChars()) return true;
+ if (str->IsOneByteRepresentation()) return true;
ContainsOnlyOneByteHelper helper;
return helper.Check(*str);
}
@@ -5588,7 +5594,7 @@ Local<Value> Symbol::Name() const {
i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
i::Isolate* isolate;
- if (!i::Isolate::FromWritableHeapObject(*sym, &isolate)) {
+ if (!i::GetIsolateFromWritableObject(*sym, &isolate)) {
// If the Symbol is in RO_SPACE, then its name must be too. Since RO_SPACE
// objects are immovable we can use the Handle(Address*) constructor with
// the address of the name field in the Symbol object without needing an
@@ -5784,10 +5790,6 @@ bool TryHandleWebAssemblyTrapWindows(EXCEPTION_POINTERS* exception) {
}
#endif
-bool V8::RegisterDefaultSignalHandler() {
- return v8::internal::trap_handler::RegisterDefaultTrapHandler();
-}
-
bool V8::EnableWebAssemblyTrapHandler(bool use_v8_signal_handler) {
return v8::internal::trap_handler::EnableTrapHandler(use_v8_signal_handler);
}
@@ -5875,10 +5877,11 @@ struct InvokeBootstrapper<i::Context> {
i::Isolate* isolate, i::MaybeHandle<i::JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
- v8::DeserializeInternalFieldsCallback embedder_fields_deserializer) {
+ v8::DeserializeInternalFieldsCallback embedder_fields_deserializer,
+ v8::MicrotaskQueue* microtask_queue) {
return isolate->bootstrapper()->CreateEnvironment(
maybe_global_proxy, global_proxy_template, extensions,
- context_snapshot_index, embedder_fields_deserializer);
+ context_snapshot_index, embedder_fields_deserializer, microtask_queue);
}
};
@@ -5888,7 +5891,8 @@ struct InvokeBootstrapper<i::JSGlobalProxy> {
i::Isolate* isolate, i::MaybeHandle<i::JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
- v8::DeserializeInternalFieldsCallback embedder_fields_deserializer) {
+ v8::DeserializeInternalFieldsCallback embedder_fields_deserializer,
+ v8::MicrotaskQueue* microtask_queue) {
USE(extensions);
USE(context_snapshot_index);
return isolate->bootstrapper()->NewRemoteContext(maybe_global_proxy,
@@ -5901,7 +5905,8 @@ static i::Handle<ObjectType> CreateEnvironment(
i::Isolate* isolate, v8::ExtensionConfiguration* extensions,
v8::MaybeLocal<ObjectTemplate> maybe_global_template,
v8::MaybeLocal<Value> maybe_global_proxy, size_t context_snapshot_index,
- v8::DeserializeInternalFieldsCallback embedder_fields_deserializer) {
+ v8::DeserializeInternalFieldsCallback embedder_fields_deserializer,
+ v8::MicrotaskQueue* microtask_queue) {
i::Handle<ObjectType> result;
{
@@ -5977,9 +5982,9 @@ static i::Handle<ObjectType> CreateEnvironment(
}
// Create the environment.
InvokeBootstrapper<ObjectType> invoke;
- result =
- invoke.Invoke(isolate, maybe_proxy, proxy_template, extensions,
- context_snapshot_index, embedder_fields_deserializer);
+ result = invoke.Invoke(isolate, maybe_proxy, proxy_template, extensions,
+ context_snapshot_index, embedder_fields_deserializer,
+ microtask_queue);
// Restore the access check info and interceptors on the global template.
if (!maybe_global_template.IsEmpty()) {
@@ -6005,7 +6010,8 @@ Local<Context> NewContext(
v8::Isolate* external_isolate, v8::ExtensionConfiguration* extensions,
v8::MaybeLocal<ObjectTemplate> global_template,
v8::MaybeLocal<Value> global_object, size_t context_snapshot_index,
- v8::DeserializeInternalFieldsCallback embedder_fields_deserializer) {
+ v8::DeserializeInternalFieldsCallback embedder_fields_deserializer,
+ v8::MicrotaskQueue* microtask_queue) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
// TODO(jkummerow): This is for crbug.com/713699. Remove it if it doesn't
// fail.
@@ -6019,7 +6025,7 @@ Local<Context> NewContext(
if (extensions == nullptr) extensions = &no_extensions;
i::Handle<i::Context> env = CreateEnvironment<i::Context>(
isolate, extensions, global_template, global_object,
- context_snapshot_index, embedder_fields_deserializer);
+ context_snapshot_index, embedder_fields_deserializer, microtask_queue);
if (env.is_null()) {
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
return Local<Context>();
@@ -6031,15 +6037,18 @@ Local<Context> v8::Context::New(
v8::Isolate* external_isolate, v8::ExtensionConfiguration* extensions,
v8::MaybeLocal<ObjectTemplate> global_template,
v8::MaybeLocal<Value> global_object,
- DeserializeInternalFieldsCallback internal_fields_deserializer) {
+ DeserializeInternalFieldsCallback internal_fields_deserializer,
+ v8::MicrotaskQueue* microtask_queue) {
return NewContext(external_isolate, extensions, global_template,
- global_object, 0, internal_fields_deserializer);
+ global_object, 0, internal_fields_deserializer,
+ microtask_queue);
}
MaybeLocal<Context> v8::Context::FromSnapshot(
v8::Isolate* external_isolate, size_t context_snapshot_index,
v8::DeserializeInternalFieldsCallback embedder_fields_deserializer,
- v8::ExtensionConfiguration* extensions, MaybeLocal<Value> global_object) {
+ v8::ExtensionConfiguration* extensions, MaybeLocal<Value> global_object,
+ v8::MicrotaskQueue* microtask_queue) {
size_t index_including_default_context = context_snapshot_index + 1;
if (!i::Snapshot::HasContextSnapshot(
reinterpret_cast<i::Isolate*>(external_isolate),
@@ -6048,7 +6057,7 @@ MaybeLocal<Context> v8::Context::FromSnapshot(
}
return NewContext(external_isolate, extensions, MaybeLocal<ObjectTemplate>(),
global_object, index_including_default_context,
- embedder_fields_deserializer);
+ embedder_fields_deserializer, microtask_queue);
}
MaybeLocal<Object> v8::Context::NewRemoteContext(
@@ -6069,9 +6078,9 @@ MaybeLocal<Object> v8::Context::NewRemoteContext(
"v8::Context::NewRemoteContext",
"Global template needs to have access check handlers.");
i::Handle<i::JSGlobalProxy> global_proxy =
- CreateEnvironment<i::JSGlobalProxy>(isolate, nullptr, global_template,
- global_object, 0,
- DeserializeInternalFieldsCallback());
+ CreateEnvironment<i::JSGlobalProxy>(
+ isolate, nullptr, global_template, global_object, 0,
+ DeserializeInternalFieldsCallback(), nullptr);
if (global_proxy.is_null()) {
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
return MaybeLocal<Object>();
@@ -6198,14 +6207,6 @@ MaybeLocal<v8::Object> ObjectTemplate::NewInstance(Local<Context> context) {
RETURN_ESCAPED(result);
}
-
-Local<v8::Object> ObjectTemplate::NewInstance() {
- Local<Context> context =
- reinterpret_cast<v8::Isolate*>(Utils::OpenHandle(this)->GetIsolate())
- ->GetCurrentContext();
- RETURN_TO_LOCAL_UNCHECKED(NewInstance(context), Object);
-}
-
void v8::ObjectTemplate::CheckCast(Data* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsObjectTemplateInfo(), "v8::ObjectTemplate::Cast",
@@ -6240,14 +6241,6 @@ MaybeLocal<v8::Function> FunctionTemplate::GetFunction(Local<Context> context) {
RETURN_ESCAPED(result);
}
-
-Local<v8::Function> FunctionTemplate::GetFunction() {
- Local<Context> context =
- reinterpret_cast<v8::Isolate*>(Utils::OpenHandle(this)->GetIsolate())
- ->GetCurrentContext();
- RETURN_TO_LOCAL_UNCHECKED(GetFunction(context), Function);
-}
-
MaybeLocal<v8::Object> FunctionTemplate::NewRemoteInstance() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
@@ -6508,10 +6501,10 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
return false;
}
- // It is safe to call FromWritable because SupportsExternalization already
- // checked that the object is writable.
+ // It is safe to call GetIsolateFromWritableObject because
+ // SupportsExternalization already checked that the object is writable.
i::Isolate* isolate;
- i::Isolate::FromWritableHeapObject(obj, &isolate);
+ i::GetIsolateFromWritableObject(obj, &isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
CHECK(resource && resource->data());
@@ -6537,17 +6530,16 @@ bool v8::String::MakeExternal(
return false;
}
- // It is safe to call FromWritable because SupportsExternalization already
- // checked that the object is writable.
+ // It is safe to call GetIsolateFromWritableObject because
+ // SupportsExternalization already checked that the object is writable.
i::Isolate* isolate;
- i::Isolate::FromWritableHeapObject(obj, &isolate);
+ i::GetIsolateFromWritableObject(obj, &isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
- DCHECK(result);
- DCHECK(obj->IsExternalString());
+ DCHECK_IMPLIES(result, obj->IsExternalString());
return result;
}
@@ -6565,7 +6557,7 @@ bool v8::String::CanMakeExternal() {
}
// Only old space strings should be externalized.
- return !i::Heap::InNewSpace(obj);
+ return !i::Heap::InYoungGeneration(obj);
}
bool v8::String::StringEquals(Local<String> that) {
@@ -6774,23 +6766,25 @@ double v8::Date::ValueOf() const {
return jsdate->value()->Number();
}
+// Assert that the static TimeZoneDetection cast in
+// DateTimeConfigurationChangeNotification is valid.
+#define TIME_ZONE_DETECTION_ASSERT_EQ(value) \
+ STATIC_ASSERT( \
+ static_cast<int>(v8::Isolate::TimeZoneDetection::value) == \
+ static_cast<int>(base::TimezoneCache::TimeZoneDetection::value)); \
+ STATIC_ASSERT(static_cast<int>(v8::Isolate::TimeZoneDetection::value) == \
+ static_cast<int>(v8::Date::TimeZoneDetection::value));
+TIME_ZONE_DETECTION_ASSERT_EQ(kSkip)
+TIME_ZONE_DETECTION_ASSERT_EQ(kRedetect)
+#undef TIME_ZONE_DETECTION_ASSERT_EQ
-void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, Date, DateTimeConfigurationChangeNotification);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i_isolate->date_cache()->ResetDateCache();
-#ifdef V8_INTL_SUPPORT
- i_isolate->clear_cached_icu_object(
- i::Isolate::ICUObjectCacheType::kDefaultSimpleDateFormat);
- i_isolate->clear_cached_icu_object(
- i::Isolate::ICUObjectCacheType::kDefaultSimpleDateFormatForTime);
- i_isolate->clear_cached_icu_object(
- i::Isolate::ICUObjectCacheType::kDefaultSimpleDateFormatForDate);
-#endif // V8_INTL_SUPPORT
+// static
+void v8::Date::DateTimeConfigurationChangeNotification(
+ Isolate* isolate, TimeZoneDetection time_zone_detection) {
+ isolate->DateTimeConfigurationChangeNotification(
+ static_cast<v8::Isolate::TimeZoneDetection>(time_zone_detection));
}
-
MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
Local<String> pattern, Flags flags) {
PREPARE_FOR_EXECUTION(context, RegExp, New, RegExp);
@@ -7301,10 +7295,6 @@ MemorySpan<const uint8_t> CompiledWasmModule::GetWireBytesRef() {
return {bytes_vec.start(), bytes_vec.size()};
}
-WasmModuleObject::BufferReference WasmModuleObject::GetWasmWireBytesRef() {
- return GetCompiledModule().GetWireBytesRef();
-}
-
WasmModuleObject::TransferrableModule
WasmModuleObject::GetTransferrableModule() {
if (i::FLAG_wasm_shared_code) {
@@ -7346,12 +7336,6 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::FromTransferrableModule(
}
}
-WasmModuleObject::SerializedModule WasmModuleObject::Serialize() {
- // TODO(clemensh): Deprecated; remove after M-73 branch.
- OwnedBuffer serialized = GetCompiledModule().Serialize();
- return {std::move(serialized.buffer), serialized.size};
-}
-
MaybeLocal<WasmModuleObject> WasmModuleObject::Deserialize(
Isolate* isolate, MemorySpan<const uint8_t> serialized_module,
MemorySpan<const uint8_t> wire_bytes) {
@@ -7701,6 +7685,27 @@ Local<DataView> DataView::New(Local<SharedArrayBuffer> shared_array_buffer,
return Utils::ToLocal(obj);
}
+namespace {
+i::Handle<i::JSArrayBuffer> SetupSharedArrayBuffer(
+ Isolate* isolate, void* data, size_t byte_length,
+ ArrayBufferCreationMode mode) {
+ CHECK(i::FLAG_harmony_sharedarraybuffer);
+ // Embedders must guarantee that the external backing store is valid.
+ CHECK(byte_length == 0 || data != nullptr);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, SharedArrayBuffer, New);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::Handle<i::JSArrayBuffer> obj =
+ i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
+ bool is_wasm_memory =
+ i_isolate->wasm_engine()->memory_tracker()->IsWasmMemory(data);
+ i::JSArrayBuffer::Setup(obj, i_isolate,
+ mode == ArrayBufferCreationMode::kExternalized, data,
+ byte_length, i::SharedFlag::kShared, is_wasm_memory);
+ return obj;
+}
+
+} // namespace
bool v8::SharedArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
@@ -7723,14 +7728,15 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
v8::SharedArrayBuffer::Contents::Contents(
void* data, size_t byte_length, void* allocation_base,
size_t allocation_length, Allocator::AllocationMode allocation_mode,
- DeleterCallback deleter, void* deleter_data)
+ DeleterCallback deleter, void* deleter_data, bool is_growable)
: data_(data),
byte_length_(byte_length),
allocation_base_(allocation_base),
allocation_length_(allocation_length),
allocation_mode_(allocation_mode),
deleter_(deleter),
- deleter_data_(deleter_data) {
+ deleter_data_(deleter_data),
+ is_growable_(is_growable) {
DCHECK_LE(allocation_base_, data_);
DCHECK_LE(byte_length_, allocation_length_);
}
@@ -7748,7 +7754,8 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
: reinterpret_cast<Contents::DeleterCallback>(ArrayBufferDeleter),
self->is_wasm_memory()
? static_cast<void*>(self->GetIsolate()->wasm_engine())
- : static_cast<void*>(self->GetIsolate()->array_buffer_allocator()));
+ : static_cast<void*>(self->GetIsolate()->array_buffer_allocator()),
+ self->is_growable());
return contents;
}
@@ -7778,22 +7785,19 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
Isolate* isolate, void* data, size_t byte_length,
ArrayBufferCreationMode mode) {
- CHECK(i::FLAG_harmony_sharedarraybuffer);
- // Embedders must guarantee that the external backing store is valid.
- CHECK(byte_length == 0 || data != nullptr);
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, SharedArrayBuffer, New);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::JSArrayBuffer> obj =
- i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
- bool is_wasm_memory =
- i_isolate->wasm_engine()->memory_tracker()->IsWasmMemory(data);
- i::JSArrayBuffer::Setup(obj, i_isolate,
- mode == ArrayBufferCreationMode::kExternalized, data,
- byte_length, i::SharedFlag::kShared, is_wasm_memory);
- return Utils::ToLocalShared(obj);
+ i::Handle<i::JSArrayBuffer> buffer =
+ SetupSharedArrayBuffer(isolate, data, byte_length, mode);
+ return Utils::ToLocalShared(buffer);
}
+Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
+ Isolate* isolate, const SharedArrayBuffer::Contents& contents,
+ ArrayBufferCreationMode mode) {
+ i::Handle<i::JSArrayBuffer> buffer = SetupSharedArrayBuffer(
+ isolate, contents.Data(), contents.ByteLength(), mode);
+ buffer->set_is_growable(contents.IsGrowable());
+ return Utils::ToLocalShared(buffer);
+}
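The factored-out SetupSharedArrayBuffer also serves this new overload, which rebuilds a buffer from an existing Contents and carries the is_growable bit along. A minimal sketch, assuming the backing store was already externalized and remains valid:

    // Sketch: wrap the same externalized backing store in a second
    // SharedArrayBuffer; the embedder still owns the store's lifetime.
    v8::Local<v8::SharedArrayBuffer> Rewrap(
        v8::Isolate* isolate, v8::Local<v8::SharedArrayBuffer> sab) {
      v8::SharedArrayBuffer::Contents contents = sab->Externalize();
      return v8::SharedArrayBuffer::New(
          isolate, contents, v8::ArrayBufferCreationMode::kExternalized);
    }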
Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -8194,7 +8198,7 @@ void Isolate::Initialize(Isolate* isolate,
}
base::ElapsedTimer timer;
if (i::FLAG_profile_deserialization) timer.Start();
- i_isolate->Init(nullptr);
+ i_isolate->InitWithoutSnapshot();
if (i::FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
i::PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
@@ -8328,17 +8332,16 @@ Isolate::AllowJavascriptExecutionScope::~AllowJavascriptExecutionScope() {
delete reinterpret_cast<i::NoDumpOnJavascriptExecution*>(internal_dump_);
}
-
Isolate::SuppressMicrotaskExecutionScope::SuppressMicrotaskExecutionScope(
Isolate* isolate)
- : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
+ : isolate_(reinterpret_cast<i::Isolate*>(isolate)),
+ microtask_queue_(isolate_->default_microtask_queue()) {
isolate_->handle_scope_implementer()->IncrementCallDepth();
- isolate_->default_microtask_queue()->IncrementMicrotasksSuppressions();
+ microtask_queue_->IncrementMicrotasksSuppressions();
}
-
Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() {
- isolate_->default_microtask_queue()->DecrementMicrotasksSuppressions();
+ microtask_queue_->DecrementMicrotasksSuppressions();
isolate_->handle_scope_implementer()->DecrementCallDepth();
}
@@ -8536,11 +8539,14 @@ void Isolate::RunMicrotasks() {
isolate->default_microtask_queue()->RunMicrotasks(isolate);
}
-void Isolate::EnqueueMicrotask(Local<Function> function) {
+void Isolate::EnqueueMicrotask(Local<Function> v8_function) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- i::Handle<i::CallableTask> microtask = isolate->factory()->NewCallableTask(
- Utils::OpenHandle(*function), isolate->native_context());
- isolate->default_microtask_queue()->EnqueueMicrotask(*microtask);
+ i::Handle<i::JSReceiver> function = Utils::OpenHandle(*v8_function);
+ i::Handle<i::NativeContext> handler_context;
+ if (!i::JSReceiver::GetContextForMicrotask(function).ToHandle(
+ &handler_context))
+ handler_context = isolate->native_context();
+ handler_context->microtask_queue()->EnqueueMicrotask(this, v8_function);
}
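With this change, a callable microtask is queued on the microtask queue of the context the function was created in, falling back to the isolate's native context; previously it always went to the default queue. A sketch of the call site, assuming fn came from a context with its own queue:

    // Sketch: the queue is chosen from fn's creation context, so two
    // functions from different contexts may land on different queues.
    void Enqueue(v8::Isolate* isolate, v8::Local<v8::Function> fn) {
      isolate->EnqueueMicrotask(fn);
    }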
void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
@@ -8555,22 +8561,31 @@ void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
void Isolate::SetMicrotasksPolicy(MicrotasksPolicy policy) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->handle_scope_implementer()->set_microtasks_policy(policy);
+ isolate->default_microtask_queue()->set_microtasks_policy(policy);
}
MicrotasksPolicy Isolate::GetMicrotasksPolicy() const {
i::Isolate* isolate =
reinterpret_cast<i::Isolate*>(const_cast<Isolate*>(this));
- return isolate->handle_scope_implementer()->microtasks_policy();
+ return isolate->default_microtask_queue()->microtasks_policy();
}
+namespace {
+
+void MicrotasksCompletedCallbackAdapter(v8::Isolate* isolate, void* data) {
+ auto callback = reinterpret_cast<MicrotasksCompletedCallback>(data);
+ callback(isolate);
+}
+
+} // namespace
void Isolate::AddMicrotasksCompletedCallback(
MicrotasksCompletedCallback callback) {
DCHECK(callback);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->default_microtask_queue()->AddMicrotasksCompletedCallback(callback);
+ isolate->default_microtask_queue()->AddMicrotasksCompletedCallback(
+ &MicrotasksCompletedCallbackAdapter, reinterpret_cast<void*>(callback));
}
@@ -8578,7 +8593,7 @@ void Isolate::RemoveMicrotasksCompletedCallback(
MicrotasksCompletedCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->default_microtask_queue()->RemoveMicrotasksCompletedCallback(
- callback);
+ &MicrotasksCompletedCallbackAdapter, reinterpret_cast<void*>(callback));
}
@@ -8666,7 +8681,6 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
? isolate->thread_manager()->IsLockedByCurrentThread()
: i::ThreadId::Current().Equals(isolate->thread_id());
isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
- isolate->allocator()->MemoryPressureNotification(level);
}
void Isolate::EnableMemorySavingsMode() {
@@ -8753,9 +8767,6 @@ CALLBACK_SETTER(AllowWasmCodeGenerationCallback,
CALLBACK_SETTER(WasmModuleCallback, ExtensionCallback, wasm_module_callback)
CALLBACK_SETTER(WasmInstanceCallback, ExtensionCallback, wasm_instance_callback)
-CALLBACK_SETTER(WasmCompileStreamingCallback, ApiImplementationCallback,
- wasm_compile_streaming_callback)
-
CALLBACK_SETTER(WasmStreamingCallback, WasmStreamingCallback,
wasm_streaming_callback)
@@ -8865,14 +8876,14 @@ void Isolate::VisitHandlesForPartialDependence(
PersistentHandleVisitor* visitor) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::DisallowHeapAllocation no_allocation;
- isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds(visitor);
+ isolate->global_handles()->IterateAllYoungRootsWithClassIds(visitor);
}
void Isolate::VisitWeakHandles(PersistentHandleVisitor* visitor) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::DisallowHeapAllocation no_allocation;
- isolate->global_handles()->IterateWeakRootsInNewSpaceWithClassIds(visitor);
+ isolate->global_handles()->IterateYoungWeakRootsWithClassIds(visitor);
}
void Isolate::SetAllowAtomicsWait(bool allow) {
@@ -8880,51 +8891,79 @@ void Isolate::SetAllowAtomicsWait(bool allow) {
isolate->set_allow_atomics_wait(allow);
}
+void v8::Isolate::DateTimeConfigurationChangeNotification(
+ TimeZoneDetection time_zone_detection) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
+ LOG_API(i_isolate, Isolate, DateTimeConfigurationChangeNotification);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i_isolate->date_cache()->ResetDateCache(
+ static_cast<base::TimezoneCache::TimeZoneDetection>(time_zone_detection));
+#ifdef V8_INTL_SUPPORT
+ i_isolate->clear_cached_icu_object(
+ i::Isolate::ICUObjectCacheType::kDefaultSimpleDateFormat);
+ i_isolate->clear_cached_icu_object(
+ i::Isolate::ICUObjectCacheType::kDefaultSimpleDateFormatForTime);
+ i_isolate->clear_cached_icu_object(
+ i::Isolate::ICUObjectCacheType::kDefaultSimpleDateFormatForDate);
+#endif // V8_INTL_SUPPORT
+}
+
+void v8::Isolate::LocaleConfigurationChangeNotification() {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
+ LOG_API(i_isolate, Isolate, LocaleConfigurationChangeNotification);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+
+#ifdef V8_INTL_SUPPORT
+ i_isolate->ResetDefaultLocale();
+#endif // V8_INTL_SUPPORT
+}
+
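Both notifications are cache resets that embedders invoke when the host environment changes. A minimal usage sketch, assuming the host has just observed a system time-zone or locale change:

    // Sketch: drop V8's cached time-zone and locale data. kRedetect asks
    // the platform/ICU layer to re-read the current zone; kSkip keeps it.
    void OnHostConfigChanged(v8::Isolate* isolate) {
      isolate->DateTimeConfigurationChangeNotification(
          v8::Isolate::TimeZoneDetection::kRedetect);
      isolate->LocaleConfigurationChangeNotification();
    }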
MicrotasksScope::MicrotasksScope(Isolate* isolate, MicrotasksScope::Type type)
+ : MicrotasksScope(
+ isolate,
+ reinterpret_cast<i::Isolate*>(isolate)->default_microtask_queue(),
+ type) {}
+
+MicrotasksScope::MicrotasksScope(Isolate* isolate,
+ MicrotaskQueue* microtask_queue,
+ MicrotasksScope::Type type)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)),
+ microtask_queue_(static_cast<i::MicrotaskQueue*>(microtask_queue)),
run_(type == MicrotasksScope::kRunMicrotasks) {
- auto* microtask_queue = isolate_->default_microtask_queue();
- if (run_) microtask_queue->IncrementMicrotasksScopeDepth();
+ if (run_) microtask_queue_->IncrementMicrotasksScopeDepth();
#ifdef DEBUG
- if (!run_) microtask_queue->IncrementDebugMicrotasksScopeDepth();
+ if (!run_) microtask_queue_->IncrementDebugMicrotasksScopeDepth();
#endif
}
-
MicrotasksScope::~MicrotasksScope() {
- auto handle_scope_implementer = isolate_->handle_scope_implementer();
- auto* microtask_queue = isolate_->default_microtask_queue();
if (run_) {
- microtask_queue->DecrementMicrotasksScopeDepth();
- if (MicrotasksPolicy::kScoped ==
- handle_scope_implementer->microtasks_policy()) {
+ microtask_queue_->DecrementMicrotasksScopeDepth();
+ if (MicrotasksPolicy::kScoped == microtask_queue_->microtasks_policy()) {
PerformCheckpoint(reinterpret_cast<Isolate*>(isolate_));
}
}
#ifdef DEBUG
- if (!run_) microtask_queue->DecrementDebugMicrotasksScopeDepth();
+ if (!run_) microtask_queue_->DecrementDebugMicrotasksScopeDepth();
#endif
}
-
-void MicrotasksScope::PerformCheckpoint(Isolate* v8Isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
+void MicrotasksScope::PerformCheckpoint(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
auto* microtask_queue = isolate->default_microtask_queue();
- if (!microtask_queue->GetMicrotasksScopeDepth() &&
- !microtask_queue->HasMicrotasksSuppressions()) {
- microtask_queue->RunMicrotasks(isolate);
- }
+ microtask_queue->PerformCheckpoint(v8_isolate);
}
-
-int MicrotasksScope::GetCurrentDepth(Isolate* v8Isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
- return isolate->default_microtask_queue()->GetMicrotasksScopeDepth();
+int MicrotasksScope::GetCurrentDepth(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ auto* microtask_queue = isolate->default_microtask_queue();
+ return microtask_queue->GetMicrotasksScopeDepth();
}
-bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8Isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
- return isolate->default_microtask_queue()->IsRunningMicrotasks();
+bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ auto* microtask_queue = isolate->default_microtask_queue();
+ return microtask_queue->IsRunningMicrotasks();
}
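MicrotasksScope now counts scope depth on an explicit queue rather than going through HandleScopeImplementer. A sketch of the new queue-taking constructor, assuming a custom queue was attached to a context via the new Context::New parameter; note that in this snapshot of the code the destructor's kScoped checkpoint still drains the default queue:

    // Sketch: scope depth is tracked on `queue`; scopes constructed with
    // kDoNotRunMicrotasks only bump the debug-only depth counter.
    void RunGuarded(v8::Isolate* isolate, v8::MicrotaskQueue* queue) {
      v8::MicrotasksScope scope(isolate, queue,
                                v8::MicrotasksScope::kRunMicrotasks);
      // ... execute script here ...
    }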
String::Utf8Value::Utf8Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
@@ -9053,6 +9092,18 @@ MaybeLocal<Array> debug::GetInternalProperties(Isolate* v8_isolate,
return Utils::ToLocal(result);
}
+MaybeLocal<Array> debug::GetPrivateFields(Local<Context> context,
+ Local<Object> value) {
+ PREPARE_FOR_EXECUTION(context, debug, GetPrivateFields, Array);
+ i::Handle<i::JSReceiver> val = Utils::OpenHandle(*value);
+ i::Handle<i::JSArray> result;
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ has_pending_exception =
+ !(internal_isolate->debug()->GetPrivateFields(val).ToHandle(&result));
+ RETURN_ON_FAILED_EXECUTION(Array);
+ RETURN_ESCAPED(Utils::ToLocal(result));
+}
+
void debug::ChangeBreakOnException(Isolate* isolate, ExceptionBreakState type) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal_isolate->debug()->ChangeBreakOnException(
@@ -9625,8 +9676,11 @@ debug::Location debug::GeneratorObject::SuspendedLocation() {
CHECK(obj->is_suspended());
i::Object maybe_script = obj->function()->shared()->script();
if (!maybe_script->IsScript()) return debug::Location();
- i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
+ i::Isolate* isolate = obj->GetIsolate();
+ i::Handle<i::Script> script(i::Script::cast(maybe_script), isolate);
i::Script::PositionInfo info;
+ i::SharedFunctionInfo::EnsureSourcePositionsAvailable(
+ isolate, i::handle(obj->function()->shared(), isolate));
i::Script::GetPositionInfo(script, obj->source_position(), &info,
i::Script::WITH_OFFSET);
return debug::Location(info.line, info.column);
@@ -9801,7 +9855,7 @@ debug::Coverage debug::Coverage::CollectBestEffort(Isolate* isolate) {
i::Coverage::CollectBestEffort(reinterpret_cast<i::Isolate*>(isolate)));
}
-void debug::Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
+void debug::Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
i::Coverage::SelectMode(reinterpret_cast<i::Isolate*>(isolate), mode);
}
@@ -9841,7 +9895,7 @@ debug::TypeProfile debug::TypeProfile::Collect(Isolate* isolate) {
}
void debug::TypeProfile::SelectMode(Isolate* isolate,
- debug::TypeProfile::Mode mode) {
+ debug::TypeProfileMode mode) {
i::TypeProfile::SelectMode(reinterpret_cast<i::Isolate*>(isolate), mode);
}
@@ -10004,13 +10058,13 @@ const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
const CpuProfileNode* CpuProfile::GetSample(int index) const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return reinterpret_cast<const CpuProfileNode*>(profile->sample(index));
+ return reinterpret_cast<const CpuProfileNode*>(profile->sample(index).node);
}
int64_t CpuProfile::GetSampleTimestamp(int index) const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return (profile->sample_timestamp(index) - base::TimeTicks())
+ return (profile->sample(index).timestamp - base::TimeTicks())
.InMicroseconds();
}
@@ -10371,19 +10425,6 @@ void HeapProfiler::DeleteAllHeapSnapshots() {
reinterpret_cast<i::HeapProfiler*>(this)->DeleteAllSnapshots();
}
-
-void HeapProfiler::SetWrapperClassInfoProvider(uint16_t class_id,
- WrapperInfoCallback callback) {
- reinterpret_cast<i::HeapProfiler*>(this)->DefineWrapperClass(class_id,
- callback);
-}
-
-void HeapProfiler::SetGetRetainerInfosCallback(
- GetRetainerInfosCallback callback) {
- reinterpret_cast<i::HeapProfiler*>(this)->SetGetRetainerInfosCallback(
- callback);
-}
-
void HeapProfiler::SetBuildEmbedderGraphCallback(
LegacyBuildEmbedderGraphCallback callback) {
reinterpret_cast<i::HeapProfiler*>(this)->AddBuildEmbedderGraphCallback(
@@ -10493,6 +10534,22 @@ void EmbedderHeapTracer::GarbageCollectionForTesting(
kGCCallbackFlagForced);
}
+void EmbedderHeapTracer::RegisterEmbedderReference(
+ const TracedGlobal<v8::Value>& ref) {
+ if (ref.IsEmpty()) return;
+
+ i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
+ heap->RegisterExternallyReferencedObject(
+ reinterpret_cast<i::Address*>(ref.val_));
+}
+
+void EmbedderHeapTracer::IterateTracedGlobalHandles(
+ TracedGlobalHandleVisitor* visitor) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(isolate_);
+ i::DisallowHeapAllocation no_allocation;
+ isolate->global_handles()->IterateTracedNodes(visitor);
+}
+
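RegisterEmbedderReference is how an embedder's tracer tells V8 that a TracedGlobal target is still reachable from the embedder heap. A minimal sketch, assuming the embedder keeps its traced handles in a vector (the helper name is hypothetical):

    #include <vector>

    // Sketch: report every handle the embedder still considers live so the
    // marker treats their targets as roots for this garbage collection.
    void ReportLiveHandles(
        v8::EmbedderHeapTracer* tracer,
        const std::vector<v8::TracedGlobal<v8::Value>>& refs) {
      for (const auto& ref : refs) tracer->RegisterEmbedderReference(ref);
    }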
namespace internal {
const size_t HandleScopeImplementer::kEnteredContextsOffset =
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index d9a0efbf2a..40f4e18025 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -7,7 +7,6 @@
#include "include/v8-testing.h"
#include "src/contexts.h"
-#include "src/debug/debug-interface.h"
#include "src/detachable-vector.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
@@ -28,6 +27,12 @@ namespace internal {
class JSArrayBufferView;
} // namespace internal
+namespace debug {
+class GeneratorObject;
+class Script;
+class WeakMap;
+} // namespace debug
+
// Constants used in the implementation of the API. The most natural thing
// would usually be to place these with the classes that use them, but
// we want to keep them out of v8.h because it is an externally
@@ -66,15 +71,22 @@ class ApiFunction {
class RegisteredExtension {
public:
- explicit RegisteredExtension(Extension* extension);
- static void Register(RegisteredExtension* that);
+ static void Register(Extension*);
+ static void Register(std::unique_ptr<Extension>);
static void UnregisterAll();
- Extension* extension() { return extension_; }
- RegisteredExtension* next() { return next_; }
+ Extension* extension() const {
+ return legacy_unowned_extension_ ? legacy_unowned_extension_
+ : extension_.get();
+ }
+ RegisteredExtension* next() const { return next_; }
static RegisteredExtension* first_extension() { return first_extension_; }
private:
- Extension* extension_;
- RegisteredExtension* next_;
+ explicit RegisteredExtension(Extension*);
+ explicit RegisteredExtension(std::unique_ptr<Extension>);
+ // TODO(clemensh): Remove this after the 7.4 branch.
+ Extension* legacy_unowned_extension_ = nullptr;
+ std::unique_ptr<Extension> extension_;
+ RegisteredExtension* next_ = nullptr;
static RegisteredExtension* first_extension_;
};
@@ -116,7 +128,7 @@ class RegisteredExtension {
V(Context, Context) \
V(External, Object) \
V(StackTrace, FixedArray) \
- V(StackFrame, StackFrameInfo) \
+ V(StackFrame, StackTraceFrame) \
V(Proxy, JSProxy) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
@@ -207,7 +219,7 @@ class Utils {
static inline Local<StackTrace> StackTraceToLocal(
v8::internal::Handle<v8::internal::FixedArray> obj);
static inline Local<StackFrame> StackFrameToLocal(
- v8::internal::Handle<v8::internal::StackFrameInfo> obj);
+ v8::internal::Handle<v8::internal::StackTraceFrame> obj);
static inline Local<Number> NumberToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Integer> IntegerToLocal(
@@ -355,7 +367,6 @@ class HandleScopeImplementer {
: isolate_(isolate),
spare_(nullptr),
call_depth_(0),
- microtasks_policy_(v8::MicrotasksPolicy::kAuto),
last_handle_before_deferred_block_(nullptr) {
}
@@ -388,9 +399,6 @@ class HandleScopeImplementer {
inline void EnterMicrotaskContext(Context context);
- inline void set_microtasks_policy(v8::MicrotasksPolicy policy);
- inline v8::MicrotasksPolicy microtasks_policy() const;
-
// Returns the last entered context or an empty handle if no
// contexts have been entered.
inline Handle<Context> LastEnteredContext();
@@ -459,8 +467,6 @@ class HandleScopeImplementer {
Address* spare_;
int call_depth_;
- v8::MicrotasksPolicy microtasks_policy_;
-
Address* last_handle_before_deferred_block_;
// This is only used for threading support.
HandleScopeData handle_scope_data_;
@@ -478,17 +484,6 @@ class HandleScopeImplementer {
const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
-
-void HandleScopeImplementer::set_microtasks_policy(
- v8::MicrotasksPolicy policy) {
- microtasks_policy_ = policy;
-}
-
-
-v8::MicrotasksPolicy HandleScopeImplementer::microtasks_policy() const {
- return microtasks_policy_;
-}
-
void HandleScopeImplementer::SaveContext(Context context) {
saved_contexts_.push_back(context);
}
diff --git a/deps/v8/src/arguments-inl.h b/deps/v8/src/arguments-inl.h
index ad2b5ca87c..e596d44117 100644
--- a/deps/v8/src/arguments-inl.h
+++ b/deps/v8/src/arguments-inl.h
@@ -8,7 +8,7 @@
#include "src/arguments.h"
#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects-inl.h" // TODO(jkummerow): Just smi-inl.h.
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 041c030933..17a38cbbfe 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -184,24 +184,6 @@ Handle<Code> Assembler::relative_code_target_object_handle_at(
return GetCodeTarget(code_target_index);
}
-template <typename ObjectVisitor>
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTargetMode(mode)) {
- visitor->VisitCodeTarget(host(), this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(host(), this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(host(), this);
- } else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(host(), this);
- } else if (RelocInfo::IsOffHeapTarget(mode)) {
- visitor->VisitOffHeapTarget(host(), this);
- }
-}
-
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) : rmode_(rmode) {
value_.immediate = immediate;
}
@@ -366,7 +348,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
Memory<Address>(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // Assembler::FlushICache(pc, sizeof(target));
+ // FlushInstructionCache(pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pp, #...]
@@ -384,7 +366,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
DCHECK(IsMovW(Memory<int32_t>(pc)));
DCHECK(IsMovT(Memory<int32_t>(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, 2 * kInstrSize);
+ FlushInstructionCache(pc, 2 * kInstrSize);
}
} else if (IsMovImmed(Memory<int32_t>(pc))) {
// This is an mov / orr immediate load. Patch the immediate embedded in
@@ -404,14 +386,14 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
IsOrrImmed(Memory<int32_t>(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory<int32_t>(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, 4 * kInstrSize);
+ FlushInstructionCache(pc, 4 * kInstrSize);
}
} else {
intptr_t branch_offset = target - pc - Instruction::kPcLoadDelta;
Instruction* branch = Instruction::At(pc);
branch->SetBranchOffset(branch_offset);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, kInstrSize);
+ FlushInstructionCache(pc, kInstrSize);
}
}
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index a994b6907d..ce6209c592 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -559,7 +559,9 @@ Assembler::~Assembler() {
DCHECK_EQ(const_pool_blocked_nesting_, 0);
}
-void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
DCHECK(pending_32_bit_constants_.empty());
@@ -569,19 +571,27 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_start_;
- desc->buffer_size = buffer_->size();
- desc->instr_size = pc_offset();
- desc->reloc_size =
- (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
- desc->constant_pool_size = 0;
- desc->origin = this;
- desc->unwinding_info_size = 0;
- desc->unwinding_info = nullptr;
- desc->code_comments_size = code_comments_size;
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
}
-
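The rewritten GetCode derives every table offset backwards from the end of the instruction stream. A worked example with assumed sizes, for a code object with neither a safepoint table nor a handler table (kConstantPoolSize is always 0 on ARM):

    instruction_size       = 1000   (pc_offset(), assumed)
    code_comments_size     = 24     (assumed)
    code_comments_offset   = 1000 - 24 = 976
    constant_pool_offset   = 976 - 0   = 976
    handler_table_offset2  = 976    (handler_table_offset == kNoHandlerTable)
    safepoint_table_offset = 976    (safepoint_table_builder == kNoSafepointTable)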
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 0c14a67707..4528b8efb1 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -53,6 +53,8 @@
namespace v8 {
namespace internal {
+class SafepointTableBuilder;
+
// Coprocessor number
enum Coprocessor {
p0 = 0,
@@ -306,10 +308,17 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
pending_32_bit_constants_.clear();
}
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(Isolate* isolate, CodeDesc* desc);
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -1092,6 +1101,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
+
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index fa9791a0e0..48eaa3484a 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -522,16 +522,16 @@ class Instruction {
inline Condition ConditionField() const {
return static_cast<Condition>(BitField(31, 28));
}
- DECLARE_STATIC_TYPED_ACCESSOR(int, ConditionValue);
- DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
+ DECLARE_STATIC_TYPED_ACCESSOR(int, ConditionValue)
+ DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField)
inline int TypeValue() const { return Bits(27, 25); }
inline int SpecialValue() const { return Bits(27, 23); }
inline int RnValue() const { return Bits(19, 16); }
- DECLARE_STATIC_ACCESSOR(RnValue);
+ DECLARE_STATIC_ACCESSOR(RnValue)
inline int RdValue() const { return Bits(15, 12); }
- DECLARE_STATIC_ACCESSOR(RdValue);
+ DECLARE_STATIC_ACCESSOR(RdValue)
inline int CoprocessorValue() const { return Bits(11, 8); }
// Support for VFP.
@@ -573,7 +573,7 @@ class Instruction {
inline int SValue() const { return Bit(20); }
// with register
inline int RmValue() const { return Bits(3, 0); }
- DECLARE_STATIC_ACCESSOR(RmValue);
+ DECLARE_STATIC_ACCESSOR(RmValue)
inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
inline ShiftOp ShiftField() const {
return static_cast<ShiftOp>(BitField(6, 5));
@@ -583,13 +583,13 @@ class Instruction {
inline int ShiftAmountValue() const { return Bits(11, 7); }
// with immediate
inline int RotateValue() const { return Bits(11, 8); }
- DECLARE_STATIC_ACCESSOR(RotateValue);
+ DECLARE_STATIC_ACCESSOR(RotateValue)
inline int Immed8Value() const { return Bits(7, 0); }
- DECLARE_STATIC_ACCESSOR(Immed8Value);
+ DECLARE_STATIC_ACCESSOR(Immed8Value)
inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtValue() const {
return Immed4Value() << 12 | Offset12Value(); }
- DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue);
+ DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue)
// Fields used in Load/Store instructions
inline int PUValue() const { return Bits(24, 23); }
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 3f82f43e84..35ff085e32 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -35,6 +35,7 @@
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/disasm.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 887a183182..c4140bdaf9 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -99,6 +99,14 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r1 : function template info
+ // r2 : number of arguments (on the stack, not including receiver)
+ Register registers[] = {r1, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments (on the stack, not including receiver)
@@ -203,9 +211,10 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // kTargetContext
- r1, // kApiFunctionAddress
- r2, // kArgc
+ r1, // kApiFunctionAddress
+ r2, // kArgc
+ r3, // kCallData
+ r0, // kHolder
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index acf96b31c2..4690fa7a1e 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -18,6 +18,7 @@
#include "src/double.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
@@ -475,6 +476,22 @@ void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
}
}
+void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
+ Register src1) {
+ DCHECK_NE(dst0, dst1);
+ if (dst0 != src1) {
+ Move(dst0, src0);
+ Move(dst1, src1);
+ } else if (dst1 != src0) {
+ // Swap the order of the moves to resolve the overlap.
+ Move(dst1, src1);
+ Move(dst0, src0);
+ } else {
+ // Worst case scenario, this is a swap.
+ Swap(dst0, src0);
+ }
+}
+
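MovePair has to order the two moves so a destination never overwrites a source that is still needed; when both pairs alias each other, the only option is a swap. The same decision tree in plain C++, with register aliasing modeled as reference identity (a sketch, not V8 code):

    #include <utility>

    void MovePair(int& dst0, int& src0, int& dst1, int& src1) {
      if (&dst0 != &src1) {
        dst0 = src0;            // dst0 holds nothing still needed: plain order
        dst1 = src1;
      } else if (&dst1 != &src0) {
        dst1 = src1;            // flipped order: read src1 before dst0 clobbers it
        dst0 = src0;
      } else {
        std::swap(dst0, src0);  // dst0==src1 and dst1==src0: a true swap
      }
    }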
void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
DCHECK(srcdst0 != srcdst1);
UseScratchRegisterScope temps(this);
@@ -588,41 +605,6 @@ void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
}
}
-void MacroAssembler::Load(Register dst,
- const MemOperand& src,
- Representation r) {
- DCHECK(!r.IsDouble());
- if (r.IsInteger8()) {
- ldrsb(dst, src);
- } else if (r.IsUInteger8()) {
- ldrb(dst, src);
- } else if (r.IsInteger16()) {
- ldrsh(dst, src);
- } else if (r.IsUInteger16()) {
- ldrh(dst, src);
- } else {
- ldr(dst, src);
- }
-}
-
-void MacroAssembler::Store(Register src,
- const MemOperand& dst,
- Representation r) {
- DCHECK(!r.IsDouble());
- if (r.IsInteger8() || r.IsUInteger8()) {
- strb(src, dst);
- } else if (r.IsInteger16() || r.IsUInteger16()) {
- strh(src, dst);
- } else {
- if (r.IsHeapObject()) {
- AssertNotSmi(src);
- } else if (r.IsSmi()) {
- AssertSmi(src);
- }
- str(src, dst);
- }
-}
-
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
ldr(destination,
@@ -735,11 +717,7 @@ void TurboAssembler::CallRecordWriteStub(
Register fp_mode_parameter(
descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
- Push(object);
- Push(address);
-
- Pop(slot_parameter);
- Pop(object_parameter);
+ MovePair(object_parameter, object, slot_parameter, address);
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
@@ -1173,8 +1151,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_high, src_low));
- Label less_than_32;
- Label done;
+
if (shift == 0) {
Move(dst_high, src_high);
Move(dst_low, src_low);
@@ -1222,8 +1199,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
- Label less_than_32;
- Label done;
+
if (shift == 32) {
mov(dst_low, src_high);
mov(dst_high, Operand(0));
@@ -1270,8 +1246,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
- Label less_than_32;
- Label done;
+
if (shift == 32) {
mov(dst_low, src_high);
asr(dst_high, src_high, Operand(31));
@@ -1765,6 +1740,20 @@ void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
cmp(obj, scratch);
}
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit,
+ Label* on_in_range) {
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ sub(scratch, value, Operand(lower_limit));
+ cmp(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ cmp(value, Operand(higher_limit));
+ }
+ b(ls, on_in_range);
+}
+
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
LowDwVfpRegister double_scratch) {
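
MovePair, added above for ARM (and mirrored for ARM64 later in this patch), performs a two-element parallel move: both reads must logically happen before both writes, so the emitter reorders the moves when one destination overlaps the other source, and falls back to a register swap when the two moves form a cycle. A minimal host-side sketch of the same decision logic, using aliased references in place of registers:

    #include <cassert>
    #include <utility>

    // Parallel move: dst0 <- src0 and dst1 <- src1, as if both reads
    // precede both writes. References model register aliasing.
    void MovePair(int& dst0, int& src0, int& dst1, int& src1) {
      assert(&dst0 != &dst1);     // two distinct destinations
      if (&dst0 != &src1) {
        dst0 = src0;              // writing dst0 cannot damage the second move
        dst1 = src1;
      } else if (&dst1 != &src0) {
        dst1 = src1;              // swap the order to resolve the overlap
        dst0 = src0;
      } else {
        std::swap(dst0, src0);    // dst0==src1 and dst1==src0: a pure swap
      }
    }

    int main() {
      int a = 1, b = 2;
      MovePair(b, a, a, b);       // cycle case: must behave as a swap
      assert(a == 2 && b == 1);
    }
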
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 29fa10cfea..64aabea84d 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -430,6 +430,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
mov(dst, src, sbit, cond);
}
}
+ // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
+ void MovePair(Register dst0, Register src0, Register dst1, Register src1);
+
void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
void Move(QwNeonRegister dst, QwNeonRegister src);
@@ -560,36 +563,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Sbfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
- void Load(Register dst, const MemOperand& src, Representation r);
- void Store(Register src, const MemOperand& dst, Representation r);
-
// ---------------------------------------------------------------------------
// GC Support
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object, Register scratch0, Register scratch1,
- Label* has_color, int first_bit, int second_bit);
-
- void JumpIfBlack(Register object, Register scratch0, Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Register scratch3, Label* value_is_white);
-
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
@@ -709,6 +685,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
b(ne, if_not_equal);
}
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range);
+
// Try to convert a double to a signed 32-bit integer.
// Z flag set to one and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result,
@@ -813,12 +794,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag);
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
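
JumpIfIsInRange, declared above and defined in the .cc hunk, relies on the classic unsigned-compare trick: a value lies in [lower_limit, higher_limit] iff (unsigned)(value - lower_limit) <= higher_limit - lower_limit, which costs one subtraction and one comparison, and the subtraction vanishes entirely when the lower limit is zero. A sketch of the same check in plain C++:

    #include <cassert>

    // One-comparison range check. Unsigned wraparound maps everything below
    // lower_limit to a huge value, so a single "<=" covers both bounds; this
    // mirrors the sub/cmp/b(ls) sequence the macro assembler emits.
    bool IsInRange(unsigned value, unsigned lower_limit, unsigned higher_limit) {
      return (value - lower_limit) <= (higher_limit - lower_limit);
    }

    int main() {
      assert(IsInRange(5, 3, 7));
      assert(!IsInRange(2, 3, 7));  // 2 - 3 wraps to a huge unsigned value
      assert(IsInRange(0, 0, 7));   // lower_limit == 0: a plain compare
    }
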
diff --git a/deps/v8/src/arm/register-arm.h b/deps/v8/src/arm/register-arm.h
index 4767e50661..3584a6b19f 100644
--- a/deps/v8/src/arm/register-arm.h
+++ b/deps/v8/src/arm/register-arm.h
@@ -326,12 +326,12 @@ C_REGISTERS(DECLARE_C_REGISTER)
#undef DECLARE_C_REGISTER
// Define {RegisterName} methods for the register types.
-DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS);
-DEFINE_REGISTER_NAMES(SwVfpRegister, FLOAT_REGISTERS);
-DEFINE_REGISTER_NAMES(DwVfpRegister, DOUBLE_REGISTERS);
-DEFINE_REGISTER_NAMES(LowDwVfpRegister, LOW_DOUBLE_REGISTERS);
-DEFINE_REGISTER_NAMES(QwNeonRegister, SIMD128_REGISTERS);
-DEFINE_REGISTER_NAMES(CRegister, C_REGISTERS);
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(SwVfpRegister, FLOAT_REGISTERS)
+DEFINE_REGISTER_NAMES(DwVfpRegister, DOUBLE_REGISTERS)
+DEFINE_REGISTER_NAMES(LowDwVfpRegister, LOW_DOUBLE_REGISTERS)
+DEFINE_REGISTER_NAMES(QwNeonRegister, SIMD128_REGISTERS)
+DEFINE_REGISTER_NAMES(CRegister, C_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = r0;
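
The register-arm.h hunk removes the trailing semicolons after DEFINE_REGISTER_NAMES. Presumably the macro now expands to a complete definition, so a semicolon at the use site becomes an empty declaration that warnings such as -Wextra-semi flag. A minimal illustration with a hypothetical macro (the real DEFINE_REGISTER_NAMES expansion is not shown in this diff):

    // If the macro expansion is already a full definition...
    #define DEFINE_GREETER(NAME) \
      const char* Greet##NAME() { return #NAME; }

    DEFINE_GREETER(Register)      // OK: no trailing semicolon needed
    // DEFINE_GREETER(Register);  // stray ';' -> -Wextra-semi would warn

    int main() { return GreetRegister()[0] == 'R' ? 0 : 1; }
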
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 0ee54c8f5b..ef5215c78d 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -19,13 +19,14 @@
#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/runtime/runtime-utils.h"
+#include "src/vector.h"
// Only build the simulator if not compiling for real ARM hardware.
namespace v8 {
namespace internal {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
- Simulator::GlobalMonitor::Get);
+ Simulator::GlobalMonitor::Get)
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 253fb984f4..fc8e31aac3 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -583,7 +583,7 @@ int Assembler::deserialization_special_target_size(Address location) {
return kSpecialTargetSize;
} else {
DCHECK_EQ(instr->InstructionBits(), 0);
- return kPointerSize;
+ return kSystemPointerSize;
}
}
@@ -598,7 +598,7 @@ void Assembler::deserialization_set_special_target_at(Address location,
target = location;
}
instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
- Assembler::FlushICache(location, kInstrSize);
+ FlushInstructionCache(location, kInstrSize);
} else {
DCHECK_EQ(instr->InstructionBits(), 0);
Memory<Address>(location) = target;
@@ -635,7 +635,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, kInstrSize);
+ FlushInstructionCache(pc, kInstrSize);
}
}
}
@@ -645,7 +645,7 @@ int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
} else {
DCHECK(reinterpret_cast<Instruction*>(pc_)->IsLdrLiteralX());
- return kPointerSize;
+ return kSystemPointerSize;
}
}
@@ -765,24 +765,6 @@ void RelocInfo::WipeOut() {
}
}
-template <typename ObjectVisitor>
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTargetMode(mode)) {
- visitor->VisitCodeTarget(host(), this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(host(), this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(host(), this);
- } else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(host(), this);
- } else if (RelocInfo::IsOffHeapTarget(mode)) {
- visitor->VisitOffHeapTarget(host(), this);
- }
-}
-
LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
DCHECK(rt.IsValid());
if (rt.IsRegister()) {
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index be0a4a9519..8a5a82fba8 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -377,7 +377,7 @@ int ConstPool::WorstCaseSize() {
// blr xzr
// nop
// All entries are 64-bit for now.
- return 4 * kInstrSize + EntryCount() * kPointerSize;
+ return 4 * kInstrSize + EntryCount() * kSystemPointerSize;
}
@@ -395,7 +395,7 @@ int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;
// All entries are 64-bit for now.
- return prologue_size + EntryCount() * kPointerSize;
+ return prologue_size + EntryCount() * kSystemPointerSize;
}
@@ -549,6 +549,7 @@ Assembler::~Assembler() {
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
}
+void Assembler::AbortedCodeGeneration() { constpool_.Clear(); }
void Assembler::Reset() {
#ifdef DEBUG
@@ -589,7 +590,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
}
}
-void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
DCHECK(constpool_.IsEmpty());
@@ -599,21 +602,27 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- if (desc) {
- desc->buffer = buffer_start_;
- desc->buffer_size = buffer_->size();
- desc->instr_size = pc_offset();
- desc->reloc_size = static_cast<int>((buffer_start_ + desc->buffer_size) -
- reloc_info_writer.pos());
- desc->origin = this;
- desc->constant_pool_size = 0;
- desc->unwinding_info_size = 0;
- desc->unwinding_info = nullptr;
- desc->code_comments_size = code_comments_size;
- }
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
}
-
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
@@ -4887,7 +4896,9 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
EmitVeneersGuard();
+#ifdef DEBUG
Label veneer_size_check;
+#endif
std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 54e46c74dd..586eff1241 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -28,6 +28,8 @@
namespace v8 {
namespace internal {
+class SafepointTableBuilder;
+
// -----------------------------------------------------------------------------
// Immediates.
class Immediate {
@@ -260,9 +262,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
virtual ~Assembler();
- virtual void AbortedCodeGeneration() {
- constpool_.Clear();
- }
+ virtual void AbortedCodeGeneration();
// System functions ---------------------------------------------------------
// Start generating code from the beginning of the buffer, discarding any code
@@ -272,13 +272,17 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// constant pool is not blocked.
void Reset();
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- //
- // The descriptor (desc) can be nullptr. In that case, the code is finalized
- // as usual, but the descriptor is not populated.
- void GetCode(Isolate* isolate, CodeDesc* desc);
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@@ -2504,6 +2508,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
+
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
@@ -2844,7 +2851,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Functions used for testing.
int GetConstantPoolEntriesSizeForTesting() const {
// Do not include branch over the pool.
- return constpool_.EntryCount() * kPointerSize;
+ return constpool_.EntryCount() * kSystemPointerSize;
}
static constexpr int GetCheckConstPoolIntervalForTesting() {
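
The new GetCode above computes the layout of the trailing metadata areas backwards from the end of the instruction stream: code comments sit last, the (empty, on this port) constant pool sits just before them, and absent handler and safepoint tables collapse to zero-size regions at the constant-pool offset. A worked sketch of the same arithmetic, with made-up sizes:

    #include <cassert>

    int main() {
      // Assumed sizes, for illustration only.
      const int instruction_size = 1000;  // pc_offset() after emission
      const int code_comments_size = 40;
      const int kConstantPoolSize = 0;    // always empty in this GetCode

      const int code_comments_offset =
          instruction_size - code_comments_size;                       // 960
      const int constant_pool_offset =
          code_comments_offset - kConstantPoolSize;                    // 960
      // No handler table and no safepoint table: both become zero-size
      // regions starting where the constant pool starts.
      const int handler_table_offset = constant_pool_offset;           // 960
      const int safepoint_table_offset = handler_table_offset;         // 960

      assert(safepoint_table_offset <= handler_table_offset &&
             handler_table_offset <= constant_pool_offset &&
             constant_pool_offset <= code_comments_offset);
    }
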
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index 379d7647d7..66ce3f9da4 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -15,7 +15,7 @@ namespace internal {
class CacheLineSizes {
public:
CacheLineSizes() {
-#if defined(USE_SIMULATOR) || defined(V8_OS_WIN)
+#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN)
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
@@ -38,9 +38,10 @@ class CacheLineSizes {
};
void CpuFeatures::FlushICache(void* address, size_t length) {
+#if defined(V8_HOST_ARCH_ARM64)
#if defined(V8_OS_WIN)
- FlushInstructionCache(GetCurrentProcess(), address, length);
-#elif defined(V8_HOST_ARCH_ARM64)
+ ::FlushInstructionCache(GetCurrentProcess(), address, length);
+#else
// The code below assumes user space cache operations are allowed. The goal
// of this routine is to make sure the code generated is visible to the I
// side of the CPU.
@@ -109,6 +110,7 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
// move this code before the code is generated.
: "cc", "memory"
); // NOLINT
+#endif // V8_OS_WIN
#endif // V8_HOST_ARCH_ARM64
}
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 4b6aa1bf93..9e23988624 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -215,7 +215,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// frame description.
__ Add(x3, x1, FrameDescription::frame_content_offset());
__ SlotAddress(x1, 0);
- __ Lsr(unwind_limit, unwind_limit, kPointerSizeLog2);
+ __ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2);
__ Mov(x5, unwind_limit);
__ CopyDoubleWords(x3, x1, x5);
__ Drop(unwind_limit);
@@ -237,19 +237,18 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
+ Label outer_push_loop, outer_loop_header;
__ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
__ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
- __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
+ __ Add(x1, x0, Operand(x1, LSL, kSystemPointerSizeLog2));
__ B(&outer_loop_header);
__ Bind(&outer_push_loop);
Register current_frame = x2;
Register frame_size = x3;
- __ Ldr(current_frame, MemOperand(x0, kPointerSize, PostIndex));
+ __ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex));
__ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
- __ Lsr(frame_size, x3, kPointerSizeLog2);
+ __ Lsr(frame_size, x3, kSystemPointerSizeLog2);
__ Claim(frame_size);
__ Add(x7, current_frame, FrameDescription::frame_content_offset());
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/arm64/frame-constants-arm64.h
index 13a879e8bd..3fb21ed8bc 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/arm64/frame-constants-arm64.h
@@ -37,8 +37,8 @@ class EntryFrameConstants : public AllStatic {
public:
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
- static constexpr int kCallerFPOffset = -3 * kPointerSize;
- static constexpr int kFixedFrameSize = 6 * kPointerSize;
+ static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+ static constexpr int kFixedFrameSize = 6 * kSystemPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
@@ -62,7 +62,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kFixedFrameSizeFromFp =
// Header is padded to 16 byte (see {MacroAssembler::EnterFrame}).
RoundUp<16>(TypedFrameConstants::kFixedFrameSizeFromFp) +
- kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedGpParamRegs * kSystemPointerSize +
kNumberOfSavedFpParamRegs * kDoubleSize;
};
@@ -74,7 +74,7 @@ class JavaScriptFrameConstants : public AllStatic {
// There are two words on the stack (saved fp and saved lr) between fp and
// the arguments.
- static constexpr int kLastParameterOffset = 2 * kPointerSize;
+ static constexpr int kLastParameterOffset = 2 * kSystemPointerSize;
static constexpr int kFunctionOffset =
StandardFrameConstants::kFunctionOffset;
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index ad79b1ec2b..4dc7b4f54a 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -99,6 +99,14 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x1 : function template info
+ // x2 : number of arguments (on the stack, not including receiver)
+ Register registers[] = {x1, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments (on the stack, not including receiver)
@@ -207,9 +215,10 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // kTargetContext
- x1, // kApiFunctionAddress
- x2, // kArgc
+ x1, // kApiFunctionAddress
+ x2, // kArgc
+ x3, // kCallData
+ x0, // kHolder
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 48cd13d5fc..ad0ed8894a 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -15,6 +15,7 @@
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
#include "src/frames-inl.h"
+#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/macro-assembler-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -1470,7 +1471,7 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
Bind(&pointer1_below_pointer2);
Add(pointer1, pointer1, pointer2);
}
- static_assert(kPointerSize == kDRegSize,
+ static_assert(kSystemPointerSize == kDRegSize,
"pointers must be the same size as doubles");
int direction = (mode == kDstLessThanSrc) ? 1 : -1;
@@ -1481,21 +1482,23 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
Label pairs, loop, done;
Tbz(count, 0, &pairs);
- Ldr(temp0, MemOperand(src, direction * kPointerSize, PostIndex));
+ Ldr(temp0, MemOperand(src, direction * kSystemPointerSize, PostIndex));
Sub(count, count, 1);
- Str(temp0, MemOperand(dst, direction * kPointerSize, PostIndex));
+ Str(temp0, MemOperand(dst, direction * kSystemPointerSize, PostIndex));
Bind(&pairs);
if (mode == kSrcLessThanDst) {
// Adjust pointers for post-index ldp/stp with negative offset:
- Sub(dst, dst, kPointerSize);
- Sub(src, src, kPointerSize);
+ Sub(dst, dst, kSystemPointerSize);
+ Sub(src, src, kSystemPointerSize);
}
Bind(&loop);
Cbz(count, &done);
- Ldp(temp0, temp1, MemOperand(src, 2 * direction * kPointerSize, PostIndex));
+ Ldp(temp0, temp1,
+ MemOperand(src, 2 * direction * kSystemPointerSize, PostIndex));
Sub(count, count, 2);
- Stp(temp0, temp1, MemOperand(dst, 2 * direction * kPointerSize, PostIndex));
+ Stp(temp0, temp1,
+ MemOperand(dst, 2 * direction * kSystemPointerSize, PostIndex));
B(&loop);
// TODO(all): large copies may benefit from using temporary Q registers
@@ -1505,11 +1508,11 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
}
void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
- Add(dst, sp, slot_offset << kPointerSizeLog2);
+ Add(dst, sp, slot_offset << kSystemPointerSizeLog2);
}
void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
- Add(dst, sp, Operand(slot_offset, LSL, kPointerSizeLog2));
+ Add(dst, sp, Operand(slot_offset, LSL, kSystemPointerSizeLog2));
}
void TurboAssembler::AssertFPCRState(Register fpcr) {
@@ -1565,6 +1568,22 @@ void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
+void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
+ Register src1) {
+ DCHECK_NE(dst0, dst1);
+ if (dst0 != src1) {
+ Mov(dst0, src0);
+ Mov(dst1, src1);
+ } else if (dst1 != src0) {
+ // Swap the order of the moves to resolve the overlap.
+ Mov(dst1, src1);
+ Mov(dst0, src0);
+ } else {
+ // Worst-case scenario: this is a swap.
+ Swap(dst0, src0);
+ }
+}
+
void TurboAssembler::Swap(Register lhs, Register rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
DCHECK(!lhs.Is(rhs));
@@ -1616,7 +1635,8 @@ void MacroAssembler::AssertConstructor(Register object) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadTaggedPointerField(temp,
+ FieldMemOperand(object, HeapObject::kMapOffset));
Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
Tst(temp, Operand(Map::IsConstructorBit::kMask));
@@ -1656,7 +1676,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
// Load map
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadTaggedPointerField(temp, FieldMemOperand(object, HeapObject::kMapOffset));
Label do_check;
// Load instance type and check if JSGeneratorObject
@@ -1682,7 +1702,8 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
Label done_checking;
AssertNotSmi(object);
JumpIfRoot(object, RootIndex::kUndefinedValue, &done_checking);
- Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadTaggedPointerField(scratch,
+ FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
Bind(&done_checking);
@@ -1848,9 +1869,9 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- Ldr(destination,
- FieldMemOperand(destination,
- FixedArray::kHeaderSize + constant_index * kPointerSize));
+ LoadTaggedPointerField(
+ destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+ constant_index)));
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
@@ -2002,13 +2023,18 @@ void TurboAssembler::Call(ExternalReference target) {
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
STATIC_ASSERT(kSystemPointerSize == 8);
- STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
// The builtin_pointer register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ Lsl(builtin_pointer, builtin_pointer, kSystemPointerSizeLog2 - kSmiShift);
+#else
+ STATIC_ASSERT(kSmiShiftSize == 31);
Asr(builtin_pointer, builtin_pointer, kSmiShift - kSystemPointerSizeLog2);
+#endif
Add(builtin_pointer, builtin_pointer,
IsolateData::builtin_entry_table_offset());
Ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
@@ -2163,23 +2189,25 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
#endif
// Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
+ // after we drop the current frame. We add kSystemPointerSize to count the
+ // receiver argument, which is not included in the formal parameter count.
Register dst_reg = scratch0;
- Add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
- Add(dst_reg, dst_reg, StandardFrameConstants::kCallerSPOffset + kPointerSize);
+ Add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kSystemPointerSizeLog2));
+ Add(dst_reg, dst_reg,
+ StandardFrameConstants::kCallerSPOffset + kSystemPointerSize);
// Round dst_reg up to a multiple of 16 bytes, so that we overwrite any
// potential padding.
Add(dst_reg, dst_reg, 15);
Bic(dst_reg, dst_reg, 15);
Register src_reg = caller_args_count_reg;
- // Calculate the end of source area. +kPointerSize is for the receiver.
+ // Calculate the end of source area. +kSystemPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
- Add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
- Add(src_reg, src_reg, kPointerSize);
+ Add(src_reg, sp,
+ Operand(callee_args_count.reg(), LSL, kSystemPointerSizeLog2));
+ Add(src_reg, src_reg, kSystemPointerSize);
} else {
- Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize);
+ Add(src_reg, sp, (callee_args_count.immediate() + 1) * kSystemPointerSize);
}
// Round src_reg up to a multiple of 16 bytes, so we include any potential
@@ -2206,8 +2234,8 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Label loop, entry;
B(&entry);
bind(&loop);
- Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
- Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+ Ldr(tmp_reg, MemOperand(src_reg, -kSystemPointerSize, PreIndex));
+ Str(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize, PreIndex));
bind(&entry);
Cmp(sp, src_reg);
B(ne, &loop);
@@ -2298,7 +2326,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
: Operand(actual.reg());
Mov(x4, actual_op);
- Ldr(x4, MemOperand(sp, x4, LSL, kPointerSizeLog2));
+ Ldr(x4, MemOperand(sp, x4, LSL, kSystemPointerSizeLog2));
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2352,7 +2380,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedPointerField(code,
+ FieldMemOperand(function, JSFunction::kCodeOffset));
if (flag == CALL_FUNCTION) {
CallCodeObject(code);
} else {
@@ -2378,12 +2407,14 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register expected_reg = x2;
- Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ LoadTaggedPointerField(cp,
+ FieldMemOperand(function, JSFunction::kContextOffset));
// The number of arguments is stored as an int32_t, and -1 is a marker
// (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
// extension to correctly handle it.
- Ldr(expected_reg, FieldMemOperand(function,
- JSFunction::kSharedFunctionInfoOffset));
+ LoadTaggedPointerField(
+ expected_reg,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
Ldrh(expected_reg,
FieldMemOperand(expected_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2404,7 +2435,8 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(function.Is(x1));
// Set up the context.
- Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ LoadTaggedPointerField(cp,
+ FieldMemOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(function, no_reg, expected, actual, flag);
}
@@ -2476,7 +2508,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// type_reg pushed twice for alignment.
Push(lr, fp, type_reg, type_reg);
const int kFrameSize =
- TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize;
+ TypedFrameConstants::kFixedFrameSizeFromFp + kSystemPointerSize;
Add(fp, sp, kFrameSize);
// sp[3] : lr
// sp[2] : fp
@@ -2504,7 +2536,8 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
- Add(fp, sp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ Add(fp, sp,
+ TypedFrameConstants::kFixedFrameSizeFromFp + kSystemPointerSize);
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
@@ -2560,12 +2593,16 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
// sp -> fp[-32]: padding
- STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
- STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
- STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
- STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
- STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
- STATIC_ASSERT((-4 * kPointerSize) == ExitFrameConstants::kPaddingOffset);
+ STATIC_ASSERT((2 * kSystemPointerSize) ==
+ ExitFrameConstants::kCallerSPOffset);
+ STATIC_ASSERT((1 * kSystemPointerSize) ==
+ ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT((0 * kSystemPointerSize) ==
+ ExitFrameConstants::kCallerFPOffset);
+ STATIC_ASSERT((-2 * kSystemPointerSize) == ExitFrameConstants::kSPOffset);
+ STATIC_ASSERT((-3 * kSystemPointerSize) == ExitFrameConstants::kCodeOffset);
+ STATIC_ASSERT((-4 * kSystemPointerSize) ==
+ ExitFrameConstants::kPaddingOffset);
// Save the frame pointer and context pointer in the top frame.
Mov(scratch,
@@ -2575,7 +2612,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
Str(cp, MemOperand(scratch));
- STATIC_ASSERT((-4 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
+ STATIC_ASSERT((-4 * kSystemPointerSize) ==
+ ExitFrameConstants::kLastExitFrameField);
if (save_doubles) {
ExitFramePreserveFPRegs();
}
@@ -2692,7 +2730,7 @@ void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
InstanceType type) {
- Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadTaggedPointerField(map, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(map, type_reg, type);
}
@@ -2733,6 +2771,95 @@ void MacroAssembler::JumpIfNotRoot(const Register& obj, RootIndex index,
B(ne, if_not_equal);
}
+void MacroAssembler::JumpIfIsInRange(const Register& value,
+ unsigned lower_limit,
+ unsigned higher_limit,
+ Label* on_in_range) {
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireW();
+ Sub(scratch, value, Operand(lower_limit));
+ CompareAndBranch(scratch, Operand(higher_limit - lower_limit), ls,
+ on_in_range);
+ } else {
+ CompareAndBranch(value, Operand(higher_limit - lower_limit), ls,
+ on_in_range);
+ }
+}
+
+void TurboAssembler::LoadTaggedPointerField(const Register& destination,
+ const MemOperand& field_operand) {
+#ifdef V8_COMPRESS_POINTERS
+ DecompressTaggedPointer(destination, field_operand);
+#else
+ Ldr(destination, field_operand);
+#endif
+}
+
+void TurboAssembler::LoadAnyTaggedField(const Register& destination,
+ const MemOperand& field_operand) {
+#ifdef V8_COMPRESS_POINTERS
+ DecompressAnyTagged(destination, field_operand);
+#else
+ Ldr(destination, field_operand);
+#endif
+}
+
+void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+ SmiUntag(dst, src);
+}
+
+void TurboAssembler::StoreTaggedField(const Register& value,
+ const MemOperand& dst_field_operand) {
+#ifdef V8_COMPRESS_POINTERS
+ RecordComment("[ StoreTagged");
+ // Zero the upper 32 bits in a temporary so we don't clobber the value register.
+ UseScratchRegisterScope temps(this);
+ Register compressed_value = temps.AcquireX();
+ Uxtw(compressed_value, value);
+ Str(compressed_value, dst_field_operand);
+ RecordComment("]");
+#else
+ Str(value, dst_field_operand);
+#endif
+}
+
+void TurboAssembler::DecompressTaggedSigned(const Register& destination,
+ const MemOperand& field_operand) {
+ RecordComment("[ DecompressTaggedSigned");
+ // TODO(solanes): use Ldrsw instead of Ldr + Sxtw once kTaggedSize is shrunk.
+ Ldr(destination, field_operand);
+ Sxtw(destination, destination);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressTaggedPointer(const Register& destination,
+ const MemOperand& field_operand) {
+ RecordComment("[ DecompressTaggedPointer");
+ // TODO(solanes): use Ldrsw instead of Ldr + Sxtw once kTaggedSize is shrunk.
+ Ldr(destination, field_operand);
+ Add(destination, kRootRegister, Operand(destination, SXTW));
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressAnyTagged(const Register& destination,
+ const MemOperand& field_operand) {
+ RecordComment("[ DecompressAnyTagged");
+ UseScratchRegisterScope temps(this);
+ // TODO(solanes): use Ldrsw instead of Ldr + Sxtw once kTaggedSize is shrunk.
+ Ldr(destination, field_operand);
+ // Branchlessly compute |masked_root|:
+ // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ Register masked_root = temps.AcquireX();
+ // Sign-extend the tag bit to fill the entire register.
+ Sbfx(masked_root, destination, 0, kSmiTagSize);
+ And(masked_root, masked_root, kRootRegister);
+ // Now this add operation will either leave the value unchanged if it is a smi
+ // or add the isolate root if it is a heap object.
+ Add(destination, masked_root, Operand(destination, SXTW));
+ RecordComment("]");
+}
void MacroAssembler::CompareAndSplit(const Register& lhs,
const Operand& rhs,
@@ -2863,13 +2990,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
- // of the object, so offset must be a multiple of kPointerSize.
- DCHECK(IsAligned(offset, kPointerSize));
+ // of the object, so offset must be a multiple of kTaggedSize.
+ DCHECK(IsAligned(offset, kTaggedSize));
Add(scratch, object, offset - kHeapObjectTag);
if (emit_debug_code()) {
Label ok;
- Tst(scratch, kPointerSize - 1);
+ Tst(scratch, kTaggedSize - 1);
B(eq, &ok);
Abort(AbortReason::kUnalignedCellInWriteBarrier);
Bind(&ok);
@@ -2953,9 +3080,7 @@ void TurboAssembler::CallRecordWriteStub(
Register fp_mode_parameter(
descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
- Push(object, address);
-
- Pop(slot_parameter, object_parameter);
+ MovePair(object_parameter, object, slot_parameter, address);
Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
@@ -2985,7 +3110,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- Ldr(temp, MemOperand(address));
+ LoadTaggedPointerField(temp, MemOperand(address));
Cmp(temp, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -3100,8 +3225,8 @@ void TurboAssembler::Abort(AbortReason reason) {
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
- Ldr(dst, NativeContextMemOperand());
- Ldr(dst, ContextMemOperand(dst, index));
+ LoadTaggedPointerField(dst, NativeContextMemOperand());
+ LoadTaggedPointerField(dst, ContextMemOperand(dst, index));
}
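
DecompressAnyTagged in the hunk above must add the isolate root only for heap objects, never for Smis, and does so without a branch: sign-extending the low tag bit (the Sbfx) yields zero for a Smi (tag 0) and all-ones for a heap object (tag 1), which then masks the root register before the final add. A host-side sketch of the bit trick, assuming 64-bit values and V8's tag assignment:

    #include <cassert>
    #include <cstdint>

    // Branchless select of the isolate root, then rebase the sign-extended
    // 32-bit compressed value (the Add ..., SXTW in the generated code).
    uint64_t DecompressAnyTagged(uint64_t root, uint64_t compressed) {
      uint64_t tag_mask = ~((compressed & 1) - 1);  // 0 (Smi) or all-ones
      uint64_t masked_root = tag_mask & root;
      int64_t sign_extended = static_cast<int32_t>(compressed);  // SXTW
      return masked_root + static_cast<uint64_t>(sign_extended);
    }

    int main() {
      const uint64_t root = UINT64_C(0x100000000);  // hypothetical isolate root
      assert(DecompressAnyTagged(root, 0x48) == 0x48);         // Smi: unchanged
      assert(DecompressAnyTagged(root, 0x49) == root + 0x49);  // heap object
    }
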
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index ba1885a248..ab8ac08511 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -213,6 +213,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Remove if not needed.
void Move(Register dst, Smi src);
+ // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
+ void MovePair(Register dst0, Register src0, Register dst1, Register src1);
+
// Register swap. Note that the register operands should be distinct.
void Swap(Register lhs, Register rhs);
void Swap(VRegister lhs, VRegister rhs);
@@ -1175,6 +1178,32 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetSpeculationPoisonRegister();
+ // ---------------------------------------------------------------------------
+ // Pointer compression Support
+
+ // Loads a field containing a HeapObject and decompresses it if pointer
+ // compression is enabled.
+ void LoadTaggedPointerField(const Register& destination,
+ const MemOperand& field_operand);
+
+ // Loads a field containing any tagged value and decompresses it if necessary.
+ void LoadAnyTaggedField(const Register& destination,
+ const MemOperand& field_operand);
+
+ // Loads a field containing a Smi value and untags it.
+ void SmiUntagField(Register dst, const MemOperand& src);
+
+ // Compresses and stores a tagged value to the given on-heap location.
+ void StoreTaggedField(const Register& value,
+ const MemOperand& dst_field_operand);
+
+ void DecompressTaggedSigned(const Register& destination,
+ const MemOperand& field_operand);
+ void DecompressTaggedPointer(const Register& destination,
+ const MemOperand& field_operand);
+ void DecompressAnyTagged(const Register& destination,
+ const MemOperand& field_operand);
+
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
@@ -1811,6 +1840,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(const Register& value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range);
+
// Compare the contents of a register with an operand, and branch to true,
// false or fall through, depending on condition.
void CompareAndSplit(const Register& lhs,
@@ -1962,11 +1996,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const CPURegister& arg3 = NoCPUReg);
private:
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
// Try to represent a double as an int so that integer fast-paths may be
// used. Not every valid integer value is guaranteed to be caught.
// It supports both 32-bit and 64-bit integers depending whether 'as_int'
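
The pointer-compression entry points declared above are thin wrappers: with compression off they are plain 64-bit loads and stores, with it on they truncate to 32 bits on store (the Uxtw) and rebase on the isolate root on load (the Add with SXTW). A simplified model of the round trip these helpers maintain; the real compressed-pointer layout has more detail than this sketch assumes:

    #include <cassert>
    #include <cstdint>

    // Store side: keep only the low 32 bits of the tagged value.
    uint32_t CompressTagged(uint64_t full) {
      return static_cast<uint32_t>(full);
    }

    // Load side: sign-extend the 32-bit value, then add the isolate root.
    uint64_t DecompressTaggedPointer(uint64_t root, uint32_t compressed) {
      return root + static_cast<uint64_t>(static_cast<int64_t>(
                        static_cast<int32_t>(compressed)));
    }

    int main() {
      const uint64_t root = UINT64_C(0x200000000);  // hypothetical isolate root
      const uint64_t heap_ptr = root + 0x1235;      // tagged heap pointer
      uint32_t c = CompressTagged(heap_ptr);
      assert(DecompressTaggedPointer(root, c) == heap_ptr);
    }
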
diff --git a/deps/v8/src/arm64/register-arm64.h b/deps/v8/src/arm64/register-arm64.h
index 77310213f2..008268de59 100644
--- a/deps/v8/src/arm64/register-arm64.h
+++ b/deps/v8/src/arm64/register-arm64.h
@@ -61,6 +61,12 @@ namespace internal {
V(q16) V(q17) V(q18) V(q19) V(q20) V(q21) V(q22) V(q23) \
V(q24) V(q25) V(q26) V(q27) V(q28) V(q29) V(q30) V(q31)
+#define VECTOR_REGISTERS(V) \
+ V(v0) V(v1) V(v2) V(v3) V(v4) V(v5) V(v6) V(v7) \
+ V(v8) V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \
+ V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
+ V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
+
// Register d29 could be allocated, but we keep an even length list here, in
// order to make stack alignment easier for save and restore.
#define ALLOCATABLE_DOUBLE_REGISTERS(R) \
@@ -710,8 +716,9 @@ class CPURegList {
#define kCallerSaved CPURegList::GetCallerSaved()
#define kCallerSavedV CPURegList::GetCallerSavedV()
-// Define a {RegisterName} method for {CPURegister}.
-DEFINE_REGISTER_NAMES(CPURegister, GENERAL_REGISTERS);
+// Define a {RegisterName} method for {Register} and {VRegister}.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(VRegister, VECTOR_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = x0;
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index aa36de4afa..be2c6cdec6 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -58,7 +58,7 @@ TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
- Simulator::GlobalMonitor::Get);
+ Simulator::GlobalMonitor::Get)
// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
void Simulator::TraceSim(const char* format, ...) {
@@ -252,9 +252,9 @@ uintptr_t Simulator::PushAddress(uintptr_t address) {
intptr_t new_sp = sp() - 2 * kXRegSize;
uintptr_t* alignment_slot =
reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
- memcpy(alignment_slot, &kSlotsZapValue, kPointerSize);
+ memcpy(alignment_slot, &kSlotsZapValue, kSystemPointerSize);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
- memcpy(stack_slot, &address, kPointerSize);
+ memcpy(stack_slot, &address, kSystemPointerSize);
set_sp(new_sp);
return new_sp;
}
@@ -2278,7 +2278,8 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
unsigned reg_code = instr->Rd();
int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
: wreg(reg_code);
- new_xn_val = (prev_xn_val & ~(0xFFFFL << shift)) | shifted_imm16;
+ new_xn_val =
+ (prev_xn_val & ~(INT64_C(0xFFFF) << shift)) | shifted_imm16;
break;
}
case MOVZ_w:
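
The simulator fix above replaces 0xFFFFL with INT64_C(0xFFFF). On LLP64 targets such as Windows, long is 32 bits wide, so shifting 0xFFFFL by 32 or more (as MOVZ/MOVK into the upper halfwords of an x register requires) is undefined behavior. A sketch of the well-defined version, with the shift chosen so the signed result stays representable:

    #include <cassert>
    #include <cstdint>

    int main() {
      // MOVK-style masking of halfword 2 (bits 32..47) of a 64-bit register.
      int shift = 32;
      // (0xFFFFL << shift) is UB where long is 32 bits: the shift count
      // meets the width of the promoted type. INT64_C guarantees a 64-bit
      // operand, so the shift below is well defined.
      int64_t mask = ~(INT64_C(0xFFFF) << shift);
      assert(static_cast<uint64_t>(mask) == UINT64_C(0xFFFF0000FFFFFFFF));
    }
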
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 586d65b341..e4e3e09e45 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -860,13 +860,13 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
// Commonly-used special cases.
template<typename T>
void set_lr(T value) {
- DCHECK_EQ(sizeof(T), static_cast<unsigned>(kPointerSize));
+ DCHECK_EQ(sizeof(T), static_cast<unsigned>(kSystemPointerSize));
set_reg(kLinkRegCode, value);
}
template<typename T>
void set_sp(T value) {
- DCHECK_EQ(sizeof(T), static_cast<unsigned>(kPointerSize));
+ DCHECK_EQ(sizeof(T), static_cast<unsigned>(kSystemPointerSize));
set_reg(31, value, Reg31IsStackPointer);
}
diff --git a/deps/v8/src/asan.h b/deps/v8/src/asan.h
index fc0add016e..0713392672 100644
--- a/deps/v8/src/asan.h
+++ b/deps/v8/src/asan.h
@@ -16,12 +16,10 @@
#else // !V8_USE_ADDRESS_SANITIZER
-#define ASAN_POISON_MEMORY_REGION(start, size) \
- static_assert( \
- (std::is_pointer<decltype(start)>::value || \
- std::is_same<v8::internal::Address, decltype(start)>::value) && \
- std::is_convertible<decltype(size), size_t>::value, \
- "static type violation")
+#define ASAN_POISON_MEMORY_REGION(start, size) \
+ static_assert(std::is_pointer<decltype(start)>::value && \
+ std::is_convertible<decltype(size), size_t>::value, \
+ "static type violation")
#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
ASAN_POISON_MEMORY_REGION(start, size)
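
With AddressSanitizer disabled, ASAN_POISON_MEMORY_REGION still type-checks its arguments so that misuse is caught in every build; the hunk above tightens the check to pointers only, dropping the Address alternative. A sketch of the pattern with a hypothetical macro name, where the no-op expands to a static_assert over decltype:

    #include <cstddef>
    #include <type_traits>

    // No-op variant that still validates argument types at compile time.
    #define POISON_REGION(start, size)                                    \
      static_assert(std::is_pointer<decltype(start)>::value &&            \
                        std::is_convertible<decltype(size), size_t>::value, \
                    "static type violation")

    int main() {
      char buf[16];
      POISON_REGION(buf + 4, 8);  // ok: pointer plus integral size
      // POISON_REGION(42, 8);    // would fail: 42 is not a pointer
      (void)buf;
    }
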
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index c242c56389..a84c88546e 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -23,6 +23,7 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
#include "src/unoptimized-compilation-info.h"
+#include "src/vector.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-js.h"
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index df86bf5b9a..8352ec02e2 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -353,6 +353,8 @@ void AsmJsParser::ValidateModule() {
RECURSE(ValidateFunctionTable());
}
RECURSE(ValidateExport());
+ RECURSE(SkipSemicolon());
+ EXPECT_TOKEN('}');
// Check that all functions were eventually defined.
for (auto& info : global_var_info_) {
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index dd8392ddcf..202bac718b 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -11,6 +11,7 @@
#include "src/asmjs/asm-scanner.h"
#include "src/asmjs/asm-types.h"
#include "src/base/enum-set.h"
+#include "src/vector.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/zone/zone-containers.h"
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index 448f8a77d3..f249f2c724 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -16,7 +16,7 @@ namespace {
// Cap number of identifiers to ensure we can assign both global and
// local ones a token id in the range of an int32_t.
static const int kMaxIdentifierCount = 0xF000000;
-};
+} // namespace
AsmJsScanner::AsmJsScanner(Utf16CharacterStream* stream)
: stream_(stream),
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 383d6f67fe..04567377bd 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -37,9 +37,9 @@
#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
#include "src/disassembler.h"
+#include "src/heap/heap-inl.h" // For MemoryAllocator. TODO(jkummerow): Drop.
#include "src/isolate.h"
#include "src/ostreams.h"
-#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/embedded-data.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
@@ -68,7 +68,7 @@ AssemblerOptions AssemblerOptions::Default(
const bool serializer =
isolate->serializer_enabled() || explicitly_support_serialization;
const bool generating_embedded_builtin =
- isolate->ShouldLoadConstantsFromRootList();
+ isolate->IsGeneratingEmbeddedBuiltins();
options.record_reloc_info_for_serialization = serializer;
options.enable_root_array_delta_access =
!serializer && !generating_embedded_builtin;
@@ -161,17 +161,6 @@ AssemblerBase::AssemblerBase(const AssemblerOptions& options,
AssemblerBase::~AssemblerBase() = default;
-void AssemblerBase::FlushICache(void* start, size_t size) {
- if (size == 0) return;
-
-#if defined(USE_SIMULATOR)
- base::MutexGuard lock_guard(Simulator::i_cache_mutex());
- Simulator::FlushICache(Simulator::i_cache(), start, size);
-#else
- CpuFeatures::FlushICache(start, size);
-#endif // USE_SIMULATOR
-}
-
void AssemblerBase::Print(Isolate* isolate) {
StdoutStream os;
v8::internal::Disassembler::Decode(isolate, &os, buffer_start_, pc_);
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 69ab58cdb4..7efaf6a7da 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -247,6 +247,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
+ byte* buffer_start() const { return buffer_->start(); }
+ int buffer_size() const { return buffer_->size(); }
+ int instruction_size() const { return pc_offset(); }
+
// This function is called when code generation is aborted, so that
// the assembler could clean up internal data structures.
virtual void AbortedCodeGeneration() { }
@@ -264,11 +268,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
static const int kMinimalBufferSize = 4*KB;
- static void FlushICache(void* start, size_t size);
- static void FlushICache(Address start, size_t size) {
- return FlushICache(reinterpret_cast<void*>(start), size);
- }
-
protected:
// Add 'target' to the {code_targets_} vector, if necessary, and return the
// offset at which it is stored.
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/assert-scope.cc
index 114942f1d3..3655a5e599 100644
--- a/deps/v8/src/assert-scope.cc
+++ b/deps/v8/src/assert-scope.cc
@@ -16,7 +16,7 @@ namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(base::Thread::LocalStorageKey,
GetPerThreadAssertKey,
- base::Thread::CreateThreadLocalKey());
+ base::Thread::CreateThreadLocalKey())
} // namespace
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index 0a41af7f6a..8d0ad5e0c0 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -133,11 +133,9 @@ typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, true>
typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>
DisallowHeapAllocation;
#ifdef DEBUG
-#define DISALLOW_HEAP_ALLOCATION(name) DisallowHeapAllocation name
-#define DISALLOW_HEAP_ALLOCATION_REF(name) const DisallowHeapAllocation& name
+#define DISALLOW_HEAP_ALLOCATION(name) DisallowHeapAllocation name;
#else
#define DISALLOW_HEAP_ALLOCATION(name)
-#define DISALLOW_HEAP_ALLOCATION_REF(name)
#endif
// Scope to introduce an exception to DisallowHeapAllocation.
@@ -232,6 +230,35 @@ typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, false>
// Scope to introduce an exception to DisallowExceptions.
typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, true>
AllowExceptions;
+
+// Explicit instantiation declarations.
+extern template class PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>;
+extern template class PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>;
+extern template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>;
+extern template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>;
+extern template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>;
+extern template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>;
+extern template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT,
+ false>;
+extern template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT,
+ true>;
+extern template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT,
+ false>;
+extern template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
+
+extern template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>;
+extern template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>;
+extern template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>;
+extern template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>;
+extern template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_DUMP, false>;
+extern template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_DUMP, true>;
+extern template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, false>;
+extern template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, true>;
+extern template class PerIsolateAssertScope<COMPILATION_ASSERT, false>;
+extern template class PerIsolateAssertScope<COMPILATION_ASSERT, true>;
+extern template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, false>;
+extern template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, true>;
+
} // namespace internal
} // namespace v8
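
The assert-scope.h hunk adds explicit instantiation declarations: each extern template line promises that one translation unit (assert-scope.cc, via matching explicit instantiations) owns the instantiation, so every other includer stops generating and later discarding its own copy. A minimal sketch of the pairing with a hypothetical Counter template, condensed into one file for brevity:

    // counter.h (sketch)
    template <typename T>
    struct Counter {
      T value = 0;
      void bump() { ++value; }
    };
    extern template struct Counter<int>;  // suppress implicit instantiation

    // counter.cc (sketch): the single explicit instantiation lives here.
    template struct Counter<int>;

    // any_user.cc: links against the instantiation from counter.cc.
    int main() {
      Counter<int> c;
      c.bump();
      return c.value == 1 ? 0 : 1;
    }
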
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 21986789ba..b4836ff784 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -467,7 +467,7 @@ void AstTraversalVisitor<Subclass>::VisitCompareOperation(
}
template <class Subclass>
-void AstTraversalVisitor<Subclass>::VisitThisFunction(ThisFunction* expr) {
+void AstTraversalVisitor<Subclass>::VisitThisExpression(ThisExpression* expr) {
PROCESS_EXPRESSION(expr);
}
@@ -555,7 +555,6 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitSuperPropertyReference(
SuperPropertyReference* expr) {
PROCESS_EXPRESSION(expr);
- RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
RECURSE_EXPRESSION(Visit(expr->home_object()));
}
@@ -563,7 +562,6 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitSuperCallReference(
SuperCallReference* expr) {
PROCESS_EXPRESSION(expr);
- RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
RECURSE_EXPRESSION(VisitVariableProxy(expr->new_target_var()));
RECURSE_EXPRESSION(VisitVariableProxy(expr->this_function_var()));
}
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 2a35097f9c..94d500c07d 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -124,6 +124,7 @@ bool AstRawString::Compare(void* a, void* b) {
DCHECK_EQ(lhs->Hash(), rhs->Hash());
if (lhs->length() != rhs->length()) return false;
+ if (lhs->length() == 0) return true;
const unsigned char* l = lhs->raw_data();
const unsigned char* r = rhs->raw_data();
size_t length = rhs->length();
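
The AstRawString::Compare fix above adds an early return for zero-length strings before the raw_data pointers are touched; presumably empty strings may carry distinct (or null) data pointers, and with equal hashes and equal lengths of zero the strings are equal by definition. A sketch of the comparator shape with the guard, using a simplified stand-in for AstRawString:

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    struct RawString {
      const unsigned char* data;
      size_t length;
    };

    // Equal-hash candidates: compare lengths first, then bytes, but never
    // dereference data for the vacuous zero-length case (memcmp with a null
    // pointer is undefined even for size 0).
    bool Equals(const RawString& a, const RawString& b) {
      if (a.length != b.length) return false;
      if (a.length == 0) return true;
      return std::memcmp(a.data, b.data, a.length) == 0;
    }

    int main() {
      RawString empty1{nullptr, 0}, empty2{nullptr, 0};
      assert(Equals(empty1, empty2));
    }
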
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 472527bebe..dd557f5ac9 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -202,10 +202,12 @@ class AstBigInt {
F(await, "await") \
F(bigint, "bigint") \
F(boolean, "boolean") \
+ F(computed, "<computed>") \
F(constructor, "constructor") \
F(default, "default") \
F(done, "done") \
F(dot, ".") \
+ F(dot_default, ".default") \
F(dot_for, ".for") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
@@ -235,7 +237,6 @@ class AstBigInt {
F(return, "return") \
F(set, "set") \
F(set_space, "set ") \
- F(star_default_star, "*default*") \
F(string, "string") \
F(symbol, "symbol") \
F(target, "target") \
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 1c1802d602..f70579bd69 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -23,6 +23,7 @@
#include "src/property-details.h"
#include "src/property.h"
#include "src/string-stream.h"
+#include "src/zone/zone-list-inl.h"
namespace v8 {
namespace internal {
@@ -155,8 +156,8 @@ VariableProxy::VariableProxy(Variable* var, int start_position)
: Expression(start_position, kVariableProxy),
raw_name_(var->raw_name()),
next_unresolved_(nullptr) {
- bit_field_ |= IsThisField::encode(var->is_this()) |
- IsAssignedField::encode(false) |
+ DCHECK(!var->is_this());
+ bit_field_ |= IsAssignedField::encode(false) |
IsResolvedField::encode(false) |
HoleCheckModeField::encode(HoleCheckMode::kElided);
BindTo(var);
@@ -171,7 +172,7 @@ VariableProxy::VariableProxy(const VariableProxy* copy_from)
}
void VariableProxy::BindTo(Variable* var) {
- DCHECK((is_this() && var->is_this()) || raw_name() == var->raw_name());
+ DCHECK_EQ(raw_name(), var->raw_name());
set_var(var);
set_is_resolved();
var->set_is_used();
@@ -213,6 +214,18 @@ bool FunctionLiteral::AllowsLazyCompilation() {
return scope()->AllowsLazyCompilation();
}
+bool FunctionLiteral::SafeToSkipArgumentsAdaptor() const {
+ // TODO(bmeurer,verwaest): The --fast_calls_with_arguments_mismatches
+ // is mostly here for checking the real-world impact of the calling
+ // convention. There's not really a point in turning off this flag
+ // otherwise, so we should remove it at some point, when we're done
+ // with the experiments (https://crbug.com/v8/8895).
+ return FLAG_fast_calls_with_arguments_mismatches &&
+ language_mode() == LanguageMode::kStrict &&
+ scope()->arguments() == nullptr &&
+ scope()->rest_parameter() == nullptr;
+}
+
Handle<String> FunctionLiteral::name(Isolate* isolate) const {
return raw_name_ ? raw_name_->string() : isolate->factory()->empty_string();
}
@@ -457,15 +470,10 @@ void ObjectLiteral::BuildBoilerplateDescription(Isolate* isolate) {
has_seen_proto = true;
continue;
}
- if (property->is_computed_name()) {
- continue;
- }
+ if (property->is_computed_name()) continue;
Literal* key = property->key()->AsLiteral();
-
- if (!key->IsPropertyName()) {
- index_keys++;
- }
+ if (!key->IsPropertyName()) index_keys++;
}
Handle<ObjectBoilerplateDescription> boilerplate_description =
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 4f9f083d12..80f76bd6e4 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -11,6 +11,7 @@
#include "src/ast/modules.h"
#include "src/ast/variables.h"
#include "src/bailout-reason.h"
+#include "src/base/threaded-list.h"
#include "src/globals.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
@@ -100,7 +101,7 @@ namespace internal {
V(SuperCallReference) \
V(SuperPropertyReference) \
V(TemplateLiteral) \
- V(ThisFunction) \
+ V(ThisExpression) \
V(Throw) \
V(UnaryOperation) \
V(VariableProxy) \
@@ -483,26 +484,14 @@ inline NestedVariableDeclaration* VariableDeclaration::AsNested() {
class FunctionDeclaration final : public Declaration {
public:
FunctionLiteral* fun() const { return fun_; }
- bool declares_sloppy_block_function() const {
- return DeclaresSloppyBlockFunction::decode(bit_field_);
- }
private:
friend class AstNodeFactory;
- class DeclaresSloppyBlockFunction
- : public BitField<bool, Declaration::kNextBitFieldIndex, 1> {};
-
- FunctionDeclaration(FunctionLiteral* fun, bool declares_sloppy_block_function,
- int pos)
- : Declaration(pos, kFunctionDeclaration), fun_(fun) {
- bit_field_ = DeclaresSloppyBlockFunction::update(
- bit_field_, declares_sloppy_block_function);
- }
+ FunctionDeclaration(FunctionLiteral* fun, int pos)
+ : Declaration(pos, kFunctionDeclaration), fun_(fun) {}
FunctionLiteral* fun_;
-
- static const uint8_t kNextBitFieldIndex = DeclaresSloppyBlockFunction::kNext;
};
@@ -977,14 +966,30 @@ class SloppyBlockFunctionStatement final : public Statement {
public:
Statement* statement() const { return statement_; }
void set_statement(Statement* statement) { statement_ = statement; }
+ Scope* scope() const { return var_->scope(); }
+ Variable* var() const { return var_; }
+ Token::Value init() const { return TokenField::decode(bit_field_); }
+ const AstRawString* name() const { return var_->raw_name(); }
+ SloppyBlockFunctionStatement** next() { return &next_; }
private:
friend class AstNodeFactory;
- SloppyBlockFunctionStatement(int pos, Statement* statement)
- : Statement(pos, kSloppyBlockFunctionStatement), statement_(statement) {}
+ class TokenField
+ : public BitField<Token::Value, Statement::kNextBitFieldIndex, 8> {};
+
+ SloppyBlockFunctionStatement(int pos, Variable* var, Token::Value init,
+ Statement* statement)
+ : Statement(pos, kSloppyBlockFunctionStatement),
+ var_(var),
+ statement_(statement),
+ next_(nullptr) {
+ bit_field_ = TokenField::update(bit_field_, init);
+ }
+ Variable* var_;
Statement* statement_;
+ SloppyBlockFunctionStatement* next_;
};
@@ -1495,11 +1500,15 @@ class ArrayLiteral final : public AggregateLiteral {
enum class HoleCheckMode { kRequired, kElided };
+class ThisExpression final : public Expression {
+ private:
+ friend class AstNodeFactory;
+ ThisExpression() : Expression(kNoSourcePosition, kThisExpression) {}
+};
+
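For context (an illustrative note, not part of the patch): 'this' is a keyword expression rather than an identifier binding, which is why it can be modeled as a singleton ThisExpression node instead of a VariableProxy:

    function f() { return this; }   // parses as a ThisExpression
    // var this = 1;                // SyntaxError: 'this' is not a binding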
class VariableProxy final : public Expression {
public:
- bool IsValidReferenceExpression() const {
- return !is_this() && !is_new_target();
- }
+ bool IsValidReferenceExpression() const { return !is_new_target(); }
Handle<String> name() const { return raw_name()->string(); }
const AstRawString* raw_name() const {
@@ -1520,8 +1529,6 @@ class VariableProxy final : public Expression {
return Scanner::Location(position(), position() + raw_name()->length());
}
- bool is_this() const { return IsThisField::decode(bit_field_); }
-
bool is_assigned() const { return IsAssignedField::decode(bit_field_); }
void set_is_assigned() {
bit_field_ = IsAssignedField::update(bit_field_, true);
@@ -1594,8 +1601,8 @@ class VariableProxy final : public Expression {
: Expression(start_position, kVariableProxy),
raw_name_(name),
next_unresolved_(nullptr) {
- bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
- IsAssignedField::encode(false) |
+ DCHECK_NE(THIS_VARIABLE, variable_kind);
+ bit_field_ |= IsAssignedField::encode(false) |
IsResolvedField::encode(false) |
IsRemovedFromUnresolvedField::encode(false) |
HoleCheckModeField::encode(HoleCheckMode::kElided);
@@ -1603,9 +1610,8 @@ class VariableProxy final : public Expression {
explicit VariableProxy(const VariableProxy* copy_from);
- class IsThisField : public BitField<bool, Expression::kNextBitFieldIndex, 1> {
- };
- class IsAssignedField : public BitField<bool, IsThisField::kNext, 1> {};
+ class IsAssignedField
+ : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class IsResolvedField : public BitField<bool, IsAssignedField::kNext, 1> {};
class IsRemovedFromUnresolvedField
: public BitField<bool, IsResolvedField::kNext, 1> {};
@@ -2190,8 +2196,6 @@ class FunctionLiteral final : public Expression {
kWrapped,
};
- enum IdType { kIdTypeInvalid = -1, kIdTypeTopLevel = 0 };
-
enum ParameterFlag : uint8_t {
kNoDuplicateParameters,
kHasDuplicateParameters
@@ -2226,7 +2230,7 @@ class FunctionLiteral final : public Expression {
}
bool is_oneshot_iife() const { return OneshotIIFEBit::decode(bit_field_); }
bool is_toplevel() const {
- return function_literal_id() == FunctionLiteral::kIdTypeTopLevel;
+ return function_literal_id() == kFunctionLiteralIdTopLevel;
}
bool is_wrapped() const { return function_type() == kWrapped; }
LanguageMode language_mode() const;
@@ -2251,6 +2255,18 @@ class FunctionLiteral final : public Expression {
return false;
}
+ // We can safely skip the arguments adaptor frame setup even
+ // in case of arguments mismatches for strict mode functions,
+ // as long as there's
+ //
+ // 1. no use of the arguments object (either explicitly or
+ // potentially implicitly via a direct eval() call), and
+  // 2. no use of rest parameters in the function.
+ //
+ // See http://bit.ly/v8-faster-calls-with-arguments-mismatch
+ // for the details here (https://crbug.com/v8/8895).
+ bool SafeToSkipArgumentsAdaptor() const;
+
// Returns either name or inferred name as a cstring.
std::unique_ptr<char[]> GetDebugName() const;
@@ -2550,56 +2566,41 @@ class NativeFunctionLiteral final : public Expression {
};
-class ThisFunction final : public Expression {
- private:
- friend class AstNodeFactory;
- explicit ThisFunction(int pos) : Expression(pos, kThisFunction) {}
-};
-
-
class SuperPropertyReference final : public Expression {
public:
- VariableProxy* this_var() const { return this_var_; }
Expression* home_object() const { return home_object_; }
private:
friend class AstNodeFactory;
- SuperPropertyReference(VariableProxy* this_var, Expression* home_object,
- int pos)
- : Expression(pos, kSuperPropertyReference),
- this_var_(this_var),
- home_object_(home_object) {
- DCHECK(this_var->is_this());
+ // We take in ThisExpression* only as a proof that it was accessed.
+ SuperPropertyReference(Expression* home_object, int pos)
+ : Expression(pos, kSuperPropertyReference), home_object_(home_object) {
DCHECK(home_object->IsProperty());
}
- VariableProxy* this_var_;
Expression* home_object_;
};
class SuperCallReference final : public Expression {
public:
- VariableProxy* this_var() const { return this_var_; }
VariableProxy* new_target_var() const { return new_target_var_; }
VariableProxy* this_function_var() const { return this_function_var_; }
private:
friend class AstNodeFactory;
- SuperCallReference(VariableProxy* this_var, VariableProxy* new_target_var,
+ // We take in ThisExpression* only as a proof that it was accessed.
+ SuperCallReference(VariableProxy* new_target_var,
VariableProxy* this_function_var, int pos)
: Expression(pos, kSuperCallReference),
- this_var_(this_var),
new_target_var_(new_target_var),
this_function_var_(this_function_var) {
- DCHECK(this_var->is_this());
DCHECK(new_target_var->raw_name()->IsOneByteEqualTo(".new.target"));
DCHECK(this_function_var->raw_name()->IsOneByteEqualTo(".this_function"));
}
- VariableProxy* this_var_;
VariableProxy* new_target_var_;
VariableProxy* this_function_var_;
};
@@ -2780,6 +2781,7 @@ class AstNodeFactory final {
: zone_(zone),
ast_value_factory_(ast_value_factory),
empty_statement_(new (zone) class EmptyStatement()),
+ this_expression_(new (zone) class ThisExpression()),
failure_expression_(new (zone) class FailureExpression()) {}
AstNodeFactory* ast_node_factory() { return this; }
@@ -2794,10 +2796,8 @@ class AstNodeFactory final {
return new (zone_) NestedVariableDeclaration(scope, pos);
}
- FunctionDeclaration* NewFunctionDeclaration(FunctionLiteral* fun,
- bool is_sloppy_block_function,
- int pos) {
- return new (zone_) FunctionDeclaration(fun, is_sloppy_block_function, pos);
+ FunctionDeclaration* NewFunctionDeclaration(FunctionLiteral* fun, int pos) {
+ return new (zone_) FunctionDeclaration(fun, pos);
}
Block* NewBlock(int capacity, bool ignore_completion_value) {
@@ -2936,12 +2936,18 @@ class AstNodeFactory final {
return empty_statement_;
}
+ class ThisExpression* ThisExpression() {
+ return this_expression_;
+ }
+
class FailureExpression* FailureExpression() {
return failure_expression_;
}
- SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(int pos) {
- return new (zone_) SloppyBlockFunctionStatement(pos, EmptyStatement());
+ SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(
+ int pos, Variable* var, Token::Value init) {
+ return new (zone_)
+ SloppyBlockFunctionStatement(pos, var, init, EmptyStatement());
}
CaseClause* NewCaseClause(Expression* label,
@@ -3143,6 +3149,8 @@ class AstNodeFactory final {
Expression* value,
int pos) {
DCHECK(Token::IsAssignmentOp(op));
+ DCHECK_NOT_NULL(target);
+ DCHECK_NOT_NULL(value);
if (op != Token::INIT && target->IsVariableProxy()) {
target->AsVariableProxy()->set_is_assigned();
@@ -3206,7 +3214,7 @@ class AstNodeFactory final {
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kShouldLazyCompile, 0, /* has_braces */ false,
- FunctionLiteral::kIdTypeTopLevel);
+ kFunctionLiteralIdTopLevel);
}
ClassLiteral::Property* NewClassLiteralProperty(
@@ -3242,22 +3250,16 @@ class AstNodeFactory final {
return new (zone_) DoExpression(block, result, pos);
}
- ThisFunction* NewThisFunction(int pos) {
- return new (zone_) ThisFunction(pos);
- }
-
- SuperPropertyReference* NewSuperPropertyReference(VariableProxy* this_var,
- Expression* home_object,
+ SuperPropertyReference* NewSuperPropertyReference(Expression* home_object,
int pos) {
- return new (zone_) SuperPropertyReference(this_var, home_object, pos);
+ return new (zone_) SuperPropertyReference(home_object, pos);
}
- SuperCallReference* NewSuperCallReference(VariableProxy* this_var,
- VariableProxy* new_target_var,
+ SuperCallReference* NewSuperCallReference(VariableProxy* new_target_var,
VariableProxy* this_function_var,
int pos) {
return new (zone_)
- SuperCallReference(this_var, new_target_var, this_function_var, pos);
+ SuperCallReference(new_target_var, this_function_var, pos);
}
EmptyParentheses* NewEmptyParentheses(int pos) {
@@ -3295,6 +3297,7 @@ class AstNodeFactory final {
Zone* zone_;
AstValueFactory* ast_value_factory_;
class EmptyStatement* empty_statement_;
+ class ThisExpression* this_expression_;
class FailureExpression* failure_expression_;
};
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 0f66ac91ec..d1be965a4a 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -91,20 +91,11 @@ void ModuleDescriptor::AddStarExport(const AstRawString* module_request,
}
namespace {
-
Handle<Object> ToStringOrUndefined(Isolate* isolate, const AstRawString* s) {
return (s == nullptr)
? Handle<Object>::cast(isolate->factory()->undefined_value())
: Handle<Object>::cast(s->string());
}
-
-const AstRawString* FromStringOrUndefined(Isolate* isolate,
- AstValueFactory* avfactory,
- Handle<Object> object) {
- if (object->IsUndefined(isolate)) return nullptr;
- return avfactory->GetString(Handle<String>::cast(object));
-}
-
} // namespace
Handle<ModuleInfoEntry> ModuleDescriptor::Entry::Serialize(
@@ -117,21 +108,6 @@ Handle<ModuleInfoEntry> ModuleDescriptor::Entry::Serialize(
location.beg_pos, location.end_pos);
}
-ModuleDescriptor::Entry* ModuleDescriptor::Entry::Deserialize(
- Isolate* isolate, AstValueFactory* avfactory,
- Handle<ModuleInfoEntry> entry) {
- Entry* result = new (avfactory->zone()) Entry(Scanner::Location::invalid());
- result->export_name = FromStringOrUndefined(
- isolate, avfactory, handle(entry->export_name(), isolate));
- result->local_name = FromStringOrUndefined(
- isolate, avfactory, handle(entry->local_name(), isolate));
- result->import_name = FromStringOrUndefined(
- isolate, avfactory, handle(entry->import_name(), isolate));
- result->module_request = entry->module_request();
- result->cell_index = entry->cell_index();
- return result;
-}
-
Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
Zone* zone) const {
// We serialize regular exports in a way that lets us later iterate over their
@@ -183,29 +159,6 @@ Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
return result;
}
-void ModuleDescriptor::DeserializeRegularExports(
- Isolate* isolate, AstValueFactory* avfactory,
- Handle<ModuleInfo> module_info) {
- for (int i = 0, count = module_info->RegularExportCount(); i < count; ++i) {
- Handle<String> local_name(module_info->RegularExportLocalName(i), isolate);
- int cell_index = module_info->RegularExportCellIndex(i);
- Handle<FixedArray> export_names(module_info->RegularExportExportNames(i),
- isolate);
-
- for (int j = 0, length = export_names->length(); j < length; ++j) {
- Handle<String> export_name(String::cast(export_names->get(j)), isolate);
-
- Entry* entry =
- new (avfactory->zone()) Entry(Scanner::Location::invalid());
- entry->local_name = avfactory->GetString(local_name);
- entry->export_name = avfactory->GetString(export_name);
- entry->cell_index = cell_index;
-
- AddRegularExport(entry);
- }
- }
-}
-
void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
Entry* entry = it->second;
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index 44e86dce42..ebc3e3a288 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -107,12 +107,7 @@ class ModuleDescriptor : public ZoneObject {
module_request(-1),
cell_index(0) {}
- // (De-)serialization support.
- // Note that the location value is not preserved as it's only needed by the
- // parser. (A Deserialize'd entry has an invalid location.)
Handle<ModuleInfoEntry> Serialize(Isolate* isolate) const;
- static Entry* Deserialize(Isolate* isolate, AstValueFactory* avfactory,
- Handle<ModuleInfoEntry> entry);
};
enum CellIndexKind { kInvalid, kExport, kImport };
@@ -191,8 +186,6 @@ class ModuleDescriptor : public ZoneObject {
Handle<FixedArray> SerializeRegularExports(Isolate* isolate,
Zone* zone) const;
- void DeserializeRegularExports(Isolate* isolate, AstValueFactory* avfactory,
- Handle<ModuleInfo> module_info);
private:
ModuleRequestMap module_requests_;
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index a53d07064d..c7f6e3d9f0 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -12,6 +12,7 @@
#include "src/globals.h"
#include "src/objects-inl.h"
#include "src/string-builder-inl.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
@@ -500,8 +501,7 @@ void CallPrinter::VisitImportCallExpression(ImportCallExpression* node) {
Print(")");
}
-void CallPrinter::VisitThisFunction(ThisFunction* node) {}
-
+void CallPrinter::VisitThisExpression(ThisExpression* node) { Print("this"); }
void CallPrinter::VisitSuperPropertyReference(SuperPropertyReference* node) {}
@@ -1391,11 +1391,10 @@ void AstPrinter::VisitImportCallExpression(ImportCallExpression* node) {
Visit(node->argument());
}
-void AstPrinter::VisitThisFunction(ThisFunction* node) {
- IndentedScope indent(this, "THIS-FUNCTION", node->position());
+void AstPrinter::VisitThisExpression(ThisExpression* node) {
+ IndentedScope indent(this, "THIS-EXPRESSION", node->position());
}
-
void AstPrinter::VisitSuperPropertyReference(SuperPropertyReference* node) {
IndentedScope indent(this, "SUPER-PROPERTY-REFERENCE", node->position());
}
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index e6f2766915..e1efdbfb88 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -8,6 +8,7 @@
#include "src/allocation.h"
#include "src/ast/ast.h"
#include "src/base/compiler-specific.h"
+#include "src/function-kind.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 28869cd94a..e625865a11 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -83,28 +83,6 @@ Variable* VariableMap::Lookup(const AstRawString* name) {
return nullptr;
}
-void SloppyBlockFunctionMap::Delegate::set_statement(Statement* statement) {
- if (statement_ != nullptr) {
- statement_->set_statement(statement);
- }
-}
-
-SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
- : ZoneHashMap(8, ZoneAllocationPolicy(zone)), count_(0) {}
-
-void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
- Scope* scope,
- SloppyBlockFunctionStatement* statement) {
- auto* delegate = new (zone) Delegate(scope, statement, count_++);
- // AstRawStrings are unambiguous, i.e., the same string is always represented
- // by the same AstRawString*.
- Entry* p =
- ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->Hash(),
- ZoneAllocationPolicy(zone));
- delegate->set_next(static_cast<SloppyBlockFunctionMap::Delegate*>(p->value));
- p->value = delegate;
-}
-
// ----------------------------------------------------------------------------
// Implementation of Scope
@@ -132,10 +110,8 @@ DeclarationScope::DeclarationScope(Zone* zone,
: Scope(zone), function_kind_(kNormalFunction), params_(4, zone) {
DCHECK_EQ(scope_type_, SCRIPT_SCOPE);
SetDefaults();
-
- // Make sure that if we don't find the global 'this', it won't be declared as
- // a regular dynamic global by predeclaring it with the right variable kind.
- DeclareDynamicGlobal(ast_value_factory->this_string(), THIS_VARIABLE, this);
+ receiver_ = DeclareDynamicGlobal(ast_value_factory->this_string(),
+ THIS_VARIABLE, this);
}
DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
@@ -149,59 +125,19 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
}
ModuleScope::ModuleScope(DeclarationScope* script_scope,
- AstValueFactory* ast_value_factory)
- : DeclarationScope(ast_value_factory->zone(), script_scope, MODULE_SCOPE,
- kModule) {
- Zone* zone = ast_value_factory->zone();
- module_descriptor_ = new (zone) ModuleDescriptor(zone);
+ AstValueFactory* avfactory)
+ : DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE, kModule),
+ module_descriptor_(new (avfactory->zone())
+ ModuleDescriptor(avfactory->zone())) {
set_language_mode(LanguageMode::kStrict);
- DeclareThis(ast_value_factory);
+ DeclareThis(avfactory);
}
ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
AstValueFactory* avfactory)
- : DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info) {
- Zone* zone = avfactory->zone();
- Handle<ModuleInfo> module_info(scope_info->ModuleDescriptorInfo(), isolate);
-
+ : DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info),
+ module_descriptor_(nullptr) {
set_language_mode(LanguageMode::kStrict);
- module_descriptor_ = new (zone) ModuleDescriptor(zone);
-
- // Deserialize special exports.
- Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
- for (int i = 0, n = special_exports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> serialized_entry(
- ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- module_descriptor_->AddSpecialExport(
- ModuleDescriptor::Entry::Deserialize(isolate, avfactory,
- serialized_entry),
- avfactory->zone());
- }
-
- // Deserialize regular exports.
- module_descriptor_->DeserializeRegularExports(isolate, avfactory,
- module_info);
-
- // Deserialize namespace imports.
- Handle<FixedArray> namespace_imports(module_info->namespace_imports(),
- isolate);
- for (int i = 0, n = namespace_imports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> serialized_entry(
- ModuleInfoEntry::cast(namespace_imports->get(i)), isolate);
- module_descriptor_->AddNamespaceImport(
- ModuleDescriptor::Entry::Deserialize(isolate, avfactory,
- serialized_entry),
- avfactory->zone());
- }
-
- // Deserialize regular imports.
- Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
- for (int i = 0, n = regular_imports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> serialized_entry(
- ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
- module_descriptor_->AddRegularImport(ModuleDescriptor::Entry::Deserialize(
- isolate, avfactory, serialized_entry));
- }
}
Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
@@ -262,8 +198,11 @@ void DeclarationScope::SetDefaults() {
force_eager_compilation_ = false;
has_arguments_parameter_ = false;
scope_uses_super_property_ = false;
+ has_checked_syntax_ = false;
+ has_this_reference_ = false;
+ has_this_declaration_ =
+ (is_function_scope() && !is_arrow_scope()) || is_module_scope();
has_rest_ = false;
- sloppy_block_function_map_ = nullptr;
receiver_ = nullptr;
new_target_ = nullptr;
function_ = nullptr;
@@ -319,10 +258,6 @@ bool Scope::HasSimpleParameters() {
return !scope->is_function_scope() || scope->has_simple_parameters();
}
-bool DeclarationScope::ShouldEagerCompile() const {
- return force_eager_compilation_ || should_eager_compile_;
-}
-
void DeclarationScope::set_should_eager_compile() {
should_eager_compile_ = !was_lazily_parsed_;
}
@@ -359,15 +294,16 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
Scope* outer_scope = nullptr;
while (!scope_info.is_null()) {
if (scope_info->scope_type() == WITH_SCOPE) {
- // For scope analysis, debug-evaluate is equivalent to a with scope.
- outer_scope =
- new (zone) Scope(zone, WITH_SCOPE, handle(scope_info, isolate));
-
- // TODO(yangguo): Remove once debug-evaluate properly keeps track of the
- // function scope in which we are evaluating.
if (scope_info->IsDebugEvaluateScope()) {
+ outer_scope = new (zone)
+ DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info, isolate));
outer_scope->set_is_debug_evaluate_scope();
+ } else {
+ // For scope analysis, debug-evaluate is equivalent to a with scope.
+ outer_scope =
+ new (zone) Scope(zone, WITH_SCOPE, handle(scope_info, isolate));
}
+
} else if (scope_info->scope_type() == SCRIPT_SCOPE) {
// If we reach a script scope, it's the outermost scope. Install the
// scope info of this script context onto the existing script scope to
@@ -455,19 +391,9 @@ const ModuleScope* Scope::AsModuleScope() const {
return static_cast<const ModuleScope*>(this);
}
-int Scope::num_parameters() const {
- return is_declaration_scope() ? AsDeclarationScope()->num_parameters() : 0;
-}
-
void DeclarationScope::DeclareSloppyBlockFunction(
- const AstRawString* name, Scope* scope,
- SloppyBlockFunctionStatement* statement) {
- if (sloppy_block_function_map_ == nullptr) {
- sloppy_block_function_map_ =
- new (zone()->New(sizeof(SloppyBlockFunctionMap)))
- SloppyBlockFunctionMap(zone());
- }
- sloppy_block_function_map_->Declare(zone(), name, scope, statement);
+ SloppyBlockFunctionStatement* sloppy_block_function) {
+ sloppy_block_functions_.Add(sloppy_block_function);
}
void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
@@ -477,8 +403,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
DCHECK(HasSimpleParameters() || is_block_scope() || is_being_lazily_parsed_);
DCHECK_EQ(factory == nullptr, is_being_lazily_parsed_);
- SloppyBlockFunctionMap* map = sloppy_block_function_map();
- if (map == nullptr) return;
+ if (sloppy_block_functions_.is_empty()) return;
// In case of complex parameters the current scope is the body scope and the
// parameters are stored in the outer scope.
@@ -486,14 +411,17 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
DCHECK(parameter_scope->is_function_scope() || is_eval_scope() ||
is_script_scope());
- // The declarations need to be added in the order they were seen,
- // so accumulate declared names sorted by index.
- ZoneMap<int, const AstRawString*> names_to_declare(zone());
+ DeclarationScope* decl_scope = this;
+ while (decl_scope->is_eval_scope()) {
+ decl_scope = decl_scope->outer_scope()->GetDeclarationScope();
+ }
+ Scope* outer_scope = decl_scope->outer_scope();
// For each variable which is used as a function declaration in a sloppy
// block,
- for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
- const AstRawString* name = static_cast<AstRawString*>(p->key);
+ for (SloppyBlockFunctionStatement* sloppy_block_function :
+ sloppy_block_functions_) {
+ const AstRawString* name = sloppy_block_function->name();
// If the variable wouldn't conflict with a lexical declaration
// or parameter,
@@ -504,79 +432,52 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
continue;
}
- bool declaration_queued = false;
-
- // Write in assignments to var for each block-scoped function declaration
- auto delegates = static_cast<SloppyBlockFunctionMap::Delegate*>(p->value);
-
- DeclarationScope* decl_scope = this;
- while (decl_scope->is_eval_scope()) {
- decl_scope = decl_scope->outer_scope()->GetDeclarationScope();
- }
- Scope* outer_scope = decl_scope->outer_scope();
-
- for (SloppyBlockFunctionMap::Delegate* delegate = delegates;
- delegate != nullptr; delegate = delegate->next()) {
- // Check if there's a conflict with a lexical declaration
- Scope* query_scope = delegate->scope()->outer_scope();
- Variable* var = nullptr;
- bool should_hoist = true;
-
- // Note that we perform this loop for each delegate named 'name',
- // which may duplicate work if those delegates share scopes.
- // It is not sufficient to just do a Lookup on query_scope: for
- // example, that does not prevent hoisting of the function in
- // `{ let e; try {} catch (e) { function e(){} } }`
- do {
- var = query_scope->LookupInScopeOrScopeInfo(name);
- if (var != nullptr && IsLexicalVariableMode(var->mode())) {
- should_hoist = false;
- break;
- }
- query_scope = query_scope->outer_scope();
- } while (query_scope != outer_scope);
-
- if (!should_hoist) continue;
-
- if (!declaration_queued) {
- declaration_queued = true;
- names_to_declare.insert({delegate->index(), name});
- }
-
- if (factory) {
- DCHECK(!is_being_lazily_parsed_);
- int pos = delegate->position();
- Assignment* assignment = factory->NewAssignment(
- Token::ASSIGN, NewUnresolved(factory, name, pos),
- delegate->scope()->NewUnresolved(factory, name, pos), pos);
- assignment->set_lookup_hoisting_mode(LookupHoistingMode::kLegacySloppy);
- Statement* statement = factory->NewExpressionStatement(assignment, pos);
- delegate->set_statement(statement);
+ // Check if there's a conflict with a lexical declaration
+ Scope* query_scope = sloppy_block_function->scope()->outer_scope();
+ Variable* var = nullptr;
+ bool should_hoist = true;
+
+ // It is not sufficient to just do a Lookup on query_scope: for
+ // example, that does not prevent hoisting of the function in
+ // `{ let e; try {} catch (e) { function e(){} } }`
+ do {
+ var = query_scope->LookupInScopeOrScopeInfo(name);
+ if (var != nullptr && IsLexicalVariableMode(var->mode())) {
+ should_hoist = false;
+ break;
}
- }
- }
+ query_scope = query_scope->outer_scope();
+ } while (query_scope != outer_scope);
- if (names_to_declare.empty()) return;
+ if (!should_hoist) continue;
- for (const auto& index_and_name : names_to_declare) {
- const AstRawString* name = index_and_name.second;
if (factory) {
DCHECK(!is_being_lazily_parsed_);
- VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
- auto declaration = factory->NewVariableDeclaration(kNoSourcePosition);
+ int pos = sloppy_block_function->position();
+ bool ok = true;
bool was_added;
+ auto declaration = factory->NewVariableDeclaration(pos);
// Based on the preceding checks, it doesn't matter what we pass as
// sloppy_mode_block_scope_function_redefinition.
- bool ok = true;
- DeclareVariable(declaration, proxy, VariableMode::kVar, NORMAL_VARIABLE,
- Variable::DefaultInitializationFlag(VariableMode::kVar),
- &was_added, nullptr, &ok);
+ Variable* var = DeclareVariable(
+ declaration, name, pos, VariableMode::kVar, NORMAL_VARIABLE,
+ Variable::DefaultInitializationFlag(VariableMode::kVar), &was_added,
+ nullptr, &ok);
DCHECK(ok);
+ VariableProxy* source =
+ factory->NewVariableProxy(sloppy_block_function->var());
+ VariableProxy* target = factory->NewVariableProxy(var);
+ Assignment* assignment = factory->NewAssignment(
+ sloppy_block_function->init(), target, source, pos);
+ assignment->set_lookup_hoisting_mode(LookupHoistingMode::kLegacySloppy);
+ Statement* statement = factory->NewExpressionStatement(assignment, pos);
+ sloppy_block_function->set_statement(statement);
} else {
DCHECK(is_being_lazily_parsed_);
bool was_added;
Variable* var = DeclareVariableName(name, VariableMode::kVar, &was_added);
- var->set_maybe_assigned();
+ if (sloppy_block_function->init() == Token::ASSIGN)
+ var->set_maybe_assigned();
}
}
}
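The rewritten loop implements the Annex B web-compat semantics for sloppy-mode block-level function declarations. An illustrative sloppy-mode JavaScript sketch of the behavior being preserved:

    console.log(typeof f);   // "undefined": 'f' is var-hoisted but unassigned
    {
      function f() {}        // assigns the hoisted var when evaluated
    }
    console.log(typeof f);   // "function"

    let g;
    {
      function g() {}        // not hoisted: it would conflict with 'let g'
    }
    console.log(typeof g);   // "undefined": the outer lexical 'g' is untouched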
@@ -605,8 +506,7 @@ bool DeclarationScope::Analyze(ParseInfo* info) {
// 1) top-level code,
// 2) a function/eval/module on the top-level
// 3) a function/eval in a scope that was already resolved.
- DCHECK(scope->scope_type() == SCRIPT_SCOPE ||
- scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
+ DCHECK(scope->is_script_scope() || scope->outer_scope()->is_script_scope() ||
scope->outer_scope()->already_resolved_);
// The outer scope is never lazy.
@@ -633,20 +533,16 @@ bool DeclarationScope::Analyze(ParseInfo* info) {
}
void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
- DCHECK(!already_resolved_);
- DCHECK(is_declaration_scope());
DCHECK(has_this_declaration());
bool derived_constructor = IsDerivedConstructor(function_kind_);
- bool was_added;
- Variable* var =
- Declare(zone(), ast_value_factory->this_string(),
- derived_constructor ? VariableMode::kConst : VariableMode::kVar,
- THIS_VARIABLE,
- derived_constructor ? kNeedsInitialization : kCreatedInitialized,
- kNotAssigned, &was_added);
- DCHECK(was_added);
- receiver_ = var;
+
+ receiver_ = new (zone())
+ Variable(this, ast_value_factory->this_string(),
+ derived_constructor ? VariableMode::kConst : VariableMode::kVar,
+ THIS_VARIABLE,
+ derived_constructor ? kNeedsInitialization : kCreatedInitialized,
+ kNotAssigned);
}
void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
@@ -844,8 +740,9 @@ void Scope::ReplaceOuterScope(Scope* outer) {
Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
DCHECK(!scope_info_.is_null());
DCHECK_NULL(cache->variables_.Lookup(name));
+ DisallowHeapAllocation no_gc;
- Handle<String> name_handle = name->string();
+ String name_handle = *name->string();
// The Scope is backed up by ScopeInfo. This means it cannot operate in a
// heap-independent mode, and all strings must be internalized immediately. So
// it's ok to get the Handle<String> here.
@@ -859,12 +756,12 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
{
location = VariableLocation::CONTEXT;
- index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
+ index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode,
&init_flag, &maybe_assigned_flag);
found = index >= 0;
}
- if (!found && scope_type() == MODULE_SCOPE) {
+ if (!found && is_module_scope()) {
location = VariableLocation::MODULE;
index = scope_info_->ModuleIndex(name_handle, &mode, &init_flag,
&maybe_assigned_flag);
@@ -872,7 +769,7 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
}
if (!found) {
- index = scope_info_->FunctionContextSlotIndex(*name_handle);
+ index = scope_info_->FunctionContextSlotIndex(name_handle);
if (index < 0) return nullptr; // Nowhere found.
Variable* var = AsDeclarationScope()->DeclareFunctionVar(name, cache);
DCHECK_EQ(VariableMode::kConst, var->mode());
@@ -880,18 +777,14 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
return cache->variables_.Lookup(name);
}
- VariableKind kind = NORMAL_VARIABLE;
- if (location == VariableLocation::CONTEXT &&
- index == scope_info_->ReceiverContextSlotIndex()) {
- kind = THIS_VARIABLE;
+ if (!is_module_scope()) {
+ DCHECK_NE(index, scope_info_->ReceiverContextSlotIndex());
}
- // TODO(marja, rossberg): Correctly declare FUNCTION, CLASS, NEW_TARGET, and
- // ARGUMENTS bindings as their corresponding VariableKind.
bool was_added;
Variable* var =
- cache->variables_.Declare(zone(), this, name, mode, kind, init_flag,
- maybe_assigned_flag, &was_added);
+ cache->variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
+ init_flag, maybe_assigned_flag, &was_added);
DCHECK(was_added);
var->AllocateTo(location, index);
return var;
@@ -953,15 +846,29 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
mode == VariableMode::kVar || mode == VariableMode::kLet ||
mode == VariableMode::kConst);
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
- return Declare(zone(), name, mode, kind, init_flag, kNotAssigned, was_added);
+ Variable* var =
+ Declare(zone(), name, mode, kind, init_flag, kNotAssigned, was_added);
+
+ // Pessimistically assume that top-level variables will be assigned and used.
+ //
+ // Top-level variables in a script can be accessed by other scripts or even
+ // become global properties. While this does not apply to top-level variables
+ // in a module (assuming they are not exported), we must still mark these as
+ // assigned because they might be accessed by a lazily parsed top-level
+ // function, which, for efficiency, we preparse without variable tracking.
+ if (is_script_scope() || is_module_scope()) {
+ if (mode != VariableMode::kConst) var->set_maybe_assigned();
+ var->set_is_used();
+ }
+
+ return var;
}
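The pessimism described in the comment above exists because several scripts can share one global scope, so assignments from elsewhere are invisible at compile time. An illustrative sketch (file names hypothetical):

    // script1.js
    var counter = 0;
    // script2.js, loaded into the same global scope
    counter = 42;   // not visible while compiling script1.js, so the
                    // compiler must assume 'counter' may be assigned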
-// TODO(leszeks): Avoid passing the proxy into here, passing the raw_name alone
-// instead.
Variable* Scope::DeclareVariable(
- Declaration* declaration, VariableProxy* proxy, VariableMode mode,
- VariableKind kind, InitializationFlag init, bool* was_added,
- bool* sloppy_mode_block_scope_function_redefinition, bool* ok) {
+ Declaration* declaration, const AstRawString* name, int pos,
+ VariableMode mode, VariableKind kind, InitializationFlag init,
+ bool* was_added, bool* sloppy_mode_block_scope_function_redefinition,
+ bool* ok) {
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_);
DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
@@ -969,7 +876,7 @@ Variable* Scope::DeclareVariable(
if (mode == VariableMode::kVar && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariable(
- declaration, proxy, mode, kind, init, was_added,
+ declaration, name, pos, mode, kind, init, was_added,
sloppy_mode_block_scope_function_redefinition, ok);
}
DCHECK(!is_catch_scope());
@@ -977,19 +884,7 @@ Variable* Scope::DeclareVariable(
DCHECK(is_declaration_scope() ||
(IsLexicalVariableMode(mode) && is_block_scope()));
- DCHECK_NOT_NULL(proxy->raw_name());
- const AstRawString* name = proxy->raw_name();
-
- // Pessimistically assume that top-level variables will be assigned.
- //
- // Top-level variables in a script can be accessed by other scripts or even
- // become global properties. While this does not apply to top-level variables
- // in a module (assuming they are not exported), we must still mark these as
- // assigned because they might be accessed by a lazily parsed top-level
- // function, which, for efficiency, we preparse without variable tracking.
- if (is_script_scope() || is_module_scope()) {
- if (mode != VariableMode::kConst) proxy->set_is_assigned();
- }
+ DCHECK_NOT_NULL(name);
Variable* var = LookupLocal(name);
// Declare the variable in the declaration scope.
@@ -1002,7 +897,9 @@ Variable* Scope::DeclareVariable(
// The proxy is bound to a lookup variable to force a dynamic declaration
// using the DeclareEvalVar or DeclareEvalFunction runtime functions.
DCHECK_EQ(NORMAL_VARIABLE, kind);
- var = NonLocal(proxy->raw_name(), VariableMode::kDynamic);
+ var = NonLocal(name, VariableMode::kDynamic);
+ // Mark the var as used in case anyone outside the eval wants to use it.
+ var->set_is_used();
} else {
// Declare the name.
var = DeclareLocal(name, mode, kind, was_added, init);
@@ -1029,16 +926,9 @@ Variable* Scope::DeclareVariable(
// In harmony we treat re-declarations as early errors. See ES5 16 for a
// definition of early errors.
//
- // Allow duplicate function decls for web compat, see bug 4693. If the
- // duplication is allowed, then the var will show up in the
- // SloppyBlockFunctionMap.
- SloppyBlockFunctionMap* map =
- GetDeclarationScope()->sloppy_block_function_map();
- *ok =
- map != nullptr && declaration->IsFunctionDeclaration() &&
- declaration->AsFunctionDeclaration()
- ->declares_sloppy_block_function() &&
- map->Lookup(const_cast<AstRawString*>(name), name->Hash()) != nullptr;
+ // Allow duplicate function decls for web compat, see bug 4693.
+ *ok = var->is_sloppy_block_function() &&
+ kind == SLOPPY_BLOCK_FUNCTION_VARIABLE;
*sloppy_mode_block_scope_function_redefinition = *ok;
}
}
@@ -1055,7 +945,6 @@ Variable* Scope::DeclareVariable(
// lead to repeated DeclareEvalVar or DeclareEvalFunction calls.
decls_.Add(declaration);
declaration->set_var(var);
- proxy->BindTo(var);
return var;
}
@@ -1079,12 +968,16 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
Variable* var = DeclareLocal(name, mode, kind, was_added);
if (!*was_added) {
if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(var->mode())) {
- // Duplicate functions are allowed in the sloppy mode, but if this is not
- // a function declaration, it's an error. This is an error PreParser
- // hasn't previously detected.
- return nullptr;
+ if (!var->is_sloppy_block_function() ||
+ kind != SLOPPY_BLOCK_FUNCTION_VARIABLE) {
+ // Duplicate functions are allowed in the sloppy mode, but if this is
+ // not a function declaration, it's an error. This is an error PreParser
+ // hasn't previously detected.
+ return nullptr;
+ }
+ // Sloppy block function redefinition.
}
- if (mode == VariableMode::kVar) var->set_maybe_assigned();
+ var->set_maybe_assigned();
}
var->set_is_used();
return var;
@@ -1142,36 +1035,61 @@ Variable* Scope::NewTemporary(const AstRawString* name,
return var;
}
-Declaration* Scope::CheckConflictingVarDeclarations() {
+Declaration* DeclarationScope::CheckConflictingVarDeclarations() {
+ if (has_checked_syntax_) return nullptr;
for (Declaration* decl : decls_) {
// Lexical vs lexical conflicts within the same scope have already been
// captured in Parser::Declare. The only conflicts we still need to check
// are lexical vs nested var.
- Scope* current = nullptr;
if (decl->IsVariableDeclaration() &&
decl->AsVariableDeclaration()->AsNested() != nullptr) {
- current = decl->AsVariableDeclaration()->AsNested()->scope();
- } else if (is_eval_scope() && is_sloppy(language_mode())) {
- if (IsLexicalVariableMode(decl->var()->mode())) continue;
- current = outer_scope_;
+ Scope* current = decl->AsVariableDeclaration()->AsNested()->scope();
+ DCHECK(decl->var()->mode() == VariableMode::kVar ||
+ decl->var()->mode() == VariableMode::kDynamic);
+ // Iterate through all scopes until the declaration scope.
+ do {
+ // There is a conflict if there exists a non-VAR binding.
+ if (current->is_catch_scope()) {
+ current = current->outer_scope();
+ continue;
+ }
+ Variable* other_var = current->LookupLocal(decl->var()->raw_name());
+ if (other_var != nullptr) {
+ DCHECK(IsLexicalVariableMode(other_var->mode()));
+ return decl;
+ }
+ current = current->outer_scope();
+ } while (current != this);
}
- if (current == nullptr) continue;
- DCHECK(decl->var()->mode() == VariableMode::kVar ||
- decl->var()->mode() == VariableMode::kDynamic);
+ }
+
+ if (V8_LIKELY(!is_eval_scope())) return nullptr;
+ if (!is_sloppy(language_mode())) return nullptr;
+
+ // Var declarations in sloppy eval are hoisted to the first non-eval
+  // declaration scope. Check for conflicts between the eval scope and
+  // that declaration scope.
+ Scope* end = this;
+ do {
+ end = end->outer_scope_->GetDeclarationScope();
+ } while (end->is_eval_scope());
+ end = end->outer_scope_;
+
+ for (Declaration* decl : decls_) {
+ if (IsLexicalVariableMode(decl->var()->mode())) continue;
+ Scope* current = outer_scope_;
// Iterate through all scopes until and including the declaration scope.
- while (true) {
- // There is a conflict if there exists a non-VAR binding.
+ do {
+ // There is a conflict if there exists a non-VAR binding up to the
+ // declaration scope in which this sloppy-eval runs.
Variable* other_var =
current->LookupInScopeOrScopeInfo(decl->var()->raw_name());
if (other_var != nullptr && IsLexicalVariableMode(other_var->mode())) {
+ DCHECK(!current->is_catch_scope());
return decl;
}
- if (current->is_declaration_scope() &&
- !(current->is_eval_scope() && is_sloppy(current->language_mode()))) {
- break;
- }
current = current->outer_scope();
- }
+ } while (current != end);
}
return nullptr;
}
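The two conflict classes handled above, as illustrative JavaScript fragments (each rejected or allowed as noted):

    let x;
    { var x; }               // SyntaxError: the nested var meets the lexical 'x'

    let y;
    eval("var y = 1;");      // throws: the sloppy-eval var hoists out of the
                             // eval scope and collides with the lexical 'y'

    try {} catch (e) {
      var e;                 // allowed: simple catch parameters are skipped
    }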
@@ -1188,6 +1106,21 @@ const AstRawString* Scope::FindVariableDeclaredIn(Scope* scope,
return nullptr;
}
+void DeclarationScope::DeserializeReceiver(AstValueFactory* ast_value_factory) {
+ if (is_script_scope()) {
+ DCHECK_NOT_NULL(receiver_);
+ return;
+ }
+ DCHECK(has_this_declaration());
+ DeclareThis(ast_value_factory);
+ if (is_debug_evaluate_scope()) {
+ receiver_->AllocateTo(VariableLocation::LOOKUP, -1);
+ } else {
+ receiver_->AllocateTo(VariableLocation::CONTEXT,
+ scope_info_->ReceiverContextSlotIndex());
+ }
+}
+
bool DeclarationScope::AllocateVariables(ParseInfo* info) {
// Module variables must be allocated before variable resolution
// to ensure that UpdateNeedsHoleCheck() can detect import variables.
@@ -1197,11 +1130,28 @@ bool DeclarationScope::AllocateVariables(ParseInfo* info) {
DCHECK(info->pending_error_handler()->has_pending_error());
return false;
}
- AllocateVariablesRecursively();
+
+  // Don't allocate variables of preparsed scopes.
+ if (!was_lazily_parsed()) AllocateVariablesRecursively();
return true;
}
+bool Scope::HasThisReference() const {
+ if (is_declaration_scope() && AsDeclarationScope()->has_this_reference()) {
+ return true;
+ }
+
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ if (!scope->is_declaration_scope() ||
+ !scope->AsDeclarationScope()->has_this_declaration()) {
+ if (scope->HasThisReference()) return true;
+ }
+ }
+
+ return false;
+}
+
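HasThisReference deliberately looks through inner scopes that do not declare their own 'this'. An arrow function is the canonical case (illustrative sketch):

    function outer() {
      return () => this;   // the arrow scope has no own 'this'; this
    }                      // reference counts against outer()'s receiver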
bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
const Scope* outer) const {
// If none of the outer scopes need to decide whether to context allocate
@@ -1227,7 +1177,10 @@ bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
}
bool DeclarationScope::AllowsLazyCompilation() const {
- return !force_eager_compilation_;
+ // Functions which force eager compilation and class member initializer
+ // functions are not lazily compilable.
+ return !force_eager_compilation_ &&
+ !IsClassMembersInitializerFunction(function_kind());
}
int Scope::ContextChainLength(Scope* scope) const {
@@ -1294,9 +1247,9 @@ bool Scope::ShouldBanArguments() {
DeclarationScope* Scope::GetReceiverScope() {
Scope* scope = this;
- while (!scope->is_script_scope() &&
- (!scope->is_function_scope() ||
- scope->AsDeclarationScope()->is_arrow_scope())) {
+ while (!scope->is_declaration_scope() ||
+ (!scope->is_script_scope() &&
+ !scope->AsDeclarationScope()->has_this_declaration())) {
scope = scope->outer_scope();
}
return scope->AsDeclarationScope();
@@ -1310,77 +1263,103 @@ Scope* Scope::GetOuterScopeWithContext() {
return scope;
}
-void Scope::CollectNonLocals(DeclarationScope* max_outer_scope,
- Isolate* isolate, ParseInfo* info,
- Handle<StringSet>* non_locals) {
- // Module variables must be allocated before variable resolution
- // to ensure that UpdateNeedsHoleCheck() can detect import variables.
- if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
+namespace {
+bool WasLazilyParsed(Scope* scope) {
+ return scope->is_declaration_scope() &&
+ scope->AsDeclarationScope()->was_lazily_parsed();
+}
- // Lazy parsed declaration scopes are already partially analyzed. If there are
- // unresolved references remaining, they just need to be resolved in outer
- // scopes.
- Scope* lookup =
- is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
- ? outer_scope()
- : this;
-
- for (VariableProxy* proxy : unresolved_list_) {
- DCHECK(!proxy->is_resolved());
- Variable* var =
- Lookup<kParsedScope>(proxy, lookup, max_outer_scope->outer_scope());
- if (var == nullptr) {
- *non_locals = StringSet::Add(isolate, *non_locals, proxy->name());
+} // namespace
+
+template <typename FunctionType>
+void Scope::ForEach(FunctionType callback) {
+ Scope* scope = this;
+ while (true) {
+ Iteration iteration = callback(scope);
+ // Try to descend into inner scopes first.
+ if ((iteration == Iteration::kDescend) && scope->inner_scope_ != nullptr) {
+ scope = scope->inner_scope_;
} else {
- // In this case we need to leave scopes in a way that they can be
- // allocated. If we resolved variables from lazy parsed scopes, we need
- // to context allocate the var.
- ResolveTo(info, proxy, var);
- if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
+ // Find the next outer scope with a sibling.
+ while (scope->sibling_ == nullptr) {
+ if (scope == this) return;
+ scope = scope->outer_scope_;
+ }
+ if (scope == this) return;
+ scope = scope->sibling_;
}
}
+}
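A rough JavaScript sketch of the iterative traversal above, over a first-child/next-sibling tree; the node shape (inner, sibling, outer) is illustrative, mirroring the Scope pointers:

    function forEach(root, callback) {
      let node = root;
      while (true) {
        const descend = callback(node);        // kDescend vs. kContinue
        if (descend && node.inner !== null) {
          node = node.inner;                   // visit children first
        } else {
          while (node.sibling === null) {      // climb until a sibling exists
            if (node === root) return;
            node = node.outer;
          }
          if (node === root) return;           // never step past the root
          node = node.sibling;
        }
      }
    }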
- // Clear unresolved_list_ as it's in an inconsistent state.
- unresolved_list_.Clear();
+void Scope::CollectNonLocals(DeclarationScope* max_outer_scope,
+ Isolate* isolate, ParseInfo* info,
+ Handle<StringSet>* non_locals) {
+ this->ForEach([max_outer_scope, isolate, info, non_locals](Scope* scope) {
+ // Module variables must be allocated before variable resolution
+ // to ensure that UpdateNeedsHoleCheck() can detect import variables.
+ if (scope->is_module_scope()) {
+ scope->AsModuleScope()->AllocateModuleVariables();
+ }
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- scope->CollectNonLocals(max_outer_scope, isolate, info, non_locals);
- }
+ // Lazy parsed declaration scopes are already partially analyzed. If there
+ // are unresolved references remaining, they just need to be resolved in
+ // outer scopes.
+ Scope* lookup = WasLazilyParsed(scope) ? scope->outer_scope() : scope;
+
+ for (VariableProxy* proxy : scope->unresolved_list_) {
+ DCHECK(!proxy->is_resolved());
+ Variable* var =
+ Lookup<kParsedScope>(proxy, lookup, max_outer_scope->outer_scope());
+ if (var == nullptr) {
+ *non_locals = StringSet::Add(isolate, *non_locals, proxy->name());
+ } else {
+ // In this case we need to leave scopes in a way that they can be
+ // allocated. If we resolved variables from lazy parsed scopes, we need
+ // to context allocate the var.
+ scope->ResolveTo(info, proxy, var);
+ if (!var->is_dynamic() && lookup != scope)
+ var->ForceContextAllocation();
+ }
+ }
+
+ // Clear unresolved_list_ as it's in an inconsistent state.
+ scope->unresolved_list_.Clear();
+ return Iteration::kDescend;
+ });
}
void Scope::AnalyzePartially(DeclarationScope* max_outer_scope,
AstNodeFactory* ast_node_factory,
UnresolvedList* new_unresolved_list) {
- DCHECK_IMPLIES(is_declaration_scope(),
- !AsDeclarationScope()->was_lazily_parsed());
-
- for (VariableProxy* proxy = unresolved_list_.first(); proxy != nullptr;
- proxy = proxy->next_unresolved()) {
- DCHECK(!proxy->is_resolved());
- Variable* var =
- Lookup<kParsedScope>(proxy, this, max_outer_scope->outer_scope());
- if (var == nullptr) {
- // Don't copy unresolved references to the script scope, unless it's a
- // reference to a private name or method. In that case keep it so we
- // can fail later.
- if (!max_outer_scope->outer_scope()->is_script_scope() ||
- proxy->IsPrivateName()) {
- VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
- new_unresolved_list->Add(copy);
+ this->ForEach([max_outer_scope, ast_node_factory,
+ new_unresolved_list](Scope* scope) {
+ DCHECK_IMPLIES(scope->is_declaration_scope(),
+ !scope->AsDeclarationScope()->was_lazily_parsed());
+
+ for (VariableProxy* proxy = scope->unresolved_list_.first();
+ proxy != nullptr; proxy = proxy->next_unresolved()) {
+ DCHECK(!proxy->is_resolved());
+ Variable* var =
+ Lookup<kParsedScope>(proxy, scope, max_outer_scope->outer_scope());
+ if (var == nullptr) {
+ // Don't copy unresolved references to the script scope, unless it's a
+ // reference to a private name or method. In that case keep it so we
+ // can fail later.
+ if (!max_outer_scope->outer_scope()->is_script_scope() ||
+ proxy->IsPrivateName()) {
+ VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
+ new_unresolved_list->Add(copy);
+ }
+ } else {
+ var->set_is_used();
+ if (proxy->is_assigned()) var->set_maybe_assigned();
}
- } else {
- var->set_is_used();
- if (proxy->is_assigned()) var->set_maybe_assigned();
}
- }
- // Clear unresolved_list_ as it's in an inconsistent state.
- unresolved_list_.Clear();
-
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- scope->AnalyzePartially(max_outer_scope, ast_node_factory,
- new_unresolved_list);
- }
+ // Clear unresolved_list_ as it's in an inconsistent state.
+ scope->unresolved_list_.Clear();
+ return Iteration::kDescend;
+ });
}
Handle<StringSet> DeclarationScope::CollectNonLocals(
@@ -1399,7 +1378,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
locals_.Clear();
inner_scope_ = nullptr;
unresolved_list_.Clear();
- sloppy_block_function_map_ = nullptr;
+ sloppy_block_functions_.Clear();
rare_data_ = nullptr;
has_rest_ = false;
@@ -1428,14 +1407,25 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
was_lazily_parsed_ = !aborted;
}
-void Scope::SavePreparseData(Parser* parser) {
- if (PreparseDataBuilder::ScopeIsSkippableFunctionScope(this)) {
- AsDeclarationScope()->SavePreparseDataForDeclarationScope(parser);
- }
+bool Scope::IsSkippableFunctionScope() {
+ // Lazy non-arrow function scopes are skippable. Lazy functions are exactly
+ // those Scopes which have their own PreparseDataBuilder object. This
+ // logic ensures that the scope allocation data is consistent with the
+ // skippable function data (both agree on where the lazy function boundaries
+ // are).
+ if (!is_function_scope()) return false;
+ DeclarationScope* declaration_scope = AsDeclarationScope();
+ return !declaration_scope->is_arrow_scope() &&
+ declaration_scope->preparse_data_builder() != nullptr;
+}
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- scope->SavePreparseData(parser);
- }
+void Scope::SavePreparseData(Parser* parser) {
+ this->ForEach([parser](Scope* scope) {
+ if (scope->IsSkippableFunctionScope()) {
+ scope->AsDeclarationScope()->SavePreparseDataForDeclarationScope(parser);
+ }
+ return Iteration::kDescend;
+ });
}
void DeclarationScope::SavePreparseDataForDeclarationScope(Parser* parser) {
@@ -1685,27 +1675,26 @@ void Scope::Print(int n) {
}
void Scope::CheckScopePositions() {
- // Visible leaf scopes must have real positions.
- if (!is_hidden() && inner_scope_ == nullptr) {
- DCHECK_NE(kNoSourcePosition, start_position());
- DCHECK_NE(kNoSourcePosition, end_position());
- }
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- scope->CheckScopePositions();
- }
+ this->ForEach([](Scope* scope) {
+ // Visible leaf scopes must have real positions.
+ if (!scope->is_hidden() && scope->inner_scope_ == nullptr) {
+ DCHECK_NE(kNoSourcePosition, scope->start_position());
+ DCHECK_NE(kNoSourcePosition, scope->end_position());
+ }
+ return Iteration::kDescend;
+ });
}
void Scope::CheckZones() {
DCHECK(!needs_migration_);
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- if (scope->is_declaration_scope() &&
- scope->AsDeclarationScope()->was_lazily_parsed()) {
+ this->ForEach([](Scope* scope) {
+ if (WasLazilyParsed(scope)) {
DCHECK_NULL(scope->zone());
DCHECK_NULL(scope->inner_scope_);
- continue;
+ return Iteration::kContinue;
}
- scope->CheckZones();
- }
+ return Iteration::kDescend;
+ });
}
#endif // DEBUG
@@ -1803,16 +1792,6 @@ template Variable* Scope::Lookup<Scope::kDeserializedScope>(
VariableProxy* proxy, Scope* scope, Scope* outer_scope_end,
Scope* entry_point, bool force_context_allocation);
-namespace {
-bool CanBeShadowed(Scope* scope, Variable* var) {
- if (var == nullptr) return false;
-
- // "this" can't be shadowed by "eval"-introduced bindings or by "with" scopes.
- // TODO(wingo): There are other variables in this category; add them.
- return !var->is_this();
-}
-}; // namespace
-
Variable* Scope::LookupWith(VariableProxy* proxy, Scope* scope,
Scope* outer_scope_end, Scope* entry_point,
bool force_context_allocation) {
@@ -1825,7 +1804,7 @@ Variable* Scope::LookupWith(VariableProxy* proxy, Scope* scope,
: Lookup<kDeserializedScope>(proxy, scope->outer_scope_,
outer_scope_end, entry_point);
- if (!CanBeShadowed(scope, var)) return var;
+ if (var == nullptr) return var;
// The current scope is a with scope, so the variable binding can not be
// statically resolved. However, note that it was necessary to do a lookup
@@ -1859,7 +1838,7 @@ Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope,
nullptr, force_context_allocation)
: Lookup<kDeserializedScope>(proxy, scope->outer_scope_,
outer_scope_end, entry);
- if (!CanBeShadowed(scope, var)) return var;
+ if (var == nullptr) return var;
// A variable binding may have been found in an outer scope, but the current
// scope makes a sloppy 'eval' call, so the found variable may not be the
@@ -1949,12 +1928,6 @@ void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
return SetNeedsHoleCheck(var, proxy);
}
- if (var->is_this()) {
- DCHECK(IsDerivedConstructor(scope->GetClosureScope()->function_kind()));
- // TODO(littledan): implement 'this' hole check elimination.
- return SetNeedsHoleCheck(var, proxy);
- }
-
// We should always have valid source positions.
DCHECK_NE(var->initializer_position(), kNoSourcePosition);
DCHECK_NE(proxy->position(), kNoSourcePosition);
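The removed branch covered the 'this' hole check in derived constructors; with 'this' no longer a VariableProxy, the receiver is still declared with kNeedsInitialization for derived constructors (see DeclareThis above), so the language semantics stay as in this illustrative sketch:

    class A {}
    class B extends A {
      constructor() {
        this.x = 1;   // ReferenceError: 'this' holds the hole until super()
        super();
      }
    }
    new B();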
@@ -1994,16 +1967,50 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
proxy->BindTo(var);
}
+bool Scope::ResolvePreparsedVariable(VariableProxy* proxy, Scope* scope,
+ Scope* end) {
+ // Resolve the variable in all parsed scopes to force context allocation.
+ for (; scope != end; scope = scope->outer_scope_) {
+ Variable* var = scope->LookupLocal(proxy->raw_name());
+ if (var != nullptr) {
+ var->set_is_used();
+ if (!var->is_dynamic()) {
+ var->ForceContextAllocation();
+ if (proxy->is_assigned()) var->set_maybe_assigned();
+ }
+ return true;
+ }
+ }
+
+ if (!proxy->IsPrivateName()) return true;
+
+  // If we're resolving a private name, throw an exception if we didn't manage
+  // to resolve it. In case of eval, also look in all outer scope-info backed
+ // scopes except for the script scope. Don't throw an exception if a reference
+ // was found.
+ Scope* start = scope;
+ for (; !scope->is_script_scope(); scope = scope->outer_scope_) {
+ if (scope->LookupInScopeInfo(proxy->raw_name(), start) != nullptr) {
+ return true;
+ }
+ }
+
+ return false;
+}
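The failure path above corresponds to an unresolved private name, which must surface as an early error. Illustrative sketch:

    class C {
      #x = 1;
      getY() {
        return this.#y;   // SyntaxError: #y is not declared in any
      }                   // enclosing class
    }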
+
bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
DCHECK(info->script_scope()->is_script_scope());
// Lazy parsed declaration scopes are already partially analyzed. If there are
// unresolved references remaining, they just need to be resolved in outer
// scopes.
- if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
+ if (WasLazilyParsed(this)) {
DCHECK_EQ(variables_.occupancy(), 0);
+ Scope* end = info->scope();
+ // Resolve in all parsed scopes except for the script scope.
+ if (!end->is_script_scope()) end = end->outer_scope();
+
for (VariableProxy* proxy : unresolved_list_) {
- Variable* var = Lookup<kParsedScope>(proxy, outer_scope(), nullptr);
- if (var == nullptr) {
+ if (!ResolvePreparsedVariable(proxy, outer_scope(), end)) {
info->pending_error_handler()->ReportMessageAt(
proxy->position(), proxy->position() + 1,
MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name(),
@@ -2011,11 +2018,6 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
DCHECK(proxy->IsPrivateName());
return false;
}
- if (!var->is_dynamic()) {
- var->set_is_used();
- var->ForceContextAllocation();
- if (proxy->is_assigned()) var->set_maybe_assigned();
- }
}
} else {
// Resolve unresolved variables for this scope.
@@ -2037,7 +2039,7 @@ bool Scope::MustAllocate(Variable* var) {
// Give var a read/write use if there is a chance it might be accessed
// via an eval() call. This is only possible if the variable has a
// visible name.
- if ((var->is_this() || !var->raw_name()->IsEmpty()) &&
+ if (!var->raw_name()->IsEmpty() &&
(inner_scope_calls_eval_ || is_catch_scope() || is_script_scope())) {
var->set_is_used();
if (inner_scope_calls_eval_) var->set_maybe_assigned();
@@ -2118,18 +2120,15 @@ void DeclarationScope::AllocateParameterLocals() {
}
void DeclarationScope::AllocateParameter(Variable* var, int index) {
- if (MustAllocate(var)) {
- if (has_forced_context_allocation_for_parameters() ||
- MustAllocateInContext(var)) {
- DCHECK(var->IsUnallocated() || var->IsContextSlot());
- if (var->IsUnallocated()) {
- AllocateHeapSlot(var);
- }
- } else {
- DCHECK(var->IsUnallocated() || var->IsParameter());
- if (var->IsUnallocated()) {
- var->AllocateTo(VariableLocation::PARAMETER, index);
- }
+ if (!MustAllocate(var)) return;
+ if (has_forced_context_allocation_for_parameters() ||
+ MustAllocateInContext(var)) {
+ DCHECK(var->IsUnallocated() || var->IsContextSlot());
+ if (var->IsUnallocated()) AllocateHeapSlot(var);
+ } else {
+ DCHECK(var->IsUnallocated() || var->IsParameter());
+ if (var->IsUnallocated()) {
+ var->AllocateTo(VariableLocation::PARAMETER, index);
}
}
}
@@ -2142,7 +2141,7 @@ void DeclarationScope::AllocateReceiver() {
}
void Scope::AllocateNonParameterLocal(Variable* var) {
- DCHECK(var->scope() == this);
+ DCHECK_EQ(var->scope(), this);
if (var->IsUnallocated() && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
AllocateHeapSlot(var);
@@ -2201,51 +2200,47 @@ void ModuleScope::AllocateModuleVariables() {
}
void Scope::AllocateVariablesRecursively() {
- DCHECK(!already_resolved_);
-
- // Don't allocate variables of preparsed scopes.
- if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
- return;
- }
-
- // Allocate variables for inner scopes.
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- scope->AllocateVariablesRecursively();
- }
-
- DCHECK(!already_resolved_);
- DCHECK_EQ(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
+ this->ForEach([](Scope* scope) -> Iteration {
+ DCHECK(!scope->already_resolved_);
+ if (WasLazilyParsed(scope)) return Iteration::kContinue;
+ DCHECK_EQ(Context::MIN_CONTEXT_SLOTS, scope->num_heap_slots_);
+
+ // Allocate variables for this scope.
+ // Parameters must be allocated first, if any.
+ if (scope->is_declaration_scope()) {
+ if (scope->is_function_scope()) {
+ scope->AsDeclarationScope()->AllocateParameterLocals();
+ }
+ scope->AsDeclarationScope()->AllocateReceiver();
+ }
+ scope->AllocateNonParameterLocalsAndDeclaredGlobals();
+
+ // Force allocation of a context for this scope if necessary. For a 'with'
+ // scope and for a function scope that makes an 'eval' call we need a
+ // context, even if no local variables were statically allocated in the
+ // scope. Likewise for modules and function scopes representing asm.js
+ // modules. Also force a context, if the scope is stricter than the outer
+ // scope.
+ bool must_have_context =
+ scope->is_with_scope() || scope->is_module_scope() ||
+ scope->IsAsmModule() || scope->ForceContextForLanguageMode() ||
+ (scope->is_function_scope() &&
+ scope->AsDeclarationScope()->calls_sloppy_eval()) ||
+ (scope->is_block_scope() && scope->is_declaration_scope() &&
+ scope->AsDeclarationScope()->calls_sloppy_eval());
- // Allocate variables for this scope.
- // Parameters must be allocated first, if any.
- if (is_declaration_scope()) {
- if (is_function_scope()) {
- AsDeclarationScope()->AllocateParameterLocals();
+ // If we didn't allocate any locals in the local context, then we only
+ // need the minimal number of slots if we must have a context.
+ if (scope->num_heap_slots_ == Context::MIN_CONTEXT_SLOTS &&
+ !must_have_context) {
+ scope->num_heap_slots_ = 0;
}
- AsDeclarationScope()->AllocateReceiver();
- }
- AllocateNonParameterLocalsAndDeclaredGlobals();
-
- // Force allocation of a context for this scope if necessary. For a 'with'
- // scope and for a function scope that makes an 'eval' call we need a context,
- // even if no local variables were statically allocated in the scope.
- // Likewise for modules and function scopes representing asm.js modules.
- // Also force a context, if the scope is stricter than the outer scope.
- bool must_have_context =
- is_with_scope() || is_module_scope() || IsAsmModule() ||
- ForceContextForLanguageMode() ||
- (is_function_scope() && AsDeclarationScope()->calls_sloppy_eval()) ||
- (is_block_scope() && is_declaration_scope() &&
- AsDeclarationScope()->calls_sloppy_eval());
-
- // If we didn't allocate any locals in the local context, then we only
- // need the minimal number of slots if we must have a context.
- if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS && !must_have_context) {
- num_heap_slots_ = 0;
- }
-
- // Allocation done.
- DCHECK(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
+
+ // Allocation done.
+ DCHECK(scope->num_heap_slots_ == 0 ||
+ scope->num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
+ return Iteration::kDescend;
+ });
}
void Scope::AllocateScopeInfosRecursively(Isolate* isolate,
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 971cfc519b..732d759757 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -8,6 +8,8 @@
#include "src/ast/ast.h"
#include "src/base/compiler-specific.h"
#include "src/base/hashmap.h"
+#include "src/base/threaded-list.h"
+#include "src/function-kind.h"
#include "src/globals.h"
#include "src/objects.h"
#include "src/pointer-with-payload.h"
@@ -43,37 +45,6 @@ class VariableMap: public ZoneHashMap {
void Add(Zone* zone, Variable* var);
};
-
-// Sloppy block-scoped function declarations to var-bind
-class SloppyBlockFunctionMap : public ZoneHashMap {
- public:
- class Delegate : public ZoneObject {
- public:
- Delegate(Scope* scope, SloppyBlockFunctionStatement* statement, int index)
- : scope_(scope), statement_(statement), next_(nullptr), index_(index) {}
- void set_statement(Statement* statement);
-
- void set_next(Delegate* next) { next_ = next; }
- Delegate* next() const { return next_; }
- Scope* scope() const { return scope_; }
- int index() const { return index_; }
- int position() const { return statement_->position(); }
-
- private:
- Scope* scope_;
- SloppyBlockFunctionStatement* statement_;
- Delegate* next_;
- int index_;
- };
-
- explicit SloppyBlockFunctionMap(Zone* zone);
- void Declare(Zone* zone, const AstRawString* name, Scope* scope,
- SloppyBlockFunctionStatement* statement);
-
- private:
- int count_;
-};
-
class Scope;
template <>
@@ -111,9 +82,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
typedef base::ThreadedList<VariableProxy, VariableProxy::UnresolvedNext>
UnresolvedList;
- // TODO(verwaest): Is this needed on Scope?
- int num_parameters() const;
-
DeclarationScope* AsDeclarationScope();
const DeclarationScope* AsDeclarationScope() const;
ModuleScope* AsModuleScope();
@@ -225,8 +193,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
VariableKind kind, bool* was_added,
InitializationFlag init_flag = kCreatedInitialized);
- Variable* DeclareVariable(Declaration* declaration, VariableProxy* proxy,
- VariableMode mode, VariableKind kind,
+ Variable* DeclareVariable(Declaration* declaration, const AstRawString* name,
+ int pos, VariableMode mode, VariableKind kind,
InitializationFlag init, bool* was_added,
bool* sloppy_mode_block_scope_function_redefinition,
bool* ok);
@@ -280,14 +248,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// TODO(verwaest): Move to DeclarationScope?
Variable* NewTemporary(const AstRawString* name);
- // ---------------------------------------------------------------------------
- // Illegal redeclaration support.
-
- // Check if the scope has conflicting var
- // declarations, i.e. a var declaration that has been hoisted from a nested
- // scope over a let binding of the same name.
- Declaration* CheckConflictingVarDeclarations();
-
// Find variable with (variable->mode() <= |mode_limit|) that was declared in
// |scope|. This is used to catch patterns like `try{}catch(e){let e;}` and
// function([e]) { let e }, which are errors even though the two 'e's are each
@@ -421,6 +381,33 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return num_heap_slots() > 0;
}
+ // Use Scope::ForEach for depth first traversal of scopes.
+ // Before:
+ // void Scope::VisitRecursively() {
+ // DoSomething();
+ // for (Scope* s = inner_scope_; s != nullptr; s = s->sibling_) {
+ // if (s->ShouldContinue()) continue;
+ // s->VisitRecursively();
+ // }
+ // }
+ //
+ // After:
+ // void Scope::VisitIteratively() {
+ // this->ForEach([](Scope* s) {
+ // s->DoSomething();
+ // return s->ShouldContinue() ? kContinue : kDescend;
+ // });
+ // }
+ template <typename FunctionType>
+ V8_INLINE void ForEach(FunctionType callback);
+ enum Iteration {
+ // Continue the iteration on the same level, do not recurse/descend into
+ // inner scopes.
+ kContinue,
+ // Recurse/descend into inner scopes.
+ kDescend
+ };
+
// ---------------------------------------------------------------------------
// Accessors.
@@ -488,6 +475,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Find the innermost outer scope that needs a context.
Scope* GetOuterScopeWithContext();
+ bool HasThisReference() const;
+
// Analyze() must have been called once to create the ScopeInfo.
Handle<ScopeInfo> scope_info() const {
DCHECK(!scope_info_.is_null());
@@ -513,6 +502,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool HasSimpleParameters();
void set_is_debug_evaluate_scope() { is_debug_evaluate_scope_ = true; }
bool is_debug_evaluate_scope() const { return is_debug_evaluate_scope_; }
+ bool IsSkippableFunctionScope();
bool RemoveInnerScope(Scope* inner_scope) {
DCHECK_NOT_NULL(inner_scope);
@@ -599,6 +589,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
static Variable* LookupSloppyEval(VariableProxy* proxy, Scope* scope,
Scope* outer_scope_end, Scope* entry_point,
bool force_context_allocation);
+ static bool ResolvePreparsedVariable(VariableProxy* proxy, Scope* scope,
+ Scope* end);
void ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var);
V8_WARN_UNUSED_RESULT bool ResolveVariable(ParseInfo* info,
VariableProxy* proxy);
@@ -618,14 +610,15 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Variable allocation.
void AllocateStackSlot(Variable* var);
- void AllocateHeapSlot(Variable* var);
+ V8_INLINE void AllocateHeapSlot(Variable* var);
void AllocateNonParameterLocal(Variable* var);
void AllocateDeclaredGlobal(Variable* var);
- void AllocateNonParameterLocalsAndDeclaredGlobals();
+ V8_INLINE void AllocateNonParameterLocalsAndDeclaredGlobals();
void AllocateVariablesRecursively();
void AllocateScopeInfosRecursively(Isolate* isolate,
MaybeHandle<ScopeInfo> outer_scope);
+
void AllocateDebuggerScopeInfos(Isolate* isolate,
MaybeHandle<ScopeInfo> outer_scope);
@@ -766,6 +759,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
return var;
}
+ void DeserializeReceiver(AstValueFactory* ast_value_factory);
+
#ifdef DEBUG
void set_is_being_lazily_parsed(bool is_being_lazily_parsed) {
is_being_lazily_parsed_ = is_being_lazily_parsed;
@@ -779,7 +774,23 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
zone_ = zone;
}
- bool ShouldEagerCompile() const;
+ // ---------------------------------------------------------------------------
+ // Illegal redeclaration support.
+
+ // Check if the scope has conflicting var
+ // declarations, i.e. a var declaration that has been hoisted from a nested
+ // scope over a let binding of the same name.
+ Declaration* CheckConflictingVarDeclarations();
+
+ void set_has_checked_syntax(bool has_checked_syntax) {
+ has_checked_syntax_ = has_checked_syntax;
+ }
+ bool has_checked_syntax() const { return has_checked_syntax_; }
+
+ bool ShouldEagerCompile() const {
+ return force_eager_compilation_ || should_eager_compile_;
+ }
+
void set_should_eager_compile();
void SetScriptScopeInfo(Handle<ScopeInfo> scope_info) {
@@ -833,17 +844,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// The variable corresponding to the 'this' value.
Variable* receiver() {
- DCHECK(has_this_declaration());
+ DCHECK(has_this_declaration() || is_script_scope());
DCHECK_NOT_NULL(receiver_);
return receiver_;
}
- // TODO(wingo): Add a GLOBAL_SCOPE scope type which will lexically allocate
- // "this" (and no other variable) on the native context. Script scopes then
- // will not have a "this" declaration.
- bool has_this_declaration() const {
- return (is_function_scope() && !is_arrow_scope()) || is_module_scope();
- }
+ bool has_this_declaration() const { return has_this_declaration_; }
// The variable corresponding to the 'new.target' value.
Variable* new_target_var() { return new_target_; }
@@ -935,17 +941,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
void AddLocal(Variable* var);
void DeclareSloppyBlockFunction(
- const AstRawString* name, Scope* scope,
- SloppyBlockFunctionStatement* statement = nullptr);
+ SloppyBlockFunctionStatement* sloppy_block_function);
- // Go through sloppy_block_function_map_ and hoist those (into this scope)
+ // Go through sloppy_block_functions_ and hoist those (into this scope)
// which should be hoisted.
void HoistSloppyBlockFunctions(AstNodeFactory* factory);
- SloppyBlockFunctionMap* sloppy_block_function_map() {
- return sloppy_block_function_map_;
- }
-
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
@@ -988,9 +989,9 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
void PrintParameters();
#endif
- void AllocateLocals();
- void AllocateParameterLocals();
- void AllocateReceiver();
+ V8_INLINE void AllocateLocals();
+ V8_INLINE void AllocateParameterLocals();
+ V8_INLINE void AllocateReceiver();
void ResetAfterPreparsing(AstValueFactory* ast_value_factory, bool aborted);
@@ -1020,8 +1021,15 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
return preparse_data_builder_;
}
+ void set_has_this_reference() { has_this_reference_ = true; }
+ bool has_this_reference() const { return has_this_reference_; }
+ void UsesThis() {
+ set_has_this_reference();
+ GetReceiverScope()->receiver()->ForceContextAllocation();
+ }
+
private:
- void AllocateParameter(Variable* var, int index);
+ V8_INLINE void AllocateParameter(Variable* var, int index);
// Resolve and fill in the allocation information for all variables
// in this scopes. Must be called *after* all scopes have been
@@ -1055,16 +1063,19 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
#endif
bool is_skipped_function_ : 1;
bool has_inferred_function_name_ : 1;
-
- int num_parameters_ = 0;
+ bool has_checked_syntax_ : 1;
+ bool has_this_reference_ : 1;
+ bool has_this_declaration_ : 1;
// If the scope is a function scope, this is the function kind.
const FunctionKind function_kind_;
+ int num_parameters_ = 0;
+
// Parameter list in source order.
ZonePtrList<Variable> params_;
// Map of function names to lists of functions defined in sloppy blocks
- SloppyBlockFunctionMap* sloppy_block_function_map_;
+ base::ThreadedList<SloppyBlockFunctionStatement> sloppy_block_functions_;
// Convenience variable.
Variable* receiver_;
// Function variable, if any; function scopes only.
@@ -1128,27 +1139,21 @@ Scope::Snapshot::Snapshot(Scope* scope)
class ModuleScope final : public DeclarationScope {
public:
- ModuleScope(DeclarationScope* script_scope,
- AstValueFactory* ast_value_factory);
+ ModuleScope(DeclarationScope* script_scope, AstValueFactory* avfactory);
- // Deserialization.
- // The generated ModuleDescriptor does not preserve all information. In
- // particular, its module_requests map will be empty because we no longer need
- // the map after parsing.
+ // Deserialization. Does not restore the module descriptor.
ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
- AstValueFactory* ast_value_factory);
+ AstValueFactory* avfactory);
- ModuleDescriptor* module() const {
- DCHECK_NOT_NULL(module_descriptor_);
- return module_descriptor_;
- }
+ // Returns nullptr in a deserialized scope.
+ ModuleDescriptor* module() const { return module_descriptor_; }
// Set MODULE as VariableLocation for all variables that will live in a
// module's export table.
void AllocateModuleVariables();
private:
- ModuleDescriptor* module_descriptor_;
+ ModuleDescriptor* const module_descriptor_;
};
} // namespace internal
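
The ForEach/Iteration pair declared above is what scopes.cc now uses to turn AllocateVariablesRecursively into an iterative walk. A minimal sketch of how such a depth-first traversal can be written without recursion, using only the inner_scope_/sibling_/outer_scope_ links this diff relies on (illustrative; not necessarily the exact upstream body):

    template <typename FunctionType>
    void Scope::ForEach(FunctionType callback) {
      Scope* scope = this;
      while (true) {
        Iteration iteration = callback(scope);
        // Descend into the first inner scope when the callback asks for it.
        if (iteration == Iteration::kDescend && scope->inner_scope_ != nullptr) {
          scope = scope->inner_scope_;
        } else {
          // Otherwise advance to the next sibling, climbing back toward the
          // root as needed; the walk ends when it returns to the start scope.
          while (scope->sibling_ == nullptr) {
            if (scope == this) return;
            scope = scope->outer_scope_;
          }
          if (scope == this) return;
          scope = scope->sibling_;
        }
      }
    }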
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index 13a444536d..6dbb9dbac4 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -6,6 +6,7 @@
#define V8_AST_VARIABLES_H_
#include "src/ast/ast-value-factory.h"
+#include "src/base/threaded-list.h"
#include "src/globals.h"
#include "src/zone/zone.h"
@@ -59,7 +60,7 @@ class Variable final : public ZoneObject {
return ForceContextAllocationField::decode(bit_field_);
}
void ForceContextAllocation() {
- DCHECK(IsUnallocated() || IsContextSlot() ||
+ DCHECK(IsUnallocated() || IsContextSlot() || IsLookupSlot() ||
location() == VariableLocation::MODULE);
bit_field_ = ForceContextAllocationField::update(bit_field_, true);
}
@@ -137,6 +138,9 @@ class Variable final : public ZoneObject {
}
bool is_parameter() const { return kind() == PARAMETER_VARIABLE; }
+ bool is_sloppy_block_function() {
+ return kind() == SLOPPY_BLOCK_FUNCTION_VARIABLE;
+ }
Variable* local_if_not_shadowed() const {
DCHECK(mode() == VariableMode::kDynamicLocal &&
@@ -207,7 +211,7 @@ class Variable final : public ZoneObject {
class VariableModeField : public BitField16<VariableMode, 0, 3> {};
class VariableKindField
- : public BitField16<VariableKind, VariableModeField::kNext, 2> {};
+ : public BitField16<VariableKind, VariableModeField::kNext, 3> {};
class LocationField
: public BitField16<VariableLocation, VariableKindField::kNext, 3> {};
class ForceContextAllocationField
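
VariableKindField growing from 2 to 3 bits is what makes room for the new SLOPPY_BLOCK_FUNCTION_VARIABLE tested by is_sloppy_block_function() above: two bits hold at most four enumerators. A simplified, self-contained stand-in for the BitField16 helper this packing uses (the real helper lives elsewhere in src/):

    #include <cstdint>

    // Packs a value of kSize bits at offset kShift inside a uint16_t.
    // With kSize = 2 only four VariableKind values fit; a fifth kind
    // forces kSize = 3, shifting every later field via kNext.
    template <typename T, int kShift, int kSize>
    struct BitField16 {
      static constexpr int kNext = kShift + kSize;
      static constexpr uint16_t kMask =
          static_cast<uint16_t>(((1u << kSize) - 1) << kShift);
      static uint16_t encode(T value) {
        return static_cast<uint16_t>(static_cast<unsigned>(value) << kShift);
      }
      static T decode(uint16_t field) {
        return static_cast<T>((field & kMask) >> kShift);
      }
      static uint16_t update(uint16_t field, T value) {
        return static_cast<uint16_t>((field & ~kMask) | encode(value));
      }
    };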
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index a5f14c611e..139ee14931 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -50,6 +50,7 @@ namespace internal {
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotASmi, "Operand is not a smi") \
+ V(kPromiseAlreadySettled, "Promise already settled") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
diff --git a/deps/v8/src/base/division-by-constant.cc b/deps/v8/src/base/division-by-constant.cc
index 4e0900fa24..7aa3a69014 100644
--- a/deps/v8/src/base/division-by-constant.cc
+++ b/deps/v8/src/base/division-by-constant.cc
@@ -93,16 +93,22 @@ MagicNumbersForDivision<T> UnsignedDivisionByConstant(T d,
// -----------------------------------------------------------------------------
// Instantiations.
-template struct V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>;
-template struct V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>;
+template struct EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint32_t>;
+template struct EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint64_t>;
-template MagicNumbersForDivision<uint32_t> SignedDivisionByConstant(uint32_t d);
-template MagicNumbersForDivision<uint64_t> SignedDivisionByConstant(uint64_t d);
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint32_t> SignedDivisionByConstant(uint32_t d);
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint64_t> SignedDivisionByConstant(uint64_t d);
-template MagicNumbersForDivision<uint32_t> UnsignedDivisionByConstant(
- uint32_t d, unsigned leading_zeros);
-template MagicNumbersForDivision<uint64_t> UnsignedDivisionByConstant(
- uint64_t d, unsigned leading_zeros);
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint32_t> UnsignedDivisionByConstant(
+ uint32_t d, unsigned leading_zeros);
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint64_t> UnsignedDivisionByConstant(
+ uint64_t d, unsigned leading_zeros);
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/division-by-constant.h b/deps/v8/src/base/division-by-constant.h
index 5d063f8bd5..744283981b 100644
--- a/deps/v8/src/base/division-by-constant.h
+++ b/deps/v8/src/base/division-by-constant.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#include "src/base/base-export.h"
+#include "src/base/export-template.h"
namespace v8 {
namespace base {
@@ -18,7 +19,7 @@ namespace base {
// Delight", chapter 10. The template parameter must be one of the unsigned
// integral types.
template <class T>
-struct V8_BASE_EXPORT MagicNumbersForDivision {
+struct EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT) MagicNumbersForDivision {
MagicNumbersForDivision(T m, unsigned s, bool a)
: multiplier(m), shift(s), add(a) {}
bool operator==(const MagicNumbersForDivision& rhs) const {
@@ -34,25 +35,35 @@ struct V8_BASE_EXPORT MagicNumbersForDivision {
// Calculate the multiplier and shift for signed division via multiplication.
// The divisor must not be -1, 0 or 1 when interpreted as a signed value.
template <class T>
-V8_BASE_EXPORT MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
+EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT)
+MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
// Calculate the multiplier and shift for unsigned division via multiplication,
// see Warren's "Hacker's Delight", chapter 10. The divisor must not be 0 and
// leading_zeros can be used to speed up the calculation if the given number of
// upper bits of the dividend value are known to be zero.
template <class T>
-V8_BASE_EXPORT MagicNumbersForDivision<T> UnsignedDivisionByConstant(
+EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT)
+MagicNumbersForDivision<T> UnsignedDivisionByConstant(
T d, unsigned leading_zeros = 0);
-extern template V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>
-SignedDivisionByConstant(uint32_t d);
-extern template V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>
-SignedDivisionByConstant(uint64_t d);
+// Explicit instantiation declarations.
+extern template struct EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint32_t>;
+extern template struct EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint64_t>;
-extern template V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>
-UnsignedDivisionByConstant(uint32_t d, unsigned leading_zeros);
-extern template V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>
-UnsignedDivisionByConstant(uint64_t d, unsigned leading_zeros);
+extern template EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint32_t> SignedDivisionByConstant(uint32_t d);
+extern template EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint64_t> SignedDivisionByConstant(uint64_t d);
+
+extern template EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint32_t> UnsignedDivisionByConstant(
+ uint32_t d, unsigned leading_zeros);
+extern template EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT)
+ MagicNumbersForDivision<uint64_t> UnsignedDivisionByConstant(
+ uint64_t d, unsigned leading_zeros);
} // namespace base
} // namespace v8
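
For a concrete instance of the magic numbers these declarations describe: unsigned 32-bit division by 3 uses multiplier ceil(2^33 / 3) = 0xAAAAAAAB with shift 1 and no add, i.e. a multiply-high plus one extra shift. A self-contained check (constants taken from Hacker's Delight, not computed by calling the functions above):

    #include <cassert>
    #include <cstdint>

    // q = floor(n * ceil(2^33 / 3) / 2^33) equals n / 3 for every 32-bit n,
    // because 3 * 0xAAAAAAAB == 2^33 + 1.
    uint32_t DivideBy3(uint32_t n) {
      return static_cast<uint32_t>((uint64_t{n} * 0xAAAAAAABu) >> 33);
    }

    int main() {
      for (uint32_t n : {0u, 1u, 2u, 3u, 100u, 4294967295u}) {
        assert(DivideBy3(n) == n / 3);
      }
      return 0;
    }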
diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc
index d9846b7254..4fcb4df001 100644
--- a/deps/v8/src/base/ieee754.cc
+++ b/deps/v8/src/base/ieee754.cc
@@ -309,7 +309,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
GET_LOW_WORD(low, x);
SET_LOW_WORD(z, low);
e0 = (ix >> 20) - 1046; /* e0 = ilogb(z)-23; */
- SET_HIGH_WORD(z, ix - static_cast<int32_t>(e0 << 20));
+ SET_HIGH_WORD(z, ix - static_cast<int32_t>(static_cast<uint32_t>(e0) << 20));
for (i = 0; i < 2; i++) {
tx[i] = static_cast<double>(static_cast<int32_t>(z));
z = (z - tx[i]) * two24;
@@ -1569,9 +1569,12 @@ double exp(double x) {
/* x is now in primary range */
t = x * x;
if (k >= -1021) {
- INSERT_WORDS(twopk, 0x3FF00000 + (k << 20), 0);
+ INSERT_WORDS(
+ twopk,
+ 0x3FF00000 + static_cast<int32_t>(static_cast<uint32_t>(k) << 20), 0);
} else {
- INSERT_WORDS(twopk, 0x3FF00000 + ((k + 1000) << 20), 0);
+ INSERT_WORDS(twopk, 0x3FF00000 + (static_cast<uint32_t>(k + 1000) << 20),
+ 0);
}
c = x - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5))));
if (k == 0) {
@@ -2341,7 +2344,10 @@ double expm1(double x) {
if (k == 0) {
return x - (x * e - hxs); /* c is 0 */
} else {
- INSERT_WORDS(twopk, 0x3FF00000 + (k << 20), 0); /* 2^k */
+ INSERT_WORDS(
+ twopk,
+ 0x3FF00000 + static_cast<int32_t>(static_cast<uint32_t>(k) << 20),
+ 0); /* 2^k */
e = (x * (e - c) - c);
e -= hxs;
if (k == -1) return 0.5 * (x - e) - 0.5;
@@ -2642,6 +2648,317 @@ double cosh(double x) {
}
/*
+ * ES2019 Draft 2019-01-02 12.6.4
+ * Math.pow & Exponentiation Operator
+ *
+ * Return X raised to the Yth power
+ *
+ * Method:
+ * Let x = 2 * (1+f)
+ * 1. Compute and return log2(x) in two pieces:
+ * log2(x) = w1 + w2,
+ * where w1 has 53-24 = 29 bit trailing zeros.
+ * 2. Perform y*log2(x) = n+y' by simulating multi-precision
+ * arithmetic, where |y'|<=0.5.
+ * 3. Return x**y = 2**n*exp(y'*log2)
+ *
+ * Special cases:
+ * 1. (anything) ** 0 is 1
+ * 2. (anything) ** 1 is itself
+ * 3. (anything) ** NAN is NAN
+ * 4. NAN ** (anything except 0) is NAN
+ * 5. +-(|x| > 1) ** +INF is +INF
+ * 6. +-(|x| > 1) ** -INF is +0
+ * 7. +-(|x| < 1) ** +INF is +0
+ * 8. +-(|x| < 1) ** -INF is +INF
+ * 9. +-1 ** +-INF is NAN
+ * 10. +0 ** (+anything except 0, NAN) is +0
+ * 11. -0 ** (+anything except 0, NAN, odd integer) is +0
+ * 12. +0 ** (-anything except 0, NAN) is +INF
+ * 13. -0 ** (-anything except 0, NAN, odd integer) is +INF
+ * 14. -0 ** (odd integer) = -( +0 ** (odd integer) )
+ * 15. +INF ** (+anything except 0,NAN) is +INF
+ * 16. +INF ** (-anything except 0,NAN) is +0
+ * 17. -INF ** (anything) = -0 ** (-anything)
+ * 18. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer)
+ * 19. (-anything except 0 and inf) ** (non-integer) is NAN
+ *
+ * Accuracy:
+ * pow(x,y) returns x**y nearly rounded. In particular,
+ * pow(integer, integer) always returns the correct integer provided it is
+ * representable.
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+double pow(double x, double y) {
+ static const double
+ bp[] = {1.0, 1.5},
+ dp_h[] = {0.0, 5.84962487220764160156e-01}, // 0x3FE2B803, 0x40000000
+ dp_l[] = {0.0, 1.35003920212974897128e-08}, // 0x3E4CFDEB, 0x43CFD006
+ zero = 0.0, one = 1.0, two = 2.0,
+ two53 = 9007199254740992.0, // 0x43400000, 0x00000000
+ huge = 1.0e300, tiny = 1.0e-300,
+ // poly coefs for (3/2)*(log(x)-2s-2/3*s**3)
+ L1 = 5.99999999999994648725e-01, // 0x3FE33333, 0x33333303
+ L2 = 4.28571428578550184252e-01, // 0x3FDB6DB6, 0xDB6FABFF
+ L3 = 3.33333329818377432918e-01, // 0x3FD55555, 0x518F264D
+ L4 = 2.72728123808534006489e-01, // 0x3FD17460, 0xA91D4101
+ L5 = 2.30660745775561754067e-01, // 0x3FCD864A, 0x93C9DB65
+ L6 = 2.06975017800338417784e-01, // 0x3FCA7E28, 0x4A454EEF
+ P1 = 1.66666666666666019037e-01, // 0x3FC55555, 0x5555553E
+ P2 = -2.77777777770155933842e-03, // 0xBF66C16C, 0x16BEBD93
+ P3 = 6.61375632143793436117e-05, // 0x3F11566A, 0xAF25DE2C
+ P4 = -1.65339022054652515390e-06, // 0xBEBBBD41, 0xC5D26BF1
+ P5 = 4.13813679705723846039e-08, // 0x3E663769, 0x72BEA4D0
+ lg2 = 6.93147180559945286227e-01, // 0x3FE62E42, 0xFEFA39EF
+ lg2_h = 6.93147182464599609375e-01, // 0x3FE62E43, 0x00000000
+ lg2_l = -1.90465429995776804525e-09, // 0xBE205C61, 0x0CA86C39
+ ovt = 8.0085662595372944372e-0017, // -(1024-log2(ovfl+.5ulp))
+ cp = 9.61796693925975554329e-01, // 0x3FEEC709, 0xDC3A03FD =2/(3ln2)
+ cp_h = 9.61796700954437255859e-01, // 0x3FEEC709, 0xE0000000 =(float)cp
+ cp_l = -7.02846165095275826516e-09, // 0xBE3E2FE0, 0x145B01F5 =tail cp_h
+ ivln2 = 1.44269504088896338700e+00, // 0x3FF71547, 0x652B82FE =1/ln2
+ ivln2_h =
+ 1.44269502162933349609e+00, // 0x3FF71547, 0x60000000 =24b 1/ln2
+ ivln2_l =
+ 1.92596299112661746887e-08; // 0x3E54AE0B, 0xF85DDF44 =1/ln2 tail
+
+ double z, ax, z_h, z_l, p_h, p_l;
+ double y1, t1, t2, r, s, t, u, v, w;
+ int i, j, k, yisint, n;
+ int hx, hy, ix, iy;
+ unsigned lx, ly;
+
+ EXTRACT_WORDS(hx, lx, x);
+ EXTRACT_WORDS(hy, ly, y);
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+
+ /* y==zero: x**0 = 1 */
+ if ((iy | ly) == 0) return one;
+
+ /* +-NaN return x+y */
+ if (ix > 0x7ff00000 || ((ix == 0x7ff00000) && (lx != 0)) || iy > 0x7ff00000 ||
+ ((iy == 0x7ff00000) && (ly != 0))) {
+ return x + y;
+ }
+
+ /* determine if y is an odd int when x < 0
+ * yisint = 0 ... y is not an integer
+ * yisint = 1 ... y is an odd int
+ * yisint = 2 ... y is an even int
+ */
+ yisint = 0;
+ if (hx < 0) {
+ if (iy >= 0x43400000) {
+ yisint = 2; /* even integer y */
+ } else if (iy >= 0x3ff00000) {
+ k = (iy >> 20) - 0x3ff; /* exponent */
+ if (k > 20) {
+ j = ly >> (52 - k);
+ if ((j << (52 - k)) == static_cast<int>(ly)) yisint = 2 - (j & 1);
+ } else if (ly == 0) {
+ j = iy >> (20 - k);
+ if ((j << (20 - k)) == iy) yisint = 2 - (j & 1);
+ }
+ }
+ }
+
+ /* special value of y */
+ if (ly == 0) {
+ if (iy == 0x7ff00000) { /* y is +-inf */
+ if (((ix - 0x3ff00000) | lx) == 0) {
+ return y - y; /* inf**+-1 is NaN */
+ } else if (ix >= 0x3ff00000) { /* (|x|>1)**+-inf = inf,0 */
+ return (hy >= 0) ? y : zero;
+ } else { /* (|x|<1)**-,+inf = inf,0 */
+ return (hy < 0) ? -y : zero;
+ }
+ }
+ if (iy == 0x3ff00000) { /* y is +-1 */
+ if (hy < 0) {
+ return base::Divide(one, x);
+ } else {
+ return x;
+ }
+ }
+ if (hy == 0x40000000) return x * x; /* y is 2 */
+ if (hy == 0x3fe00000) { /* y is 0.5 */
+ if (hx >= 0) { /* x >= +0 */
+ return sqrt(x);
+ }
+ }
+ }
+
+ ax = fabs(x);
+ /* special value of x */
+ if (lx == 0) {
+ if (ix == 0x7ff00000 || ix == 0 || ix == 0x3ff00000) {
+ z = ax; /*x is +-0,+-inf,+-1*/
+ if (hy < 0) z = base::Divide(one, z); /* z = (1/|x|) */
+ if (hx < 0) {
+ if (((ix - 0x3ff00000) | yisint) == 0) {
+ /* (-1)**non-int is NaN */
+ z = std::numeric_limits<double>::signaling_NaN();
+ } else if (yisint == 1) {
+ z = -z; /* (x<0)**odd = -(|x|**odd) */
+ }
+ }
+ return z;
+ }
+ }
+
+ n = (hx >> 31) + 1;
+
+ /* (x<0)**(non-int) is NaN */
+ if ((n | yisint) == 0) {
+ return std::numeric_limits<double>::signaling_NaN();
+ }
+
+ s = one; /* s (sign of result -ve**odd) = -1 else = 1 */
+ if ((n | (yisint - 1)) == 0) s = -one; /* (-ve)**(odd int) */
+
+ /* |y| is huge */
+ if (iy > 0x41e00000) { /* if |y| > 2**31 */
+ if (iy > 0x43f00000) { /* if |y| > 2**64, must o/uflow */
+ if (ix <= 0x3fefffff) return (hy < 0) ? huge * huge : tiny * tiny;
+ if (ix >= 0x3ff00000) return (hy > 0) ? huge * huge : tiny * tiny;
+ }
+ /* over/underflow if x is not close to one */
+ if (ix < 0x3fefffff) return (hy < 0) ? s * huge * huge : s * tiny * tiny;
+ if (ix > 0x3ff00000) return (hy > 0) ? s * huge * huge : s * tiny * tiny;
+ /* now |1-x| is tiny <= 2**-20, it suffices to compute
+ log(x) by x-x^2/2+x^3/3-x^4/4 */
+ t = ax - one; /* t has 20 trailing zeros */
+ w = (t * t) * (0.5 - t * (0.3333333333333333333333 - t * 0.25));
+ u = ivln2_h * t; /* ivln2_h has 21 sig. bits */
+ v = t * ivln2_l - w * ivln2;
+ t1 = u + v;
+ SET_LOW_WORD(t1, 0);
+ t2 = v - (t1 - u);
+ } else {
+ double ss, s2, s_h, s_l, t_h, t_l;
+ n = 0;
+ /* take care of subnormal numbers */
+ if (ix < 0x00100000) {
+ ax *= two53;
+ n -= 53;
+ GET_HIGH_WORD(ix, ax);
+ }
+ n += ((ix) >> 20) - 0x3ff;
+ j = ix & 0x000fffff;
+ /* determine interval */
+ ix = j | 0x3ff00000; /* normalize ix */
+ if (j <= 0x3988E) {
+ k = 0; /* |x|<sqrt(3/2) */
+ } else if (j < 0xBB67A) {
+ k = 1; /* |x|<sqrt(3) */
+ } else {
+ k = 0;
+ n += 1;
+ ix -= 0x00100000;
+ }
+ SET_HIGH_WORD(ax, ix);
+
+ /* compute ss = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
+ u = ax - bp[k]; /* bp[0]=1.0, bp[1]=1.5 */
+ v = base::Divide(one, ax + bp[k]);
+ ss = u * v;
+ s_h = ss;
+ SET_LOW_WORD(s_h, 0);
+ /* t_h=ax+bp[k] High */
+ t_h = zero;
+ SET_HIGH_WORD(t_h, ((ix >> 1) | 0x20000000) + 0x00080000 + (k << 18));
+ t_l = ax - (t_h - bp[k]);
+ s_l = v * ((u - s_h * t_h) - s_h * t_l);
+ /* compute log(ax) */
+ s2 = ss * ss;
+ r = s2 * s2 *
+ (L1 + s2 * (L2 + s2 * (L3 + s2 * (L4 + s2 * (L5 + s2 * L6)))));
+ r += s_l * (s_h + ss);
+ s2 = s_h * s_h;
+ t_h = 3.0 + s2 + r;
+ SET_LOW_WORD(t_h, 0);
+ t_l = r - ((t_h - 3.0) - s2);
+ /* u+v = ss*(1+...) */
+ u = s_h * t_h;
+ v = s_l * t_h + t_l * ss;
+ /* 2/(3log2)*(ss+...) */
+ p_h = u + v;
+ SET_LOW_WORD(p_h, 0);
+ p_l = v - (p_h - u);
+ z_h = cp_h * p_h; /* cp_h+cp_l = 2/(3*log2) */
+ z_l = cp_l * p_h + p_l * cp + dp_l[k];
+ /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */
+ t = static_cast<double>(n);
+ t1 = (((z_h + z_l) + dp_h[k]) + t);
+ SET_LOW_WORD(t1, 0);
+ t2 = z_l - (((t1 - t) - dp_h[k]) - z_h);
+ }
+
+ /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
+ y1 = y;
+ SET_LOW_WORD(y1, 0);
+ p_l = (y - y1) * t1 + y * t2;
+ p_h = y1 * t1;
+ z = p_l + p_h;
+ EXTRACT_WORDS(j, i, z);
+ if (j >= 0x40900000) { /* z >= 1024 */
+ if (((j - 0x40900000) | i) != 0) { /* if z > 1024 */
+ return s * huge * huge; /* overflow */
+ } else {
+ if (p_l + ovt > z - p_h) return s * huge * huge; /* overflow */
+ }
+ } else if ((j & 0x7fffffff) >= 0x4090cc00) { /* z <= -1075 */
+ if (((j - 0xc090cc00) | i) != 0) { /* z < -1075 */
+ return s * tiny * tiny; /* underflow */
+ } else {
+ if (p_l <= z - p_h) return s * tiny * tiny; /* underflow */
+ }
+ }
+ /*
+ * compute 2**(p_h+p_l)
+ */
+ i = j & 0x7fffffff;
+ k = (i >> 20) - 0x3ff;
+ n = 0;
+ if (i > 0x3fe00000) { /* if |z| > 0.5, set n = [z+0.5] */
+ n = j + (0x00100000 >> (k + 1));
+ k = ((n & 0x7fffffff) >> 20) - 0x3ff; /* new k for n */
+ t = zero;
+ SET_HIGH_WORD(t, n & ~(0x000fffff >> k));
+ n = ((n & 0x000fffff) | 0x00100000) >> (20 - k);
+ if (j < 0) n = -n;
+ p_h -= t;
+ }
+ t = p_l + p_h;
+ SET_LOW_WORD(t, 0);
+ u = t * lg2_h;
+ v = (p_l - (t - p_h)) * lg2 + t * lg2_l;
+ z = u + v;
+ w = v - (z - u);
+ t = z * z;
+ t1 = z - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5))));
+ r = base::Divide(z * t1, (t1 - two) - (w + z * w));
+ z = one - (r - z);
+ GET_HIGH_WORD(j, z);
+ j += static_cast<int>(static_cast<uint32_t>(n) << 20);
+ if ((j >> 20) <= 0) {
+ z = scalbn(z, n); /* subnormal output */
+ } else {
+ int tmp;
+ GET_HIGH_WORD(tmp, z);
+ SET_HIGH_WORD(z, tmp + static_cast<int>(static_cast<uint32_t>(n) << 20));
+ }
+ return s * z;
+}
+
+/*
* ES6 draft 09-27-13, section 20.2.2.30.
* Math.sinh
* Method :
@@ -2752,6 +3069,16 @@ double tanh(double x) {
return (jx >= 0) ? z : -z;
}
+#undef EXTRACT_WORDS
+#undef EXTRACT_WORD64
+#undef GET_HIGH_WORD
+#undef GET_LOW_WORD
+#undef INSERT_WORDS
+#undef INSERT_WORD64
+#undef SET_HIGH_WORD
+#undef SET_LOW_WORD
+#undef STRICT_ASSIGN
+
} // namespace ieee754
} // namespace base
} // namespace v8
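
Everything in pow() above flows through the EXTRACT_WORDS/GET_HIGH_WORD/SET_HIGH_WORD family that this change now #undefs at the end of the file. A hedged sketch of what such word-access helpers amount to, written as portable memcpy-based functions rather than the actual macros:

    #include <cstdint>
    #include <cstring>

    // View an IEEE-754 double as two 32-bit halves without type punning.
    // For example, HighWord(1.0) == 0x3FF00000, matching the hex constants
    // scattered through the comments above.
    static inline uint32_t HighWord(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);
    }

    static inline double WithHighWord(double d, uint32_t high) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      bits = (uint64_t{high} << 32) | (bits & 0xFFFFFFFFu);
      std::memcpy(&d, &bits, sizeof(bits));
      return d;
    }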
diff --git a/deps/v8/src/base/ieee754.h b/deps/v8/src/base/ieee754.h
index 72f3db15ef..f2b3a3eb58 100644
--- a/deps/v8/src/base/ieee754.h
+++ b/deps/v8/src/base/ieee754.h
@@ -60,6 +60,14 @@ V8_BASE_EXPORT double cbrt(double x);
// Returns exp(x)-1, the exponential of |x| minus 1.
V8_BASE_EXPORT double expm1(double x);
+// Returns |x| to the power of |y|.
+// The result of base ** exponent when base is 1 or -1 and exponent is
+// +Infinity or -Infinity differs from IEEE 754-2008. The first edition
+// of ECMAScript specified a result of NaN for this operation, whereas
+// later versions of IEEE 754-2008 specified 1. The historical ECMAScript
+// behaviour is preserved for compatibility reasons.
+V8_BASE_EXPORT double pow(double x, double y);
+
// Returns the sine of |x|, where |x| is given in radians.
V8_BASE_EXPORT double sin(double x);
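
The compatibility note above is observable in practice: special case 9 in the pow() comment block keeps the ES1 answer. A small illustration of the difference against the C library (the v8::base::ieee754::pow call is shown only as a comment, since linking it requires the V8 tree):

    #include <cmath>
    #include <cstdio>

    // IEEE 754-2008 (and therefore C's pow) defines pow(+-1, +-Inf) == 1;
    // the function declared above returns NaN for the same inputs.
    int main() {
      std::printf("%f\n", std::pow(1.0, INFINITY));   // prints 1.000000
      std::printf("%f\n", std::pow(-1.0, INFINITY));  // prints 1.000000
      // v8::base::ieee754::pow(1.0, INFINITY) would print nan instead.
      return 0;
    }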
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 8a088ffc40..14c69d3fc2 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -146,7 +146,7 @@ V8_INLINE Dest bit_cast(Source const& source) {
// odr-used by the definition of the destructor of that class, [...]
#define DISALLOW_NEW_AND_DELETE() \
void* operator new(size_t) { base::OS::Abort(); } \
- void* operator new[](size_t) { base::OS::Abort(); }; \
+ void* operator new[](size_t) { base::OS::Abort(); } \
void operator delete(void*, size_t) { base::OS::Abort(); } \
void operator delete[](void*, size_t) { base::OS::Abort(); }
diff --git a/deps/v8/src/base/platform/mutex.cc b/deps/v8/src/base/platform/mutex.cc
index a044075c16..2e2f7f9320 100644
--- a/deps/v8/src/base/platform/mutex.cc
+++ b/deps/v8/src/base/platform/mutex.cc
@@ -155,6 +155,45 @@ bool RecursiveMutex::TryLock() {
return true;
}
+SharedMutex::SharedMutex() { pthread_rwlock_init(&native_handle_, nullptr); }
+
+SharedMutex::~SharedMutex() {
+ int result = pthread_rwlock_destroy(&native_handle_);
+ DCHECK_EQ(0, result);
+ USE(result);
+}
+
+void SharedMutex::LockShared() {
+ int result = pthread_rwlock_rdlock(&native_handle_);
+ DCHECK_EQ(0, result);
+ USE(result);
+}
+
+void SharedMutex::LockExclusive() {
+ int result = pthread_rwlock_wrlock(&native_handle_);
+ DCHECK_EQ(0, result);
+ USE(result);
+}
+
+void SharedMutex::UnlockShared() {
+ int result = pthread_rwlock_unlock(&native_handle_);
+ DCHECK_EQ(0, result);
+ USE(result);
+}
+
+void SharedMutex::UnlockExclusive() {
+ // Same code as {UnlockShared} on POSIX.
+ UnlockShared();
+}
+
+bool SharedMutex::TryLockShared() {
+ return pthread_rwlock_tryrdlock(&native_handle_) == 0;
+}
+
+bool SharedMutex::TryLockExclusive() {
+ return pthread_rwlock_trywrlock(&native_handle_) == 0;
+}
+
#elif V8_OS_WIN
Mutex::Mutex() : native_handle_(SRWLOCK_INIT) {
@@ -233,6 +272,28 @@ bool RecursiveMutex::TryLock() {
return true;
}
+SharedMutex::SharedMutex() : native_handle_(SRWLOCK_INIT) {}
+
+SharedMutex::~SharedMutex() {}
+
+void SharedMutex::LockShared() { AcquireSRWLockShared(&native_handle_); }
+
+void SharedMutex::LockExclusive() { AcquireSRWLockExclusive(&native_handle_); }
+
+void SharedMutex::UnlockShared() { ReleaseSRWLockShared(&native_handle_); }
+
+void SharedMutex::UnlockExclusive() {
+ ReleaseSRWLockExclusive(&native_handle_);
+}
+
+bool SharedMutex::TryLockShared() {
+ return TryAcquireSRWLockShared(&native_handle_);
+}
+
+bool SharedMutex::TryLockExclusive() {
+ return TryAcquireSRWLockExclusive(&native_handle_);
+}
+
#endif // V8_OS_POSIX
} // namespace base
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index a69eee0bc6..ea589d5b98 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -20,7 +20,7 @@ namespace v8 {
namespace base {
// ----------------------------------------------------------------------------
-// Mutex
+// Mutex - a replacement for std::mutex
//
// This class is a synchronization primitive that can be used to protect shared
// data from being simultaneously accessed by multiple threads. A mutex offers
@@ -106,9 +106,8 @@ typedef LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-
// -----------------------------------------------------------------------------
-// RecursiveMutex
+// RecursiveMutex - a replacement for std::recursive_mutex
//
// This class is a synchronization primitive that can be used to protect shared
// data from being simultaneously accessed by multiple threads. A recursive
@@ -151,6 +150,7 @@ class V8_BASE_EXPORT RecursiveMutex final {
// successfully locked.
bool TryLock() V8_WARN_UNUSED_RESULT;
+ private:
// The implementation-defined native handle type.
#if V8_OS_POSIX
typedef pthread_mutex_t NativeHandle;
@@ -158,14 +158,6 @@ class V8_BASE_EXPORT RecursiveMutex final {
typedef CRITICAL_SECTION NativeHandle;
#endif
- NativeHandle& native_handle() {
- return native_handle_;
- }
- const NativeHandle& native_handle() const {
- return native_handle_;
- }
-
- private:
NativeHandle native_handle_;
#ifdef DEBUG
int level_;
@@ -191,6 +183,73 @@ typedef LazyStaticInstance<RecursiveMutex,
#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+// ----------------------------------------------------------------------------
+// SharedMutex - a replacement for std::shared_mutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. In contrast to
+// other mutex types which facilitate exclusive access, a shared_mutex has two
+// levels of access:
+// - shared: several threads can share ownership of the same mutex.
+// - exclusive: only one thread can own the mutex.
+// Shared mutexes are usually used in situations when multiple readers can
+// access the same resource at the same time without causing data races, but
+// only one writer can do so.
+// The SharedMutex class is non-copyable.
+
+class V8_BASE_EXPORT SharedMutex final {
+ public:
+ SharedMutex();
+ ~SharedMutex();
+
+ // Acquires shared ownership of the {SharedMutex}. If another thread is
+ // holding the mutex in exclusive ownership, a call to {LockShared()} will
+ // block execution until shared ownership can be acquired.
+ // If {LockShared()} is called by a thread that already owns the mutex in any
+ // mode (exclusive or shared), the behavior is undefined.
+ void LockShared();
+
+ // Locks the SharedMutex. If another thread has already locked the mutex, a
+ // call to {LockExclusive()} will block execution until the lock is acquired.
+ // If {LockExclusive()} is called by a thread that already owns the mutex in
+ // any mode (shared or exclusive), the behavior is undefined.
+ void LockExclusive();
+
+ // Releases the {SharedMutex} from shared ownership by the calling thread.
+ // The mutex must be locked by the current thread of execution in shared mode;
+ // otherwise the behavior is undefined.
+ void UnlockShared();
+
+ // Unlocks the {SharedMutex}. It must be locked by the current thread of
+ // execution; otherwise the behavior is undefined.
+ void UnlockExclusive();
+
+ // Tries to lock the {SharedMutex} in shared mode. Returns immediately. On
+ // successful lock acquisition returns true, otherwise returns false.
+ // This function is allowed to fail spuriously and return false even if the
+ // mutex is not currently exclusively locked by any other thread.
+ bool TryLockShared() V8_WARN_UNUSED_RESULT;
+
+ // Tries to lock the {SharedMutex}. Returns immediately. On successful lock
+ // acquisition returns true, otherwise returns false.
+ // This function is allowed to fail spuriously and return false even if the
+ // mutex is not currently locked by any other thread.
+ // If {TryLockExclusive()} is called by a thread that already owns the mutex
+ // in any mode (shared or exclusive), the behavior is undefined.
+ bool TryLockExclusive() V8_WARN_UNUSED_RESULT;
+
+ private:
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_rwlock_t NativeHandle;
+#elif V8_OS_WIN
+ typedef SRWLOCK NativeHandle;
+#endif
+
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMutex);
+};
// -----------------------------------------------------------------------------
// LockGuard
@@ -210,22 +269,61 @@ template <typename Mutex, NullBehavior Behavior = NullBehavior::kRequireNotNull>
class LockGuard final {
public:
explicit LockGuard(Mutex* mutex) : mutex_(mutex) {
- if (Behavior == NullBehavior::kRequireNotNull || mutex_ != nullptr) {
- mutex_->Lock();
- }
+ if (has_mutex()) mutex_->Lock();
}
~LockGuard() {
- if (mutex_ != nullptr) mutex_->Unlock();
+ if (has_mutex()) mutex_->Unlock();
}
private:
- Mutex* mutex_;
+ Mutex* const mutex_;
+
+ bool V8_INLINE has_mutex() const {
+ DCHECK_IMPLIES(Behavior == NullBehavior::kRequireNotNull,
+ mutex_ != nullptr);
+ return Behavior == NullBehavior::kRequireNotNull || mutex_ != nullptr;
+ }
DISALLOW_COPY_AND_ASSIGN(LockGuard);
};
using MutexGuard = LockGuard<Mutex>;
+enum MutexSharedType : bool { kShared = true, kExclusive = false };
+
+template <MutexSharedType kIsShared,
+ NullBehavior Behavior = NullBehavior::kRequireNotNull>
+class SharedMutexGuard final {
+ public:
+ explicit SharedMutexGuard(SharedMutex* mutex) : mutex_(mutex) {
+ if (!has_mutex()) return;
+ if (kIsShared) {
+ mutex_->LockShared();
+ } else {
+ mutex_->LockExclusive();
+ }
+ }
+ ~SharedMutexGuard() {
+ if (!has_mutex()) return;
+ if (kIsShared) {
+ mutex_->UnlockShared();
+ } else {
+ mutex_->UnlockExclusive();
+ }
+ }
+
+ private:
+ SharedMutex* const mutex_;
+
+ bool V8_INLINE has_mutex() const {
+ DCHECK_IMPLIES(Behavior == NullBehavior::kRequireNotNull,
+ mutex_ != nullptr);
+ return Behavior == NullBehavior::kRequireNotNull || mutex_ != nullptr;
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMutexGuard);
+};
+
} // namespace base
} // namespace v8
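
A usage sketch for the guard added above, assuming a reader-heavy shared table (the data structure here is invented for illustration):

    #include "src/base/platform/mutex.h"

    namespace {

    v8::base::SharedMutex g_table_mutex;
    int g_table_size = 0;

    int ReadSize() {
      // Any number of readers may hold the lock in shared mode at once.
      v8::base::SharedMutexGuard<v8::base::kShared> guard(&g_table_mutex);
      return g_table_size;
    }

    void SetSize(int size) {
      // A writer takes exclusive ownership; readers block until release.
      v8::base::SharedMutexGuard<v8::base::kExclusive> guard(&g_table_mutex);
      g_table_size = size;
    }

    }  // namespace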
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index f6123437ec..7dd29dc39e 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -20,14 +20,13 @@ uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
case OS::MemoryPermission::kNoAccess:
return 0; // no permissions
case OS::MemoryPermission::kRead:
- return ZX_VM_FLAG_PERM_READ;
+ return ZX_VM_PERM_READ;
case OS::MemoryPermission::kReadWrite:
- return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE;
+ return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
case OS::MemoryPermission::kReadWriteExecute:
- return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
- ZX_VM_FLAG_PERM_EXECUTE;
+ return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
case OS::MemoryPermission::kReadExecute:
- return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_EXECUTE;
+ return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
}
UNREACHABLE();
}
@@ -55,13 +54,17 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
static const char kVirtualMemoryName[] = "v8-virtualmem";
zx_object_set_property(vmo, ZX_PROP_NAME, kVirtualMemoryName,
strlen(kVirtualMemoryName));
+
+ // Always call zx_vmo_replace_as_executable() in case the memory will need
+ // to be marked as executable in the future.
+ // TODO(https://crbug.com/v8/8899): Only call this when we know that the
+ // region will need to be marked as executable in the future.
+ if (zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK) {
+ return nullptr;
+ }
+
uintptr_t reservation;
uint32_t prot = GetProtectionFromMemoryPermission(access);
- if ((prot & ZX_VM_FLAG_PERM_EXECUTE) != 0) {
- if (zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK) {
- return nullptr;
- }
- }
zx_status_t status = zx_vmar_map(zx_vmar_root_self(), prot, 0, vmo, 0,
request_size, &reservation);
// Either the vmo is now referenced by the vmar, or we failed and are bailing,
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index e7edbf5f58..33a9371a92 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -93,7 +93,7 @@ bool g_hard_abort = false;
const char* g_gc_fake_mmap = nullptr;
DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator,
- GetPlatformRandomNumberGenerator);
+ GetPlatformRandomNumberGenerator)
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
#if !V8_OS_FUCHSIA
@@ -269,7 +269,7 @@ void* OS::GetRandomMmapAddr() {
return reinterpret_cast<void*>(raw_addr);
}
-// TODO(bbudge) Move Cygwin and Fuschia stuff into platform-specific files.
+// TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h
index 8cf5e54604..7d732b4a8f 100644
--- a/deps/v8/src/base/platform/platform-posix.h
+++ b/deps/v8/src/base/platform/platform-posix.h
@@ -14,7 +14,7 @@ namespace base {
class PosixTimezoneCache : public TimezoneCache {
public:
double DaylightSavingsOffset(double time_ms) override;
- void Clear() override {}
+ void Clear(TimeZoneDetection) override {}
~PosixTimezoneCache() override = default;
protected:
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 45aabf390f..c82ec5335a 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -113,7 +113,7 @@ class WindowsTimezoneCache : public TimezoneCache {
~WindowsTimezoneCache() override {}
- void Clear() override { initialized_ = false; }
+ void Clear(TimeZoneDetection) override { initialized_ = false; }
const char* LocalTimezone(double time) override;
@@ -690,7 +690,7 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
#undef STRUNCATE
DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator,
- GetPlatformRandomNumberGenerator);
+ GetPlatformRandomNumberGenerator)
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
@@ -920,6 +920,11 @@ void OS::Sleep(TimeDelta interval) {
void OS::Abort() {
+ // Give a chance to debug the failure.
+ if (IsDebuggerPresent()) {
+ DebugBreak();
+ }
+
// Before aborting, make sure to flush output buffers.
fflush(stdout);
fflush(stderr);
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 2e10f539f4..5339e14ade 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -314,8 +314,8 @@ class Clock final {
};
namespace {
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock);
-};
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock)
+}
Time Time::Now() { return GetClock()->Now(); }
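
The DEFINE_LAZY_LEAKY_OBJECT_GETTER edits here and in the neighbouring files all drop a stray trailing semicolon, and the anonymous-namespace close above loses one for the same reason. A minimal reproduction with a made-up macro of the same shape:

    // The macro expands to a complete function definition, so writing
    // MACRO(...); at namespace scope leaves an empty declaration behind,
    // which newer compilers flag under -Wextra-semi.
    #define DEFINE_LEAKY_GETTER(Type, Name) \
      Type* Name() {                        \
        static Type* instance = new Type(); \
        return instance;                    \
      }

    struct Clock {};
    DEFINE_LEAKY_GETTER(Clock, GetClock)  // note: no trailing ';'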
diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h
index 5138e65ab5..bbfe2e2e9c 100644
--- a/deps/v8/src/base/small-vector.h
+++ b/deps/v8/src/base/small-vector.h
@@ -15,7 +15,7 @@ namespace base {
// Minimal SmallVector implementation. Uses inline storage first, switches to
// malloc when it overflows.
-template <typename T, size_t kInlineSize>
+template <typename T, size_t kSize>
class SmallVector {
// Currently only support trivially copyable and trivially destructible data
// types, as it uses memcpy to copy elements and never calls destructors.
@@ -23,6 +23,8 @@ class SmallVector {
STATIC_ASSERT(std::is_trivially_destructible<T>::value);
public:
+ static constexpr size_t kInlineSize = kSize;
+
SmallVector() = default;
SmallVector(const SmallVector& other) V8_NOEXCEPT { *this = other; }
SmallVector(SmallVector&& other) V8_NOEXCEPT { *this = std::move(other); }
@@ -62,9 +64,15 @@ class SmallVector {
return *this;
}
- T* data() const { return begin_; }
- T* begin() const { return begin_; }
- T* end() const { return end_; }
+ T* data() { return begin_; }
+ const T* data() const { return begin_; }
+
+ T* begin() { return begin_; }
+ const T* begin() const { return begin_; }
+
+ T* end() { return end_; }
+ const T* end() const { return end_; }
+
size_t size() const { return end_ - begin_; }
bool empty() const { return end_ == begin_; }
size_t capacity() const { return end_of_storage_ - begin_; }
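
Splitting data()/begin()/end() into const and non-const overloads is a const-correctness fix: the old T* begin() const handed out mutable pointers even from a const SmallVector. A sketch of the call site the new overloads tighten up (assuming the header above):

    #include "src/base/small-vector.h"

    // Previously this range-for iterated via T* despite `v` being const,
    // so elements could be written through a reference; with the const
    // overloads above, iteration now yields const int*.
    int Sum(const v8::base::SmallVector<int, 8>& v) {
      int total = 0;
      for (int x : v) total += x;
      return total;
    }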
diff --git a/deps/v8/src/base/timezone-cache.h b/deps/v8/src/base/timezone-cache.h
index 3d97eee126..5b8e5a20da 100644
--- a/deps/v8/src/base/timezone-cache.h
+++ b/deps/v8/src/base/timezone-cache.h
@@ -23,8 +23,22 @@ class TimezoneCache {
// https://github.com/tc39/ecma262/pull/778
virtual double LocalTimeOffset(double time_ms, bool is_utc) = 0;
+ /**
+ * Time zone redetection indicator for Clear function.
+ *
+ * kSkip indicates host time zone doesn't have to be redetected.
+ * kRedetect indicates host time zone should be redetected, and used to set
+ * the default time zone.
+ *
+ * The host time zone detection may require file system access or similar
+ * operations unlikely to be available inside a sandbox. If V8 is run inside a
+ * sandbox, the host time zone has to be detected outside the sandbox
+ * separately.
+ */
+ enum class TimeZoneDetection { kSkip, kRedetect };
+
// Called when the local timezone changes
- virtual void Clear() = 0;
+ virtual void Clear(TimeZoneDetection time_zone_detection) = 0;
// Called when tearing down the isolate
virtual ~TimezoneCache() = default;
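
A hedged sketch of a cache honouring the new Clear(TimeZoneDetection) contract; only the virtuals visible in this diff are stubbed, so treat it as illustrative rather than a complete implementation:

    #include "src/base/timezone-cache.h"

    class StubTimezoneCache : public v8::base::TimezoneCache {
     public:
      const char* LocalTimezone(double) override { return "UTC"; }
      double DaylightSavingsOffset(double) override { return 0; }
      double LocalTimeOffset(double, bool) override { return 0; }
      void Clear(TimeZoneDetection detection) override {
        cached_ = false;
        if (detection == TimeZoneDetection::kRedetect) {
          // Redetect the host time zone here. As the comment above notes,
          // inside a sandbox the detection has to happen outside and be
          // handed in separately.
        }
      }

     private:
      bool cached_ = false;
    };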
diff --git a/deps/v8/src/basic-block-profiler.cc b/deps/v8/src/basic-block-profiler.cc
index 47fd633098..444c2bb397 100644
--- a/deps/v8/src/basic-block-profiler.cc
+++ b/deps/v8/src/basic-block-profiler.cc
@@ -13,7 +13,7 @@
namespace v8 {
namespace internal {
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(BasicBlockProfiler, BasicBlockProfiler::Get);
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(BasicBlockProfiler, BasicBlockProfiler::Get)
BasicBlockProfiler::Data::Data(size_t n_blocks)
: n_blocks_(n_blocks),
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 23399546ee..8dc879b7a0 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -17,9 +17,11 @@
#include "src/extensions/ignition-statistics-extension.h"
#include "src/extensions/statistics-extension.h"
#include "src/extensions/trigger-failure-extension.h"
-#include "src/heap/heap.h"
+#include "src/function-kind.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate-inl.h"
#include "src/math-random.h"
+#include "src/microtask-queue.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments.h"
#include "src/objects/builtin-function-id.h"
@@ -120,42 +122,13 @@ static const char* GCFunctionName() {
return flag_given ? FLAG_expose_gc_as : "gc";
}
-v8::Extension* Bootstrapper::free_buffer_extension_ = nullptr;
-v8::Extension* Bootstrapper::gc_extension_ = nullptr;
-v8::Extension* Bootstrapper::externalize_string_extension_ = nullptr;
-v8::Extension* Bootstrapper::statistics_extension_ = nullptr;
-v8::Extension* Bootstrapper::trigger_failure_extension_ = nullptr;
-v8::Extension* Bootstrapper::ignition_statistics_extension_ = nullptr;
-
void Bootstrapper::InitializeOncePerProcess() {
- free_buffer_extension_ = new FreeBufferExtension;
- v8::RegisterExtension(free_buffer_extension_);
- gc_extension_ = new GCExtension(GCFunctionName());
- v8::RegisterExtension(gc_extension_);
- externalize_string_extension_ = new ExternalizeStringExtension;
- v8::RegisterExtension(externalize_string_extension_);
- statistics_extension_ = new StatisticsExtension;
- v8::RegisterExtension(statistics_extension_);
- trigger_failure_extension_ = new TriggerFailureExtension;
- v8::RegisterExtension(trigger_failure_extension_);
- ignition_statistics_extension_ = new IgnitionStatisticsExtension;
- v8::RegisterExtension(ignition_statistics_extension_);
-}
-
-
-void Bootstrapper::TearDownExtensions() {
- delete free_buffer_extension_;
- free_buffer_extension_ = nullptr;
- delete gc_extension_;
- gc_extension_ = nullptr;
- delete externalize_string_extension_;
- externalize_string_extension_ = nullptr;
- delete statistics_extension_;
- statistics_extension_ = nullptr;
- delete trigger_failure_extension_;
- trigger_failure_extension_ = nullptr;
- delete ignition_statistics_extension_;
- ignition_statistics_extension_ = nullptr;
+ v8::RegisterExtension(v8::base::make_unique<FreeBufferExtension>());
+ v8::RegisterExtension(v8::base::make_unique<GCExtension>(GCFunctionName()));
+ v8::RegisterExtension(v8::base::make_unique<ExternalizeStringExtension>());
+ v8::RegisterExtension(v8::base::make_unique<StatisticsExtension>());
+ v8::RegisterExtension(v8::base::make_unique<TriggerFailureExtension>());
+ v8::RegisterExtension(v8::base::make_unique<IgnitionStatisticsExtension>());
}
void Bootstrapper::TearDown() {
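
InitializeOncePerProcess now hands each extension to V8 by unique_ptr, which is why the static pointers and TearDownExtensions() disappear. An embedder-side sketch of the same ownership-passing pattern, assuming the unique_ptr overload of v8::RegisterExtension that this diff calls into (HelloExtension is invented for illustration):

    #include <memory>
    #include "include/v8.h"

    class HelloExtension : public v8::Extension {
     public:
      HelloExtension() : v8::Extension("v8/hello", "native function hello();") {}
    };

    void RegisterHelloOncePerProcess() {
      // V8 takes ownership; no matching delete or TearDown call is needed.
      v8::RegisterExtension(std::make_unique<HelloExtension>());
    }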
@@ -167,7 +140,8 @@ class Genesis {
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
size_t context_snapshot_index,
- v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
+ v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
+ v8::MicrotaskQueue* microtask_queue);
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template);
~Genesis() = default;
@@ -330,12 +304,14 @@ Handle<Context> Bootstrapper::CreateEnvironment(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
- v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
+ v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
+ v8::MicrotaskQueue* microtask_queue) {
HandleScope scope(isolate_);
Handle<Context> env;
{
Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
- context_snapshot_index, embedder_fields_deserializer);
+ context_snapshot_index, embedder_fields_deserializer,
+ microtask_queue);
env = genesis.result();
if (env.is_null() || !InstallExtensions(env, extensions)) {
return Handle<Context>();
@@ -381,6 +357,8 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
if (FLAG_track_detached_contexts) {
isolate_->AddDetachedContext(env);
}
+
+ env->native_context()->set_microtask_queue(nullptr);
}
namespace {
@@ -1761,6 +1739,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_array_for_each_iterator(*for_each_fun);
SimpleInstallFunction(isolate_, proto, "filter", Builtins::kArrayFilter, 1,
false);
+ SimpleInstallFunction(isolate_, proto, "flat",
+ Builtins::kArrayPrototypeFlat, 0, false);
+ SimpleInstallFunction(isolate_, proto, "flatMap",
+ Builtins::kArrayPrototypeFlatMap, 1, false);
SimpleInstallFunction(isolate_, proto, "map", Builtins::kArrayMap, 1,
false);
SimpleInstallFunction(isolate_, proto, "every", Builtins::kArrayEvery, 1,
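With this hunk, Array.prototype.flat and Array.prototype.flatMap are installed unconditionally alongside the other array builtins; the --harmony-array-flat installer is deleted further down. For reference, the now-default behavior:

    [1, [2, [3]]].flat();               // [1, 2, [3]]  (depth defaults to 1)
    [1, [2, [3]]].flat(Infinity);       // [1, 2, 3]
    [1, 2, 3].flatMap(x => [x, x * 2]); // [1, 2, 2, 4, 3, 6]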
@@ -2183,6 +2165,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kSymbolPrototypeValueOf, 0, true,
BuiltinFunctionId::kSymbolPrototypeValueOf);
+ // Install the Symbol.prototype.description getter.
+ SimpleInstallGetter(isolate_, prototype,
+ factory->InternalizeUtf8String("description"),
+ Builtins::kSymbolPrototypeDescriptionGetter, true);
+
// Install the @@toPrimitive function.
InstallFunctionAtSymbol(
isolate_, prototype, factory->to_primitive_symbol(),
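Symbol.prototype.description similarly graduates from --harmony-symbol-description to a default install. The getter exposes the [[Description]] slot directly, without the "Symbol(...)" wrapping of toString():

    const s = Symbol('hello');
    s.description;        // 'hello'
    s.toString();         // 'Symbol(hello)'
    Symbol().description; // undefined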
@@ -2628,19 +2615,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
writable, Representation::Tagged());
initial_map->AppendDescriptor(isolate(), &d);
- { // Internal: RegExpInternalMatch
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate_, isolate_->factory()->empty_string(),
- Builtins::kRegExpInternalMatch, 2, true);
- native_context()->set(Context::REGEXP_INTERNAL_MATCH, *function);
- }
-
- // Create the last match info. One for external use, and one for internal
- // use when we don't want to modify the externally visible match info.
+ // Create the last match info.
Handle<RegExpMatchInfo> last_match_info = factory->NewRegExpMatchInfo();
native_context()->set_regexp_last_match_info(*last_match_info);
- Handle<RegExpMatchInfo> internal_match_info = factory->NewRegExpMatchInfo();
- native_context()->set_regexp_internal_match_info(*internal_match_info);
// Force the RegExp constructor to fast properties, so that we can use the
// fast paths for various things like
@@ -2874,7 +2851,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate(), intl, "getCanonicalLocales",
Builtins::kIntlGetCanonicalLocales, 1, false);
- {
+ { // -- D a t e T i m e F o r m a t
Handle<JSFunction> date_time_format_constructor = InstallFunction(
isolate_, intl, "DateTimeFormat", JS_INTL_DATE_TIME_FORMAT_TYPE,
JSDateTimeFormat::kSize, 0, factory->the_hole_value(),
@@ -2907,7 +2884,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kDateTimeFormatPrototypeFormat, false);
}
- {
+ { // -- N u m b e r F o r m a t
Handle<JSFunction> number_format_constructor = InstallFunction(
isolate_, intl, "NumberFormat", JS_INTL_NUMBER_FORMAT_TYPE,
JSNumberFormat::kSize, 0, factory->the_hole_value(),
@@ -2939,7 +2916,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kNumberFormatPrototypeFormatNumber, false);
}
- {
+ { // -- C o l l a t o r
Handle<JSFunction> collator_constructor = InstallFunction(
isolate_, intl, "Collator", JS_INTL_COLLATOR_TYPE, JSCollator::kSize,
0, factory->the_hole_value(), Builtins::kCollatorConstructor);
@@ -2965,7 +2942,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kCollatorPrototypeCompare, false);
}
- {
+ { // -- V 8 B r e a k I t e r a t o r
Handle<JSFunction> v8_break_iterator_constructor = InstallFunction(
isolate_, intl, "v8BreakIterator", JS_INTL_V8_BREAK_ITERATOR_TYPE,
JSV8BreakIterator::kSize, 0, factory->the_hole_value(),
@@ -3006,7 +2983,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kV8BreakIteratorPrototypeBreakType, false);
}
- {
+ { // -- P l u r a l R u l e s
Handle<JSFunction> plural_rules_constructor = InstallFunction(
isolate_, intl, "PluralRules", JS_INTL_PLURAL_RULES_TYPE,
JSPluralRules::kSize, 0, factory->the_hole_value(),
@@ -3029,6 +3006,63 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, prototype, "select",
Builtins::kPluralRulesPrototypeSelect, 1, false);
}
+
+  { // -- R e l a t i v e T i m e F o r m a t
+ Handle<JSFunction> relative_time_format_fun = InstallFunction(
+ isolate(), intl, "RelativeTimeFormat",
+ JS_INTL_RELATIVE_TIME_FORMAT_TYPE, JSRelativeTimeFormat::kSize, 0,
+ factory->the_hole_value(), Builtins::kRelativeTimeFormatConstructor);
+ relative_time_format_fun->shared()->set_length(0);
+ relative_time_format_fun->shared()->DontAdaptArguments();
+
+ SimpleInstallFunction(
+ isolate(), relative_time_format_fun, "supportedLocalesOf",
+ Builtins::kRelativeTimeFormatSupportedLocalesOf, 1, false);
+
+ // Setup %RelativeTimeFormatPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(relative_time_format_fun->instance_prototype()),
+ isolate());
+
+ InstallToStringTag(isolate(), prototype, "Intl.RelativeTimeFormat");
+
+ SimpleInstallFunction(
+ isolate(), prototype, "resolvedOptions",
+ Builtins::kRelativeTimeFormatPrototypeResolvedOptions, 0, false);
+ SimpleInstallFunction(isolate(), prototype, "format",
+ Builtins::kRelativeTimeFormatPrototypeFormat, 2,
+ false);
+ SimpleInstallFunction(isolate(), prototype, "formatToParts",
+ Builtins::kRelativeTimeFormatPrototypeFormatToParts,
+ 2, false);
+ }
+
+ { // -- L i s t F o r m a t
+ Handle<JSFunction> list_format_fun = InstallFunction(
+ isolate(), intl, "ListFormat", JS_INTL_LIST_FORMAT_TYPE,
+ JSListFormat::kSize, 0, factory->the_hole_value(),
+ Builtins::kListFormatConstructor);
+ list_format_fun->shared()->set_length(0);
+ list_format_fun->shared()->DontAdaptArguments();
+
+ SimpleInstallFunction(isolate(), list_format_fun, "supportedLocalesOf",
+ Builtins::kListFormatSupportedLocalesOf, 1, false);
+
+ // Setup %ListFormatPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(list_format_fun->instance_prototype()), isolate());
+
+ InstallToStringTag(isolate(), prototype, "Intl.ListFormat");
+
+ SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
+ Builtins::kListFormatPrototypeResolvedOptions, 0,
+ false);
+ SimpleInstallFunction(isolate(), prototype, "format",
+ Builtins::kListFormatPrototypeFormat, 1, false);
+ SimpleInstallFunction(isolate(), prototype, "formatToParts",
+ Builtins::kListFormatPrototypeFormatToParts, 1,
+ false);
+ }
}
#endif // V8_INTL_SUPPORT
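Intl.RelativeTimeFormat and Intl.ListFormat move from their harmony flags into the default V8_INTL_SUPPORT build; their flag-gated installers are deleted below. A minimal usage sketch, assuming ICU locale data is available:

    const rtf = new Intl.RelativeTimeFormat('en', { numeric: 'auto' });
    rtf.format(-1, 'day');  // 'yesterday'
    rtf.format(3, 'week');  // 'in 3 weeks'

    const lf = new Intl.ListFormat('en', { type: 'conjunction' });
    lf.format(['a', 'b', 'c']); // 'a, b, and c'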
@@ -4211,6 +4245,11 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_await_optimization)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_hashbang)
+#ifdef V8_INTL_SUPPORT
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_bigint)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_datetime_style)
+#endif // V8_INTL_SUPPORT
+
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
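The two new flags are empty stage-0 stubs: they gate behavior inside existing Intl builtins rather than installing anything new. Judging by the flag names (an assumption, not spelled out in this hunk), they correspond to:

    // --harmony-intl-bigint: Intl.NumberFormat accepts BigInt input.
    new Intl.NumberFormat('en').format(123456789123456789n);

    // --harmony-intl-datetime-style: dateStyle/timeStyle options.
    new Intl.DateTimeFormat('en', { dateStyle: 'full', timeStyle: 'short' });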
void Genesis::InitializeGlobal_harmony_global() {
@@ -4237,30 +4276,6 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
InstallToStringTag(isolate_, isolate()->atomics_object(), "Atomics");
}
-void Genesis::InitializeGlobal_harmony_array_flat() {
- if (!FLAG_harmony_array_flat) return;
- Handle<JSFunction> array_constructor(native_context()->array_function(),
- isolate());
- Handle<JSObject> array_prototype(
- JSObject::cast(array_constructor->instance_prototype()), isolate());
- SimpleInstallFunction(isolate(), array_prototype, "flat",
- Builtins::kArrayPrototypeFlat, 0, false);
- SimpleInstallFunction(isolate(), array_prototype, "flatMap",
- Builtins::kArrayPrototypeFlatMap, 1, false);
-}
-
-void Genesis::InitializeGlobal_harmony_symbol_description() {
- if (!FLAG_harmony_symbol_description) return;
-
- // Symbol.prototype.description
- Handle<JSFunction> symbol_fun(native_context()->symbol_function(), isolate());
- Handle<JSObject> symbol_prototype(
- JSObject::cast(symbol_fun->instance_prototype()), isolate());
- SimpleInstallGetter(isolate(), symbol_prototype,
- factory()->InternalizeUtf8String("description"),
- Builtins::kSymbolPrototypeDescriptionGetter, true);
-}
-
void Genesis::InitializeGlobal_harmony_string_matchall() {
if (!FLAG_harmony_string_matchall) return;
@@ -4329,55 +4344,43 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
{
- // Create %WeakFactoryPrototype%
- Handle<String> weak_factory_name = factory->WeakFactory_string();
- Handle<JSObject> weak_factory_prototype =
+ // Create %FinalizationGroupPrototype%
+ Handle<String> finalization_group_name =
+ factory->NewStringFromStaticChars("FinalizationGroup");
+ Handle<JSObject> finalization_group_prototype =
factory->NewJSObject(isolate()->object_function(), TENURED);
- // Create %WeakFactory%
- Handle<JSFunction> weak_factory_fun =
- CreateFunction(isolate(), weak_factory_name, JS_WEAK_FACTORY_TYPE,
- JSWeakFactory::kSize, 0, weak_factory_prototype,
- Builtins::kWeakFactoryConstructor);
+ // Create %FinalizationGroup%
+ Handle<JSFunction> finalization_group_fun = CreateFunction(
+ isolate(), finalization_group_name, JS_FINALIZATION_GROUP_TYPE,
+ JSFinalizationGroup::kSize, 0, finalization_group_prototype,
+ Builtins::kFinalizationGroupConstructor);
- weak_factory_fun->shared()->DontAdaptArguments();
- weak_factory_fun->shared()->set_length(1);
+ finalization_group_fun->shared()->DontAdaptArguments();
+ finalization_group_fun->shared()->set_length(1);
// Install the "constructor" property on the prototype.
- JSObject::AddProperty(isolate(), weak_factory_prototype,
- factory->constructor_string(), weak_factory_fun,
+ JSObject::AddProperty(isolate(), finalization_group_prototype,
+ factory->constructor_string(), finalization_group_fun,
DONT_ENUM);
- InstallToStringTag(isolate(), weak_factory_prototype, weak_factory_name);
+ InstallToStringTag(isolate(), finalization_group_prototype,
+ finalization_group_name);
+
+ JSObject::AddProperty(isolate(), global, finalization_group_name,
+ finalization_group_fun, DONT_ENUM);
- JSObject::AddProperty(isolate(), global, weak_factory_name,
- weak_factory_fun, DONT_ENUM);
+ SimpleInstallFunction(isolate(), finalization_group_prototype, "register",
+ Builtins::kFinalizationGroupRegister, 3, false);
- SimpleInstallFunction(isolate(), weak_factory_prototype, "makeCell",
- Builtins::kWeakFactoryMakeCell, 2, false);
+ SimpleInstallFunction(isolate(), finalization_group_prototype, "unregister",
+ Builtins::kFinalizationGroupUnregister, 1, false);
- SimpleInstallFunction(isolate(), weak_factory_prototype, "cleanupSome",
- Builtins::kWeakFactoryCleanupSome, 0, false);
+ SimpleInstallFunction(isolate(), finalization_group_prototype,
+ "cleanupSome",
+ Builtins::kFinalizationGroupCleanupSome, 0, false);
}
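This is the WeakFactory → FinalizationGroup rename from the updated WeakRefs proposal. Still behind --harmony-weak-refs, the renamed surface installed here looks like the following sketch (target is any object to watch):

    const fg = new FinalizationGroup(iterator => {
      for (const holdings of iterator) console.log('collected:', holdings);
    });
    const token = {};
    fg.register(target, 'my-holdings', token);
    fg.unregister(token);
    fg.cleanupSome(); // synchronously drain any pending cleanup entries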
{
- // Create %WeakCellPrototype%
- Handle<Map> weak_cell_map =
- factory->NewMap(JS_WEAK_CELL_TYPE, JSWeakCell::kSize);
- native_context()->set_js_weak_cell_map(*weak_cell_map);
-
- Handle<JSObject> weak_cell_prototype =
- factory->NewJSObject(isolate()->object_function(), TENURED);
- Map::SetPrototype(isolate(), weak_cell_map, weak_cell_prototype);
-
- InstallToStringTag(isolate(), weak_cell_prototype,
- factory->WeakCell_string());
-
- SimpleInstallGetter(isolate(), weak_cell_prototype,
- factory->InternalizeUtf8String("holdings"),
- Builtins::kWeakCellHoldingsGetter, false);
- SimpleInstallFunction(isolate(), weak_cell_prototype, "clear",
- Builtins::kWeakCellClear, 0, false);
-
// Create %WeakRefPrototype%
Handle<Map> weak_ref_map =
factory->NewMap(JS_WEAK_REF_TYPE, JSWeakRef::kSize);
@@ -4387,7 +4390,6 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
Handle<JSObject> weak_ref_prototype =
factory->NewJSObject(isolate()->object_function(), TENURED);
Map::SetPrototype(isolate(), weak_ref_map, weak_ref_prototype);
- JSObject::ForceSetPrototype(weak_ref_prototype, weak_cell_prototype);
InstallToStringTag(isolate(), weak_ref_prototype,
factory->WeakRef_string());
@@ -4414,7 +4416,7 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
}
{
- // Create cleanup iterator for JSWeakFactory.
+ // Create cleanup iterator for JSFinalizationGroup.
Handle<JSObject> iterator_prototype(
native_context()->initial_iterator_prototype(), isolate());
@@ -4423,55 +4425,22 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
JSObject::ForceSetPrototype(cleanup_iterator_prototype, iterator_prototype);
InstallToStringTag(isolate(), cleanup_iterator_prototype,
- "JSWeakFactoryCleanupIterator");
+ "JSFinalizationGroupCleanupIterator");
SimpleInstallFunction(isolate(), cleanup_iterator_prototype, "next",
- Builtins::kWeakFactoryCleanupIteratorNext, 0, true);
+ Builtins::kFinalizationGroupCleanupIteratorNext, 0,
+ true);
Handle<Map> cleanup_iterator_map =
- factory->NewMap(JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE,
- JSWeakFactoryCleanupIterator::kSize);
+ factory->NewMap(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE,
+ JSFinalizationGroupCleanupIterator::kSize);
Map::SetPrototype(isolate(), cleanup_iterator_map,
cleanup_iterator_prototype);
- native_context()->set_js_weak_factory_cleanup_iterator_map(
+ native_context()->set_js_finalization_group_cleanup_iterator_map(
*cleanup_iterator_map);
}
}
#ifdef V8_INTL_SUPPORT
-void Genesis::InitializeGlobal_harmony_intl_list_format() {
- if (!FLAG_harmony_intl_list_format) return;
- Handle<JSObject> intl = Handle<JSObject>::cast(
- JSReceiver::GetProperty(
- isolate(),
- Handle<JSReceiver>(native_context()->global_object(), isolate()),
- factory()->InternalizeUtf8String("Intl"))
- .ToHandleChecked());
-
- Handle<JSFunction> list_format_fun =
- InstallFunction(isolate(), intl, "ListFormat", JS_INTL_LIST_FORMAT_TYPE,
- JSListFormat::kSize, 0, factory()->the_hole_value(),
- Builtins::kListFormatConstructor);
- list_format_fun->shared()->set_length(0);
- list_format_fun->shared()->DontAdaptArguments();
-
- SimpleInstallFunction(isolate(), list_format_fun, "supportedLocalesOf",
- Builtins::kListFormatSupportedLocalesOf, 1, false);
-
- // Setup %ListFormatPrototype%.
- Handle<JSObject> prototype(
- JSObject::cast(list_format_fun->instance_prototype()), isolate());
-
- InstallToStringTag(isolate(), prototype, "Intl.ListFormat");
-
- SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
- Builtins::kListFormatPrototypeResolvedOptions, 0,
- false);
- SimpleInstallFunction(isolate(), prototype, "format",
- Builtins::kListFormatPrototypeFormat, 1, false);
- SimpleInstallFunction(isolate(), prototype, "formatToParts",
- Builtins::kListFormatPrototypeFormatToParts, 1, false);
-}
-
void Genesis::InitializeGlobal_harmony_locale() {
if (!FLAG_harmony_locale) return;
@@ -4536,43 +4505,6 @@ void Genesis::InitializeGlobal_harmony_locale() {
Builtins::kLocalePrototypeNumberingSystem, true);
}
-void Genesis::InitializeGlobal_harmony_intl_relative_time_format() {
- if (!FLAG_harmony_intl_relative_time_format) return;
- Handle<JSObject> intl = Handle<JSObject>::cast(
- JSReceiver::GetProperty(
- isolate(),
- Handle<JSReceiver>(native_context()->global_object(), isolate()),
- factory()->InternalizeUtf8String("Intl"))
- .ToHandleChecked());
-
- Handle<JSFunction> relative_time_format_fun = InstallFunction(
- isolate(), intl, "RelativeTimeFormat", JS_INTL_RELATIVE_TIME_FORMAT_TYPE,
- JSRelativeTimeFormat::kSize, 0, factory()->the_hole_value(),
- Builtins::kRelativeTimeFormatConstructor);
- relative_time_format_fun->shared()->set_length(0);
- relative_time_format_fun->shared()->DontAdaptArguments();
-
- SimpleInstallFunction(
- isolate(), relative_time_format_fun, "supportedLocalesOf",
- Builtins::kRelativeTimeFormatSupportedLocalesOf, 1, false);
-
- // Setup %RelativeTimeFormatPrototype%.
- Handle<JSObject> prototype(
- JSObject::cast(relative_time_format_fun->instance_prototype()),
- isolate());
-
- InstallToStringTag(isolate(), prototype, "Intl.RelativeTimeFormat");
-
- SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
- Builtins::kRelativeTimeFormatPrototypeResolvedOptions,
- 0, false);
- SimpleInstallFunction(isolate(), prototype, "format",
- Builtins::kRelativeTimeFormatPrototypeFormat, 2, false);
- SimpleInstallFunction(isolate(), prototype, "formatToParts",
- Builtins::kRelativeTimeFormatPrototypeFormatToParts, 2,
- false);
-}
-
void Genesis::InitializeGlobal_harmony_intl_segmenter() {
if (!FLAG_harmony_intl_segmenter) return;
Handle<JSObject> intl = Handle<JSObject>::cast(
@@ -4777,8 +4709,7 @@ bool Genesis::InstallNatives() {
// Set up the extras utils object as a shared container between native
// scripts and extras. (Extras consume things added there by native scripts.)
- Handle<JSObject> extras_utils =
- factory()->NewJSObject(isolate()->object_function());
+ Handle<JSObject> extras_utils = factory()->NewJSObjectWithNullProto();
native_context()->set_extras_utils_object(*extras_utils);
InstallInternalPackedArray(extras_utils, "InternalPackedArray");
@@ -5156,8 +5087,7 @@ bool Genesis::InstallNatives() {
bool Genesis::InstallExtraNatives() {
HandleScope scope(isolate());
- Handle<JSObject> extras_binding =
- factory()->NewJSObject(isolate()->object_function());
+ Handle<JSObject> extras_binding = factory()->NewJSObjectWithNullProto();
// binding.isTraceCategoryEnabled(category)
SimpleInstallFunction(isolate(), extras_binding, "isTraceCategoryEnabled",
@@ -5221,8 +5151,7 @@ bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
// Don't install extensions into the snapshot.
if (isolate_->serializer_enabled()) return true;
BootstrapperActive active(this);
- SaveContext saved_context(isolate_);
- isolate_->set_context(*native_context);
+ SaveAndSwitchContext saved_context(isolate_, *native_context);
return Genesis::InstallExtensions(isolate_, native_context, extensions) &&
Genesis::InstallSpecialObjects(isolate_, native_context);
}
@@ -5579,7 +5508,8 @@ Genesis::Genesis(
Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
size_t context_snapshot_index,
- v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer)
+ v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
+ v8::MicrotaskQueue* microtask_queue)
: isolate_(isolate), active_(isolate->bootstrapper()) {
RuntimeCallTimerScope rcs_timer(isolate, RuntimeCallCounterId::kGenesis);
result_ = Handle<Context>::null();
@@ -5675,7 +5605,9 @@ Genesis::Genesis(
}
}
- native_context()->set_microtask_queue(isolate->default_microtask_queue());
+ native_context()->set_microtask_queue(
+ microtask_queue ? static_cast<MicrotaskQueue*>(microtask_queue)
+ : isolate->default_microtask_queue());
// Install experimental natives. Do not include them into the
// snapshot as we should be able to turn them off at runtime. Re-installing
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 6deff78097..1667d6018a 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -46,7 +46,6 @@ class SourceCodeCache final {
class Bootstrapper final {
public:
static void InitializeOncePerProcess();
- static void TearDownExtensions();
// Requires: Heap::SetUp has been called.
void Initialize(bool create_heap_objects);
@@ -58,7 +57,8 @@ class Bootstrapper final {
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template,
v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
- v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
+ v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
+ v8::MicrotaskQueue* microtask_queue);
Handle<JSGlobalProxy> NewRemoteContext(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
@@ -109,13 +109,6 @@ class Bootstrapper final {
explicit Bootstrapper(Isolate* isolate);
- static v8::Extension* free_buffer_extension_;
- static v8::Extension* gc_extension_;
- static v8::Extension* externalize_string_extension_;
- static v8::Extension* statistics_extension_;
- static v8::Extension* trigger_failure_extension_;
- static v8::Extension* ignition_statistics_extension_;
-
DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
};
diff --git a/deps/v8/src/builtins/arguments.tq b/deps/v8/src/builtins/arguments.tq
index b758153155..3a6c26c000 100644
--- a/deps/v8/src/builtins/arguments.tq
+++ b/deps/v8/src/builtins/arguments.tq
@@ -28,7 +28,7 @@ namespace arguments {
const shared: SharedFunctionInfo = f.shared_function_info;
const formalParameterCount: bint =
- Convert<bint>(shared.formal_parameter_count);
+ Convert<bint>(Convert<int32>(shared.formal_parameter_count));
let argumentCount: bint = formalParameterCount;
const adaptor: ArgumentsAdaptorFrame =
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 6fdd93821d..6c5a59ff8c 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -11,6 +11,8 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
#include "src/objects/cell.h"
@@ -52,7 +54,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
@@ -218,8 +219,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
- __ tst(r4, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
- __ b(ne, &not_create_implicit_receiver);
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r4);
+ __ JumpIfIsInRange(r4, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
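The derived-constructor test switches from a dedicated flag bit to a range check over SharedFunctionInfo::FunctionKindBits. The distinction it preserves is the usual one:

    class Base { constructor() { this.tag = 'base'; } } // base: stub allocates the implicit receiver
    class Derived extends Base {
      constructor() { super(); } // derived: receiver is produced by super(), not allocated here
    }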
@@ -2306,111 +2308,157 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r3 : new target (passed through to callee)
// -----------------------------------
- Label invoke, dont_adapt_arguments, stack_overflow;
-
- Label enough, too_few;
+ Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
- __ cmp(r0, r2);
- __ b(lt, &too_few);
-
- Register scratch = r5;
-
- { // Enough parameters: actual >= expected
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r2, scratch, &stack_overflow);
-
- // Calculate copy start address into r0 and copy end address into r4.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
- // adjust for return address and receiver
- __ add(r0, r0, Operand(2 * kPointerSize));
- __ sub(r4, r0, Operand(r2, LSL, kPointerSizeLog2));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- // r4: copy end address
-
- Label copy;
- __ bind(&copy);
- __ ldr(scratch, MemOperand(r0, 0));
- __ push(scratch);
- __ cmp(r0, r4); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
-
- __ b(&invoke);
- }
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
+ __ tst(r4,
+ Operand(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask));
+ __ b(ne, &skip_adapt_arguments);
- { // Too few parameters: Actual < expected
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r2, scratch, &stack_overflow);
+ // -------------------------------------------
+ // Adapt arguments.
+ // -------------------------------------------
+ {
+ Label under_application, over_application, invoke;
+ __ cmp(r0, r2);
+ __ b(lt, &under_application);
- // Calculate copy start address into r0 and copy end address is fp.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
+ // Enough parameters: actual >= expected
+ __ bind(&over_application);
+ {
+ EnterArgumentsAdaptorFrame(masm);
+ Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+
+ // Calculate copy start address into r0 and copy end address into r4.
+ // r0: actual number of arguments as a smi
+ // r1: function
+ // r2: expected number of arguments
+ // r3: new target (passed through to callee)
+ __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
+ // adjust for return address and receiver
+ __ add(r0, r0, Operand(2 * kPointerSize));
+ __ sub(r4, r0, Operand(r2, LSL, kPointerSizeLog2));
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r0: copy start address
+ // r1: function
+ // r2: expected number of arguments
+ // r3: new target (passed through to callee)
+ // r4: copy end address
+
+ Label copy;
+ __ bind(&copy);
+ __ ldr(r5, MemOperand(r0, 0));
+ __ push(r5);
+ __ cmp(r0, r4); // Compare before moving to next argument.
+ __ sub(r0, r0, Operand(kPointerSize));
+ __ b(ne, &copy);
+
+ __ b(&invoke);
+ }
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
+ // Too few parameters: Actual < expected
+ __ bind(&under_application);
+ {
+ EnterArgumentsAdaptorFrame(masm);
+ Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+
+ // Calculate copy start address into r0 and copy end address is fp.
+ // r0: actual number of arguments as a smi
+ // r1: function
+ // r2: expected number of arguments
+ // r3: new target (passed through to callee)
+ __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r0: copy start address
+ // r1: function
+ // r2: expected number of arguments
+ // r3: new target (passed through to callee)
+ Label copy;
+ __ bind(&copy);
+
+ // Adjust load for return address and receiver.
+ __ ldr(r5, MemOperand(r0, 2 * kPointerSize));
+ __ push(r5);
+
+ __ cmp(r0, fp); // Compare before moving to next argument.
+ __ sub(r0, r0, Operand(kPointerSize));
+ __ b(ne, &copy);
+
+ // Fill the remaining expected arguments with undefined.
+ // r1: function
+ // r2: expected number of arguments
+ // r3: new target (passed through to callee)
+ __ LoadRoot(r5, RootIndex::kUndefinedValue);
+ __ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
+ // Adjust for frame.
+ __ sub(r4, r4,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
+
+ Label fill;
+ __ bind(&fill);
+ __ push(r5);
+ __ cmp(sp, r4);
+ __ b(ne, &fill);
+ }
- // Adjust load for return address and receiver.
- __ ldr(scratch, MemOperand(r0, 2 * kPointerSize));
- __ push(scratch);
+ // Call the entry point.
+ __ bind(&invoke);
+ __ mov(r0, r2);
+ // r0 : expected number of arguments
+ // r1 : function (passed through to callee)
+ // r3 : new target (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ CallCodeObject(r2);
- __ cmp(r0, fp); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
+ masm->pc_offset());
- // Fill the remaining expected arguments with undefined.
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
- // Adjust for frame.
- __ sub(r4, r4,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(scratch);
- __ cmp(sp, r4);
- __ b(ne, &fill);
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Jump(lr);
}
- // Call the entry point.
- __ bind(&invoke);
- __ mov(r0, r2);
- // r0 : expected number of arguments
- // r1 : function (passed through to callee)
- // r3 : new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ CallCodeObject(r2);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+ // -------------------------------------------
+ // Skip adapt arguments.
+ // -------------------------------------------
+ __ bind(&skip_adapt_arguments);
+ {
+ // The callee cannot observe the actual arguments, so it's safe to just
+ // pass the expected arguments by massaging the stack appropriately. See
+ // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
+ Label under_application, over_application;
+ __ cmp(r0, r2);
+ __ b(lt, &under_application);
+
+ __ bind(&over_application);
+ {
+ // Remove superfluous parameters from the stack.
+ __ sub(r4, r0, r2);
+ __ mov(r0, r2);
+ __ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+ __ b(&dont_adapt_arguments);
+ }
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Jump(lr);
+ __ bind(&under_application);
+ {
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ LoadRoot(r4, RootIndex::kUndefinedValue);
+ __ bind(&fill);
+ __ add(r0, r0, Operand(1));
+ __ push(r4);
+ __ cmp(r0, r2);
+ __ b(lt, &fill);
+ __ b(&dont_adapt_arguments);
+ }
+ }
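The skip path keys off SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit: when the callee can never observe its actual argument count (roughly, it uses neither the arguments object nor rest parameters; the exact predicate lives with the SharedFunctionInfo flags), the stack is trimmed or padded in place instead of building a full adaptor frame. For example:

    function add(a, b) { return a + b; } // never inspects its argument count
    add(1, 2, 3); // extra argument is dropped from the stack
    add(1);       // missing argument is padded with undefined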
// -------------------------------------------
  // Don't adapt arguments.
@@ -2707,80 +2755,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const LowDwVfpRegister double_base = d0;
- const LowDwVfpRegister double_exponent = d1;
- const LowDwVfpRegister double_result = d2;
- const LowDwVfpRegister double_scratch = d3;
- const SwVfpRegister single_scratch = s6;
- // Avoid using Registers r0-r3 as they may be needed when calling to C if the
- // ABI is softfloat.
- const Register integer_exponent = r4;
- const Register scratch = r5;
-
- Label call_runtime, done, int_exponent;
-
- // Detect integer exponents stored as double.
- __ TryDoubleToInt32Exact(integer_exponent, double_exponent, double_scratch);
- __ b(eq, &int_exponent);
-
- __ push(lr);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- }
- __ pop(lr);
- __ MovFromFloatResult(double_result);
- __ b(&done);
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
-
- __ vmov(double_scratch, double_base); // Back up base.
- __ vmov(double_result, Double(1.0), scratch);
-
- // Get absolute value of exponent.
- __ cmp(integer_exponent, Operand::Zero());
- __ mov(scratch, integer_exponent);
- __ rsb(scratch, integer_exponent, Operand::Zero(), LeaveCC, mi);
-
- Label while_true;
- __ bind(&while_true);
- __ mov(scratch, Operand(scratch, LSR, 1), SetCC);
- __ vmul(double_result, double_result, double_scratch, cs);
- __ vmul(double_scratch, double_scratch, double_scratch, ne);
- __ b(ne, &while_true);
-
- __ cmp(integer_exponent, Operand::Zero());
- __ b(ge, &done);
- __ vmov(double_scratch, Double(1.0), scratch);
- __ vdiv(double_result, double_scratch, double_result);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ VFPCompareAndSetFlags(double_result, 0.0);
- __ b(ne, &done);
-  // double_exponent may not contain the exponent value if the input was a
-  // smi. We set it with the exponent value before bailing out.
- __ vmov(single_scratch, integer_exponent);
- __ vcvt_f64_s32(double_exponent, single_scratch);
-
- // Returning or bailing out.
- __ push(lr);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- }
- __ pop(lr);
- __ MovFromFloatResult(double_result);
-
- __ bind(&done);
- __ Ret();
-}
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
@@ -2953,32 +2927,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- cp : kTargetContext
- // -- r1 : kApiFunctionAddress
- // -- r2 : kArgc
- // --
+ // -- cp : context
+ // -- r1 : api function address
+ // -- r2 : arguments count (not including the receiver)
+ // -- r3 : call data
+ // -- r0 : holder
// -- sp[0] : last argument
// -- ...
// -- sp[(argc - 1) * 4] : first argument
// -- sp[(argc + 0) * 4] : receiver
- // -- sp[(argc + 1) * 4] : kHolder
- // -- sp[(argc + 2) * 4] : kCallData
// -----------------------------------
Register api_function_address = r1;
Register argc = r2;
+ Register call_data = r3;
+ Register holder = r0;
Register scratch = r4;
- Register index = r5; // For indexing MemOperands.
-
- DCHECK(!AreAliased(api_function_address, argc, scratch, index));
-
- // Stack offsets (without argc).
- static constexpr int kReceiverOffset = 0;
- static constexpr int kHolderOffset = kReceiverOffset + 1;
- static constexpr int kCallDataOffset = kHolderOffset + 1;
- // Extra stack arguments are: the receiver, kHolder, kCallData.
- static constexpr int kExtraStackArgumentCount = 3;
+ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
typedef FunctionCallbackArguments FCA;
@@ -3004,24 +2970,22 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
__ sub(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
// kHolder.
- __ add(index, argc, Operand(FCA::kArgsLength + kHolderOffset));
- __ ldr(scratch, MemOperand(sp, index, LSL, kPointerSizeLog2));
- __ str(scratch, MemOperand(sp, 0 * kPointerSize));
+ __ str(holder, MemOperand(sp, 0 * kPointerSize));
// kIsolate.
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ str(scratch, MemOperand(sp, 1 * kPointerSize));
- // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ // kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ str(scratch, MemOperand(sp, 2 * kPointerSize));
__ str(scratch, MemOperand(sp, 3 * kPointerSize));
- __ str(scratch, MemOperand(sp, 5 * kPointerSize));
// kData.
- __ add(index, argc, Operand(FCA::kArgsLength + kCallDataOffset));
- __ ldr(scratch, MemOperand(sp, index, LSL, kPointerSizeLog2));
- __ str(scratch, MemOperand(sp, 4 * kPointerSize));
+ __ str(call_data, MemOperand(sp, 4 * kPointerSize));
+
+ // kNewTarget.
+ __ str(scratch, MemOperand(sp, 5 * kPointerSize));
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
@@ -3050,7 +3014,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
__ mov(scratch,
- Operand((FCA::kArgsLength + kExtraStackArgumentCount) * kPointerSize));
+ Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
__ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
__ str(scratch, MemOperand(sp, 4 * kPointerSize));
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 8fadff4768..9c7397897a 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -11,6 +11,8 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
#include "src/objects/cell.h"
@@ -50,7 +52,8 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
- __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadTaggedPointerField(
+ x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
__ Tst(x10, kSmiTagMask);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CompareObjectType(x10, x11, x12, MAP_TYPE);
@@ -140,7 +143,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Tbnz(slot_count_without_rounding, 0, &already_aligned);
// Store padding, if needed.
- __ Str(padreg, MemOperand(x2, 1 * kPointerSize));
+ __ Str(padreg, MemOperand(x2, 1 * kSystemPointerSize));
__ Bind(&already_aligned);
// Copy arguments to the expression stack.
@@ -155,26 +158,26 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- x0: number of arguments (untagged)
- // -- x1: constructor function
- // -- x3: new target
+ // -- x0: number of arguments (untagged)
+ // -- x1: constructor function
+ // -- x3: new target
// If argc is odd:
- // -- sp[0*kPointerSize]: argument n - 1
+ // -- sp[0*kSystemPointerSize]: argument n - 1
// -- ...
- // -- sp[(n-1)*kPointerSize]: argument 0
- // -- sp[(n+0)*kPointerSize]: the hole (receiver)
- // -- sp[(n+1)*kPointerSize]: padding
- // -- sp[(n+2)*kPointerSize]: padding
- // -- sp[(n+3)*kPointerSize]: number of arguments (tagged)
- // -- sp[(n+4)*kPointerSize]: context (pushed by FrameScope)
+ // -- sp[(n-1)*kSystemPointerSize]: argument 0
+ // -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
+ // -- sp[(n+1)*kSystemPointerSize]: padding
+ // -- sp[(n+2)*kSystemPointerSize]: padding
+ // -- sp[(n+3)*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[(n+4)*kSystemPointerSize]: context (pushed by FrameScope)
// If argc is even:
- // -- sp[0*kPointerSize]: argument n - 1
+ // -- sp[0*kSystemPointerSize]: argument n - 1
// -- ...
- // -- sp[(n-1)*kPointerSize]: argument 0
- // -- sp[(n+0)*kPointerSize]: the hole (receiver)
- // -- sp[(n+1)*kPointerSize]: padding
- // -- sp[(n+2)*kPointerSize]: number of arguments (tagged)
- // -- sp[(n+3)*kPointerSize]: context (pushed by FrameScope)
+ // -- sp[(n-1)*kSystemPointerSize]: argument 0
+ // -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
+ // -- sp[(n+1)*kSystemPointerSize]: padding
+ // -- sp[(n+2)*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[(n+3)*kSystemPointerSize]: context (pushed by FrameScope)
// -----------------------------------
// Call the function.
@@ -203,13 +206,13 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow.
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
- Label enough_stack_space;
+
__ LoadRoot(scratch, RootIndex::kRealStackLimit);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ Sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
- __ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ __ Cmp(scratch, Operand(num_args, LSL, kSystemPointerSizeLog2));
__ B(le, stack_overflow);
#if defined(V8_OS_WIN)
@@ -218,7 +221,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Label chkstk, chkstk_done;
Register probe = temps.AcquireX();
- __ Sub(scratch, sp, Operand(num_args, LSL, kPointerSizeLog2));
+ __ Sub(scratch, sp, Operand(num_args, LSL, kSystemPointerSizeLog2));
__ Mov(probe, sp);
// Loop start of stack probe.
@@ -265,18 +268,19 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(x0, x1, padreg, x3);
// ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: new target
- // -- sp[1*kPointerSize]: padding
- // -- x1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context (pushed by FrameScope)
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- x1 and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
// -----------------------------------
- __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
- __ TestAndBranchIfAnySet(w4,
- SharedFunctionInfo::IsDerivedConstructorBit::kMask,
- &not_create_implicit_receiver);
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
+ __ JumpIfIsInRange(w4, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -290,12 +294,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LoadRoot(x0, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
- // -- x0: receiver
- // -- Slot 4 / sp[0*kPointerSize]: new target
- // -- Slot 3 / sp[1*kPointerSize]: padding
- // -- Slot 2 / sp[2*kPointerSize]: constructor function
- // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[4*kPointerSize]: context
+ // -- x0: receiver
+ // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+ // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+ // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -304,7 +308,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Bind(&post_instantiation_deopt_entry);
// Restore new target from the top of the stack.
- __ Peek(x3, 0 * kPointerSize);
+ __ Peek(x3, 0 * kSystemPointerSize);
// Restore constructor function and argument count.
__ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
@@ -326,15 +330,16 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(x0, x0);
// ----------- S t a t e -------------
- // -- x3: new target
- // -- x12: number of arguments (untagged)
- // -- sp[0*kPointerSize]: implicit receiver (overwrite if argc odd)
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: implicit receiver
- // -- sp[3*kPointerSize]: padding
- // -- x1 and sp[4*kPointerSize]: constructor function
- // -- sp[5*kPointerSize]: number of arguments (tagged)
- // -- sp[6*kPointerSize]: context
+ // -- x3: new target
+ // -- x12: number of arguments (untagged)
+ // -- sp[0*kSystemPointerSize]: implicit receiver (overwrite if argc
+ // odd)
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: implicit receiver
+ // -- sp[3*kSystemPointerSize]: padding
+ // -- x1 and sp[4*kSystemPointerSize]: constructor function
+ // -- sp[5*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[6*kSystemPointerSize]: context
// -----------------------------------
// Round the number of arguments down to the next even number, and claim
@@ -373,11 +378,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ InvokeFunction(x1, x3, actual, CALL_FUNCTION);
// ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: padding
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -414,7 +419,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ Bind(&use_receiver);
- __ Peek(x0, 0 * kPointerSize);
+ __ Peek(x0, 0 * kSystemPointerSize);
__ CompareRoot(x0, RootIndex::kTheHoleValue);
__ B(eq, &do_throw);
@@ -443,8 +448,9 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
Label done;
__ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
__ B(ne, &done);
- __ Ldr(sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ LoadTaggedPointerField(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ Bind(&done);
}
@@ -458,13 +464,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(x1);
// Store input value into generator object.
- __ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ StoreTaggedField(
+ x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
- __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
- __ Ldr(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(cp,
+ FieldMemOperand(x4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -491,7 +500,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
- __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(w10, FieldMemOperand(
x10, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -502,11 +512,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store padding (which might be replaced by the receiver).
__ Sub(x11, x11, 1);
- __ Poke(padreg, Operand(x11, LSL, kPointerSizeLog2));
+ __ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
// Poke receiver into highest claimed slot.
- __ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
- __ Poke(x5, Operand(x10, LSL, kPointerSizeLog2));
+ __ LoadTaggedPointerField(
+ x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
+ __ Poke(x5, Operand(x10, LSL, kSystemPointerSizeLog2));
// ----------- S t a t e -------------
// -- x1 : the JSGeneratorObject to resume
@@ -520,8 +531,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Copy the function arguments from the generator object's register file.
- __ Ldr(x5,
- FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
+ __ LoadTaggedPointerField(
+ x5,
+ FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done;
__ Cbz(x10, &done);
@@ -529,9 +541,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Bind(&loop);
__ Sub(x10, x10, 1);
- __ Add(x11, x5, Operand(x12, LSL, kPointerSizeLog2));
- __ Ldr(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
- __ Poke(x11, Operand(x10, LSL, kPointerSizeLog2));
+ __ Add(x11, x5, Operand(x12, LSL, kTaggedSizeLog2));
+ __ LoadAnyTaggedField(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
+ __ Poke(x11, Operand(x10, LSL, kSystemPointerSizeLog2));
__ Add(x12, x12, 1);
__ Cbnz(x10, &loop);
__ Bind(&done);
@@ -539,8 +551,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
- __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, x3, x0);
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
@@ -548,7 +562,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
- __ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(w0, FieldMemOperand(
x0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
@@ -557,7 +572,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Mov(x3, x1);
__ Mov(x1, x4);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ JumpCodeObject(x2);
}
@@ -569,7 +584,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(x1, padreg, x4, x5);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(padreg, x1);
- __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -579,7 +595,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(x1, padreg);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(padreg, x1);
- __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -650,7 +667,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
- Label non_outermost_js, done;
+ Label done;
ExternalReference js_entry_sp = ExternalReference::Create(
IsolateAddressId::kJSEntrySPAddress, masm->isolate());
__ Mov(x10, js_entry_sp);
@@ -709,9 +726,9 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Bind(&invoke);
// Push new stack handler.
- static_assert(StackHandlerConstants::kSize == 2 * kPointerSize,
+ static_assert(StackHandlerConstants::kSize == 2 * kSystemPointerSize,
"Unexpected offset for StackHandlerConstants::kSize");
- static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
+ static_assert(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize,
"Unexpected offset for StackHandlerConstants::kNextOffset");
// Link the current handler as the next handler.
@@ -740,7 +757,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
// Pop the stack handler and unlink this frame from the handler chain.
- static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
+ static_assert(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize,
"Unexpected offset for StackHandlerConstants::kNextOffset");
__ Pop(x10, padreg);
__ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
@@ -763,7 +780,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
Label non_outermost_js_2;
{
Register c_entry_fp = x11;
- __ PeekPair(x10, c_entry_fp, 1 * kPointerSize);
+ __ PeekPair(x10, c_entry_fp, 1 * kSystemPointerSize);
__ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
__ B(ne, &non_outermost_js_2);
__ Mov(x12, js_entry_sp);
@@ -777,9 +794,10 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
}
// Reset the stack to the callee saved registers.
- static_assert(EntryFrameConstants::kFixedFrameSize % (2 * kPointerSize) == 0,
- "Size of entry frame is not a multiple of 16 bytes");
- __ Drop(EntryFrameConstants::kFixedFrameSize / kPointerSize);
+ static_assert(
+ EntryFrameConstants::kFixedFrameSize % (2 * kSystemPointerSize) == 0,
+ "Size of entry frame is not a multiple of 16 bytes");
+ __ Drop(EntryFrameConstants::kFixedFrameSize / kSystemPointerSize);
// Restore the callee-saved registers and return.
__ PopCalleeSavedRegisters();
__ Ret();
@@ -848,7 +866,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Store padding (which might be overwritten).
__ SlotAddress(scratch, slots_to_claim);
- __ Str(padreg, MemOperand(scratch, -kPointerSize));
+ __ Str(padreg, MemOperand(scratch, -kSystemPointerSize));
// Store receiver and function on the stack.
__ SlotAddress(scratch, argc);
@@ -867,11 +885,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Bind(&loop);
// Load the handle.
- __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
+ __ Ldr(x11, MemOperand(argv, kSystemPointerSize, PostIndex));
// Dereference the handle.
__ Ldr(x11, MemOperand(x11));
// Poke the result into the stack.
- __ Str(x11, MemOperand(scratch, -kPointerSize, PreIndex));
+ __ Str(x11, MemOperand(scratch, -kSystemPointerSize, PreIndex));
// Loop if we've not reached the end of copy marker.
__ Cmp(sp, scratch);
__ B(lt, &loop);
@@ -936,7 +954,8 @@ static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
// Store code entry in the closure.
- __ Str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
+ __ StoreTaggedField(optimized_code,
+ FieldMemOperand(closure, JSFunction::kCodeOffset));
__ Mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
@@ -957,10 +976,10 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
// Drop receiver + arguments.
if (__ emit_debug_code()) {
- __ Tst(args_size, kPointerSize - 1);
+ __ Tst(args_size, kSystemPointerSize - 1);
__ Check(eq, AbortReason::kUnexpectedValue);
}
- __ Lsr(args_size, args_size, kPointerSizeLog2);
+ __ Lsr(args_size, args_size, kSystemPointerSizeLog2);
__ DropArguments(args_size);
}
@@ -993,7 +1012,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register closure = x1;
Register optimized_code_entry = scratch1;
- __ Ldr(
+ __ LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
@@ -1046,8 +1065,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
- __ Ldr(scratch2, FieldMemOperand(optimized_code_entry,
- Code::kCodeDataContainerOffset));
+ __ LoadTaggedPointerField(
+ scratch2,
+ FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Ldr(
scratch2,
FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
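The two loads walk Code -> CodeDataContainer -> kind-specific flags so the marked-for-deoptimization bit can be tested before tail-calling cached optimized code. A sketch of the decision, with the accessor and bit-field names illustrative only:

    // Sketch: is the cached optimized code still safe to enter?
    bool IsMarkedForDeopt(Code code) {
      CodeDataContainer container = code.code_data_container();  // hypothetical accessor
      return MarkedForDeoptimizationBit::decode(                 // hypothetical bit field
          container.kind_specific_flags());
    }

If this returns true, the builtin falls through to the runtime to clear the slot instead of jumping to stale code.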
@@ -1149,9 +1169,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, x11);
// The bytecode array could have been flushed from the shared function info,
@@ -1162,9 +1184,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(ne, &compile_lazy);
// Load the feedback vector from the closure.
- __ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
@@ -1224,7 +1248,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// register in the register file.
Label loop_header;
__ LoadRoot(x10, RootIndex::kUndefinedValue);
- __ Lsr(x11, x11, kPointerSizeLog2);
+ __ Lsr(x11, x11, kSystemPointerSizeLog2);
// Round up the number of registers to a multiple of 2, to align the stack
// to 16 bytes.
__ Add(x11, x11, 1);
@@ -1241,7 +1265,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeArrayRegister,
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ Cbz(x10, &no_incoming_new_target_or_generator_register);
- __ Str(x3, MemOperand(fp, x10, LSL, kPointerSizeLog2));
+ __ Str(x3, MemOperand(fp, x10, LSL, kSystemPointerSizeLog2));
__ Bind(&no_incoming_new_target_or_generator_register);
// Load accumulator with undefined.
@@ -1257,11 +1281,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
#if defined(V8_OS_WIN)
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x23, LSL, kPointerSizeLog2));
+ __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2));
#else
__ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Mov(x1, Operand(x18, LSL, kSystemPointerSizeLog2));
#endif
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
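Interpreter dispatch is a single table lookup: Ldrb fetches the current bytecode, the shift scales it to a table index, and the final Ldr loads the handler's code start. Equivalently:

    #include <cstddef>
    #include <cstdint>

    // One dispatch step: dispatch_table holds one handler address per bytecode.
    uintptr_t Dispatch(const uint8_t* bytecodes, size_t offset,
                       const uintptr_t* dispatch_table) {
      uint8_t bytecode = bytecodes[offset];
      return dispatch_table[bytecode];  // table + (bytecode << log2(pointer size))
    }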
@@ -1342,7 +1366,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
__ Sub(scratch, slots_to_claim, 1);
- __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2));
+ __ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
}
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
@@ -1359,12 +1383,12 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
}
__ Sub(last_arg_addr, first_arg_index,
- Operand(slots_to_copy, LSL, kPointerSizeLog2));
- __ Add(last_arg_addr, last_arg_addr, kPointerSize);
+ Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
+ __ Add(last_arg_addr, last_arg_addr, kSystemPointerSize);
// Load the final spread argument into spread_arg_out, if necessary.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kPointerSize));
+ __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kSystemPointerSize));
}
// Copy the rest of the arguments.
@@ -1458,8 +1482,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(x1, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
@@ -1508,11 +1534,11 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
#if defined(V8_OS_WIN)
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x23, LSL, kPointerSizeLog2));
+ __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2));
#else
__ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Mov(x1, Operand(x18, LSL, kSystemPointerSizeLog2));
#endif
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
@@ -1610,7 +1636,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// Three arguments.
__ Bind(&three_args);
__ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
- 2 * kPointerSize));
+ 2 * kSystemPointerSize));
__ Push(new_target, scratch3, scratch1, scratch2);
__ Bind(&args_done);
@@ -1622,7 +1648,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(x0, &failed);
// Peek the argument count from the stack, untagging at the same time.
- __ SmiUntag(x4, MemOperand(sp, 3 * kPointerSize));
+ __ SmiUntag(x4, MemOperand(sp, 3 * kSystemPointerSize));
__ Drop(4);
scope.GenerateLeaveFrame();
@@ -1637,7 +1663,8 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
- __ Ldr(x4, FieldMemOperand(new_target, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(new_target, JSFunction::kCodeOffset));
__ JumpCodeObject(x4);
}
@@ -1651,7 +1678,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
(allocatable_register_count +
BuiltinContinuationFrameConstants::PaddingSlotCount(
allocatable_register_count)) *
- kPointerSize;
+ kSystemPointerSize;
// Set up frame pointer.
__ Add(fp, sp, frame_size);
@@ -1665,14 +1692,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Restore registers in pairs.
int offset = -BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
- allocatable_register_count * kPointerSize;
+ allocatable_register_count * kSystemPointerSize;
for (int i = allocatable_register_count - 1; i > 0; i -= 2) {
int code1 = config->GetAllocatableGeneralCode(i);
int code2 = config->GetAllocatableGeneralCode(i - 1);
Register reg1 = Register::from_code(code1);
Register reg2 = Register::from_code(code2);
__ Ldp(reg1, reg2, MemOperand(fp, offset));
- offset += 2 * kPointerSize;
+ offset += 2 * kSystemPointerSize;
}
// Restore first register separately, if number of registers is odd.
@@ -1753,13 +1780,14 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x0, Code::kDeoptimizationDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ SmiUntag(x1,
- FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex)));
+ __ SmiUntagField(
+ x1, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
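As the comment spells out, the OSR entry is plain pointer arithmetic on the code object; with the heap-object tag folded away by FieldMemOperand it reduces to:

    #include <cstddef>
    #include <cstdint>

    // entry = start of the code object + instruction header + OSR offset.
    uintptr_t OsrEntry(uintptr_t code_obj, size_t header_size, int osr_pc_offset) {
      return code_obj + header_size + osr_pc_offset;  // header_size: Code::kHeaderSize
    }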
@@ -1817,7 +1845,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Mov(saved_argc, argc);
__ Ldp(arg_array, this_arg, MemOperand(scratch)); // Overwrites argc.
- __ Ldr(receiver, MemOperand(scratch, 2 * kPointerSize));
+ __ Ldr(receiver, MemOperand(scratch, 2 * kSystemPointerSize));
__ Drop(2); // Drop the undefined values we pushed above.
__ DropArguments(saved_argc, TurboAssembler::kCountExcludesReceiver);
@@ -1889,7 +1917,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Copy arguments two slots higher in memory, overwriting the original
// receiver and padding.
{
- Label loop;
Register copy_from = x10;
Register copy_to = x11;
Register count = x12;
@@ -1897,7 +1924,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Mov(count, argc);
__ Sub(last_arg_slot, argc, 1);
__ SlotAddress(copy_from, last_arg_slot);
- __ Add(copy_to, copy_from, 2 * kPointerSize);
+ __ Add(copy_to, copy_from, 2 * kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, count,
TurboAssembler::kSrcLessThanDst);
// Drop two slots. These are copies of the last two arguments.
@@ -1964,8 +1991,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register scratch = x10;
__ SlotAddress(scratch, argc);
__ Ldp(arguments_list, this_argument,
- MemOperand(scratch, 1 * kPointerSize));
- __ Ldr(target, MemOperand(scratch, 3 * kPointerSize));
+ MemOperand(scratch, 1 * kSystemPointerSize));
+ __ Ldr(target, MemOperand(scratch, 3 * kSystemPointerSize));
__ Drop(4); // Drop the undefined values we pushed above.
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
@@ -2041,8 +2068,9 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// slot (argc + 1).
Register scratch = x10;
__ SlotAddress(scratch, argc);
- __ Ldp(new_target, arguments_list, MemOperand(scratch, 1 * kPointerSize));
- __ Ldr(target, MemOperand(scratch, 3 * kPointerSize));
+ __ Ldp(new_target, arguments_list,
+ MemOperand(scratch, 1 * kSystemPointerSize));
+ __ Ldr(target, MemOperand(scratch, 3 * kSystemPointerSize));
__ Cmp(argc, 2);
__ CmovX(new_target, target, ls); // target if argc <= 2.
@@ -2153,7 +2181,8 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
__ Mov(dst, len); // CopySlots will corrupt dst.
__ CopySlots(dst, src, slots_to_copy);
__ Add(scratch, argc, 1);
- __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2)); // Store padding.
+ __ Poke(padreg,
+ Operand(scratch, LSL, kSystemPointerSizeLog2)); // Store padding.
}
__ Bind(&exit);
@@ -2175,7 +2204,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
Label ok, fail;
__ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
- __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
__ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
__ Cmp(x13, FIXED_ARRAY_TYPE);
__ B(eq, &ok);
@@ -2218,10 +2247,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// TODO(all): Consider using Ldp and Stp.
__ Bind(&loop);
__ Sub(len, len, 1);
- __ Ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
+ __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
__ Cmp(scratch, the_hole_value);
__ Csel(scratch, scratch, undefined_value, ne);
- __ Poke(scratch, Operand(len, LSL, kPointerSizeLog2));
+ __ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2));
__ Cbnz(len, &loop);
}
__ Bind(&done);
@@ -2251,7 +2280,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(x3, &new_target_not_constructor);
- __ Ldr(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
__ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
__ TestAndBranchIfAnySet(x5, Map::IsConstructorBit::kMask,
&new_target_constructor);
@@ -2282,8 +2311,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ Ldr(scratch,
MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(scratch,
- FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ scratch,
+ FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(len,
FieldMemOperand(scratch,
SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2311,7 +2341,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Push varargs.
{
Register dst = x13;
- __ Add(args_fp, args_fp, 2 * kPointerSize);
+ __ Add(args_fp, args_fp, 2 * kSystemPointerSize);
__ SlotAddress(dst, 0);
__ CopyDoubleWords(dst, args_fp, len);
}
@@ -2337,7 +2367,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that function is not a "classConstructor".
Label class_constructor;
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
&class_constructor);
@@ -2345,7 +2376,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(cp,
+ FieldMemOperand(x1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ TestAndBranchIfAnySet(w3,
@@ -2396,7 +2428,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(cp, x1, x0, padreg);
__ SmiUntag(x0);
}
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Bind(&convert_receiver);
}
__ Poke(x3, Operand(x0, LSL, kXRegSizeLog2));
@@ -2439,10 +2472,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into x2 and length of that into x4.
Label no_bound_arguments;
- __ Ldr(bound_argv,
- FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
- __ SmiUntag(bound_argc,
- FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
+ __ LoadTaggedPointerField(
+ bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntagField(bound_argc,
+ FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
__ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
@@ -2466,7 +2499,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// here which will cause x10 to become negative.
__ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
- __ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
+ __ Cmp(x10, Operand(bound_argc, LSL, kSystemPointerSizeLog2));
__ B(hs, &done);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&done);
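The check computes the space left between sp and the real stack limit and compares it, unsigned, against the bytes the bound arguments will need; hs (unsigned greater-or-equal) branches past the throw when they fit. In effect:

    #include <cstdint>

    // Sketch: will bound_argc extra full-width slots fit on the stack?
    bool FitsOnStack(uintptr_t sp, uintptr_t real_stack_limit,
                     uintptr_t bound_argc) {
      uintptr_t remaining = sp - real_stack_limit;  // bytes left
      return remaining >= (bound_argc << 3);        // 3 = kSystemPointerSizeLog2
    }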
@@ -2483,7 +2516,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load receiver before we start moving the arguments. We will only
// need this in this path because the bound arguments are odd.
Register receiver = x14;
- __ Peek(receiver, Operand(argc, LSL, kPointerSizeLog2));
+ __ Peek(receiver, Operand(argc, LSL, kSystemPointerSizeLog2));
// Claim space we need. If argc is even, slots_to_claim = bound_argc + 1,
// as we need one extra padding slot. If argc is odd, we know that the
@@ -2522,12 +2555,12 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SlotAddress(copy_to, argc);
__ Add(argc, argc,
bound_argc); // Update argc to include bound arguments.
- __ Lsl(counter, bound_argc, kPointerSizeLog2);
+ __ Lsl(counter, bound_argc, kSystemPointerSizeLog2);
__ Bind(&loop);
- __ Sub(counter, counter, kPointerSize);
- __ Ldr(scratch, MemOperand(bound_argv, counter));
+ __ Sub(counter, counter, kTaggedSize);
+ __ LoadAnyTaggedField(scratch, MemOperand(bound_argv, counter));
// Poke into claimed area of stack.
- __ Str(scratch, MemOperand(copy_to, kPointerSize, PostIndex));
+ __ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
__ Cbnz(counter, &loop);
}
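Note the two strides in this loop: the counter walks the FixedArray backing store in kTaggedSize steps (the on-heap element width), while the destination advances one full kSystemPointerSize stack slot per store. A sketch with illustrative load/store helpers:

    // Sketch: copy bound args from the array (last element first) into
    // already-claimed stack slots, widening each value to a full slot.
    void CopyBoundArgs(uintptr_t elements, intptr_t bound_argc, uintptr_t copy_to) {
      for (intptr_t off = bound_argc * kTaggedSize; off > 0;) {
        off -= kTaggedSize;                               // heap stride
        uintptr_t value = LoadAnyTagged(elements + off);  // hypothetical helper
        StoreWord(copy_to, value);                        // hypothetical helper
        copy_to += kSystemPointerSize;                    // stack stride
      }
    }

With pointer compression disabled the two strides are both 8 bytes, so this matches the old kPointerSize code exactly.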
@@ -2536,8 +2569,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
__ Tbz(bound_argc, 0, &done);
// Store receiver.
- __ Add(scratch, sp, Operand(total_argc, LSL, kPointerSizeLog2));
- __ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
+ __ Add(scratch, sp, Operand(total_argc, LSL, kSystemPointerSizeLog2));
+ __ Str(receiver, MemOperand(scratch, kSystemPointerSize, PostIndex));
__ Tbnz(total_argc, 0, &done);
// Store padding.
__ Str(padreg, MemOperand(scratch));
@@ -2559,14 +2592,16 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(x1);
// Patch the receiver to [[BoundThis]].
- __ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
- __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
+ __ LoadAnyTaggedField(x10,
+ FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
+ __ Poke(x10, Operand(x0, LSL, kSystemPointerSizeLog2));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
@@ -2634,7 +2669,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAllClear(
w4, SharedFunctionInfo::ConstructAsBuiltinBit::kMask, &call_generic_stub);
@@ -2665,13 +2701,14 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label done;
__ Cmp(x1, x3);
__ B(ne, &done);
- __ Ldr(x3,
- FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Bind(&done);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2689,7 +2726,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(x1, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ Ldr(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
__ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
__ TestAndBranchIfAllClear(x2, Map::IsConstructorBit::kMask,
&non_constructor);
@@ -2792,123 +2829,198 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
+ Register argc_actual_minus_expected = x5;
- Label dont_adapt_arguments, stack_overflow;
+ Label create_adaptor_frame, dont_adapt_arguments, stack_overflow,
+ adapt_arguments_in_place;
- Label enough_arguments;
__ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ B(eq, &dont_adapt_arguments);
- EnterArgumentsAdaptorFrame(masm);
-
- Register copy_from = x10;
- Register copy_end = x11;
- Register copy_to = x12;
- Register argc_to_copy = x13;
- Register argc_unused_actual = x14;
- Register scratch1 = x15, scratch2 = x16;
-
- // We need slots for the expected arguments, with one extra slot for the
- // receiver.
- __ RecordComment("-- Stack check --");
- __ Add(scratch1, argc_expected, 1);
- Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
-
- // Round up number of slots to be even, to maintain stack alignment.
- __ RecordComment("-- Allocate callee frame slots --");
- __ Add(scratch1, scratch1, 1);
- __ Bic(scratch1, scratch1, 1);
- __ Claim(scratch1, kPointerSize);
-
- __ Mov(copy_to, sp);
-
- // Preparing the expected arguments is done in four steps, the order of
- // which is chosen so we can use LDP/STP and avoid conditional branches as
- // much as possible.
-
- // (1) If we don't have enough arguments, fill the remaining expected
- // arguments with undefined, otherwise skip this step.
- __ Subs(scratch1, argc_actual, argc_expected);
- __ Csel(argc_unused_actual, xzr, scratch1, lt);
- __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
- __ B(ge, &enough_arguments);
-
- // Fill the remaining expected arguments with undefined.
- __ RecordComment("-- Fill slots with undefined --");
- __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kPointerSizeLog2));
- __ LoadRoot(scratch1, RootIndex::kUndefinedValue);
-
- Label fill;
- __ Bind(&fill);
- __ Stp(scratch1, scratch1, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
- // We might write one slot extra, but that is ok because we'll overwrite it
- // below.
- __ Cmp(copy_end, copy_to);
- __ B(hi, &fill);
-
- // Correct copy_to, for the case where we wrote one additional slot.
- __ Mov(copy_to, copy_end);
-
- __ Bind(&enough_arguments);
- // (2) Copy all of the actual arguments, or as many as we need.
- Label skip_copy;
- __ RecordComment("-- Copy actual arguments --");
- __ Cbz(argc_to_copy, &skip_copy);
- __ Add(copy_end, copy_to, Operand(argc_to_copy, LSL, kPointerSizeLog2));
- __ Add(copy_from, fp, 2 * kPointerSize);
- // Adjust for difference between actual and expected arguments.
- __ Add(copy_from, copy_from,
- Operand(argc_unused_actual, LSL, kPointerSizeLog2));
-
- // Copy arguments. We use load/store pair instructions, so we might overshoot
- // by one slot, but since we copy the arguments starting from the last one, if
- // we do overshoot, the extra slot will be overwritten later by the receiver.
- Label copy_2_by_2;
- __ Bind(&copy_2_by_2);
- __ Ldp(scratch1, scratch2,
- MemOperand(copy_from, 2 * kPointerSize, PostIndex));
- __ Stp(scratch1, scratch2, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
- __ Cmp(copy_end, copy_to);
- __ B(hi, &copy_2_by_2);
- __ Bind(&skip_copy);
-
- // (3) Store padding, which might be overwritten by the receiver, if it is not
- // necessary.
- __ RecordComment("-- Store padding --");
- __ Str(padreg, MemOperand(fp, -5 * kPointerSize));
-
- // (4) Store receiver. Calculate target address from the sp to avoid checking
- // for padding. Storing the receiver will overwrite either the extra slot
- // we copied with the actual arguments, if we did copy one, or the padding we
- // stored above.
- __ RecordComment("-- Store receiver --");
- __ Add(copy_from, fp, 2 * kPointerSize);
- __ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
- __ Str(scratch1, MemOperand(sp, argc_expected, LSL, kPointerSizeLog2));
-
- // Arguments have been adapted. Now call the entry point.
- __ RecordComment("-- Call entry point --");
- __ Mov(argc_actual, argc_expected);
- // x0 : expected number of arguments
- // x1 : function (passed through to callee)
- // x3 : new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ CallCodeObject(x2);
+ // When the difference between argc_actual and argc_expected is odd, we
+ // create an arguments adaptor frame.
+ __ Sub(argc_actual_minus_expected, argc_actual, argc_expected);
+ __ Tbnz(argc_actual_minus_expected, 0, &create_adaptor_frame);
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+ // When the difference is even, check if we are allowed to adjust the
+ // existing frame instead.
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
+ __ TestAndBranchIfAnySet(
+ w4, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
+ &adapt_arguments_in_place);
+
+ // -------------------------------------------
+ // Create an arguments adaptor frame.
+ // -------------------------------------------
+ __ Bind(&create_adaptor_frame);
+ {
+ __ RecordComment("-- Adapt arguments --");
+ EnterArgumentsAdaptorFrame(masm);
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Ret();
+ Register copy_from = x10;
+ Register copy_end = x11;
+ Register copy_to = x12;
+ Register argc_to_copy = x13;
+ Register argc_unused_actual = x14;
+ Register scratch1 = x15, scratch2 = x16;
+
+ // We need slots for the expected arguments, with one extra slot for the
+ // receiver.
+ __ RecordComment("-- Stack check --");
+ __ Add(scratch1, argc_expected, 1);
+ Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
+
+ // Round up number of slots to be even, to maintain stack alignment.
+ __ RecordComment("-- Allocate callee frame slots --");
+ __ Add(scratch1, scratch1, 1);
+ __ Bic(scratch1, scratch1, 1);
+ __ Claim(scratch1, kSystemPointerSize);
+
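sp must stay 16-byte aligned on arm64, so with 8-byte slots the claimed count has to be even. The Add-then-Bic pair rounds up: adding 1 and clearing bit 0 maps 5 to 6 and leaves 6 unchanged. Equivalently:

    // Round a slot count up to the next even number (16-byte sp alignment).
    int RoundUpToEven(int slots) { return (slots + 1) & ~1; }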
+ __ Mov(copy_to, sp);
+
+ // Preparing the expected arguments is done in four steps, the order of
+ // which is chosen so we can use LDP/STP and avoid conditional branches as
+ // much as possible.
+
+ // (1) If we don't have enough arguments, fill the remaining expected
+ // arguments with undefined, otherwise skip this step.
+ Label enough_arguments;
+ __ Subs(scratch1, argc_actual, argc_expected);
+ __ Csel(argc_unused_actual, xzr, scratch1, lt);
+ __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
+ __ B(ge, &enough_arguments);
+
+ // Fill the remaining expected arguments with undefined.
+ __ RecordComment("-- Fill slots with undefined --");
+ __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kSystemPointerSizeLog2));
+ __ LoadRoot(scratch1, RootIndex::kUndefinedValue);
+
+ Label fill;
+ __ Bind(&fill);
+ __ Stp(scratch1, scratch1,
+ MemOperand(copy_to, 2 * kSystemPointerSize, PostIndex));
+ // We might write one slot extra, but that is ok because we'll overwrite it
+ // below.
+ __ Cmp(copy_end, copy_to);
+ __ B(hi, &fill);
+
+ // Correct copy_to, for the case where we wrote one additional slot.
+ __ Mov(copy_to, copy_end);
+
+ __ Bind(&enough_arguments);
+ // (2) Copy all of the actual arguments, or as many as we need.
+ Label skip_copy;
+ __ RecordComment("-- Copy actual arguments --");
+ __ Cbz(argc_to_copy, &skip_copy);
+ __ Add(copy_end, copy_to,
+ Operand(argc_to_copy, LSL, kSystemPointerSizeLog2));
+ __ Add(copy_from, fp, 2 * kSystemPointerSize);
+ // Adjust for difference between actual and expected arguments.
+ __ Add(copy_from, copy_from,
+ Operand(argc_unused_actual, LSL, kSystemPointerSizeLog2));
+
+ // Copy arguments. We use load/store pair instructions, so we might
+ // overshoot by one slot, but since we copy the arguments starting from the
+ // last one, if we do overshoot, the extra slot will be overwritten later by
+ // the receiver.
+ Label copy_2_by_2;
+ __ Bind(&copy_2_by_2);
+ __ Ldp(scratch1, scratch2,
+ MemOperand(copy_from, 2 * kSystemPointerSize, PostIndex));
+ __ Stp(scratch1, scratch2,
+ MemOperand(copy_to, 2 * kSystemPointerSize, PostIndex));
+ __ Cmp(copy_end, copy_to);
+ __ B(hi, &copy_2_by_2);
+ __ Bind(&skip_copy);
+
+ // (3) Store padding, which might be overwritten by the receiver, if it is
+ // not necessary.
+ __ RecordComment("-- Store padding --");
+ __ Str(padreg, MemOperand(fp, -5 * kSystemPointerSize));
+
+ // (4) Store receiver. Calculate target address from the sp to avoid
+ // checking for padding. Storing the receiver will overwrite either the
+ // extra slot we copied with the actual arguments, if we did copy one, or
+ // the padding we stored above.
+ __ RecordComment("-- Store receiver --");
+ __ Add(copy_from, fp, 2 * kSystemPointerSize);
+ __ Ldr(scratch1,
+ MemOperand(copy_from, argc_actual, LSL, kSystemPointerSizeLog2));
+ __ Str(scratch1,
+ MemOperand(sp, argc_expected, LSL, kSystemPointerSizeLog2));
+
+ // Arguments have been adapted. Now call the entry point.
+ __ RecordComment("-- Call entry point --");
+ __ Mov(argc_actual, argc_expected);
+ // x0 : expected number of arguments
+ // x1 : function (passed through to callee)
+ // x3 : new target (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ CallCodeObject(x2);
- // Call the entry point without adapting the arguments.
- __ RecordComment("-- Call without adapting args --");
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
+ masm->pc_offset());
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+ }
+
+ // -----------------------------------------
+ // Adapt arguments in the existing frame.
+ // -----------------------------------------
+ __ Bind(&adapt_arguments_in_place);
+ {
+ __ RecordComment("-- Update arguments in place --");
+ // The callee cannot observe the actual arguments, so it's safe to just
+ // pass the expected arguments by massaging the stack appropriately. See
+ // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
+ Label under_application, over_application;
+ __ Tbnz(argc_actual_minus_expected, kXSignBit, &under_application);
+
+ __ Bind(&over_application);
+ {
+      // Remove superfluous arguments from the stack. The number of superfluous
+ // arguments is even.
+ __ RecordComment("-- Over-application --");
+ __ Mov(argc_actual, argc_expected);
+ __ Drop(argc_actual_minus_expected);
+ __ B(&dont_adapt_arguments);
+ }
+
+ __ Bind(&under_application);
+ {
+ // Fill remaining expected arguments with undefined values.
+ __ RecordComment("-- Under-application --");
+ Label fill;
+ Register undef_value = x16;
+ __ LoadRoot(undef_value, RootIndex::kUndefinedValue);
+ __ Bind(&fill);
+ __ Add(argc_actual, argc_actual, 2);
+ __ Push(undef_value, undef_value);
+ __ Cmp(argc_actual, argc_expected);
+ __ B(lt, &fill);
+ __ B(&dont_adapt_arguments);
+ }
+ }
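The in-place path depends on the actual/expected difference being even (the Tbnz on bit 0 above routes odd differences to the adaptor frame), so both branches can adjust the stack in aligned pairs. The control flow, sketched:

    // Sketch of the skip-adaptor fast path (difference known to be even).
    if (actual > expected) {
      Drop(actual - expected);       // over-application: pop extras, sp stays aligned
    } else {
      while (actual < expected) {
        Push(undefined, undefined);  // under-application: fill two slots at a time
        actual += 2;
      }
    }
    // ...then fall through to dont_adapt_arguments and call the callee.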
+
+ // -------------------------------------------
+  // Don't adapt arguments.
+ // -------------------------------------------
__ Bind(&dont_adapt_arguments);
- static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ JumpCodeObject(x2);
+ {
+ // Call the entry point without adapting the arguments.
+ __ RecordComment("-- Call without adapting args --");
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ JumpCodeObject(x2);
+ }
__ Bind(&stack_overflow);
__ RecordComment("-- Stack overflow --");
@@ -2944,8 +3056,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// function.
__ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
// Load the correct CEntry builtin from the instance object.
- __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset));
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Mov(cp, Smi::zero());
@@ -3011,7 +3124,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
if (argv_mode == kArgvOnStack) {
__ SlotAddress(temp_argv, x0);
// - Adjust for the receiver.
- __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+ __ Sub(temp_argv, temp_argv, 1 * kSystemPointerSize);
}
// Reserve three slots to preserve x21-x23 callee-saved registers.
@@ -3023,9 +3136,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Poke callee-saved registers into reserved space.
- __ Poke(argv, 1 * kPointerSize);
- __ Poke(argc, 2 * kPointerSize);
- __ Poke(target, 3 * kPointerSize);
+ __ Poke(argv, 1 * kSystemPointerSize);
+ __ Poke(argc, 2 * kSystemPointerSize);
+ __ Poke(target, 3 * kSystemPointerSize);
// We normally only keep tagged values in callee-saved registers, as they
// could be pushed onto the stack by called stubs and functions, and on the
@@ -3096,9 +3209,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Restore callee-saved registers x21-x23.
__ Mov(x11, argc);
- __ Peek(argv, 1 * kPointerSize);
- __ Peek(argc, 2 * kPointerSize);
- __ Peek(target, 3 * kPointerSize);
+ __ Peek(argv, 1 * kSystemPointerSize);
+ __ Peek(argc, 2 * kSystemPointerSize);
+ __ Peek(target, 3 * kSystemPointerSize);
__ LeaveExitFrame(save_doubles == kSaveFPRegs, x10, x9);
if (argv_mode == kArgvOnStack) {
@@ -3178,7 +3291,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
DoubleRegister double_scratch = temps.AcquireD();
// Account for saved regs.
- const int kArgumentOffset = 2 * kPointerSize;
+ const int kArgumentOffset = 2 * kSystemPointerSize;
__ Push(result, scratch1); // scratch1 is also pushed to preserve alignment.
__ Peek(double_scratch, kArgumentOffset);
@@ -3232,98 +3345,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- Register exponent_integer = x12;
- Register saved_lr = x19;
- VRegister result_double = d0;
- VRegister base_double = d0;
- VRegister exponent_double = d1;
- VRegister base_double_copy = d2;
- VRegister scratch1_double = d6;
- VRegister scratch0_double = d7;
-
- // A fast-path for integer exponents.
- Label exponent_is_integer;
- // Allocate a heap number for the result, and return it.
- Label done;
-
- // Unpack the inputs.
-
- // Handle double (heap number) exponents.
- // Detect integer exponents stored as doubles and handle those in the
- // integer fast-path.
- __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
- scratch0_double, &exponent_is_integer);
-
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(saved_lr, lr);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- __ Mov(lr, saved_lr);
- __ B(&done);
- }
-
- __ Bind(&exponent_is_integer);
-
- // Find abs(exponent). For negative exponents, we can find the inverse later.
- Register exponent_abs = x13;
- __ Cmp(exponent_integer, 0);
- __ Cneg(exponent_abs, exponent_integer, mi);
-
- // Repeatedly multiply to calculate the power.
- // result = 1.0;
- // For each bit n (exponent_integer{n}) {
- // if (exponent_integer{n}) {
- // result *= base;
- // }
- // base *= base;
- // if (remaining bits in exponent_integer are all zero) {
- // break;
- // }
- // }
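The pseudocode above is standard square-and-multiply exponentiation; the same loop in C++, for reference:

    #include <cstdint>

    // Binary exponentiation: O(log n) multiplies for an integer exponent.
    double PowByInt(double base, uint64_t exponent_abs) {
      double result = 1.0, acc = base;
      while (exponent_abs != 0) {
        if (exponent_abs & 1) result *= acc;  // use this bit of the exponent
        acc *= acc;                           // base^(2^n) for the next bit
        exponent_abs >>= 1;
      }
      return result;
    }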
- Label power_loop, power_loop_entry, power_loop_exit;
- __ Fmov(scratch1_double, base_double);
- __ Fmov(base_double_copy, base_double);
- __ Fmov(result_double, 1.0);
- __ B(&power_loop_entry);
-
- __ Bind(&power_loop);
- __ Fmul(scratch1_double, scratch1_double, scratch1_double);
- __ Lsr(exponent_abs, exponent_abs, 1);
- __ Cbz(exponent_abs, &power_loop_exit);
-
- __ Bind(&power_loop_entry);
- __ Tbz(exponent_abs, 0, &power_loop);
- __ Fmul(result_double, result_double, scratch1_double);
- __ B(&power_loop);
-
- __ Bind(&power_loop_exit);
-
- // If the exponent was positive, result_double holds the result.
- __ Tbz(exponent_integer, kXSignBit, &done);
-
- // The exponent was negative, so find the inverse.
- __ Fmov(scratch0_double, 1.0);
- __ Fdiv(result_double, scratch0_double, result_double);
- // ECMA-262 only requires Math.pow to return an 'implementation-dependent
- // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
- // to calculate the subnormal value 2^-1074. This method of calculating
- // negative powers doesn't work because 2^1074 overflows to infinity. To
- // catch this corner-case, we bail out if the result was 0. (This can only
- // occur if the divisor is infinity or the base is zero.)
- __ Fcmp(result_double, 0.0);
- __ B(&done, ne);
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(saved_lr, lr);
- __ Fmov(base_double, base_double_copy);
- __ Scvtf(exponent_double, exponent_integer);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- __ Mov(lr, saved_lr);
- __ Bind(&done);
- __ Ret();
-}
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
@@ -3340,8 +3361,9 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
Label unexpected_map, map_ok;
// Initial map for the builtin Array function should be a map.
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadTaggedPointerField(
+ x10,
+ FieldMemOperand(constructor, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
@@ -3351,8 +3373,9 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
Register kind = w3;
// Figure out the right elements kind
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadTaggedPointerField(
+ x10,
+ FieldMemOperand(constructor, JSFunction::kPrototypeOrInitialMapOffset));
// Retrieve elements_kind from map.
__ LoadElementsKindFromMap(kind, x10);
@@ -3531,32 +3554,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- cp : kTargetContext
- // -- r1 : kApiFunctionAddress
- // -- r2 : kArgc
- // --
+ // -- cp : context
+ // -- x1 : api function address
+ // -- x2 : arguments count (not including the receiver)
+ // -- x3 : call data
+ // -- x0 : holder
// -- sp[0] : last argument
// -- ...
// -- sp[(argc - 1) * 8] : first argument
// -- sp[(argc + 0) * 8] : receiver
- // -- sp[(argc + 1) * 8] : kHolder
- // -- sp[(argc + 2) * 8] : kCallData
// -----------------------------------
Register api_function_address = x1;
Register argc = x2;
+ Register call_data = x3;
+ Register holder = x0;
Register scratch = x4;
- Register index = x5; // For indexing MemOperands.
-
- DCHECK(!AreAliased(api_function_address, argc, scratch, index));
- // Stack offsets (without argc).
- static constexpr int kReceiverOffset = 0;
- static constexpr int kHolderOffset = kReceiverOffset + 1;
- static constexpr int kCallDataOffset = kHolderOffset + 1;
-
- // Extra stack arguments are: the receiver, kHolder, kCallData.
- static constexpr int kExtraStackArgumentCount = 3;
+ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
typedef FunctionCallbackArguments FCA;
@@ -3571,35 +3586,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
- // sp[0 * kPointerSize]: kHolder
- // sp[1 * kPointerSize]: kIsolate
- // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
- // sp[3 * kPointerSize]: undefined (kReturnValue)
- // sp[4 * kPointerSize]: kData
- // sp[5 * kPointerSize]: undefined (kNewTarget)
+ // sp[0 * kSystemPointerSize]: kHolder
+ // sp[1 * kSystemPointerSize]: kIsolate
+ // sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
+ // sp[4 * kSystemPointerSize]: kData
+ // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
// Reserve space on the stack.
- __ Sub(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+ __ Sub(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
// kHolder.
- __ Add(index, argc, Operand(FCA::kArgsLength + kHolderOffset));
- __ Ldr(scratch, MemOperand(sp, index, LSL, kPointerSizeLog2));
- __ Str(scratch, MemOperand(sp, 0 * kPointerSize));
+ __ Str(holder, MemOperand(sp, 0 * kSystemPointerSize));
// kIsolate.
__ Mov(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ Str(scratch, MemOperand(sp, 1 * kPointerSize));
+ __ Str(scratch, MemOperand(sp, 1 * kSystemPointerSize));
- // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ // kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ Str(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Str(scratch, MemOperand(sp, 3 * kPointerSize));
- __ Str(scratch, MemOperand(sp, 5 * kPointerSize));
+ __ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
+ __ Str(scratch, MemOperand(sp, 3 * kSystemPointerSize));
// kData.
- __ Add(index, argc, Operand(FCA::kArgsLength + kCallDataOffset));
- __ Ldr(scratch, MemOperand(sp, index, LSL, kPointerSizeLog2));
- __ Str(scratch, MemOperand(sp, 4 * kPointerSize));
+ __ Str(call_data, MemOperand(sp, 4 * kSystemPointerSize));
+
+ // kNewTarget.
+ __ Str(scratch, MemOperand(sp, 5 * kSystemPointerSize));
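The six stores materialize FunctionCallbackArguments' implicit_args block in index order. Viewed as a struct (names illustrative; V8 addresses these slots through FCA constants, not a struct):

    #include <cstdint>

    // Layout of implicit_args, one full-width slot each, as built above.
    struct ImplicitArgs {
      uintptr_t holder;                // sp[0]
      uintptr_t isolate;               // sp[1]
      uintptr_t return_value_default;  // sp[2], undefined
      uintptr_t return_value;          // sp[3], undefined
      uintptr_t data;                  // sp[4], the call data register
      uintptr_t new_target;            // sp[5], undefined
    };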
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
@@ -3616,16 +3629,17 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
- __ Str(scratch, MemOperand(sp, 1 * kPointerSize));
+ __ Str(scratch, MemOperand(sp, 1 * kSystemPointerSize));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
- __ Add(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
- __ Add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
- __ Str(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ Add(scratch, scratch,
+ Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
+ __ Add(scratch, scratch, Operand(argc, LSL, kSystemPointerSizeLog2));
+ __ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
// FunctionCallbackInfo::length_.
- __ Str(argc, MemOperand(sp, 3 * kPointerSize));
+ __ Str(argc, MemOperand(sp, 3 * kSystemPointerSize));
// We also store the number of slots to drop from the stack after returning
// from the API function here.
@@ -3633,12 +3647,12 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// drop, not the number of bytes. arm64 must always drop a slot count that is
// a multiple of two, and related helper functions (DropArguments) expect a
// register containing the slot count.
- __ Add(scratch, argc, Operand(FCA::kArgsLength + kExtraStackArgumentCount));
- __ Str(scratch, MemOperand(sp, 4 * kPointerSize));
+ __ Add(scratch, argc, Operand(FCA::kArgsLength + 1 /*receiver*/));
+ __ Str(scratch, MemOperand(sp, 4 * kSystemPointerSize));
// v8::InvocationCallback's argument.
DCHECK(!AreAliased(x0, api_function_address));
- __ add(x0, sp, Operand(1 * kPointerSize));
+ __ add(x0, sp, Operand(1 * kSystemPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
@@ -3649,11 +3663,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// TODO(jgruber): Document what these arguments are.
static constexpr int kStackSlotsAboveFCA = 2;
MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
static constexpr int kSpillOffset = 1 + kApiStackSpace;
static constexpr int kUseStackSpaceOperand = 0;
- MemOperand stack_space_operand(sp, 4 * kPointerSize);
+ MemOperand stack_space_operand(sp, 4 * kSystemPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -3681,10 +3695,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
name));
- __ Ldr(data, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadAnyTaggedField(data,
+ FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ LoadRoot(undef, RootIndex::kUndefinedValue);
__ Mov(isolate_address, ExternalReference::isolate_address(masm->isolate()));
- __ Ldr(name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ LoadTaggedPointerField(
+ name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
// PropertyCallbackArguments:
// receiver, data, return value, return value default, isolate, holder,
@@ -3700,8 +3716,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
"slots must be a multiple of 2 for stack pointer alignment");
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ Mov(x0, sp); // x0 = Handle<Name>
- __ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
+ __ Mov(x0, sp); // x0 = Handle<Name>
+ __ Add(x1, x0, 1 * kSystemPointerSize); // x1 = v8::PCI::args_
const int kApiStackSpace = 1;
@@ -3710,7 +3726,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Create v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
- __ Poke(x1, 1 * kPointerSize);
+ __ Poke(x1, 1 * kSystemPointerSize);
__ SlotAddress(x1, 1);
// x1 = v8::PropertyCallbackInfo&
@@ -3719,14 +3735,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Register api_function_address = x2;
Register js_getter = x4;
- __ Ldr(js_getter, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ LoadTaggedPointerField(
+ js_getter, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
__ Ldr(api_function_address,
FieldMemOperand(js_getter, Foreign::kForeignAddressOffset));
const int spill_offset = 1 + kApiStackSpace;
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ fp,
+ (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq
index d492992232..bfc95a28bf 100644
--- a/deps/v8/src/builtins/array-copywithin.tq
+++ b/deps/v8/src/builtins/array-copywithin.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace array {
+namespace array_copywithin {
macro ConvertToRelativeIndex(index: Number, length: Number): Number {
return index < 0 ? Max(index + length, 0) : Min(index, length);
}
diff --git a/deps/v8/src/builtins/array-every.tq b/deps/v8/src/builtins/array-every.tq
new file mode 100644
index 0000000000..245b07556c
--- /dev/null
+++ b/deps/v8/src/builtins/array-every.tq
@@ -0,0 +1,151 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+ transitioning javascript builtin
+ ArrayEveryLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object): Object {
+ // All continuation points in the optimized every implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires Object type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayEveryLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+ }
+
+ transitioning javascript builtin
+ ArrayEveryLoopLazyDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object, result: Object): Object {
+ // All continuation points in the optimized every implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. every() needs
+ // to pick up at the next step, which is either continuing to the next
+ // array element or returning false if {result} is false.
+ if (!ToBoolean(result)) {
+ return False;
+ }
+
+ numberK = numberK + 1;
+
+ return ArrayEveryLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+ }
+
+ transitioning builtin ArrayEveryLoopContinuation(implicit context: Context)(
+ receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ array: Object, o: JSReceiver, initialK: Number, length: Number,
+ initialTo: Object): Object {
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 6a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 6b. Let kPresent be ? HasProperty(O, Pk).
+ const kPresent: Boolean = HasProperty_Inline(o, k);
+
+ // 6c. If kPresent is true, then
+ if (kPresent == True) {
+ // 6c. i. Let kValue be ? Get(O, Pk).
+ const kValue: Object = GetProperty(o, k);
+
+ // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
+ const result: Object = Call(context, callbackfn, thisArg, kValue, k, o);
+
+      // 6c. iii. If ToBoolean(result) is false, return false.
+ if (!ToBoolean(result)) {
+ return False;
+ }
+ }
+
+ // 6d. Increase k by 1. (done by the loop).
+ }
+ return True;
+ }
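Stripped of Torque plumbing, the continuation builtin is a direct transcription of the spec loop. A C++-flavored sketch, with the JS abstract operations left as the helpers the builtin itself names:

    // Array.prototype.every core loop (spec steps 6a-6d, then step 7).
    bool EveryLoop(JSReceiver o, Callable cb, Object this_arg,
                   double initial_k, double len) {
      for (double k = initial_k; k < len; k++) {
        if (!HasProperty(o, k)) continue;             // 6b: holes are skipped
        Object v = GetProperty(o, k);                 // 6c.i
        if (!ToBoolean(Call(cb, this_arg, v, k, o)))  // 6c.ii-iii
          return false;
      }
      return true;                                    // 7: every element passed
    }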
+
+ transitioning macro FastArrayEvery(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object
+ labels Bailout(Smi) {
+ let k: Smi = 0;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
+ let fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the smi array.
+ for (; k < smiLen; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+ const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
+ const result: Object =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ if (!ToBoolean(result)) {
+ return False;
+ }
+ }
+ return True;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.every
+ transitioning javascript builtin
+ ArrayEvery(implicit context: Context)(receiver: Object, ...arguments):
+ Object {
+ try {
+ if (IsNullOrUndefined(receiver)) {
+ goto NullOrUndefinedError;
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto TypeError;
+ }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+
+ // Special cases.
+ try {
+ return FastArrayEvery(o, len, callbackfn, thisArg)
+ otherwise Bailout;
+ }
+ label Bailout(kValue: Smi) deferred {
+ return ArrayEveryLoopContinuation(
+ o, callbackfn, thisArg, Undefined, o, kValue, len, Undefined);
+ }
+ }
+ label TypeError deferred {
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
+ }
+ label NullOrUndefinedError deferred {
+ ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.every');
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/array-filter.tq b/deps/v8/src/builtins/array-filter.tq
index 222e4e291b..4bf175a787 100644
--- a/deps/v8/src/builtins/array-filter.tq
+++ b/deps/v8/src/builtins/array-filter.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace array {
+namespace array_filter {
transitioning javascript builtin
ArrayFilterLoopEagerDeoptContinuation(implicit context: Context)(
receiver: Object, callback: Object, thisArg: Object, array: Object,
@@ -14,14 +14,12 @@ namespace array {
// Also, this great mass of casts is necessary because the signature
// of Torque javascript builtins requires Object type for all parameters
// other than {context}.
- const jsreceiver: JSReceiver =
- Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn: Callable = Cast<Callable>(callback) otherwise unreachable;
- const outputArray: JSReceiver =
- Cast<JSReceiver>(array) otherwise unreachable;
- const numberK: Number = Cast<Number>(initialK) otherwise unreachable;
- const numberTo: Number = Cast<Number>(initialTo) otherwise unreachable;
- const numberLength: Number = Cast<Number>(length) otherwise unreachable;
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberTo = Cast<Number>(initialTo) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
return ArrayFilterLoopContinuation(
jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
@@ -36,21 +34,19 @@ namespace array {
// All continuation points in the optimized filter implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
- const jsreceiver: JSReceiver =
- Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn: Callable = Cast<Callable>(callback) otherwise unreachable;
- const outputArray: JSReceiver =
- Cast<JSReceiver>(array) otherwise unreachable;
- let numberK: Number = Cast<Number>(initialK) otherwise unreachable;
- let numberTo: Number = Cast<Number>(initialTo) otherwise unreachable;
- const numberLength: Number = Cast<Number>(length) otherwise unreachable;
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ let numberTo = Cast<Number>(initialTo) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
// This custom lazy deopt point is right after the callback. filter() needs
// to pick up at the next step, which is setting the callback result in
// the output array. After incrementing k and to, we can glide into the loop
// continuation builtin.
if (ToBoolean(result)) {
- CreateDataProperty(outputArray, numberTo, valueK);
+ FastCreateDataProperty(outputArray, numberTo, valueK);
numberTo = numberTo + 1;
}
@@ -87,7 +83,7 @@ namespace array {
// iii. If selected is true, then...
if (ToBoolean(result)) {
// 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
- CreateDataProperty(array, to, kValue);
+ FastCreateDataProperty(array, to, kValue);
// 2. Increase to by 1.
to = to + 1;
}
@@ -98,80 +94,42 @@ namespace array {
return array;
}
- transitioning macro
- FilterVisitAllElements<FixedArrayType: type>(implicit context: Context)(
- kind: constexpr ElementsKind, o: JSArray, len: Smi, callbackfn: Callable,
- thisArg: Object, a: JSArray) labels Bailout(Smi, Smi) {
+ transitioning macro FastArrayFilter(implicit context: Context)(
+ fastO: FastJSArray, len: Smi, callbackfn: Callable, thisArg: Object,
+ output: FastJSArray) labels Bailout(Number, Number) {
let k: Smi = 0;
let to: Smi = 0;
- const fastOWitness: FastJSArrayWitness =
- MakeWitness(Cast<FastJSArray>(o) otherwise goto Bailout(k, to));
- const fastAWitness: FastJSArrayWitness =
- MakeWitness(Cast<FastJSArray>(a) otherwise goto Bailout(k, to));
+ let fastOW = NewFastJSArrayWitness(fastO);
+ let fastOutputW = NewFastJSArrayWitness(output);
+
+ fastOutputW.EnsureArrayPushable() otherwise goto Bailout(k, to);
- // Build a fast loop over the smi array.
+ // Build a fast loop over the array.
for (; k < len; k++) {
- let fastO: FastJSArray =
- Testify(fastOWitness) otherwise goto Bailout(k, to);
+ fastOW.Recheck() otherwise goto Bailout(k, to);
// Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastO.length) goto Bailout(k, to);
-
- try {
- const value: Object =
- LoadElementNoHole<FixedArrayType>(fastO, k) otherwise FoundHole;
- const result: Object =
- Call(context, callbackfn, thisArg, value, k, fastO);
- if (ToBoolean(result)) {
- try {
- // Since the call to {callbackfn} is observable, we can't
- // use the Bailout label until we've successfully stored.
- // Hence the {SlowStore} label.
- const fastA: FastJSArray =
- Testify(fastAWitness) otherwise SlowStore;
- if (fastA.length != to) goto SlowStore;
- BuildAppendJSArray(kind, fastA, value)
- otherwise SlowStore;
- }
- label SlowStore {
- CreateDataProperty(a, to, value);
- }
- to = to + 1;
+ if (k >= fastOW.Get().length) goto Bailout(k, to);
+ const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
+ const result: Object =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ if (ToBoolean(result)) {
+ try {
+ // Since the call to {callbackfn} is observable, we can't
+ // use the Bailout label until we've successfully stored.
+ // Hence the {SlowStore} label.
+ fastOutputW.Recheck() otherwise SlowStore;
+ if (fastOutputW.Get().length != to) goto SlowStore;
+ fastOutputW.Push(value) otherwise SlowStore;
+ }
+ label SlowStore {
+ FastCreateDataProperty(fastOutputW.stable, to, value);
}
+ to = to + 1;
}
- label FoundHole {}
}
}
- transitioning macro FastArrayFilter(implicit context: Context)(
- o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object,
- array: JSReceiver): Object
- labels Bailout(Smi, Smi) {
- let k: Smi = 0;
- let to: Smi = 0;
- const smiLen: Smi = Cast<Smi>(len) otherwise goto Bailout(k, to);
- const fastArray: FastJSArray =
- Cast<FastJSArray>(array) otherwise goto Bailout(k, to);
- let fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k, to);
- EnsureArrayPushable(fastArray.map) otherwise goto Bailout(k, to);
- const elementsKind: ElementsKind = fastO.map.elements_kind;
- if (IsElementsKindLessThanOrEqual(elementsKind, HOLEY_SMI_ELEMENTS)) {
- FilterVisitAllElements<FixedArray>(
- HOLEY_SMI_ELEMENTS, fastO, smiLen, callbackfn, thisArg, fastArray)
- otherwise Bailout;
- } else if (IsElementsKindLessThanOrEqual(elementsKind, HOLEY_ELEMENTS)) {
- FilterVisitAllElements<FixedArray>(
- HOLEY_ELEMENTS, fastO, smiLen, callbackfn, thisArg, fastArray)
- otherwise Bailout;
- } else {
- assert(IsDoubleElementsKind(elementsKind));
- FilterVisitAllElements<FixedDoubleArray>(
- HOLEY_DOUBLE_ELEMENTS, fastO, smiLen, callbackfn, thisArg, fastArray)
- otherwise Bailout;
- }
- return array;
- }
-
// This method creates a 0-length array with the ElementsKind of the
// receiver if possible, otherwise, bails out. It makes sense for the
// caller to know that the slow case needs to be invoked.
@@ -179,7 +137,7 @@ namespace array {
receiver: JSReceiver): JSReceiver labels Slow {
const len: Smi = 0;
if (IsArraySpeciesProtectorCellInvalid()) goto Slow;
- const o: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+ const o = Cast<FastJSArray>(receiver) otherwise Slow;
const newMap: Map =
LoadJSArrayElementsMap(o.map.elements_kind, LoadNativeContext(context));
return AllocateJSArray(PACKED_SMI_ELEMENTS, newMap, len, len);
@@ -204,41 +162,45 @@ namespace array {
if (arguments.length == 0) {
goto TypeError;
}
- const callbackfn: Callable =
- Cast<Callable>(arguments[0]) otherwise TypeError;
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
// 4. If thisArg is present, let T be thisArg; else let T be undefined.
const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let array: JSReceiver;
+ let output: JSReceiver;
// Special cases.
let k: Number = 0;
let to: Number = 0;
try {
- array = FastFilterSpeciesCreate(o) otherwise SlowSpeciesCreate;
+ output = FastFilterSpeciesCreate(o) otherwise SlowSpeciesCreate;
try {
- return FastArrayFilter(o, len, callbackfn, thisArg, array)
+ const smiLen: Smi = Cast<Smi>(len) otherwise goto Bailout(k, to);
+ const fastOutput =
+ Cast<FastJSArray>(output) otherwise goto Bailout(k, to);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k, to);
+
+ FastArrayFilter(fastO, smiLen, callbackfn, thisArg, fastOutput)
otherwise Bailout;
+ return output;
}
- label Bailout(kValue: Smi, toValue: Smi) deferred {
+ label Bailout(kValue: Number, toValue: Number) deferred {
k = kValue;
to = toValue;
}
}
label SlowSpeciesCreate {
- array = ArraySpeciesCreate(context, receiver, 0);
+ output = ArraySpeciesCreate(context, receiver, 0);
}
return ArrayFilterLoopContinuation(
- o, callbackfn, thisArg, array, o, k, len, to);
+ o, callbackfn, thisArg, output, o, k, len, to);
}
label TypeError deferred {
- ThrowTypeError(context, kCalledNonCallable, arguments[0]);
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
}
label NullOrUndefinedError deferred {
- ThrowTypeError(
- context, kCalledOnNullOrUndefined, 'Array.prototype.filter');
+ ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.filter');
}
}
}
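
The slow path above (ArrayFilterLoopContinuation) is a direct transcription of the spec loop it quotes. For readers less familiar with Torque, a rough TypeScript sketch of the same steps follows; the names are illustrative, not V8 API, and the Torque version additionally layers the witness-guarded fast path on top.

// Spec steps 6-8 of Array.prototype.filter, as mirrored by
// ArrayFilterLoopContinuation (illustrative sketch only).
function filterLoopContinuation(
    o: object, callbackfn: (v: unknown, k: number, o: object) => unknown,
    thisArg: unknown, output: Record<number, unknown>,
    k: number, len: number, to: number): Record<number, unknown> {
  for (; k < len; k++) {
    // Let kPresent be ? HasProperty(O, Pk).
    if (k in o) {
      // i. Let kValue be ? Get(O, Pk).
      const kValue = (o as Record<number, unknown>)[k];
      // ii. Let selected be ToBoolean(? Call(callbackfn, T, <<kValue, k, O>>)).
      if (callbackfn.call(thisArg, kValue, k, o)) {
        // iii. 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
        output[to] = kValue;
        // iii. 2. Increase to by 1.
        to = to + 1;
      }
    }
  }
  return output;
}
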
diff --git a/deps/v8/src/builtins/array-find.tq b/deps/v8/src/builtins/array-find.tq
new file mode 100644
index 0000000000..28223e4c49
--- /dev/null
+++ b/deps/v8/src/builtins/array-find.tq
@@ -0,0 +1,158 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array_find {
+ transitioning javascript builtin
+ ArrayFindLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object): Object {
+ // All continuation points in the optimized find implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires Object type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayFindLoopContinuation(
+ jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
+ }
+
+ transitioning javascript builtin
+ ArrayFindLoopLazyDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object, result: Object): Object {
+ // This deopt continuation point is never actually called, it just
+ // exists to make stack traces correct from a ThrowTypeError if the
+ // callback was found to be non-callable.
+ unreachable;
+ }
+
+ // Continuation that is called after a lazy deoptimization from TF that
+ // happens right after the callback; its return value must be handled
+ // before iteration continues.
+ transitioning javascript builtin
+ ArrayFindLoopAfterCallbackLazyDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object, foundValue: Object, isFound: Object): Object {
+ // All continuation points in the optimized find implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. find() needs
+ // to pick up at the next step, which is returning the element if the
+ // callback value is truthy. Otherwise, continue the search by calling the
+ // continuation.
+
+ if (ToBoolean(isFound)) {
+ return foundValue;
+ }
+
+ return ArrayFindLoopContinuation(
+ jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
+ }
+
+ transitioning builtin ArrayFindLoopContinuation(implicit context: Context)(
+ receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ o: JSReceiver, initialK: Number, length: Number): Object {
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 6a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 6b. i. Let kValue be ? Get(O, Pk).
+ const value: Object = GetProperty(o, k);
+
+ // 6c. Let testResult be ToBoolean(? Call(predicate, T, <<kValue, k,
+ // O>>)).
+ const testResult: Object =
+ Call(context, callbackfn, thisArg, value, k, o);
+
+ // 6d. If testResult is true, return kValue.
+ if (ToBoolean(testResult)) {
+ return value;
+ }
+
+ // 6e. Increase k by 1. (done by the loop).
+ }
+ return Undefined;
+ }
+
+ transitioning macro FastArrayFind(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object
+ labels Bailout(Smi) {
+ let k: Smi = 0;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the smi array.
+ for (; k < smiLen; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+
+ const value: Object = fastOW.LoadElementOrUndefined(k);
+ const testResult: Object =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ if (ToBoolean(testResult)) {
+ return value;
+ }
+ }
+ return Undefined;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.find
+ transitioning javascript builtin
+ ArrayPrototypeFind(implicit context: Context)(receiver: Object, ...arguments):
+ Object {
+ try {
+ if (IsNullOrUndefined(receiver)) {
+ goto NullOrUndefinedError;
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NotCallableError;
+ }
+ const callbackfn =
+ Cast<Callable>(arguments[0]) otherwise NotCallableError;
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+
+ // Special cases.
+ try {
+ return FastArrayFind(o, len, callbackfn, thisArg)
+ otherwise Bailout;
+ }
+ label Bailout(k: Smi) deferred {
+ return ArrayFindLoopContinuation(o, callbackfn, thisArg, o, k, len);
+ }
+ }
+ label NotCallableError deferred {
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
+ }
+ label NullOrUndefinedError deferred {
+ ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.find');
+ }
+ }
+}
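
ArrayFindLoopContinuation above transcribes spec steps 6a-6e quoted in its comments. Note that, unlike filter and forEach, find performs no HasProperty check: holes are visited, and Get on an absent index simply yields undefined. A TypeScript sketch with illustrative names:

// Spec loop of Array.prototype.find, as mirrored by
// ArrayFindLoopContinuation (illustrative sketch only).
function findLoopContinuation(
    o: object, predicate: (v: unknown, k: number, o: object) => unknown,
    thisArg: unknown, k: number, len: number): unknown {
  for (; k < len; k++) {
    // 6b. i. Let kValue be ? Get(O, Pk) -- no HasProperty check first.
    const value = (o as Record<number, unknown>)[k];
    // 6c. Let testResult be ToBoolean(? Call(predicate, T, <<kValue, k, O>>)).
    if (predicate.call(thisArg, value, k, o)) {
      // 6d. If testResult is true, return kValue.
      return value;
    }
  }
  return undefined;
}
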
diff --git a/deps/v8/src/builtins/array-findindex.tq b/deps/v8/src/builtins/array-findindex.tq
new file mode 100644
index 0000000000..00d8378dfa
--- /dev/null
+++ b/deps/v8/src/builtins/array-findindex.tq
@@ -0,0 +1,161 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array_findindex {
+ transitioning javascript builtin
+ ArrayFindIndexLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object): Object {
+ // All continuation points in the optimized findIndex implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires Object type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayFindIndexLoopContinuation(
+ jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
+ }
+
+ transitioning javascript builtin
+ ArrayFindIndexLoopLazyDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object, result: Object): Object {
+ // This deopt continuation point is never actually called, it just
+ // exists to make stack traces correct from a ThrowTypeError if the
+ // callback was found to be non-callable.
+ unreachable;
+ }
+
+ // Continuation that is called after a lazy deoptimization from TF that
+ // happens right after the callback; its return value must be handled
+ // before iteration continues.
+ transitioning javascript builtin
+ ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation(implicit context:
+ Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object, foundValue: Object, isFound: Object): Object {
+ // All continuation points in the optimized findIndex implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. findIndex()
+ // needs to pick up at the next step, which is returning the index if the
+ // callback value is truthy. Otherwise, continue the search by calling the
+ // continuation.
+
+ if (ToBoolean(isFound)) {
+ return foundValue;
+ }
+
+ return ArrayFindIndexLoopContinuation(
+ jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
+ }
+
+ transitioning builtin ArrayFindIndexLoopContinuation(implicit context:
+ Context)(
+ receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ o: JSReceiver, initialK: Number, length: Number): Number {
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 6a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 6b. i. Let kValue be ? Get(O, Pk).
+ const value: Object = GetProperty(o, k);
+
+ // 6c. Let testResult be ToBoolean(? Call(predicate, T, <<kValue, k,
+ // O>>)).
+ const testResult: Object =
+ Call(context, callbackfn, thisArg, value, k, o);
+
+ // 6d. If testResult is true, return k.
+ if (ToBoolean(testResult)) {
+ return k;
+ }
+
+ // 6e. Increase k by 1. (done by the loop).
+ }
+ return Convert<Smi>(-1);
+ }
+
+ transitioning macro FastArrayFindIndex(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Number
+ labels Bailout(Smi) {
+ let k: Smi = 0;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the smi array.
+ for (; k < smiLen; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+
+ const value: Object = fastOW.LoadElementOrUndefined(k);
+ const testResult: Object =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ if (ToBoolean(testResult)) {
+ return k;
+ }
+ }
+ return -1;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.findIndex
+ transitioning javascript builtin
+ ArrayPrototypeFindIndex(implicit context:
+ Context)(receiver: Object, ...arguments): Object {
+ try {
+ if (IsNullOrUndefined(receiver)) {
+ goto NullOrUndefinedError;
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NotCallableError;
+ }
+ const callbackfn =
+ Cast<Callable>(arguments[0]) otherwise NotCallableError;
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+
+ // Special cases.
+ try {
+ return FastArrayFindIndex(o, len, callbackfn, thisArg)
+ otherwise Bailout;
+ }
+ label Bailout(k: Smi) deferred {
+ return ArrayFindIndexLoopContinuation(
+ o, callbackfn, thisArg, o, k, len);
+ }
+ }
+ label NotCallableError deferred {
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
+ }
+ label NullOrUndefinedError deferred {
+ ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.findIndex');
+ }
+ }
+}
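
array-find.tq and array-findindex.tq are near-identical by design: each builtin needs its own deopt continuation entry points, so the loop is duplicated, and the two differ only in what they return on success (the element vs. its index) and on failure (Undefined vs. -1). A hedged TypeScript sketch of that shared structure, with illustrative names:

// One generic loop, two thin wrappers -- the factoring the two Torque
// files deliberately avoid, since each builtin needs its own continuations.
function findGeneric<T>(
    o: ArrayLike<T>,
    predicate: (v: T, k: number, o: ArrayLike<T>) => unknown,
    thisArg: unknown,
    onFound: (value: T, k: number) => unknown,
    notFound: unknown): unknown {
  const len = o.length;
  for (let k = 0; k < len; k++) {
    const value = o[k];
    if (predicate.call(thisArg, value, k, o)) return onFound(value, k);
  }
  return notFound;
}

const find = <T>(o: ArrayLike<T>,
                 p: (v: T, k: number, o: ArrayLike<T>) => unknown) =>
    findGeneric(o, p, undefined, (v) => v, undefined);
const findIndex = <T>(o: ArrayLike<T>,
                      p: (v: T, k: number, o: ArrayLike<T>) => unknown) =>
    findGeneric(o, p, undefined, (_v, k) => k, -1);
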
diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq
index 7967058e6b..d362e95950 100644
--- a/deps/v8/src/builtins/array-foreach.tq
+++ b/deps/v8/src/builtins/array-foreach.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace array {
+namespace array_foreach {
transitioning javascript builtin
ArrayForEachLoopEagerDeoptContinuation(implicit context: Context)(
receiver: Object, callback: Object, thisArg: Object, initialK: Object,
@@ -10,11 +10,10 @@ namespace array {
// All continuation points in the optimized forEach implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
- const jsreceiver: JSReceiver =
- Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn: Callable = Cast<Callable>(callback) otherwise unreachable;
- const numberK: Number = Cast<Number>(initialK) otherwise unreachable;
- const numberLength: Number = Cast<Number>(length) otherwise unreachable;
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
return ArrayForEachLoopContinuation(
jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
@@ -28,11 +27,10 @@ namespace array {
// All continuation points in the optimized forEach implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
- const jsreceiver: JSReceiver =
- Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn: Callable = Cast<Callable>(callback) otherwise unreachable;
- const numberK: Number = Cast<Number>(initialK) otherwise unreachable;
- const numberLength: Number = Cast<Number>(length) otherwise unreachable;
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
return ArrayForEachLoopContinuation(
jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
@@ -69,43 +67,23 @@ namespace array {
return Undefined;
}
- transitioning macro VisitAllElements<FixedArrayType: type>(implicit context:
- Context)(
- o: JSArray, len: Smi, callbackfn: Callable, thisArg: Object) labels
- Bailout(Smi) {
+ transitioning macro FastArrayForEach(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object
+ labels Bailout(Smi) {
let k: Smi = 0;
- const fastOWitness: FastJSArrayWitness =
- MakeWitness(Cast<FastJSArray>(o) otherwise goto Bailout(k));
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
+ let fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
// Build a fast loop over the smi array.
- for (; k < len; k++) {
- let fastO: FastJSArray = Testify(fastOWitness) otherwise goto Bailout(k);
+ for (; k < smiLen; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k);
// Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastO.length) goto Bailout(k);
-
- try {
- const value: Object =
- LoadElementNoHole<FixedArrayType>(fastO, k) otherwise FoundHole;
- Call(context, callbackfn, thisArg, value, k, fastO);
- }
- label FoundHole {}
- }
- }
-
- transitioning macro FastArrayForEach(implicit context: Context)(
- o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object
- labels Bailout(Smi) {
- let k: Smi = 0;
- const smiLen: Smi = Cast<Smi>(len) otherwise goto Bailout(k);
- let fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k);
- const elementsKind: ElementsKind = fastO.map.elements_kind;
- if (IsElementsKindGreaterThan(elementsKind, HOLEY_ELEMENTS)) {
- VisitAllElements<FixedDoubleArray>(fastO, smiLen, callbackfn, thisArg)
- otherwise Bailout;
- } else {
- VisitAllElements<FixedArray>(fastO, smiLen, callbackfn, thisArg)
- otherwise Bailout;
+ if (k >= fastOW.Get().length) goto Bailout(k);
+ const value: Object = fastOW.LoadElementNoHole(k)
+ otherwise continue;
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
}
return Undefined;
}
@@ -128,8 +106,7 @@ namespace array {
if (arguments.length == 0) {
goto TypeError;
}
- const callbackfn: Callable =
- Cast<Callable>(arguments[0]) otherwise TypeError;
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
// 4. If thisArg is present, let T be thisArg; else let T be undefined.
const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
@@ -148,11 +125,10 @@ namespace array {
o, callbackfn, thisArg, Undefined, o, k, len, Undefined);
}
label TypeError deferred {
- ThrowTypeError(context, kCalledNonCallable, arguments[0]);
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
}
label NullOrUndefinedError deferred {
- ThrowTypeError(
- context, kCalledOnNullOrUndefined, 'Array.prototype.forEach');
+ ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.forEach');
}
}
}
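
The substantive change in this fast path is replacing the MakeWitness/Testify pair with the NewFastJSArrayWitness/Recheck protocol: the witness caches the array's validated fast-mode state, and Recheck() must re-validate it after every user callback, since the callback can run arbitrary JavaScript that changes the array's representation or length. A loose TypeScript analogue; all names are illustrative, and the prototype comparison merely stands in for V8's hidden-class (map) check:

function fastForEach<T>(
    arr: T[], cb: (v: T, k: number, a: T[]) => void, thisArg: unknown,
    bailout: (k: number) => void): void {
  const expected = Object.getPrototypeOf(arr);  // witness: cache the "shape"
  const len = arr.length;                       // captured once, like smiLen
  for (let k = 0; k < len; k++) {
    // Recheck(): the callback may have invalidated the fast-path assumptions.
    if (Object.getPrototypeOf(arr) !== expected) return bailout(k);
    // Ensure that we haven't walked beyond a possibly updated length.
    if (k >= arr.length) return bailout(k);
    // LoadElementNoHole(k) otherwise continue: skip holes.
    if (!(k in arr)) continue;
    cb.call(thisArg, arr[k], k, arr);
  }
}
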
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index 16ac7a7104..f29f6694d4 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace array {
+namespace array_join {
type LoadJoinElementFn = builtin(Context, JSReceiver, Number) => Object;
// Fast C call to write a fixed array (see Buffer.fixedArray) to a single
@@ -16,7 +16,7 @@ namespace array {
return GetProperty(receiver, k);
}
- LoadJoinElement<DictionaryElements>(
+ LoadJoinElement<array::DictionaryElements>(
context: Context, receiver: JSReceiver, k: Number): Object {
const array: JSArray = UnsafeCast<JSArray>(receiver);
const dict: NumberDictionary = UnsafeCast<NumberDictionary>(array.elements);
@@ -32,27 +32,22 @@ namespace array {
}
}
- LoadJoinElement<FastSmiOrObjectElements>(
+ LoadJoinElement<array::FastSmiOrObjectElements>(
context: Context, receiver: JSReceiver, k: Number): Object {
const array: JSArray = UnsafeCast<JSArray>(receiver);
const fixedArray: FixedArray = UnsafeCast<FixedArray>(array.elements);
- const element: Object = fixedArray[UnsafeCast<Smi>(k)];
+ const element: Object = fixedArray.objects[UnsafeCast<Smi>(k)];
return element == Hole ? kEmptyString : element;
}
- LoadJoinElement<FastDoubleElements>(
+ LoadJoinElement<array::FastDoubleElements>(
context: Context, receiver: JSReceiver, k: Number): Object {
const array: JSArray = UnsafeCast<JSArray>(receiver);
const fixedDoubleArray: FixedDoubleArray =
UnsafeCast<FixedDoubleArray>(array.elements);
- try {
- const element: float64 = LoadDoubleWithHoleCheck(
- fixedDoubleArray, UnsafeCast<Smi>(k)) otherwise IfHole;
- return AllocateHeapNumberWithValue(element);
- }
- label IfHole {
- return kEmptyString;
- }
+ const element: float64 = LoadDoubleWithHoleCheck(
+ fixedDoubleArray, UnsafeCast<Smi>(k)) otherwise return kEmptyString;
+ return AllocateHeapNumberWithValue(element);
}
builtin LoadJoinTypedElement<T: type>(
@@ -83,7 +78,7 @@ namespace array {
return ToString_Inline(context, result);
}
label TypeError {
- ThrowTypeError(context, kCalledNonCallable, prop);
+ ThrowTypeError(kCalledNonCallable, prop);
}
}
@@ -98,7 +93,7 @@ namespace array {
loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map,
originalLen: Number): never
labels Cannot, Can {
- if (loadFn == LoadJoinElement<GenericElementsAccessor>) goto Can;
+ if (loadFn == LoadJoinElement<array::GenericElementsAccessor>) goto Can;
const array: JSArray = UnsafeCast<JSArray>(receiver);
if (originalMap != array.map) goto Cannot;
@@ -139,7 +134,7 @@ namespace array {
const length: intptr = fixedArray.length_intptr;
assert(index <= length);
if (index < length) {
- fixedArray[index] = element;
+ fixedArray.objects[index] = element;
return fixedArray;
} else
deferred {
@@ -147,16 +142,49 @@ namespace array {
assert(index < newLength);
const newfixedArray: FixedArray =
ExtractFixedArray(fixedArray, 0, length, newLength, kFixedArrays);
- newfixedArray[index] = element;
+ newfixedArray.objects[index] = element;
return newfixedArray;
}
}
// Contains the information necessary to create a single, separator delimited,
// flattened one or two byte string.
- // The buffer is maintained and updated by BufferInit(), BufferAdd(),
- // BufferAddSeparators().
+ // The buffer is maintained and updated by NewBuffer(), Buffer.Add(),
+ // Buffer.AddSeparators().
struct Buffer {
+ Add(implicit context: Context)(
+ str: String, nofSeparators: intptr, separatorLength: intptr) {
+ // Add separators if necessary (at the beginning, or when more than one
+ // is pending).
+ const writeSeparators: bool = this.index == 0 | nofSeparators > 1;
+ this.AddSeparators(nofSeparators, separatorLength, writeSeparators);
+
+ this.totalStringLength =
+ AddStringLength(this.totalStringLength, str.length);
+ this.fixedArray =
+ StoreAndGrowFixedArray(this.fixedArray, this.index++, str);
+ this.isOneByte =
+ IsOneByteStringInstanceType(str.instanceType) & this.isOneByte;
+ }
+
+ AddSeparators(implicit context: Context)(
+ nofSeparators: intptr, separatorLength: intptr, write: bool) {
+ if (nofSeparators == 0 || separatorLength == 0) return;
+
+ const nofSeparatorsInt: intptr = nofSeparators;
+ const sepsLen: intptr = separatorLength * nofSeparatorsInt;
+ // Detect integer overflow
+ // TODO(tebbi): Replace with overflow-checked multiplication.
+ if (sepsLen / separatorLength != nofSeparatorsInt) deferred {
+ ThrowInvalidStringLength(context);
+ }
+
+ this.totalStringLength = AddStringLength(this.totalStringLength, sepsLen);
+ if (write) deferred {
+ this.fixedArray = StoreAndGrowFixedArray(
+ this.fixedArray, this.index++, Convert<Smi>(nofSeparatorsInt));
+ }
+ }
+
// Fixed array holding elements that are either:
// 1) String result of `ToString(next)`.
// 2) Smi representing the number of consecutive separators.
@@ -185,57 +213,17 @@ namespace array {
isOneByte: bool;
}
- macro BufferInit(len: uintptr, sep: String): Buffer {
+ macro NewBuffer(len: uintptr, sep: String): Buffer {
const cappedBufferSize: intptr = len > kMaxNewSpaceFixedArrayElements ?
kMaxNewSpaceFixedArrayElements :
Signed(len);
assert(cappedBufferSize > 0);
- const fixedArray: FixedArray = AllocateZeroedFixedArray(cappedBufferSize);
- const isOneByte: bool = HasOnlyOneByteChars(sep.instanceType);
- return Buffer{fixedArray, 0, 0, isOneByte};
- }
-
- macro BufferAdd(implicit context: Context)(
- initialBuffer: Buffer, str: String, nofSeparators: intptr,
- separatorLength: intptr): Buffer {
- let buffer: Buffer = initialBuffer;
- // Add separators if necessary (at the beginning or more than one)
- const writeSeparators: bool = buffer.index == 0 | nofSeparators > 1;
- buffer = BufferAddSeparators(
- buffer, nofSeparators, separatorLength, writeSeparators);
-
- const totalStringLength: intptr =
- AddStringLength(buffer.totalStringLength, str.length);
- let index: intptr = buffer.index;
- const fixedArray: FixedArray =
- StoreAndGrowFixedArray(buffer.fixedArray, index++, str);
- const isOneByte: bool =
- HasOnlyOneByteChars(str.instanceType) & buffer.isOneByte;
- return Buffer{fixedArray, index, totalStringLength, isOneByte};
- }
-
- macro BufferAddSeparators(implicit context: Context)(
- buffer: Buffer, nofSeparators: intptr, separatorLength: intptr,
- write: bool): Buffer {
- if (nofSeparators == 0 || separatorLength == 0) return buffer;
-
- const nofSeparatorsInt: intptr = nofSeparators;
- const sepsLen: intptr = separatorLength * nofSeparatorsInt;
- // Detect integer overflow
- // TODO(tebbi): Replace with overflow-checked multiplication.
- if (sepsLen / separatorLength != nofSeparatorsInt) deferred {
- ThrowInvalidStringLength(context);
- }
-
- const totalStringLength: intptr =
- AddStringLength(buffer.totalStringLength, sepsLen);
- let index: intptr = buffer.index;
- let fixedArray: FixedArray = buffer.fixedArray;
- if (write) deferred {
- fixedArray = StoreAndGrowFixedArray(
- buffer.fixedArray, index++, Convert<Smi>(nofSeparatorsInt));
- }
- return Buffer{fixedArray, index, totalStringLength, buffer.isOneByte};
+ return Buffer{
+ AllocateZeroedFixedArray(cappedBufferSize),
+ 0,
+ 0,
+ IsOneByteStringInstanceType(sep.instanceType)
+ };
}
macro BufferJoin(implicit context: Context)(buffer: Buffer, sep: String):
@@ -246,7 +234,7 @@ namespace array {
// Fast path when there's only one buffer element.
if (buffer.index == 1) {
const fixedArray: FixedArray = buffer.fixedArray;
- typeswitch (fixedArray[0]) {
+ typeswitch (fixedArray.objects[0]) {
// When the element is a string, just return it and completely avoid
// allocating another string.
case (str: String): {
@@ -280,7 +268,7 @@ namespace array {
const separatorLength: intptr = sep.length;
let nofSeparators: intptr = 0;
let loadFn: LoadJoinElementFn = initialLoadFn;
- let buffer: Buffer = BufferInit(len, sep);
+ let buffer: Buffer = NewBuffer(len, sep);
// 6. Let k be 0.
let k: uintptr = 0;
@@ -290,7 +278,7 @@ namespace array {
if (CannotUseSameArrayAccessor<T>(
loadFn, receiver, initialMap, lengthNumber))
deferred {
- loadFn = LoadJoinElement<GenericElementsAccessor>;
+ loadFn = LoadJoinElement<array::GenericElementsAccessor>;
}
if (k > 0) {
@@ -324,12 +312,12 @@ namespace array {
}
// d. Set R to the string-concatenation of R and next.
- buffer = BufferAdd(buffer, next, nofSeparators, separatorLength);
+ buffer.Add(next, nofSeparators, separatorLength);
nofSeparators = 0;
}
// Add any separators at the end.
- buffer = BufferAddSeparators(buffer, nofSeparators, separatorLength, true);
+ buffer.AddSeparators(nofSeparators, separatorLength, true);
// 8. Return R.
return BufferJoin(buffer, sep);
@@ -353,9 +341,9 @@ namespace array {
if (IsNoElementsProtectorCellInvalid()) goto IfSlowPath;
if (IsElementsKindLessThanOrEqual(kind, HOLEY_ELEMENTS)) {
- loadFn = LoadJoinElement<FastSmiOrObjectElements>;
+ loadFn = LoadJoinElement<array::FastSmiOrObjectElements>;
} else if (IsElementsKindLessThanOrEqual(kind, HOLEY_DOUBLE_ELEMENTS)) {
- loadFn = LoadJoinElement<FastDoubleElements>;
+ loadFn = LoadJoinElement<array::FastDoubleElements>;
} else if (kind == DICTIONARY_ELEMENTS)
deferred {
const dict: NumberDictionary =
@@ -372,7 +360,7 @@ namespace array {
ThrowInvalidStringLength(context);
}
} else {
- loadFn = LoadJoinElement<DictionaryElements>;
+ loadFn = LoadJoinElement<array::DictionaryElements>;
}
}
else {
@@ -380,7 +368,7 @@ namespace array {
}
}
label IfSlowPath {
- loadFn = LoadJoinElement<GenericElementsAccessor>;
+ loadFn = LoadJoinElement<array::GenericElementsAccessor>;
}
return ArrayJoinImpl<JSArray>(
receiver, sep, lenNumber, useToLocaleString, locales, options, loadFn);
@@ -457,11 +445,11 @@ namespace array {
stack: FixedArray, receiver: JSReceiver): Boolean {
const capacity: intptr = stack.length_intptr;
for (let i: intptr = 0; i < capacity; i++) {
- const previouslyVisited: Object = stack[i];
+ const previouslyVisited: Object = stack.objects[i];
// Add `receiver` to the first open slot
if (previouslyVisited == Hole) {
- stack[i] = receiver;
+ stack.objects[i] = receiver;
return True;
}
@@ -485,8 +473,8 @@ namespace array {
try {
const stack: FixedArray = LoadJoinStack()
otherwise IfUninitialized;
- if (stack[0] == Hole) {
- stack[0] = receiver;
+ if (stack.objects[0] == Hole) {
+ stack.objects[0] = receiver;
} else if (JoinStackPush(stack, receiver) == False)
deferred {
goto ReceiverNotAdded;
@@ -495,7 +483,7 @@ namespace array {
label IfUninitialized {
const stack: FixedArray =
AllocateFixedArrayWithHoles(kMinJoinStackSize, kNone);
- stack[0] = receiver;
+ stack.objects[0] = receiver;
SetJoinStack(stack);
}
goto ReceiverAdded;
@@ -507,7 +495,7 @@ namespace array {
stack: FixedArray, receiver: JSReceiver): Object {
const len: intptr = stack.length_intptr;
for (let i: intptr = 0; i < len; i++) {
- if (stack[i] == receiver) {
+ if (stack.objects[i] == receiver) {
// Shrink the Join Stack if the stack will be empty and is larger than
// the minimum size.
if (i == 0 && len > kMinJoinStackSize) deferred {
@@ -516,7 +504,7 @@ namespace array {
SetJoinStack(newStack);
}
else {
- stack[i] = Hole;
+ stack.objects[i] = Hole;
}
return Undefined;
}
@@ -532,7 +520,7 @@ namespace array {
// Builtin call was not nested (receiver is the first entry) and
// did not contain other nested arrays that expanded the stack.
- if (stack[0] == receiver && len == kMinJoinStackSize) {
+ if (stack.objects[0] == receiver && len == kMinJoinStackSize) {
StoreFixedArrayElement(stack, 0, Hole, SKIP_WRITE_BARRIER);
} else
deferred {
@@ -580,7 +568,7 @@ namespace array {
// Only handle valid array lengths. Although the spec allows larger values,
// this matches historical V8 behavior.
- if (len > kMaxArrayIndex + 1) ThrowTypeError(context, kInvalidArrayLength);
+ if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength);
return CycleProtectedArrayJoin<JSArray>(
false, o, len, separator, Undefined, Undefined);
@@ -600,7 +588,7 @@ namespace array {
// Only handle valid array lengths. Although the spec allows larger values,
// this matches historical V8 behavior.
- if (len > kMaxArrayIndex + 1) ThrowTypeError(context, kInvalidArrayLength);
+ if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength);
return CycleProtectedArrayJoin<JSArray>(
true, o, len, ',', locales, options);
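
The Buffer above run-length encodes separators: its fixed array holds the stringified elements interleaved with Smis, each meaning "this many consecutive separators". A toy TypeScript decoder for that idea; this is only the decoding half, and it ignores the real Buffer's further rule (the writeSeparators condition in Add) that a single separator between adjacent strings stays implicit rather than stored:

// Decode an array of strings and separator run counts into one string.
function bufferJoin(parts: Array<string | number>, sep: string): string {
  let out = '';
  for (const part of parts) {
    out += typeof part === 'number' ? sep.repeat(part) : part;
  }
  return out;
}

// A run of two separators, e.g. around an elided empty element:
bufferJoin(['a', 2, 'b'], ',');  // => 'a,,b'
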
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq
index 967d640e8f..2ade54156c 100644
--- a/deps/v8/src/builtins/array-lastindexof.tq
+++ b/deps/v8/src/builtins/array-lastindexof.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace array {
+namespace array_lastindexof {
macro LoadWithHoleCheck<Elements: type>(
elements: FixedArrayBase, index: Smi): Object
labels IfHole;
@@ -11,7 +11,7 @@ namespace array {
elements: FixedArrayBase, index: Smi): Object
labels IfHole {
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
- const element: Object = elements[index];
+ const element: Object = elements.objects[index];
if (element == Hole) goto IfHole;
return element;
}
diff --git a/deps/v8/src/builtins/array-map.tq b/deps/v8/src/builtins/array-map.tq
new file mode 100644
index 0000000000..d3bba56220
--- /dev/null
+++ b/deps/v8/src/builtins/array-map.tq
@@ -0,0 +1,281 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array_map {
+ transitioning javascript builtin
+ ArrayMapLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, array: Object,
+ initialK: Object, length: Object): Object {
+ // All continuation points in the optimized map implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires Object type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayMapLoopContinuation(
+ jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
+ numberLength);
+ }
+
+ transitioning javascript builtin
+ ArrayMapLoopLazyDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, array: Object,
+ initialK: Object, length: Object, result: Object): Object {
+ // All continuation points in the optimized map implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. map() needs
+ // to pick up at the next step, which is setting the callback result in
+ // the output array. After incrementing k, we can glide into the loop
+ // continuation builtin.
+
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
+ FastCreateDataProperty(outputArray, numberK, result);
+
+ // 7d. Increase k by 1.
+ numberK = numberK + 1;
+
+ return ArrayMapLoopContinuation(
+ jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
+ numberLength);
+ }
+
+ transitioning builtin ArrayMapLoopContinuation(implicit context: Context)(
+ receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ array: JSReceiver, o: JSReceiver, initialK: Number,
+ length: Number): Object {
+ // 6. Let k be 0.
+ // 7. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 7a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 7b. Let kPresent be ? HasProperty(O, Pk).
+ const kPresent: Boolean = HasProperty_Inline(o, k);
+
+ // 7c. If kPresent is true, then:
+ if (kPresent == True) {
+ // i. Let kValue be ? Get(O, Pk).
+ const kValue: Object = GetProperty(o, k);
+
+ // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
+ const mappedValue: Object =
+ Call(context, callbackfn, thisArg, kValue, k, o);
+
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
+ FastCreateDataProperty(array, k, mappedValue);
+ }
+
+ // 7d. Increase k by 1. (done by the loop).
+ }
+
+ // 8. Return A.
+ return array;
+ }
+
+ struct Vector {
+ ReportSkippedElement() {
+ this.skippedElements = true;
+ }
+
+ CreateJSArray(implicit context: Context)(validLength: Smi): JSArray {
+ let length: Smi = this.fixedArray.length;
+ assert(validLength <= length);
+ let kind: ElementsKind = PACKED_SMI_ELEMENTS;
+ if (!this.onlySmis) {
+ if (this.onlyNumbers) {
+ kind = PACKED_DOUBLE_ELEMENTS;
+ } else {
+ kind = PACKED_ELEMENTS;
+ }
+ }
+
+ if (this.skippedElements || validLength < length) {
+ // We also need to create a holey output array if we are
+ // bailing out of the fast path partway through the array.
+ // This is indicated by {validLength} < {length}.
+ // The slow path that takes over after a bailout cannot be assumed
+ // to fill in every remaining element.
+ kind = FastHoleyElementsKind(kind);
+ }
+
+ let map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context));
+ let a: JSArray;
+
+ if (IsDoubleElementsKind(kind)) {
+ // We need to allocate and copy.
+ // First, initialize the elements field before allocation to prevent
+ // heap corruption.
+ const elements: FixedDoubleArray = AllocateFixedDoubleArrayWithHoles(
+ SmiUntag(length), kAllowLargeObjectAllocation);
+ a = NewJSArray(map, this.fixedArray);
+ for (let i: Smi = 0; i < validLength; i++) {
+ typeswitch (this.fixedArray.objects[i]) {
+ case (n: Number): {
+ elements.floats[i] = Float64SilenceNaN(Convert<float64>(n));
+ }
+ case (h: HeapObject): {
+ assert(h == Hole);
+ }
+ }
+ }
+ a.elements = elements;
+ } else {
+ // Simply install the given fixedArray in {vector}.
+ a = NewJSArray(map, this.fixedArray);
+ }
+
+ // Paranoia: the FixedArray now "belongs" to JSArray {a}.
+ this.fixedArray = kEmptyFixedArray;
+ return a;
+ }
+
+ StoreResult(implicit context: Context)(index: Smi, result: Object) {
+ typeswitch (result) {
+ case (s: Smi): {
+ this.fixedArray.objects[index] = s;
+ }
+ case (s: HeapNumber): {
+ this.onlySmis = false;
+ this.fixedArray.objects[index] = s;
+ }
+ case (s: HeapObject): {
+ this.onlySmis = false;
+ this.onlyNumbers = false;
+ this.fixedArray.objects[index] = s;
+ }
+ }
+ }
+
+ fixedArray: FixedArray;
+ onlySmis: bool; // initially true.
+ onlyNumbers: bool; // initially true.
+ skippedElements: bool; // initially false.
+ }
+
+ macro NewVector(implicit context: Context)(length: Smi): Vector {
+ const fixedArray = length > 0 ?
+ AllocateFixedArrayWithHoles(
+ SmiUntag(length), kAllowLargeObjectAllocation) :
+ kEmptyFixedArray;
+ return Vector{fixedArray, true, true, false};
+ }
+
+ transitioning macro FastArrayMap(implicit context: Context)(
+ fastO: FastJSArray, len: Smi, callbackfn: Callable,
+ thisArg: Object): JSArray
+ labels Bailout(JSArray, Smi) {
+ let k: Smi = 0;
+ let fastOW = NewFastJSArrayWitness(fastO);
+ let vector = NewVector(len);
+
+ // Build a fast loop over the smi array.
+ // 7. Repeat, while k < len.
+ try {
+ for (; k < len; k++) {
+ fastOW.Recheck() otherwise goto PrepareBailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto PrepareBailout(k);
+
+ try {
+ const value: Object = fastOW.LoadElementNoHole(k)
+ otherwise FoundHole;
+ const result: Object =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ vector.StoreResult(k, result);
+ }
+ label FoundHole {
+ // Our output array must necessarily be holey because of holes in
+ // the input array.
+ vector.ReportSkippedElement();
+ }
+ }
+ }
+ label PrepareBailout(k: Smi) deferred {
+ // Transform {vector} into a JSArray and bail out.
+ goto Bailout(vector.CreateJSArray(k), k);
+ }
+
+ return vector.CreateJSArray(len);
+ }
+
+ // Bails out if the slow path needs to be taken.
+ // It's useful to structure it this way, because the consequences of
+ // using the slow path on species creation are interesting to the caller.
+ macro FastMapSpeciesCreate(implicit context: Context)(
+ receiver: JSReceiver, length: Number): JSArray labels Bailout {
+ if (IsArraySpeciesProtectorCellInvalid()) goto Bailout;
+ const o = Cast<FastJSArray>(receiver) otherwise Bailout;
+ const smiLength = Cast<Smi>(length) otherwise Bailout;
+ const newMap: Map =
+ LoadJSArrayElementsMap(PACKED_SMI_ELEMENTS, LoadNativeContext(context));
+ return AllocateJSArray(PACKED_SMI_ELEMENTS, newMap, smiLength, smiLength);
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.map
+ transitioning javascript builtin
+ ArrayMap(implicit context: Context)(receiver: Object, ...arguments): Object {
+ try {
+ if (IsNullOrUndefined(receiver)) goto NullOrUndefinedError;
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) goto TypeError;
+
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+
+ let array: JSReceiver;
+ let k: Number = 0;
+ try {
+ // 5. Let A be ? ArraySpeciesCreate(O, len).
+ if (IsArraySpeciesProtectorCellInvalid()) goto SlowSpeciesCreate;
+ const o: FastJSArray = Cast<FastJSArray>(receiver)
+ otherwise SlowSpeciesCreate;
+ const smiLength: Smi = Cast<Smi>(len)
+ otherwise SlowSpeciesCreate;
+
+ return FastArrayMap(o, smiLength, callbackfn, thisArg)
+ otherwise Bailout;
+ }
+ label SlowSpeciesCreate {
+ array = ArraySpeciesCreate(context, receiver, len);
+ }
+ label Bailout(output: JSArray, kValue: Smi) deferred {
+ array = output;
+ k = kValue;
+ }
+
+ return ArrayMapLoopContinuation(o, callbackfn, thisArg, array, o, k, len);
+ }
+ label TypeError deferred {
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
+ }
+ label NullOrUndefinedError deferred {
+ ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.map');
+ }
+ }
+}
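
While collecting callback results, the Vector struct above tracks whether every value stored so far was a Smi, or at least a Number, so that CreateJSArray can pick the tightest packed elements kind for the output (falling back to a holey variant when elements were skipped or the loop bailed out partway). A TypeScript sketch of just that bookkeeping; the kind names mirror the Torque constants, and the Smi test is only an approximation:

type ElementsKind =
    'PACKED_SMI_ELEMENTS' | 'PACKED_DOUBLE_ELEMENTS' | 'PACKED_ELEMENTS';

class ResultVector {
  onlySmis = true;     // initially true
  onlyNumbers = true;  // initially true
  store(value: unknown): void {
    if (typeof value === 'number' && Number.isInteger(value)) return;  // Smi-ish
    this.onlySmis = false;
    if (typeof value === 'number') return;  // HeapNumber case
    this.onlyNumbers = false;               // arbitrary HeapObject
  }
  kind(): ElementsKind {
    if (this.onlySmis) return 'PACKED_SMI_ELEMENTS';
    return this.onlyNumbers ? 'PACKED_DOUBLE_ELEMENTS' : 'PACKED_ELEMENTS';
  }
}
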
diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq
index 6434dbc8c8..70fda8d2eb 100644
--- a/deps/v8/src/builtins/array-of.tq
+++ b/deps/v8/src/builtins/array-of.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace array {
+namespace array_of {
// https://tc39.github.io/ecma262/#sec-array.of
transitioning javascript builtin
ArrayOf(implicit context: Context)(receiver: Object, ...arguments): Object {
@@ -39,14 +39,14 @@ namespace array {
// b. Let Pk be ! ToString(k).
// c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue).
- CreateDataProperty(a, k, kValue);
+ FastCreateDataProperty(a, k, kValue);
// d. Increase k by 1.
k++;
}
// 8. Perform ? Set(A, "length", len, true).
- SetPropertyLength(a, len);
+ array::SetPropertyLength(a, len);
// 9. Return A.
return a;
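
The builtin above is a close transcription of the spec text it quotes. A simplified TypeScript shape of the same algorithm, omitting the constructor-receiver path (spec step 4) that the full builtin also implements:

function arrayOf<T>(...items: T[]): T[] {
  const len = items.length;  // 1. Let len be the actual number of arguments.
  const a: T[] = [];         // 5. Let A be ? ArrayCreate(len) (simplified).
  for (let k = 0; k < len; k++) {
    a[k] = items[k];         // 7c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue).
  }
  a.length = len;            // 8. Perform ? Set(A, "length", len, true).
  return a;
}
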
diff --git a/deps/v8/src/builtins/array-reduce-right.tq b/deps/v8/src/builtins/array-reduce-right.tq
new file mode 100644
index 0000000000..33661c38d1
--- /dev/null
+++ b/deps/v8/src/builtins/array-reduce-right.tq
@@ -0,0 +1,183 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+ transitioning javascript builtin
+ ArrayReduceRightPreLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, length: Object): Object {
+ // All continuation points in the optimized reduceRight implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires Object type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // Simulate starting the loop at 0, but ensuring that the accumulator is
+ // the hole. The continuation stub will search for the initial non-hole
+ // element, rightly throwing an exception if not found.
+ return ArrayReduceRightLoopContinuation(
+ jsreceiver, callbackfn, Hole, jsreceiver, 0, numberLength);
+ }
+
+ transitioning javascript builtin
+ ArrayReduceRightLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, initialK: Object, length: Object,
+ accumulator: Object): Object {
+ // All continuation points in the optimized reduceRight implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires Object type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayReduceRightLoopContinuation(
+ jsreceiver, callbackfn, accumulator, jsreceiver, numberK, numberLength);
+ }
+
+ transitioning javascript builtin
+ ArrayReduceRightLoopLazyDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, initialK: Object, length: Object,
+ result: Object): Object {
+ // All continuation points in the optimized reduceRight implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // The accumulator is the result of the callback call that just occurred.
+ let r = ArrayReduceRightLoopContinuation(
+ jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength);
+ return r;
+ }
+
+ transitioning builtin ArrayReduceRightLoopContinuation(implicit context:
+ Context)(
+ receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
+ o: JSReceiver, initialK: Number, length: Number): Object {
+ let accumulator = initialAccumulator;
+
+ // 8b and 9. Repeat, while k >= 0
+ for (let k: Number = initialK; k >= 0; k--) {
+ // 8b i and 9a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 8b ii and 9b. Set kPresent to ? HasProperty(O, Pk).
+ const present: Boolean = HasProperty_Inline(o, k);
+
+ // 8b iii and 9c. If kPresent is true, then
+ if (present == True) {
+ // 8b iii and 9c i. Let kValue be ? Get(O, Pk).
+ const value: Object = GetProperty(o, k);
+
+ if (accumulator == Hole) {
+ // 8b iii 1.
+ accumulator = value;
+ } else {
+ // 9c. ii. Set accumulator to ? Call(callbackfn, undefined,
+ // <accumulator, kValue, k, O>).
+ accumulator =
+ Call(context, callbackfn, Undefined, accumulator, value, k, o);
+ }
+ }
+
+ // 8b iv and 9d. Decrease k by 1. (done by the loop).
+ }
+
+ // 8c. if kPresent is false, throw a TypeError exception.
+ // If the accumulator is discovered with the sentinel hole value,
+ // this means kPresent is false.
+ if (accumulator == Hole) {
+ ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight');
+ }
+ return accumulator;
+ }
+
+ transitioning macro FastArrayReduceRight(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable,
+ initialAccumulator: Object): Object
+ labels Bailout(Number, Object) {
+ let accumulator = initialAccumulator;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(len - 1, accumulator);
+ let fastO =
+ Cast<FastJSArray>(o) otherwise goto Bailout(len - 1, accumulator);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the array.
+ for (let k: Smi = smiLen - 1; k >= 0; k--) {
+ fastOW.Recheck() otherwise goto Bailout(k, accumulator);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
+
+ const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
+ if (accumulator == Hole) {
+ accumulator = value;
+ } else {
+ accumulator = Call(
+ context, callbackfn, Undefined, accumulator, value, k,
+ fastOW.Get());
+ }
+ }
+ if (accumulator == Hole) {
+ ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight');
+ }
+ return accumulator;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.reduceRight
+ transitioning javascript builtin
+ ArrayReduceRight(implicit context: Context)(receiver: Object, ...arguments):
+ Object {
+ try {
+ if (IsNullOrUndefined(receiver)) {
+ goto NullOrUndefinedError;
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NoCallableError;
+ }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NoCallableError;
+
+ // 4. If len is 0 and initialValue is not present, throw a TypeError
+ // exception. (This case is handled at the end of
+ // ArrayReduceRightLoopContinuation).
+
+ const initialValue: Object = arguments.length > 1 ? arguments[1] : Hole;
+
+ try {
+ return FastArrayReduceRight(o, len, callbackfn, initialValue)
+ otherwise Bailout;
+ }
+ label Bailout(value: Number, accumulator: Object) {
+ return ArrayReduceRightLoopContinuation(
+ o, callbackfn, accumulator, o, value, len);
+ }
+ }
+ label NoCallableError deferred {
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
+ }
+ label NullOrUndefinedError deferred {
+ ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.reduceRight');
+ }
+ }
+}
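
Both reduce flavors use V8's Hole as an in-band sentinel for "no accumulator yet": when no initialValue is passed, the first present element becomes the accumulator, and if traversal ends with the sentinel still in place, the kReduceNoInitial TypeError is thrown. A TypeScript sketch of the same protocol, using a unique symbol as the sentinel:

const HOLE: unique symbol = Symbol('hole');  // stands in for V8's Hole

function reduceRightLoop<T>(
    o: ArrayLike<T>,
    cb: (acc: unknown, v: T, k: number, o: ArrayLike<T>) => unknown,
    initial: unknown = HOLE): unknown {
  let acc: unknown = initial;
  // 8b and 9. Repeat, while k >= 0.
  for (let k = o.length - 1; k >= 0; k--) {
    if (!(k in o)) continue;       // kPresent is false: skip the index
    const value = o[k];
    if (acc === HOLE) {
      acc = value;                 // 8b iii 1: first present element
    } else {
      acc = cb(acc, value, k, o);  // 9c ii: Call(callbackfn, undefined, ...)
    }
  }
  if (acc === HOLE) {              // 8c: kPresent was never true
    throw new TypeError('Reduce of empty array with no initial value');
  }
  return acc;
}
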
diff --git a/deps/v8/src/builtins/array-reduce.tq b/deps/v8/src/builtins/array-reduce.tq
new file mode 100644
index 0000000000..67a112fd41
--- /dev/null
+++ b/deps/v8/src/builtins/array-reduce.tq
@@ -0,0 +1,182 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+ transitioning javascript builtin
+ ArrayReducePreLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, length: Object): Object {
+ // All continuation points in the optimized reduce implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires Object type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // Simulate starting the loop at 0, but ensuring that the accumulator is
+ // the hole. The continuation stub will search for the initial non-hole
+ // element, rightly throwing an exception if not found.
+ return ArrayReduceLoopContinuation(
+ jsreceiver, callbackfn, Hole, jsreceiver, 0, numberLength);
+ }
+
+ transitioning javascript builtin
+ ArrayReduceLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, initialK: Object, length: Object,
+ accumulator: Object): Object {
+ // All continuation points in the optimized reduce implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires Object type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayReduceLoopContinuation(
+ jsreceiver, callbackfn, accumulator, jsreceiver, numberK, numberLength);
+ }
+
+ transitioning javascript builtin
+ ArrayReduceLoopLazyDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, initialK: Object, length: Object,
+ result: Object): Object {
+    // All continuation points in the optimized reduce implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+    // The accumulator is the result from the callback call which just occurred.
+ let r = ArrayReduceLoopContinuation(
+ jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength);
+ return r;
+ }
+
+ transitioning builtin ArrayReduceLoopContinuation(implicit context: Context)(
+ receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
+ o: JSReceiver, initialK: Number, length: Number): Object {
+ let accumulator = initialAccumulator;
+
+ // 8b and 9. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 8b i and 9a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 8b ii and 9b. Set kPresent to ? HasProperty(O, Pk).
+ const present: Boolean = HasProperty_Inline(o, k);
+
+      // 8b iii and 9c. If kPresent is true, then
+      if (present == True) {
+        // 8b iii 1 and 9c i. Let kValue be ? Get(O, Pk).
+        const value: Object = GetProperty(o, k);
+
+        if (accumulator == Hole) {
+          // 8b iii 1. Set accumulator to kValue.
+ accumulator = value;
+ } else {
+ // 9c. ii. Set accumulator to ? Call(callbackfn, undefined,
+ // <accumulator, kValue, k, O>).
+ accumulator =
+ Call(context, callbackfn, Undefined, accumulator, value, k, o);
+ }
+ }
+
+ // 8b iv and 9d. Increase k by 1. (done by the loop).
+ }
+
+    // 8c. If kPresent is false, throw a TypeError exception.
+    // If the accumulator still holds the sentinel hole value at this
+    // point, kPresent was never true.
+ if (accumulator == Hole) {
+ ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
+ }
+ return accumulator;
+ }
+
+ transitioning macro FastArrayReduce(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable,
+ initialAccumulator: Object): Object
+ labels Bailout(Number, Object) {
+ const k = 0;
+ let accumulator = initialAccumulator;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k, accumulator);
+ let fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k, accumulator);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the array.
+ for (let k: Smi = 0; k < len; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k, accumulator);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
+
+ const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
+ if (accumulator == Hole) {
+ accumulator = value;
+ } else {
+ accumulator = Call(
+ context, callbackfn, Undefined, accumulator, value, k,
+ fastOW.Get());
+ }
+ }
+ if (accumulator == Hole) {
+ ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
+ }
+ return accumulator;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.reduce
+ transitioning javascript builtin
+ ArrayReduce(implicit context: Context)(receiver: Object, ...arguments):
+ Object {
+ try {
+ if (IsNullOrUndefined(receiver)) {
+ goto NullOrUndefinedError;
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NoCallableError;
+ }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NoCallableError;
+
+ // 4. If len is 0 and initialValue is not present, throw a TypeError
+ // exception. (This case is handled at the end of
+ // ArrayReduceLoopContinuation).
+
+ const initialValue: Object = arguments.length > 1 ? arguments[1] : Hole;
+
+ try {
+ return FastArrayReduce(o, len, callbackfn, initialValue)
+ otherwise Bailout;
+ }
+ label Bailout(value: Number, accumulator: Object) {
+ return ArrayReduceLoopContinuation(
+ o, callbackfn, accumulator, o, value, len);
+ }
+ }
+ label NoCallableError deferred {
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
+ }
+ label NullOrUndefinedError deferred {
+ ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.reduce');
+ }
+ }
+}
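
The try/label structure of ArrayReduce above is the recurring fast-path pattern in these files: run a Smi-indexed loop over a FastJSArray and, if a map or length check fails mid-loop, hand the current index and accumulator to the generic continuation. A hedged TypeScript sketch of that handoff, with an exception standing in for the Torque Bailout label (all names here are illustrative):

// BailoutError stands in for the Torque label Bailout(value, accumulator):
// it carries the loop index and accumulator from the fast path to the
// generic continuation.
class BailoutError extends Error {
  constructor(readonly k: number, readonly accumulator: number) {
    super('bailout');
  }
}

// Fast path: assumes a packed array of numbers; bails out on anything else.
function fastReduce(o: unknown[], f: (a: number, v: number) => number,
                    init: number): number {
  let acc = init;
  for (let k = 0; k < o.length; k++) {
    const v = o[k];
    if (typeof v !== 'number') throw new BailoutError(k, acc);
    acc = f(acc, v);
  }
  return acc;
}

// Mirrors the try { FastArrayReduce(...) } label Bailout(...) structure:
// the generic loop resumes exactly where the fast loop stopped.
function reduceSketch(o: unknown[], f: (a: number, v: number) => number,
                      init: number): number {
  try {
    return fastReduce(o, f, init);
  } catch (e) {
    if (!(e instanceof BailoutError)) throw e;
    let acc = e.accumulator;
    for (let k = e.k; k < o.length; k++) acc = f(acc, Number(o[k]));
    return acc;
  }
}

reduceSketch([1, 2, '3', 4], (a, v) => a + v, 0); // bails at index 2 -> 10
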
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index dddad7b42c..80e9efe2f0 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -2,55 +2,54 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace array {
+namespace array_reverse {
macro LoadElement<ElementsAccessor: type, T: type>(
elements: FixedArrayBase, index: Smi): T;
- LoadElement<FastPackedSmiElements, Smi>(implicit context: Context)(
+ LoadElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
elements: FixedArrayBase, index: Smi): Smi {
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- return UnsafeCast<Smi>(elems[index]);
+ const elements: FixedArray = UnsafeCast<FixedArray>(elements);
+ return UnsafeCast<Smi>(elements.objects[index]);
}
- LoadElement<FastPackedObjectElements, Object>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi): Object {
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- return elems[index];
+ LoadElement<array::FastPackedObjectElements, Object>(
+ implicit context: Context)(elements: FixedArrayBase, index: Smi): Object {
+ const elements: FixedArray = UnsafeCast<FixedArray>(elements);
+ return elements.objects[index];
}
- LoadElement<FastPackedDoubleElements, float64>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi): float64 {
- try {
- const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
- return LoadDoubleWithHoleCheck(elems, index) otherwise Hole;
- }
- label Hole {
- // This macro is only used for PACKED_DOUBLE, loading the hole should
- // be impossible.
- unreachable;
- }
+ LoadElement<array::FastPackedDoubleElements, float64>(
+ implicit context: Context)(elements: FixedArrayBase, index: Smi):
+ float64 {
+ const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
+ // This macro is only used for PACKED_DOUBLE, loading the hole should
+ // be impossible.
+ return LoadDoubleWithHoleCheck(elements, index)
+ otherwise unreachable;
}
macro StoreElement<ElementsAccessor: type, T: type>(
implicit context:
Context)(elements: FixedArrayBase, index: Smi, value: T);
- StoreElement<FastPackedSmiElements, Smi>(implicit context: Context)(
+ StoreElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
elements: FixedArrayBase, index: Smi, value: Smi) {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
StoreFixedArrayElementSmi(elems, index, value, SKIP_WRITE_BARRIER);
}
- StoreElement<FastPackedObjectElements, Object>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: Object) {
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- elems[index] = value;
+ StoreElement<array::FastPackedObjectElements, Object>(
+ implicit context:
+ Context)(elements: FixedArrayBase, index: Smi, value: Object) {
+ const elements: FixedArray = UnsafeCast<FixedArray>(elements);
+ elements.objects[index] = value;
}
- StoreElement<FastPackedDoubleElements, float64>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: float64) {
+ StoreElement<array::FastPackedDoubleElements, float64>(
+ implicit context:
+ Context)(elements: FixedArrayBase, index: Smi, value: float64) {
const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
- StoreFixedDoubleArrayElementWithSmiIndex(elems, index, value);
+ StoreFixedDoubleArrayElementSmi(elems, index, value);
}
// Fast-path for all PACKED_* elements kinds. These do not need to check
@@ -149,15 +148,15 @@ namespace array {
const kind: ElementsKind = array.map.elements_kind;
if (kind == PACKED_SMI_ELEMENTS) {
- EnsureWriteableFastElements(array);
- FastPackedArrayReverse<FastPackedSmiElements, Smi>(
+ array::EnsureWriteableFastElements(array);
+ FastPackedArrayReverse<array::FastPackedSmiElements, Smi>(
array.elements, array.length);
} else if (kind == PACKED_ELEMENTS) {
- EnsureWriteableFastElements(array);
- FastPackedArrayReverse<FastPackedObjectElements, Object>(
+ array::EnsureWriteableFastElements(array);
+ FastPackedArrayReverse<array::FastPackedObjectElements, Object>(
array.elements, array.length);
} else if (kind == PACKED_DOUBLE_ELEMENTS) {
- FastPackedArrayReverse<FastPackedDoubleElements, float64>(
+ FastPackedArrayReverse<array::FastPackedDoubleElements, float64>(
array.elements, array.length);
} else {
goto Slow;
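
The reverse hunk above instantiates LoadElement/StoreElement once per packed ElementsKind and reuses a single generic in-place reversal. A minimal TypeScript sketch of that shape, with an accessor object standing in for the Torque specializations (names are illustrative):

// Sketch of FastPackedArrayReverse: a two-pointer in-place reverse,
// parameterized over load/store accessors the way the Torque macro is
// parameterized over ElementsKind specializations.
interface ElementsAccessor<T> {
  load(elements: T[], index: number): T;
  store(elements: T[], index: number, value: T): void;
}

function fastPackedArrayReverse<T>(acc: ElementsAccessor<T>,
                                   elements: T[], length: number): void {
  for (let lower = 0, upper = length - 1; lower < upper; lower++, upper--) {
    const lowerValue = acc.load(elements, lower);
    const upperValue = acc.load(elements, upper);
    acc.store(elements, lower, upperValue);
    acc.store(elements, upper, lowerValue);
  }
}

const smiAccessor: ElementsAccessor<number> = {
  load: (e, i) => e[i],
  store: (e, i, v) => { e[i] = v; },
};
fastPackedArrayReverse(smiAccessor, [1, 2, 3, 4], 4); // [4, 3, 2, 1]
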
diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq
index 615b4b7073..847729b607 100644
--- a/deps/v8/src/builtins/array-slice.tq
+++ b/deps/v8/src/builtins/array-slice.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace array {
+namespace array_slice {
macro HandleSimpleArgumentsSlice(
context: Context, args: JSArgumentsObjectWithLength, start: Smi,
count: Smi): JSArray
@@ -43,13 +43,13 @@ namespace array {
// defined arguments
const end: Smi = start + count;
const unmappedElements: FixedArray =
- Cast<FixedArray>(sloppyElements[kSloppyArgumentsArgumentsIndex])
+ Cast<FixedArray>(sloppyElements.objects[kSloppyArgumentsArgumentsIndex])
otherwise Bailout;
const unmappedElementsLength: Smi = unmappedElements.length;
if (SmiAbove(end, unmappedElementsLength)) goto Bailout;
- const argumentsContext: Context =
- UnsafeCast<Context>(sloppyElements[kSloppyArgumentsContextIndex]);
+ const argumentsContext: Context = UnsafeCast<Context>(
+ sloppyElements.objects[kSloppyArgumentsContextIndex]);
const arrayMap: Map = LoadJSArrayElementsMap(HOLEY_ELEMENTS, context);
const result: JSArray =
@@ -62,10 +62,10 @@ namespace array {
// Fill in the part of the result that map to context-mapped parameters.
for (let current: Smi = start; current < to; ++current) {
const e: Object =
- sloppyElements[current + kSloppyArgumentsParameterMapStart];
+ sloppyElements.objects[current + kSloppyArgumentsParameterMapStart];
const newElement: Object = e != Hole ?
argumentsContext[UnsafeCast<Smi>(e)] :
- unmappedElements[current];
+ unmappedElements.objects[current];
StoreFixedArrayElementSmi(
resultElements, indexOut++, newElement, SKIP_WRITE_BARRIER);
}
@@ -193,7 +193,7 @@ namespace array {
const kValue: Object = GetProperty(o, pK);
// ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(n), kValue).
- CreateDataProperty(a, n, kValue);
+ FastCreateDataProperty(a, n, kValue);
}
// d. Increase k by 1.
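
In the sloppy-arguments slice above, each requested slot is either context-mapped (the element stores a context slot index) or unmapped (the value lives in the backing FixedArray), with the hole marking unmapped entries. A hedged TypeScript sketch of that lookup (all names are illustrative):

// Sketch of the mapped/unmapped lookup in the sloppy-arguments slice above.
// A mapped slot stores an index into the function's context; the hole marks
// slots whose value lives in the unmapped backing store.
const Hole = Symbol('hole');

function sliceSloppyArguments(
    mappedSlots: Array<number | typeof Hole>,  // parameter map (context slots)
    argumentsContext: unknown[],               // stand-in for the Context
    unmappedElements: unknown[],               // stand-in for the FixedArray
    start: number, count: number): unknown[] {
  const result: unknown[] = [];
  for (let current = start; current < start + count; current++) {
    const e = current < mappedSlots.length ? mappedSlots[current] : Hole;
    result.push(e !== Hole ? argumentsContext[e] : unmappedElements[current]);
  }
  return result;
}

// First argument is captured into context slot 0; second is unmapped.
sliceSloppyArguments([0, Hole], ['a'], [Hole, 'b'], 0, 2); // ['a', 'b']
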
diff --git a/deps/v8/src/builtins/array-some.tq b/deps/v8/src/builtins/array-some.tq
new file mode 100644
index 0000000000..f68ea4ac30
--- /dev/null
+++ b/deps/v8/src/builtins/array-some.tq
@@ -0,0 +1,150 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+ transitioning javascript builtin
+ ArraySomeLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object): Object {
+ // All continuation points in the optimized some implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires Object type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArraySomeLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+ }
+
+ transitioning javascript builtin
+ ArraySomeLoopLazyDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object, result: Object): Object {
+ // All continuation points in the optimized some implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+    // This custom lazy deopt point is right after the callback. some() needs
+    // to pick up at the next step: if the result is true, return true;
+    // otherwise, keep going through the array starting from k + 1.
+ if (ToBoolean(result)) {
+ return True;
+ }
+
+ numberK = numberK + 1;
+
+ return ArraySomeLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+ }
+
+ transitioning builtin ArraySomeLoopContinuation(implicit context: Context)(
+ receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ array: Object, o: JSReceiver, initialK: Number, length: Number,
+ initialTo: Object): Object {
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 6a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 6b. Let kPresent be ? HasProperty(O, Pk).
+ const kPresent: Boolean = HasProperty_Inline(o, k);
+
+ // 6c. If kPresent is true, then
+ if (kPresent == True) {
+ // 6c. i. Let kValue be ? Get(O, Pk).
+ const kValue: Object = GetProperty(o, k);
+
+ // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
+ const result: Object = Call(context, callbackfn, thisArg, kValue, k, o);
+
+      // 6c. iii. If testResult is true, return true.
+ if (ToBoolean(result)) {
+ return True;
+ }
+ }
+
+ // 6d. Increase k by 1. (done by the loop).
+ }
+ return False;
+ }
+
+ transitioning macro FastArraySome(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object
+ labels Bailout(Smi) {
+ let k: Smi = 0;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
+ let fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the smi array.
+ for (; k < smiLen; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+ const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
+ const result: Object =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ if (ToBoolean(result)) {
+ return True;
+ }
+ }
+ return False;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.some
+ transitioning javascript builtin
+ ArraySome(implicit context: Context)(receiver: Object, ...arguments): Object {
+ try {
+ if (IsNullOrUndefined(receiver)) {
+ goto NullOrUndefinedError;
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto TypeError;
+ }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+
+ // Special cases.
+ try {
+ return FastArraySome(o, len, callbackfn, thisArg)
+ otherwise Bailout;
+ }
+ label Bailout(kValue: Smi) deferred {
+ return ArraySomeLoopContinuation(
+ o, callbackfn, thisArg, Undefined, o, kValue, len, Undefined);
+ }
+ }
+ label TypeError deferred {
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
+ }
+ label NullOrUndefinedError deferred {
+ ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.some');
+ }
+ }
+}
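
ArraySomeLoopLazyDeoptContinuation above resumes execution right after a callback call, so it must first interpret the pending result and only then re-enter the generic loop at k + 1. A minimal TypeScript sketch of that resume step (function names are illustrative):

// Sketch of the lazy-deopt resume point in Array.prototype.some above:
// the callback for index k has already run and produced `result`, so the
// continuation first checks it, then resumes the generic loop at k + 1.
function someLoop<T>(o: T[], cb: (v: T, k: number, o: T[]) => unknown,
                     k: number): boolean {
  for (; k < o.length; k++) {
    if (!(k in o)) continue;          // 6b: HasProperty check.
    if (cb(o[k], k, o)) return true;  // 6c iii: testResult is true.
  }
  return false;
}

function someLazyContinuation<T>(
    o: T[], cb: (v: T, k: number, o: T[]) => unknown,
    k: number, result: unknown): boolean {
  if (result) return true;            // Pending callback result was truthy.
  return someLoop(o, cb, k + 1);      // Otherwise continue at k + 1.
}

someLazyContinuation([1, 2, 3], v => v > 2, 0, false); // resumes at k = 1 -> true
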
diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq
index 7307f45f34..586630cd39 100644
--- a/deps/v8/src/builtins/array-splice.tq
+++ b/deps/v8/src/builtins/array-splice.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace array {
+namespace array_splice {
// Given {elements}, we want to create a non-zero length array of type
// FixedArrayType. Most of this behavior is outsourced to ExtractFixedArray(),
// but the special case of wanting to have a FixedDoubleArray when given a
@@ -39,7 +39,7 @@ namespace array {
macro StoreHoles<FixedArrayType: type>(
elements: FixedArrayType, holeStartIndex: Smi, holeEndIndex: Smi): void {
for (let i: Smi = holeStartIndex; i < holeEndIndex; i++) {
- StoreArrayHole(elements, i);
+ array::StoreArrayHole(elements, i);
}
}
@@ -57,7 +57,7 @@ namespace array {
lengthDelta: Smi, actualStart: Smi, insertCount: Smi,
actualDeleteCount: Smi): void labels Bailout {
// Make sure elements are writable.
- EnsureWriteableFastElements(a);
+ array::EnsureWriteableFastElements(a);
if (insertCount != actualDeleteCount) {
const elements: FixedArrayBase = a.elements;
@@ -197,7 +197,7 @@ namespace array {
const fromValue: Object = GetProperty(o, from);
// ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(k), fromValue).
- CreateDataProperty(a, k, fromValue);
+ FastCreateDataProperty(a, k, fromValue);
}
// d. Increment k by 1.
@@ -398,7 +398,7 @@ namespace array {
// Bailout exception.
const newLength: Number = len + insertCount - actualDeleteCount;
if (newLength > kMaxSafeInteger) {
- ThrowTypeError(context, kInvalidArrayLength, start);
+ ThrowTypeError(kInvalidArrayLength, start);
}
try {
diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq
index 7d7647427a..b193e751fd 100644
--- a/deps/v8/src/builtins/array-unshift.tq
+++ b/deps/v8/src/builtins/array-unshift.tq
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace array {
+namespace array_unshift {
extern builtin ArrayUnshift(Context, JSFunction, Object, int32);
macro TryFastArrayUnshift(
context: Context, receiver: Object, arguments: constexpr Arguments): never
labels Slow {
const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
- EnsureWriteableFastElements(array);
+ array::EnsureWriteableFastElements(array);
const map: Map = array.map;
if (!IsExtensibleMap(map)) goto Slow;
@@ -36,7 +36,7 @@ namespace array {
if (argCount > 0) {
// a. If len + argCount > 2**53 - 1, throw a TypeError exception.
if (length + argCount > kMaxSafeInteger) {
- ThrowTypeError(context, kInvalidArrayLength);
+ ThrowTypeError(kInvalidArrayLength);
}
// b. Let k be len.
diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq
index 8e3b3ea704..9807db19c6 100644
--- a/deps/v8/src/builtins/array.tq
+++ b/deps/v8/src/builtins/array.tq
@@ -43,19 +43,10 @@ namespace array {
}
macro LoadElementOrUndefined(a: FixedArray, i: Smi): Object {
- const e: Object = a[i];
+ const e: Object = a.objects[i];
return e == Hole ? Undefined : e;
}
- macro LoadElementOrUndefined(a: FixedArray, i: intptr): Object {
- const e: Object = a[i];
- return e == Hole ? Undefined : e;
- }
-
- macro LoadElementOrUndefined(a: FixedArray, i: constexpr int31): Object {
- return LoadElementOrUndefined(a, Convert<intptr>(i));
- }
-
macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined {
try {
const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise IfHole;
@@ -66,34 +57,18 @@ namespace array {
}
}
- macro LoadElementOrUndefined(a: FixedDoubleArray, i: intptr):
- NumberOrUndefined {
- try {
- const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise IfHole;
- return AllocateHeapNumberWithValue(f);
- }
- label IfHole {
- return Undefined;
- }
- }
-
- macro LoadElementOrUndefined(a: FixedDoubleArray, i: constexpr int31):
- NumberOrUndefined {
- return LoadElementOrUndefined(a, Convert<intptr>(i));
- }
-
macro StoreArrayHole(elements: FixedDoubleArray, k: Smi): void {
StoreFixedDoubleArrayHoleSmi(elements, k);
}
macro StoreArrayHole(elements: FixedArray, k: Smi): void {
- elements[k] = Hole;
+ elements.objects[k] = Hole;
}
macro CopyArrayElement(
elements: FixedArray, newElements: FixedArray, from: Smi, to: Smi): void {
- const e: Object = elements[from];
- newElements[to] = e;
+ const e: Object = elements.objects[from];
+ newElements.objects[to] = e;
}
macro CopyArrayElement(
@@ -102,7 +77,7 @@ namespace array {
try {
const floatValue: float64 = LoadDoubleWithHoleCheck(elements, from)
otherwise FoundHole;
- newElements[to] = floatValue;
+ newElements.floats[to] = floatValue;
}
label FoundHole {
StoreArrayHole(newElements, to);
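
The array.tq hunk above applies two different hole policies: LoadElementOrUndefined converts the hole to undefined for user-visible reads, while CopyArrayElement and StoreArrayHole preserve it when shuffling backing stores. A short TypeScript sketch of the distinction (`Hole` is an illustrative stand-in for V8's TheHole):

// Sketch of the two hole policies in array.tq above.
const Hole = Symbol('hole');
type Elem = number | typeof Hole;

function loadElementOrUndefined(a: Elem[], i: number): number | undefined {
  const e = a[i];
  return e === Hole ? undefined : e;  // user-visible read: hole -> undefined
}

function copyArrayElement(src: Elem[], dst: Elem[],
                          from: number, to: number): void {
  dst[to] = src[from];                // internal copy: the hole is preserved
}

const a: Elem[] = [1, Hole, 3];
loadElementOrUndefined(a, 1);         // undefined
const b: Elem[] = [0, 0, 0];
copyArrayElement(a, b, 1, 1);         // b[1] is Hole, not undefined
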
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 7887fa1383..eca9e4f667 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -10,6 +10,10 @@
#include 'src/objects.h'
#include 'src/objects/arguments.h'
#include 'src/objects/bigint.h'
+#include 'src/objects/js-generator.h'
+#include 'src/objects/js-promise.h'
+#include 'src/objects/module.h'
+#include 'src/objects/stack-frame-info.h'
type Arguments constexpr 'CodeStubArguments*';
type void;
@@ -19,15 +23,27 @@ type Tagged generates 'TNode<Object>' constexpr 'ObjectPtr';
type Smi extends Tagged generates 'TNode<Smi>' constexpr 'Smi';
// A Smi that is greater than or equal to 0. See TaggedIsPositiveSmi.
-type PositiveSmi extends Smi generates 'TNode<Smi>';
+type PositiveSmi extends Smi;
-class HeapObject extends Tagged {
- map_untyped: Tagged;
-}
+// The Smi value zero, which is often used as null for HeapObject types.
+type Zero extends PositiveSmi;
+
+extern class HeapObject extends Tagged { map: Map; }
type Object = Smi | HeapObject;
type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
+type int31 extends int32
+ generates 'TNode<Int32T>' constexpr 'int31_t';
+type uint31 extends uint32
+ generates 'TNode<Uint32T>' constexpr 'uint31_t';
+type int16 extends int31
+ generates 'TNode<Int32T>' constexpr 'int16_t';
+type uint16 extends uint31
+ generates 'TNode<Uint32T>' constexpr 'uint16_t';
+type int8 extends int16 generates 'TNode<Int32T>' constexpr 'int8_t';
+type uint8 extends uint16
+ generates 'TNode<Uint32T>' constexpr 'uint8_t';
type int64 generates 'TNode<Int64T>' constexpr 'int64_t';
type intptr generates 'TNode<IntPtrT>' constexpr 'intptr_t';
type uintptr generates 'TNode<UintPtrT>' constexpr 'uintptr_t';
@@ -37,14 +53,12 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
-type int31 extends int32
- generates 'TNode<Int32T>' constexpr 'int31_t';
type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
type AbstractCode extends HeapObject generates 'TNode<AbstractCode>';
type Code extends AbstractCode generates 'TNode<Code>';
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
type Context extends HeapObject generates 'TNode<Context>';
-type NativeContext extends Context generates 'TNode<Context>';
+type NativeContext extends Context;
type String extends HeapObject generates 'TNode<String>';
type Oddball extends HeapObject generates 'TNode<Oddball>';
type HeapNumber extends HeapObject generates 'TNode<HeapNumber>';
@@ -52,70 +66,129 @@ type Number = Smi | HeapNumber;
type BigInt extends HeapObject generates 'TNode<BigInt>';
type Numeric = Number | BigInt;
+extern class Struct extends HeapObject {}
+
+extern class Tuple2 extends Struct {
+ value_1: Object;
+ value_2: Object;
+}
+
+extern class Tuple3 extends Tuple2 { value_3: Object; }
+
+// A direct string can be accessed directly through CSA without going into the
+// C++ runtime. See also: ToDirectStringAssembler.
+type DirectString extends String;
+
+type RootIndex generates 'TNode<Int32T>' constexpr 'RootIndex';
+
type Map extends HeapObject generates 'TNode<Map>';
-// The accessors for HeapObject's map cannot be declared before Map
-// is declared because forward declarations are not (yet) supported.
-// TODO(danno): Make circular references in classes possible. One way to do that
-// would be to pre-process all class declarations and create bindings for them
-// with an uninitialized class type, and then process them later properly
-extern operator '.map' macro LoadMap(HeapObject): Map;
-extern transitioning operator '.map=' macro StoreMap(HeapObject, Map);
-
-// This intrinsic should never be called from Torque code. It's used internally
-// by the 'new' operator and only declared here because it's simpler than
-// building the definition from C++.
-intrinsic %Allocate<Class: type>(size: intptr): Class;
-type FixedArrayBase extends HeapObject generates 'TNode<FixedArrayBase>';
-type FixedArray extends FixedArrayBase generates 'TNode<FixedArray>';
-type FixedDoubleArray extends FixedArrayBase
- generates 'TNode<FixedDoubleArray>';
+extern class FixedArrayBase extends HeapObject { length: Smi; }
-class JSReceiver extends HeapObject {
- properties_or_hash: Object;
+extern class FixedArray extends FixedArrayBase { objects[length]: Object; }
+
+extern class FixedDoubleArray extends FixedArrayBase {
+ floats[length]: float64;
}
-type Constructor extends JSReceiver generates 'TNode<JSReceiver>';
-type JSProxy extends JSReceiver generates 'TNode<JSProxy>';
+// These intrinsics should never be called from Torque code. They're used
+// internally by the 'new' operator and only declared here because it's simpler
+// than building the definition from C++.
+intrinsic %GetAllocationBaseSize<Class: type>(map: Map): intptr;
+intrinsic %Allocate<Class: type>(size: intptr): Class;
+intrinsic %AllocateInternalClass<Class: type>(slotCount: constexpr intptr): Class;
-class JSObject extends JSReceiver {
- elements: FixedArrayBase;
+extern class JSReceiver extends HeapObject {
+ properties_or_hash: FixedArrayBase | Smi;
}
-class JSArgumentsObjectWithLength extends JSObject {
- length: Object;
+type Constructor extends JSReceiver;
+
+extern class JSObject extends JSReceiver { elements: FixedArrayBase; }
+
+macro NewJSObject(
+ map: Map, properties: FixedArrayBase | Smi,
+ elements: FixedArrayBase): JSObject {
+ return new JSObject{map, properties, elements};
+}
+macro NewJSObject(implicit context: Context)(): JSObject {
+ const objectFunction: JSFunction = GetObjectFunction();
+ const map: Map = Cast<Map>(objectFunction.prototype_or_initial_map)
+ otherwise unreachable;
+ return new JSObject{map, kEmptyFixedArray, kEmptyFixedArray};
}
-class JSArray extends JSObject {
- constructor(implicit context: Context)() {
- super(
- GetFastPackedSmiElementsJSArrayMap(), kEmptyFixedArray,
- kEmptyFixedArray);
- this.length = 0;
- }
+extern class JSProxy extends JSReceiver {
+ target: Object;
+ handler: Object;
+}
+
+extern class JSProxyRevocableResult extends JSObject {
+ proxy: Object;
+ revoke: Object;
+}
+
+extern class JSGlobalProxy extends JSObject { native_context: Object; }
+
+extern class JSValue extends JSObject { value: Object; }
+
+extern class JSArgumentsObjectWithLength extends JSObject { length: Object; }
+
+extern class JSArray extends JSObject {
IsEmpty(): bool {
return this.length == 0;
}
length: Number;
}
+macro NewJSArray(implicit context: Context)(
+ map: Map, elements: FixedArrayBase): JSArray {
+ return new JSArray{map, kEmptyFixedArray, elements, elements.length};
+}
+
+macro NewJSArray(implicit context: Context)(): JSArray {
+ return new JSArray{
+ GetFastPackedSmiElementsJSArrayMap(),
+ kEmptyFixedArray,
+ kEmptyFixedArray,
+ 0
+ };
+}
+
// A HeapObject with a JSArray map, and either fast packed elements, or fast
// holey elements when the global NoElementsProtector is not invalidated.
-transient type FastJSArray extends JSArray
- generates 'TNode<JSArray>';
+transient type FastJSArray extends JSArray;
// A FastJSArray when the global ArraySpeciesProtector is not invalidated.
-transient type FastJSArrayForCopy extends FastJSArray
- generates 'TNode<JSArray>';
+transient type FastJSArrayForCopy extends FastJSArray;
// A FastJSArray when the global ArrayIteratorProtector is not invalidated.
-transient type FastJSArrayWithNoCustomIteration extends FastJSArray
- generates 'TNode<JSArray>';
+transient type FastJSArrayWithNoCustomIteration extends FastJSArray;
+
+type NoSharedNameSentinel extends Smi;
+type Script extends HeapObject;
+type DebugInfo extends HeapObject;
+
+type ScopeInfo extends Object generates 'TNode<ScopeInfo>';
+
+extern class SharedFunctionInfo extends HeapObject {
+ weak function_data: Object;
+ name_or_scope_info: String | NoSharedNameSentinel | ScopeInfo;
+ outer_scope_info_or_feedback_metadata: HeapObject;
+ script_or_debug_info: Script | DebugInfo;
+ length: int16;
+ formal_parameter_count: uint16;
+ expected_nof_properties: int8;
+ builtin_function_id: int8;
+ function_token_offset: int16;
+ flags: int32;
+}
-type SharedFunctionInfo extends HeapObject
- generates 'TNode<SharedFunctionInfo>';
+extern class SharedFunctionInfoWithID extends SharedFunctionInfo {
+ unique_id: int32;
+}
-class JSFunction extends JSObject {
+extern class JSFunction extends JSObject {
shared_function_info: SharedFunctionInfo;
context: Context;
feedback_cell: Smi;
@@ -123,35 +196,41 @@ class JSFunction extends JSObject {
weak prototype_or_initial_map: JSReceiver | Map;
}
-extern operator '.formal_parameter_count'
- macro LoadSharedFunctionInfoFormalParameterCount(SharedFunctionInfo): int32;
-
-class JSBoundFunction extends JSObject {
+extern class JSBoundFunction extends JSObject {
bound_target_function: JSReceiver;
bound_this: Object;
bound_arguments: FixedArray;
}
type Callable = JSFunction | JSBoundFunction | JSProxy;
-type FixedTypedArrayBase extends FixedArrayBase
- generates 'TNode<FixedTypedArrayBase>';
+
+extern class FixedTypedArrayBase extends FixedArrayBase {
+ base_pointer: Smi;
+ external_pointer: RawPtr;
+}
+extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
+ FixedArrayBase): intptr;
+
type FixedTypedArray extends FixedTypedArrayBase
generates 'TNode<FixedTypedArray>';
-type SloppyArgumentsElements extends FixedArray
- generates 'TNode<FixedArray>';
+
+extern class SloppyArgumentsElements extends FixedArray {}
type NumberDictionary extends HeapObject
generates 'TNode<NumberDictionary>';
-// RawObjectCasts should *never* be used anywhere in Torque code except for
+// %RawDownCast should *never* be used anywhere in Torque code except in
+// Torque-based UnsafeCast operators preceded by an appropriate type
+// assert().
-intrinsic %RawObjectCast<A: type>(o: Object): A;
-intrinsic %RawPointerCast<A: type>(p: RawPtr): A;
+intrinsic %RawDownCast<To: type, From: type>(x: From): To;
intrinsic %RawConstexprCast<To: type, From: type>(f: From): To;
type NativeContextSlot generates 'TNode<IntPtrT>' constexpr 'int32_t';
const ARRAY_BUFFER_FUN_INDEX: constexpr NativeContextSlot
generates 'Context::ARRAY_BUFFER_FUN_INDEX';
+const ARRAY_BUFFER_NOINIT_FUN_INDEX: constexpr NativeContextSlot
+ generates 'Context::ARRAY_BUFFER_NOINIT_FUN_INDEX';
+const ARRAY_BUFFER_MAP_INDEX: constexpr NativeContextSlot
+ generates 'Context::ARRAY_BUFFER_MAP_INDEX';
const ARRAY_JOIN_STACK_INDEX: constexpr NativeContextSlot
generates 'Context::ARRAY_JOIN_STACK_INDEX';
const OBJECT_FUNCTION_INDEX: constexpr NativeContextSlot
@@ -168,23 +247,65 @@ extern operator '[]=' macro StoreContextElement(
extern operator '[]' macro LoadContextElement(Context, intptr): Object;
extern operator '[]' macro LoadContextElement(Context, Smi): Object;
-type JSArrayBuffer extends JSObject generates 'TNode<JSArrayBuffer>';
-type JSArrayBufferView extends JSObject
- generates 'TNode<JSArrayBufferView>';
-type JSTypedArray extends JSArrayBufferView
- generates 'TNode<JSTypedArray>';
+extern class JSArrayBuffer extends JSObject {
+ byte_length: uintptr;
+ backing_store: RawPtr;
+}
+
+extern class JSArrayBufferView extends JSObject {
+ buffer: JSArrayBuffer;
+ byte_offset: uintptr;
+ byte_length: uintptr;
+}
+
+extern class JSTypedArray extends JSArrayBufferView {
+ AttachOffHeapBuffer(
+ buffer: JSArrayBuffer, map: Map, length: PositiveSmi,
+ byteOffset: uintptr): void {
+ const basePointer: Smi = 0;
+
+    // The maximum byteOffset is 8 * MaxSmi, depending on the platform. 32-bit
+    // platforms are self-limiting, because we can't allocate an array bigger
+    // than our 32-bit arithmetic range anyway. 64-bit platforms could
+    // theoretically have an offset up to 2^35 - 1.
+ const backingStore = buffer.backing_store;
+ const externalPointer = backingStore + Convert<intptr>(byteOffset);
+
+    // Assert that no overflow has occurred. Only assert if the mock array
+    // buffer allocator is NOT used. When the mock array buffer is used,
+    // impossibly large allocations are allowed that would overflow the
+    // computation and erroneously make this assertion fail.
+ assert(
+ IsMockArrayBufferAllocatorFlag() ||
+ Convert<uintptr>(externalPointer) >= Convert<uintptr>(backingStore));
+
+ this.buffer = buffer;
+ this.elements = new
+ FixedTypedArrayBase{map, length, basePointer, externalPointer};
+ }
+
+ length: Smi;
+}
+
+extern class JSAccessorPropertyDescriptor extends JSObject {
+ get: Object;
+ set: Object;
+ enumerable: Object;
+ configurable: Object;
+}
+
+extern class JSCollection extends JSObject { table: Object; }
+
type JSDataView extends JSArrayBufferView generates 'TNode<JSDataView>';
type InstanceType generates 'TNode<Int32T>' constexpr 'InstanceType';
type ElementsKind generates 'TNode<Int32T>' constexpr 'ElementsKind';
-type LanguageMode extends Tagged
- generates 'TNode<Smi>' constexpr 'LanguageMode';
+type LanguageMode extends Smi constexpr 'LanguageMode';
type ExtractFixedArrayFlags
generates 'TNode<Smi>'
constexpr 'CodeStubAssembler::ExtractFixedArrayFlags';
type ParameterMode
generates 'TNode<Int32T>' constexpr 'ParameterMode';
-type RootIndex generates 'TNode<Int32T>' constexpr 'RootIndex';
type WriteBarrierMode
generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
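
AttachOffHeapBuffer in the hunk above computes the typed array's external pointer as backing_store + byteOffset and asserts that the unsigned addition did not wrap. A hedged TypeScript sketch of that check, with bigint standing in for uintptr and an assumed 64-bit pointer width:

// Sketch of the external-pointer computation in AttachOffHeapBuffer above.
// bigint stands in for uintptr; PTR_MASK models fixed-width wraparound.
const PTR_MASK = (1n << 64n) - 1n;

function attachOffHeapPointer(backingStore: bigint, byteOffset: bigint): bigint {
  const externalPointer = (backingStore + byteOffset) & PTR_MASK;
  // Mirrors the assert: if no overflow occurred, the sum is >= the base.
  if (externalPointer < backingStore) {
    throw new RangeError('external pointer overflowed uintptr range');
  }
  return externalPointer;
}

attachOffHeapPointer(0x1000n, 8n); // 0x1008n
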
@@ -193,6 +314,79 @@ type ToIntegerTruncationMode
constexpr 'CodeStubAssembler::ToIntegerTruncationMode';
type AllocationFlags constexpr 'AllocationFlags';
+extern class Foreign extends HeapObject { foreign_address: RawPtr; }
+
+extern class InterceptorInfo extends Struct {
+ getter: Foreign | Zero;
+ setter: Foreign | Zero;
+ query: Foreign | Zero;
+ descriptor: Foreign | Zero;
+ deleter: Foreign | Zero;
+ enumerator: Foreign | Zero;
+ definer: Foreign | Zero;
+ data: Object;
+ flags: Smi;
+}
+
+extern class AccessCheckInfo extends Struct {
+ callback: Foreign | Zero;
+ named_interceptor: InterceptorInfo | Zero;
+ indexed_interceptor: InterceptorInfo | Zero;
+ data: Object;
+}
+
+extern class ArrayBoilerplateDescription extends Struct {
+ flags: Smi;
+ constant_elements: FixedArrayBase;
+}
+
+extern class AliasedArgumentsEntry extends Struct { aliased_context_slot: Smi; }
+
+extern class Cell extends HeapObject { value: Object; }
+
+extern class DataHandler extends Struct {
+ smi_handler: Smi | Code;
+ validity_cell: Smi | Cell;
+ weak data_1: Object;
+ weak data_2: Object;
+ weak data_3: Object;
+}
+
+extern class JSPromise extends JSObject {
+ reactions_or_result: Object;
+ flags: Smi;
+}
+
+extern class Microtask extends Struct {}
+
+extern class CallbackTask extends Microtask {
+ callback: Foreign;
+ data: Foreign;
+}
+
+extern class CallableTask extends Microtask {
+ callable: JSReceiver;
+ context: Context;
+}
+
+extern class StackFrameInfo extends Struct {
+ line_number: Smi;
+ column_number: Smi;
+ script_id: Smi;
+ script_name: Object;
+ script_name_or_source_url: Object;
+ function_name: Object;
+ flag: Smi;
+ id: Smi;
+}
+
+extern class ClassPositions extends Struct {
+ start: Smi;
+ end: Smi;
+}
+
+extern class WasmExceptionTag extends Struct { index: Smi; }
+
const kSmiTagSize: constexpr int31 generates 'kSmiTagSize';
const NO_ELEMENTS: constexpr ElementsKind generates 'NO_ELEMENTS';
@@ -232,11 +426,12 @@ const BIGINT64_ELEMENTS:
const kNone:
constexpr AllocationFlags generates 'CodeStubAssembler::kNone';
-const kDoubleAlignment:
- constexpr AllocationFlags generates 'kDoubleAlignment';
-const kPretenured: constexpr AllocationFlags generates 'kPretenured';
-const kAllowLargeObjectAllocation:
- constexpr AllocationFlags generates 'kAllowLargeObjectAllocation';
+const kDoubleAlignment: constexpr AllocationFlags
+ generates 'CodeStubAssembler::kDoubleAlignment';
+const kPretenured:
+ constexpr AllocationFlags generates 'CodeStubAssembler::kPretenured';
+const kAllowLargeObjectAllocation: constexpr AllocationFlags
+ generates 'CodeStubAssembler::kAllowLargeObjectAllocation';
type FixedUint8Array extends FixedTypedArray;
type FixedInt8Array extends FixedTypedArray;
@@ -264,27 +459,48 @@ const kEmptyFixedArrayRootIndex:
const kTheHoleValueRootIndex:
constexpr RootIndex generates 'RootIndex::kTheHoleValue';
+const kInvalidArrayBufferLength: constexpr MessageTemplate
+ generates 'MessageTemplate::kInvalidArrayBufferLength';
const kInvalidArrayLength: constexpr MessageTemplate
generates 'MessageTemplate::kInvalidArrayLength';
const kCalledNonCallable: constexpr MessageTemplate
generates 'MessageTemplate::kCalledNonCallable';
const kCalledOnNullOrUndefined: constexpr MessageTemplate
generates 'MessageTemplate::kCalledOnNullOrUndefined';
+const kInvalidOffset: constexpr MessageTemplate
+ generates 'MessageTemplate::kInvalidOffset';
const kInvalidTypedArrayLength: constexpr MessageTemplate
generates 'MessageTemplate::kInvalidTypedArrayLength';
+const kIteratorSymbolNonCallable: constexpr MessageTemplate
+ generates 'MessageTemplate::kIteratorSymbolNonCallable';
const kIteratorValueNotAnObject: constexpr MessageTemplate
generates 'MessageTemplate::kIteratorValueNotAnObject';
const kNotIterable: constexpr MessageTemplate
generates 'MessageTemplate::kNotIterable';
+const kReduceNoInitial: constexpr MessageTemplate
+ generates 'MessageTemplate::kReduceNoInitial';
+const kFirstArgumentNotRegExp: constexpr MessageTemplate
+ generates 'MessageTemplate::kFirstArgumentNotRegExp';
+const kBigIntMixedTypes: constexpr MessageTemplate
+ generates 'MessageTemplate::kBigIntMixedTypes';
+const kTypedArrayTooShort: constexpr MessageTemplate
+ generates 'MessageTemplate::kTypedArrayTooShort';
const kMaxArrayIndex:
constexpr uint32 generates 'JSArray::kMaxArrayIndex';
const kTypedArrayMaxByteLength:
constexpr uintptr generates 'FixedTypedArrayBase::kMaxByteLength';
+const V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP:
+ constexpr int31 generates 'V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP';
const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger';
+const kSmiMaxValue: constexpr uintptr generates 'kSmiMaxValue';
const kStringMaxLength: constexpr int31 generates 'String::kMaxLength';
const kFixedArrayMaxLength:
constexpr int31 generates 'FixedArray::kMaxLength';
+const kFixedTypedArrayBaseHeaderSize: constexpr intptr
+ generates 'FixedTypedArrayBase::kHeaderSize';
+const kObjectAlignmentMask: constexpr intptr
+ generates 'kObjectAlignmentMask';
const kMaxRegularHeapObjectSize: constexpr int31
generates 'kMaxRegularHeapObjectSize';
@@ -315,11 +531,12 @@ const kInvalidDataViewAccessorOffset: constexpr MessageTemplate
const kStrictReadOnlyProperty: constexpr MessageTemplate
generates 'MessageTemplate::kStrictReadOnlyProperty';
-type Hole extends Oddball generates 'TNode<Oddball>';
-type Null extends Oddball generates 'TNode<Oddball>';
-type Undefined extends Oddball generates 'TNode<Oddball>';
-type True extends Oddball generates 'TNode<Oddball>';
-type False extends Oddball generates 'TNode<Oddball>';
+type Hole extends Oddball;
+type Null extends Oddball;
+type Undefined extends Oddball;
+type True extends Oddball;
+type False extends Oddball;
+type EmptyString extends String;
type Boolean = True | False;
type NumberOrUndefined = Number | Undefined;
@@ -331,7 +548,7 @@ extern macro TrueConstant(): True;
extern macro FalseConstant(): False;
extern macro Int32TrueConstant(): bool;
extern macro Int32FalseConstant(): bool;
-extern macro EmptyStringConstant(): String;
+extern macro EmptyStringConstant(): EmptyString;
extern macro LengthStringConstant(): String;
const Hole: Hole = TheHoleConstant();
@@ -339,7 +556,7 @@ const Null: Null = NullConstant();
const Undefined: Undefined = UndefinedConstant();
const True: True = TrueConstant();
const False: False = FalseConstant();
-const kEmptyString: String = EmptyStringConstant();
+const kEmptyString: EmptyString = EmptyStringConstant();
const kLengthString: String = LengthStringConstant();
const true: constexpr bool generates 'true';
@@ -356,6 +573,36 @@ const INTPTR_PARAMETERS: constexpr ParameterMode
const SKIP_WRITE_BARRIER:
constexpr WriteBarrierMode generates 'SKIP_WRITE_BARRIER';
+extern class AsyncGeneratorRequest extends Struct {
+ next: AsyncGeneratorRequest | Undefined;
+ resume_mode: Smi;
+ value: Object;
+ promise: JSPromise;
+}
+
+extern class ModuleInfoEntry extends Struct {
+ export_name: String | Undefined;
+ local_name: String | Undefined;
+ import_name: String | Undefined;
+ module_request: Smi;
+ cell_index: Smi;
+ beg_pos: Smi;
+ end_pos: Smi;
+}
+
+extern class PromiseCapability extends Struct {
+ promise: JSReceiver | Undefined;
+ resolve: Object;
+ reject: Object;
+}
+
+extern class PromiseReaction extends Struct {
+ next: PromiseReaction | Zero;
+ reject_handler: Callable | Undefined;
+ fulfill_handler: Callable | Undefined;
+ promise_or_capability: JSPromise | PromiseCapability | Undefined;
+}
+
extern macro Is64(): constexpr bool;
extern macro SelectBooleanConstant(bool): Boolean;
@@ -388,14 +635,18 @@ extern transitioning builtin HasProperty(implicit context: Context)(
extern transitioning macro HasProperty_Inline(implicit context: Context)(
JSReceiver, Object): Boolean;
-extern macro ThrowRangeError(Context, constexpr MessageTemplate): never;
-extern macro ThrowRangeError(Context, constexpr MessageTemplate, Object): never;
-extern macro ThrowTypeError(Context, constexpr MessageTemplate): never;
-extern macro ThrowTypeError(
- Context, constexpr MessageTemplate, constexpr string): never;
-extern macro ThrowTypeError(Context, constexpr MessageTemplate, Object): never;
-extern macro ThrowTypeError(
- Context, constexpr MessageTemplate, Object, Object, Object): never;
+extern macro ThrowRangeError(implicit context: Context)(
+ constexpr MessageTemplate): never;
+extern macro ThrowRangeError(implicit context: Context)(
+ constexpr MessageTemplate, Object): never;
+extern macro ThrowTypeError(implicit context: Context)(
+ constexpr MessageTemplate): never;
+extern macro ThrowTypeError(implicit context: Context)(
+ constexpr MessageTemplate, constexpr string): never;
+extern macro ThrowTypeError(implicit context: Context)(
+ constexpr MessageTemplate, Object): never;
+extern macro ThrowTypeError(implicit context: Context)(
+ constexpr MessageTemplate, Object, Object, Object): never;
extern macro ArraySpeciesCreate(Context, Object, Number): JSReceiver;
extern macro ArrayCreate(implicit context: Context)(Number): JSArray;
extern macro BuildAppendJSArray(
@@ -411,6 +662,12 @@ extern macro Construct(implicit context: Context)(
Constructor, Object, Object): JSReceiver;
extern macro Construct(implicit context: Context)(
Constructor, Object, Object, Object): JSReceiver;
+extern macro ConstructWithTarget(implicit context: Context)(
+ Constructor, JSReceiver): JSReceiver;
+extern macro ConstructWithTarget(implicit context: Context)(
+ Constructor, JSReceiver, Object): JSReceiver;
+extern macro SpeciesConstructor(implicit context: Context)(
+ Object, JSReceiver): JSReceiver;
extern builtin ToObject(Context, Object): JSReceiver;
extern macro ToObject_Inline(Context, Object): JSReceiver;
@@ -422,19 +679,19 @@ extern builtin ToString(Context, Object): String;
extern transitioning runtime NormalizeElements(Context, JSObject);
extern transitioning runtime TransitionElementsKindWithKind(
Context, JSObject, Smi);
-extern transitioning runtime CreateDataProperty(implicit context: Context)(
- JSReceiver, Object, Object);
extern macro LoadBufferObject(RawPtr, constexpr int32): Object;
extern macro LoadBufferPointer(RawPtr, constexpr int32): RawPtr;
extern macro LoadBufferSmi(RawPtr, constexpr int32): Smi;
+extern macro LoadFixedTypedArrayOnHeapBackingStore(FixedTypedArrayBase): RawPtr;
extern macro LoadRoot(constexpr RootIndex): Object;
extern macro StoreRoot(constexpr RootIndex, Object): Object;
-extern macro LoadAndUntagToWord32Root(constexpr RootIndex): int32;
extern runtime StringEqual(Context, String, String): Oddball;
extern builtin StringLessThan(Context, String, String): Boolean;
+extern macro StringCharCodeAt(String, intptr): int32;
+extern runtime StringCompareSequence(Context, String, String, Number): Boolean;
extern macro StrictEqual(Object, Object): Boolean;
extern macro SmiLexicographicCompare(Smi, Smi): Smi;
@@ -443,6 +700,8 @@ extern runtime ThrowInvalidStringLength(Context): never;
extern operator '==' macro WordEqual(RawPtr, RawPtr): bool;
extern operator '!=' macro WordNotEqual(RawPtr, RawPtr): bool;
+extern operator '+' macro RawPtrAdd(RawPtr, intptr): RawPtr;
+extern operator '+' macro RawPtrAdd(intptr, RawPtr): RawPtr;
extern operator '<' macro Int32LessThan(int32, int32): bool;
extern operator '<' macro Uint32LessThan(uint32, uint32): bool;
@@ -493,6 +752,11 @@ extern operator '<=' macro IntPtrLessThanOrEqual(intptr, intptr): bool;
extern operator '<=' macro UintPtrLessThanOrEqual(uintptr, uintptr): bool;
extern operator '>=' macro IntPtrGreaterThanOrEqual(intptr, intptr): bool;
extern operator '>=' macro UintPtrGreaterThanOrEqual(uintptr, uintptr): bool;
+extern operator '~' macro WordNot(intptr): intptr;
+extern operator '~' macro WordNot(uintptr): uintptr;
+extern operator '~' macro ConstexprWordNot(constexpr intptr): constexpr intptr;
+extern operator '~' macro ConstexprWordNot(constexpr uintptr):
+ constexpr uintptr;
extern operator '==' macro Float64Equal(float64, float64): bool;
extern operator '!=' macro Float64NotEqual(float64, float64): bool;
@@ -503,8 +767,9 @@ extern macro BranchIfNumberEqual(Number, Number): never
operator '==' macro IsNumberEqual(a: Number, b: Number): bool {
return (BranchIfNumberEqual(a, b)) ? true : false;
}
-extern operator '!=' macro BranchIfNumberNotEqual(Number, Number): never
- labels Taken, NotTaken;
+operator '!=' macro IsNumberNotEqual(a: Number, b: Number): bool {
+ return (BranchIfNumberEqual(a, b)) ? false : true;
+}
extern operator '<' macro BranchIfNumberLessThan(Number, Number): never
labels Taken, NotTaken;
extern operator '<=' macro BranchIfNumberLessThanOrEqual(Number, Number): never
@@ -516,8 +781,22 @@ extern operator '>=' macro BranchIfNumberGreaterThanOrEqual(
Number, Number): never
labels Taken, NotTaken;
-extern operator '==' macro WordEqual(Object, Object): bool;
-extern operator '!=' macro WordNotEqual(Object, Object): bool;
+// The type of all tagged values that can safely be compared with WordEqual.
+type TaggedWithIdentity =
+ JSReceiver | FixedArrayBase | Oddball | Map | EmptyString;
+
+extern operator '==' macro WordEqual(TaggedWithIdentity, Object): bool;
+extern operator '==' macro WordEqual(Object, TaggedWithIdentity): bool;
+extern operator '==' macro WordEqual(
+ TaggedWithIdentity, TaggedWithIdentity): bool;
+extern operator '!=' macro WordNotEqual(TaggedWithIdentity, Object): bool;
+extern operator '!=' macro WordNotEqual(Object, TaggedWithIdentity): bool;
+extern operator '!=' macro WordNotEqual(
+ TaggedWithIdentity, TaggedWithIdentity): bool;
+// Do not overload == and != when it is unclear whether object identity is
+// the right notion of equality.
+extern macro WordEqual(Object, Object): bool;
+extern macro WordNotEqual(Object, Object): bool;
extern operator '+' macro SmiAdd(Smi, Smi): Smi;
extern operator '-' macro SmiSub(Smi, Smi): Smi;
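
The TaggedWithIdentity union above limits the == and != overloads to types for which raw pointer comparison is the right notion of equality; HeapNumber and most strings are excluded because equal values can live at different heap addresses. A small TypeScript analogy:

// Analogy for the TaggedWithIdentity restriction above: identity (===)
// is only the right equality for values with one canonical instance.
const a = new Number(1);
const b = new Number(1);
console.log(a === b);        // false: equal values, distinct heap objects
console.log(null === null);  // true: oddball-like singletons compare by identity
// Torque therefore overloads == only for types (oddballs, maps, the empty
// string, ...) where address equality and value equality coincide.
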
@@ -527,6 +806,10 @@ extern operator '<<' macro SmiShl(Smi, constexpr int31): Smi;
extern operator '>>' macro SmiSar(Smi, constexpr int31): Smi;
extern operator '+' macro IntPtrAdd(intptr, intptr): intptr;
+extern operator '+' macro ConstexprIntPtrAdd(
+ constexpr intptr, constexpr intptr): constexpr intptr;
+extern operator '+' macro ConstexprUintPtrAdd(
+ constexpr uintptr, constexpr uintptr): constexpr intptr;
extern operator '-' macro IntPtrSub(intptr, intptr): intptr;
extern operator '*' macro IntPtrMul(intptr, intptr): intptr;
extern operator '/' macro IntPtrDiv(intptr, intptr): intptr;
@@ -537,6 +820,7 @@ extern operator '|' macro WordOr(intptr, intptr): intptr;
extern operator '+' macro UintPtrAdd(uintptr, uintptr): uintptr;
extern operator '-' macro UintPtrSub(uintptr, uintptr): uintptr;
+extern operator '<<' macro WordShl(uintptr, uintptr): uintptr;
extern operator '>>>' macro WordShr(uintptr, uintptr): uintptr;
extern operator '&' macro WordAnd(uintptr, uintptr): uintptr;
extern operator '|' macro WordOr(uintptr, uintptr): uintptr;
@@ -551,6 +835,9 @@ extern operator '&' macro Word32And(int32, int32): int32;
extern operator '&' macro Word32And(uint32, uint32): uint32;
extern operator '==' macro
ConstexprInt31Equal(constexpr int31, constexpr int31): constexpr bool;
+extern operator '>=' macro
+ConstexprInt31GreaterThanEqual(
+ constexpr int31, constexpr int31): constexpr bool;
extern operator '==' macro Word32Equal(int32, int32): bool;
extern operator '==' macro Word32Equal(uint32, uint32): bool;
@@ -563,6 +850,8 @@ extern operator '|' macro Word32Or(int32, int32): int32;
extern operator '|' macro Word32Or(uint32, uint32): uint32;
extern operator '&' macro Word32And(bool, bool): bool;
extern operator '|' macro Word32Or(bool, bool): bool;
+extern operator '==' macro Word32Equal(bool, bool): bool;
+extern operator '!=' macro Word32NotEqual(bool, bool): bool;
extern operator '+' macro Float64Add(float64, float64): float64;
@@ -585,6 +874,10 @@ extern operator '>>>' macro ConstexprUintPtrShr(
extern macro SmiMax(Smi, Smi): Smi;
extern macro SmiMin(Smi, Smi): Smi;
extern macro SmiMul(Smi, Smi): Number;
+extern macro SmiMod(Smi, Smi): Number;
+
+extern macro IntPtrMax(intptr, intptr): intptr;
+extern macro IntPtrMin(intptr, intptr): intptr;
extern operator '!' macro ConstexprBoolNot(constexpr bool): constexpr bool;
extern operator '!' macro Word32BinaryNot(bool): bool;
@@ -594,6 +887,7 @@ extern operator '.instanceType' macro LoadInstanceType(HeapObject):
InstanceType;
extern operator '.length' macro LoadStringLengthAsWord(String): intptr;
+extern operator '.length_smi' macro LoadStringLengthAsSmi(String): Smi;
extern operator '.length' macro GetArgumentsLength(constexpr Arguments): intptr;
extern operator '[]' macro GetArgumentValue(
@@ -606,7 +900,7 @@ extern macro IsValidPositiveSmi(intptr): bool;
extern macro HeapObjectToJSDataView(HeapObject): JSDataView
labels CastError;
-extern macro HeapObjectToJSTypedArray(HeapObject): JSTypedArray
+extern macro HeapObjectToJSArrayBuffer(HeapObject): JSArrayBuffer
labels CastError;
extern macro TaggedToHeapObject(Object): HeapObject
labels CastError;
@@ -614,6 +908,8 @@ extern macro TaggedToSmi(Object): Smi
labels CastError;
extern macro TaggedToPositiveSmi(Object): PositiveSmi
labels CastError;
+extern macro TaggedToDirectString(Object): DirectString
+ labels CastError;
extern macro HeapObjectToJSArray(HeapObject): JSArray
labels CastError;
extern macro HeapObjectToCallable(HeapObject): Callable
@@ -634,98 +930,141 @@ extern macro HeapObjectToSloppyArgumentsElements(HeapObject):
extern macro TaggedToNumber(Object): Number
labels CastError;
-macro CastHeapObject<A: type>(o: HeapObject): A
+macro Cast<A: type>(implicit context: Context)(o: Object): A
+ labels CastError {
+ return Cast<A>(TaggedToHeapObject(o) otherwise CastError)
+ otherwise CastError;
+}
+
+Cast<Smi>(o: Object): Smi
+ labels CastError {
+ return TaggedToSmi(o) otherwise CastError;
+}
+
+Cast<PositiveSmi>(o: Object): PositiveSmi
+ labels CastError {
+ return TaggedToPositiveSmi(o) otherwise CastError;
+}
+
+Cast<Number>(o: Object): Number
+ labels CastError {
+ return TaggedToNumber(o) otherwise CastError;
+}
+
+macro Cast<A: type>(o: HeapObject): A
labels CastError;
-CastHeapObject<HeapObject>(o: HeapObject): HeapObject
+Cast<HeapObject>(o: HeapObject): HeapObject
labels CastError {
return o;
}
-CastHeapObject<FixedArray>(o: HeapObject): FixedArray
+Cast<FixedArray>(o: HeapObject): FixedArray
labels CastError {
return HeapObjectToFixedArray(o) otherwise CastError;
}
-CastHeapObject<FixedDoubleArray>(o: HeapObject): FixedDoubleArray
+Cast<FixedDoubleArray>(o: HeapObject): FixedDoubleArray
labels CastError {
return HeapObjectToFixedDoubleArray(o) otherwise CastError;
}
-CastHeapObject<SloppyArgumentsElements>(o: HeapObject): SloppyArgumentsElements
+Cast<SloppyArgumentsElements>(o: HeapObject): SloppyArgumentsElements
labels CastError {
return HeapObjectToSloppyArgumentsElements(o) otherwise CastError;
}
-CastHeapObject<JSDataView>(o: HeapObject): JSDataView
+Cast<JSDataView>(o: HeapObject): JSDataView
labels CastError {
return HeapObjectToJSDataView(o) otherwise CastError;
}
-CastHeapObject<JSTypedArray>(o: HeapObject): JSTypedArray
+Cast<JSTypedArray>(o: HeapObject): JSTypedArray
labels CastError {
- if (IsJSTypedArray(o)) return %RawObjectCast<JSTypedArray>(o);
+ if (IsJSTypedArray(o)) return %RawDownCast<JSTypedArray>(o);
goto CastError;
}
-CastHeapObject<Callable>(o: HeapObject): Callable
+Cast<JSTypedArray>(implicit context: Context)(o: Object): JSTypedArray
+ labels CastError {
+ const heapObject = Cast<HeapObject>(o) otherwise CastError;
+ return Cast<JSTypedArray>(heapObject) otherwise CastError;
+}
+
+Cast<Callable>(o: HeapObject): Callable
+ labels CastError {
+ return HeapObjectToCallable(o) otherwise CastError;
+}
+
+Cast<Undefined | Callable>(o: HeapObject): Undefined | Callable
labels CastError {
+ if (o == Undefined) return Undefined;
return HeapObjectToCallable(o) otherwise CastError;
}
-CastHeapObject<JSArray>(o: HeapObject): JSArray
+Cast<JSArray>(o: HeapObject): JSArray
labels CastError {
return HeapObjectToJSArray(o) otherwise CastError;
}
-CastHeapObject<Context>(o: HeapObject): Context
+Cast<JSArrayBuffer>(o: HeapObject): JSArrayBuffer
labels CastError {
- if (IsContext(o)) return %RawObjectCast<Context>(o);
+ return HeapObjectToJSArrayBuffer(o) otherwise CastError;
+}
+
+Cast<Context>(o: HeapObject): Context
+ labels CastError {
+ if (IsContext(o)) return %RawDownCast<Context>(o);
goto CastError;
}
-CastHeapObject<JSObject>(o: HeapObject): JSObject
+Cast<JSObject>(o: HeapObject): JSObject
labels CastError {
- if (IsJSObject(o)) return %RawObjectCast<JSObject>(o);
+ if (IsJSObject(o)) return %RawDownCast<JSObject>(o);
goto CastError;
}
-CastHeapObject<NumberDictionary>(o: HeapObject): NumberDictionary
+Cast<NumberDictionary>(o: HeapObject): NumberDictionary
labels CastError {
- if (IsNumberDictionary(o)) return %RawObjectCast<NumberDictionary>(o);
+ if (IsNumberDictionary(o)) return %RawDownCast<NumberDictionary>(o);
goto CastError;
}
-CastHeapObject<FixedTypedArrayBase>(o: HeapObject): FixedTypedArrayBase
+Cast<FixedTypedArrayBase>(o: HeapObject): FixedTypedArrayBase
labels CastError {
- if (IsFixedTypedArray(o)) return %RawObjectCast<FixedTypedArrayBase>(o);
+ if (IsFixedTypedArray(o)) return %RawDownCast<FixedTypedArrayBase>(o);
goto CastError;
}
-CastHeapObject<String>(o: HeapObject): String
+Cast<String>(o: HeapObject): String
labels CastError {
return HeapObjectToString(o) otherwise CastError;
}
-CastHeapObject<Constructor>(o: HeapObject): Constructor
+Cast<DirectString>(o: HeapObject): DirectString
+ labels CastError {
+ return TaggedToDirectString(o) otherwise CastError;
+}
+
+Cast<Constructor>(o: HeapObject): Constructor
labels CastError {
return HeapObjectToConstructor(o) otherwise CastError;
}
-CastHeapObject<HeapNumber>(o: HeapObject): HeapNumber
+Cast<HeapNumber>(o: HeapObject): HeapNumber
labels CastError {
- if (IsHeapNumber(o)) return %RawObjectCast<HeapNumber>(o);
+ if (IsHeapNumber(o)) return %RawDownCast<HeapNumber>(o);
goto CastError;
}
-CastHeapObject<Map>(implicit context: Context)(o: HeapObject): Map
+Cast<Map>(implicit context: Context)(o: HeapObject): Map
labels CastError {
- if (IsMap(o)) return %RawObjectCast<Map>(o);
+ if (IsMap(o)) return %RawDownCast<Map>(o);
goto CastError;
}
-CastHeapObject<JSArgumentsObjectWithLength>(implicit context: Context)(
- o: HeapObject): JSArgumentsObjectWithLength
+Cast<JSArgumentsObjectWithLength>(implicit context: Context)(o: HeapObject):
+ JSArgumentsObjectWithLength
labels CastError {
const map: Map = o.map;
try {
@@ -736,12 +1075,11 @@ CastHeapObject<JSArgumentsObjectWithLength>(implicit context: Context)(
goto CastError;
}
label True {
- return %RawObjectCast<JSArgumentsObjectWithLength>(o);
+ return %RawDownCast<JSArgumentsObjectWithLength>(o);
}
}
-CastHeapObject<FastJSArray>(implicit context: Context)(o: HeapObject):
- FastJSArray
+Cast<FastJSArray>(implicit context: Context)(o: HeapObject): FastJSArray
labels CastError {
const map: Map = o.map;
if (!IsJSArrayMap(map)) goto CastError;
@@ -754,95 +1092,50 @@ CastHeapObject<FastJSArray>(implicit context: Context)(o: HeapObject):
if (!IsPrototypeInitialArrayPrototype(map)) goto CastError;
if (IsNoElementsProtectorCellInvalid()) goto CastError;
- return %RawObjectCast<FastJSArray>(o);
-}
-
-struct FastJSArrayWitness {
- array: HeapObject;
- map: Map;
-}
-
-macro MakeWitness(array: FastJSArray): FastJSArrayWitness {
- return FastJSArrayWitness{array, array.map};
-}
-
-macro Testify(witness: FastJSArrayWitness): FastJSArray labels CastError {
- if (witness.array.map != witness.map) goto CastError;
- // We don't need to check elements kind or whether the prototype
- // has changed away from the default JSArray prototype, because
- // if the map remains the same then those properties hold.
- //
- // However, we have to make sure there are no elements in the
- // prototype chain.
- if (IsNoElementsProtectorCellInvalid()) goto CastError;
- return %RawObjectCast<FastJSArray>(witness.array);
+ return %RawDownCast<FastJSArray>(o);
}
-CastHeapObject<FastJSArrayForCopy>(implicit context: Context)(o: HeapObject):
+Cast<FastJSArrayForCopy>(implicit context: Context)(o: HeapObject):
FastJSArrayForCopy
labels CastError {
if (IsArraySpeciesProtectorCellInvalid()) goto CastError;
const a: FastJSArray = Cast<FastJSArray>(o) otherwise CastError;
- return %RawObjectCast<FastJSArrayForCopy>(o);
+ return %RawDownCast<FastJSArrayForCopy>(o);
}
-CastHeapObject<FastJSArrayWithNoCustomIteration>(implicit context: Context)(
+Cast<FastJSArrayWithNoCustomIteration>(implicit context: Context)(
o: HeapObject): FastJSArrayWithNoCustomIteration
labels CastError {
if (IsArrayIteratorProtectorCellInvalid()) goto CastError;
const a: FastJSArray = Cast<FastJSArray>(o) otherwise CastError;
- return %RawObjectCast<FastJSArrayWithNoCustomIteration>(o);
+ return %RawDownCast<FastJSArrayWithNoCustomIteration>(o);
}
-CastHeapObject<JSReceiver>(implicit context: Context)(o: HeapObject): JSReceiver
+Cast<JSReceiver>(implicit context: Context)(o: HeapObject): JSReceiver
labels CastError {
- if (IsJSReceiver(o)) return %RawObjectCast<JSReceiver>(o);
+ if (IsJSReceiver(o)) return %RawDownCast<JSReceiver>(o);
goto CastError;
}
-CastHeapObject<JSFunction>(implicit context: Context)(o: HeapObject): JSFunction
+Cast<JSFunction>(implicit context: Context)(o: HeapObject): JSFunction
labels CastError {
- if (IsJSFunction(o)) return %RawObjectCast<JSFunction>(o);
+ if (IsJSFunction(o)) return %RawDownCast<JSFunction>(o);
goto CastError;
}
-macro Cast<A: type>(implicit context: Context)(o: HeapObject): A
- labels CastError {
- return CastHeapObject<A>(o) otherwise CastError;
-}
-
-// CastHeapObject allows this default-implementation to be non-recursive.
-// Otherwise the generated CSA code might run into infinite recursion.
-macro Cast<A: type>(implicit context: Context)(o: Object): A
- labels CastError {
- return CastHeapObject<A>(TaggedToHeapObject(o) otherwise CastError)
- otherwise CastError;
-}
-
-Cast<Smi>(o: Object): Smi
- labels CastError {
- return TaggedToSmi(o) otherwise CastError;
-}
-
-Cast<PositiveSmi>(o: Object): PositiveSmi
- labels CastError {
- return TaggedToPositiveSmi(o) otherwise CastError;
-}
-
-Cast<Number>(o: Object): Number
- labels CastError {
- return TaggedToNumber(o) otherwise CastError;
-}
-
extern macro AllocateHeapNumberWithValue(float64): HeapNumber;
extern macro ChangeInt32ToTagged(int32): Number;
extern macro ChangeUint32ToTagged(uint32): Number;
extern macro ChangeUintPtrToFloat64(uintptr): float64;
extern macro ChangeUintPtrToTagged(uintptr): Number;
extern macro Unsigned(int32): uint32;
+extern macro Unsigned(int16): uint16;
+extern macro Unsigned(int8): uint8;
extern macro Unsigned(intptr): uintptr;
extern macro Unsigned(RawPtr): uintptr;
extern macro Signed(uint32): int32;
+extern macro Signed(uint16): int16;
+extern macro Signed(uint8): int8;
extern macro Signed(uintptr): intptr;
extern macro Signed(RawPtr): intptr;
extern macro TruncateIntPtrToInt32(intptr): int32;
@@ -861,6 +1154,11 @@ extern macro LoadNativeContext(Context): NativeContext;
extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map;
extern macro LoadJSArrayElementsMap(ElementsKind, Context): Map;
extern macro ChangeNonnegativeNumberToUintPtr(Number): uintptr;
+extern macro TryNumberToUintPtr(Number): uintptr labels IfNegative;
+macro TryUintPtrToPositiveSmi(ui: uintptr): PositiveSmi labels IfOverflow {
+ if (ui > kSmiMaxValue) goto IfOverflow;
+ return %RawDownCast<PositiveSmi>(SmiTag(Signed(ui)));
+}
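TryUintPtrToPositiveSmi differs from the unchecked conversions only by the explicit range test before tagging. A minimal C++ sketch of the same guard, assuming a one-bit tag and a 2^31-1 limit (both simplifications; the real Smi layout varies by platform):

#include <cstdint>
#include <optional>

constexpr uintptr_t kSmiMaxValue = (uintptr_t{1} << 31) - 1;  // assumed limit

std::optional<intptr_t> TryUintPtrToPositiveSmi(uintptr_t ui) {
  if (ui > kSmiMaxValue) return std::nullopt;  // the IfOverflow label
  return static_cast<intptr_t>(ui) << 1;       // SmiTag: low tag bit stays 0
}
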
extern macro NumberConstant(constexpr float64): Number;
extern macro NumberConstant(constexpr int32): Number;
@@ -870,6 +1168,7 @@ extern macro IntPtrConstant(constexpr int32): intptr;
extern macro Int32Constant(constexpr int31): int31;
extern macro Int32Constant(constexpr int32): int32;
extern macro Float64Constant(constexpr int31): float64;
+extern macro Float64Constant(constexpr float64): float64;
extern macro SmiConstant(constexpr int31): Smi;
extern macro SmiConstant(constexpr Smi): Smi;
extern macro BoolConstant(constexpr bool): bool;
@@ -911,6 +1210,10 @@ FromConstexpr<uintptr, constexpr uintptr>(i: constexpr uintptr): uintptr {
FromConstexpr<Smi, constexpr int31>(i: constexpr int31): Smi {
return %FromConstexpr<Smi>(i);
}
+FromConstexpr<PositiveSmi, constexpr int31>(i: constexpr int31): PositiveSmi {
+ assert(i >= 0);
+ return %FromConstexpr<PositiveSmi>(i);
+}
FromConstexpr<String, constexpr string>(s: constexpr string): String {
return %FromConstexpr<String>(s);
}
@@ -941,12 +1244,15 @@ FromConstexpr<uintptr, constexpr int31>(i: constexpr int31): uintptr {
FromConstexpr<float64, constexpr int31>(i: constexpr int31): float64 {
return Float64Constant(i);
}
+FromConstexpr<float64, constexpr float64>(i: constexpr float64): float64 {
+ return Float64Constant(i);
+}
FromConstexpr<bool, constexpr bool>(b: constexpr bool): bool {
return BoolConstant(b);
}
FromConstexpr<LanguageMode, constexpr LanguageMode>(m: constexpr LanguageMode):
LanguageMode {
- return %RawObjectCast<LanguageMode>(%FromConstexpr<Smi>(m));
+ return %RawDownCast<LanguageMode>(%FromConstexpr<Smi>(m));
}
FromConstexpr<ElementsKind, constexpr ElementsKind>(e: constexpr ElementsKind):
ElementsKind {
@@ -986,6 +1292,15 @@ Convert<Smi, uint32>(ui: uint32): Smi {
Convert<uintptr, uint32>(ui: uint32): uintptr {
return ChangeUint32ToWord(ui);
}
+Convert<int32, uint8>(i: uint8): int32 {
+ return Signed(Convert<uint32>(i));
+}
+Convert<int32, uint16>(i: uint16): int32 {
+ return Signed(Convert<uint32>(i));
+}
+Convert<int32, uint31>(i: uint31): int32 {
+ return Signed(Convert<uint32>(i));
+}
Convert<int32, intptr>(i: intptr): int32 {
return TruncateIntPtrToInt32(i);
}
@@ -998,6 +1313,18 @@ Convert<uint32, uintptr>(ui: uintptr): uint32 {
Convert<intptr, Smi>(s: Smi): intptr {
return SmiUntag(s);
}
+Convert<uintptr, PositiveSmi>(ps: PositiveSmi): uintptr {
+ return Unsigned(SmiUntag(ps));
+}
+Convert<intptr, uintptr>(ui: uintptr): intptr {
+ const i = Signed(ui);
+ assert(i >= 0);
+ return i;
+}
+Convert<PositiveSmi, intptr>(i: intptr): PositiveSmi {
+ assert(IsValidPositiveSmi(i));
+ return %RawDownCast<PositiveSmi>(SmiTag(i));
+}
Convert<int32, Smi>(s: Smi): int32 {
return SmiToInt32(s);
}
@@ -1064,16 +1391,16 @@ macro Is<A: type, B: type>(implicit context: Context)(o: B): bool {
macro UnsafeCast<A: type>(implicit context: Context)(o: Object): A {
assert(Is<A>(o));
- return %RawObjectCast<A>(o);
+ return %RawDownCast<A>(o);
}
UnsafeCast<Object>(o: Object): Object {
return o;
}
-const kCOWMap: Map = %RawObjectCast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
-const kEmptyFixedArray: FixedArrayBase =
- %RawObjectCast<FixedArrayBase>(LoadRoot(kEmptyFixedArrayRootIndex));
+const kCOWMap: Map = %RawDownCast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
+const kEmptyFixedArray: FixedArray =
+ %RawDownCast<FixedArray>(LoadRoot(kEmptyFixedArrayRootIndex));
extern macro IsPrototypeInitialArrayPrototype(implicit context: Context)(Map):
bool;
@@ -1082,9 +1409,9 @@ extern macro IsArrayIteratorProtectorCellInvalid(): bool;
extern macro IsArraySpeciesProtectorCellInvalid(): bool;
extern macro IsTypedArraySpeciesProtectorCellInvalid(): bool;
extern macro IsPromiseSpeciesProtectorCellInvalid(): bool;
-
-extern operator '.buffer' macro
-TypedArrayBuiltinsAssembler::LoadTypedArrayBuffer(JSTypedArray): JSArrayBuffer;
+extern macro IsMockArrayBufferAllocatorFlag(): bool;
+extern macro IsPrototypeTypedArrayPrototype(implicit context: Context)(Map):
+ bool;
extern operator '.data_ptr' macro TypedArrayBuiltinsAssembler::LoadDataPtr(
JSTypedArray): RawPtr;
@@ -1093,49 +1420,55 @@ extern operator '.elements_kind' macro LoadMapElementsKind(Map): ElementsKind;
extern operator '.elements_kind' macro LoadElementsKind(JSTypedArray):
ElementsKind;
-extern operator '.length' macro LoadJSTypedArrayLength(JSTypedArray): Smi;
extern operator '.length' macro LoadFastJSArrayLength(FastJSArray): Smi;
-extern operator '.length' macro LoadFixedArrayBaseLength(FixedArrayBase): Smi;
-extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
- FixedArrayBase): intptr;
-extern operator '[]' macro LoadFixedArrayElement(FixedArray, intptr): Object;
-extern operator '[]' macro LoadFixedArrayElement(FixedArray, Smi): Object;
-extern operator '[]' macro LoadFixedArrayElement(
+extern operator '.objects[]' macro LoadFixedArrayElement(
+ FixedArray, intptr): Object;
+extern operator '.objects[]' macro LoadFixedArrayElement(
+ FixedArray, Smi): Object;
+extern operator '.objects[]' macro LoadFixedArrayElement(
FixedArray, constexpr int31): Object;
-extern operator '[]=' macro StoreFixedArrayElement(
+extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, intptr, Smi): void;
-extern operator '[]=' macro StoreFixedArrayElement(
+extern operator '.objects[]=' macro StoreFixedArrayElement(
+ FixedArray, Smi, Smi): void;
+extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, intptr, HeapObject): void;
-extern operator '[]=' macro StoreFixedArrayElement(
+extern operator '.objects[]=' macro StoreFixedArrayElement(
+ FixedArray, intptr, Object): void;
+extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, constexpr int31, Smi): void;
-extern operator '[]=' macro StoreFixedArrayElement(
+extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, constexpr int31, HeapObject): void;
-extern operator '[]=' macro StoreFixedArrayElementSmi(
+extern operator '.objects[]=' macro StoreFixedArrayElementSmi(
FixedArray, Smi, Object): void;
-operator '[]=' macro StoreFixedDoubleArrayNumber(
- a: FixedDoubleArray, index: Smi, value: Number): void {
- a[index] = Convert<float64>(value);
-}
-
-extern macro StoreFixedArrayElementSmi(
+extern operator '.objects[]=' macro StoreFixedArrayElementSmi(
FixedArray, Smi, Object, constexpr WriteBarrierMode): void;
+extern macro StoreFixedArrayElement(
+ FixedArray, intptr, Object, constexpr WriteBarrierMode): void;
+extern operator '.floats[]=' macro StoreFixedDoubleArrayElement(
+ FixedDoubleArray, intptr, float64): void;
+extern operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
+ FixedDoubleArray, Smi, float64): void;
+operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
+ a: FixedDoubleArray, i: Smi, n: Number): void {
+ StoreFixedDoubleArrayElementSmi(a, i, Convert<float64>(n));
+}
+operator '[]=' macro StoreFixedDoubleArrayDirect(
+ a: FixedDoubleArray, i: Smi, v: Number) {
+ a.floats[i] = Convert<float64>(v);
+}
+operator '[]=' macro StoreFixedArrayDirect(a: FixedArray, i: Smi, v: Object) {
+ a.objects[i] = v;
+}
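The rename from a bare '[]' to '.objects[]' and '.floats[]' makes the backing-store type explicit at each store site, so tagged writes and unboxed float64 writes cannot be conflated. A loose C++ analogue (the types here are hypothetical stand-ins, not V8's):

#include <cstddef>
#include <vector>

struct Tagged { const void* ptr; };  // stand-in for a tagged value

struct FixedArray { std::vector<Tagged> objects; };
struct FixedDoubleArray { std::vector<double> floats; };

// Distinct accessors mirror the distinct '.objects[]=' / '.floats[]=' operators.
void StoreObject(FixedArray& a, std::size_t i, Tagged v) { a.objects[i] = v; }
void StoreFloat(FixedDoubleArray& a, std::size_t i, double v) { a.floats[i] = v; }
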
extern operator '.instance_type' macro LoadMapInstanceType(Map): int32;
-extern macro LoadFixedDoubleArrayElement(FixedDoubleArray, Smi): float64;
extern macro Float64SilenceNaN(float64): float64;
-extern macro StoreFixedDoubleArrayElement(
- FixedDoubleArray, Object, float64, constexpr ParameterMode);
-extern macro StoreFixedArrayElement(
- FixedArray, intptr, Object, constexpr WriteBarrierMode): void;
-
-macro StoreFixedDoubleArrayElementWithSmiIndex(
- array: FixedDoubleArray, index: Smi, value: float64) {
- StoreFixedDoubleArrayElement(array, index, value, SMI_PARAMETERS);
-}
extern macro GetNumberDictionaryNumberOfElements(NumberDictionary): Smi;
+extern macro GetIteratorMethod(implicit context: Context)(HeapObject): Object
+ labels IfIteratorUndefined;
extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): Object
labels NotData, IfHole;
@@ -1148,6 +1481,16 @@ extern macro IsFastSmiOrTaggedElementsKind(ElementsKind): bool;
extern macro IsFastSmiElementsKind(ElementsKind): bool;
extern macro IsHoleyFastElementsKind(ElementsKind): bool;
+macro FastHoleyElementsKind(kind: ElementsKind): ElementsKind {
+ if (kind == PACKED_SMI_ELEMENTS) {
+ return HOLEY_SMI_ELEMENTS;
+ } else if (kind == PACKED_DOUBLE_ELEMENTS) {
+ return HOLEY_DOUBLE_ELEMENTS;
+ }
+ assert(kind == PACKED_ELEMENTS);
+ return HOLEY_ELEMENTS;
+}
+
macro AllowDoubleElements(kind: ElementsKind): ElementsKind {
if (kind == PACKED_SMI_ELEMENTS) {
return PACKED_DOUBLE_ELEMENTS;
@@ -1177,6 +1520,8 @@ extern macro CalculateNewElementsCapacity(intptr): intptr;
extern macro AllocateFixedArrayWithHoles(
intptr, constexpr AllocationFlags): FixedArray;
+extern macro AllocateFixedDoubleArrayWithHoles(
+ intptr, constexpr AllocationFlags): FixedDoubleArray;
extern macro CopyFixedArrayElements(
constexpr ElementsKind, FixedArray, constexpr ElementsKind, FixedArray,
intptr, intptr, intptr): void;
@@ -1186,12 +1531,9 @@ extern macro CopyFixedArrayElements(
extern macro AllocateJSArray(constexpr ElementsKind, Map, intptr, Smi): JSArray;
extern macro AllocateJSArray(constexpr ElementsKind, Map, Smi, Smi): JSArray;
-
+extern macro AllocateJSArray(Map, FixedArrayBase, Smi): JSArray;
extern macro AllocateJSObjectFromMap(Map): JSObject;
-extern operator '[]=' macro StoreFixedDoubleArrayElementSmi(
- FixedDoubleArray, Smi, float64): void;
-
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64
labels IfHole;
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, intptr): float64
@@ -1202,10 +1544,14 @@ macro GetObjectFunction(implicit context: Context)(): JSFunction {
return UnsafeCast<JSFunction>(
LoadNativeContext(context)[OBJECT_FUNCTION_INDEX]);
}
-macro GetArrayBufferFunction(implicit context: Context)(): JSFunction {
- return UnsafeCast<JSFunction>(
+macro GetArrayBufferFunction(implicit context: Context)(): Constructor {
+ return UnsafeCast<Constructor>(
LoadNativeContext(context)[ARRAY_BUFFER_FUN_INDEX]);
}
+macro GetArrayBufferNoInitFunction(implicit context: Context)(): JSFunction {
+ return UnsafeCast<JSFunction>(
+ LoadNativeContext(context)[ARRAY_BUFFER_NOINIT_FUN_INDEX]);
+}
macro GetFastPackedSmiElementsJSArrayMap(implicit context: Context)(): Map {
return UnsafeCast<Map>(
@@ -1274,7 +1620,7 @@ LoadElementNoHole<FixedArray>(implicit context: Context)(
try {
let elements: FixedArray =
Cast<FixedArray>(a.elements) otherwise Unexpected;
- let e: Object = elements[index];
+ let e: Object = elements.objects[index];
if (e == Hole) {
goto IfHole;
}
@@ -1299,10 +1645,91 @@ LoadElementNoHole<FixedDoubleArray>(implicit context: Context)(
}
}
+struct FastJSArrayWitness {
+ Get(): FastJSArray {
+ return this.unstable;
+ }
+
+ Recheck() labels CastError {
+ if (this.stable.map != this.map) goto CastError;
+ // We don't need to check elements kind or whether the prototype
+ // has changed away from the default JSArray prototype, because
+ // if the map remains the same then those properties hold.
+ //
+ // However, we have to make sure there are no elements in the
+ // prototype chain.
+ if (IsNoElementsProtectorCellInvalid()) goto CastError;
+ this.unstable = %RawDownCast<FastJSArray>(this.stable);
+ }
+
+ LoadElementNoHole(implicit context: Context)(k: Smi): Object
+ labels FoundHole {
+ if (this.hasDoubles) {
+ return LoadElementNoHole<FixedDoubleArray>(this.unstable, k)
+ otherwise FoundHole;
+ } else {
+ return LoadElementNoHole<FixedArray>(this.unstable, k)
+ otherwise FoundHole;
+ }
+ }
+
+ LoadElementOrUndefined(implicit context: Context)(k: Smi): Object {
+ try {
+ return this.LoadElementNoHole(k) otherwise FoundHole;
+ }
+ label FoundHole {
+ return Undefined;
+ }
+ }
+
+ EnsureArrayPushable() labels Failed {
+ EnsureArrayPushable(this.map) otherwise Failed;
+ this.arrayIsPushable = true;
+ }
+
+ Push(value: Object) labels Failed {
+ assert(this.arrayIsPushable);
+ if (this.hasDoubles) {
+ BuildAppendJSArray(HOLEY_DOUBLE_ELEMENTS, this.unstable, value)
+ otherwise Failed;
+ } else if (this.hasSmis) {
+ BuildAppendJSArray(HOLEY_SMI_ELEMENTS, this.unstable, value)
+ otherwise Failed;
+ } else {
+ assert(
+ this.map.elements_kind == HOLEY_ELEMENTS ||
+ this.map.elements_kind == PACKED_ELEMENTS);
+ BuildAppendJSArray(HOLEY_ELEMENTS, this.unstable, value)
+ otherwise Failed;
+ }
+ }
+
+ stable: JSArray;
+ unstable: FastJSArray;
+ map: Map;
+ hasDoubles: bool;
+ hasSmis: bool;
+ arrayIsPushable: bool;
+}
+
+macro NewFastJSArrayWitness(array: FastJSArray): FastJSArrayWitness {
+ let kind = array.map.elements_kind;
+ return FastJSArrayWitness{
+ array,
+ array,
+ array.map,
+ !IsElementsKindLessThanOrEqual(kind, HOLEY_ELEMENTS),
+ IsElementsKindLessThanOrEqual(kind, HOLEY_SMI_ELEMENTS),
+ false
+ };
+}
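The witness caches the array's map while the array is known to be fast, lets arbitrary user code run, and only re-trusts the cached classification after Recheck succeeds. A small C++ sketch of that discipline, with a version counter standing in for the map pointer (VersionedArray is hypothetical, not a V8 type):

#include <optional>
#include <vector>

struct VersionedArray {
  std::vector<int> elements;
  int map_version = 0;  // bumped whenever the object's "shape" changes
};

class FastArrayWitness {
 public:
  explicit FastArrayWitness(VersionedArray* a)
      : stable_(a), cached_version_(a->map_version) {}

  // Mirrors Recheck(): succeed only if nothing changed the shape since the
  // witness was created; otherwise the fast-path claim no longer holds.
  std::optional<VersionedArray*> Recheck() const {
    if (stable_->map_version != cached_version_) return std::nullopt;
    return stable_;
  }

 private:
  VersionedArray* stable_;
  int cached_version_;
};
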
+
extern macro TransitionElementsKind(
- JSObject, Map, ElementsKind, ElementsKind): void labels Bailout;
+ JSObject, Map, constexpr ElementsKind,
+ constexpr ElementsKind): void labels Bailout;
extern macro IsCallable(HeapObject): bool;
+extern macro IsConstructor(HeapObject): bool;
extern macro IsJSArray(HeapObject): bool;
extern macro IsMap(HeapObject): bool;
extern macro IsJSFunction(HeapObject): bool;
@@ -1384,13 +1811,15 @@ transitioning macro GetLengthProperty(implicit context: Context)(o: Object):
}
extern macro NumberToString(Number): String;
-extern macro HasOnlyOneByteChars(InstanceType): bool;
+extern macro IsOneByteStringInstanceType(InstanceType): bool;
extern macro AllocateSeqOneByteString(implicit context: Context)(uint32):
String;
extern macro AllocateSeqTwoByteString(implicit context: Context)(uint32):
String;
extern macro TryIntPtrAdd(intptr, intptr): intptr
labels IfOverflow;
+extern macro ConvertToRelativeIndex(implicit context: Context)(
+ Object, intptr): intptr;
extern builtin ObjectToString(Context, Object): Object;
extern builtin StringRepeat(Context, String, Number): String;
@@ -1439,3 +1868,51 @@ macro BranchIfFastJSArrayForCopy(o: Object, context: Context): never
macro IsFastJSArrayWithNoCustomIteration(context: Context, o: Object): bool {
return Is<FastJSArrayWithNoCustomIteration>(o);
}
+
+extern transitioning runtime
+CreateDataProperty(implicit context: Context)(JSReceiver, Object, Object);
+
+transitioning builtin FastCreateDataProperty(implicit context: Context)(
+ receiver: JSReceiver, key: Object, value: Object): Object {
+ try {
+ let array = Cast<FastJSArray>(receiver) otherwise Slow;
+ const index: Smi = Cast<Smi>(key) otherwise goto Slow;
+ if (index < 0 || index > array.length) goto Slow;
+ array::EnsureWriteableFastElements(array);
+ const isAppend = index == array.length;
+ const kind = array.map.elements_kind;
+ // We may have to transition the array.
+ // For now, if a transition is required, jump away to the slow path.
+ if (IsFastSmiElementsKind(kind)) {
+ const smiValue = Cast<Smi>(value) otherwise Slow;
+ if (isAppend) {
+ BuildAppendJSArray(HOLEY_SMI_ELEMENTS, array, value) otherwise Slow;
+ } else {
+ const elements = Cast<FixedArray>(array.elements) otherwise unreachable;
+ elements[index] = smiValue;
+ }
+ } else if (IsDoubleElementsKind(kind)) {
+ const numberValue = Cast<Number>(value) otherwise Slow;
+ if (isAppend) {
+ BuildAppendJSArray(HOLEY_DOUBLE_ELEMENTS, array, value)
+ otherwise Slow;
+ } else {
+ const doubleElements = Cast<FixedDoubleArray>(array.elements)
+ otherwise unreachable;
+ doubleElements[index] = numberValue;
+ }
+ } else {
+ assert(IsFastSmiOrTaggedElementsKind(kind));
+ if (isAppend) {
+ BuildAppendJSArray(HOLEY_ELEMENTS, array, value) otherwise Slow;
+ } else {
+ const elements = Cast<FixedArray>(array.elements) otherwise unreachable;
+ elements[index] = value;
+ }
+ }
+ }
+ label Slow {
+ CreateDataProperty(receiver, key, value);
+ }
+ return Undefined;
+}
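Structurally, the builtin stays fast only for in-bounds stores and for an append at exactly the current length; any other index, and any value that would force an elements-kind transition, defers to the CreateDataProperty runtime call. A reduced C++ sketch of that split over a plain vector (no transitions modeled):

#include <cstddef>
#include <vector>

// Returns false where the Torque code jumps to the Slow label.
bool FastStore(std::vector<double>& elements, std::ptrdiff_t index,
               double value) {
  const auto length = static_cast<std::ptrdiff_t>(elements.size());
  if (index < 0 || index > length) return false;  // outside fast-path range
  if (index == length) {
    elements.push_back(value);  // isAppend: grow by one
  } else {
    elements[static_cast<std::size_t>(index)] = value;  // in-bounds overwrite
  }
  return true;
}
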
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 21831e9f46..2d25cdc32a 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -101,7 +101,7 @@ Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
[this, elements, &offset](Node* arg) {
StoreNoWriteBarrier(MachineRepresentation::kTagged,
elements, offset.value(), arg);
- Increment(&offset, kSystemPointerSize);
+ Increment(&offset, kTaggedSize);
},
first_arg, nullptr, param_mode);
return result;
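The stride fix matters once tagged slots and machine pointers stop being the same width (as with pointer compression, where a tagged slot is 4 bytes while a system pointer is 8): advancing by kSystemPointerSize would land on every other element. Illustrative sizes only, assuming compressed pointers:

#include <cstddef>

constexpr std::size_t kSystemPointerSize = 8;  // machine word
constexpr std::size_t kTaggedSize = 4;         // compressed tagged slot (assumed)

// Offset of the i-th tagged element after the header.
std::size_t TaggedSlotOffset(std::size_t header_size, std::size_t i) {
  return header_size + i * kTaggedSize;  // kSystemPointerSize would overshoot
}
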
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index db58ecf152..7dfe705811 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -12,8 +12,10 @@
#include "src/code-stub-assembler.h"
#include "src/frame-constants.h"
#include "src/heap/factory-inl.h"
+#include "src/objects/allocation-site-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/property-cell.h"
+#include "torque-generated/builtins-typed-array-createtypedarray-from-dsl-gen.h"
namespace v8 {
namespace internal {
@@ -59,16 +61,6 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
return a();
}
- void ArrayBuiltinsAssembler::ForEachResultGenerator() {
- a_.Bind(UndefinedConstant());
- }
-
- Node* ArrayBuiltinsAssembler::ForEachProcessor(Node* k_value, Node* k) {
- CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
- k_value, k, o());
- return a();
- }
-
void ArrayBuiltinsAssembler::SomeResultGenerator() {
a_.Bind(FalseConstant());
}
@@ -99,46 +91,15 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
return a();
}
- void ArrayBuiltinsAssembler::ReduceResultGenerator() {
- return a_.Bind(this_arg());
- }
-
- Node* ArrayBuiltinsAssembler::ReduceProcessor(Node* k_value, Node* k) {
- VARIABLE(result, MachineRepresentation::kTagged);
- Label done(this, {&result}), initial(this);
- GotoIf(WordEqual(a(), TheHoleConstant()), &initial);
- result.Bind(CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
- UndefinedConstant(), a(), k_value, k, o()));
- Goto(&done);
-
- BIND(&initial);
- result.Bind(k_value);
- Goto(&done);
-
- BIND(&done);
- return result.value();
- }
-
- void ArrayBuiltinsAssembler::ReducePostLoopAction() {
- Label ok(this);
- GotoIf(WordNotEqual(a(), TheHoleConstant()), &ok);
- ThrowTypeError(context(), MessageTemplate::kReduceNoInitial);
- BIND(&ok);
- }
-
- void ArrayBuiltinsAssembler::MapResultGenerator() {
- GenerateArraySpeciesCreate(len_);
- }
-
void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
// 6. Let A be ? TypedArraySpeciesCreate(O, len).
TNode<JSTypedArray> original_array = CAST(o());
TNode<Smi> length = CAST(len_);
const char* method_name = "%TypedArray%.prototype.map";
- TypedArrayBuiltinsAssembler typedarray_asm(state());
+ TypedArrayCreatetypedarrayBuiltinsFromDSLAssembler typedarray_asm(state());
TNode<JSTypedArray> a = typedarray_asm.TypedArraySpeciesCreateByLength(
- context(), original_array, length, method_name);
+ context(), method_name, original_array, length);
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
CSA_ASSERT(this, SmiLessThanOrEqual(CAST(len_), LoadJSTypedArrayLength(a)));
@@ -148,120 +109,6 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
a_.Bind(a);
}
- Node* ArrayBuiltinsAssembler::SpecCompliantMapProcessor(Node* k_value,
- Node* k) {
- // i. Let kValue be ? Get(O, Pk). Performed by the caller of
- // SpecCompliantMapProcessor.
- // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
- Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
- callbackfn(), this_arg(), k_value, k, o());
-
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
- CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mapped_value);
- return a();
- }
-
- Node* ArrayBuiltinsAssembler::FastMapProcessor(Node* k_value, Node* k) {
- // i. Let kValue be ? Get(O, Pk). Performed by the caller of
- // FastMapProcessor.
- // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
- Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
- callbackfn(), this_arg(), k_value, k, o());
-
- // mode is SMI_PARAMETERS because k has tagged representation.
- ParameterMode mode = SMI_PARAMETERS;
- Label runtime(this), finished(this);
- Label transition_pre(this), transition_smi_fast(this),
- transition_smi_double(this);
- Label array_not_smi(this), array_fast(this), array_double(this);
-
- TNode<Int32T> kind = LoadElementsKind(a());
- Node* elements = LoadElements(a());
- GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS), &array_not_smi);
- TryStoreArrayElement(HOLEY_SMI_ELEMENTS, mode, &transition_pre, elements, k,
- mapped_value);
- Goto(&finished);
-
- BIND(&transition_pre);
- {
- // array is smi. Value is either tagged or a heap number.
- CSA_ASSERT(this, TaggedIsNotSmi(mapped_value));
- GotoIf(IsHeapNumberMap(LoadMap(mapped_value)), &transition_smi_double);
- Goto(&transition_smi_fast);
- }
-
- BIND(&array_not_smi);
- {
- Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &array_double,
- &array_fast);
- }
-
- BIND(&transition_smi_fast);
- {
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
- Node* const native_context = LoadNativeContext(context());
- Node* const fast_map = LoadContextElement(
- native_context, Context::JS_ARRAY_HOLEY_ELEMENTS_MAP_INDEX);
-
- // Since this transition is only a map change, just do it right here.
- // Since a() doesn't have an allocation site, it's safe to do the
- // map store directly, otherwise I'd call TransitionElementsKind().
- StoreMap(a(), fast_map);
- Goto(&array_fast);
- }
-
- BIND(&array_fast);
- {
- TryStoreArrayElement(HOLEY_ELEMENTS, mode, &runtime, elements, k,
- mapped_value);
- Goto(&finished);
- }
-
- BIND(&transition_smi_double);
- {
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
- Node* const native_context = LoadNativeContext(context());
- Node* const double_map = LoadContextElement(
- native_context, Context::JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX);
-
- const ElementsKind kFromKind = HOLEY_SMI_ELEMENTS;
- const ElementsKind kToKind = HOLEY_DOUBLE_ELEMENTS;
-
- Label transition_in_runtime(this, Label::kDeferred);
- TransitionElementsKind(a(), double_map, kFromKind, kToKind,
- &transition_in_runtime);
- Goto(&array_double);
-
- BIND(&transition_in_runtime);
- CallRuntime(Runtime::kTransitionElementsKind, context(), a(), double_map);
- Goto(&array_double);
- }
-
- BIND(&array_double);
- {
- // TODO(mvstanton): If we use a variable for elements and bind it
- // appropriately, we can avoid an extra load of elements by binding the
- // value only after a transition from smi to double.
- elements = LoadElements(a());
- // If the mapped_value isn't a number, this will bail out to the runtime
- // to make the transition.
- TryStoreArrayElement(HOLEY_DOUBLE_ELEMENTS, mode, &runtime, elements, k,
- mapped_value);
- Goto(&finished);
- }
-
- BIND(&runtime);
- {
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
- CallRuntime(Runtime::kCreateDataProperty, context(), a(), k,
- mapped_value);
- Goto(&finished);
- }
-
- BIND(&finished);
- return a();
- }
-
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
// 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
@@ -350,97 +197,6 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
argc_ = argc;
}
- void ArrayBuiltinsAssembler::GenerateIteratingArrayBuiltinBody(
- const char* name, const BuiltinResultGenerator& generator,
- const CallResultProcessor& processor, const PostLoopAction& action,
- const Callable& slow_case_continuation,
- MissingPropertyMode missing_property_mode, ForEachDirection direction) {
- Label non_array(this), array_changes(this, {&k_, &a_, &to_});
-
- // TODO(danno): Seriously? Do we really need to throw the exact error
- // message on null and undefined so that the webkit tests pass?
- Label throw_null_undefined_exception(this, Label::kDeferred);
- GotoIf(IsNullOrUndefined(receiver()), &throw_null_undefined_exception);
-
- // By the book: taken directly from the ECMAScript 2015 specification
-
- // 1. Let O be ToObject(this value).
- // 2. ReturnIfAbrupt(O)
- o_ = ToObject_Inline(context(), receiver());
-
- // 3. Let len be ToLength(Get(O, "length")).
- // 4. ReturnIfAbrupt(len).
- TVARIABLE(Number, merged_length);
- Label has_length(this, &merged_length), not_js_array(this);
- GotoIf(DoesntHaveInstanceType(o(), JS_ARRAY_TYPE), &not_js_array);
- merged_length = LoadJSArrayLength(CAST(o()));
- Goto(&has_length);
-
- BIND(&not_js_array);
- {
- Node* len_property =
- GetProperty(context(), o(), isolate()->factory()->length_string());
- merged_length = ToLength_Inline(context(), len_property);
- Goto(&has_length);
- }
- BIND(&has_length);
- {
- len_ = merged_length.value();
-
- // 5. If IsCallable(callbackfn) is false, throw a TypeError exception.
- Label type_exception(this, Label::kDeferred);
- Label done(this);
- GotoIf(TaggedIsSmi(callbackfn()), &type_exception);
- Branch(IsCallableMap(LoadMap(callbackfn())), &done, &type_exception);
-
- BIND(&throw_null_undefined_exception);
- ThrowTypeError(context(), MessageTemplate::kCalledOnNullOrUndefined,
- name);
-
- BIND(&type_exception);
- ThrowTypeError(context(), MessageTemplate::kCalledNonCallable,
- callbackfn());
-
- BIND(&done);
- }
-
- // 6. If thisArg was supplied, let T be thisArg; else let T be undefined.
- // [Already done by the arguments adapter]
-
- if (direction == ForEachDirection::kForward) {
- // 7. Let k be 0.
- k_.Bind(SmiConstant(0));
- } else {
- k_.Bind(NumberDec(len()));
- }
-
- generator(this);
-
- HandleFastElements(processor, action, &fully_spec_compliant_, direction,
- missing_property_mode);
-
- BIND(&fully_spec_compliant_);
-
- Node* result =
- CallStub(slow_case_continuation, context(), receiver(), callbackfn(),
- this_arg(), a_.value(), o(), k_.value(), len(), to_.value());
- ReturnFromBuiltin(result);
- }
-
- void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinLoopContinuation(
- TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
- Node* this_arg, Node* a, TNode<JSReceiver> o, Node* initial_k,
- TNode<Number> len, Node* to) {
- context_ = context;
- this_arg_ = this_arg;
- callbackfn_ = callbackfn;
- a_.Bind(a);
- k_.Bind(initial_k);
- o_ = o;
- len_ = len;
- to_.Bind(to);
- }
-
void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
@@ -485,9 +241,9 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
TYPED_ARRAYS(INSTANCE_TYPE)
#undef INSTANCE_TYPE
};
- std::vector<Label> labels;
+ std::list<Label> labels;
for (size_t i = 0; i < instance_types.size(); ++i) {
- labels.push_back(Label(this));
+ labels.emplace_back(this);
}
std::vector<Label*> label_ptrs;
for (Label& label : labels) {
@@ -508,8 +264,9 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
Switch(instance_type, &unexpected_instance_type, instance_types.data(),
label_ptrs.data(), labels.size());
- for (size_t i = 0; i < labels.size(); ++i) {
- BIND(&labels[i]);
+ size_t i = 0;
+ for (auto it = labels.begin(); it != labels.end(); ++i, ++it) {
+ BIND(&*it);
Label done(this);
source_elements_kind_ = ElementsKindForInstanceType(
static_cast<InstanceType>(instance_types[i]));
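The switch from std::vector<Label> to std::list<Label> is about address stability: label_ptrs keeps raw pointers into the container, and a growing vector may reallocate and relocate its elements, invalidating every pointer taken so far, whereas list nodes never move. A minimal demonstration of the difference:

#include <list>
#include <vector>

struct Label { int id; };

int main() {
  std::list<Label> labels;
  std::vector<Label*> label_ptrs;
  for (int i = 0; i < 100; ++i) {
    labels.emplace_back(Label{i});
    label_ptrs.push_back(&labels.back());  // stays valid: list nodes don't move
  }
  // With std::vector<Label> instead, any push_back that reallocates would
  // leave every earlier entry of label_ptrs dangling.
  return label_ptrs[0]->id;  // 0
}
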
@@ -526,65 +283,6 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
}
}
- void ArrayBuiltinsAssembler::GenerateIteratingArrayBuiltinLoopContinuation(
- const CallResultProcessor& processor, const PostLoopAction& action,
- MissingPropertyMode missing_property_mode, ForEachDirection direction) {
- Label loop(this, {&k_, &a_, &to_});
- Label after_loop(this);
- Goto(&loop);
- BIND(&loop);
- {
- if (direction == ForEachDirection::kForward) {
- // 8. Repeat, while k < len
- GotoIfNumberGreaterThanOrEqual(k(), len_, &after_loop);
- } else {
- // OR
- // 10. Repeat, while k >= 0
- GotoIfNumberGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop);
- }
-
- Label done_element(this, &to_);
- // a. Let Pk be ToString(k).
- // k() is guaranteed to be a positive integer, hence ToString is
- // side-effect free and HasProperty/GetProperty do the conversion inline.
- CSA_ASSERT(this, IsSafeInteger(k()));
-
- if (missing_property_mode == MissingPropertyMode::kSkip) {
- // b. Let kPresent be HasProperty(O, Pk).
- // c. ReturnIfAbrupt(kPresent).
- TNode<Oddball> k_present =
- HasProperty(context(), o(), k(), kHasProperty);
-
- // d. If kPresent is true, then
- GotoIf(IsFalse(k_present), &done_element);
- }
-
- // i. Let kValue be Get(O, Pk).
- // ii. ReturnIfAbrupt(kValue).
- Node* k_value = GetProperty(context(), o(), k());
-
- // iii. Let funcResult be Call(callbackfn, T, «kValue, k, O»).
- // iv. ReturnIfAbrupt(funcResult).
- a_.Bind(processor(this, k_value, k()));
- Goto(&done_element);
-
- BIND(&done_element);
-
- if (direction == ForEachDirection::kForward) {
- // e. Increase k by 1.
- k_.Bind(NumberInc(k()));
- } else {
- // e. Decrease k by 1.
- k_.Bind(NumberDec(k()));
- }
- Goto(&loop);
- }
- BIND(&after_loop);
-
- action(this);
- Return(a_.value());
- }
-
ElementsKind ArrayBuiltinsAssembler::ElementsKindForInstanceType(
InstanceType type) {
switch (type) {
@@ -632,133 +330,6 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
advance_mode);
}
- void ArrayBuiltinsAssembler::VisitAllFastElementsOneKind(
- ElementsKind kind, const CallResultProcessor& processor,
- Label* array_changed, ParameterMode mode, ForEachDirection direction,
- MissingPropertyMode missing_property_mode, TNode<Smi> length) {
- Comment("begin VisitAllFastElementsOneKind");
- // We only use this kind of processing if the no-elements protector is
- // in place at the start. We'll continue checking during array iteration.
- CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
- VARIABLE(original_map, MachineRepresentation::kTagged);
- original_map.Bind(LoadMap(o()));
- VariableList list({&original_map, &a_, &k_, &to_}, zone());
- Node* start = IntPtrOrSmiConstant(0, mode);
- Node* end = TaggedToParameter(length, mode);
- IndexAdvanceMode advance_mode = direction == ForEachDirection::kReverse
- ? IndexAdvanceMode::kPre
- : IndexAdvanceMode::kPost;
- if (direction == ForEachDirection::kReverse) std::swap(start, end);
- BuildFastLoop(
- list, start, end,
- [=, &original_map](Node* index) {
- k_.Bind(ParameterToTagged(index, mode));
- Label one_element_done(this), hole_element(this),
- process_element(this);
-
- // Check if o's map has changed during the callback. If so, we have to
- // fall back to the slower spec implementation for the rest of the
- // iteration.
- Node* o_map = LoadMap(o());
- GotoIf(WordNotEqual(o_map, original_map.value()), array_changed);
-
- TNode<JSArray> o_array = CAST(o());
- // Check if o's length has changed during the callback and if the
- // index is now out of range of the new length.
- GotoIf(SmiGreaterThanOrEqual(CAST(k_.value()),
- CAST(LoadJSArrayLength(o_array))),
- array_changed);
-
- // Re-load the elements array. It may have been resized.
- Node* elements = LoadElements(o_array);
-
- // Fast case: load the element directly from the elements FixedArray
- // and call the callback if the element is not the hole.
- DCHECK(kind == PACKED_ELEMENTS || kind == PACKED_DOUBLE_ELEMENTS);
- int base_size = kind == PACKED_ELEMENTS
- ? FixedArray::kHeaderSize
- : (FixedArray::kHeaderSize - kHeapObjectTag);
- Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size);
- VARIABLE(value, MachineRepresentation::kTagged);
- if (kind == PACKED_ELEMENTS) {
- value.Bind(LoadObjectField(elements, offset));
- GotoIf(WordEqual(value.value(), TheHoleConstant()), &hole_element);
- } else {
- Node* double_value =
- LoadDoubleWithHoleCheck(elements, offset, &hole_element);
- value.Bind(AllocateHeapNumberWithValue(double_value));
- }
- Goto(&process_element);
-
- BIND(&hole_element);
- if (missing_property_mode == MissingPropertyMode::kSkip) {
- // The NoElementsProtectorCell could go invalid during callbacks.
- Branch(IsNoElementsProtectorCellInvalid(), array_changed,
- &one_element_done);
- } else {
- value.Bind(UndefinedConstant());
- Goto(&process_element);
- }
- BIND(&process_element);
- {
- a_.Bind(processor(this, value.value(), k()));
- Goto(&one_element_done);
- }
- BIND(&one_element_done);
- },
- 1, mode, advance_mode);
- Comment("end VisitAllFastElementsOneKind");
- }
-
- void ArrayBuiltinsAssembler::HandleFastElements(
- const CallResultProcessor& processor, const PostLoopAction& action,
- Label* slow, ForEachDirection direction,
- MissingPropertyMode missing_property_mode) {
- Label switch_on_elements_kind(this), fast_elements(this),
- maybe_double_elements(this), fast_double_elements(this);
-
- Comment("begin HandleFastElements");
- // Non-smi lengths must use the slow path.
- GotoIf(TaggedIsNotSmi(len()), slow);
-
- BranchIfFastJSArray(o(), context(),
- &switch_on_elements_kind, slow);
-
- BIND(&switch_on_elements_kind);
- TNode<Smi> smi_len = CAST(len());
- // Select by ElementsKind
- Node* kind = LoadElementsKind(o());
- Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
- &maybe_double_elements, &fast_elements);
-
- ParameterMode mode = OptimalParameterMode();
- BIND(&fast_elements);
- {
- VisitAllFastElementsOneKind(PACKED_ELEMENTS, processor, slow, mode,
- direction, missing_property_mode, smi_len);
-
- action(this);
-
- // No exception, return success
- ReturnFromBuiltin(a_.value());
- }
-
- BIND(&maybe_double_elements);
- Branch(IsElementsKindGreaterThan(kind, HOLEY_DOUBLE_ELEMENTS), slow,
- &fast_double_elements);
-
- BIND(&fast_double_elements);
- {
- VisitAllFastElementsOneKind(PACKED_DOUBLE_ELEMENTS, processor, slow, mode,
- direction, missing_property_mode, smi_len);
-
- action(this);
-
- // No exception, return success
- ReturnFromBuiltin(a_.value());
- }
- }
-
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(TNode<Number> len) {
Label runtime(this, Label::kDeferred), done(this);
@@ -1201,199 +772,6 @@ TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) {
HoleConversionMode::kConvertToUndefined));
}
-TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* array = Parameter(Descriptor::kArray);
- TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* to = Parameter(Descriptor::kTo);
-
- InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
- this_arg, array, object, initial_k,
- len, to);
-
- GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinsAssembler::FindProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction,
- MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
-}
-
-// Continuation that is called after an eager deoptimization from TF (ex. the
-// array changes during iteration).
-TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
-
- Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
- callbackfn, this_arg, UndefinedConstant(), receiver,
- initial_k, len, UndefinedConstant()));
-}
-
-// Continuation that is called after a lazy deoptimization from TF (ex. the
-// callback function is no longer callable).
-TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
-
- Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
- callbackfn, this_arg, UndefinedConstant(), receiver,
- initial_k, len, UndefinedConstant()));
-}
-
-// Continuation that is called after a lazy deoptimization from TF that happens
-// right after the callback, and its return value must be handled before
-// iteration continues.
-TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
- ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* found_value = Parameter(Descriptor::kFoundValue);
- Node* is_found = Parameter(Descriptor::kIsFound);
-
- // This custom lazy deopt point is right after the callback. find() needs
- // to pick up at the next step, which is returning the element if the callback
- // value is truthy. Otherwise, continue the search by calling the
- // continuation.
- Label if_true(this), if_false(this);
- BranchIfToBooleanIsTrue(is_found, &if_true, &if_false);
- BIND(&if_true);
- Return(found_value);
- BIND(&if_false);
- Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
- callbackfn, this_arg, UndefinedConstant(), receiver,
- initial_k, len, UndefinedConstant()));
-}
-
-// ES #sec-array.prototype.find
-TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* this_arg = args.GetOptionalArgumentValue(1);
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);
-
- GenerateIteratingArrayBuiltinBody(
- "Array.prototype.find", &ArrayBuiltinsAssembler::FindResultGenerator,
- &ArrayBuiltinsAssembler::FindProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayFindLoopContinuation),
- MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
-}
-
-TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* array = Parameter(Descriptor::kArray);
- TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* to = Parameter(Descriptor::kTo);
-
- InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
- this_arg, array, object, initial_k,
- len, to);
-
- GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinsAssembler::FindIndexProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction,
- MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
-}
-
-TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
-
- Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
- receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
- initial_k, len, UndefinedConstant()));
-}
-
-TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
-
- Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
- receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
- initial_k, len, UndefinedConstant()));
-}
-
-TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
- ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* found_value = Parameter(Descriptor::kFoundValue);
- Node* is_found = Parameter(Descriptor::kIsFound);
-
- // This custom lazy deopt point is right after the callback. findIndex()
- // needs to pick up at the next step, which is returning the index if the
- // callback value is truthy. Otherwise, continue the search by calling the
- // continuation.
- Label if_true(this), if_false(this);
- BranchIfToBooleanIsTrue(is_found, &if_true, &if_false);
- BIND(&if_true);
- Return(found_value);
- BIND(&if_false);
- Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
- receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
- initial_k, len, UndefinedConstant()));
-}
-
-// ES #sec-array.prototype.findindex
-TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* this_arg = args.GetOptionalArgumentValue(1);
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);
-
- GenerateIteratingArrayBuiltinBody(
- "Array.prototype.findIndex",
- &ArrayBuiltinsAssembler::FindIndexResultGenerator,
- &ArrayBuiltinsAssembler::FindIndexProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(),
- Builtins::kArrayFindIndexLoopContinuation),
- MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
-}
-
class ArrayPopulatorAssembler : public CodeStubAssembler {
public:
explicit ArrayPopulatorAssembler(compiler::CodeAssemblerState* state)
@@ -1707,104 +1085,6 @@ TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
&ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* this_arg = args.GetOptionalArgumentValue(1);
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);
-
- GenerateIteratingTypedArrayBuiltinBody(
- "%TypedArray%.prototype.forEach",
- &ArrayBuiltinsAssembler::ForEachResultGenerator,
- &ArrayBuiltinsAssembler::ForEachProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction);
-}
-
-TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* result = Parameter(Descriptor::kResult);
-
- // This custom lazy deopt point is right after the callback. some() needs
- // to pick up at the next step, which is either returning true if {result}
- // is true or continuing to the next array element.
- Label true_continue(this), false_continue(this);
-
- // iii. If selected is true, then...
- BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
- BIND(&true_continue);
- { Return(TrueConstant()); }
- BIND(&false_continue);
- {
- // Increment k.
- initial_k = NumberInc(initial_k);
-
- Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver,
- callbackfn, this_arg, FalseConstant(), receiver,
- initial_k, len, UndefinedConstant()));
- }
-}
-
-TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
-
- Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver,
- callbackfn, this_arg, FalseConstant(), receiver, initial_k,
- len, UndefinedConstant()));
-}
-
-TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* array = Parameter(Descriptor::kArray);
- TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* to = Parameter(Descriptor::kTo);
-
- InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
- this_arg, array, object, initial_k,
- len, to);
-
- GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinsAssembler::SomeProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
-}
-
-TF_BUILTIN(ArraySome, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* this_arg = args.GetOptionalArgumentValue(1);
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);
-
- GenerateIteratingArrayBuiltinBody(
- "Array.prototype.some", &ArrayBuiltinsAssembler::SomeResultGenerator,
- &ArrayBuiltinsAssembler::SomeProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation),
- MissingPropertyMode::kSkip);
-}
TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
@@ -1824,87 +1104,6 @@ TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinsAssembler) {
&ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* result = Parameter(Descriptor::kResult);
-
- // This custom lazy deopt point is right after the callback. every() needs
- // to pick up at the next step, which is either continuing to the next
- // array element or returning false if {result} is false.
- Label true_continue(this), false_continue(this);
-
- // iii. If selected is true, then...
- BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
- BIND(&true_continue);
- {
- // Increment k.
- initial_k = NumberInc(initial_k);
-
- Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver,
- callbackfn, this_arg, TrueConstant(), receiver,
- initial_k, len, UndefinedConstant()));
- }
- BIND(&false_continue);
- { Return(FalseConstant()); }
-}
-
-TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
-
- Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver,
- callbackfn, this_arg, TrueConstant(), receiver, initial_k,
- len, UndefinedConstant()));
-}
-
-TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* array = Parameter(Descriptor::kArray);
- TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* to = Parameter(Descriptor::kTo);
-
- InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
- this_arg, array, object, initial_k,
- len, to);
-
- GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinsAssembler::EveryProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
-}
-
-TF_BUILTIN(ArrayEvery, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* this_arg = args.GetOptionalArgumentValue(1);
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);
-
- GenerateIteratingArrayBuiltinBody(
- "Array.prototype.every", &ArrayBuiltinsAssembler::EveryResultGenerator,
- &ArrayBuiltinsAssembler::EveryProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation),
- MissingPropertyMode::kSkip);
-}
-
TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
@@ -1923,289 +1122,6 @@ TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinsAssembler) {
&ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* accumulator = Parameter(Descriptor::kAccumulator);
- TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* to = Parameter(Descriptor::kTo);
-
- InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
- this_arg, accumulator, object,
- initial_k, len, to);
-
- GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinsAssembler::ReduceProcessor,
- &ArrayBuiltinsAssembler::ReducePostLoopAction,
- MissingPropertyMode::kSkip);
-}
-
-TF_BUILTIN(ArrayReducePreLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
-
- // Simulate starting the loop at 0, but ensuring that the accumulator is
- // the hole. The continuation stub will search for the initial non-hole
- // element, rightly throwing an exception if not found.
- Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
- callbackfn, UndefinedConstant(), TheHoleConstant(),
- receiver, SmiConstant(0), len, UndefinedConstant()));
-}
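
The hole-as-accumulator convention described in the comment above becomes clearer in a hedged C++ analogue (std::optional standing in for TheHole): the continuation seeds the accumulator from the first present element and throws when there is none.

#include <optional>
#include <stdexcept>
#include <vector>

static int ReduceWithoutInitialValue(
    const std::vector<std::optional<int>>& elements,
    int (*callback)(int accumulator, int value)) {
  std::optional<int> accumulator;  // starts out as "the hole"
  for (const std::optional<int>& element : elements) {
    if (!element) continue;  // missing properties are skipped
    accumulator =
        accumulator ? callback(*accumulator, *element) : *element;  // seed once
  }
  if (!accumulator) {
    throw std::runtime_error("Reduce of empty array with no initial value");
  }
  return *accumulator;
}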
-
-TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* accumulator = Parameter(Descriptor::kAccumulator);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
-
- Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
- callbackfn, UndefinedConstant(), accumulator, receiver,
- initial_k, len, UndefinedConstant()));
-}
-
-TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* result = Parameter(Descriptor::kResult);
-
- Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
- callbackfn, UndefinedConstant(), result, receiver,
- initial_k, len, UndefinedConstant()));
-}
-
-TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
- argc);
-
- GenerateIteratingArrayBuiltinBody(
- "Array.prototype.reduce", &ArrayBuiltinsAssembler::ReduceResultGenerator,
- &ArrayBuiltinsAssembler::ReduceProcessor,
- &ArrayBuiltinsAssembler::ReducePostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation),
- MissingPropertyMode::kSkip);
-}
-
-TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
- argc);
-
- GenerateIteratingTypedArrayBuiltinBody(
- "%TypedArray%.prototype.reduce",
- &ArrayBuiltinsAssembler::ReduceResultGenerator,
- &ArrayBuiltinsAssembler::ReduceProcessor,
- &ArrayBuiltinsAssembler::ReducePostLoopAction);
-}
-
-TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* accumulator = Parameter(Descriptor::kAccumulator);
- TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* to = Parameter(Descriptor::kTo);
-
- InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
- this_arg, accumulator, object,
- initial_k, len, to);
-
- GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinsAssembler::ReduceProcessor,
- &ArrayBuiltinsAssembler::ReducePostLoopAction, MissingPropertyMode::kSkip,
- ForEachDirection::kReverse);
-}
-
-TF_BUILTIN(ArrayReduceRightPreLoopEagerDeoptContinuation,
- ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- TNode<Smi> len = CAST(Parameter(Descriptor::kLength));
-
- // Simulate starting the loop at 0, but ensuring that the accumulator is
- // the hole. The continuation stub will search for the initial non-hole
- // element, rightly throwing an exception if not found.
- Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
- receiver, callbackfn, UndefinedConstant(),
- TheHoleConstant(), receiver, SmiSub(len, SmiConstant(1)),
- len, UndefinedConstant()));
-}
-
-TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* accumulator = Parameter(Descriptor::kAccumulator);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
-
- Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
- receiver, callbackfn, UndefinedConstant(), accumulator,
- receiver, initial_k, len, UndefinedConstant()));
-}
-
-TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* result = Parameter(Descriptor::kResult);
-
- Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
- receiver, callbackfn, UndefinedConstant(), result,
- receiver, initial_k, len, UndefinedConstant()));
-}
-
-TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
- argc);
-
- GenerateIteratingArrayBuiltinBody(
- "Array.prototype.reduceRight",
- &ArrayBuiltinsAssembler::ReduceResultGenerator,
- &ArrayBuiltinsAssembler::ReduceProcessor,
- &ArrayBuiltinsAssembler::ReducePostLoopAction,
- Builtins::CallableFor(isolate(),
- Builtins::kArrayReduceRightLoopContinuation),
- MissingPropertyMode::kSkip, ForEachDirection::kReverse);
-}
-
-TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
- argc);
-
- GenerateIteratingTypedArrayBuiltinBody(
- "%TypedArray%.prototype.reduceRight",
- &ArrayBuiltinsAssembler::ReduceResultGenerator,
- &ArrayBuiltinsAssembler::ReduceProcessor,
- &ArrayBuiltinsAssembler::ReducePostLoopAction,
- ForEachDirection::kReverse);
-}
-
-TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* array = Parameter(Descriptor::kArray);
- TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* to = Parameter(Descriptor::kTo);
-
- InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
- this_arg, array, object, initial_k,
- len, to);
-
- GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinsAssembler::SpecCompliantMapProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
-}
-
-TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* array = Parameter(Descriptor::kArray);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
-
- Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver,
- callbackfn, this_arg, array, receiver, initial_k, len,
- UndefinedConstant()));
-}
-
-TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* array = Parameter(Descriptor::kArray);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* result = Parameter(Descriptor::kResult);
-
- // This custom lazy deopt point is right after the callback. map() needs
- // to pick up at the next step, which is setting the callback result in
- // the output array. After incrementing k, we can glide into the loop
- // continuation builtin.
-
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
- CallRuntime(Runtime::kCreateDataProperty, context, array, initial_k, result);
- // Then we have to increment k before going on.
- initial_k = NumberInc(initial_k);
-
- Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver,
- callbackfn, this_arg, array, receiver, initial_k, len,
- UndefinedConstant()));
-}
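
As a hedged sketch of the deopt point described above: the lazily re-entered map() first stores the already-computed callback result at index k, increments k, and only then falls back into the generic loop. In plain C++ (with {out} pre-sized to elements.size(), as map()'s output array is):

#include <functional>
#include <vector>

static std::vector<int> MapLoopLazyDeoptContinuation(
    const std::vector<int>& elements, std::vector<int> out, size_t initial_k,
    int pending_result, const std::function<int(int)>& callback) {
  out[initial_k] = pending_result;  // CreateDataProperty(A, Pk, mappedValue)
  ++initial_k;                      // then increment k...
  for (size_t k = initial_k; k < elements.size(); ++k) {
    out[k] = callback(elements[k]);  // ...and glide into the loop continuation
  }
  return out;
}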
-
-TF_BUILTIN(ArrayMap, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* this_arg = args.GetOptionalArgumentValue(1);
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);
-
- GenerateIteratingArrayBuiltinBody(
- "Array.prototype.map", &ArrayBuiltinsAssembler::MapResultGenerator,
- &ArrayBuiltinsAssembler::FastMapProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation),
- MissingPropertyMode::kSkip);
-}
-
TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
@@ -2447,7 +1363,8 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
{
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedArrayElement(CAST(elements), index_var.value());
+ Node* element_k =
+ UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
GotoIf(WordEqual(element_k, search_element), &return_found);
Increment(&index_var);
@@ -2459,7 +1376,8 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedArrayElement(CAST(elements), index_var.value());
+ Node* element_k =
+ UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
GotoIf(IsUndefined(element_k), &return_found);
GotoIf(IsTheHole(element_k), &return_found);
@@ -2479,7 +1397,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
Node* element_k =
- LoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
GotoIfNot(TaggedIsSmi(element_k), &not_smi);
Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
&return_found, &continue_loop);
@@ -2501,7 +1419,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
Node* element_k =
- LoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
GotoIf(TaggedIsSmi(element_k), &continue_loop);
GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_found,
@@ -2524,7 +1442,8 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
BIND(&next_iteration);
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedArrayElement(CAST(elements), index_var.value());
+ Node* element_k =
+ UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
GotoIf(TaggedIsSmi(element_k), &continue_loop);
GotoIf(WordEqual(search_element_string, element_k), &return_found);
Node* element_k_type = LoadInstanceType(element_k);
@@ -2552,7 +1471,8 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedArrayElement(CAST(elements), index_var.value());
+ Node* element_k =
+ UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
Label continue_loop(this);
GotoIf(TaggedIsSmi(element_k), &continue_loop);
GotoIfNot(IsBigInt(CAST(element_k)), &continue_loop);
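
Every LoadFixedArrayElement -> UnsafeLoadFixedArrayElement rewrite in this function follows the same pattern: the load is dominated by an explicit UintPtrLessThan bounds check, so a variant that (presumably) omits its own per-access bounds assertion is safe there. The shape of the pattern in plain C++:

#include <cstddef>

// No bounds check of its own; the caller must guarantee index < length.
static int UnsafeLoadElement(const int* elements, size_t index) {
  return elements[index];
}

static bool ArrayIncludesSketch(const int* elements, size_t length,
                                int search_element) {
  for (size_t index = 0; index < length; ++index) {  // explicit check here...
    if (UnsafeLoadElement(elements, index) == search_element) {
      return true;  // ...dominates every load in the loop body
    }
  }
  return false;
}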
@@ -3620,39 +2540,39 @@ TF_BUILTIN(ArrayNArgumentsConstructor, ArrayBuiltinsAssembler) {
// The ArrayNoArgumentConstructor builtin family.
GENERATE_ARRAY_CTOR(NoArgument, PackedSmi, PACKED_SMI_ELEMENTS, DontOverride,
- DONT_OVERRIDE);
+ DONT_OVERRIDE)
GENERATE_ARRAY_CTOR(NoArgument, HoleySmi, HOLEY_SMI_ELEMENTS, DontOverride,
- DONT_OVERRIDE);
+ DONT_OVERRIDE)
GENERATE_ARRAY_CTOR(NoArgument, PackedSmi, PACKED_SMI_ELEMENTS,
- DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES)
GENERATE_ARRAY_CTOR(NoArgument, HoleySmi, HOLEY_SMI_ELEMENTS,
- DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES)
GENERATE_ARRAY_CTOR(NoArgument, Packed, PACKED_ELEMENTS, DisableAllocationSites,
- DISABLE_ALLOCATION_SITES);
+ DISABLE_ALLOCATION_SITES)
GENERATE_ARRAY_CTOR(NoArgument, Holey, HOLEY_ELEMENTS, DisableAllocationSites,
- DISABLE_ALLOCATION_SITES);
+ DISABLE_ALLOCATION_SITES)
GENERATE_ARRAY_CTOR(NoArgument, PackedDouble, PACKED_DOUBLE_ELEMENTS,
- DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES)
GENERATE_ARRAY_CTOR(NoArgument, HoleyDouble, HOLEY_DOUBLE_ELEMENTS,
- DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES)
// The ArraySingleArgumentConstructor builtin family.
GENERATE_ARRAY_CTOR(SingleArgument, PackedSmi, PACKED_SMI_ELEMENTS,
- DontOverride, DONT_OVERRIDE);
+ DontOverride, DONT_OVERRIDE)
GENERATE_ARRAY_CTOR(SingleArgument, HoleySmi, HOLEY_SMI_ELEMENTS, DontOverride,
- DONT_OVERRIDE);
+ DONT_OVERRIDE)
GENERATE_ARRAY_CTOR(SingleArgument, PackedSmi, PACKED_SMI_ELEMENTS,
- DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES)
GENERATE_ARRAY_CTOR(SingleArgument, HoleySmi, HOLEY_SMI_ELEMENTS,
- DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES)
GENERATE_ARRAY_CTOR(SingleArgument, Packed, PACKED_ELEMENTS,
- DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES)
GENERATE_ARRAY_CTOR(SingleArgument, Holey, HOLEY_ELEMENTS,
- DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES)
GENERATE_ARRAY_CTOR(SingleArgument, PackedDouble, PACKED_DOUBLE_ELEMENTS,
- DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES)
GENERATE_ARRAY_CTOR(SingleArgument, HoleyDouble, HOLEY_DOUBLE_ELEMENTS,
- DisableAllocationSites, DISABLE_ALLOCATION_SITES);
+ DisableAllocationSites, DISABLE_ALLOCATION_SITES)
#undef GENERATE_ARRAY_CTOR
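
The trailing semicolons disappear because GENERATE_ARRAY_CTOR already expands to a complete builtin definition, so `MACRO(...);` leaves behind a stray empty declaration that compilers flag under -Wextra-semi. A minimal illustration of the same idiom (hypothetical macro):

// The macro body ends the definition itself...
#define DEFINE_CONSTANT_GETTER(name, value) \
  int name() { return (value); }

DEFINE_CONSTANT_GETTER(AnswerGetter, 42)  // OK: no semicolon wanted
// DEFINE_CONSTANT_GETTER(Other, 7);      // the ';' would be an extra,
                                          // warning-producing declaration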
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index 86fc09f8b4..8aa12b8858 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -23,8 +23,6 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
typedef std::function<void(ArrayBuiltinsAssembler* masm)> PostLoopAction;
- enum class MissingPropertyMode { kSkip, kUseUndefined };
-
void FindResultGenerator();
Node* FindProcessor(Node* k_value, Node* k);
@@ -51,12 +49,6 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
void ReducePostLoopAction();
- void FilterResultGenerator();
-
- Node* FilterProcessor(Node* k_value, Node* k);
-
- void MapResultGenerator();
-
void TypedArrayMapResultGenerator();
Node* SpecCompliantMapProcessor(Node* k_value, Node* k);
@@ -106,27 +98,11 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> receiver, Node* callbackfn,
Node* this_arg, TNode<IntPtrT> argc);
- void GenerateIteratingArrayBuiltinBody(
- const char* name, const BuiltinResultGenerator& generator,
- const CallResultProcessor& processor, const PostLoopAction& action,
- const Callable& slow_case_continuation,
- MissingPropertyMode missing_property_mode,
- ForEachDirection direction = ForEachDirection::kForward);
- void InitIteratingArrayBuiltinLoopContinuation(
- TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
- Node* this_arg, Node* a, TNode<JSReceiver> o, Node* initial_k,
- TNode<Number> len, Node* to);
-
void GenerateIteratingTypedArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
ForEachDirection direction = ForEachDirection::kForward);
- void GenerateIteratingArrayBuiltinLoopContinuation(
- const CallResultProcessor& processor, const PostLoopAction& action,
- MissingPropertyMode missing_property_mode,
- ForEachDirection direction = ForEachDirection::kForward);
-
void TailCallArrayConstructorStub(
const Callable& callable, TNode<Context> context,
TNode<JSFunction> target, TNode<HeapObject> allocation_site_or_undefined,
@@ -167,18 +143,6 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
Label* detached, ForEachDirection direction,
TNode<JSTypedArray> typed_array);
- void VisitAllFastElementsOneKind(ElementsKind kind,
- const CallResultProcessor& processor,
- Label* array_changed, ParameterMode mode,
- ForEachDirection direction,
- MissingPropertyMode missing_property_mode,
- TNode<Smi> length);
-
- void HandleFastElements(const CallResultProcessor& processor,
- const PostLoopAction& action, Label* slow,
- ForEachDirection direction,
- MissingPropertyMode missing_property_mode);
-
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
// This version is specialized to create a zero length array
// of the elements kind of the input array.
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 9774f24fe0..8df340ece7 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -176,7 +176,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> SetLengthProperty(
return Object::SetProperty(
isolate, receiver, isolate->factory()->length_string(),
- isolate->factory()->NewNumber(length), LanguageMode::kStrict);
+ isolate->factory()->NewNumber(length), StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError));
}
V8_WARN_UNUSED_RESULT Object GenericArrayFill(Isolate* isolate,
@@ -190,9 +191,9 @@ V8_WARN_UNUSED_RESULT Object GenericArrayFill(Isolate* isolate,
isolate->factory()->NewNumber(start));
// b. Perform ? Set(O, Pk, value, true).
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Object::SetPropertyOrElement(isolate, receiver, index, value,
- LanguageMode::kStrict));
+ RETURN_FAILURE_ON_EXCEPTION(isolate, Object::SetPropertyOrElement(
+ isolate, receiver, index, value,
+ Just(ShouldThrow::kThrowOnError)));
// c. Increase k by 1.
++start;
@@ -332,15 +333,15 @@ V8_WARN_UNUSED_RESULT Object GenericArrayPush(Isolate* isolate,
if (length <= static_cast<double>(JSArray::kMaxArrayIndex)) {
RETURN_FAILURE_ON_EXCEPTION(
isolate, Object::SetElement(isolate, receiver, length, element,
- LanguageMode::kStrict));
+ ShouldThrow::kThrowOnError));
} else {
bool success;
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, receiver, isolate->factory()->NewNumber(length), &success);
// Must succeed since we always pass a valid key.
DCHECK(success);
- MAYBE_RETURN(Object::SetProperty(&it, element, LanguageMode::kStrict,
- StoreOrigin::kMaybeKeyed),
+ MAYBE_RETURN(Object::SetProperty(&it, element, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError)),
ReadOnlyRoots(isolate).exception());
}
@@ -353,7 +354,8 @@ V8_WARN_UNUSED_RESULT Object GenericArrayPush(Isolate* isolate,
RETURN_FAILURE_ON_EXCEPTION(
isolate, Object::SetProperty(isolate, receiver,
isolate->factory()->length_string(),
- final_length, LanguageMode::kStrict));
+ final_length, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError)));
// 8. Return len.
return *final_length;
@@ -406,9 +408,11 @@ V8_WARN_UNUSED_RESULT Object GenericArrayPop(Isolate* isolate,
if (length == 0) {
// a. Perform ? Set(O, "length", 0, true).
RETURN_FAILURE_ON_EXCEPTION(
- isolate, Object::SetProperty(
- isolate, receiver, isolate->factory()->length_string(),
- Handle<Smi>(Smi::zero(), isolate), LanguageMode::kStrict));
+ isolate, Object::SetProperty(isolate, receiver,
+ isolate->factory()->length_string(),
+ Handle<Smi>(Smi::zero(), isolate),
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError)));
// b. Return undefined.
return ReadOnlyRoots(isolate).undefined_value();
@@ -435,7 +439,8 @@ V8_WARN_UNUSED_RESULT Object GenericArrayPop(Isolate* isolate,
RETURN_FAILURE_ON_EXCEPTION(
isolate, Object::SetProperty(isolate, receiver,
isolate->factory()->length_string(),
- new_length, LanguageMode::kStrict));
+ new_length, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError)));
// f. Return element.
return *element;
@@ -525,8 +530,9 @@ V8_WARN_UNUSED_RESULT Object GenericArrayShift(Isolate* isolate,
// ii. Perform ? Set(O, to, fromVal, true).
RETURN_FAILURE_ON_EXCEPTION(
- isolate, Object::SetPropertyOrElement(isolate, receiver, to, from_val,
- LanguageMode::kStrict));
+ isolate,
+ Object::SetPropertyOrElement(isolate, receiver, to, from_val,
+ Just(ShouldThrow::kThrowOnError)));
} else { // e. Else fromPresent is false,
// i. Perform ? DeletePropertyOrThrow(O, to).
MAYBE_RETURN(JSReceiver::DeletePropertyOrElement(receiver, to,
@@ -657,8 +663,8 @@ class ArrayConcatVisitor {
if (!is_fixed_array()) {
LookupIterator it(isolate_, storage_, index, LookupIterator::OWN);
- MAYBE_RETURN(JSReceiver::CreateDataProperty(&it, elm, kThrowOnError),
- false);
+ MAYBE_RETURN(
+ JSReceiver::CreateDataProperty(&it, elm, Just(kThrowOnError)), false);
return true;
}
@@ -731,9 +737,9 @@ class ArrayConcatVisitor {
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
RETURN_ON_EXCEPTION(
isolate_,
- Object::SetProperty(isolate_, result,
- isolate_->factory()->length_string(), length,
- LanguageMode::kStrict),
+ Object::SetProperty(
+ isolate_, result, isolate_->factory()->length_string(), length,
+ StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError)),
JSReceiver);
return result;
}
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index c4146a359a..a966122c97 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/maybe-handles-inl.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 6bf8cd0fd3..c23660e116 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -7,6 +7,9 @@
#include "src/conversions.h"
#include "src/counters.h"
#include "src/objects-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/intl-objects.h"
+#endif
namespace v8 {
namespace internal {
@@ -100,19 +103,18 @@ Object BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
isolate, x, ThisBigIntValue(isolate, receiver, builtin_name));
// 2. If radix is not present, let radixNumber be 10.
// 3. Else if radix is undefined, let radixNumber be 10.
- int radix_number;
- if (radix->IsUndefined(isolate)) {
- radix_number = 10;
- } else {
+ int radix_number = 10;
+ if (!radix->IsUndefined(isolate)) {
// 4. Else, let radixNumber be ? ToInteger(radix).
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix,
Object::ToInteger(isolate, radix));
- radix_number = static_cast<int>(radix->Number());
- }
- // 5. If radixNumber < 2 or radixNumber > 36, throw a RangeError exception.
- if (radix_number < 2 || radix_number > 36) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kToRadixFormatRange));
+ double radix_double = radix->Number();
+ // 5. If radixNumber < 2 or radixNumber > 36, throw a RangeError exception.
+ if (radix_double < 2 || radix_double > 36) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kToRadixFormatRange));
+ }
+ radix_number = static_cast<int>(radix_double);
}
// Return the String representation of this Number value using the radix
// specified by radixNumber.
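
The reordering above is load-bearing: the 2..36 range check now runs on the double before the narrowing cast. Converting an out-of-range double to int is undefined behavior in C++, and on common implementations a huge radix could wrap into the valid range. A hedged standalone illustration:

#include <cstdio>

int main() {
  double radix = 4294967312.0;  // e.g. ToInteger(2**32 + 16)
  if (radix < 2 || radix > 36) {
    std::printf("RangeError: toString() radix must be between 2 and 36\n");
  } else {
    // Only reached for in-range values, so the narrowing cast is well-defined.
    int radix_number = static_cast<int>(radix);
    std::printf("radix_number = %d\n", radix_number);
  }
  return 0;
}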
@@ -123,6 +125,16 @@ Object BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
BUILTIN(BigIntPrototypeToLocaleString) {
HandleScope scope(isolate);
+#ifdef V8_INTL_SUPPORT
+ if (FLAG_harmony_intl_bigint) {
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::NumberToLocaleString(isolate, args.receiver(),
+ args.atOrUndefined(isolate, 1),
+ args.atOrUndefined(isolate, 2)));
+ }
+  // Falls back to the old toString implementation if the flag is off
+  // or V8_INTL_SUPPORT is not available.
+#endif // V8_INTL_SUPPORT
Handle<Object> radix = isolate->factory()->undefined_value();
return BigIntToStringImpl(args.receiver(), radix, isolate,
"BigInt.prototype.toLocaleString");
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
index 52ed0563c6..b10f013020 100644
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 24935bfbf4..2912c51136 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -9,12 +9,17 @@
#include "src/globals.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
+#include "src/objects/api-callbacks.h"
#include "src/objects/arguments.h"
#include "src/objects/property-cell.h"
+#include "src/objects/templates.h"
namespace v8 {
namespace internal {
+template <typename T>
+using TNode = compiler::TNode<T>;
+
void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined(
MacroAssembler* masm) {
Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
@@ -373,5 +378,253 @@ TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
+TNode<JSReceiver> CallOrConstructBuiltinsAssembler::GetCompatibleReceiver(
+ TNode<JSReceiver> receiver, TNode<HeapObject> signature,
+ TNode<Context> context) {
+ // Walk up the hidden prototype chain to find the compatible holder
+ // for the {signature}, starting with the {receiver} itself.
+ //
+ // Be careful, these loops are hand-tuned for (close to) ideal CSA
+ // code generation. Especially the sharing of the {var_template}
+ // below is intentional (even though it reads a bit funny in the
+ // first loop).
+ TVARIABLE(HeapObject, var_holder, receiver);
+ Label holder_loop(this, &var_holder), holder_found(this, &var_holder),
+ holder_next(this, Label::kDeferred);
+ Goto(&holder_loop);
+ BIND(&holder_loop);
+ {
+ // Find the template to compare against the {signature}. We don't
+ // bother checking that the template is a FunctionTemplateInfo here,
+ // but instead do that as part of the template loop below. The only
+ // thing we care about is that the template is actually a HeapObject.
+ TNode<HeapObject> holder = var_holder.value();
+ TVARIABLE(HeapObject, var_template, LoadMap(holder));
+ Label template_map_loop(this, &var_template),
+ template_loop(this, &var_template),
+ template_from_closure(this, &var_template);
+ Goto(&template_map_loop);
+ BIND(&template_map_loop);
+ {
+ // Load the constructor field from the current map (in the
+ // {var_template} variable), and see if that is a HeapObject.
+ // If it's a Smi then it is non-instance prototype on some
+ // initial map, which cannot be the case for API instances.
+ TNode<Object> constructor = LoadObjectField(
+ var_template.value(), Map::kConstructorOrBackPointerOffset);
+ GotoIf(TaggedIsSmi(constructor), &holder_next);
+
+ // Now there are three cases for {constructor} that we care
+ // about here:
+ //
+ // 1. {constructor} is a JSFunction, and we can load the template
+ // from its SharedFunctionInfo::function_data field (which
+ // may not actually be a FunctionTemplateInfo).
+ // 2. {constructor} is a Map, in which case it's not a constructor
+ // but a back-pointer and we follow that.
+ // 3. {constructor} is a FunctionTemplateInfo (or some other
+ // HeapObject), in which case we can directly use that for
+ // the template loop below (non-FunctionTemplateInfo objects
+ // will be ruled out there).
+ //
+ var_template = CAST(constructor);
+ TNode<Int32T> template_type = LoadInstanceType(var_template.value());
+ GotoIf(InstanceTypeEqual(template_type, JS_FUNCTION_TYPE),
+ &template_from_closure);
+ Branch(InstanceTypeEqual(template_type, MAP_TYPE), &template_map_loop,
+ &template_loop);
+ }
+
+ BIND(&template_from_closure);
+ {
+ // The first case from above, where we load the template from the
+ // SharedFunctionInfo of the closure. We only check that the
+ // SharedFunctionInfo::function_data is a HeapObject and blindly
+      // use that as a template, since non-FunctionTemplateInfo objects
+ // will be ruled out automatically by the template loop below.
+ TNode<SharedFunctionInfo> template_shared =
+ LoadObjectField<SharedFunctionInfo>(
+ var_template.value(), JSFunction::kSharedFunctionInfoOffset);
+ TNode<Object> template_data = LoadObjectField(
+ template_shared, SharedFunctionInfo::kFunctionDataOffset);
+ GotoIf(TaggedIsSmi(template_data), &holder_next);
+ var_template = CAST(template_data);
+ Goto(&template_loop);
+ }
+
+ BIND(&template_loop);
+ {
+ // This loop compares the template to the expected {signature},
+ // following the chain of parent templates until it hits the
+ // end, in which case we continue with the next holder (the
+ // hidden prototype) if there's any.
+ TNode<HeapObject> current = var_template.value();
+ GotoIf(WordEqual(current, signature), &holder_found);
+
+ GotoIfNot(IsFunctionTemplateInfoMap(LoadMap(current)), &holder_next);
+
+ TNode<HeapObject> current_rare = LoadObjectField<HeapObject>(
+ current, FunctionTemplateInfo::kFunctionTemplateRareDataOffset);
+ GotoIf(IsUndefined(current_rare), &holder_next);
+ var_template = LoadObjectField<HeapObject>(
+ current_rare, FunctionTemplateRareData::kParentTemplateOffset);
+ Goto(&template_loop);
+ }
+
+ BIND(&holder_next);
+ {
+ // Continue with the hidden prototype of the {holder} if it
+ // has one, or throw an illegal invocation exception, since
+ // the receiver did not pass the {signature} check.
+ TNode<Map> holder_map = LoadMap(holder);
+ var_holder = LoadMapPrototype(holder_map);
+ GotoIf(IsSetWord32(LoadMapBitField3(holder_map),
+ Map::HasHiddenPrototypeBit::kMask),
+ &holder_loop);
+ ThrowTypeError(context, MessageTemplate::kIllegalInvocation);
+ }
+ }
+
+ BIND(&holder_found);
+ return CAST(var_holder.value());
+}
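
Stripped of the CSA plumbing, the two nested loops above amount to this hedged sketch (struct names hypothetical): walk holders along the hidden prototype chain, and for each holder walk its template's parent chain looking for {signature}.

struct TemplateInfo {
  const TemplateInfo* parent;  // FunctionTemplateRareData's parent template
};

struct Holder {
  const TemplateInfo* constructor_template;
  const Holder* hidden_prototype;  // nullptr when there is none
};

static const Holder* FindCompatibleReceiver(const Holder* receiver,
                                            const TemplateInfo* signature) {
  for (const Holder* holder = receiver; holder != nullptr;
       holder = holder->hidden_prototype) {  // the holder_loop
    for (const TemplateInfo* current = holder->constructor_template;
         current != nullptr; current = current->parent) {  // the template_loop
      if (current == signature) return holder;  // holder_found
    }
  }
  return nullptr;  // caller throws kIllegalInvocation instead
}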
+
+// This calls an API callback by passing a {FunctionTemplateInfo},
+// performing the appropriate access and compatible-receiver checks.
+void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
+ CallFunctionTemplateMode mode,
+ TNode<FunctionTemplateInfo> function_template_info, TNode<IntPtrT> argc,
+ TNode<Context> context) {
+ CodeStubArguments args(this, argc);
+ Label throw_illegal_invocation(this, Label::kDeferred);
+
+ // For API callbacks we need to call ToObject on the receiver.
+ // And in case the receiver is a JSObject already, we might
+ // need to perform access checks in the current {context},
+ // depending on whether the "needs access check" bit is
+ // set on the receiver _and_ the {function_template_info}
+ // doesn't have the "accepts any receiver" bit set.
+ TVARIABLE(Object, var_receiver, args.GetReceiver());
+ if (mode == CallFunctionTemplateMode::kCheckCompatibleReceiver) {
+    // We are only interested in checking that the receiver is
+    // compatible with the {function_template_info}, and don't need
+    // to bother doing any access checks. So just ensure that the
+    // receiver is actually a JSReceiver.
+ var_receiver = ToObject_Inline(context, var_receiver.value());
+ } else {
+ Label receiver_is_primitive(this, Label::kDeferred),
+ receiver_needs_access_check(this, &var_receiver, Label::kDeferred),
+ receiver_done(this);
+
+ // Check if the receiver needs to be converted, or if it's already
+ // a JSReceiver, see if the "needs access check" bit is set _and_
+ // the {function_template_info} doesn't just accept any receiver.
+ GotoIf(TaggedIsSmi(var_receiver.value()), &receiver_is_primitive);
+ TNode<Map> receiver_map = LoadMap(CAST(var_receiver.value()));
+ GotoIfNot(IsJSReceiverMap(receiver_map), &receiver_is_primitive);
+ GotoIfNot(
+ IsSetWord32<Map::IsAccessCheckNeededBit>(LoadMapBitField(receiver_map)),
+ &receiver_done);
+ TNode<WordT> function_template_info_flags = LoadAndUntagObjectField(
+ function_template_info, FunctionTemplateInfo::kFlagOffset);
+ Branch(IsSetWord(function_template_info_flags,
+ 1 << FunctionTemplateInfo::kAcceptAnyReceiver),
+ &receiver_done, &receiver_needs_access_check);
+
+ BIND(&receiver_is_primitive);
+ {
+ // Convert primitives to wrapper objects as necessary. In case
+ // null or undefined were passed, we need to do the access check
+ // on the global proxy here.
+ var_receiver = ToObject(context, var_receiver.value());
+ args.SetReceiver(var_receiver.value());
+ GotoIfNot(IsSetWord32<Map::IsAccessCheckNeededBit>(
+ LoadMapBitField(LoadMap(CAST(var_receiver.value())))),
+ &receiver_done);
+ TNode<WordT> function_template_info_flags = LoadAndUntagObjectField(
+ function_template_info, FunctionTemplateInfo::kFlagOffset);
+ Branch(IsSetWord(function_template_info_flags,
+ 1 << FunctionTemplateInfo::kAcceptAnyReceiver),
+ &receiver_done, &receiver_needs_access_check);
+ }
+
+ BIND(&receiver_needs_access_check);
+ {
+ CallRuntime(Runtime::kAccessCheck, context, var_receiver.value());
+ Goto(&receiver_done);
+ }
+
+ BIND(&receiver_done);
+ }
+ TNode<JSReceiver> receiver = CAST(var_receiver.value());
+
+ // Figure out the API holder for the {receiver} depending on the
+ // {mode} and the signature on the {function_template_info}.
+ TNode<JSReceiver> holder;
+ if (mode == CallFunctionTemplateMode::kCheckAccess) {
+ // We did the access check (including the ToObject) above, so
+ // {receiver} is a JSReceiver at this point, and we don't need
+ // to perform any "compatible receiver check", so {holder} is
+ // actually the {receiver}.
+ holder = receiver;
+ } else {
+ // If the {function_template_info} doesn't specify any signature, we
+    // just use the receiver as the holder for the API callback; otherwise
+ // we need to look for a compatible holder in the receiver's hidden
+ // prototype chain.
+ TNode<HeapObject> signature = LoadObjectField<HeapObject>(
+ function_template_info, FunctionTemplateInfo::kSignatureOffset);
+ holder = Select<JSReceiver>(
+ IsUndefined(signature), // --
+ [&]() { return receiver; },
+ [&]() { return GetCompatibleReceiver(receiver, signature, context); });
+ }
+
+ // Perform the actual API callback invocation via CallApiCallback.
+ TNode<CallHandlerInfo> call_handler_info = LoadObjectField<CallHandlerInfo>(
+ function_template_info, FunctionTemplateInfo::kCallCodeOffset);
+ TNode<Foreign> foreign = LoadObjectField<Foreign>(
+ call_handler_info, CallHandlerInfo::kJsCallbackOffset);
+ TNode<RawPtrT> callback =
+ LoadObjectField<RawPtrT>(foreign, Foreign::kForeignAddressOffset);
+ TNode<Object> call_data =
+ LoadObjectField<Object>(call_handler_info, CallHandlerInfo::kDataOffset);
+ TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback, argc,
+ call_data, holder);
+}
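
The receiver-preparation policy above boils down to a small predicate (a hedged distillation, not a V8 API): the Runtime::kAccessCheck call happens only when the mode asks for access checks, the receiver's map demands one, and the template does not accept arbitrary receivers.

// Whether the runtime access check must run for an API callback receiver.
static bool ReceiverNeedsAccessCheck(bool mode_checks_access,
                                     bool map_needs_access_check,
                                     bool template_accepts_any_receiver) {
  if (!mode_checks_access) return false;      // kCheckCompatibleReceiver mode
  if (!map_needs_access_check) return false;  // the common fast path
  return !template_accepts_any_receiver;      // kAcceptAnyReceiver bit unset
}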
+
+TF_BUILTIN(CallFunctionTemplate_CheckAccess, CallOrConstructBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<FunctionTemplateInfo> function_template_info =
+ CAST(Parameter(Descriptor::kFunctionTemplateInfo));
+ TNode<IntPtrT> argc =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgumentsCount));
+ CallFunctionTemplate(CallFunctionTemplateMode::kCheckAccess,
+ function_template_info, argc, context);
+}
+
+TF_BUILTIN(CallFunctionTemplate_CheckCompatibleReceiver,
+ CallOrConstructBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<FunctionTemplateInfo> function_template_info =
+ CAST(Parameter(Descriptor::kFunctionTemplateInfo));
+ TNode<IntPtrT> argc =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgumentsCount));
+ CallFunctionTemplate(CallFunctionTemplateMode::kCheckCompatibleReceiver,
+ function_template_info, argc, context);
+}
+
+TF_BUILTIN(CallFunctionTemplate_CheckAccessAndCompatibleReceiver,
+ CallOrConstructBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<FunctionTemplateInfo> function_template_info =
+ CAST(Parameter(Descriptor::kFunctionTemplateInfo));
+ TNode<IntPtrT> argc =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgumentsCount));
+ CallFunctionTemplate(
+ CallFunctionTemplateMode::kCheckAccessAndCompatibleReceiver,
+ function_template_info, argc, context);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-call-gen.h b/deps/v8/src/builtins/builtins-call-gen.h
index 013093f38b..5a64d36c34 100644
--- a/deps/v8/src/builtins/builtins-call-gen.h
+++ b/deps/v8/src/builtins/builtins-call-gen.h
@@ -28,6 +28,21 @@ class CallOrConstructBuiltinsAssembler : public CodeStubAssembler {
void CallOrConstructWithSpread(TNode<Object> target, TNode<Object> new_target,
TNode<Object> spread, TNode<Int32T> args_count,
TNode<Context> context);
+
+ enum class CallFunctionTemplateMode : uint8_t {
+ kCheckAccess,
+ kCheckCompatibleReceiver,
+ kCheckAccessAndCompatibleReceiver,
+ };
+
+ void CallFunctionTemplate(CallFunctionTemplateMode mode,
+ TNode<FunctionTemplateInfo> function_template_info,
+ TNode<IntPtrT> argc, TNode<Context> context);
+
+ private:
+ TNode<JSReceiver> GetCompatibleReceiver(TNode<JSReceiver> receiver,
+ TNode<HeapObject> signature,
+ TNode<Context> context);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 7f7699927c..51580899d2 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean.
#include "src/objects-inl.h"
#include "src/objects/frame-array-inl.h"
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index d782f241b0..6672569f19 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -9,6 +9,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/code-stub-assembler.h"
#include "src/heap/factory-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection.h"
#include "torque-generated/builtins-base-from-dsl-gen.h"
@@ -538,7 +539,7 @@ TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedArrayElement(
TNode<FixedArray> elements, TNode<IntPtrT> index) {
- TNode<Object> element = LoadFixedArrayElement(elements, index);
+ TNode<Object> element = UnsafeLoadFixedArrayElement(elements, index);
return Select<Object>(IsTheHole(element), [=] { return UndefinedConstant(); },
[=] { return element; });
}
@@ -1002,10 +1003,10 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
CSA_ASSERT(this, InstanceTypeEqual(LoadInstanceType(iterator),
JS_MAP_VALUE_ITERATOR_TYPE));
TNode<Object> entry_value =
- LoadFixedArrayElement(table, entry_start_position,
- (OrderedHashMap::HashTableStartIndex() +
- OrderedHashMap::kValueOffset) *
- kTaggedSize);
+ UnsafeLoadFixedArrayElement(table, entry_start_position,
+ (OrderedHashMap::HashTableStartIndex() +
+ OrderedHashMap::kValueOffset) *
+ kTaggedSize);
Store(elements, var_offset.value(), entry_value);
Goto(&continue_loop);
@@ -1409,9 +1410,9 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
entry_start_position = IntPtrAdd(
IntPtrMul(var_index.value(), IntPtrConstant(TableType::kEntrySize)),
number_of_buckets);
- entry_key =
- LoadFixedArrayElement(table, entry_start_position,
- TableType::HashTableStartIndex() * kTaggedSize);
+ entry_key = UnsafeLoadFixedArrayElement(
+ table, entry_start_position,
+ TableType::HashTableStartIndex() * kTaggedSize);
Increment(&var_index);
Branch(IsTheHole(entry_key), &loop, &done_loop);
}
@@ -1533,8 +1534,8 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
TVARIABLE(OrderedHashMap, table_var, table);
{
// Check we have enough space for the entry.
- number_of_buckets.Bind(SmiUntag(CAST(
- LoadFixedArrayElement(table, OrderedHashMap::NumberOfBucketsIndex()))));
+ number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+ table, OrderedHashMap::NumberOfBucketsIndex()))));
STATIC_ASSERT(OrderedHashMap::kLoadFactor == 2);
Node* const capacity = WordShl(number_of_buckets.value(), 1);
@@ -1549,7 +1550,7 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
// fields.
CallRuntime(Runtime::kMapGrow, context, receiver);
table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset));
- number_of_buckets.Bind(SmiUntag(CAST(LoadFixedArrayElement(
+ number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
table_var.value(), OrderedHashMap::NumberOfBucketsIndex()))));
Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
table_var.value(), OrderedHashMap::NumberOfElementsOffset())));
@@ -1571,25 +1572,29 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
Node* const hash, Node* const number_of_buckets, Node* const occupancy) {
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
- Node* const bucket_entry = LoadFixedArrayElement(
+ Node* const bucket_entry = UnsafeLoadFixedArrayElement(
table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize);
// Store the entry elements.
Node* const entry_start = IntPtrAdd(
IntPtrMul(occupancy, IntPtrConstant(OrderedHashMap::kEntrySize)),
number_of_buckets);
- StoreFixedArrayElement(table, entry_start, key, UPDATE_WRITE_BARRIER,
- kTaggedSize * OrderedHashMap::HashTableStartIndex());
- StoreFixedArrayElement(table, entry_start, value, UPDATE_WRITE_BARRIER,
- kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
- OrderedHashMap::kValueOffset));
- StoreFixedArrayElement(table, entry_start, bucket_entry, SKIP_WRITE_BARRIER,
- kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
- OrderedHashMap::kChainOffset));
+ UnsafeStoreFixedArrayElement(
+ table, entry_start, key, UPDATE_WRITE_BARRIER,
+ kTaggedSize * OrderedHashMap::HashTableStartIndex());
+ UnsafeStoreFixedArrayElement(
+ table, entry_start, value, UPDATE_WRITE_BARRIER,
+ kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
+ OrderedHashMap::kValueOffset));
+ UnsafeStoreFixedArrayElement(
+ table, entry_start, bucket_entry, SKIP_WRITE_BARRIER,
+ kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
+ OrderedHashMap::kChainOffset));
// Update the bucket head.
- StoreFixedArrayElement(table, bucket, SmiTag(occupancy), SKIP_WRITE_BARRIER,
- OrderedHashMap::HashTableStartIndex() * kTaggedSize);
+ UnsafeStoreFixedArrayElement(
+ table, bucket, SmiTag(occupancy), SKIP_WRITE_BARRIER,
+ OrderedHashMap::HashTableStartIndex() * kTaggedSize);
// Bump the elements count.
TNode<Smi> const number_of_elements =
@@ -1703,8 +1708,8 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
TVARIABLE(OrderedHashSet, table_var, table);
{
// Check we have enough space for the entry.
- number_of_buckets.Bind(SmiUntag(CAST(
- LoadFixedArrayElement(table, OrderedHashSet::NumberOfBucketsIndex()))));
+ number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
+ table, OrderedHashSet::NumberOfBucketsIndex()))));
STATIC_ASSERT(OrderedHashSet::kLoadFactor == 2);
Node* const capacity = WordShl(number_of_buckets.value(), 1);
@@ -1719,7 +1724,7 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
// fields.
CallRuntime(Runtime::kSetGrow, context, receiver);
table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset));
- number_of_buckets.Bind(SmiUntag(CAST(LoadFixedArrayElement(
+ number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
table_var.value(), OrderedHashSet::NumberOfBucketsIndex()))));
Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
table_var.value(), OrderedHashSet::NumberOfElementsOffset())));
@@ -1741,22 +1746,25 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
Node* const number_of_buckets, Node* const occupancy) {
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
- Node* const bucket_entry = LoadFixedArrayElement(
+ Node* const bucket_entry = UnsafeLoadFixedArrayElement(
table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize);
// Store the entry elements.
Node* const entry_start = IntPtrAdd(
IntPtrMul(occupancy, IntPtrConstant(OrderedHashSet::kEntrySize)),
number_of_buckets);
- StoreFixedArrayElement(table, entry_start, key, UPDATE_WRITE_BARRIER,
- kTaggedSize * OrderedHashSet::HashTableStartIndex());
- StoreFixedArrayElement(table, entry_start, bucket_entry, SKIP_WRITE_BARRIER,
- kTaggedSize * (OrderedHashSet::HashTableStartIndex() +
- OrderedHashSet::kChainOffset));
+ UnsafeStoreFixedArrayElement(
+ table, entry_start, key, UPDATE_WRITE_BARRIER,
+ kTaggedSize * OrderedHashSet::HashTableStartIndex());
+ UnsafeStoreFixedArrayElement(
+ table, entry_start, bucket_entry, SKIP_WRITE_BARRIER,
+ kTaggedSize * (OrderedHashSet::HashTableStartIndex() +
+ OrderedHashSet::kChainOffset));
// Update the bucket head.
- StoreFixedArrayElement(table, bucket, SmiTag(occupancy), SKIP_WRITE_BARRIER,
- OrderedHashSet::HashTableStartIndex() * kTaggedSize);
+ UnsafeStoreFixedArrayElement(
+ table, bucket, SmiTag(occupancy), SKIP_WRITE_BARRIER,
+ OrderedHashSet::HashTableStartIndex() * kTaggedSize);
// Bump the elements count.
TNode<Smi> const number_of_elements =
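
For orientation, one hedged sketch of the bucket-chaining scheme these stores implement (it covers both the OrderedHashMap and OrderedHashSet variants): the new entry records the previous bucket head as its chain link and then becomes the bucket head itself, while the entries array keeps insertion order.

#include <vector>

struct Entry {
  int key;
  int value;
  int chain;  // index of the previous bucket head, or -1
};

struct OrderedHashMapSketch {
  // bucket_heads has a power-of-two size and starts filled with -1.
  std::vector<int> bucket_heads;
  std::vector<Entry> entries;  // insertion-ordered backing store

  void Add(int hash, int key, int value) {
    int bucket = hash & (static_cast<int>(bucket_heads.size()) - 1);
    entries.push_back({key, value, bucket_heads[bucket]});  // link old head
    bucket_heads[bucket] = static_cast<int>(entries.size()) - 1;  // new head
  }
};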
@@ -2333,12 +2341,13 @@ void WeakCollectionsBuiltinsAssembler::AddEntry(
TNode<Object> key, TNode<Object> value, TNode<IntPtrT> number_of_elements) {
// See EphemeronHashTable::AddEntry().
TNode<IntPtrT> value_index = ValueIndexFromKeyIndex(key_index);
- StoreFixedArrayElement(table, key_index, key);
- StoreFixedArrayElement(table, value_index, value);
+ UnsafeStoreFixedArrayElement(table, key_index, key);
+ UnsafeStoreFixedArrayElement(table, value_index, value);
// See HashTableBase::ElementAdded().
- StoreFixedArrayElement(table, EphemeronHashTable::kNumberOfElementsIndex,
- SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
+ UnsafeStoreFixedArrayElement(
+ table, EphemeronHashTable::kNumberOfElementsIndex,
+ SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
}
TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
@@ -2403,7 +2412,8 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndex(
TNode<IntPtrT> key_index;
{
key_index = KeyIndexFromEntry(var_entry.value());
- TNode<Object> entry_key = LoadFixedArrayElement(CAST(table), key_index);
+ TNode<Object> entry_key =
+ UnsafeLoadFixedArrayElement(CAST(table), key_index);
key_compare(entry_key, &if_found);
@@ -2453,14 +2463,14 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::KeyIndexFromEntry(
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfElements(
TNode<EphemeronHashTable> table, int offset) {
- TNode<IntPtrT> number_of_elements = SmiUntag(CAST(LoadFixedArrayElement(
+ TNode<IntPtrT> number_of_elements = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
table, EphemeronHashTable::kNumberOfElementsIndex)));
return IntPtrAdd(number_of_elements, IntPtrConstant(offset));
}
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfDeleted(
TNode<EphemeronHashTable> table, int offset) {
- TNode<IntPtrT> number_of_deleted = SmiUntag(CAST(LoadFixedArrayElement(
+ TNode<IntPtrT> number_of_deleted = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
table, EphemeronHashTable::kNumberOfDeletedElementsIndex)));
return IntPtrAdd(number_of_deleted, IntPtrConstant(offset));
}
@@ -2472,8 +2482,8 @@ TNode<EphemeronHashTable> WeakCollectionsBuiltinsAssembler::LoadTable(
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadTableCapacity(
TNode<EphemeronHashTable> table) {
- return SmiUntag(
- CAST(LoadFixedArrayElement(table, EphemeronHashTable::kCapacityIndex)));
+ return SmiUntag(CAST(
+ UnsafeLoadFixedArrayElement(table, EphemeronHashTable::kCapacityIndex)));
}
TNode<Word32T> WeakCollectionsBuiltinsAssembler::InsufficientCapacityToAdd(
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 779e96c31f..cb26b38c00 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -297,6 +297,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
Node* context) {
Label call_runtime(this, Label::kDeferred), end(this);
+ GotoIf(IsUndefined(feedback_vector), &call_runtime);
+
VARIABLE(result, MachineRepresentation::kTagged);
TNode<Object> literal_site =
CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index dc8cb9e1e5..ffbeab26a3 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -14,6 +14,7 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-date-time-format.h"
#endif
+#include "src/string-stream.h"
namespace v8 {
namespace internal {
@@ -142,12 +143,23 @@ double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
enum ToDateStringMode { kDateOnly, kTimeOnly, kDateAndTime };
+typedef base::SmallVector<char, 128> DateBuffer;
+
+template <class... Args>
+DateBuffer FormatDate(const char* format, Args... args) {
+ DateBuffer buffer;
+ SmallStringOptimizedAllocator<DateBuffer::kInlineSize> allocator(&buffer);
+ StringStream sstream(&allocator);
+ sstream.Add(format, args...);
+ buffer.resize_no_init(sstream.length());
+ return buffer;
+}
+
// ES6 section 20.3.4.41.1 ToDateString(tv)
-void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
- ToDateStringMode mode = kDateAndTime) {
+DateBuffer ToDateString(double time_val, DateCache* date_cache,
+ ToDateStringMode mode = kDateAndTime) {
if (std::isnan(time_val)) {
- SNPrintF(str, "Invalid Date");
- return;
+ return FormatDate("Invalid Date");
}
int64_t time_ms = static_cast<int64_t>(time_val);
int64_t local_time_ms = date_cache->ToLocal(time_ms);
@@ -160,22 +172,17 @@ void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
const char* local_timezone = date_cache->LocalTimezone(time_ms);
switch (mode) {
case kDateOnly:
- SNPrintF(str, "%s %s %02d %04d", kShortWeekDays[weekday],
- kShortMonths[month], day, year);
- return;
+ return FormatDate("%s %s %02d %04d", kShortWeekDays[weekday],
+ kShortMonths[month], day, year);
case kTimeOnly:
- // TODO(842085): str may be silently truncated.
- SNPrintF(str, "%02d:%02d:%02d GMT%c%02d%02d (%s)", hour, min, sec,
- (timezone_offset < 0) ? '-' : '+', timezone_hour, timezone_min,
- local_timezone);
- return;
+ return FormatDate("%02d:%02d:%02d GMT%c%02d%02d (%s)", hour, min, sec,
+ (timezone_offset < 0) ? '-' : '+', timezone_hour,
+ timezone_min, local_timezone);
case kDateAndTime:
- // TODO(842085): str may be silently truncated.
- SNPrintF(str, "%s %s %02d %04d %02d:%02d:%02d GMT%c%02d%02d (%s)",
- kShortWeekDays[weekday], kShortMonths[month], day, year, hour,
- min, sec, (timezone_offset < 0) ? '-' : '+', timezone_hour,
- timezone_min, local_timezone);
- return;
+ return FormatDate("%s %s %02d %04d %02d:%02d:%02d GMT%c%02d%02d (%s)",
+ kShortWeekDays[weekday], kShortMonths[month], day, year,
+ hour, min, sec, (timezone_offset < 0) ? '-' : '+',
+ timezone_hour, timezone_min, local_timezone);
}
UNREACHABLE();
}
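
FormatDate replaces the fixed 128-character stack buffers (and the silent-truncation TODOs deleted above) with a small buffer that can spill to the heap. A hedged standalone analogue using vsnprintf:

#include <cstdarg>
#include <cstdio>
#include <string>

static std::string FormatDateSketch(const char* format, ...) {
  char small[128];  // fast path: format on the stack
  va_list args;
  va_start(args, format);
  int n = std::vsnprintf(small, sizeof(small), format, args);
  va_end(args);
  if (n < 0) return std::string();  // encoding error
  if (n < static_cast<int>(sizeof(small))) return std::string(small, n);
  std::string big(static_cast<size_t>(n) + 1, '\0');  // slow path: exact size
  va_start(args, format);
  std::vsnprintf(&big[0], big.size(), format, args);
  va_end(args);
  big.resize(static_cast<size_t>(n));  // drop the trailing NUL
  return big;
}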
@@ -198,10 +205,9 @@ BUILTIN(DateConstructor) {
HandleScope scope(isolate);
if (args.new_target()->IsUndefined(isolate)) {
double const time_val = JSDate::CurrentTimeValue(isolate);
- char buffer[128];
- ToDateString(time_val, ArrayVector(buffer), isolate->date_cache());
+ DateBuffer buffer = ToDateString(time_val, isolate->date_cache());
RETURN_RESULT_OR_FAILURE(
- isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+ isolate, isolate->factory()->NewStringFromUtf8(VectorOf(buffer)));
}
// [Construct]
int const argc = args.length() - 1;
@@ -786,11 +792,10 @@ BUILTIN(DatePrototypeSetUTCSeconds) {
BUILTIN(DatePrototypeToDateString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toDateString");
- char buffer[128];
- ToDateString(date->value()->Number(), ArrayVector(buffer),
- isolate->date_cache(), kDateOnly);
+ DateBuffer buffer =
+ ToDateString(date->value()->Number(), isolate->date_cache(), kDateOnly);
RETURN_RESULT_OR_FAILURE(
- isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+ isolate, isolate->factory()->NewStringFromUtf8(VectorOf(buffer)));
}
// ES6 section 20.3.4.36 Date.prototype.toISOString ( )
@@ -824,22 +829,20 @@ BUILTIN(DatePrototypeToISOString) {
BUILTIN(DatePrototypeToString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toString");
- char buffer[128];
- ToDateString(date->value()->Number(), ArrayVector(buffer),
- isolate->date_cache());
+ DateBuffer buffer =
+ ToDateString(date->value()->Number(), isolate->date_cache());
RETURN_RESULT_OR_FAILURE(
- isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+ isolate, isolate->factory()->NewStringFromUtf8(VectorOf(buffer)));
}
// ES6 section 20.3.4.42 Date.prototype.toTimeString ( )
BUILTIN(DatePrototypeToTimeString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toTimeString");
- char buffer[128];
- ToDateString(date->value()->Number(), ArrayVector(buffer),
- isolate->date_cache(), kTimeOnly);
+ DateBuffer buffer =
+ ToDateString(date->value()->Number(), isolate->date_cache(), kTimeOnly);
RETURN_RESULT_OR_FAILURE(
- isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+ isolate, isolate->factory()->NewStringFromUtf8(VectorOf(buffer)));
}
#ifdef V8_INTL_SUPPORT
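What the date-builtin hunk above buys: the fixed `char buffer[128]` stack buffers are gone, and with them the TODO(842085) about silent truncation, because FormatDate now streams into a DateBuffer that is resized to the exact formatted length. A rough standalone analogue of that measure-then-format pattern, using only the standard library in place of V8's StringStream and small-string-optimized allocator:

    #include <cstdio>
    #include <string>

    // Format into a string sized to the exact output length, so the result
    // can never be silently truncated the way a fixed 128-byte buffer could.
    template <typename... Args>
    std::string FormatDate(const char* format, Args... args) {
      int length = std::snprintf(nullptr, 0, format, args...);  // measure
      if (length < 0) return std::string();
      std::string buffer(static_cast<size_t>(length) + 1, '\0');
      std::snprintf(&buffer[0], buffer.size(), format, args...);
      buffer.resize(static_cast<size_t>(length));  // drop the trailing NUL
      return buffer;
    }

    int main() {
      std::string s = FormatDate("%s %s %02d %04d", "Fri", "Mar", 15, 2019);
      std::puts(s.c_str());  // Fri Mar 15 2019
    }

Switching the call sites from CStrVector(buffer) to VectorOf(buffer) follows from the same change: the buffer now carries its own length, so no NUL-terminated scan is needed.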
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index e0abf90f0d..b53e72a376 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -22,7 +22,7 @@ namespace internal {
// TFS: Builtin in Turbofan, with CodeStub linkage.
// Args: name, explicit argument names...
// TFC: Builtin in Turbofan, with CodeStub linkage and custom descriptor.
-// Args: name, interface descriptor, return_size
+// Args: name, interface descriptor
// TFH: Handlers in Turbofan, with CodeStub linkage.
// Args: name, interface descriptor
// BCH: Bytecode Handlers, with bytecode dispatch linkage.
@@ -35,11 +35,11 @@ namespace internal {
#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
  /* GC write barrier */                                                     \
- TFC(RecordWrite, RecordWrite, 1) \
+ TFC(RecordWrite, RecordWrite) \
\
/* Adaptors for CPP/API builtin */ \
- TFC(AdaptorWithExitFrame, CppBuiltinAdaptor, 1) \
- TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor, 1) \
+ TFC(AdaptorWithExitFrame, CppBuiltinAdaptor) \
+ TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor) \
\
/* Calls */ \
ASM(ArgumentsAdaptorTrampoline, ArgumentsAdaptor) \
@@ -48,46 +48,52 @@ namespace internal {
ASM(CallFunction_ReceiverIsNotNullOrUndefined, CallTrampoline) \
ASM(CallFunction_ReceiverIsAny, CallTrampoline) \
/* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */ \
- ASM(CallBoundFunction, Dummy) \
+ ASM(CallBoundFunction, CallTrampoline) \
/* ES6 section 7.3.12 Call(F, V, [argumentsList]) */ \
ASM(Call_ReceiverIsNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsNotNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsAny, CallTrampoline) \
\
/* ES6 section 9.5.12[[Call]] ( thisArgument, argumentsList ) */ \
- TFC(CallProxy, CallTrampoline, 1) \
+ TFC(CallProxy, CallTrampoline) \
ASM(CallVarargs, CallVarargs) \
- TFC(CallWithSpread, CallWithSpread, 1) \
- TFC(CallWithArrayLike, CallWithArrayLike, 1) \
+ TFC(CallWithSpread, CallWithSpread) \
+ TFC(CallWithArrayLike, CallWithArrayLike) \
ASM(CallForwardVarargs, CallForwardVarargs) \
ASM(CallFunctionForwardVarargs, CallForwardVarargs) \
+ /* Call an API callback via a {FunctionTemplateInfo}, doing appropriate */ \
+ /* access and compatible receiver checks. */ \
+ TFC(CallFunctionTemplate_CheckAccess, CallFunctionTemplate) \
+ TFC(CallFunctionTemplate_CheckCompatibleReceiver, CallFunctionTemplate) \
+ TFC(CallFunctionTemplate_CheckAccessAndCompatibleReceiver, \
+ CallFunctionTemplate) \
\
/* Construct */ \
/* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */ \
ASM(ConstructFunction, JSTrampoline) \
/* ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget) */ \
- ASM(ConstructBoundFunction, Dummy) \
- ASM(ConstructedNonConstructable, Dummy) \
+ ASM(ConstructBoundFunction, JSTrampoline) \
+ ASM(ConstructedNonConstructable, JSTrampoline) \
/* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
ASM(Construct, JSTrampoline) \
ASM(ConstructVarargs, ConstructVarargs) \
- TFC(ConstructWithSpread, ConstructWithSpread, 1) \
- TFC(ConstructWithArrayLike, ConstructWithArrayLike, 1) \
+ TFC(ConstructWithSpread, ConstructWithSpread) \
+ TFC(ConstructWithArrayLike, ConstructWithArrayLike) \
ASM(ConstructForwardVarargs, ConstructForwardVarargs) \
ASM(ConstructFunctionForwardVarargs, ConstructForwardVarargs) \
ASM(JSConstructStubGeneric, Dummy) \
ASM(JSBuiltinsConstructStub, Dummy) \
- TFC(FastNewObject, FastNewObject, 1) \
+ TFC(FastNewObject, FastNewObject) \
TFS(FastNewClosure, kSharedFunctionInfo, kFeedbackCell) \
- TFC(FastNewFunctionContextEval, FastNewFunctionContext, 1) \
- TFC(FastNewFunctionContextFunction, FastNewFunctionContext, 1) \
+ TFC(FastNewFunctionContextEval, FastNewFunctionContext) \
+ TFC(FastNewFunctionContextFunction, FastNewFunctionContext) \
TFS(CreateRegExpLiteral, kFeedbackVector, kSlot, kPattern, kFlags) \
TFS(CreateEmptyArrayLiteral, kFeedbackVector, kSlot) \
TFS(CreateShallowArrayLiteral, kFeedbackVector, kSlot, kConstantElements) \
TFS(CreateShallowObjectLiteral, kFeedbackVector, kSlot, \
kObjectBoilerplateDescription, kFlags) \
/* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */ \
- TFC(ConstructProxy, JSTrampoline, 1) \
+ TFC(ConstructProxy, JSTrampoline) \
\
/* Apply and entries */ \
ASM(JSEntry, Dummy) \
@@ -97,22 +103,18 @@ namespace internal {
ASM(JSConstructEntryTrampoline, Dummy) \
ASM(ResumeGeneratorTrampoline, ResumeGenerator) \
\
- /* Stack and interrupt check */ \
- ASM(InterruptCheck, Dummy) \
- ASM(StackCheck, Dummy) \
- \
/* String helpers */ \
- TFC(StringCharAt, StringAt, 1) \
- TFC(StringCodePointAtUTF16, StringAt, 1) \
- TFC(StringCodePointAtUTF32, StringAt, 1) \
- TFC(StringEqual, Compare, 1) \
- TFC(StringGreaterThan, Compare, 1) \
- TFC(StringGreaterThanOrEqual, Compare, 1) \
+ TFC(StringCharAt, StringAt) \
+ TFC(StringCodePointAtUTF16, StringAt) \
+ TFC(StringCodePointAtUTF32, StringAt) \
+ TFC(StringEqual, Compare) \
+ TFC(StringGreaterThan, Compare) \
+ TFC(StringGreaterThanOrEqual, Compare) \
TFS(StringIndexOf, kReceiver, kSearchString, kPosition) \
- TFC(StringLessThan, Compare, 1) \
- TFC(StringLessThanOrEqual, Compare, 1) \
+ TFC(StringLessThan, Compare) \
+ TFC(StringLessThanOrEqual, Compare) \
TFS(StringRepeat, kString, kCount) \
- TFC(StringSubstring, StringSubstring, 1) \
+ TFC(StringSubstring, StringSubstring) \
\
/* OrderedHashTable helpers */ \
TFS(OrderedHashTableHealIndex, kTable, kIndex) \
@@ -132,8 +134,8 @@ namespace internal {
ASM(InterpreterOnStackReplacement, ContextOnly) \
\
/* Code life-cycle */ \
- TFC(CompileLazy, JSTrampoline, 1) \
- TFC(CompileLazyDeoptimizedCode, JSTrampoline, 1) \
+ TFC(CompileLazy, JSTrampoline) \
+ TFC(CompileLazyDeoptimizedCode, JSTrampoline) \
ASM(InstantiateAsmJs, Dummy) \
ASM(NotifyDeoptimized, Dummy) \
\
@@ -170,14 +172,14 @@ namespace internal {
API(HandleApiCallAsConstructor) \
\
/* Adapters for Turbofan into runtime */ \
- TFC(AllocateInNewSpace, Allocate, 1) \
- TFC(AllocateInOldSpace, Allocate, 1) \
+ TFC(AllocateInNewSpace, Allocate) \
+ TFC(AllocateInOldSpace, Allocate) \
\
/* TurboFan support builtins */ \
TFS(CopyFastSmiOrObjectElements, kObject) \
- TFC(GrowFastDoubleElements, GrowArrayElements, 1) \
- TFC(GrowFastSmiOrObjectElements, GrowArrayElements, 1) \
- TFC(NewArgumentsElements, NewArgumentsElements, 1) \
+ TFC(GrowFastDoubleElements, GrowArrayElements) \
+ TFC(GrowFastSmiOrObjectElements, GrowArrayElements) \
+ TFC(NewArgumentsElements, NewArgumentsElements) \
\
/* Debugger */ \
TFJ(DebugBreakTrampoline, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -185,37 +187,37 @@ namespace internal {
ASM(HandleDebuggerStatement, ContextOnly) \
\
/* Type conversions */ \
- TFC(ToObject, TypeConversion, 1) \
- TFC(ToBoolean, TypeConversion, 1) \
- TFC(OrdinaryToPrimitive_Number, TypeConversion, 1) \
- TFC(OrdinaryToPrimitive_String, TypeConversion, 1) \
- TFC(NonPrimitiveToPrimitive_Default, TypeConversion, 1) \
- TFC(NonPrimitiveToPrimitive_Number, TypeConversion, 1) \
- TFC(NonPrimitiveToPrimitive_String, TypeConversion, 1) \
- TFC(StringToNumber, TypeConversion, 1) \
- TFC(ToName, TypeConversion, 1) \
- TFC(NonNumberToNumber, TypeConversion, 1) \
- TFC(NonNumberToNumeric, TypeConversion, 1) \
- TFC(ToNumber, TypeConversion, 1) \
- TFC(ToNumberConvertBigInt, TypeConversion, 1) \
- TFC(ToNumeric, TypeConversion, 1) \
- TFC(NumberToString, TypeConversion, 1) \
- TFC(ToString, TypeConversion, 1) \
- TFC(ToInteger, TypeConversion, 1) \
- TFC(ToInteger_TruncateMinusZero, TypeConversion, 1) \
- TFC(ToLength, TypeConversion, 1) \
- TFC(Typeof, Typeof, 1) \
- TFC(GetSuperConstructor, Typeof, 1) \
- TFC(BigIntToI64, BigIntToI64, 1) \
- TFC(I64ToBigInt, BigIntToWasmI64, 1) \
+ TFC(ToObject, TypeConversion) \
+ TFC(ToBoolean, TypeConversion) \
+ TFC(OrdinaryToPrimitive_Number, TypeConversion) \
+ TFC(OrdinaryToPrimitive_String, TypeConversion) \
+ TFC(NonPrimitiveToPrimitive_Default, TypeConversion) \
+ TFC(NonPrimitiveToPrimitive_Number, TypeConversion) \
+ TFC(NonPrimitiveToPrimitive_String, TypeConversion) \
+ TFC(StringToNumber, TypeConversion) \
+ TFC(ToName, TypeConversion) \
+ TFC(NonNumberToNumber, TypeConversion) \
+ TFC(NonNumberToNumeric, TypeConversion) \
+ TFC(ToNumber, TypeConversion) \
+ TFC(ToNumberConvertBigInt, TypeConversion) \
+ TFC(ToNumeric, TypeConversion) \
+ TFC(NumberToString, TypeConversion) \
+ TFC(ToString, TypeConversion) \
+ TFC(ToInteger, TypeConversion) \
+ TFC(ToInteger_TruncateMinusZero, TypeConversion) \
+ TFC(ToLength, TypeConversion) \
+ TFC(Typeof, Typeof) \
+ TFC(GetSuperConstructor, Typeof) \
+ TFC(BigIntToI64, BigIntToI64) \
+ TFC(I64ToBigInt, BigIntToWasmI64) \
\
/* Type conversions continuations */ \
- TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter, 1) \
+ TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter) \
\
/* Handlers */ \
TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \
TFH(KeyedLoadIC_Slow, LoadWithVector) \
- TFH(KeyedStoreIC_Megamorphic, StoreWithVector) \
+ TFH(KeyedStoreIC_Megamorphic, Store) \
TFH(KeyedStoreIC_Slow, StoreWithVector) \
TFH(LoadGlobalIC_Slow, LoadWithVector) \
TFH(LoadIC_FunctionPrototype, LoadWithVector) \
@@ -249,19 +251,23 @@ namespace internal {
TFH(ElementsTransitionAndStore_GrowNoTransitionHandleCOW, StoreTransition) \
TFH(ElementsTransitionAndStore_NoTransitionIgnoreOOB, StoreTransition) \
TFH(ElementsTransitionAndStore_NoTransitionHandleCOW, StoreTransition) \
+ TFH(KeyedHasIC_PolymorphicName, LoadWithVector) \
+ TFH(KeyedHasIC_SloppyArguments, LoadWithVector) \
+ TFH(HasIndexedInterceptorIC, LoadWithVector) \
+ TFH(HasIC_Slow, LoadWithVector) \
\
/* Microtask helpers */ \
TFS(EnqueueMicrotask, kMicrotask) \
ASM(RunMicrotasksTrampoline, RunMicrotasksEntry) \
- TFC(RunMicrotasks, RunMicrotasks, 1) \
+ TFC(RunMicrotasks, RunMicrotasks) \
\
/* Object property helpers */ \
TFS(HasProperty, kObject, kKey) \
TFS(DeleteProperty, kObject, kKey, kLanguageMode) \
\
/* Abort */ \
- TFC(Abort, Abort, 1) \
- TFC(AbortJS, Abort, 1) \
+ TFC(Abort, Abort) \
+ TFC(AbortJS, Abort) \
\
  /* Built-in functions for JavaScript */                                    \
/* Special internal builtins */ \
@@ -272,45 +278,44 @@ namespace internal {
TFJ(ReturnReceiver, 0, kReceiver) \
\
/* Array */ \
- TFC(ArrayConstructor, JSTrampoline, 1) \
- TFC(ArrayConstructorImpl, ArrayConstructor, 1) \
+ TFC(ArrayConstructor, JSTrampoline) \
+ TFC(ArrayConstructorImpl, ArrayConstructor) \
TFC(ArrayNoArgumentConstructor_PackedSmi_DontOverride, \
- ArrayNoArgumentConstructor, 1) \
+ ArrayNoArgumentConstructor) \
TFC(ArrayNoArgumentConstructor_HoleySmi_DontOverride, \
- ArrayNoArgumentConstructor, 1) \
+ ArrayNoArgumentConstructor) \
TFC(ArrayNoArgumentConstructor_PackedSmi_DisableAllocationSites, \
- ArrayNoArgumentConstructor, 1) \
+ ArrayNoArgumentConstructor) \
TFC(ArrayNoArgumentConstructor_HoleySmi_DisableAllocationSites, \
- ArrayNoArgumentConstructor, 1) \
+ ArrayNoArgumentConstructor) \
TFC(ArrayNoArgumentConstructor_Packed_DisableAllocationSites, \
- ArrayNoArgumentConstructor, 1) \
+ ArrayNoArgumentConstructor) \
TFC(ArrayNoArgumentConstructor_Holey_DisableAllocationSites, \
- ArrayNoArgumentConstructor, 1) \
+ ArrayNoArgumentConstructor) \
TFC(ArrayNoArgumentConstructor_PackedDouble_DisableAllocationSites, \
- ArrayNoArgumentConstructor, 1) \
+ ArrayNoArgumentConstructor) \
TFC(ArrayNoArgumentConstructor_HoleyDouble_DisableAllocationSites, \
- ArrayNoArgumentConstructor, 1) \
+ ArrayNoArgumentConstructor) \
TFC(ArraySingleArgumentConstructor_PackedSmi_DontOverride, \
- ArraySingleArgumentConstructor, 1) \
+ ArraySingleArgumentConstructor) \
TFC(ArraySingleArgumentConstructor_HoleySmi_DontOverride, \
- ArraySingleArgumentConstructor, 1) \
+ ArraySingleArgumentConstructor) \
TFC(ArraySingleArgumentConstructor_PackedSmi_DisableAllocationSites, \
- ArraySingleArgumentConstructor, 1) \
+ ArraySingleArgumentConstructor) \
TFC(ArraySingleArgumentConstructor_HoleySmi_DisableAllocationSites, \
- ArraySingleArgumentConstructor, 1) \
+ ArraySingleArgumentConstructor) \
TFC(ArraySingleArgumentConstructor_Packed_DisableAllocationSites, \
- ArraySingleArgumentConstructor, 1) \
+ ArraySingleArgumentConstructor) \
TFC(ArraySingleArgumentConstructor_Holey_DisableAllocationSites, \
- ArraySingleArgumentConstructor, 1) \
+ ArraySingleArgumentConstructor) \
TFC(ArraySingleArgumentConstructor_PackedDouble_DisableAllocationSites, \
- ArraySingleArgumentConstructor, 1) \
+ ArraySingleArgumentConstructor) \
TFC(ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites, \
- ArraySingleArgumentConstructor, 1) \
- TFC(ArrayNArgumentsConstructor, ArrayNArgumentsConstructor, 1) \
- ASM(InternalArrayConstructor, Dummy) \
- ASM(InternalArrayConstructorImpl, Dummy) \
- TFC(InternalArrayNoArgumentConstructor_Packed, ArrayNoArgumentConstructor, \
- 1) \
+ ArraySingleArgumentConstructor) \
+ TFC(ArrayNArgumentsConstructor, ArrayNArgumentsConstructor) \
+ ASM(InternalArrayConstructor, JSTrampoline) \
+ ASM(InternalArrayConstructorImpl, JSTrampoline) \
+ TFC(InternalArrayNoArgumentConstructor_Packed, ArrayNoArgumentConstructor) \
CPP(ArrayConcat) \
/* ES6 #sec-array.isarray */ \
TFJ(ArrayIsArray, 1, kReceiver, kArg) \
@@ -348,73 +353,8 @@ namespace internal {
TFS(CloneFastJSArray, kSource) \
TFS(CloneFastJSArrayFillingHoles, kSource) \
TFS(ExtractFastJSArray, kSource, kBegin, kCount) \
- /* ES6 #sec-array.prototype.every */ \
- TFS(ArrayEveryLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
- kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayEveryLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
- kThisArg, kInitialK, kLength) \
- TFJ(ArrayEveryLoopLazyDeoptContinuation, 5, kReceiver, kCallbackFn, \
- kThisArg, kInitialK, kLength, kResult) \
- TFJ(ArrayEvery, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-array.prototype.some */ \
- TFS(ArraySomeLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
- kObject, kInitialK, kLength, kTo) \
- TFJ(ArraySomeLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
- kThisArg, kInitialK, kLength) \
- TFJ(ArraySomeLoopLazyDeoptContinuation, 5, kReceiver, kCallbackFn, kThisArg, \
- kInitialK, kLength, kResult) \
- TFJ(ArraySome, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-array.prototype.foreach */ \
- TFS(ArrayMapLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
- kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayMapLoopEagerDeoptContinuation, 5, kReceiver, kCallbackFn, kThisArg, \
- kArray, kInitialK, kLength) \
- TFJ(ArrayMapLoopLazyDeoptContinuation, 6, kReceiver, kCallbackFn, kThisArg, \
- kArray, kInitialK, kLength, kResult) \
- TFJ(ArrayMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-array.prototype.reduce */ \
- TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
- kAccumulator, kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayReducePreLoopEagerDeoptContinuation, 2, kReceiver, kCallbackFn, \
- kLength) \
- TFJ(ArrayReduceLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
- kInitialK, kLength, kAccumulator) \
- TFJ(ArrayReduceLoopLazyDeoptContinuation, 4, kReceiver, kCallbackFn, \
- kInitialK, kLength, kResult) \
- TFJ(ArrayReduce, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-array.prototype.reduceRight */ \
- TFS(ArrayReduceRightLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
- kAccumulator, kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayReduceRightPreLoopEagerDeoptContinuation, 2, kReceiver, \
- kCallbackFn, kLength) \
- TFJ(ArrayReduceRightLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
- kInitialK, kLength, kAccumulator) \
- TFJ(ArrayReduceRightLoopLazyDeoptContinuation, 4, kReceiver, kCallbackFn, \
- kInitialK, kLength, kResult) \
- TFJ(ArrayReduceRight, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.entries */ \
TFJ(ArrayPrototypeEntries, 0, kReceiver) \
- /* ES6 #sec-array.prototype.find */ \
- TFS(ArrayFindLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
- kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayFindLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
- kThisArg, kInitialK, kLength) \
- TFJ(ArrayFindLoopLazyDeoptContinuation, 5, kReceiver, kCallbackFn, kThisArg, \
- kInitialK, kLength, kResult) \
- TFJ(ArrayFindLoopAfterCallbackLazyDeoptContinuation, 6, kReceiver, \
- kCallbackFn, kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \
- TFJ(ArrayPrototypeFind, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-array.prototype.findIndex */ \
- TFS(ArrayFindIndexLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
- kArray, kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayFindIndexLoopEagerDeoptContinuation, 4, kReceiver, kCallbackFn, \
- kThisArg, kInitialK, kLength) \
- TFJ(ArrayFindIndexLoopLazyDeoptContinuation, 5, kReceiver, kCallbackFn, \
- kThisArg, kInitialK, kLength, kResult) \
- TFJ(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation, 6, kReceiver, \
- kCallbackFn, kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \
- TFJ(ArrayPrototypeFindIndex, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.keys */ \
TFJ(ArrayPrototypeKeys, 0, kReceiver) \
/* ES6 #sec-array.prototype.values */ \
@@ -442,7 +382,7 @@ namespace internal {
TFS(AsyncFunctionEnter, kClosure, kReceiver) \
TFS(AsyncFunctionReject, kAsyncFunctionObject, kReason, kCanSuspend) \
TFS(AsyncFunctionResolve, kAsyncFunctionObject, kValue, kCanSuspend) \
- TFC(AsyncFunctionLazyDeoptContinuation, AsyncFunctionStackParameter, 1) \
+ TFC(AsyncFunctionLazyDeoptContinuation, AsyncFunctionStackParameter) \
TFS(AsyncFunctionAwaitCaught, kAsyncFunctionObject, kValue) \
TFS(AsyncFunctionAwaitUncaught, kAsyncFunctionObject, kValue) \
TFJ(AsyncFunctionAwaitRejectClosure, 1, kReceiver, kSentError) \
@@ -668,6 +608,8 @@ namespace internal {
TFH(LoadGlobalICInsideTypeofTrampoline, LoadGlobal) \
TFH(CloneObjectIC, CloneObjectWithVector) \
TFH(CloneObjectIC_Slow, CloneObjectWithVector) \
+ TFH(KeyedHasIC, LoadWithVector) \
+ TFH(KeyedHasIC_Megamorphic, LoadWithVector) \
\
/* IterableToList */ \
/* ES #sec-iterabletolist */ \
@@ -770,7 +712,7 @@ namespace internal {
TFJ(MathTrunc, 1, kReceiver, kX) \
\
/* Number */ \
- TFC(AllocateHeapNumber, AllocateHeapNumber, 1) \
+ TFC(AllocateHeapNumber, AllocateHeapNumber) \
/* ES #sec-number-constructor */ \
TFJ(NumberConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-number.isfinite */ \
@@ -793,25 +735,25 @@ namespace internal {
CPP(NumberPrototypeToString) \
/* ES6 #sec-number.prototype.valueof */ \
TFJ(NumberPrototypeValueOf, 0, kReceiver) \
- TFC(Add, BinaryOp, 1) \
- TFC(Subtract, BinaryOp, 1) \
- TFC(Multiply, BinaryOp, 1) \
- TFC(Divide, BinaryOp, 1) \
- TFC(Modulus, BinaryOp, 1) \
- TFC(Exponentiate, BinaryOp, 1) \
- TFC(BitwiseAnd, BinaryOp, 1) \
- TFC(BitwiseOr, BinaryOp, 1) \
- TFC(BitwiseXor, BinaryOp, 1) \
- TFC(ShiftLeft, BinaryOp, 1) \
- TFC(ShiftRight, BinaryOp, 1) \
- TFC(ShiftRightLogical, BinaryOp, 1) \
- TFC(LessThan, Compare, 1) \
- TFC(LessThanOrEqual, Compare, 1) \
- TFC(GreaterThan, Compare, 1) \
- TFC(GreaterThanOrEqual, Compare, 1) \
- TFC(Equal, Compare, 1) \
- TFC(SameValue, Compare, 1) \
- TFC(StrictEqual, Compare, 1) \
+ TFC(Add, BinaryOp) \
+ TFC(Subtract, BinaryOp) \
+ TFC(Multiply, BinaryOp) \
+ TFC(Divide, BinaryOp) \
+ TFC(Modulus, BinaryOp) \
+ TFC(Exponentiate, BinaryOp) \
+ TFC(BitwiseAnd, BinaryOp) \
+ TFC(BitwiseOr, BinaryOp) \
+ TFC(BitwiseXor, BinaryOp) \
+ TFC(ShiftLeft, BinaryOp) \
+ TFC(ShiftRight, BinaryOp) \
+ TFC(ShiftRightLogical, BinaryOp) \
+ TFC(LessThan, Compare) \
+ TFC(LessThanOrEqual, Compare) \
+ TFC(GreaterThan, Compare) \
+ TFC(GreaterThanOrEqual, Compare) \
+ TFC(Equal, Compare) \
+ TFC(SameValue, Compare) \
+ TFC(StrictEqual, Compare) \
TFS(BitwiseNot, kValue) \
TFS(Decrement, kValue) \
TFS(Increment, kValue) \
@@ -862,8 +804,8 @@ namespace internal {
TFJ(ObjectValues, 1, kReceiver, kObject) \
\
/* instanceof */ \
- TFC(OrdinaryHasInstance, Compare, 1) \
- TFC(InstanceOf, Compare, 1) \
+ TFC(OrdinaryHasInstance, Compare) \
+ TFC(InstanceOf, Compare) \
\
/* for-in */ \
TFS(ForInEnumerate, kReceiver) \
@@ -930,7 +872,7 @@ namespace internal {
TFJ(ProxyRevoke, 0, kReceiver) \
TFS(ProxyGetProperty, kProxy, kName, kReceiverValue, kOnNonExistent) \
TFS(ProxyHasProperty, kProxy, kName) \
- TFS(ProxySetProperty, kProxy, kName, kValue, kReceiverValue, kLanguageMode) \
+ TFS(ProxySetProperty, kProxy, kName, kValue, kReceiverValue) \
\
/* Reflect */ \
ASM(ReflectApply, Dummy) \
@@ -959,7 +901,6 @@ namespace internal {
CPP(RegExpCapture9Getter) \
/* ES #sec-regexp-pattern-flags */ \
TFJ(RegExpConstructor, 2, kReceiver, kPattern, kFlags) \
- TFJ(RegExpInternalMatch, 2, kReceiver, kRegExp, kString) \
CPP(RegExpInputGetter) \
CPP(RegExpInputSetter) \
CPP(RegExpLastMatchGetter) \
@@ -1073,8 +1014,6 @@ namespace internal {
TFJ(StringPrototypeCodePointAt, 1, kReceiver, kPosition) \
/* ES6 #sec-string.prototype.concat */ \
TFJ(StringPrototypeConcat, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-string.prototype.endswith */ \
- CPP(StringPrototypeEndsWith) \
/* ES6 #sec-string.prototype.fontcolor */ \
TFJ(StringPrototypeFontcolor, 1, kReceiver, kValue) \
/* ES6 #sec-string.prototype.fontsize */ \
@@ -1126,8 +1065,6 @@ namespace internal {
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.sup */ \
TFJ(StringPrototypeSup, 0, kReceiver) \
- /* ES6 #sec-string.prototype.startswith */ \
- CPP(StringPrototypeStartsWith) \
/* ES6 #sec-string.prototype.tostring */ \
TFJ(StringPrototypeToString, 0, kReceiver) \
TFJ(StringPrototypeTrim, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1163,12 +1100,7 @@ namespace internal {
TFJ(SymbolPrototypeValueOf, 0, kReceiver) \
\
/* TypedArray */ \
- TFS(TypedArrayInitialize, kHolder, kLength, kElementSize, kInitialize, \
- kBufferConstructor) \
- TFS(TypedArrayInitializeWithBuffer, kHolder, kLength, kBuffer, kElementSize, \
- kByteOffset) \
/* ES #sec-typedarray-constructors */ \
- TFS(CreateTypedArray, kTarget, kNewTarget, kArg1, kArg2, kArg3) \
TFJ(TypedArrayBaseConstructor, 0, kReceiver) \
TFJ(GenericConstructorLazyDeoptContinuation, 1, kReceiver, kResult) \
TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1189,9 +1121,6 @@ namespace internal {
CPP(TypedArrayPrototypeCopyWithin) \
/* ES6 #sec-%typedarray%.prototype.fill */ \
CPP(TypedArrayPrototypeFill) \
- /* ES6 #sec-%typedarray%.prototype.filter */ \
- TFJ(TypedArrayPrototypeFilter, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.prototype.find */ \
TFJ(TypedArrayPrototypeFind, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1208,12 +1137,6 @@ namespace internal {
CPP(TypedArrayPrototypeReverse) \
/* ES6 %TypedArray%.prototype.set */ \
TFJ(TypedArrayPrototypeSet, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-%typedarray%.prototype.slice */ \
- TFJ(TypedArrayPrototypeSlice, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 %TypedArray%.prototype.subarray */ \
- TFJ(TypedArrayPrototypeSubArray, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
TFJ(TypedArrayPrototypeToStringTag, 0, kReceiver) \
/* ES6 %TypedArray%.prototype.every */ \
@@ -1222,17 +1145,8 @@ namespace internal {
/* ES6 %TypedArray%.prototype.some */ \
TFJ(TypedArrayPrototypeSome, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 %TypedArray%.prototype.reduce */ \
- TFJ(TypedArrayPrototypeReduce, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 %TypedArray%.prototype.reduceRight */ \
- TFJ(TypedArrayPrototypeReduceRight, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.prototype.map */ \
TFJ(TypedArrayPrototypeMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 %TypedArray%.prototype.forEach */ \
- TFJ(TypedArrayPrototypeForEach, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.of */ \
TFJ(TypedArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.from */ \
@@ -1240,16 +1154,17 @@ namespace internal {
\
/* Wasm */ \
ASM(WasmCompileLazy, Dummy) \
- TFC(WasmAllocateHeapNumber, AllocateHeapNumber, 1) \
- TFC(WasmAtomicWake, WasmAtomicWake, 1) \
- TFC(WasmI32AtomicWait, WasmI32AtomicWait, 1) \
- TFC(WasmI64AtomicWait, WasmI64AtomicWait, 1) \
- TFC(WasmCallJavaScript, CallTrampoline, 1) \
- TFC(WasmMemoryGrow, WasmMemoryGrow, 1) \
- TFC(WasmRecordWrite, RecordWrite, 1) \
- TFC(WasmStackGuard, NoContext, 1) \
- TFC(WasmToNumber, TypeConversion, 1) \
- TFC(WasmThrow, WasmThrow, 1) \
+ TFC(WasmAllocateHeapNumber, AllocateHeapNumber) \
+ TFC(WasmAtomicWake, WasmAtomicWake) \
+ TFC(WasmI32AtomicWait, WasmI32AtomicWait) \
+ TFC(WasmI64AtomicWait, WasmI64AtomicWait) \
+ TFC(WasmCallJavaScript, CallTrampoline) \
+ TFC(WasmMemoryGrow, WasmMemoryGrow) \
+ TFC(WasmRecordWrite, RecordWrite) \
+ TFC(WasmStackGuard, NoContext) \
+ TFC(WasmStackOverflow, NoContext) \
+ TFC(WasmToNumber, TypeConversion) \
+ TFC(WasmThrow, WasmThrow) \
TFS(ThrowWasmTrapUnreachable) \
TFS(ThrowWasmTrapMemOutOfBounds) \
TFS(ThrowWasmTrapUnalignedAccess) \
@@ -1262,8 +1177,8 @@ namespace internal {
TFS(ThrowWasmTrapDataSegmentDropped) \
TFS(ThrowWasmTrapElemSegmentDropped) \
TFS(ThrowWasmTrapTableOutOfBounds) \
- TFC(BigIntToWasmI64, BigIntToWasmI64, 1) \
- TFC(WasmBigIntToI64, BigIntToI64, 1) \
+ TFC(BigIntToWasmI64, BigIntToWasmI64) \
+ TFC(WasmBigIntToI64, BigIntToI64) \
\
/* WeakMap */ \
TFJ(WeakMapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1351,11 +1266,11 @@ namespace internal {
TFS(SubString, kString, kFrom, kTo) \
\
/* Miscellaneous */ \
+ ASM(StackCheck, Dummy) \
ASM(DoubleToI, Dummy) \
- TFC(GetProperty, GetProperty, 1) \
+ TFC(GetProperty, GetProperty) \
TFS(SetProperty, kReceiver, kKey, kValue) \
TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \
- ASM(MathPowInternal, Dummy) \
ASM(MemCopyUint8Uint8, CCall) \
ASM(MemCopyUint16Uint8, CCall) \
ASM(MemMove, CCall) \
@@ -1365,12 +1280,11 @@ namespace internal {
CPP(Trace) \
\
/* Weak refs */ \
- CPP(WeakCellClear) \
- CPP(WeakCellHoldingsGetter) \
- CPP(WeakFactoryCleanupIteratorNext) \
- CPP(WeakFactoryCleanupSome) \
- CPP(WeakFactoryConstructor) \
- CPP(WeakFactoryMakeCell) \
+ CPP(FinalizationGroupCleanupIteratorNext) \
+ CPP(FinalizationGroupCleanupSome) \
+ CPP(FinalizationGroupConstructor) \
+ CPP(FinalizationGroupRegister) \
+ CPP(FinalizationGroupUnregister) \
CPP(WeakRefConstructor) \
CPP(WeakRefDeref)
@@ -1555,6 +1469,7 @@ namespace internal {
V(WasmMemoryGrow) \
V(WasmRecordWrite) \
V(WasmStackGuard) \
+ V(WasmStackOverflow) \
V(WasmToNumber) \
V(WasmThrow) \
V(DoubleToI) \
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index 1163730599..81c760a26d 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -30,8 +30,7 @@ namespace internal {
};
// Define interface descriptors for builtins with StubCall linkage.
-#define DEFINE_TFC_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor, \
- result_size) \
+#define DEFINE_TFC_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
typedef InterfaceDescriptor##Descriptor Builtin_##Name##_InterfaceDescriptor;
#define DEFINE_TFS_INTERFACE_DESCRIPTOR(Name, ...) \
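This macro change is what drives the long mechanical edit in builtins-definitions.h above: DEFINE_TFC_INTERFACE_DESCRIPTOR now emits only the descriptor typedef, so the number of results is a property of the interface descriptor type rather than a third argument that every TFC(...) entry had to repeat (and could state wrongly). A compilable sketch of the token-pasting pattern, with a hand-written stand-in for V8's generated descriptor:

    // TFC(Add, BinaryOp) binds the builtin to its descriptor type; any
    // per-descriptor property, such as the return count, is then read off
    // that type instead of being passed alongside it.
    #define DEFINE_TFC_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
      typedef InterfaceDescriptor##Descriptor                          \
          Builtin_##Name##_InterfaceDescriptor;

    struct BinaryOpDescriptor {  // stand-in for the real descriptor
      static constexpr int kReturnCount = 1;
    };

    DEFINE_TFC_INTERFACE_DESCRIPTOR(Add, BinaryOp)

    static_assert(Builtin_Add_InterfaceDescriptor::kReturnCount == 1,
                  "the return count comes from the descriptor");

    int main() { return 0; }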
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 6defa92ca3..48ffc3ba0c 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index aeaa804856..00602203b3 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -70,19 +70,18 @@ void Builtins::Generate_StoreIC_Uninitialized(
StoreICUninitializedGenerator::Generate(state);
}
+// TODO(mythria): Check if we can remove feedback vector and slot parameters in
+// descriptor.
void HandlerBuiltinsAssembler::Generate_KeyedStoreIC_Slow() {
typedef StoreWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
Node* context = Parameter(Descriptor::kContext);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- TailCallRuntime(Runtime::kKeyedStoreIC_Slow, context, value, slot, vector,
- receiver, name);
+ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, context, value, receiver, name);
}
TF_BUILTIN(KeyedStoreIC_Slow, HandlerBuiltinsAssembler) {
@@ -494,5 +493,50 @@ TF_BUILTIN(LoadIndexedInterceptorIC, CodeStubAssembler) {
vector);
}
+TF_BUILTIN(KeyedHasIC_SloppyArguments, CodeStubAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* key = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label miss(this);
+
+ Node* result = HasKeyedSloppyArguments(receiver, key, &miss);
+ Return(result);
+
+ BIND(&miss);
+ {
+ Comment("Miss");
+ TailCallRuntime(Runtime::kKeyedHasIC_Miss, context, receiver, key, slot,
+ vector);
+ }
+}
+
+TF_BUILTIN(HasIndexedInterceptorIC, CodeStubAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* key = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label if_keyispositivesmi(this), if_keyisinvalid(this);
+ Branch(TaggedIsPositiveSmi(key), &if_keyispositivesmi, &if_keyisinvalid);
+ BIND(&if_keyispositivesmi);
+ TailCallRuntime(Runtime::kHasElementWithInterceptor, context, receiver, key);
+
+ BIND(&if_keyisinvalid);
+ TailCallRuntime(Runtime::kKeyedHasIC_Miss, context, receiver, key, slot,
+ vector);
+}
+
+TF_BUILTIN(HasIC_Slow, CodeStubAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* context = Parameter(Descriptor::kContext);
+
+ TailCallRuntime(Runtime::kHasProperty, context, receiver, name);
+}
+
} // namespace internal
} // namespace v8
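Together with the KeyedHasIC / KeyedHasIC_Megamorphic / KeyedHasIC_PolymorphicName entries added to the builtin list and to builtins-ic-gen.cc below, these handlers give the `in` operator its own inline-cache tier: fast probes for cases the feedback has already seen, with misses funneled into Runtime::kKeyedHasIC_Miss and, at the bottom, HasIC_Slow's plain Runtime::kHasProperty call. A deliberately tiny model of that fast/slow split (real ICs key on hidden classes and update feedback on a miss; plain containers stand in for both here):

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    using Object = std::unordered_map<std::string, int>;

    // Slow path: generic, always-correct lookup, analogous to HasIC_Slow
    // tail-calling Runtime::kHasProperty.
    bool HasPropertySlow(const Object& receiver, const std::string& key) {
      return receiver.count(key) != 0;
    }

    // Fast path: trust the cached feedback when it matches, otherwise
    // fall through to the slow path (a miss, in IC terms).
    bool KeyedHas(const Object& receiver, const std::string& key,
                  const std::string* cached_present_key) {
      if (cached_present_key != nullptr && *cached_present_key == key)
        return true;
      return HasPropertySlow(receiver, key);
    }

    int main() {
      Object o{{"x", 1}};
      std::string cached = "x";
      std::printf("%d %d\n", KeyedHas(o, "x", &cached),  // 1, fast path
                  KeyedHas(o, "y", nullptr));            // 0, slow path
    }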
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index 94d75a8f32..ce944784ea 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -40,6 +40,9 @@ IC_BUILTIN(KeyedStoreICTrampoline)
IC_BUILTIN(StoreInArrayLiteralIC)
IC_BUILTIN(CloneObjectIC)
IC_BUILTIN(CloneObjectIC_Slow)
+IC_BUILTIN(KeyedHasIC)
+IC_BUILTIN(KeyedHasIC_Megamorphic)
+IC_BUILTIN(KeyedHasIC_PolymorphicName)
IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF)
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index bae7fd6e1c..312ca63e41 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -21,11 +21,7 @@ template <typename T>
using TNode = compiler::TNode<T>;
// -----------------------------------------------------------------------------
-// Interrupt and stack checks.
-
-void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kInterrupt);
-}
+// Stack checks.
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
masm->TailCallRuntime(Runtime::kStackGuard);
@@ -221,15 +217,16 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
return Load(MachineType::Uint8(), is_marking_addr);
}
- Node* IsPageFlagSet(Node* object, int mask) {
- Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));
- Node* flags = Load(MachineType::Pointer(), page,
- IntPtrConstant(MemoryChunk::kFlagsOffset));
+ TNode<BoolT> IsPageFlagSet(TNode<IntPtrT> object, int mask) {
+ TNode<IntPtrT> page = PageFromAddress(object);
+ TNode<IntPtrT> flags =
+ UncheckedCast<IntPtrT>(Load(MachineType::Pointer(), page,
+ IntPtrConstant(MemoryChunk::kFlagsOffset)));
return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
IntPtrConstant(0));
}
- Node* IsWhite(Node* object) {
+ TNode<BoolT> IsWhite(TNode<IntPtrT> object) {
DCHECK_EQ(strcmp(Marking::kWhiteBitPattern, "00"), 0);
Node* cell;
Node* mask;
@@ -241,8 +238,8 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
Int32Constant(0));
}
- void GetMarkBit(Node* object, Node** cell, Node** mask) {
- Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));
+ void GetMarkBit(TNode<IntPtrT> object, Node** cell, Node** mask) {
+ TNode<IntPtrT> page = PageFromAddress(object);
Node* bitmap = Load(MachineType::Pointer(), page,
IntPtrConstant(MemoryChunk::kMarkBitmapOffset));
@@ -361,7 +358,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
BIND(&generational_wb);
{
- Label test_old_to_new_flags(this);
+ Label test_old_to_young_flags(this);
Label store_buffer_exit(this), store_buffer_incremental_wb(this);
// When incremental marking is not on, we skip cross generation pointer
@@ -370,24 +367,26 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
// `kPointersToHereAreInterestingMask` in
// `src/compiler/<arch>/code-generator-<arch>.cc` before calling this stub,
// which serves as the cross generation checking.
- Node* slot = Parameter(Descriptor::kSlot);
- Branch(IsMarking(), &test_old_to_new_flags, &store_buffer_exit);
+ TNode<IntPtrT> slot = UncheckedCast<IntPtrT>(Parameter(Descriptor::kSlot));
+ Branch(IsMarking(), &test_old_to_young_flags, &store_buffer_exit);
- BIND(&test_old_to_new_flags);
+ BIND(&test_old_to_young_flags);
{
- Node* value = Load(MachineType::Pointer(), slot);
+ // TODO(ishell): do a new-space range check instead.
+ TNode<IntPtrT> value =
+ BitcastTaggedToWord(Load(MachineType::TaggedPointer(), slot));
// TODO(albertnetymk): Try to cache the page flag for value and object,
// instead of calling IsPageFlagSet each time.
- Node* value_in_new_space =
- IsPageFlagSet(value, MemoryChunk::kIsInNewSpaceMask);
- GotoIfNot(value_in_new_space, &incremental_wb);
-
- Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
- Node* object_in_new_space =
- IsPageFlagSet(object, MemoryChunk::kIsInNewSpaceMask);
- Branch(object_in_new_space, &incremental_wb,
- &store_buffer_incremental_wb);
+ TNode<BoolT> value_is_young =
+ IsPageFlagSet(value, MemoryChunk::kIsInYoungGenerationMask);
+ GotoIfNot(value_is_young, &incremental_wb);
+
+ TNode<IntPtrT> object =
+ BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ TNode<BoolT> object_is_young =
+ IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask);
+ Branch(object_is_young, &incremental_wb, &store_buffer_incremental_wb);
}
BIND(&store_buffer_exit);
@@ -412,8 +411,9 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
{
Label call_incremental_wb(this);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* value = Load(MachineType::Pointer(), slot);
+ TNode<IntPtrT> slot = UncheckedCast<IntPtrT>(Parameter(Descriptor::kSlot));
+ TNode<IntPtrT> value =
+ BitcastTaggedToWord(Load(MachineType::TaggedPointer(), slot));
// There are two cases we need to call incremental write barrier.
// 1) value_is_white
@@ -424,7 +424,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
&exit);
- Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ TNode<IntPtrT> object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
Branch(
IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
&exit, &call_incremental_wb);
@@ -436,7 +436,8 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Node* isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* fp_mode = Parameter(Descriptor::kFPMode);
- Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ TNode<IntPtrT> object =
+ BitcastTaggedToWord(Parameter(Descriptor::kObject));
CallCFunction3WithCallerSavedRegistersMode(
MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
MachineType::Pointer(), function, object, slot, isolate_constant,
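Two threads run through the RecordWrite hunks above: the generational terminology moves from new space to young generation (kIsInYoungGenerationMask, test_old_to_young_flags), and the untyped WordAnd page arithmetic becomes a PageFromAddress helper operating on typed TNode values. The page trick itself is plain alignment masking; a standalone sketch with assumed constants (V8's actual page size and flag bits depend on the build):

    #include <cstdint>
    #include <cstdio>

    // Pages are aligned, so clearing the low bits of any object address
    // yields its page header, where the per-page GC flag word lives.
    constexpr uintptr_t kPageSize = uintptr_t{1} << 18;                // assumed
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
    constexpr uintptr_t kIsInYoungGenerationMask = uintptr_t{1} << 3;  // assumed

    uintptr_t PageFromAddress(uintptr_t addr) {
      return addr & ~kPageAlignmentMask;
    }

    int main() {
      uintptr_t object = uintptr_t{5} * kPageSize + 0x48;  // fake heap object
      uintptr_t page_flags = kIsInYoungGenerationMask;     // fake flag word
      bool object_is_young = (page_flags & kIsInYoungGenerationMask) != 0;
      std::printf("page=%llx young=%d\n",
                  static_cast<unsigned long long>(PageFromAddress(object)),
                  object_is_young);
    }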
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index ca8237f0df..47054037e5 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -45,6 +45,7 @@ BUILTIN(StringPrototypeToUpperCaseIntl) {
BUILTIN(StringPrototypeNormalizeIntl) {
HandleScope handle_scope(isolate);
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringNormalize);
TO_THIS_STRING(string, "String.prototype.normalize");
Handle<Object> form_input = args.atOrUndefined(isolate, 1);
@@ -82,14 +83,19 @@ BUILTIN(NumberFormatPrototypeFormatToParts) {
Handle<Object> x;
if (args.length() >= 2) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
- Object::ToNumber(isolate, args.at(1)));
+ if (FLAG_harmony_intl_bigint) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, x, Object::ToNumeric(isolate, args.at(1)));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
+ Object::ToNumber(isolate, args.at(1)));
+ }
} else {
x = isolate->factory()->nan_value();
}
- RETURN_RESULT_OR_FAILURE(isolate, JSNumberFormat::FormatToParts(
- isolate, number_format, x->Number()));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSNumberFormat::FormatToParts(isolate, number_format, x));
}
BUILTIN(DateTimeFormatPrototypeResolvedOptions) {
@@ -248,7 +254,7 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
desc.set_configurable(false);
Maybe<bool> success = JSReceiver::DefineOwnProperty(
isolate, rec, isolate->factory()->intl_fallback_symbol(), &desc,
- kThrowOnError);
+ Just(kThrowOnError));
MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception());
CHECK(success.FromJust());
// b. b. Return this.
@@ -400,19 +406,23 @@ BUILTIN(NumberFormatInternalFormatNumber) {
// 3. If value is not provided, let value be undefined.
Handle<Object> value = args.atOrUndefined(isolate, 1);
- // 4. Let x be ? ToNumber(value).
- Handle<Object> number_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_obj,
- Object::ToNumber(isolate, value));
+ // 4. Let x be ? ToNumeric(value).
+ Handle<Object> numeric_obj;
+ if (FLAG_harmony_intl_bigint) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj,
+ Object::ToNumeric(isolate, value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj,
+ Object::ToNumber(isolate, value));
+ }
- double number = number_obj->Number();
icu::NumberFormat* icu_number_format =
number_format->icu_number_format()->raw();
CHECK_NOT_NULL(icu_number_format);
- // Return FormatNumber(nf, x).
- RETURN_RESULT_OR_FAILURE(isolate, JSNumberFormat::FormatNumber(
- isolate, *icu_number_format, number));
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ JSNumberFormat::FormatNumeric(isolate, *icu_number_format, numeric_obj));
}
BUILTIN(DateTimeFormatConstructor) {
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index 0c00777dd3..2c1b748d0f 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -9,6 +9,7 @@
#include "src/objects/js-weak-refs.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/promise.h"
+#include "src/objects/smi-inl.h"
namespace v8 {
namespace internal {
@@ -33,8 +34,11 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
TNode<IntPtrT> CalculateRingBufferOffset(TNode<IntPtrT> capacity,
TNode<IntPtrT> start,
TNode<IntPtrT> index);
+
+ void PrepareForContext(TNode<Context> microtask_context, Label* bailout);
void RunSingleMicrotask(TNode<Context> current_context,
TNode<Microtask> microtask);
+ void IncrementFinishedMicrotaskCount(TNode<RawPtrT> microtask_queue);
TNode<Context> GetCurrentContext();
void SetCurrentContext(TNode<Context> context);
@@ -100,6 +104,18 @@ TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::CalculateRingBufferOffset(
WordAnd(IntPtrAdd(start, index), IntPtrSub(capacity, IntPtrConstant(1))));
}
+void MicrotaskQueueBuiltinsAssembler::PrepareForContext(
+ TNode<Context> native_context, Label* bailout) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+
+  // Skip the microtask execution if the associated context is shut down.
+ GotoIf(WordEqual(GetMicrotaskQueue(native_context), IntPtrConstant(0)),
+ bailout);
+
+ EnterMicrotaskContext(native_context);
+ SetCurrentContext(native_context);
+}
+
void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Context> current_context, TNode<Microtask> microtask) {
CSA_ASSERT(this, TaggedIsNotSmi(microtask));
@@ -114,7 +130,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
Label is_callable(this), is_callback(this),
is_promise_fulfill_reaction_job(this),
is_promise_reject_reaction_job(this),
- is_promise_resolve_thenable_job(this), is_weak_factory_cleanup_job(this),
+ is_promise_resolve_thenable_job(this),
+ is_finalization_group_cleanup_job(this),
is_unreachable(this, Label::kDeferred), done(this);
int32_t case_values[] = {CALLABLE_TASK_TYPE,
@@ -122,13 +139,13 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
- WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE};
+ FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE};
Label* case_labels[] = {&is_callable,
&is_callback,
&is_promise_fulfill_reaction_job,
&is_promise_reject_reaction_job,
&is_promise_resolve_thenable_job,
- &is_weak_factory_cleanup_job};
+ &is_finalization_group_cleanup_job};
static_assert(arraysize(case_values) == arraysize(case_labels), "");
Switch(microtask_type, &is_unreachable, case_values, case_labels,
arraysize(case_labels));
@@ -139,10 +156,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Context> microtask_context =
LoadObjectField<Context>(microtask, CallableTask::kContextOffset);
TNode<Context> native_context = LoadNativeContext(microtask_context);
-
- CSA_ASSERT(this, IsNativeContext(native_context));
- EnterMicrotaskContext(native_context);
- SetCurrentContext(native_context);
+ PrepareForContext(native_context, &done);
TNode<JSReceiver> callable =
LoadObjectField<JSReceiver>(microtask, CallableTask::kCallableOffset);
@@ -185,9 +199,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Context> microtask_context = LoadObjectField<Context>(
microtask, PromiseResolveThenableJobTask::kContextOffset);
TNode<Context> native_context = LoadNativeContext(microtask_context);
- CSA_ASSERT(this, IsNativeContext(native_context));
- EnterMicrotaskContext(native_context);
- SetCurrentContext(native_context);
+ PrepareForContext(native_context, &done);
Node* const promise_to_resolve = LoadObjectField(
microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset);
@@ -211,9 +223,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Context> microtask_context = LoadObjectField<Context>(
microtask, PromiseReactionJobTask::kContextOffset);
TNode<Context> native_context = LoadNativeContext(microtask_context);
- CSA_ASSERT(this, IsNativeContext(native_context));
- EnterMicrotaskContext(native_context);
- SetCurrentContext(native_context);
+ PrepareForContext(native_context, &done);
Node* const argument =
LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
@@ -246,9 +256,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Context> microtask_context = LoadObjectField<Context>(
microtask, PromiseReactionJobTask::kContextOffset);
TNode<Context> native_context = LoadNativeContext(microtask_context);
- CSA_ASSERT(this, IsNativeContext(native_context));
- EnterMicrotaskContext(native_context);
- SetCurrentContext(native_context);
+ PrepareForContext(native_context, &done);
Node* const argument =
LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
@@ -275,19 +283,19 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
Goto(&done);
}
- BIND(&is_weak_factory_cleanup_job);
+ BIND(&is_finalization_group_cleanup_job);
{
- // Enter the context of the {weak_factory}.
- TNode<JSWeakFactory> weak_factory = LoadObjectField<JSWeakFactory>(
- microtask, WeakFactoryCleanupJobTask::kFactoryOffset);
+ // Enter the context of the {finalization_group}.
+ TNode<JSFinalizationGroup> finalization_group =
+ LoadObjectField<JSFinalizationGroup>(
+ microtask,
+ FinalizationGroupCleanupJobTask::kFinalizationGroupOffset);
TNode<Context> native_context = LoadObjectField<Context>(
- weak_factory, JSWeakFactory::kNativeContextOffset);
- CSA_ASSERT(this, IsNativeContext(native_context));
- EnterMicrotaskContext(native_context);
- SetCurrentContext(native_context);
+ finalization_group, JSFinalizationGroup::kNativeContextOffset);
+ PrepareForContext(native_context, &done);
- Node* const result = CallRuntime(Runtime::kWeakFactoryCleanupJob,
- native_context, weak_factory);
+ Node* const result = CallRuntime(Runtime::kFinalizationGroupCleanupJob,
+ native_context, finalization_group);
GotoIfException(result, &if_exception, &var_exception);
RewindEnteredContext(saved_entered_context_count);
@@ -311,17 +319,26 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
BIND(&done);
}
+void MicrotaskQueueBuiltinsAssembler::IncrementFinishedMicrotaskCount(
+ TNode<RawPtrT> microtask_queue) {
+ TNode<IntPtrT> count = UncheckedCast<IntPtrT>(
+ Load(MachineType::IntPtr(), microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kFinishedMicrotaskCountOffset)));
+ TNode<IntPtrT> new_count = IntPtrAdd(count, IntPtrConstant(1));
+ StoreNoWriteBarrier(
+ MachineType::PointerRepresentation(), microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kFinishedMicrotaskCountOffset), new_count);
+}
+
TNode<Context> MicrotaskQueueBuiltinsAssembler::GetCurrentContext() {
auto ref = ExternalReference::Create(kContextAddress, isolate());
- return TNode<Context>::UncheckedCast(
- Load(MachineType::AnyTagged(), ExternalConstant(ref)));
+ return TNode<Context>::UncheckedCast(LoadFullTagged(ExternalConstant(ref)));
}
void MicrotaskQueueBuiltinsAssembler::SetCurrentContext(
TNode<Context> context) {
auto ref = ExternalReference::Create(kContextAddress, isolate());
- StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
- context);
+ StoreFullTaggedNoWriteBarrier(ExternalConstant(ref), context);
}
TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetEnteredContextCount() {
@@ -365,23 +382,22 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset +
ContextStack::kDataOffset);
Node* data = Load(MachineType::Pointer(), hsi, data_offset);
- StoreNoWriteBarrier(MachineType::Pointer().representation(), data,
- TimesSystemPointerSize(size),
- BitcastTaggedToWord(native_context));
+ StoreFullTaggedNoWriteBarrier(data, TimesSystemPointerSize(size),
+ native_context);
TNode<IntPtrT> new_size = IntPtrAdd(size, IntPtrConstant(1));
- StoreNoWriteBarrier(MachineType::IntPtr().representation(), hsi,
- size_offset, new_size);
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), hsi, size_offset,
+ new_size);
using FlagStack = DetachableVector<int8_t>;
TNode<IntPtrT> flag_data_offset =
IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
FlagStack::kDataOffset);
Node* flag_data = Load(MachineType::Pointer(), hsi, flag_data_offset);
- StoreNoWriteBarrier(MachineType::Int8().representation(), flag_data, size,
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, flag_data, size,
BoolConstant(true));
StoreNoWriteBarrier(
- MachineType::IntPtr().representation(), hsi,
+ MachineType::PointerRepresentation(), hsi,
IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
FlagStack::kSizeOffset),
new_size);
@@ -419,12 +435,12 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext(
CSA_ASSERT(this, IntPtrLessThanOrEqual(saved_entered_context_count, size));
#endif
- StoreNoWriteBarrier(MachineType::IntPtr().representation(), hsi, size_offset,
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), hsi, size_offset,
saved_entered_context_count);
using FlagStack = DetachableVector<int8_t>;
StoreNoWriteBarrier(
- MachineType::IntPtr().representation(), hsi,
+ MachineType::PointerRepresentation(), hsi,
IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
FlagStack::kSizeOffset),
saved_entered_context_count);
@@ -461,6 +477,11 @@ TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) {
TNode<Context> native_context = LoadNativeContext(context);
TNode<RawPtrT> microtask_queue = GetMicrotaskQueue(native_context);
+  // Do not store the microtask if the MicrotaskQueue is not available; this
+  // may happen when the context has been shut down.
+ Label if_shutdown(this, Label::kDeferred);
+ GotoIf(WordEqual(microtask_queue, IntPtrConstant(0)), &if_shutdown);
+
TNode<RawPtrT> ring_buffer = GetMicrotaskRingBuffer(microtask_queue);
TNode<IntPtrT> capacity = GetMicrotaskQueueCapacity(microtask_queue);
TNode<IntPtrT> size = GetMicrotaskQueueSize(microtask_queue);
@@ -493,6 +514,9 @@ TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) {
isolate_constant, microtask_queue, microtask);
Return(UndefinedConstant());
}
+
+ Bind(&if_shutdown);
+ Return(UndefinedConstant());
}
TF_BUILTIN(RunMicrotasks, MicrotaskQueueBuiltinsAssembler) {
@@ -531,6 +555,7 @@ TF_BUILTIN(RunMicrotasks, MicrotaskQueueBuiltinsAssembler) {
SetMicrotaskQueueStart(microtask_queue, new_start);
RunSingleMicrotask(current_context, microtask);
+ IncrementFinishedMicrotaskCount(microtask_queue);
Goto(&loop);
BIND(&done);
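Two behavioral changes hide in the microtask-queue hunks: PrepareForContext folds the repeated enter-context boilerplate into one helper that, like the new check in EnqueueMicrotask, bails out when GetMicrotaskQueue returns null because the owning context has been shut down; and RunMicrotasks now counts finished tasks. Reduced to plain C++ (the struct is an illustrative stand-in, not MicrotaskQueue's real layout):

    #include <cstdio>

    struct MicrotaskQueue {
      long finished_microtask_count = 0;
    };

    // A context that has been shut down no longer owns a queue, so the
    // microtask is dropped instead of dereferencing a null pointer.
    bool Enqueue(MicrotaskQueue* queue) {
      if (queue == nullptr) return false;  // shut down: bail out
      // ... ring-buffer write elided ...
      return true;
    }

    // Mirrors IncrementFinishedMicrotaskCount running after each task.
    void RunSingleMicrotask(MicrotaskQueue* queue) {
      // ... execute the task ...
      ++queue->finished_microtask_count;
    }

    int main() {
      MicrotaskQueue queue;
      if (Enqueue(&queue)) RunSingleMicrotask(&queue);
      std::printf("finished=%ld dropped=%d\n",
                  queue.finished_microtask_count, !Enqueue(nullptr));
    }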
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index d15c41105d..d1c13307e8 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -225,7 +225,7 @@ BUILTIN(NumberPrototypeToString) {
// Fast case where the result is a one character string.
if ((IsUint32Double(value_number) && value_number < radix_number) ||
- value_number == -0.0) {
+ IsMinusZero(value_number)) {
// Character array used for conversion.
static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
return *isolate->factory()->LookupSingleCharacterStringFromCode(
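The one-line change above deserves a gloss: IEEE-754 equality treats the zeros as equal, so `value_number == -0.0` is also true for +0.0. Here that was masked by the first disjunct, which already accepts +0.0, so the rewrite is likely behavior-neutral, but IsMinusZero tests the sign bit and says what it means. Standalone:

    #include <cmath>
    #include <cstdio>

    // Equality cannot tell the zeros apart; the sign bit can.
    bool IsMinusZero(double value) {
      return value == 0.0 && std::signbit(value);
    }

    int main() {
      std::printf("0.0 == -0.0       -> %d\n", 0.0 == -0.0);        // 1
      std::printf("IsMinusZero(-0.0) -> %d\n", IsMinusZero(-0.0));  // 1
      std::printf("IsMinusZero(0.0)  -> %d\n", IsMinusZero(0.0));   // 0
    }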
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index a6fa78504b..da265356fd 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -390,9 +390,7 @@ ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
CSA_ASSERT(this, IsJSArrayMap(array_map));
GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty);
- Node* array = AllocateUninitializedJSArrayWithoutElements(
- array_map, SmiTag(size), nullptr);
- StoreObjectField(array, JSArray::kElementsOffset, result);
+ Node* array = AllocateJSArray(array_map, result, SmiTag(size));
return TNode<JSArray>::UncheckedCast(array);
}
@@ -556,14 +554,14 @@ void ObjectBuiltinsAssembler::ObjectAssignFast(TNode<Context> context,
GotoIfNot(IsJSObjectInstanceType(from_instance_type), slow);
GotoIfNot(IsEmptyFixedArray(LoadElements(CAST(from))), slow);
- ForEachEnumerableOwnProperty(context, from_map, CAST(from),
- [=](TNode<Name> key, TNode<Object> value) {
- KeyedStoreGenericGenerator::SetProperty(
- state(), context, to,
- to_is_simple_receiver, key, value,
- LanguageMode::kStrict);
- },
- slow);
+ ForEachEnumerableOwnProperty(
+ context, from_map, CAST(from), kEnumerationOrder,
+ [=](TNode<Name> key, TNode<Object> value) {
+ KeyedStoreGenericGenerator::SetProperty(state(), context, to,
+ to_is_simple_receiver, key,
+ value, LanguageMode::kStrict);
+ },
+ slow);
Goto(&done);
BIND(&done);
@@ -647,10 +645,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
Node* native_context = LoadNativeContext(context);
TNode<Map> array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- TNode<JSArray> array = AllocateUninitializedJSArrayWithoutElements(
- array_map, CAST(var_length.value()), nullptr);
- StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset,
- var_elements.value());
+ TNode<JSArray> array = AllocateJSArray(
+ array_map, CAST(var_elements.value()), CAST(var_length.value()));
Return(array);
}
}
@@ -751,10 +747,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
Node* native_context = LoadNativeContext(context);
TNode<Map> array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- TNode<JSArray> array = AllocateUninitializedJSArrayWithoutElements(
- array_map, CAST(var_length.value()), nullptr);
- StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset,
- var_elements.value());
+ TNode<JSArray> array = AllocateJSArray(
+ array_map, CAST(var_elements.value()), CAST(var_length.value()));
Return(array);
}
}
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 75f680844b..6aa20e07a4 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/keys.h"
#include "src/lookup.h"
#include "src/message-template.h"
@@ -90,8 +91,8 @@ Object ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
// 5. Perform ? DefinePropertyOrThrow(O, key, desc).
// To preserve legacy behavior, we ignore errors silently rather than
// throwing an exception.
- Maybe<bool> success = JSReceiver::DefineOwnProperty(isolate, receiver, name,
- &desc, kThrowOnError);
+ Maybe<bool> success = JSReceiver::DefineOwnProperty(
+ isolate, receiver, name, &desc, Just(kThrowOnError));
MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception());
if (!success.FromJust()) {
isolate->CountUsage(v8::Isolate::kDefineGetterOrSetterWouldThrow);
@@ -395,7 +396,7 @@ BUILTIN(ObjectGetOwnPropertyDescriptors) {
Handle<Object> from_descriptor = descriptor.ToObject(isolate);
Maybe<bool> success = JSReceiver::CreateDataProperty(
- isolate, descriptors, key, from_descriptor, kDontThrow);
+ isolate, descriptors, key, from_descriptor, Just(kDontThrow));
CHECK(success.FromJust());
}
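The recurring `kThrowOnError` to `Just(kThrowOnError)` edits here and in builtins-intl.cc above track an API change: the should-throw argument of DefineOwnProperty / CreateDataProperty became a Maybe, so callers can also pass Nothing and let the callee derive the behavior later, for instance from the language mode. The shape of that API in std::optional terms, as a rough analogue rather than V8's actual Maybe type:

    #include <cstdio>
    #include <optional>

    enum ShouldThrow { kDontThrow, kThrowOnError };

    // Just(...) pins the behavior at the call site; an empty optional
    // defers the decision to the callee's own policy.
    bool WouldThrowOnFailure(std::optional<ShouldThrow> should_throw) {
      ShouldThrow effective = should_throw.value_or(kDontThrow);
      return effective == kThrowOnError;
    }

    int main() {
      std::printf("%d %d\n",
                  WouldThrowOnFailure(kThrowOnError),  // 1: forced
                  WouldThrowOnFailure(std::nullopt));  // 0: deferred default
    }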
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 39d81ce9dd..22ffc2a851 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -18,7 +18,9 @@
namespace v8 {
namespace internal {
-using compiler::Node;
+typedef compiler::Node Node;
+template <class T>
+using TNode = CodeStubAssembler::TNode<T>;
using IteratorRecord = IteratorBuiltinsAssembler::IteratorRecord;
Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
@@ -112,6 +114,66 @@ PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions(
return std::make_pair(resolve, reject);
}
+void PromiseBuiltinsAssembler::ExtractHandlerContext(Node* handler,
+ Variable* var_context) {
+ VARIABLE(var_handler, MachineRepresentation::kTagged, handler);
+ Label loop(this, &var_handler), done(this, Label::kDeferred);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Label if_function(this), if_bound_function(this, Label::kDeferred),
+ if_proxy(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(var_handler.value()), &done);
+
+ int32_t case_values[] = {
+ JS_FUNCTION_TYPE,
+ JS_BOUND_FUNCTION_TYPE,
+ JS_PROXY_TYPE,
+ };
+ Label* case_labels[] = {
+ &if_function,
+ &if_bound_function,
+ &if_proxy,
+ };
+ static_assert(arraysize(case_values) == arraysize(case_labels), "");
+ TNode<Map> handler_map = LoadMap(var_handler.value());
+ TNode<Int32T> handler_type = LoadMapInstanceType(handler_map);
+ Switch(handler_type, &done, case_values, case_labels,
+ arraysize(case_labels));
+
+ BIND(&if_bound_function);
+ {
+ // Use the target function's context for JSBoundFunction.
+ var_handler.Bind(LoadObjectField(
+ var_handler.value(), JSBoundFunction::kBoundTargetFunctionOffset));
+ Goto(&loop);
+ }
+
+ BIND(&if_proxy);
+ {
+ // Use the target function's context for JSProxy.
+ // If the proxy is revoked, |var_handler| will be undefined and this
+ // function will return with |var_context| left unchanged.
+ var_handler.Bind(
+ LoadObjectField(var_handler.value(), JSProxy::kTargetOffset));
+ Goto(&loop);
+ }
+
+ BIND(&if_function);
+ {
+ // Use the function's context.
+ Node* handler_context =
+ LoadObjectField(var_handler.value(), JSFunction::kContextOffset);
+ var_context->Bind(LoadNativeContext(CAST(handler_context)));
+ Goto(&done);
+ }
+ }
+
+ // If no valid context is available, |var_context| is unchanged and the caller
+ // will use a fallback context.
+ BIND(&done);
+}
+
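The loop above unwraps bound functions and proxies until it reaches a plain JSFunction whose native context can be used for scheduling. As a rough illustration only (plain JavaScript standing in for the CSA code, with `kind`, `boundTarget`, `proxyTarget` and `context` as hypothetical stand-ins for the internal slots read above):

```js
// Sketch: model the handler chain with plain objects so the unwrapping loop
// can run outside V8. A revoked proxy yields an undefined target, which falls
// through to the fallback, matching the comment in the CSA code.
function extractHandlerContext(handler, fallbackContext) {
  while (handler !== undefined && handler !== null) {
    switch (handler.kind) {
      case "function":
        return handler.context;        // JS_FUNCTION_TYPE: use its context
      case "bound":
        handler = handler.boundTarget; // JS_BOUND_FUNCTION_TYPE: unwrap
        break;
      case "proxy":
        handler = handler.proxyTarget; // JS_PROXY_TYPE: unwrap (undefined if revoked)
        break;
      default:
        return fallbackContext;        // Smi or other object: keep fallback
    }
  }
  return fallbackContext;
}

const fn = { kind: "function", context: "native-context-A" };
const wrapped = { kind: "bound", boundTarget: { kind: "proxy", proxyTarget: fn } };
console.log(extractHandlerContext(wrapped, "fallback")); // "native-context-A"
```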
// ES #sec-newpromisecapability
TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
@@ -378,13 +440,19 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
}
BIND(&enqueue);
- Node* argument =
- LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
- Node* microtask = AllocatePromiseReactionJobTask(
- var_map.value(), context, argument, var_handler.value(),
- result_promise_or_capability);
- CallBuiltin(Builtins::kEnqueueMicrotask, context, microtask);
- Goto(&done);
+ {
+ VARIABLE(var_handler_context, MachineRepresentation::kTagged, context);
+ ExtractHandlerContext(var_handler.value(), &var_handler_context);
+
+ Node* argument =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+ Node* microtask = AllocatePromiseReactionJobTask(
+ var_map.value(), var_handler_context.value(), argument,
+ var_handler.value(), result_promise_or_capability);
+ CallBuiltin(Builtins::kEnqueueMicrotask, var_handler_context.value(),
+ microtask);
+ Goto(&done);
+ }
}
BIND(&done);
@@ -479,12 +547,21 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
VARIABLE(var_reversed, MachineRepresentation::kTagged,
SmiConstant(Smi::zero()));
+ // As an additional safety net against misuse of the V8 Extras API, we
+ // sanity-check the {reactions} to make sure that they are actually
+ // PromiseReaction instances and not arbitrary JavaScript values (which
+ // would indicate that we're rejecting or resolving an already settled
+ // promise); see https://crbug.com/931640 for details.
+ TNode<Map> promise_reaction_map =
+ CAST(LoadRoot(RootIndex::kPromiseReactionMap));
+
Label loop(this, {&var_current, &var_reversed}), done_loop(this);
Goto(&loop);
BIND(&loop);
{
Node* current = var_current.value();
GotoIf(TaggedIsSmi(current), &done_loop);
+ CSA_CHECK(this, WordEqual(LoadMap(CAST(current)), promise_reaction_map));
var_current.Bind(LoadObjectField(current, PromiseReaction::kNextOffset));
StoreObjectField(current, PromiseReaction::kNextOffset,
var_reversed.value());
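The loop above is a standard in-place reversal of the singly linked reaction list: reactions are pushed onto the promise last-in first-out, so they must be reversed before being scheduled in registration order. A minimal sketch of the same idea over plain objects, with the number 0 standing in for the Smi-zero list terminator:

```js
// Reverse a singly linked list in place; 0 terminates the list, mirroring the
// Smi-zero sentinel used by the CSA loop above.
function reverseReactions(list) {
  let current = list;
  let reversed = 0;
  while (current !== 0) {
    const next = current.next;
    current.next = reversed; // repoint this node at the reversed prefix
    reversed = current;
    current = next;
  }
  return reversed;
}

const reactions = { id: 2, next: { id: 1, next: 0 } }; // built LIFO
console.log(reverseReactions(reactions).id);           // 1 — oldest first
```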
@@ -508,18 +585,23 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
GotoIf(TaggedIsSmi(current), &done_loop);
var_current.Bind(LoadObjectField(current, PromiseReaction::kNextOffset));
+ VARIABLE(var_context, MachineRepresentation::kTagged, context);
+
// Morph {current} from a PromiseReaction into a PromiseReactionJobTask
// and schedule that on the microtask queue. We try to minimize the number
// of stores here to avoid screwing up the store buffer.
STATIC_ASSERT(static_cast<int>(PromiseReaction::kSize) ==
static_cast<int>(PromiseReactionJobTask::kSize));
if (type == PromiseReaction::kFulfill) {
+ Node* handler =
+ LoadObjectField(current, PromiseReaction::kFulfillHandlerOffset);
+ ExtractHandlerContext(handler, &var_context);
StoreMapNoWriteBarrier(current,
RootIndex::kPromiseFulfillReactionJobTaskMap);
StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
argument);
StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
- context);
+ var_context.value());
STATIC_ASSERT(
static_cast<int>(PromiseReaction::kFulfillHandlerOffset) ==
static_cast<int>(PromiseReactionJobTask::kHandlerOffset));
@@ -530,12 +612,13 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
} else {
Node* handler =
LoadObjectField(current, PromiseReaction::kRejectHandlerOffset);
+ ExtractHandlerContext(handler, &var_context);
StoreMapNoWriteBarrier(current,
RootIndex::kPromiseRejectReactionJobTaskMap);
StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
argument);
StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
- context);
+ var_context.value());
StoreObjectField(current, PromiseReactionJobTask::kHandlerOffset,
handler);
STATIC_ASSERT(
@@ -543,7 +626,7 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
static_cast<int>(
PromiseReactionJobTask::kPromiseOrCapabilityOffset));
}
- CallBuiltin(Builtins::kEnqueueMicrotask, context, current);
+ CallBuiltin(Builtins::kEnqueueMicrotask, var_context.value(), current);
Goto(&loop);
}
BIND(&done_loop);
@@ -964,9 +1047,19 @@ TF_BUILTIN(PromiseInternalReject, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
Node* const reason = Parameter(Descriptor::kReason);
Node* const context = Parameter(Descriptor::kContext);
+
+ // The main V8 Extras invariant is that {promise} is still "pending" at
+ // this point, i.e. that {promise} is not being settled a second time.
+ Label if_promise_is_settled(this, Label::kDeferred);
+ GotoIfNot(IsPromiseStatus(PromiseStatus(promise), v8::Promise::kPending),
+ &if_promise_is_settled);
+
// We pass true to trigger the debugger's on exception handler.
Return(CallBuiltin(Builtins::kRejectPromise, context, promise, reason,
TrueConstant()));
+
+ BIND(&if_promise_is_settled);
+ Abort(AbortReason::kPromiseAlreadySettled);
}
// V8 Extras: v8.resolvePromise(promise, resolution)
@@ -974,7 +1067,17 @@ TF_BUILTIN(PromiseInternalResolve, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
Node* const resolution = Parameter(Descriptor::kResolution);
Node* const context = Parameter(Descriptor::kContext);
+
+ // The main V8 Extras invariant is that {promise} is still "pending" at
+ // this point, i.e. that {promise} is not being settled a second time.
+ Label if_promise_is_settled(this, Label::kDeferred);
+ GotoIfNot(IsPromiseStatus(PromiseStatus(promise), v8::Promise::kPending),
+ &if_promise_is_settled);
+
Return(CallBuiltin(Builtins::kResolvePromise, context, promise, resolution));
+
+ BIND(&if_promise_is_settled);
+ Abort(AbortReason::kPromiseAlreadySettled);
}
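Both internal builtins back the V8 Extras bindings named in the comments (v8.resolvePromise and its reject counterpart), which are visible only to embedder extras scripts, never to user code. Assuming the corresponding createPromise extras binding (not shown in this hunk), the newly enforced invariant behaves roughly like this; the sketch is purely illustrative:

```js
// Extras-script sketch: settling the same promise twice used to corrupt the
// reactions list (https://crbug.com/931640); it now aborts deterministically.
const p = v8.createPromise(); // hypothetical extras binding, still "pending"
v8.resolvePromise(p, 42);     // fine: promise is pending
v8.resolvePromise(p, 43);     // aborts with AbortReason::kPromiseAlreadySettled
```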
// ES#sec-promise.prototype.then
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 8edc2331a5..6bb38ee4eb 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -47,8 +47,9 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* then, Node* thenable,
Node* context);
- std::pair<Node*, Node*> CreatePromiseResolvingFunctions(
- Node* promise, Node* native_context, Node* promise_context);
+ std::pair<Node*, Node*> CreatePromiseResolvingFunctions(Node* promise,
+ Node* debug_event,
+ Node* native_context);
Node* PromiseHasHandler(Node* promise);
@@ -67,8 +68,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* CreatePromiseResolvingFunctionsContext(Node* promise, Node* debug_event,
Node* native_context);
- Node* CreatePromiseGetCapabilitiesExecutorContext(Node* native_context,
- Node* promise_capability);
+ Node* CreatePromiseGetCapabilitiesExecutorContext(Node* promise_capability,
+ Node* native_context);
protected:
void PromiseInit(Node* promise);
@@ -153,6 +154,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
void PromiseSetStatus(Node* promise, v8::Promise::PromiseState status);
Node* AllocateJSPromise(Node* context);
+
+ void ExtractHandlerContext(Node* handler, Variable* var_context);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
index 0f18d8fb45..f742252050 100644
--- a/deps/v8/src/builtins/builtins-promise.cc
+++ b/deps/v8/src/builtins/builtins-promise.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 02b4d0b71e..c5b1037dcb 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -116,9 +116,7 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
TNode<Map> array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
TNode<JSArray> array =
- AllocateUninitializedJSArrayWithoutElements(array_map, length);
- StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset,
- elements.value());
+ AllocateJSArray(array_map, CAST(elements.value()), length);
return array;
}
@@ -547,7 +545,6 @@ TF_BUILTIN(ProxySetProperty, ProxiesCodeStubAssembler) {
Node* name = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
Node* receiver = Parameter(Descriptor::kReceiverValue);
- TNode<Smi> language_mode = CAST(Parameter(Descriptor::kLanguageMode));
CSA_ASSERT(this, IsJSProxy(proxy));
@@ -598,13 +595,10 @@ TF_BUILTIN(ProxySetProperty, ProxiesCodeStubAssembler) {
BIND(&failure);
{
- Label if_throw(this, Label::kDeferred);
- Branch(SmiEqual(language_mode, SmiConstant(LanguageMode::kStrict)),
- &if_throw, &success);
-
- BIND(&if_throw);
- ThrowTypeError(context, MessageTemplate::kProxyTrapReturnedFalsishFor,
- HeapConstant(set_string), name);
+ CallRuntime(Runtime::kThrowTypeErrorIfStrict, context,
+ SmiConstant(MessageTemplate::kProxyTrapReturnedFalsishFor),
+ HeapConstant(set_string), name);
+ Goto(&success);
}
// 12. Return true.
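Dropping the language_mode parameter works because Runtime::kThrowTypeErrorIfStrict determines the caller's language mode itself: a falsish trap result throws only in strict mode and is silently ignored in sloppy mode. The observable JavaScript behavior, which this refactoring must preserve:

```js
const target = {};
const proxy = new Proxy(target, { set() { return false; } }); // falsish trap

proxy.x = 1;                // sloppy mode: assignment silently ignored
console.log("x" in target); // false

(function () {
  "use strict";
  try {
    proxy.x = 1;            // strict mode: TypeError, trap returned falsish
  } catch (e) {
    console.log(e instanceof TypeError); // true
  }
})();
```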
@@ -613,23 +607,18 @@ TF_BUILTIN(ProxySetProperty, ProxiesCodeStubAssembler) {
BIND(&private_symbol);
{
- Label failure(this), throw_error(this, Label::kDeferred);
-
- Branch(SmiEqual(language_mode, SmiConstant(LanguageMode::kStrict)),
- &throw_error, &failure);
+ Label failure(this);
- BIND(&failure);
+ CallRuntime(Runtime::kThrowTypeErrorIfStrict, context,
+ SmiConstant(MessageTemplate::kProxyPrivate));
Return(UndefinedConstant());
-
- BIND(&throw_error);
- ThrowTypeError(context, MessageTemplate::kProxyPrivate);
}
BIND(&trap_undefined);
{
// 7.a. Return ? target.[[Set]](P, V, Receiver).
CallRuntime(Runtime::kSetPropertyWithReceiver, context, target, name, value,
- receiver, language_mode);
+ receiver);
Return(value);
}
@@ -647,6 +636,7 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
Label if_found_value(this), check_in_runtime(this, Label::kDeferred);
+ GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime);
Node* instance_type = LoadInstanceType(target);
TryGetOwnProperty(context, target, target, map, instance_type, name,
&if_found_value, &var_value, &var_details, &var_raw_value,
@@ -754,6 +744,7 @@ void ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
throw_non_extensible(this, Label::kDeferred);
// 9.a. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ GotoIfNot(IsUniqueNameNoIndex(CAST(name)), if_bailout);
Node* instance_type = LoadInstanceType(target);
TryGetOwnProperty(context, target, target, target_map, instance_type, name,
&if_found_value, &var_value, &var_details, &var_raw_value,
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index cd3f2b4bed..a81d5173a0 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -41,7 +41,7 @@ BUILTIN(ReflectDefineProperty) {
}
Maybe<bool> result = JSReceiver::DefineOwnProperty(
- isolate, Handle<JSReceiver>::cast(target), name, &desc, kDontThrow);
+ isolate, Handle<JSReceiver>::cast(target), name, &desc, Just(kDontThrow));
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -218,7 +218,7 @@ BUILTIN(ReflectSet) {
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, receiver, name, Handle<JSReceiver>::cast(target));
Maybe<bool> result = Object::SetSuperProperty(
- &it, value, LanguageMode::kSloppy, StoreOrigin::kMaybeKeyed);
+ &it, value, StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kDontThrow));
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
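Both call sites now pass an explicit don't-throw mode because the Reflect variants report failure through their boolean return value rather than by raising an exception, unlike their Object counterparts:

```js
const frozen = Object.freeze({});

console.log(Reflect.defineProperty(frozen, "x", { value: 1 })); // false
console.log(Reflect.set(frozen, "x", 1));                       // false

try {
  Object.defineProperty(frozen, "x", { value: 1 }); // throwing counterpart
} catch (e) {
  console.log(e instanceof TypeError); // true
}
```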
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 08b8e0457c..00b1f1b921 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -163,12 +163,12 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<RegExpMatchInfo> match_info, TNode<String> string) {
Label named_captures(this), out(this);
- TNode<IntPtrT> num_indices = SmiUntag(CAST(LoadFixedArrayElement(
+ TNode<IntPtrT> num_indices = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
match_info, RegExpMatchInfo::kNumberOfCapturesIndex)));
TNode<Smi> num_results = SmiTag(WordShr(num_indices, 1));
- TNode<Smi> start = CAST(
- LoadFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex));
- TNode<Smi> end = CAST(LoadFixedArrayElement(
+ TNode<Smi> start = CAST(UnsafeLoadFixedArrayElement(
+ match_info, RegExpMatchInfo::kFirstCaptureIndex));
+ TNode<Smi> end = CAST(UnsafeLoadFixedArrayElement(
match_info, RegExpMatchInfo::kFirstCaptureIndex + 1));
// Calculate the substring of the first match before creating the result array
@@ -181,7 +181,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
AllocateRegExpResult(context, num_results, start, string);
TNode<FixedArray> result_elements = CAST(LoadElements(result));
- StoreFixedArrayElement(result_elements, 0, first, SKIP_WRITE_BARRIER);
+ UnsafeStoreFixedArrayElement(result_elements, 0, first, SKIP_WRITE_BARRIER);
// If no captures exist we can skip named capture handling as well.
GotoIf(SmiEqual(num_results, SmiConstant(1)), &out);
@@ -202,18 +202,20 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
{
TNode<IntPtrT> from_cursor = var_from_cursor.value();
TNode<IntPtrT> to_cursor = var_to_cursor.value();
- TNode<Smi> start = CAST(LoadFixedArrayElement(match_info, from_cursor));
+ TNode<Smi> start =
+ CAST(UnsafeLoadFixedArrayElement(match_info, from_cursor));
Label next_iter(this);
GotoIf(SmiEqual(start, SmiConstant(-1)), &next_iter);
TNode<IntPtrT> from_cursor_plus1 =
IntPtrAdd(from_cursor, IntPtrConstant(1));
- TNode<Smi> end = CAST(LoadFixedArrayElement(match_info, from_cursor_plus1));
+ TNode<Smi> end =
+ CAST(UnsafeLoadFixedArrayElement(match_info, from_cursor_plus1));
TNode<String> capture =
CAST(CallBuiltin(Builtins::kSubString, context, string, start, end));
- StoreFixedArrayElement(result_elements, to_cursor, capture);
+ UnsafeStoreFixedArrayElement(result_elements, to_cursor, capture);
Goto(&next_iter);
BIND(&next_iter);
@@ -247,26 +249,26 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
LoadFixedArrayElement(data, JSRegExp::kIrregexpCaptureNameMapIndex);
GotoIf(WordEqual(maybe_names, SmiZero()), &out);
+ // One or more named captures exist; add a property for each one.
+
+ TNode<FixedArray> names = CAST(maybe_names);
+ TNode<IntPtrT> names_length = LoadAndUntagFixedArrayBaseLength(names);
+ CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrZero()));
+
// Allocate a new object to store the named capture properties.
// TODO(jgruber): Could be optimized by adding the object map to the heap
// root list.
+ TNode<IntPtrT> num_properties = WordSar(names_length, 1);
TNode<Context> native_context = LoadNativeContext(context);
TNode<Map> map = CAST(LoadContextElement(
native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
- TNode<NameDictionary> properties =
- AllocateNameDictionary(NameDictionary::kInitialCapacity);
+ TNode<NameDictionary> properties = AllocateNameDictionary(num_properties);
TNode<JSObject> group_object =
CAST(AllocateJSObjectFromMap(map, properties));
StoreObjectField(result, JSRegExpResult::kGroupsOffset, group_object);
- // One or more named captures exist, add a property for each one.
-
- TNode<FixedArray> names = CAST(maybe_names);
- TNode<IntPtrT> names_length = LoadAndUntagFixedArrayBaseLength(names);
- CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrZero()));
-
TVARIABLE(IntPtrT, var_i, IntPtrZero());
Variable* vars[] = {&var_i};
@@ -285,15 +287,32 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<HeapObject> capture =
CAST(LoadFixedArrayElement(result_elements, SmiUntag(index)));
- // TODO(jgruber): Calling into runtime to create each property is slow.
- // Either we should create properties entirely in CSA (should be doable),
- // or only call runtime once and loop there.
- CallRuntime(Runtime::kCreateDataProperty, context, group_object, name,
- capture);
+ // TODO(v8:8213): For maintainability, we should call a CSA/Torque
+ // implementation of CreateDataProperty instead.
+
+ // At this point the spec says to call CreateDataProperty. However, we can
+ // skip most of the steps and go straight to adding a dictionary entry
+ // because we know a bunch of useful facts:
+ // - All keys are non-numeric internalized strings
+ // - No keys repeat
+ // - Receiver has no prototype
+ // - Receiver isn't used as a prototype
+ // - Receiver isn't any special object like a Promise intrinsic object
+ // - Receiver is extensible
+ // - Receiver has no interceptors
+ Label add_dictionary_property_slow(this, Label::kDeferred);
+ Add<NameDictionary>(properties, name, capture,
+ &add_dictionary_property_slow);
var_i = i_plus_2;
Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length), &out,
&loop);
+
+ BIND(&add_dictionary_property_slow);
+ // If the dictionary needs resizing, the above Add call will jump here
+ // before making any changes. This shouldn't happen because we allocated
+ // the dictionary with enough space above.
+ Unreachable();
}
}
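The facts listed in the comment hold because the groups object is freshly allocated from the slow-object-with-null-prototype map and populated only with the regexp's own, already internalized capture names. From JavaScript:

```js
const m = /(?<year>\d{4})-(?<month>\d{2})/.exec("2019-03");
console.log(m.groups.year, m.groups.month);   // "2019" "03"
console.log(Object.getPrototypeOf(m.groups)); // null — no prototype chain
console.log(Object.isExtensible(m.groups));   // true — plain data object
```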
@@ -324,13 +343,6 @@ void RegExpBuiltinsAssembler::GetStringPointers(
TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string,
TNode<Number> last_index, TNode<RegExpMatchInfo> match_info) {
-// Just jump directly to runtime if native RegExp is not selected at compile
-// time or if regexp entry in generated code is turned off runtime switch or
-// at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- return CAST(CallRuntime(Runtime::kRegExpExec, context, regexp, string,
- last_index, match_info));
-#else // V8_INTERPRETED_REGEXP
ToDirectStringAssembler to_direct(state(), string);
TVARIABLE(HeapObject, var_result);
@@ -388,8 +400,8 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
// Check (number_of_captures + 1) * 2 <= offsets vector size
// Or number_of_captures <= offsets vector size / 2 - 1
- TNode<Smi> capture_count =
- CAST(LoadFixedArrayElement(data, JSRegExp::kIrregexpCaptureCountIndex));
+ TNode<Smi> capture_count = CAST(UnsafeLoadFixedArrayElement(
+ data, JSRegExp::kIrregexpCaptureCountIndex));
const int kOffsetsSize = Isolate::kJSRegexpStaticOffsetsVectorSize;
STATIC_ASSERT(kOffsetsSize >= 2);
@@ -429,7 +441,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
int_string_length, String::ONE_BYTE_ENCODING,
&var_string_start, &var_string_end);
var_code =
- LoadFixedArrayElement(data, JSRegExp::kIrregexpLatin1CodeIndex);
+ UnsafeLoadFixedArrayElement(data, JSRegExp::kIrregexpLatin1CodeIndex);
Goto(&next);
}
@@ -438,7 +450,8 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
GetStringPointers(direct_string_data, to_direct.offset(), int_last_index,
int_string_length, String::TWO_BYTE_ENCODING,
&var_string_start, &var_string_end);
- var_code = LoadFixedArrayElement(data, JSRegExp::kIrregexpUC16CodeIndex);
+ var_code =
+ UnsafeLoadFixedArrayElement(data, JSRegExp::kIrregexpUC16CodeIndex);
Goto(&next);
}
@@ -460,6 +473,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
#endif
GotoIf(TaggedIsSmi(var_code.value()), &runtime);
+ GotoIfNot(IsCode(CAST(var_code.value())), &runtime);
TNode<Code> code = CAST(var_code.value());
Label if_success(this), if_exception(this, Label::kDeferred);
@@ -557,21 +571,21 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
TNode<Smi> available_slots =
SmiSub(LoadFixedArrayBaseLength(match_info),
SmiConstant(RegExpMatchInfo::kLastMatchOverhead));
- TNode<Smi> capture_count =
- CAST(LoadFixedArrayElement(data, JSRegExp::kIrregexpCaptureCountIndex));
+ TNode<Smi> capture_count = CAST(UnsafeLoadFixedArrayElement(
+ data, JSRegExp::kIrregexpCaptureCountIndex));
// Calculate number of register_count = (capture_count + 1) * 2.
TNode<Smi> register_count =
SmiShl(SmiAdd(capture_count, SmiConstant(1)), 1);
GotoIf(SmiGreaterThan(register_count, available_slots), &runtime);
// Fill match_info.
-
- StoreFixedArrayElement(match_info, RegExpMatchInfo::kNumberOfCapturesIndex,
- register_count, SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(match_info, RegExpMatchInfo::kLastSubjectIndex,
- string);
- StoreFixedArrayElement(match_info, RegExpMatchInfo::kLastInputIndex,
- string);
+ UnsafeStoreFixedArrayElement(match_info,
+ RegExpMatchInfo::kNumberOfCapturesIndex,
+ register_count, SKIP_WRITE_BARRIER);
+ UnsafeStoreFixedArrayElement(match_info, RegExpMatchInfo::kLastSubjectIndex,
+ string);
+ UnsafeStoreFixedArrayElement(match_info, RegExpMatchInfo::kLastInputIndex,
+ string);
// Fill match and capture offsets in match_info.
{
@@ -639,7 +653,6 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
BIND(&out);
return var_result.value();
-#endif // V8_INTERPRETED_REGEXP
}
// ES#sec-regexp.prototype.exec
@@ -755,7 +768,7 @@ RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
GotoIfNot(should_update_last_index, &out);
// Update the new last index from {match_indices}.
- TNode<Number> new_lastindex = CAST(LoadFixedArrayElement(
+ TNode<Number> new_lastindex = CAST(UnsafeLoadFixedArrayElement(
CAST(match_indices), RegExpMatchInfo::kFirstCaptureIndex + 1));
StoreLastIndex(context, regexp, new_lastindex, is_fastpath);
@@ -1015,9 +1028,10 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
TNode<FixedArray> data = CAST(LoadObjectField(regexp, JSRegExp::kDataOffset));
- CSA_ASSERT(this,
- SmiEqual(CAST(LoadFixedArrayElement(data, JSRegExp::kTagIndex)),
- SmiConstant(JSRegExp::ATOM)));
+ CSA_ASSERT(
+ this,
+ SmiEqual(CAST(UnsafeLoadFixedArrayElement(data, JSRegExp::kTagIndex)),
+ SmiConstant(JSRegExp::ATOM)));
// Callers ensure that last_index is in-bounds.
CSA_ASSERT(this,
@@ -1025,7 +1039,7 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
LoadStringLengthAsWord(subject_string)));
Node* const needle_string =
- LoadFixedArrayElement(data, JSRegExp::kAtomPatternIndex);
+ UnsafeLoadFixedArrayElement(data, JSRegExp::kAtomPatternIndex);
CSA_ASSERT(this, IsString(needle_string));
TNode<Smi> const match_from =
@@ -1047,16 +1061,19 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
TNode<Smi> const match_to =
SmiAdd(match_from, LoadStringLengthAsSmi(needle_string));
- StoreFixedArrayElement(match_info, RegExpMatchInfo::kNumberOfCapturesIndex,
- SmiConstant(kNumRegisters), SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(match_info, RegExpMatchInfo::kLastSubjectIndex,
- subject_string);
- StoreFixedArrayElement(match_info, RegExpMatchInfo::kLastInputIndex,
- subject_string);
- StoreFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex,
- match_from, SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex + 1,
- match_to, SKIP_WRITE_BARRIER);
+ UnsafeStoreFixedArrayElement(
+ match_info, RegExpMatchInfo::kNumberOfCapturesIndex,
+ SmiConstant(kNumRegisters), SKIP_WRITE_BARRIER);
+ UnsafeStoreFixedArrayElement(match_info, RegExpMatchInfo::kLastSubjectIndex,
+ subject_string);
+ UnsafeStoreFixedArrayElement(match_info, RegExpMatchInfo::kLastInputIndex,
+ subject_string);
+ UnsafeStoreFixedArrayElement(match_info,
+ RegExpMatchInfo::kFirstCaptureIndex,
+ match_from, SKIP_WRITE_BARRIER);
+ UnsafeStoreFixedArrayElement(match_info,
+ RegExpMatchInfo::kFirstCaptureIndex + 1,
+ match_to, SKIP_WRITE_BARRIER);
Return(match_info);
}
@@ -1908,9 +1925,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
RegExpPrototypeExecBodyWithoutResult(CAST(context), CAST(regexp),
string, &if_didnotmatch, true);
- Node* const match_from = LoadFixedArrayElement(
+ Node* const match_from = UnsafeLoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex);
- Node* const match_to = LoadFixedArrayElement(
+ Node* const match_to = UnsafeLoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
var_match.Bind(CallBuiltin(Builtins::kSubString, context, string,
@@ -2386,7 +2403,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
length, allocation_site, mode);
TNode<FixedArray> fixed_array = CAST(LoadElements(result));
- StoreFixedArrayElement(fixed_array, 0, string);
+ UnsafeStoreFixedArrayElement(fixed_array, 0, string);
Return(result);
}
@@ -2439,7 +2456,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
}
TNode<FixedArray> match_indices = CAST(match_indices_ho);
- TNode<Smi> const match_from = CAST(LoadFixedArrayElement(
+ TNode<Smi> const match_from = CAST(UnsafeLoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex));
// We're done if the match starts beyond the string.
@@ -2449,7 +2466,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
BIND(&next);
}
- TNode<Smi> const match_to = CAST(LoadFixedArrayElement(
+ TNode<Smi> const match_to = CAST(UnsafeLoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1));
// Advance index and continue if the match is empty.
@@ -2810,42 +2827,43 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Node* const to = SmiUntag(res_length);
const int increment = 1;
- BuildFastLoop(from, to,
- [this, res_elems, isolate, native_context, context, undefined,
- replace_callable](Node* index) {
- Node* const elem = LoadFixedArrayElement(res_elems, index);
+ BuildFastLoop(
+ from, to,
+ [this, res_elems, isolate, native_context, context, undefined,
+ replace_callable](Node* index) {
+ Node* const elem = LoadFixedArrayElement(res_elems, index);
- Label do_continue(this);
- GotoIf(TaggedIsSmi(elem), &do_continue);
+ Label do_continue(this);
+ GotoIf(TaggedIsSmi(elem), &do_continue);
- // elem must be an Array.
- // Use the apply argument as backing for global RegExp
- // properties.
+ // elem must be an Array.
+ // Use the apply argument as backing for global RegExp
+ // properties.
- CSA_ASSERT(this, HasInstanceType(elem, JS_ARRAY_TYPE));
+ CSA_ASSERT(this, HasInstanceType(elem, JS_ARRAY_TYPE));
- // TODO(jgruber): Remove indirection through
- // Call->ReflectApply.
- Callable call_callable = CodeFactory::Call(isolate);
- Node* const reflect_apply = LoadContextElement(
- native_context, Context::REFLECT_APPLY_INDEX);
+ // TODO(jgruber): Remove indirection through
+ // Call->ReflectApply.
+ Callable call_callable = CodeFactory::Call(isolate);
+ Node* const reflect_apply =
+ LoadContextElement(native_context, Context::REFLECT_APPLY_INDEX);
- Node* const replacement_obj =
- CallJS(call_callable, context, reflect_apply, undefined,
- replace_callable, undefined, elem);
+ Node* const replacement_obj =
+ CallJS(call_callable, context, reflect_apply, undefined,
+ replace_callable, undefined, elem);
- // Overwrite the i'th element in the results with the string
- // we got back from the callback function.
+ // Overwrite the i'th element in the results with the string
+ // we got back from the callback function.
- TNode<String> const replacement_str =
- ToString_Inline(context, replacement_obj);
- StoreFixedArrayElement(res_elems, index, replacement_str);
+ TNode<String> const replacement_str =
+ ToString_Inline(context, replacement_obj);
+ StoreFixedArrayElement(res_elems, index, replacement_str);
- Goto(&do_continue);
- BIND(&do_continue);
- },
- increment, CodeStubAssembler::INTPTR_PARAMETERS,
- CodeStubAssembler::IndexAdvanceMode::kPost);
+ Goto(&do_continue);
+ BIND(&do_continue);
+ },
+ increment, CodeStubAssembler::INTPTR_PARAMETERS,
+ CodeStubAssembler::IndexAdvanceMode::kPost);
Goto(&create_result);
}
@@ -2898,9 +2916,9 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
// Successful match.
{
- TNode<Smi> const match_start = CAST(LoadFixedArrayElement(
+ TNode<Smi> const match_start = CAST(UnsafeLoadFixedArrayElement(
var_match_indices, RegExpMatchInfo::kFirstCaptureIndex));
- TNode<Smi> const match_end = CAST(LoadFixedArrayElement(
+ TNode<Smi> const match_end = CAST(UnsafeLoadFixedArrayElement(
var_match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1));
TNode<Smi> const replace_length = LoadStringLengthAsSmi(replace_string);
@@ -3073,30 +3091,6 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
string, replace_value));
}
-// Simple string matching functionality for internal use which does not modify
-// the last match info.
-TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
- TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
- TNode<String> string = CAST(Parameter(Descriptor::kString));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- TNode<Context> native_context = LoadNativeContext(context);
- TNode<RegExpMatchInfo> internal_match_info = CAST(LoadContextElement(
- native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX));
- TNode<HeapObject> maybe_match_indices =
- CAST(CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
- SmiZero(), internal_match_info));
- TNode<Oddball> null = NullConstant();
- Label if_matched(this);
- GotoIfNot(WordEqual(maybe_match_indices, null), &if_matched);
- Return(null);
-
- BIND(&if_matched);
- TNode<RegExpMatchInfo> match_indices = CAST(maybe_match_indices);
- Return(
- ConstructNewResultFromMatchInfo(context, regexp, match_indices, string));
-}
-
class RegExpStringIteratorAssembler : public RegExpBuiltinsAssembler {
public:
explicit RegExpStringIteratorAssembler(compiler::CodeAssemblerState* state)
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index 1c2898374e..8f96ba38a6 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -30,6 +30,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> RegExpCreate(TNode<Context> context, TNode<Map> initial_map,
TNode<Object> regexp_string, TNode<String> flags);
+ TNode<BoolT> IsRegExp(TNode<Context> context, TNode<Object> maybe_receiver);
+
protected:
TNode<Smi> SmiZero();
TNode<IntPtrT> IntPtrZero();
@@ -112,8 +114,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
void FlagGetter(Node* context, Node* receiver, JSRegExp::Flag flag,
int counter, const char* method_name);
- TNode<BoolT> IsRegExp(TNode<Context> context, TNode<Object> maybe_receiver);
-
Node* RegExpInitialize(Node* const context, Node* const regexp,
Node* const maybe_pattern, Node* const maybe_flags);
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 0918f1e27b..96c5558536 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -22,7 +22,7 @@ namespace internal {
// See builtins-arraybuffer.cc for implementations of
// SharedArrayBuffer.prototype.byteLength and SharedArrayBuffer.prototype.slice
-inline bool AtomicIsLockFree(uint32_t size) {
+inline bool AtomicIsLockFree(double size) {
return size == 1 || size == 2 || size == 4;
}
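Widening the parameter from uint32_t to double matters because the spec coerces the argument with ToInteger, so out-of-range values must compare unequal to 1, 2 and 4 instead of being truncated modulo 2^32. A sketch of the observable behavior (the exact motivating input is an assumption):

```js
console.log(Atomics.isLockFree(4));           // true
console.log(Atomics.isLockFree(3));           // false
console.log(Atomics.isLockFree(2 ** 32 + 4)); // false — must not wrap to 4
```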
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 085ffcfafa..e2db979452 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -9,6 +9,7 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/heap/factory-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/objects.h"
#include "src/objects/property-cell.h"
@@ -1803,9 +1804,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, context);
- result_array =
- AllocateUninitializedJSArrayWithoutElements(array_map, length_smi);
- StoreObjectField(result_array.value(), JSObject::kElementsOffset, elements);
+ result_array = AllocateJSArray(array_map, elements, length_smi);
Goto(&done);
BIND(&fill_thehole_and_call_runtime);
@@ -2573,8 +2572,8 @@ class StringHtmlAssembler : public StringBuiltinsAssembler {
const char* method_name, const char* tag_name,
const char* attr, Node* const value) {
Node* const string = ToThisString(context, receiver, method_name);
- Node* const value_string =
- EscapeQuotes(context, ToString_Inline(context, value));
+ TNode<String> value_string =
+ EscapeQuotes(CAST(context), ToString_Inline(context, value));
std::string open_tag_attr =
"<" + std::string(tag_name) + " " + std::string(attr) + "=\"";
std::string close_tag = "</" + std::string(tag_name) + ">";
@@ -2594,20 +2593,8 @@ class StringHtmlAssembler : public StringBuiltinsAssembler {
return var_result.value();
}
- Node* EscapeQuotes(Node* const context, Node* const string) {
- CSA_ASSERT(this, IsString(string));
- Node* const regexp_function = LoadContextElement(
- LoadNativeContext(context), Context::REGEXP_FUNCTION_INDEX);
- Node* const initial_map = LoadObjectField(
- regexp_function, JSFunction::kPrototypeOrInitialMapOffset);
- // TODO(pwong): Refactor to not allocate RegExp
- Node* const regexp =
- CallRuntime(Runtime::kRegExpInitializeAndCompile, context,
- AllocateJSObjectFromMap(initial_map), StringConstant("\""),
- StringConstant("g"));
-
- return CallRuntime(Runtime::kRegExpInternalReplace, context, regexp, string,
- StringConstant("&quot;"));
+ TNode<String> EscapeQuotes(TNode<Context> context, TNode<String> string) {
+ return CAST(CallRuntime(Runtime::kStringEscapeQuotes, context, string));
}
};
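EscapeQuotes serves the Annex B HTML string methods (String.prototype.anchor, link, fontcolor, ...), which embed the attribute value in double quotes and therefore must escape embedded quotes. The rewrite delegates to a runtime function instead of allocating and compiling a throwaway RegExp on every call:

```js
console.log("hi".anchor('a"b'));
// '<a name="a&quot;b">hi</a>' — quotes in the attribute value are escaped;
// the receiver string itself is inserted verbatim.
```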
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index d656c8769c..d114a0e86b 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
@@ -118,70 +119,6 @@ BUILTIN(StringFromCodePoint) {
return *result;
}
-// ES6 section 21.1.3.6
-// String.prototype.endsWith ( searchString [ , endPosition ] )
-BUILTIN(StringPrototypeEndsWith) {
- HandleScope handle_scope(isolate);
- TO_THIS_STRING(str, "String.prototype.endsWith");
-
- // Check if the search string is a regExp and fail if it is.
- Handle<Object> search = args.atOrUndefined(isolate, 1);
- Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
- if (is_reg_exp.IsNothing()) {
- DCHECK(isolate->has_pending_exception());
- return ReadOnlyRoots(isolate).exception();
- }
- if (is_reg_exp.FromJust()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kFirstArgumentNotRegExp,
- isolate->factory()->NewStringFromStaticChars(
- "String.prototype.endsWith")));
- }
- Handle<String> search_string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
- Object::ToString(isolate, search));
-
- Handle<Object> position = args.atOrUndefined(isolate, 2);
- int end;
-
- if (position->IsUndefined(isolate)) {
- end = str->length();
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
- Object::ToInteger(isolate, position));
- end = str->ToValidIndex(*position);
- }
-
- int start = end - search_string->length();
- if (start < 0) return ReadOnlyRoots(isolate).false_value();
-
- str = String::Flatten(isolate, str);
- search_string = String::Flatten(isolate, search_string);
-
- DisallowHeapAllocation no_gc; // ensure vectors stay valid
- String::FlatContent str_content = str->GetFlatContent(no_gc);
- String::FlatContent search_content = search_string->GetFlatContent(no_gc);
-
- if (str_content.IsOneByte() && search_content.IsOneByte()) {
- Vector<const uint8_t> str_vector = str_content.ToOneByteVector();
- Vector<const uint8_t> search_vector = search_content.ToOneByteVector();
-
- return isolate->heap()->ToBoolean(memcmp(str_vector.start() + start,
- search_vector.start(),
- search_string->length()) == 0);
- }
-
- FlatStringReader str_reader(isolate, str);
- FlatStringReader search_reader(isolate, search_string);
-
- for (int i = 0; i < search_string->length(); i++) {
- if (str_reader.Get(start + i) != search_reader.Get(i)) {
- return ReadOnlyRoots(isolate).false_value();
- }
- }
- return ReadOnlyRoots(isolate).true_value();
-}
-
// ES6 section 21.1.3.9
// String.prototype.lastIndexOf ( searchString [ , position ] )
BUILTIN(StringPrototypeLastIndexOf) {
@@ -290,53 +227,6 @@ BUILTIN(StringPrototypeNormalize) {
}
#endif // !V8_INTL_SUPPORT
-BUILTIN(StringPrototypeStartsWith) {
- HandleScope handle_scope(isolate);
- TO_THIS_STRING(str, "String.prototype.startsWith");
-
- // Check if the search string is a regExp and fail if it is.
- Handle<Object> search = args.atOrUndefined(isolate, 1);
- Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
- if (is_reg_exp.IsNothing()) {
- DCHECK(isolate->has_pending_exception());
- return ReadOnlyRoots(isolate).exception();
- }
- if (is_reg_exp.FromJust()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kFirstArgumentNotRegExp,
- isolate->factory()->NewStringFromStaticChars(
- "String.prototype.startsWith")));
- }
- Handle<String> search_string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
- Object::ToString(isolate, search));
-
- Handle<Object> position = args.atOrUndefined(isolate, 2);
- int start;
-
- if (position->IsUndefined(isolate)) {
- start = 0;
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
- Object::ToInteger(isolate, position));
- start = str->ToValidIndex(*position);
- }
-
- if (start + search_string->length() > str->length()) {
- return ReadOnlyRoots(isolate).false_value();
- }
-
- FlatStringReader str_reader(isolate, String::Flatten(isolate, str));
- FlatStringReader search_reader(isolate,
- String::Flatten(isolate, search_string));
-
- for (int i = 0; i < search_string->length(); i++) {
- if (str_reader.Get(start + i) != search_reader.Get(i)) {
- return ReadOnlyRoots(isolate).false_value();
- }
- }
- return ReadOnlyRoots(isolate).true_value();
-}
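These two C++ builtins are deleted here; this V8 update moves a number of string builtins to other implementations (presumably Torque/CSA elsewhere in the patch), so the observable semantics stay the same:

```js
const s = "String.prototype"; // length 16
console.log(s.startsWith("String"));   // true
console.log(s.startsWith("proto", 7)); // true — position offsets the search
console.log(s.endsWith("proto", 12));  // true — endPosition clamps the end
try {
  s.endsWith(/proto/);                 // RegExp search strings are rejected
} catch (e) {
  console.log(e instanceof TypeError); // true
}
```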
#ifndef V8_INTL_SUPPORT
namespace {
diff --git a/deps/v8/src/builtins/builtins-symbol-gen.cc b/deps/v8/src/builtins/builtins-symbol-gen.cc
index 425fbab5d1..0a9a1f26c7 100644
--- a/deps/v8/src/builtins/builtins-symbol-gen.cc
+++ b/deps/v8/src/builtins/builtins-symbol-gen.cc
@@ -10,8 +10,8 @@ namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// ES6 #sec-symbol-objects
-// ES ##sec-symbol.prototype.description
+// ES #sec-symbol-objects
+// ES #sec-symbol.prototype.description
TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 4e4a9d8db9..66fa69afff 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For public_symbol_table().
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins-trace.cc b/deps/v8/src/builtins/builtins-trace.cc
index dc7e709cae..0cda07a27a 100644
--- a/deps/v8/src/builtins/builtins-trace.cc
+++ b/deps/v8/src/builtins/builtins-trace.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/json-stringifier.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index add9a3af2e..75ff1de59f 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -10,7 +10,7 @@
#include "src/builtins/growable-fixed-array-gen.h"
#include "src/handles-inl.h"
#include "src/heap/factory-inl.h"
-#include "torque-generated/builtins-typed-array-from-dsl-gen.h"
+#include "torque-generated/builtins-typed-array-createtypedarray-from-dsl-gen.h"
namespace v8 {
namespace internal {
@@ -45,19 +45,6 @@ TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
return var_typed_map.value();
}
-// The byte_offset can be higher than Smi range, in which case to perform the
-// pointer arithmetic necessary to calculate external_pointer, converting
-// byte_offset to an intptr is more difficult. The max byte_offset is 8 * MaxSmi
-// on the particular platform. 32 bit platforms are self-limiting, because we
-// can't allocate an array bigger than our 32-bit arithmetic range anyway. 64
-// bit platforms could theoretically have an offset up to 2^35 - 1, so we may
-// need to convert the float heap number to an intptr.
-TNode<UintPtrT> TypedArrayBuiltinsAssembler::CalculateExternalPointer(
- TNode<UintPtrT> backing_store, TNode<Number> byte_offset) {
- return Unsigned(
- IntPtrAdd(backing_store, ChangeNonnegativeNumberToUintPtr(byte_offset)));
-}
-
// Setup the TypedArray which is under construction.
// - Set the length.
// - Set the byte_offset.
@@ -67,6 +54,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
TNode<Smi> length,
TNode<UintPtrT> byte_offset,
TNode<UintPtrT> byte_length) {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(length));
StoreObjectField(holder, JSTypedArray::kLengthOffset, length);
StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteOffsetOffset,
byte_offset,
@@ -80,483 +68,92 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
}
}
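The three fields written here surface directly as the view's JavaScript accessors: length counts elements while the byte fields count raw bytes, so byteLength is always length times BYTES_PER_ELEMENT:

```js
const buf = new ArrayBuffer(32);
const view = new Uint16Array(buf, 4, 8); // byte_offset = 4, length = 8
console.log(view.byteOffset); // 4
console.log(view.length);     // 8 (elements)
console.log(view.byteLength); // 16 = 8 * Uint16Array.BYTES_PER_ELEMENT
```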
-// Attach an off-heap buffer to a TypedArray.
-void TypedArrayBuiltinsAssembler::AttachBuffer(TNode<JSTypedArray> holder,
- TNode<JSArrayBuffer> buffer,
- TNode<Map> map,
- TNode<Smi> length,
- TNode<Number> byte_offset) {
- StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
-
- Node* elements = Allocate(FixedTypedArrayBase::kHeaderSize);
- StoreMapNoWriteBarrier(elements, map);
- StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
- StoreObjectFieldNoWriteBarrier(
- elements, FixedTypedArrayBase::kBasePointerOffset, SmiConstant(0));
-
- TNode<UintPtrT> backing_store =
- LoadObjectField<UintPtrT>(buffer, JSArrayBuffer::kBackingStoreOffset);
-
- TNode<UintPtrT> external_pointer =
- CalculateExternalPointer(backing_store, byte_offset);
- StoreObjectFieldNoWriteBarrier(
- elements, FixedTypedArrayBase::kExternalPointerOffset, external_pointer,
- MachineType::PointerRepresentation());
-
- StoreObjectField(holder, JSObject::kElementsOffset, elements);
-}
-
-TF_BUILTIN(TypedArrayInitializeWithBuffer, TypedArrayBuiltinsAssembler) {
- TNode<JSTypedArray> holder = CAST(Parameter(Descriptor::kHolder));
- TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
- TNode<JSArrayBuffer> buffer = CAST(Parameter(Descriptor::kBuffer));
- TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
- TNode<Number> byte_offset = CAST(Parameter(Descriptor::kByteOffset));
-
- TNode<Map> fixed_typed_map = LoadMapForType(holder);
-
- // SmiMul returns a heap number in case of Smi overflow.
- TNode<Number> byte_length = SmiMul(length, element_size);
-
- SetupTypedArray(holder, length, ChangeNonnegativeNumberToUintPtr(byte_offset),
- ChangeNonnegativeNumberToUintPtr(byte_length));
- AttachBuffer(holder, buffer, fixed_typed_map, length, byte_offset);
- Return(UndefinedConstant());
-}
-
-TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
- TNode<JSTypedArray> holder = CAST(Parameter(Descriptor::kHolder));
- TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
- Node* initialize = Parameter(Descriptor::kInitialize);
- TNode<JSReceiver> buffer_constructor =
- CAST(Parameter(Descriptor::kBufferConstructor));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- CSA_ASSERT(this, TaggedIsPositiveSmi(length));
- CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
- CSA_ASSERT(this, IsBoolean(initialize));
-
- TNode<Smi> byte_offset = SmiConstant(0);
-
- static const int32_t fta_base_data_offset =
- FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
-
- Label setup_holder(this), allocate_on_heap(this), aligned(this),
- allocate_elements(this), allocate_off_heap(this),
- allocate_off_heap_custom_constructor(this),
- allocate_off_heap_no_init(this), attach_buffer(this), done(this);
- TVARIABLE(IntPtrT, var_total_size);
-
- // SmiMul returns a heap number in case of Smi overflow.
- TNode<Number> byte_length = SmiMul(length, element_size);
-
- TNode<Map> fixed_typed_map = LoadMapForType(holder);
-
- // If target and new_target for the buffer differ, allocate off-heap.
- TNode<JSFunction> default_constructor = CAST(LoadContextElement(
- LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX));
- GotoIfNot(WordEqual(buffer_constructor, default_constructor),
- &allocate_off_heap_custom_constructor);
-
- // For buffers with byte_length over the threshold, allocate off-heap.
- GotoIf(TaggedIsNotSmi(byte_length), &allocate_off_heap);
- TNode<Smi> smi_byte_length = CAST(byte_length);
- GotoIf(SmiGreaterThan(smi_byte_length,
- SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
- &allocate_off_heap);
- TNode<IntPtrT> word_byte_length = SmiToIntPtr(smi_byte_length);
- Goto(&allocate_on_heap);
-
- BIND(&allocate_on_heap);
- {
- CSA_ASSERT(this, TaggedIsPositiveSmi(byte_length));
- // Allocate a new ArrayBuffer and initialize it with empty properties and
- // elements.
- Node* native_context = LoadNativeContext(context);
- Node* map =
- LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX);
- Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray);
-
- Node* buffer = Allocate(JSArrayBuffer::kSizeWithEmbedderFields);
- StoreMapNoWriteBarrier(buffer, map);
- StoreObjectFieldNoWriteBarrier(buffer, JSArray::kPropertiesOrHashOffset,
- empty_fixed_array);
- StoreObjectFieldNoWriteBarrier(buffer, JSArray::kElementsOffset,
- empty_fixed_array);
- // Setup the ArrayBuffer.
- // - Set BitField to 0.
- // - Set IsExternal and IsDetachable bits of BitFieldSlot.
- // - Set the byte_length field to byte_length.
- // - Set backing_store to null/Smi(0).
- // - Set all embedder fields to Smi(0).
- if (FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset) != 0) {
- DCHECK_EQ(4, FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset));
- StoreObjectFieldNoWriteBarrier(
- buffer, JSArrayBuffer::kOptionalPaddingOffset, Int32Constant(0),
- MachineRepresentation::kWord32);
- }
- int32_t bitfield_value = (1 << JSArrayBuffer::IsExternalBit::kShift) |
- (1 << JSArrayBuffer::IsDetachableBit::kShift);
- StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldOffset,
- Int32Constant(bitfield_value),
- MachineRepresentation::kWord32);
-
- StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
- SmiToIntPtr(CAST(byte_length)),
- MachineType::PointerRepresentation());
- StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
- SmiConstant(0));
- for (int offset = JSArrayBuffer::kHeaderSize;
- offset < JSArrayBuffer::kSizeWithEmbedderFields;
- offset += kTaggedSize) {
- StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0));
- }
-
- StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
-
- // Check the alignment.
- // TODO(ishell): remove <Object, Object>
- GotoIf(WordEqual<Object, Object>(
- SmiMod(element_size, SmiConstant(kObjectAlignment)),
- SmiConstant(0)),
- &aligned);
-
- // Fix alignment if needed.
- DCHECK_EQ(0, FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask);
- TNode<IntPtrT> aligned_header_size =
- IntPtrConstant(FixedTypedArrayBase::kHeaderSize + kObjectAlignmentMask);
- TNode<IntPtrT> size = IntPtrAdd(word_byte_length, aligned_header_size);
- var_total_size = WordAnd(size, IntPtrConstant(~kObjectAlignmentMask));
- Goto(&allocate_elements);
- }
-
- BIND(&aligned);
- {
- TNode<IntPtrT> header_size =
- IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
- var_total_size = IntPtrAdd(word_byte_length, header_size);
- Goto(&allocate_elements);
- }
-
- BIND(&allocate_elements);
- {
- // Allocate a FixedTypedArray and set the length, base pointer and external
- // pointer.
- CSA_ASSERT(this, IsRegularHeapObjectSize(var_total_size.value()));
-
- Node* elements;
-
- if (UnalignedLoadSupported(MachineRepresentation::kFloat64) &&
- UnalignedStoreSupported(MachineRepresentation::kFloat64)) {
- elements = AllocateInNewSpace(var_total_size.value());
- } else {
- elements = AllocateInNewSpace(var_total_size.value(), kDoubleAlignment);
- }
-
- StoreMapNoWriteBarrier(elements, fixed_typed_map);
- StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
- StoreObjectFieldNoWriteBarrier(
- elements, FixedTypedArrayBase::kBasePointerOffset, elements);
- StoreObjectFieldNoWriteBarrier(elements,
- FixedTypedArrayBase::kExternalPointerOffset,
- IntPtrConstant(fta_base_data_offset),
- MachineType::PointerRepresentation());
-
- StoreObjectField(holder, JSObject::kElementsOffset, elements);
-
- GotoIf(IsFalse(initialize), &done);
- // Initialize the backing store by filling it with 0s.
- Node* backing_store = IntPtrAdd(BitcastTaggedToWord(elements),
- IntPtrConstant(fta_base_data_offset));
- // Call out to memset to perform initialization.
- Node* memset = ExternalConstant(ExternalReference::libc_memset_function());
- CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
- MachineType::IntPtr(), MachineType::UintPtr(), memset,
- backing_store, IntPtrConstant(0), word_byte_length);
- Goto(&done);
- }
-
- TVARIABLE(JSArrayBuffer, var_buffer);
-
- BIND(&allocate_off_heap);
- {
- GotoIf(IsFalse(initialize), &allocate_off_heap_no_init);
- var_buffer = CAST(Construct(context, default_constructor, byte_length));
- Goto(&attach_buffer);
- }
-
- BIND(&allocate_off_heap_custom_constructor);
- {
- var_buffer =
- CAST(CallStub(CodeFactory::Construct(isolate()), context,
- default_constructor, buffer_constructor, Int32Constant(1),
- UndefinedConstant(), byte_length));
- Goto(&attach_buffer);
- }
-
- BIND(&allocate_off_heap_no_init);
- {
- Node* buffer_constructor_noinit = LoadContextElement(
- LoadNativeContext(context), Context::ARRAY_BUFFER_NOINIT_FUN_INDEX);
- var_buffer = CAST(CallJS(CodeFactory::Call(isolate()), context,
- buffer_constructor_noinit, UndefinedConstant(),
- byte_length));
- Goto(&attach_buffer);
- }
-
- BIND(&attach_buffer);
- {
- AttachBuffer(holder, var_buffer.value(), fixed_typed_map, length,
- byte_offset);
- Goto(&done);
- }
-
- BIND(&done);
- SetupTypedArray(holder, length, ChangeNonnegativeNumberToUintPtr(byte_offset),
- ChangeNonnegativeNumberToUintPtr(byte_length));
- Return(UndefinedConstant());
-}
-
-// ES6 #sec-typedarray-buffer-byteoffset-length
-void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
+// Allocate a new ArrayBuffer and initialize it with empty properties and
+// elements.
+TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
TNode<Context> context, TNode<JSTypedArray> holder,
- TNode<JSArrayBuffer> buffer, TNode<Object> byte_offset,
- TNode<Object> length, TNode<Smi> element_size) {
- CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
-
- VARIABLE(new_byte_length, MachineRepresentation::kTagged, SmiConstant(0));
- VARIABLE(offset, MachineRepresentation::kTagged, SmiConstant(0));
-
- Label start_offset_error(this, Label::kDeferred),
- byte_length_error(this, Label::kDeferred),
- invalid_offset_error(this, Label::kDeferred);
- Label offset_is_smi(this), offset_not_smi(this, Label::kDeferred),
- check_length(this), call_init(this), invalid_length(this),
- length_undefined(this), length_defined(this), done(this);
-
- GotoIf(IsUndefined(byte_offset), &check_length);
-
- offset.Bind(ToInteger_Inline(context, byte_offset,
- CodeStubAssembler::kTruncateMinusZero));
- Branch(TaggedIsSmi(offset.value()), &offset_is_smi, &offset_not_smi);
-
- // Check that the offset is a multiple of the element size.
- BIND(&offset_is_smi);
- {
- TNode<Smi> smi_offset = CAST(offset.value());
- GotoIf(SmiEqual(smi_offset, SmiConstant(0)), &check_length);
- GotoIf(SmiLessThan(smi_offset, SmiConstant(0)), &invalid_length);
- TNode<Number> remainder = SmiMod(smi_offset, element_size);
- // TODO(ishell): remove <Object, Object>
- Branch(WordEqual<Object, Object>(remainder, SmiConstant(0)), &check_length,
- &start_offset_error);
- }
- BIND(&offset_not_smi);
- {
- GotoIf(IsTrue(CallBuiltin(Builtins::kLessThan, context, offset.value(),
- SmiConstant(0))),
- &invalid_length);
- Node* remainder =
- CallBuiltin(Builtins::kModulus, context, offset.value(), element_size);
- // Remainder can be a heap number.
- Branch(IsTrue(CallBuiltin(Builtins::kEqual, context, remainder,
- SmiConstant(0))),
- &check_length, &start_offset_error);
- }
-
- BIND(&check_length);
- Branch(IsUndefined(length), &length_undefined, &length_defined);
-
- BIND(&length_undefined);
- {
- ThrowIfArrayBufferIsDetached(context, buffer, "Construct");
- TNode<Number> buffer_byte_length = ChangeUintPtrToTagged(
- LoadObjectField<UintPtrT>(buffer, JSArrayBuffer::kByteLengthOffset));
-
- Node* remainder = CallBuiltin(Builtins::kModulus, context,
- buffer_byte_length, element_size);
- // Remainder can be a heap number.
- GotoIf(IsFalse(CallBuiltin(Builtins::kEqual, context, remainder,
- SmiConstant(0))),
- &byte_length_error);
-
- new_byte_length.Bind(CallBuiltin(Builtins::kSubtract, context,
- buffer_byte_length, offset.value()));
-
- Branch(IsTrue(CallBuiltin(Builtins::kLessThan, context,
- new_byte_length.value(), SmiConstant(0))),
- &invalid_offset_error, &call_init);
- }
-
- BIND(&length_defined);
- {
- TNode<Smi> new_length = ToSmiIndex(context, length, &invalid_length);
- ThrowIfArrayBufferIsDetached(context, buffer, "Construct");
- new_byte_length.Bind(SmiMul(new_length, element_size));
- // Reading the byte length must come after the ToIndex operation, which
- // could cause the buffer to become detached.
- TNode<Number> buffer_byte_length = ChangeUintPtrToTagged(
- LoadObjectField<UintPtrT>(buffer, JSArrayBuffer::kByteLengthOffset));
-
- Node* end = CallBuiltin(Builtins::kAdd, context, offset.value(),
- new_byte_length.value());
-
- Branch(IsTrue(CallBuiltin(Builtins::kGreaterThan, context, end,
- buffer_byte_length)),
- &invalid_length, &call_init);
- }
-
- BIND(&call_init);
- {
- TNode<Object> raw_length = CallBuiltin(
- Builtins::kDivide, context, new_byte_length.value(), element_size);
- // Force the result into a Smi, or throw a range error if it doesn't fit.
- TNode<Smi> new_length = ToSmiIndex(context, raw_length, &invalid_length);
-
- CallBuiltin(Builtins::kTypedArrayInitializeWithBuffer, context, holder,
- new_length, buffer, element_size, offset.value());
- Goto(&done);
- }
-
- BIND(&invalid_offset_error);
- { ThrowRangeError(context, MessageTemplate::kInvalidOffset, byte_offset); }
-
- BIND(&start_offset_error);
- {
- Node* holder_map = LoadMap(holder);
- Node* problem_string = StringConstant("start offset");
- CallRuntime(Runtime::kThrowInvalidTypedArrayAlignment, context, holder_map,
- problem_string);
-
- Unreachable();
- }
-
- BIND(&byte_length_error);
- {
- Node* holder_map = LoadMap(holder);
- Node* problem_string = StringConstant("byte length");
- CallRuntime(Runtime::kThrowInvalidTypedArrayAlignment, context, holder_map,
- problem_string);
-
- Unreachable();
+ TNode<UintPtrT> byte_length) {
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<Map> map =
+ CAST(LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX));
+ TNode<FixedArray> empty_fixed_array =
+ CAST(LoadRoot(RootIndex::kEmptyFixedArray));
+
+ TNode<JSArrayBuffer> buffer = UncheckedCast<JSArrayBuffer>(
+ Allocate(JSArrayBuffer::kSizeWithEmbedderFields));
+ StoreMapNoWriteBarrier(buffer, map);
+ StoreObjectFieldNoWriteBarrier(buffer, JSArray::kPropertiesOrHashOffset,
+ empty_fixed_array);
+ StoreObjectFieldNoWriteBarrier(buffer, JSArray::kElementsOffset,
+ empty_fixed_array);
+ // Set up the ArrayBuffer.
+ // - Set BitField to 0.
+ // - Set IsExternal and IsDetachable bits of BitFieldSlot.
+ // - Set the byte_length field to byte_length.
+ // - Set backing_store to null/Smi(0).
+ // - Set all embedder fields to Smi(0).
+ if (FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset) != 0) {
+ DCHECK_EQ(4, FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset));
+ StoreObjectFieldNoWriteBarrier(
+ buffer, JSArrayBuffer::kOptionalPaddingOffset, Int32Constant(0),
+ MachineRepresentation::kWord32);
}
+ int32_t bitfield_value = (1 << JSArrayBuffer::IsExternalBit::kShift) |
+ (1 << JSArrayBuffer::IsDetachableBit::kShift);
+ StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldOffset,
+ Int32Constant(bitfield_value),
+ MachineRepresentation::kWord32);
- BIND(&invalid_length);
- {
- ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength, length);
+ StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
+ byte_length,
+ MachineType::PointerRepresentation());
+ StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
+ IntPtrConstant(0),
+ MachineType::PointerRepresentation());
+ for (int offset = JSArrayBuffer::kHeaderSize;
+ offset < JSArrayBuffer::kSizeWithEmbedderFields; offset += kTaggedSize) {
+ StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0));
}
- BIND(&done);
+ StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
+ return buffer;
}
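
The bitfield store above packs several boolean flags (IsExternal, IsDetachable) into one 32-bit word using per-bit shifts. A minimal standalone C++ sketch of the same encoding pattern; the shift constants below are illustrative stand-ins, not V8's actual BitField definitions:

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for JSArrayBuffer::IsExternalBit::kShift and
// JSArrayBuffer::IsDetachableBit::kShift.
constexpr int kIsExternalShift = 1;
constexpr int kIsDetachableShift = 2;

constexpr uint32_t EncodeBitField(bool is_external, bool is_detachable) {
  return (static_cast<uint32_t>(is_external) << kIsExternalShift) |
         (static_cast<uint32_t>(is_detachable) << kIsDetachableShift);
}

constexpr bool IsDetachable(uint32_t bitfield) {
  return (bitfield >> kIsDetachableShift) & 1;
}

int main() {
  uint32_t bits = EncodeBitField(/*is_external=*/true, /*is_detachable=*/true);
  assert(IsDetachable(bits));
  return 0;
}
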
-void TypedArrayBuiltinsAssembler::ConstructByTypedArray(
- TNode<Context> context, TNode<JSTypedArray> holder,
- TNode<JSTypedArray> typed_array, TNode<Smi> element_size) {
- CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
-
- TNode<JSFunction> const default_constructor = CAST(LoadContextElement(
- LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX));
-
- Label construct(this), if_detached(this), if_notdetached(this),
- check_for_sab(this), if_buffernotshared(this), check_prototype(this),
- done(this);
- TVARIABLE(JSReceiver, buffer_constructor, default_constructor);
-
- TNode<JSArrayBuffer> source_buffer = LoadObjectField<JSArrayBuffer>(
- typed_array, JSArrayBufferView::kBufferOffset);
- Branch(IsDetachedBuffer(source_buffer), &if_detached, &if_notdetached);
-
- // TODO(petermarshall): Throw on detached typedArray.
- TVARIABLE(Smi, source_length);
- BIND(&if_detached);
- source_length = SmiConstant(0);
- Goto(&check_for_sab);
-
- BIND(&if_notdetached);
- source_length = LoadJSTypedArrayLength(typed_array);
- Goto(&check_for_sab);
+TNode<FixedTypedArrayBase> TypedArrayBuiltinsAssembler::AllocateOnHeapElements(
+ TNode<Map> map, TNode<IntPtrT> total_size, TNode<Number> length) {
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(total_size, IntPtrConstant(0)));
- // The spec requires that constructing a typed array using a SAB-backed typed
- // array use the ArrayBuffer constructor, not the species constructor. See
- // https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
- BIND(&check_for_sab);
- TNode<Uint32T> bitfield =
- LoadObjectField<Uint32T>(source_buffer, JSArrayBuffer::kBitFieldOffset);
- Branch(IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield), &construct,
- &if_buffernotshared);
+ // Allocate a FixedTypedArray and set the length, base pointer and external
+ // pointer.
+ CSA_ASSERT(this, IsRegularHeapObjectSize(total_size));
- BIND(&if_buffernotshared);
- {
- buffer_constructor =
- SpeciesConstructor(context, source_buffer, default_constructor);
- // TODO(petermarshall): Throw on detached typedArray.
- GotoIfNot(IsDetachedBuffer(source_buffer), &construct);
- source_length = SmiConstant(0);
- Goto(&construct);
- }
+ TNode<Object> elements;
- BIND(&construct);
- {
- TypedArrayBuiltinsFromDSLAssembler(this->state())
- .ConstructByArrayLike(context, holder, typed_array,
- source_length.value(), element_size,
- buffer_constructor.value());
- Goto(&done);
+ if (UnalignedLoadSupported(MachineRepresentation::kFloat64) &&
+ UnalignedStoreSupported(MachineRepresentation::kFloat64)) {
+ elements = AllocateInNewSpace(total_size);
+ } else {
+ elements = AllocateInNewSpace(total_size, kDoubleAlignment);
}
- BIND(&done);
+ StoreMapNoWriteBarrier(elements, map);
+ StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
+ StoreObjectFieldNoWriteBarrier(
+ elements, FixedTypedArrayBase::kBasePointerOffset, elements);
+ StoreObjectFieldNoWriteBarrier(
+ elements, FixedTypedArrayBase::kExternalPointerOffset,
+ IntPtrConstant(FixedTypedArrayBase::ExternalPointerValueForOnHeapArray()),
+ MachineType::PointerRepresentation());
+ return CAST(elements);
}
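
The branch on UnalignedLoadSupported/UnalignedStoreSupported above requests double (8-byte) alignment only on targets that cannot read a float64 at an arbitrary address. A sketch of the usual round-up arithmetic an aligned allocation path applies, under the assumption that alignment is a power of two (this is not the CSA allocator itself):

#include <cassert>
#include <cstddef>

// Round size up to a power-of-two alignment, as a double-aligned
// allocation path would before carving out the object.
constexpr size_t RoundUp(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(RoundUp(20, 8) == 24);  // needs padding
  assert(RoundUp(32, 8) == 32);  // already aligned
  return 0;
}
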
-Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
- CSA_ASSERT(this, IsJSTypedArray(typed_array));
- Node* elements = LoadElements(typed_array);
+TNode<RawPtrT> TypedArrayBuiltinsAssembler::LoadDataPtr(
+ TNode<JSTypedArray> typed_array) {
+ TNode<FixedArrayBase> elements = LoadElements(typed_array);
CSA_ASSERT(this, IsFixedTypedArray(elements));
return LoadFixedTypedArrayBackingStore(CAST(elements));
}
-TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
- TNode<Number> byte_length) {
- Label smi(this), done(this);
- TVARIABLE(BoolT, is_valid);
- GotoIf(TaggedIsSmi(byte_length), &smi);
-
- TNode<Float64T> float_value = LoadHeapNumberValue(CAST(byte_length));
- TNode<Float64T> max_byte_length_double =
- Float64Constant(FixedTypedArrayBase::kMaxByteLength);
- is_valid = Float64LessThanOrEqual(float_value, max_byte_length_double);
- Goto(&done);
-
- BIND(&smi);
- TNode<IntPtrT> max_byte_length =
- IntPtrConstant(FixedTypedArrayBase::kMaxByteLength);
- is_valid =
- UintPtrLessThanOrEqual(SmiUntag(CAST(byte_length)), max_byte_length);
- Goto(&done);
-
- BIND(&done);
- return is_valid.value();
-}
-
-void TypedArrayBuiltinsAssembler::ConstructByIterable(
- TNode<Context> context, TNode<JSTypedArray> holder,
- TNode<JSReceiver> iterable, TNode<JSReceiver> iterator_fn,
- TNode<Smi> element_size) {
- Label fast_path(this), slow_path(this), done(this);
- CSA_ASSERT(this, IsCallable(iterator_fn));
-
- TNode<JSArray> array_like =
- CAST(CallBuiltin(Builtins::kIterableToListMayPreserveHoles, context,
- iterable, iterator_fn));
- TNode<Object> initial_length = LoadJSArrayLength(array_like);
-
- TNode<JSFunction> default_constructor = CAST(LoadContextElement(
- LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX));
- TypedArrayBuiltinsFromDSLAssembler(this->state())
- .ConstructByArrayLike(context, holder, array_like, initial_length,
- element_size, default_constructor);
-}
-
TF_BUILTIN(TypedArrayBaseConstructor, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowTypeError(context, MessageTemplate::kConstructAbstractClass,
@@ -564,108 +161,6 @@ TF_BUILTIN(TypedArrayBaseConstructor, TypedArrayBuiltinsAssembler) {
}
// ES #sec-typedarray-constructors
-TF_BUILTIN(CreateTypedArray, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
- TNode<JSReceiver> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Object> arg1 = CAST(Parameter(Descriptor::kArg1));
- TNode<Object> arg2 = CAST(Parameter(Descriptor::kArg2));
- TNode<Object> arg3 = CAST(Parameter(Descriptor::kArg3));
-
- CSA_ASSERT(this, IsConstructor(target));
- CSA_ASSERT(this, IsJSReceiver(new_target));
-
- Label if_arg1isbuffer(this), if_arg1istypedarray(this),
- if_arg1isreceiver(this), if_arg1isnumber(this), return_result(this);
-
- ConstructorBuiltinsAssembler constructor_assembler(this->state());
- TNode<JSTypedArray> result = CAST(
- constructor_assembler.EmitFastNewObject(context, target, new_target));
- // We need to set the byte_offset / byte_length to some sane values
- // to keep the heap verifier happy.
- // TODO(bmeurer): Fix this initialization to not use EmitFastNewObject,
- // which causes the problem, since it puts Undefined into all slots of
- // the object even though that doesn't make any sense for these fields.
- StoreObjectFieldNoWriteBarrier(result, JSTypedArray::kByteOffsetOffset,
- UintPtrConstant(0),
- MachineType::PointerRepresentation());
- StoreObjectFieldNoWriteBarrier(result, JSTypedArray::kByteLengthOffset,
- UintPtrConstant(0),
- MachineType::PointerRepresentation());
-
- TNode<Smi> element_size =
- SmiTag(GetTypedArrayElementSize(LoadElementsKind(result)));
-
- GotoIf(TaggedIsSmi(arg1), &if_arg1isnumber);
- TNode<HeapObject> arg1_heap_object = UncheckedCast<HeapObject>(arg1);
- GotoIf(IsJSArrayBuffer(arg1_heap_object), &if_arg1isbuffer);
- GotoIf(IsJSTypedArray(arg1_heap_object), &if_arg1istypedarray);
- GotoIf(IsJSReceiver(arg1_heap_object), &if_arg1isreceiver);
- Goto(&if_arg1isnumber);
-
- // https://tc39.github.io/ecma262/#sec-typedarray-buffer-byteoffset-length
- BIND(&if_arg1isbuffer);
- {
- ConstructByArrayBuffer(context, result, CAST(arg1), arg2, arg3,
- element_size);
- Goto(&return_result);
- }
-
- // https://tc39.github.io/ecma262/#sec-typedarray-typedarray
- BIND(&if_arg1istypedarray);
- {
- TNode<JSTypedArray> typed_array = CAST(arg1_heap_object);
- ConstructByTypedArray(context, result, typed_array, element_size);
- Goto(&return_result);
- }
-
- // https://tc39.github.io/ecma262/#sec-typedarray-object
- BIND(&if_arg1isreceiver);
- {
- Label if_iteratorundefined(this), if_iteratornotcallable(this);
- // Get the @@iterator method.
- TNode<Object> iteratorFn = CAST(GetMethod(
- context, arg1_heap_object, isolate()->factory()->iterator_symbol(),
- &if_iteratorundefined));
- GotoIf(TaggedIsSmi(iteratorFn), &if_iteratornotcallable);
- GotoIfNot(IsCallable(CAST(iteratorFn)), &if_iteratornotcallable);
-
- ConstructByIterable(context, result, CAST(arg1_heap_object),
- CAST(iteratorFn), element_size);
- Goto(&return_result);
-
- BIND(&if_iteratorundefined);
- {
- TNode<HeapObject> array_like = arg1_heap_object;
- TNode<Object> initial_length =
- GetProperty(context, arg1, LengthStringConstant());
-
- TNode<JSFunction> default_constructor = CAST(LoadContextElement(
- LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX));
- TypedArrayBuiltinsFromDSLAssembler(this->state())
- .ConstructByArrayLike(context, result, array_like, initial_length,
- element_size, default_constructor);
- Goto(&return_result);
- }
-
- BIND(&if_iteratornotcallable);
- { ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable); }
- }
-
- // The first argument was a number, or fell through the checks above and
- // is treated as a number.
- // https://tc39.github.io/ecma262/#sec-typedarray-length
- BIND(&if_arg1isnumber);
- {
- TypedArrayBuiltinsFromDSLAssembler(this->state())
- .ConstructByLength(context, result, arg1, element_size);
- Goto(&return_result);
- }
-
- BIND(&return_result);
- Return(result);
-}
-
-// ES #sec-typedarray-constructors
TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<JSFunction> target = CAST(Parameter(Descriptor::kJSTarget));
@@ -774,6 +269,28 @@ TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
return element_size.value();
}
+TypedArrayBuiltinsFromDSLAssembler::TypedArrayElementsInfo
+TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
+ TNode<JSTypedArray> typed_array) {
+ TNode<Int32T> elements_kind = LoadElementsKind(typed_array);
+ TVARIABLE(UintPtrT, var_size_log2);
+ TVARIABLE(Map, var_map);
+ ReadOnlyRoots roots(isolate());
+
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ DCHECK_GT(size, 0);
+ var_size_log2 = UintPtrConstant(ElementsKindToShiftSize(kind));
+
+ Handle<Map> map(roots.MapForFixedTypedArray(kind), isolate());
+ var_map = HeapConstant(map);
+ });
+
+ return TypedArrayBuiltinsFromDSLAssembler::TypedArrayElementsInfo{
+ var_size_log2.value(), var_map.value(), elements_kind};
+}
+
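GetTypedArrayElementsInfo above records the element size as a log2 shift (via ElementsKindToShiftSize) rather than a byte count, so later code can scale an index with a shift instead of a multiply. A sketch of that convention; the Kind enum and values are illustrative:

#include <cassert>
#include <cstdint>

enum class Kind { Uint8, Uint16, Float64 };

// Analogous to ElementsKindToShiftSize: log2 of the element byte size.
constexpr int ShiftSize(Kind k) {
  switch (k) {
    case Kind::Uint8:   return 0;  // 1-byte elements
    case Kind::Uint16:  return 1;  // 2-byte elements
    case Kind::Float64: return 3;  // 8-byte elements
  }
  return 0;
}

int main() {
  uintptr_t index = 5;
  // Byte offset of element 5 in a Float64 array: index << log2(size).
  assert((index << ShiftSize(Kind::Float64)) == 40);
  return 0;
}
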
TNode<JSFunction> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
TNode<Context> context, TNode<JSTypedArray> exemplar) {
TVARIABLE(IntPtrT, context_slot);
@@ -789,72 +306,6 @@ TNode<JSFunction> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
LoadContextElement(LoadNativeContext(context), context_slot.value()));
}
-template <class... TArgs>
-TNode<JSTypedArray> TypedArrayBuiltinsAssembler::TypedArraySpeciesCreate(
- const char* method_name, TNode<Context> context,
- TNode<JSTypedArray> exemplar, TArgs... args) {
- TVARIABLE(JSTypedArray, var_new_typed_array);
- Label slow(this, Label::kDeferred), done(this);
-
- // Let defaultConstructor be the intrinsic object listed in column one of
- // Table 52 for exemplar.[[TypedArrayName]].
- TNode<JSFunction> default_constructor =
- GetDefaultConstructor(context, exemplar);
-
- TNode<Map> map = LoadMap(exemplar);
- GotoIfNot(IsPrototypeTypedArrayPrototype(context, map), &slow);
- GotoIf(IsTypedArraySpeciesProtectorCellInvalid(), &slow);
- {
- const size_t argc = sizeof...(args);
- static_assert(argc >= 1 && argc <= 3,
- "TypedArraySpeciesCreate called with unexpected arguments");
- TNode<Object> arg_list[argc] = {args...};
- TNode<Object> arg0 = argc < 1 ? UndefinedConstant() : arg_list[0];
- TNode<Object> arg1 = argc < 2 ? UndefinedConstant() : arg_list[1];
- TNode<Object> arg2 = argc < 3 ? UndefinedConstant() : arg_list[2];
- var_new_typed_array = UncheckedCast<JSTypedArray>(
- CallBuiltin(Builtins::kCreateTypedArray, context, default_constructor,
- default_constructor, arg0, arg1, arg2));
-#ifdef DEBUG
- // It is assumed that the CreateTypedArray builtin does not produce a
- // typed array that fails ValidateTypedArray.
- TNode<JSArrayBuffer> buffer =
- LoadJSArrayBufferViewBuffer(var_new_typed_array.value());
- CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(buffer)));
-#endif // DEBUG
- Goto(&done);
- }
- BIND(&slow);
- {
- // Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
- TNode<JSReceiver> constructor =
- SpeciesConstructor(context, exemplar, default_constructor);
-
- // Let newTypedArray be ? Construct(constructor, argumentList).
- TNode<JSReceiver> new_object = Construct(context, constructor, args...);
-
- // Perform ? ValidateTypedArray(newTypedArray).
- var_new_typed_array = ValidateTypedArray(context, new_object, method_name);
- Goto(&done);
- }
-
- BIND(&done);
- return var_new_typed_array.value();
-}
-
-TNode<JSTypedArray>
-TypedArrayBuiltinsAssembler::TypedArraySpeciesCreateByLength(
- TNode<Context> context, TNode<JSTypedArray> exemplar, TNode<Smi> len,
- const char* method_name) {
- CSA_ASSERT(this, TaggedIsPositiveSmi(len));
-
- TNode<JSTypedArray> new_typed_array =
- TypedArraySpeciesCreate(method_name, context, exemplar, len);
-
- ThrowIfLengthLessThan(context, new_typed_array, len);
- return new_typed_array;
-}
-
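The removed TypedArraySpeciesCreate shows the standard protector-cell shape: while nothing has patched @@species, call the known default constructor directly; otherwise fall back to the spec's SpeciesConstructor lookup and validate the result. A hedged sketch of that two-path dispatch; the protector flag, TypedArray struct, and constructors below are stand-ins, not V8 types:

#include <cassert>
#include <functional>

struct TypedArray { int length; };

// Stand-ins for the species protector cell and the per-kind default
// constructor.
static bool species_protector_intact = true;
static TypedArray DefaultConstruct(int len) { return TypedArray{len}; }

TypedArray SpeciesCreate(int len,
                         const std::function<TypedArray(int)>& species) {
  if (species_protector_intact) {
    return DefaultConstruct(len);  // fast path: no user code runs
  }
  TypedArray result = species(len);  // slow path: user constructor
  // A real implementation would also ValidateTypedArray(result) here.
  return result;
}

int main() {
  TypedArray a = SpeciesCreate(8, DefaultConstruct);
  assert(a.length == 8);
  return 0;
}
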
TNode<JSTypedArray> TypedArrayBuiltinsAssembler::TypedArrayCreateByLength(
TNode<Context> context, TNode<Object> constructor, TNode<Smi> len,
const char* method_name) {
@@ -941,8 +392,8 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
// Grab pointers and byte lengths we need later on.
- TNode<IntPtrT> target_data_ptr = UncheckedCast<IntPtrT>(LoadDataPtr(target));
- TNode<IntPtrT> source_data_ptr = UncheckedCast<IntPtrT>(LoadDataPtr(source));
+ TNode<RawPtrT> target_data_ptr = LoadDataPtr(target);
+ TNode<RawPtrT> source_data_ptr = LoadDataPtr(source);
TNode<Word32T> source_el_kind = LoadElementsKind(source);
TNode<Word32T> target_el_kind = LoadElementsKind(target);
@@ -967,9 +418,9 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
BIND(&call_memmove);
{
- TNode<IntPtrT> target_start =
- IntPtrAdd(target_data_ptr, IntPtrMul(offset, target_el_size));
- CallCMemmove(target_start, source_data_ptr, source_byte_length);
+ TNode<RawPtrT> target_start =
+ RawPtrAdd(target_data_ptr, IntPtrMul(offset, target_el_size));
+ CallCMemmove(target_start, source_data_ptr, Unsigned(source_byte_length));
Goto(&out);
}
@@ -1039,9 +490,9 @@ void TypedArrayBuiltinsAssembler::SetJSArraySource(
BIND(&out);
}
-void TypedArrayBuiltinsAssembler::CallCMemmove(TNode<IntPtrT> dest_ptr,
- TNode<IntPtrT> src_ptr,
- TNode<IntPtrT> byte_length) {
+void TypedArrayBuiltinsAssembler::CallCMemmove(TNode<RawPtrT> dest_ptr,
+ TNode<RawPtrT> src_ptr,
+ TNode<UintPtrT> byte_length) {
TNode<ExternalReference> memmove =
ExternalConstant(ExternalReference::libc_memmove_function());
CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
@@ -1059,6 +510,16 @@ void TypedArrayBuiltinsAssembler::CallCMemcpy(TNode<RawPtrT> dest_ptr,
dest_ptr, src_ptr, byte_length);
}
+void TypedArrayBuiltinsAssembler::CallCMemset(TNode<RawPtrT> dest_ptr,
+ TNode<IntPtrT> value,
+ TNode<UintPtrT> length) {
+ TNode<ExternalReference> memset =
+ ExternalConstant(ExternalReference::libc_memset_function());
+ CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::IntPtr(), MachineType::UintPtr(), memset,
+ dest_ptr, value, length);
+}
+
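CallCMemmove and the new CallCMemset are thin typed wrappers that hand raw pointers and an unsigned byte count to libc. The equivalent direct C++ calls, without the CSA external-reference indirection:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint8_t src[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  uint8_t dst[8] = {0};

  // memmove tolerates overlapping ranges; the byte count is size_t
  // (unsigned), matching the UintPtrT byte_length parameter above.
  std::memmove(dst, src, sizeof(src));
  assert(dst[7] == 8);

  // memset takes the fill value as an int, as in CallCMemset's IntPtrT value.
  std::memset(dst, 0, sizeof(dst));
  assert(dst[0] == 0);
  return 0;
}
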
void TypedArrayBuiltinsAssembler::
CallCCopyFastNumberJSArrayElementsToTypedArray(TNode<Context> context,
TNode<JSArray> source,
@@ -1136,6 +597,13 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
BIND(&next);
}
+TNode<BoolT> TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(
+ TNode<JSArrayBuffer> buffer) {
+ TNode<Uint32T> bitfield =
+ LoadObjectField<Uint32T>(buffer, JSArrayBuffer::kBitFieldOffset);
+ return IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield);
+}
+
// ES #sec-get-%typedarray%.prototype.set
TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
const char* method_name = "%TypedArray%.prototype.set";
@@ -1214,189 +682,6 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kNotTypedArray);
}
-// ES %TypedArray%.prototype.slice
-TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
- const char* method_name = "%TypedArray%.prototype.slice";
- Label call_c(this), call_memmove(this), if_count_is_not_zero(this),
- if_bigint_mixed_types(this, Label::kDeferred);
-
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CodeStubArguments args(
- this,
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
-
- TNode<Object> receiver = args.GetReceiver();
- TNode<JSTypedArray> source =
- ValidateTypedArray(context, receiver, method_name);
-
- TNode<Smi> source_length = LoadJSTypedArrayLength(source);
-
- // Convert start offset argument to integer, and calculate relative offset.
- TNode<Object> start = args.GetOptionalArgumentValue(0, SmiConstant(0));
- TNode<Smi> start_index =
- SmiTag(ConvertToRelativeIndex(context, start, SmiUntag(source_length)));
-
- // Convert end offset argument to integer, and calculate relative offset.
- // If end offset is not given or undefined is given, set source_length to
- // "end_index".
- TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
- TNode<Smi> end_index =
- Select<Smi>(IsUndefined(end), [=] { return source_length; },
- [=] {
- return SmiTag(ConvertToRelativeIndex(
- context, end, SmiUntag(source_length)));
- });
-
- // Create a result array by invoking TypedArraySpeciesCreate.
- TNode<Smi> count = SmiMax(SmiSub(end_index, start_index), SmiConstant(0));
- TNode<JSTypedArray> result_array =
- TypedArraySpeciesCreateByLength(context, source, count, method_name);
-
- // If count is zero, return early.
- GotoIf(SmiGreaterThan(count, SmiConstant(0)), &if_count_is_not_zero);
- args.PopAndReturn(result_array);
-
- BIND(&if_count_is_not_zero);
- // Check whether the source buffer is detached. We don't need to check the
- // result array, since TypedArraySpeciesCreate has already validated it.
- CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(LoadObjectField(
- result_array, JSTypedArray::kBufferOffset))));
- TNode<JSArrayBuffer> receiver_buffer =
- LoadJSArrayBufferViewBuffer(CAST(receiver));
- ThrowIfArrayBufferIsDetached(context, receiver_buffer, method_name);
-
- // result_array could have a different type from source, or could share a
- // buffer with source, because of a custom species constructor. If source
- // and result have the same elements kind and do not share a buffer, use
- // memmove.
- TNode<Word32T> source_el_kind = LoadElementsKind(source);
- TNode<Word32T> target_el_kind = LoadElementsKind(result_array);
- GotoIfNot(Word32Equal(source_el_kind, target_el_kind), &call_c);
-
- TNode<Object> target_buffer =
- LoadObjectField(result_array, JSTypedArray::kBufferOffset);
- Branch(WordEqual(receiver_buffer, target_buffer), &call_c, &call_memmove);
-
- BIND(&call_memmove);
- {
- GotoIfForceSlowPath(&call_c);
-
- TNode<IntPtrT> target_data_ptr =
- UncheckedCast<IntPtrT>(LoadDataPtr(result_array));
- TNode<IntPtrT> source_data_ptr =
- UncheckedCast<IntPtrT>(LoadDataPtr(source));
-
- TNode<IntPtrT> source_el_size = GetTypedArrayElementSize(source_el_kind);
- TNode<IntPtrT> source_start_bytes =
- IntPtrMul(SmiToIntPtr(start_index), source_el_size);
- TNode<IntPtrT> source_start =
- IntPtrAdd(source_data_ptr, source_start_bytes);
-
- TNode<IntPtrT> count_bytes = IntPtrMul(SmiToIntPtr(count), source_el_size);
-
-#ifdef DEBUG
- TNode<UintPtrT> target_byte_length =
- LoadJSArrayBufferViewByteLength(result_array);
- CSA_ASSERT(this, UintPtrLessThanOrEqual(Unsigned(count_bytes),
- target_byte_length));
- TNode<UintPtrT> source_byte_length =
- LoadJSArrayBufferViewByteLength(source);
- TNode<UintPtrT> source_size_in_bytes =
- UintPtrSub(source_byte_length, Unsigned(source_start_bytes));
- CSA_ASSERT(this, UintPtrLessThanOrEqual(Unsigned(count_bytes),
- source_size_in_bytes));
-#endif // DEBUG
-
- CallCMemmove(target_data_ptr, source_start, count_bytes);
- args.PopAndReturn(result_array);
- }
-
- BIND(&call_c);
- {
- GotoIf(Word32NotEqual(IsBigInt64ElementsKind(source_el_kind),
- IsBigInt64ElementsKind(target_el_kind)),
- &if_bigint_mixed_types);
-
- CallCCopyTypedArrayElementsSlice(
- source, result_array, SmiToIntPtr(start_index), SmiToIntPtr(end_index));
- args.PopAndReturn(result_array);
- }
-
- BIND(&if_bigint_mixed_types);
- ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
-}
-
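The slice builtin removed above first converts begin/end with ConvertToRelativeIndex (negative values count back from the length, then clamp to the valid range) and sizes the result as max(end - begin, 0). A sketch of that index arithmetic, assumed to match the spec's clamping rather than the CSA code verbatim:

#include <algorithm>
#include <cassert>

// Relative index as in %TypedArray%.prototype.slice: negative indices
// are taken from the end, and the result is clamped to [0, length].
long RelativeIndex(long index, long length) {
  if (index < 0) return std::max(length + index, 0L);
  return std::min(index, length);
}

int main() {
  long length = 10;
  long begin = RelativeIndex(-3, length);  // 7
  long end = RelativeIndex(100, length);   // clamped to 10
  long count = std::max(end - begin, 0L);  // 3 elements to copy
  assert(begin == 7 && end == 10 && count == 3);
  return 0;
}
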
-// ES %TypedArray%.prototype.subarray
-TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) {
- const char* method_name = "%TypedArray%.prototype.subarray";
- Label offset_done(this);
-
- TVARIABLE(Smi, var_begin);
- TVARIABLE(Smi, var_end);
-
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CodeStubArguments args(
- this,
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
-
- // 1. Let O be the this value.
- // 3. If O does not have a [[TypedArrayName]] internal slot, throw a TypeError
- // exception.
- TNode<Object> receiver = args.GetReceiver();
- ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, method_name);
-
- TNode<JSTypedArray> source = CAST(receiver);
-
- // 5. Let buffer be O.[[ViewedArrayBuffer]].
- TNode<JSArrayBuffer> buffer = GetBuffer(context, source);
- // 6. Let srcLength be O.[[ArrayLength]].
- TNode<Smi> source_length = LoadJSTypedArrayLength(source);
-
- // 7. Let relativeBegin be ? ToInteger(begin).
- // 8. If relativeBegin < 0, let beginIndex be max((srcLength + relativeBegin),
- // 0); else let beginIndex be min(relativeBegin, srcLength).
- TNode<Object> begin = args.GetOptionalArgumentValue(0, SmiConstant(0));
- var_begin =
- SmiTag(ConvertToRelativeIndex(context, begin, SmiUntag(source_length)));
-
- TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
- // 9. If end is undefined, let relativeEnd be srcLength;
- var_end = source_length;
- GotoIf(IsUndefined(end), &offset_done);
-
- // else, let relativeEnd be ? ToInteger(end).
- // 10. If relativeEnd < 0, let endIndex be max((srcLength + relativeEnd), 0);
- // else let endIndex be min(relativeEnd, srcLength).
- var_end =
- SmiTag(ConvertToRelativeIndex(context, end, SmiUntag(source_length)));
- Goto(&offset_done);
-
- BIND(&offset_done);
-
- // 11. Let newLength be max(endIndex - beginIndex, 0).
- TNode<Smi> new_length =
- SmiMax(SmiSub(var_end.value(), var_begin.value()), SmiConstant(0));
-
- // 12. Let constructorName be the String value of O.[[TypedArrayName]].
- // 13. Let elementSize be the Number value of the Element Size value specified
- // in Table 52 for constructorName.
- TNode<Word32T> element_kind = LoadElementsKind(source);
- TNode<IntPtrT> element_size = GetTypedArrayElementSize(element_kind);
-
- // 14. Let srcByteOffset be O.[[ByteOffset]].
- TNode<Number> source_byte_offset =
- ChangeUintPtrToTagged(LoadJSArrayBufferViewByteOffset(source));
-
- // 15. Let beginByteOffset be srcByteOffset + beginIndex Ɨ elementSize.
- TNode<Number> offset = SmiMul(var_begin.value(), SmiFromIntPtr(element_size));
- TNode<Number> begin_byte_offset = NumberAdd(source_byte_offset, offset);
-
- // 16. Let argumentsList be Ā« buffer, beginByteOffset, newLength Ā».
- // 17. Return ? TypedArraySpeciesCreate(O, argumentsList).
- args.PopAndReturn(TypedArraySpeciesCreate(
- method_name, context, source, buffer, begin_byte_offset, new_length));
-}
-
// ES #sec-get-%typedarray%.prototype-@@tostringtag
TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1796,106 +1081,6 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
"%TypedArray%.from");
}
-// ES %TypedArray%.prototype.filter
-TF_BUILTIN(TypedArrayPrototypeFilter, TypedArrayBuiltinsAssembler) {
- const char* method_name = "%TypedArray%.prototype.filter";
-
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CodeStubArguments args(
- this,
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
-
- Label if_callback_not_callable(this, Label::kDeferred),
- detached(this, Label::kDeferred);
-
- // 1. Let O be the this value.
- // 2. Perform ? ValidateTypedArray(O).
- TNode<Object> receiver = args.GetReceiver();
- TNode<JSTypedArray> source =
- ValidateTypedArray(context, receiver, method_name);
-
- // 3. Let len be O.[[ArrayLength]].
- TNode<Smi> length = LoadJSTypedArrayLength(source);
-
- // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
- TNode<Object> callbackfn = args.GetOptionalArgumentValue(0);
- GotoIf(TaggedIsSmi(callbackfn), &if_callback_not_callable);
- GotoIfNot(IsCallable(CAST(callbackfn)), &if_callback_not_callable);
-
- // 5. If thisArg is present, let T be thisArg; else let T be undefined.
- TNode<Object> this_arg = args.GetOptionalArgumentValue(1);
-
- TNode<JSArrayBuffer> source_buffer =
- LoadObjectField<JSArrayBuffer>(source, JSArrayBufferView::kBufferOffset);
- TNode<Word32T> elements_kind = LoadElementsKind(source);
- GrowableFixedArray values(state());
- VariableList vars(
- {values.var_array(), values.var_length(), values.var_capacity()}, zone());
-
- // 6. Let kept be a new empty List.
- // 7. Let k be 0.
- // 8. Let captured be 0.
- // 9. Repeat, while k < len
- BuildFastLoop(
- vars, SmiConstant(0), length,
- [&](Node* index) {
- GotoIf(IsDetachedBuffer(source_buffer), &detached);
-
- TVARIABLE(Numeric, value);
- // a. Let Pk be ! ToString(k).
- // b. Let kValue be ? Get(O, Pk).
- DispatchTypedArrayByElementsKind(
- elements_kind,
- [&](ElementsKind kind, int size, int typed_array_fun_index) {
- TNode<IntPtrT> backing_store =
- UncheckedCast<IntPtrT>(LoadDataPtr(source));
- value = CAST(LoadFixedTypedArrayElementAsTagged(
- backing_store, index, kind, ParameterMode::SMI_PARAMETERS));
- });
-
- // c. Let selected be ToBoolean(Call(callbackfn, T, kValue, k, O))
- Node* selected =
- CallJS(CodeFactory::Call(isolate()), context, callbackfn, this_arg,
- value.value(), index, source);
-
- Label true_continue(this), false_continue(this);
- BranchIfToBooleanIsTrue(selected, &true_continue, &false_continue);
-
- BIND(&true_continue);
- // d. If selected is true, then
- // i. Append kValue to the end of kept.
- // ii. Increase captured by 1.
- values.Push(value.value());
- Goto(&false_continue);
-
- BIND(&false_continue);
- },
- 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
-
- TNode<JSArray> values_array = values.ToJSArray(context);
- TNode<Smi> captured = LoadFastJSArrayLength(values_array);
-
- // 10. Let A be ? TypedArraySpeciesCreate(O, captured).
- TNode<JSTypedArray> result_array =
- TypedArraySpeciesCreateByLength(context, source, captured, method_name);
-
- // 11. Let n be 0.
- // 12. For each element e of kept, do
- // a. Perform ! Set(A, ! ToString(n), e, true).
- // b. Increment n by 1.
- CallRuntime(Runtime::kTypedArrayCopyElements, context, result_array,
- values_array, captured);
-
- // 13. Return A.
- args.PopAndReturn(result_array);
-
- BIND(&if_callback_not_callable);
- ThrowTypeError(context, MessageTemplate::kCalledNonCallable, callbackfn);
-
- BIND(&detached);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
-}
-
#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index a82b32c25c..ab0ee6016d 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -6,12 +6,15 @@
#define V8_BUILTINS_BUILTINS_TYPED_ARRAY_GEN_H_
#include "src/code-stub-assembler.h"
+#include "torque-generated/builtins-typed-array-from-dsl-gen.h"
namespace v8 {
namespace internal {
class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
public:
+ using ElementsInfo =
+ TypedArrayBuiltinsFromDSLAssembler::TypedArrayElementsInfo;
explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
@@ -21,40 +24,31 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<JSTypedArray> exemplar,
TArgs... args);
- TNode<JSTypedArray> TypedArraySpeciesCreateByLength(
- TNode<Context> context, TNode<JSTypedArray> exemplar, TNode<Smi> len,
- const char* method_name);
-
void GenerateTypedArrayPrototypeIterationMethod(TNode<Context> context,
TNode<Object> receiver,
const char* method_name,
IterationKind iteration_kind);
- void ConstructByArrayBuffer(TNode<Context> context,
- TNode<JSTypedArray> holder,
- TNode<JSArrayBuffer> buffer,
- TNode<Object> byte_offset, TNode<Object> length,
- TNode<Smi> element_size);
- void ConstructByTypedArray(TNode<Context> context, TNode<JSTypedArray> holder,
- TNode<JSTypedArray> typed_array,
- TNode<Smi> element_size);
- void ConstructByIterable(TNode<Context> context, TNode<JSTypedArray> holder,
- TNode<JSReceiver> iterable,
- TNode<JSReceiver> iterator_fn,
- TNode<Smi> element_size);
-
void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
TNode<UintPtrT> byte_offset,
TNode<UintPtrT> byte_length);
void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
TNode<Map> map, TNode<Smi> length,
- TNode<Number> byte_offset);
+ TNode<UintPtrT> byte_offset);
+
+ TNode<JSArrayBuffer> AllocateEmptyOnHeapBuffer(TNode<Context> context,
+ TNode<JSTypedArray> holder,
+ TNode<UintPtrT> byte_length);
+
+ TNode<FixedTypedArrayBase> AllocateOnHeapElements(TNode<Map> map,
+ TNode<IntPtrT> byte_length,
+ TNode<Number> length);
TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
+ TNode<BoolT> IsMockArrayBufferAllocatorFlag();
TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
- TNode<Number> byte_offset);
- Node* LoadDataPtr(Node* typed_array);
- TNode<BoolT> ByteLengthIsValid(TNode<Number> byte_length);
+ TNode<UintPtrT> byte_offset);
+ TNode<RawPtrT> LoadDataPtr(TNode<JSTypedArray> typed_array);
// Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
@@ -65,10 +59,8 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
// Returns the byte size of an element for a TypedArray elements kind.
TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
- TNode<JSArrayBuffer> LoadTypedArrayBuffer(TNode<JSTypedArray> typed_array) {
- return LoadObjectField<JSArrayBuffer>(typed_array,
- JSTypedArray::kBufferOffset);
- }
+ // Returns information (element size, map and elements kind) about a
+ // TypedArray's elements.
+ ElementsInfo GetTypedArrayElementsInfo(TNode<JSTypedArray> typed_array);
TNode<JSFunction> GetDefaultConstructor(TNode<Context> context,
TNode<JSTypedArray> exemplar);
@@ -99,12 +91,15 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<JSTypedArray> target, TNode<IntPtrT> offset,
Label* call_runtime, Label* if_source_too_large);
- void CallCMemmove(TNode<IntPtrT> dest_ptr, TNode<IntPtrT> src_ptr,
- TNode<IntPtrT> byte_length);
+ void CallCMemmove(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
+ TNode<UintPtrT> byte_length);
void CallCMemcpy(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length);
+ void CallCMemset(TNode<RawPtrT> dest_ptr, TNode<IntPtrT> value,
+ TNode<UintPtrT> length);
+
void CallCCopyFastNumberJSArrayElementsToTypedArray(
TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
TNode<IntPtrT> source_length, TNode<IntPtrT> offset);
@@ -123,6 +118,8 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void DispatchTypedArrayByElementsKind(
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
+
+ TNode<BoolT> IsSharedArrayBuffer(TNode<JSArrayBuffer> buffer);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index a79ff81101..382a835b49 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -90,6 +90,13 @@ TF_BUILTIN(WasmStackGuard, WasmBuiltinsAssembler) {
TailCallRuntimeWithCEntry(Runtime::kWasmStackGuard, centry, context);
}
+TF_BUILTIN(WasmStackOverflow, WasmBuiltinsAssembler) {
+ TNode<Object> instance = LoadInstanceFromFrame();
+ TNode<Code> centry = LoadCEntryFromInstance(instance);
+ TNode<Object> context = LoadContextFromInstance(instance);
+ TailCallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, centry, context);
+}
+
TF_BUILTIN(WasmThrow, WasmBuiltinsAssembler) {
TNode<Object> exception = UncheckedParameter(Descriptor::kException);
TNode<Object> instance = LoadInstanceFromFrame();
diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc
index e89deb705b..1d8a6f39f6 100644
--- a/deps/v8/src/builtins/builtins-weak-refs.cc
+++ b/deps/v8/src/builtins/builtins-weak-refs.cc
@@ -9,7 +9,7 @@
namespace v8 {
namespace internal {
-BUILTIN(WeakFactoryConstructor) {
+BUILTIN(FinalizationGroupConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
@@ -31,91 +31,92 @@ BUILTIN(WeakFactoryConstructor) {
isolate, result,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSWeakFactory> weak_factory = Handle<JSWeakFactory>::cast(result);
- weak_factory->set_native_context(*isolate->native_context());
- weak_factory->set_cleanup(*cleanup);
- weak_factory->set_flags(
- JSWeakFactory::ScheduledForCleanupField::encode(false));
- return *weak_factory;
+ Handle<JSFinalizationGroup> finalization_group =
+ Handle<JSFinalizationGroup>::cast(result);
+ finalization_group->set_native_context(*isolate->native_context());
+ finalization_group->set_cleanup(*cleanup);
+ finalization_group->set_flags(
+ JSFinalizationGroup::ScheduledForCleanupField::encode(false));
+
+ DCHECK(finalization_group->active_cells()->IsUndefined(isolate));
+ DCHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
+ DCHECK(finalization_group->key_map()->IsUndefined(isolate));
+ return *finalization_group;
}
-BUILTIN(WeakFactoryMakeCell) {
+BUILTIN(FinalizationGroupRegister) {
HandleScope scope(isolate);
- const char* method_name = "WeakFactory.prototype.makeCell";
+ const char* method_name = "FinalizationGroup.prototype.register";
- CHECK_RECEIVER(JSWeakFactory, weak_factory, method_name);
+ CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
Handle<Object> target = args.atOrUndefined(isolate, 1);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
- NewTypeError(MessageTemplate::kWeakRefsMakeCellTargetMustBeObject));
+ NewTypeError(MessageTemplate::kWeakRefsRegisterTargetMustBeObject));
}
- Handle<JSReceiver> target_receiver = Handle<JSReceiver>::cast(target);
Handle<Object> holdings = args.atOrUndefined(isolate, 2);
if (target->SameValue(*holdings)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(
- MessageTemplate::kWeakRefsMakeCellTargetAndHoldingsMustNotBeSame));
+ MessageTemplate::kWeakRefsRegisterTargetAndHoldingsMustNotBeSame));
}
+ Handle<Object> key = args.atOrUndefined(isolate, 3);
+ // TODO(marja, gsathya): Restrictions on "key" (e.g., does it need to be an
+ // object).
+
// TODO(marja): Realms.
- Handle<Map> weak_cell_map(isolate->native_context()->js_weak_cell_map(),
- isolate);
-
- // Allocate the JSWeakCell object in the old space, because 1) JSWeakCell
- // weakness handling is only implemented in the old space and 2) they're
- // supposedly long-lived. TODO(marja): Support JSWeakCells in Scavenger.
- Handle<JSWeakCell> weak_cell =
- Handle<JSWeakCell>::cast(isolate->factory()->NewJSObjectFromMap(
- weak_cell_map, TENURED, Handle<AllocationSite>::null()));
- weak_cell->set_target(*target_receiver);
- weak_cell->set_holdings(*holdings);
- weak_factory->AddWeakCell(*weak_cell);
- return *weak_cell;
+ JSFinalizationGroup::Register(finalization_group,
+ Handle<JSReceiver>::cast(target), holdings, key,
+ isolate);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+BUILTIN(FinalizationGroupUnregister) {
+ HandleScope scope(isolate);
+ const char* method_name = "FinalizationGroup.prototype.unregister";
+
+ CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
+
+ Handle<Object> key = args.atOrUndefined(isolate, 1);
+ JSFinalizationGroup::Unregister(finalization_group, key, isolate);
+ return ReadOnlyRoots(isolate).undefined_value();
}
-BUILTIN(WeakFactoryCleanupSome) {
+BUILTIN(FinalizationGroupCleanupSome) {
HandleScope scope(isolate);
- const char* method_name = "WeakFactory.prototype.cleanupSome";
+ const char* method_name = "FinalizationGroup.prototype.cleanupSome";
- CHECK_RECEIVER(JSWeakFactory, weak_factory, method_name);
+ CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
+
+ // TODO(marja, gsathya): Add missing "cleanup" callback.
// Don't do set_scheduled_for_cleanup(false); we still have the microtask
// scheduled and don't want to schedule another one in case the user never
// executes microtasks.
- JSWeakFactory::Cleanup(weak_factory, isolate);
+ JSFinalizationGroup::Cleanup(finalization_group, isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
-BUILTIN(WeakFactoryCleanupIteratorNext) {
+BUILTIN(FinalizationGroupCleanupIteratorNext) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSWeakFactoryCleanupIterator, iterator, "next");
+ CHECK_RECEIVER(JSFinalizationGroupCleanupIterator, iterator, "next");
- Handle<JSWeakFactory> weak_factory(iterator->factory(), isolate);
- if (!weak_factory->NeedsCleanup()) {
+ Handle<JSFinalizationGroup> finalization_group(iterator->finalization_group(),
+ isolate);
+ if (!finalization_group->NeedsCleanup()) {
return *isolate->factory()->NewJSIteratorResult(
handle(ReadOnlyRoots(isolate).undefined_value(), isolate), true);
}
- Handle<JSWeakCell> weak_cell_object =
- handle(weak_factory->PopClearedCell(isolate), isolate);
-
- return *isolate->factory()->NewJSIteratorResult(weak_cell_object, false);
-}
-
-BUILTIN(WeakCellHoldingsGetter) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSWeakCell, weak_cell, "get WeakCell.holdings");
- return weak_cell->holdings();
-}
+ Handle<Object> holdings = handle(
+ JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate),
+ isolate);
-BUILTIN(WeakCellClear) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSWeakCell, weak_cell, "WeakCell.prototype.clear");
- weak_cell->Clear(isolate);
- return ReadOnlyRoots(isolate).undefined_value();
+ return *isolate->factory()->NewJSIteratorResult(holdings, false);
}
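
FinalizationGroupCleanupIteratorNext drains a queue of cleared cells, yielding each cell's holdings until none remain, then returns a done iterator result. A minimal queue-backed sketch of that protocol; the types here are illustrative, not V8's heap objects:

#include <cassert>
#include <deque>
#include <string>

struct IteratorResult {
  std::string value;  // holdings of the popped cleared cell
  bool done;
};

struct FinalizationGroup {
  std::deque<std::string> cleared_cell_holdings;

  bool NeedsCleanup() const { return !cleared_cell_holdings.empty(); }

  IteratorResult Next() {
    if (!NeedsCleanup()) return {"", true};
    std::string holdings = cleared_cell_holdings.front();
    cleared_cell_holdings.pop_front();
    return {holdings, false};
  }
};

int main() {
  FinalizationGroup fg;
  fg.cleared_cell_holdings = {"a", "b"};
  assert(fg.Next().value == "a");
  assert(fg.Next().value == "b");
  assert(fg.Next().done);
  return 0;
}
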
BUILTIN(WeakRefConstructor) {
@@ -135,8 +136,9 @@ BUILTIN(WeakRefConstructor) {
NewTypeError(
MessageTemplate::kWeakRefsWeakRefConstructorTargetMustBeObject));
}
- isolate->heap()->AddKeepDuringJobTarget(
- Handle<JSReceiver>::cast(target_object));
+ Handle<JSReceiver> target_receiver =
+ handle(JSReceiver::cast(*target_object), isolate);
+ isolate->heap()->AddKeepDuringJobTarget(target_receiver);
// TODO(marja): Realms.
@@ -146,7 +148,7 @@ BUILTIN(WeakRefConstructor) {
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
Handle<JSWeakRef> weak_ref = Handle<JSWeakRef>::cast(result);
- weak_ref->set_target(*target_object);
+ weak_ref->set_target(*target_receiver);
return *weak_ref;
}
diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq
index e4bc3b758b..48b2048275 100644
--- a/deps/v8/src/builtins/collections.tq
+++ b/deps/v8/src/builtins/collections.tq
@@ -37,7 +37,7 @@ namespace collections {
goto MayHaveSideEffects;
}
case (o: Object): deferred {
- ThrowTypeError(context, kIteratorValueNotAnObject, o);
+ ThrowTypeError(kIteratorValueNotAnObject, o);
}
}
}
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index 516fed39d3..5aacf2ef75 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -39,7 +39,7 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
// Must be generating embedded builtin code.
- DCHECK(isolate_->ShouldLoadConstantsFromRootList());
+ DCHECK(isolate_->IsGeneratingEmbeddedBuiltins());
// All code objects should be loaded through the root register or use
// pc-relative addressing.
@@ -69,7 +69,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
- DCHECK(isolate_->ShouldLoadConstantsFromRootList());
+ DCHECK(isolate_->IsGeneratingEmbeddedBuiltins());
DCHECK(self_reference->IsOddball());
DCHECK(Oddball::cast(*self_reference)->kind() ==
@@ -88,7 +88,7 @@ void BuiltinsConstantsTableBuilder::Finalize() {
DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
- DCHECK(isolate_->ShouldLoadConstantsFromRootList());
+ DCHECK(isolate_->IsGeneratingEmbeddedBuiltins());
// An empty map means there's nothing to do.
if (map_.size() == 0) return;
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index c354313e29..e90da0ad1b 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -5,14 +5,6 @@
#include 'src/builtins/builtins-data-view-gen.h'
namespace data_view {
- extern operator '.buffer'
- macro LoadJSArrayBufferViewBuffer(JSArrayBufferView): JSArrayBuffer;
- extern operator '.byte_length'
- macro LoadJSArrayBufferViewByteLength(JSArrayBufferView): uintptr;
- extern operator '.byte_offset'
- macro LoadJSArrayBufferViewByteOffset(JSArrayBufferView): uintptr;
- extern operator '.backing_store'
- macro LoadJSArrayBufferBackingStore(JSArrayBuffer): RawPtr;
macro MakeDataViewGetterNameString(kind: constexpr ElementsKind): String {
if constexpr (kind == UINT8_ELEMENTS) {
@@ -76,7 +68,7 @@ namespace data_view {
return Cast<JSDataView>(o) otherwise CastError;
}
label CastError {
- ThrowTypeError(context, kIncompatibleMethodReceiver, method);
+ ThrowTypeError(kIncompatibleMethodReceiver, method);
}
}
@@ -401,15 +393,14 @@ namespace data_view {
getIndex = ToIndex(offset, context) otherwise RangeError;
}
label RangeError {
- ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ ThrowRangeError(kInvalidDataViewAccessorOffset);
}
let littleEndian: bool = ToBoolean(requestedLittleEndian);
let buffer: JSArrayBuffer = dataView.buffer;
if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(
- context, kDetachedOperation, MakeDataViewGetterNameString(kind));
+ ThrowTypeError(kDetachedOperation, MakeDataViewGetterNameString(kind));
}
let getIndexFloat: float64 = Convert<float64>(getIndex);
@@ -420,7 +411,7 @@ namespace data_view {
let elementSizeFloat: float64 = DataViewElementSize(kind);
if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
- ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ ThrowRangeError(kInvalidDataViewAccessorOffset);
}
let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
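
The Torque accessor performs its range check in float64 (getIndexFloat + elementSizeFloat > viewSizeFloat), presumably so the addition cannot wrap the way an unsigned word sum could. A sketch of why the double comparison is safe here, under that assumed rationale:

#include <cassert>
#include <cstdint>

// True when [index, index + element_size) fits inside view_size. Indices a
// JS program can produce stay below 2^53, so the double sum is exact in the
// cases that matter; a pathological index only gets larger, never wraps.
bool InBounds(uint64_t index, uint64_t element_size, uint64_t view_size) {
  return static_cast<double>(index) + static_cast<double>(element_size) <=
         static_cast<double>(view_size);
}

int main() {
  assert(InBounds(0, 8, 8));                    // exactly fits
  assert(!InBounds(4, 8, 8));                   // runs past the end
  assert(!InBounds(UINT64_MAX - 4, 8, 1024));   // no wraparound false positive
  return 0;
}
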
@@ -679,7 +670,7 @@ namespace data_view {
getIndex = ToIndex(offset, context) otherwise RangeError;
}
label RangeError {
- ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ ThrowRangeError(kInvalidDataViewAccessorOffset);
}
let littleEndian: bool = ToBoolean(requestedLittleEndian);
@@ -691,8 +682,7 @@ namespace data_view {
let bigIntValue: BigInt = ToBigInt(context, value);
if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(
- context, kDetachedOperation, MakeDataViewSetterNameString(kind));
+ ThrowTypeError(kDetachedOperation, MakeDataViewSetterNameString(kind));
}
let getIndexFloat: float64 = Convert<float64>(getIndex);
@@ -703,7 +693,7 @@ namespace data_view {
let elementSizeFloat: float64 = DataViewElementSize(kind);
if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
- ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ ThrowRangeError(kInvalidDataViewAccessorOffset);
}
let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
@@ -712,8 +702,7 @@ namespace data_view {
let numValue: Number = ToNumber(context, value);
if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(
- context, kDetachedOperation, MakeDataViewSetterNameString(kind));
+ ThrowTypeError(kDetachedOperation, MakeDataViewSetterNameString(kind));
}
let getIndexFloat: float64 = Convert<float64>(getIndex);
@@ -724,7 +713,7 @@ namespace data_view {
let elementSizeFloat: float64 = DataViewElementSize(kind);
if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
- ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ ThrowRangeError(kInvalidDataViewAccessorOffset);
}
let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
diff --git a/deps/v8/src/builtins/frames.tq b/deps/v8/src/builtins/frames.tq
index 109991af5a..960fa9f68c 100644
--- a/deps/v8/src/builtins/frames.tq
+++ b/deps/v8/src/builtins/frames.tq
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-type FrameType extends Smi
- generates 'TNode<Smi>' constexpr 'StackFrame::Type';
+type FrameType extends Smi constexpr 'StackFrame::Type';
const ARGUMENTS_ADAPTOR_FRAME: constexpr FrameType
generates 'StackFrame::ARGUMENTS_ADAPTOR';
const STUB_FRAME: constexpr FrameType
@@ -19,7 +18,7 @@ FromConstexpr<FrameType, constexpr FrameType>(t: constexpr FrameType):
// up by a single bit.
const i: constexpr uintptr = %RawConstexprCast<constexpr uintptr>(t)
<< kSmiTagSize;
- return %RawObjectCast<FrameType>(BitcastWordToTaggedSigned(i));
+ return %RawDownCast<FrameType>(BitcastWordToTaggedSigned(i));
}
Cast<FrameType>(o: Object): FrameType
labels CastError {
@@ -27,17 +26,13 @@ Cast<FrameType>(o: Object): FrameType
assert(
(Convert<uintptr>(BitcastTaggedToWord(o)) >>> kSmiTagSize) <
kFrameTypeCount);
- return %RawObjectCast<FrameType>(o);
+ return %RawDownCast<FrameType>(o);
}
-type FrameBase extends RawPtr
- generates 'TNode<RawPtrT>' constexpr 'void*';
-type StandardFrame extends FrameBase
- generates 'TNode<RawPtrT>' constexpr 'void*';
-type ArgumentsAdaptorFrame extends FrameBase
- generates 'TNode<RawPtrT>' constexpr 'void*';
-type StubFrame extends FrameBase
- generates 'TNode<RawPtrT>' constexpr 'void*';
+type FrameBase extends RawPtr constexpr 'void*';
+type StandardFrame extends FrameBase constexpr 'void*';
+type ArgumentsAdaptorFrame extends FrameBase constexpr 'void*';
+type StubFrame extends FrameBase constexpr 'void*';
type Frame = ArgumentsAdaptorFrame | StandardFrame | StubFrame;
extern macro LoadFramePointer(): Frame;
@@ -57,18 +52,18 @@ macro LoadSmiFromFrame(f: Frame, o: constexpr int32): Smi {
const kStandardFrameFunctionOffset: constexpr int31
generates 'StandardFrameConstants::kFunctionOffset';
operator '.function' macro LoadFunctionFromFrame(f: Frame): JSFunction {
- // TODO(danno): Use RawObjectCast here in order to avoid passing the implicit
+ // TODO(danno): Use RawDownCast here in order to avoid passing the implicit
// context, since this accessor is used in legacy CSA code through
// LoadTargetFromFrame
const result: Object = LoadObjectFromFrame(f, kStandardFrameFunctionOffset);
- return %RawObjectCast<JSFunction>(result);
+ return %RawDownCast<JSFunction>(result);
}
const kStandardFrameCallerFPOffset: constexpr int31
generates 'StandardFrameConstants::kCallerFPOffset';
operator '.caller' macro LoadCallerFromFrame(f: Frame): Frame {
const result: RawPtr = LoadPointerFromFrame(f, kStandardFrameCallerFPOffset);
- return %RawPointerCast<Frame>(result);
+ return %RawDownCast<Frame>(result);
}
type ContextOrFrameType = Context | FrameType;
@@ -124,7 +119,7 @@ Cast<StandardFrame>(implicit context: Context)(f: Frame):
// stack from generated code.
// See the descriptions and frame layouts in src/frame-constants.h.
if (IsContext(o)) {
- return %RawPointerCast<StandardFrame>(f);
+ return %RawDownCast<StandardFrame>(f);
}
goto CastError;
}
@@ -134,7 +129,7 @@ Cast<ArgumentsAdaptorFrame>(implicit context: Context)(f: Frame):
const t: FrameType =
Cast<FrameType>(f.context_or_frame_type) otherwise CastError;
if (t == ARGUMENTS_ADAPTOR_FRAME) {
- return %RawPointerCast<ArgumentsAdaptorFrame>(f);
+ return %RawDownCast<ArgumentsAdaptorFrame>(f);
}
goto CastError;
}
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
index 7af40748b5..dd23ac5b5a 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.cc
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -27,7 +27,7 @@ void GrowableFixedArray::Push(TNode<Object> const value) {
BIND(&store);
{
TNode<FixedArray> const array = var_array_.value();
- StoreFixedArrayElement(array, length, value);
+ UnsafeStoreFixedArrayElement(array, length, value);
var_length_ = IntPtrAdd(length, IntPtrConstant(1));
}
@@ -56,11 +56,8 @@ TNode<JSArray> GrowableFixedArray::ToJSArray(TNode<Context> const context) {
}
TNode<Smi> const result_length = SmiTag(length());
- TNode<JSArray> const result = AllocateUninitializedJSArrayWithoutElements(
- array_map, result_length, nullptr);
-
- StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
-
+ TNode<JSArray> const result =
+ AllocateJSArray(array_map, var_array_.value(), result_length);
return result;
}
diff --git a/deps/v8/src/builtins/growable-fixed-array.tq b/deps/v8/src/builtins/growable-fixed-array.tq
new file mode 100644
index 0000000000..eb62e56c98
--- /dev/null
+++ b/deps/v8/src/builtins/growable-fixed-array.tq
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace growable_fixed_array {
+ // TODO(pwong): Support FixedTypedArrays.
+ struct GrowableFixedArray {
+ Push(obj: Object) {
+ this.EnsureCapacity();
+ this.array.objects[this.length++] = obj;
+ }
+ ResizeFixedArray(newCapacity: intptr): FixedArray {
+ assert(this.length >= 0);
+ assert(newCapacity >= 0);
+ assert(newCapacity >= this.length);
+ const first: intptr = 0;
+ return ExtractFixedArray(
+ this.array, first, this.length, newCapacity, kFixedArrays);
+ }
+ EnsureCapacity() {
+ assert(this.length <= this.capacity);
+ if (this.capacity == this.length) {
+      // Growth rate is analogous to JSObject::NewElementsCapacity:
+ // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
+ this.capacity = this.capacity + (this.capacity >> 1) + 16;
+ this.array = this.ResizeFixedArray(this.capacity);
+ }
+ }
+ ToJSArray(implicit context: Context)(): JSArray {
+ const nativeContext: NativeContext = LoadNativeContext(context);
+ const map: Map = LoadJSArrayElementsMap(PACKED_ELEMENTS, nativeContext);
+ const fixedArray: FixedArray = this.ResizeFixedArray(this.length);
+ const lengthSmi = Convert<Smi>(this.length);
+ return AllocateJSArray(map, fixedArray, lengthSmi);
+ }
+
+ array: FixedArray;
+ capacity: intptr;
+ length: intptr;
+ }
+
+ macro NewGrowableFixedArray(): GrowableFixedArray {
+ return GrowableFixedArray{kEmptyFixedArray, 0, 0};
+ }
+}
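
The Torque GrowableFixedArray grows with new_capacity = capacity + (capacity >> 1) + 16, roughly 1.5x plus a constant so small arrays do not reallocate on every push. A quick sketch of the resulting capacity sequence:

#include <cassert>

// Growth rule from EnsureCapacity above (and JSObject::NewElementsCapacity).
int Grow(int capacity) { return capacity + (capacity >> 1) + 16; }

int main() {
  int c = 0;
  c = Grow(c);  assert(c == 16);  // 0  -> 16
  c = Grow(c);  assert(c == 40);  // 16 -> 40
  c = Grow(c);  assert(c == 76);  // 40 -> 76 (~1.5x + 16 each step)
  return 0;
}
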
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 8e70a4cd0a..fa72ec2278 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -12,6 +12,8 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
#include "src/objects/cell.h"
@@ -109,13 +111,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- edx: new target
// -- esi: pointer to last argument
// -- ecx: counter
- // -- sp[0*kPointerSize]: the hole (receiver)
- // -- sp[1*kPointerSize]: number of arguments (tagged)
- // -- sp[2*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: the hole (receiver)
+ // -- sp[1*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[2*kSystemPointerSize]: context
// -----------------------------------
__ jmp(&entry);
__ bind(&loop);
- __ push(Operand(esi, ecx, times_4, 0));
+ __ push(Operand(esi, ecx, times_system_pointer_size, 0));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
@@ -139,7 +141,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, edx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ lea(esp, Operand(esp, edx, times_half_system_pointer_size,
+ 1 * kSystemPointerSize)); // 1 ~ receiver
__ PushReturnAddressFrom(ecx);
__ ret(0);
}
@@ -157,11 +160,11 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
__ mov(scratch, __ ExternalReferenceAsOperand(real_stack_limit, scratch));
__ sub(scratch, esp);
// Add the size of the arguments.
- static_assert(kPointerSize == 4,
- "The next instruction assumes kPointerSize == 4");
- __ lea(scratch, Operand(scratch, num_args, times_4, 0));
+ static_assert(kSystemPointerSize == 4,
+ "The next instruction assumes kSystemPointerSize == 4");
+ __ lea(scratch, Operand(scratch, num_args, times_system_pointer_size, 0));
if (include_receiver) {
- __ add(scratch, Immediate(kPointerSize));
+ __ add(scratch, Immediate(kSystemPointerSize));
}
// See if we overflowed, i.e. scratch is positive.
__ cmp(scratch, Immediate(0));
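
Generate_StackOverflowCheck computes limit - sp plus the argument bytes and treats a non-negative result as overflow (the j(greater_equal, ...) that follows): pushing the arguments would take sp at or below the real stack limit. The same arithmetic in plain C++, as a sketch with registers replaced by integers:

#include <cassert>
#include <cstdint>

// Stack grows down: overflow if (limit - sp) + needed_bytes >= 0,
// mirroring the greater_equal branch after the cmp above.
bool WouldOverflow(intptr_t sp, intptr_t limit, intptr_t num_args,
                   intptr_t pointer_size) {
  intptr_t scratch = limit - sp + num_args * pointer_size;
  return scratch >= 0;
}

int main() {
  // 256 bytes of headroom, pushing 4 four-byte args: fine.
  assert(!WouldOverflow(/*sp=*/10000, /*limit=*/9744, 4, 4));
  // Only 8 bytes of headroom, pushing 4 args: overflow.
  assert(WouldOverflow(/*sp=*/9752, /*limit=*/9744, 4, 4));
  return 0;
}
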
@@ -195,17 +198,18 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(edx);
// ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: new target
- // -- sp[1*kPointerSize]: padding
- // -- edi and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: argument count
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- edi and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: argument count
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(eax, SharedFunctionInfo::kFlagsOffset),
- Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
- __ j(not_zero, &not_create_implicit_receiver);
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(eax);
+ __ JumpIfIsInRange(eax, kDefaultDerivedConstructor, kDerivedConstructor,
+ ecx, &not_create_implicit_receiver, Label::kNear);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -220,11 +224,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: implicit receiver
- // -- Slot 4 / sp[0*kPointerSize]: new target
- // -- Slot 3 / sp[1*kPointerSize]: padding
- // -- Slot 2 / sp[2*kPointerSize]: constructor function
- // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[4*kPointerSize]: context
+ // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+ // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+ // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -242,12 +246,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx: new target
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: padding
- // -- sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: padding
+ // -- sp[3*kSystemPointerSize]: constructor function
+ // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[5*kSystemPointerSize]: context
// -----------------------------------
// Restore argument count.
@@ -280,16 +284,16 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- edx: new target
// -- edi: pointer to last argument
// -- ecx: counter (tagged)
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: padding
- // -- sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: padding
+ // -- sp[3*kSystemPointerSize]: constructor function
+ // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[5*kSystemPointerSize]: context
// -----------------------------------
__ jmp(&entry, Label::kNear);
__ bind(&loop);
- __ Push(Operand(edi, ecx, times_pointer_size, 0));
+ __ Push(Operand(edi, ecx, times_system_pointer_size, 0));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
@@ -301,11 +305,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: constructor result
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: padding
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -342,7 +346,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0 * kPointerSize));
+ __ mov(eax, Operand(esp, 0 * kSystemPointerSize));
__ JumpIfRoot(eax, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_frame);
@@ -353,7 +357,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ pop(ecx);
- __ lea(esp, Operand(esp, edx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ lea(esp, Operand(esp, edx, times_half_system_pointer_size,
+ 1 * kSystemPointerSize)); // 1 ~ receiver
__ push(ecx);
__ ret(0);
}
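
Across the JSConstructStubGeneric hunks, the old single-bit test against IsDerivedConstructorBit becomes: decode the whole FunctionKindBits field and check whether the kind falls in the contiguous derived-constructor range. JumpIfIsInRange can do that with one unsigned comparison; a sketch of the idiom, with illustrative enum values (the real ordering lives in V8's FunctionKind):

    #include <cstdint>

    // Illustrative values only; the point is that all derived-constructor
    // kinds occupy one contiguous range of the enum.
    enum FunctionKind : uint8_t {
      kDefaultDerivedConstructor = 6,
      kDerivedConstructor = 7,
    };

    inline bool IsDerivedConstructorKind(uint8_t kind) {
      // lo <= kind && kind <= hi, folded into a single unsigned compare:
      return static_cast<uint8_t>(kind - kDefaultDerivedConstructor) <=
             static_cast<uint8_t>(kDerivedConstructor -
                                  kDefaultDerivedConstructor);
    }
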
@@ -395,7 +400,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ push(Immediate(StackFrame::TypeToMarker(type)));
// Reserve a slot for the context. It is filled after the root register has
// been set up.
- __ sub(esp, Immediate(kPointerSize));
+ __ sub(esp, Immediate(kSystemPointerSize));
// Save callee-saved registers (C calling conventions).
__ push(edi);
__ push(esi);
@@ -416,7 +421,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
__ mov(edi, __ ExternalReferenceAsOperand(context_address, edi));
- static constexpr int kOffsetToContextSlot = -2 * kPointerSize;
+ static constexpr int kOffsetToContextSlot = -2 * kSystemPointerSize;
__ mov(Operand(ebp, kOffsetToContextSlot), edi);
// If this is the outermost JS call, set js_entry_sp value.
@@ -476,7 +481,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ pop(ebx);
__ pop(esi);
__ pop(edi);
- __ add(esp, Immediate(2 * kPointerSize)); // remove markers
+ __ add(esp, Immediate(2 * kSystemPointerSize)); // remove markers
// Restore frame pointer and return.
__ pop(ebp);
@@ -543,7 +548,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ jmp(&entry, Label::kNear);
__ bind(&loop);
// Push the parameter from argv.
- __ mov(scratch2, Operand(scratch1, ecx, times_4, 0));
+ __ mov(scratch2, Operand(scratch1, ecx, times_system_pointer_size, 0));
__ push(Operand(scratch2, 0)); // dereference handle
__ inc(ecx);
__ bind(&entry);
@@ -669,8 +674,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&loop);
__ cmp(edi, ecx);
__ j(greater_equal, &done_loop);
- __ Push(
- FieldOperand(ebx, edi, times_pointer_size, FixedArray::kHeaderSize));
+ __ Push(FieldOperand(ebx, edi, times_system_pointer_size,
+ FixedArray::kHeaderSize));
__ add(edi, Immediate(1));
__ jmp(&loop);
@@ -940,7 +945,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
#undef JUMP_IF_EQUAL
// Otherwise, load the size of the current bytecode and advance the offset.
- __ add(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
+ __ add(bytecode_offset,
+ Operand(bytecode_size_table, bytecode, times_int_size, 0));
}
// Generate code for entering a JS function with the interpreter.
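
Note the scale factor in the hunk above: the bytecode size table holds plain int entries, so the index is scaled by times_int_size rather than by the pointer size used elsewhere. As ordinary C++:

    #include <cstdint>

    // Sketch of AdvanceBytecodeOffsetOrReturn's final step; names illustrative.
    int AdvanceOffset(const int* bytecode_size_table, uint8_t bytecode,
                      int bytecode_offset) {
      // add(bytecode_offset, Operand(bytecode_size_table, bytecode,
      //                              times_int_size, 0))
      return bytecode_offset + bytecode_size_table[bytecode];
    }
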
@@ -1052,7 +1058,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(eax);
// Continue loop if not done.
__ bind(&loop_check);
- __ sub(frame_size, Immediate(kPointerSize));
+ __ sub(frame_size, Immediate(kSystemPointerSize));
__ j(greater_equal, &loop_header);
}
@@ -1064,7 +1070,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ test(eax, eax);
__ j(zero, &no_incoming_new_target_or_generator_register);
- __ mov(Operand(ebp, eax, times_pointer_size, 0), edx);
+ __ mov(Operand(ebp, eax, times_system_pointer_size, 0), edx);
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator and bytecode offset into registers.
@@ -1081,9 +1087,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ movzx_b(ecx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(
- kJavaScriptCallCodeStartRegister,
- Operand(kInterpreterDispatchTableRegister, ecx, times_pointer_size, 0));
+ __ mov(kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, ecx,
+ times_system_pointer_size, 0));
__ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -1127,7 +1133,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ jmp(&loop_check);
__ bind(&loop_header);
__ Push(Operand(start_address, 0));
- __ sub(start_address, Immediate(kPointerSize));
+ __ sub(start_address, Immediate(kSystemPointerSize));
__ bind(&loop_check);
__ cmp(start_address, array_limit);
__ j(greater, &loop_header, Label::kNear);
@@ -1169,7 +1175,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Find the address of the last argument.
- __ shl(scratch, kPointerSizeLog2);
+ __ shl(scratch, kSystemPointerSizeLog2);
__ neg(scratch);
__ add(scratch, argv);
Generate_InterpreterPushArgs(masm, scratch, argv);
@@ -1226,7 +1232,8 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// Step 1 - Update the stack pointer.
- __ lea(scratch1, Operand(num_args, times_4, kPointerSize));
+ __ lea(scratch1,
+ Operand(num_args, times_system_pointer_size, kSystemPointerSize));
__ AllocateStackFrame(scratch1);
// Step 2 move return_address and slots around it to the correct locations.
@@ -1234,16 +1241,16 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// basically when the source and destination overlap. We at least need one
// extra slot for receiver, so no extra checks are required to avoid copy.
for (int i = 0; i < num_slots_to_move + 1; i++) {
- __ mov(scratch1,
- Operand(esp, num_args, times_pointer_size, (i + 1) * kPointerSize));
- __ mov(Operand(esp, i * kPointerSize), scratch1);
+ __ mov(scratch1, Operand(esp, num_args, times_system_pointer_size,
+ (i + 1) * kSystemPointerSize));
+ __ mov(Operand(esp, i * kSystemPointerSize), scratch1);
}
// Step 3 copy arguments to correct locations.
// Slot meant for receiver contains return address. Reset it so that
// we will not incorrectly interpret return address as an object.
- __ mov(Operand(esp, num_args, times_pointer_size,
- (num_slots_to_move + 1) * kPointerSize),
+ __ mov(Operand(esp, num_args, times_system_pointer_size,
+ (num_slots_to_move + 1) * kSystemPointerSize),
Immediate(0));
__ mov(scratch1, num_args);
@@ -1251,10 +1258,10 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ jmp(&loop_check);
__ bind(&loop_header);
__ mov(scratch2, Operand(start_addr, 0));
- __ mov(Operand(esp, scratch1, times_pointer_size,
- num_slots_to_move * kPointerSize),
+ __ mov(Operand(esp, scratch1, times_system_pointer_size,
+ num_slots_to_move * kSystemPointerSize),
scratch2);
- __ sub(start_addr, Immediate(kPointerSize));
+ __ sub(start_addr, Immediate(kSystemPointerSize));
__ sub(scratch1, Immediate(1));
__ bind(&loop_check);
__ cmp(scratch1, Immediate(0));
@@ -1404,8 +1411,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ movzx_b(scratch, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(kJavaScriptCallCodeStartRegister,
- Operand(kInterpreterDispatchTableRegister, scratch, times_pointer_size,
- 0));
+ Operand(kInterpreterDispatchTableRegister, scratch,
+ times_system_pointer_size, 0));
__ jmp(kJavaScriptCallCodeStartRegister);
}
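
Both dispatch sites above follow the same shape: load one bytecode byte, index a table of code entry addresses (hence times_system_pointer_size), and transfer control through the loaded pointer. A compressed sketch:

    #include <cstdint>

    using Handler = void (*)();

    // Sketch of the interpreter dispatch in the hunks above (illustrative).
    void Dispatch(const uint8_t* bytecode_array, intptr_t offset,
                  Handler const* dispatch_table) {
      uint8_t bytecode = bytecode_array[offset];  // movzx_b(scratch, ...)
      Handler target = dispatch_table[bytecode];  // scaled by the pointer size
      target();  // call or jmp through kJavaScriptCallCodeStartRegister
    }
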
@@ -1468,8 +1475,8 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ j(not_equal, &over, Label::kNear);
}
for (int i = j - 1; i >= 0; --i) {
- __ Push(Operand(
- ebp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
+ __ Push(Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ i * kSystemPointerSize));
}
for (int i = 0; i < 3 - j; ++i) {
__ PushRoot(RootIndex::kUndefinedValue);
@@ -1493,7 +1500,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ PopReturnAddressTo(edx);
__ inc(ecx);
- __ lea(esp, Operand(esp, ecx, times_pointer_size, 0));
+ __ lea(esp, Operand(esp, ecx, times_system_pointer_size, 0));
__ PushReturnAddressFrom(edx);
__ ret(0);
@@ -1520,9 +1527,9 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point.
- __ mov(Operand(esp,
- config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize),
+ __ mov(Operand(esp, config->num_allocatable_general_registers() *
+ kSystemPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize),
eax);
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
@@ -1536,9 +1543,10 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
ebp,
Operand(esp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
const int offsetToPC =
- BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp - kPointerSize;
+ BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
+ kSystemPointerSize;
__ pop(Operand(esp, offsetToPC));
- __ Drop(offsetToPC / kPointerSize);
+ __ Drop(offsetToPC / kSystemPointerSize);
__ add(Operand(esp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ ret(0);
}
@@ -1570,8 +1578,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ ret(1 * kPointerSize); // Remove eax.
+ __ mov(eax, Operand(esp, 1 * kSystemPointerSize));
+ __ ret(1 * kSystemPointerSize); // Remove eax.
}
// static
@@ -1590,22 +1598,25 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
{
Label no_arg_array, no_this_arg;
// Spill receiver to allow the usage of edi as a scratch register.
- __ movd(xmm0, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ movd(xmm0,
+ Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
__ LoadRoot(edx, RootIndex::kUndefinedValue);
__ mov(edi, edx);
__ test(eax, eax);
__ j(zero, &no_this_arg, Label::kNear);
{
- __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
+ __ mov(edi, Operand(esp, eax, times_system_pointer_size, 0));
__ cmp(eax, Immediate(1));
__ j(equal, &no_arg_array, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_pointer_size, -kPointerSize));
+ __ mov(edx,
+ Operand(esp, eax, times_system_pointer_size, -kSystemPointerSize));
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
__ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ lea(esp,
+ Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
__ Push(edi);
__ PushReturnAddressFrom(ecx);
@@ -1667,7 +1678,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
}
// 2. Get the callable to call (passed as receiver) from the stack.
- __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ mov(edi, Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -1676,8 +1687,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Label loop;
__ mov(ecx, eax);
__ bind(&loop);
- __ mov(edx, Operand(esp, ecx, times_pointer_size, 0));
- __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), edx);
+ __ mov(edx, Operand(esp, ecx, times_system_pointer_size, 0));
+ __ mov(Operand(esp, ecx, times_system_pointer_size, kSystemPointerSize),
+ edx);
__ dec(ecx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
__ pop(edx); // Discard copy of return address.
@@ -1708,19 +1720,23 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ mov(ecx, edi);
__ cmp(eax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+ __ mov(edi, Operand(esp, eax, times_system_pointer_size,
+ -0 * kSystemPointerSize));
__ j(equal, &done, Label::kNear);
- __ mov(ecx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ mov(ecx, Operand(esp, eax, times_system_pointer_size,
+ -1 * kSystemPointerSize));
__ cmp(eax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ mov(edx, Operand(esp, eax, times_system_pointer_size,
+ -2 * kSystemPointerSize));
__ bind(&done);
// Spill argumentsList to use edx as a scratch register.
__ movd(xmm0, edx);
__ PopReturnAddressTo(edx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ lea(esp,
+ Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
__ Push(ecx);
__ PushReturnAddressFrom(edx);
@@ -1765,20 +1781,24 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ mov(ecx, edi);
__ cmp(eax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+ __ mov(edi, Operand(esp, eax, times_system_pointer_size,
+ -0 * kSystemPointerSize));
__ mov(edx, edi);
__ j(equal, &done, Label::kNear);
- __ mov(ecx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ mov(ecx, Operand(esp, eax, times_system_pointer_size,
+ -1 * kSystemPointerSize));
__ cmp(eax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ mov(edx, Operand(esp, eax, times_system_pointer_size,
+ -2 * kSystemPointerSize));
__ bind(&done);
// Spill argumentsList to use ecx as a scratch register.
__ movd(xmm0, ecx);
__ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ lea(esp,
+ Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
__ PushRoot(RootIndex::kUndefinedValue);
__ PushReturnAddressFrom(ecx);
@@ -1813,7 +1833,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
- Label generic_array_code;
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray function should be a map.
@@ -1863,7 +1882,8 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Remove caller arguments from the stack.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, edi, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ lea(esp, Operand(esp, edi, times_half_system_pointer_size,
+ 1 * kSystemPointerSize)); // 1 ~ receiver
__ PushReturnAddressFrom(ecx);
}
@@ -1926,7 +1946,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmp(eax, kArgumentsLength);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
- __ mov(edi, FieldOperand(kArgumentsList, eax, times_pointer_size,
+ __ mov(edi, FieldOperand(kArgumentsList, eax, times_system_pointer_size,
FixedArray::kHeaderSize));
__ CompareRoot(edi, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
@@ -2029,7 +2049,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ PopReturnAddressTo(ecx);
__ bind(&loop);
{
- __ Push(Operand(scratch, edx, times_pointer_size, 1 * kPointerSize));
+ __ Push(Operand(scratch, edx, times_system_pointer_size,
+ 1 * kSystemPointerSize));
__ dec(edx);
__ j(not_zero, &loop);
}
@@ -2089,13 +2110,15 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(ecx);
} else {
Label convert_to_object, convert_receiver;
- __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ mov(ecx,
+ Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
__ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx); // Clobbers ecx.
__ j(above_equal, &done_convert);
// Reload the receiver (it was clobbered by CmpObjectType).
- __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ mov(ecx,
+ Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
__ JumpIfRoot(ecx, RootIndex::kUndefinedValue, &convert_global_proxy,
@@ -2131,7 +2154,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
+ __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize),
+ ecx);
}
__ bind(&done_convert);
@@ -2186,7 +2210,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ lea(ecx, Operand(edx, times_pointer_size, 0));
+ __ lea(ecx, Operand(edx, times_system_pointer_size, 0));
__ sub(esp, ecx);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
@@ -2194,7 +2218,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ CompareRealStackLimit(esp);
__ j(above_equal, &done, Label::kNear);
// Restore the stack pointer.
- __ lea(esp, Operand(esp, edx, times_pointer_size, 0));
+ __ lea(esp, Operand(esp, edx, times_system_pointer_size, 0));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2210,10 +2234,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label loop;
__ Set(ecx, 0);
- __ lea(edx, Operand(esp, edx, times_pointer_size, 0));
+ __ lea(edx, Operand(esp, edx, times_system_pointer_size, 0));
__ bind(&loop);
- __ movd(xmm1, Operand(edx, ecx, times_pointer_size, 0));
- __ movd(Operand(esp, ecx, times_pointer_size, 0), xmm1);
+ __ movd(xmm1, Operand(edx, ecx, times_system_pointer_size, 0));
+ __ movd(Operand(esp, ecx, times_system_pointer_size, 0), xmm1);
__ inc(ecx);
__ cmp(ecx, eax);
__ j(less, &loop);
@@ -2227,9 +2251,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SmiUntag(edx);
__ bind(&loop);
__ dec(edx);
- __ movd(xmm1, FieldOperand(ecx, edx, times_pointer_size,
+ __ movd(xmm1, FieldOperand(ecx, edx, times_tagged_size,
FixedArray::kHeaderSize));
- __ movd(Operand(esp, eax, times_pointer_size, 0), xmm1);
+ __ movd(Operand(esp, eax, times_system_pointer_size, 0), xmm1);
__ lea(eax, Operand(eax, 1));
__ j(greater, &loop);
}
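
One detail in the bound-arguments loop above: the source is a FixedArray, whose elements are tagged slots, so reads are scaled by times_tagged_size, while the destination stack slots keep times_system_pointer_size. On ia32 the two sizes coincide; the distinct names keep the code correct once tagged slots and machine words diverge. A sketch of the copy under that assumption:

    #include <cstdint>

    using Tagged = uint32_t;  // tagged slot == machine word on ia32

    // Sketch of Generate_PushBoundArguments' copy: the bound arguments are
    // read from the FixedArray back to front and appended to the stack slots.
    void CopyBoundArguments(const Tagged* elements, int count, Tagged* stack) {
      for (int src = count - 1, dst = 0; src >= 0; --src, ++dst) {
        stack[dst] = elements[src];  // movd through xmm1 in the real code
      }
    }
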
@@ -2256,7 +2280,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
__ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
+ __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize), ecx);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
@@ -2304,7 +2328,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// not we raise an exception).
__ bind(&non_function);
// Overwrite the original receiver with the (original) target.
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize), edi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
__ Jump(masm->isolate()->builtins()->CallFunction(
@@ -2423,7 +2447,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize),
+ edi);
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, edi);
__ Jump(masm->isolate()->builtins()->CallFunction(),
@@ -2464,14 +2489,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_4, offset));
+ __ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
__ mov(eax, -1); // account for receiver
Label copy;
__ bind(&copy);
__ inc(eax);
__ push(Operand(edi, 0));
- __ sub(edi, Immediate(kPointerSize));
+ __ sub(edi, Immediate(kSystemPointerSize));
__ cmp(eax, kExpectedNumberOfArgumentsRegister);
__ j(less, &copy);
// eax now contains the expected number of arguments.
@@ -2491,7 +2516,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_4, offset));
+ __ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
// ecx = expected - actual.
__ sub(kExpectedNumberOfArgumentsRegister, eax);
// eax = -actual - 1
@@ -2502,7 +2527,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&copy);
__ inc(eax);
__ push(Operand(edi, 0));
- __ sub(edi, Immediate(kPointerSize));
+ __ sub(edi, Immediate(kSystemPointerSize));
__ test(eax, eax);
__ j(not_zero, &copy);
@@ -2712,10 +2737,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ CheckStackAlignment();
}
// Call C function.
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ mov(Operand(esp, 0 * kSystemPointerSize), edi); // argc.
+ __ mov(Operand(esp, 1 * kSystemPointerSize), esi); // argv.
__ Move(ecx, Immediate(ExternalReference::isolate_address(masm->isolate())));
- __ mov(Operand(esp, 2 * kPointerSize), ecx);
+ __ mov(Operand(esp, 2 * kSystemPointerSize), ecx);
__ call(kRuntimeCallFunctionRegister);
// Result is in eax or edx:eax - do not destroy these registers!
@@ -2765,11 +2790,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(3, eax);
- __ mov(Operand(esp, 0 * kPointerSize), Immediate(0)); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(0)); // argv.
+ __ mov(Operand(esp, 0 * kSystemPointerSize), Immediate(0)); // argc.
+ __ mov(Operand(esp, 1 * kSystemPointerSize), Immediate(0)); // argv.
__ Move(esi,
Immediate(ExternalReference::isolate_address(masm->isolate())));
- __ mov(Operand(esp, 2 * kPointerSize), esi);
+ __ mov(Operand(esp, 2 * kSystemPointerSize), esi);
__ CallCFunction(find_handler, 3);
}
@@ -2797,7 +2822,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label check_negative, process_64_bits, done;
// Account for return address and saved regs.
- const int kArgumentOffset = 4 * kPointerSize;
+ const int kArgumentOffset = 4 * kSystemPointerSize;
MemOperand mantissa_operand(MemOperand(esp, kArgumentOffset));
MemOperand exponent_operand(
@@ -2886,137 +2911,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ ret(0);
}
-void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = eax;
- const Register scratch = ecx;
- const XMMRegister double_result = xmm3;
- const XMMRegister double_base = xmm2;
- const XMMRegister double_exponent = xmm1;
- const XMMRegister double_scratch = xmm4;
-
- Label call_runtime, done, exponent_not_smi, int_exponent;
-
- // Save 1 in double_result - we need this several times later on.
- __ mov(scratch, Immediate(1));
- __ Cvtsi2sd(double_result, scratch);
-
- Label fast_power, try_arithmetic_simplification;
- __ DoubleToI(exponent, double_exponent, double_scratch,
- &try_arithmetic_simplification, &try_arithmetic_simplification);
- __ jmp(&int_exponent);
-
- __ bind(&try_arithmetic_simplification);
- // Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cvttsd2si(exponent, Operand(double_exponent));
- __ cmp(exponent, Immediate(0x1));
- __ j(overflow, &call_runtime);
-
- // Using FPU instructions to calculate power.
- Label fast_power_failed;
- __ bind(&fast_power);
- __ fnclex(); // Clear flags to catch exceptions later.
- // Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ sub(esp, Immediate(kDoubleSize));
- __ movsd(Operand(esp, 0), double_exponent);
- __ fld_d(Operand(esp, 0)); // E
- __ movsd(Operand(esp, 0), double_base);
- __ fld_d(Operand(esp, 0)); // B, E
-
- // Exponent is in st(1) and base is in st(0)
- // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
- // FYL2X calculates st(1) * log2(st(0))
- __ fyl2x(); // X
- __ fld(0); // X, X
- __ frndint(); // rnd(X), X
- __ fsub(1); // rnd(X), X-rnd(X)
- __ fxch(1); // X - rnd(X), rnd(X)
- // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
- __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
- __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
- // FSCALE calculates st(0) * 2^st(1)
- __ fscale(); // 2^X, rnd(X)
- __ fstp(1); // 2^X
- // Bail out to runtime in case of exceptions in the status word.
- __ fnstsw_ax();
- __ test_b(eax, Immediate(0x5F)); // We check for all but precision exception.
- __ j(not_zero, &fast_power_failed, Label::kNear);
- __ fstp_d(Operand(esp, 0));
- __ movsd(double_result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&done);
-
- __ bind(&fast_power_failed);
- __ fninit();
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&call_runtime);
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
- const XMMRegister double_scratch2 = double_exponent;
- __ mov(scratch, exponent); // Back up exponent.
- __ movsd(double_scratch, double_base); // Back up base.
- __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
-
- // Get absolute value of exponent.
- Label no_neg, while_true, while_false;
- __ test(scratch, scratch);
- __ j(positive, &no_neg, Label::kNear);
- __ neg(scratch);
- __ bind(&no_neg);
-
- __ j(zero, &while_false, Label::kNear);
- __ shr(scratch, 1);
- // Above condition means CF==0 && ZF==0. This means that the
- // bit that has been shifted out is 0 and the result is not 0.
- __ j(above, &while_true, Label::kNear);
- __ movsd(double_result, double_scratch);
- __ j(zero, &while_false, Label::kNear);
-
- __ bind(&while_true);
- __ shr(scratch, 1);
- __ mulsd(double_scratch, double_scratch);
- __ j(above, &while_true, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ j(not_zero, &while_true);
-
- __ bind(&while_false);
- // scratch has the original value of the exponent - if the exponent is
- // negative, return 1/result.
- __ test(exponent, exponent);
- __ j(positive, &done);
- __ divsd(double_scratch2, double_result);
- __ movsd(double_result, double_scratch2);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ xorps(double_scratch2, double_scratch2);
- __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
- // double_exponent aliased as double_scratch2 has already been overwritten
- // and may not have contained the exponent value in the first place when the
- // exponent is a smi. We reset it with exponent value before bailing out.
- __ j(not_equal, &done);
- __ Cvtsi2sd(double_exponent, exponent);
-
- // Returning or bailing out.
- __ bind(&call_runtime);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(4, scratch);
- __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
- __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 4);
- }
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movsd(double_result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
-
- __ bind(&done);
- __ ret(0);
-}
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -3064,7 +2958,7 @@ namespace {
// Generates an Operand for saving parameters after PrepareCallApiFunction.
Operand ApiParameterOperand(int index) {
- return Operand(esp, index * kPointerSize);
+ return Operand(esp, index * kSystemPointerSize);
}
// Prepares stack to put arguments (aligns and so on). Reserves
@@ -3082,7 +2976,7 @@ void PrepareCallApiFunction(MacroAssembler* masm, int argc, Register scratch) {
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Clobbers esi, edi and
// caller-save registers. Restores context. On return removes
-// stack_space * kPointerSize (GCed).
+// stack_space * kSystemPointerSize (GCed).
void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
ExternalReference thunk_ref,
Operand thunk_last_arg, int stack_space,
@@ -3210,7 +3104,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
if (stack_space_operand == nullptr) {
DCHECK_NE(stack_space, 0);
- __ ret(stack_space * kPointerSize);
+ __ ret(stack_space * kSystemPointerSize);
} else {
DCHECK_EQ(0, stack_space);
__ pop(ecx);
@@ -3240,32 +3134,27 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- esi : kTargetContext
- // -- edx : kApiFunctionAddress
- // -- ecx : kArgc
- // --
+ // -- esi : context
+ // -- edx : api function address
+ // -- ecx : arguments count (not including the receiver)
+ // -- eax : call data
+ // -- edi : holder
// -- esp[0] : return address
// -- esp[4] : last argument
// -- ...
// -- esp[argc * 4] : first argument
// -- esp[(argc + 1) * 4] : receiver
- // -- esp[(argc + 2) * 4] : kHolder
- // -- esp[(argc + 3) * 4] : kCallData
// -----------------------------------
Register api_function_address = edx;
Register argc = ecx;
- Register scratch = eax;
-
- DCHECK(!AreAliased(api_function_address, argc, scratch));
+ Register call_data = eax;
+ Register holder = edi;
- // Stack offsets (without argc).
- static constexpr int kReceiverOffset = kPointerSize;
- static constexpr int kHolderOffset = kReceiverOffset + kPointerSize;
- static constexpr int kCallDataOffset = kHolderOffset + kPointerSize;
+ // Park argc in xmm0.
+ __ movd(xmm0, argc);
- // Extra stack arguments are: the receiver, kHolder, kCallData.
- static constexpr int kExtraStackArgumentCount = 3;
+ DCHECK(!AreAliased(api_function_address, argc, holder));
typedef FunctionCallbackArguments FCA;
@@ -3283,45 +3172,30 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// esp[0]: return address
//
// Target state:
- // esp[0 * kPointerSize]: return address
- // esp[1 * kPointerSize]: kHolder
- // esp[2 * kPointerSize]: kIsolate
- // esp[3 * kPointerSize]: undefined (kReturnValueDefaultValue)
- // esp[4 * kPointerSize]: undefined (kReturnValue)
- // esp[5 * kPointerSize]: kData
- // esp[6 * kPointerSize]: undefined (kNewTarget)
-
- // Reserve space on the stack.
- __ sub(esp, Immediate(FCA::kArgsLength * kPointerSize));
-
- // Return address (the old stack location is overwritten later on).
- __ mov(scratch, Operand(esp, FCA::kArgsLength * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), scratch);
-
- // kHolder.
- __ mov(scratch, Operand(esp, argc, times_pointer_size,
- FCA::kArgsLength * kPointerSize + kHolderOffset));
- __ mov(Operand(esp, 1 * kPointerSize), scratch);
-
- // kIsolate.
- __ Move(scratch,
- Immediate(ExternalReference::isolate_address(masm->isolate())));
- __ mov(Operand(esp, 2 * kPointerSize), scratch);
-
- // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ mov(Operand(esp, 3 * kPointerSize), scratch);
- __ mov(Operand(esp, 4 * kPointerSize), scratch);
- __ mov(Operand(esp, 6 * kPointerSize), scratch);
-
- // kData.
- __ mov(scratch, Operand(esp, argc, times_pointer_size,
- FCA::kArgsLength * kPointerSize + kCallDataOffset));
- __ mov(Operand(esp, 5 * kPointerSize), scratch);
+ // esp[0 * kSystemPointerSize]: return address
+ // esp[1 * kSystemPointerSize]: kHolder
+ // esp[2 * kSystemPointerSize]: kIsolate
+ // esp[3 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
+ // esp[4 * kSystemPointerSize]: undefined (kReturnValue)
+ // esp[5 * kSystemPointerSize]: kData
+ // esp[6 * kSystemPointerSize]: undefined (kNewTarget)
+
+ __ PopReturnAddressTo(ecx);
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Push(call_data);
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Push(Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ Push(holder);
+ __ PushReturnAddressFrom(ecx);
+
+ // Reload argc from xmm0.
+ __ movd(argc, xmm0);
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
- __ lea(scratch, Operand(esp, 1 * kPointerSize));
+ Register scratch = eax;
+ __ lea(scratch, Operand(esp, 1 * kSystemPointerSize));
// The API function takes a reference to v8::Arguments. If the CPU profiler
// is enabled, a wrapper function will be called and we need to pass
@@ -3340,8 +3214,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
- __ lea(scratch, Operand(scratch, argc, times_pointer_size,
- (FCA::kArgsLength - 1) * kPointerSize));
+ __ lea(scratch, Operand(scratch, argc, times_system_pointer_size,
+ (FCA::kArgsLength - 1) * kSystemPointerSize));
__ mov(ApiParameterOperand(kApiArgc + 1), scratch);
// FunctionCallbackInfo::length_.
@@ -3350,8 +3224,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
__ lea(scratch,
- Operand(argc, times_pointer_size,
- (FCA::kArgsLength + kExtraStackArgumentCount) * kPointerSize));
+ Operand(argc, times_system_pointer_size,
+ (FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
__ mov(ApiParameterOperand(kApiArgc + 3), scratch);
// v8::InvocationCallback's argument.
@@ -3364,7 +3238,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// the stored ebp (pushed by EnterApiExitFrame), and the return address.
static constexpr int kStackSlotsAboveFCA = 2;
Operand return_value_operand(
- ebp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+ ebp,
+ (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
static constexpr int kUseStackSpaceOperand = 0;
Operand stack_space_operand = ApiParameterOperand(kApiArgc + 3);
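
The CallApiCallback rework above changes the builtin's incoming convention: kCallData and kHolder now arrive in eax/edi instead of stack slots, argc is parked in xmm0 while ecx is reused, and the implicit-args block is built with straight pushes. Reading the pushes back in memory order gives the layout from the "Target state" comment; as an illustrative struct (not V8's actual declaration):

    // Stack picture just below the return address after the push sequence,
    // one system-pointer-sized slot per field (ia32: 4 bytes each).
    struct FunctionCallbackImplicitArgs {
      void* holder;                // esp[1 * kSystemPointerSize]
      void* isolate;               // ExternalReference::isolate_address
      void* return_value_default;  // undefined
      void* return_value;          // undefined
      void* data;                  // the call data handed over in eax
      void* new_target;            // undefined
    };
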
@@ -3414,15 +3289,15 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
PrepareCallApiFunction(masm, kApiArgc, scratch);
// Load address of v8::PropertyAccessorInfo::args_ array. The value in ebp
- // here corresponds to esp + kPointersize before PrepareCallApiFunction.
- __ lea(scratch, Operand(ebp, kPointerSize + 2 * kPointerSize));
+ // here corresponds to esp + kSystemPointerSize before PrepareCallApiFunction.
+ __ lea(scratch, Operand(ebp, kSystemPointerSize + 2 * kSystemPointerSize));
// Create v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
Operand info_object = ApiParameterOperand(3);
__ mov(info_object, scratch);
// Name as handle.
- __ sub(scratch, Immediate(kPointerSize));
+ __ sub(scratch, Immediate(kSystemPointerSize));
__ mov(ApiParameterOperand(0), scratch);
// Arguments pointer.
__ lea(scratch, info_object);
@@ -3439,7 +3314,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
FieldOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
- ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ ebp,
+ (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
Operand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
kStackUnwindSpace, kUseStackSpaceConstant,
@@ -3530,9 +3406,9 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// esp[4]: First argument, destination pointer.
// esp[0]: return address
- const int kDestinationOffset = 1 * kPointerSize;
- const int kSourceOffset = 2 * kPointerSize;
- const int kSizeOffset = 3 * kPointerSize;
+ const int kDestinationOffset = 1 * kSystemPointerSize;
+ const int kSourceOffset = 2 * kSystemPointerSize;
+ const int kSizeOffset = 3 * kSystemPointerSize;
// When copying up to this many bytes, use special "small" handlers.
const size_t kSmallCopySize = 8;
@@ -3550,7 +3426,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
Label forward_much_overlap, small_size, medium_size, pop_and_return;
__ push(edi);
__ push(esi);
- stack_offset += 2 * kPointerSize;
+ stack_offset += 2 * kSystemPointerSize;
Register dst = edi;
Register src = esi;
Register count = ecx;
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 558e6495f1..a840875874 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -11,6 +11,8 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/mips/constants-mips.h"
#include "src/objects-inl.h"
@@ -47,8 +49,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -210,8 +210,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
- __ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
+ __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -2633,7 +2634,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
}
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
- Label out_of_range, only_low, negate, done;
+ Label done;
Register result_reg = t0;
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
@@ -2682,7 +2683,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ lw(input_high,
MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
- Label normal_exponent, restore_sign;
+ Label normal_exponent;
// Extract the biased exponent in result.
__ Ext(result_reg, input_high, HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
@@ -2760,106 +2761,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = a2;
- const DoubleRegister double_base = f2;
- const DoubleRegister double_exponent = f4;
- const DoubleRegister double_result = f0;
- const DoubleRegister double_scratch = f6;
- const FPURegister single_scratch = f8;
- const Register scratch = t5;
- const Register scratch2 = t3;
-
- Label call_runtime, done, int_exponent;
-
- Label int_exponent_convert;
- // Detect integer exponents stored as double.
- __ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, kScratchReg,
- double_scratch, scratch2, kCheckForInexactConversion);
- // scratch2 == 0 means there was no conversion error.
- __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
-
- __ push(ra);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch2);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- }
- __ pop(ra);
- __ MovFromFloatResult(double_result);
- __ jmp(&done);
-
- __ bind(&int_exponent_convert);
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
-
- // Get two copies of exponent in the registers scratch and exponent.
- // Exponent has previously been stored into scratch as untagged integer.
- __ mov(exponent, scratch);
-
- __ mov_d(double_scratch, double_base); // Back up base.
- __ Move(double_result, 1.0);
-
- // Get absolute value of exponent.
- Label positive_exponent, bail_out;
- __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
- __ Subu(scratch, zero_reg, scratch);
- // Check when Subu overflows and we get negative result
- // (happens only when input is MIN_INT).
- __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
- __ bind(&positive_exponent);
- __ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
- Operand(zero_reg));
-
- Label while_true, no_carry, loop_end;
- __ bind(&while_true);
-
- __ And(scratch2, scratch, 1);
-
- __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
- __ mul_d(double_result, double_result, double_scratch);
- __ bind(&no_carry);
-
- __ sra(scratch, scratch, 1);
-
- __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
- __ mul_d(double_scratch, double_scratch, double_scratch);
-
- __ Branch(&while_true);
-
- __ bind(&loop_end);
-
- __ Branch(&done, ge, exponent, Operand(zero_reg));
- __ Move(double_scratch, 1.0);
- __ div_d(double_result, double_scratch, double_result);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ CompareF64(EQ, double_result, kDoubleRegZero);
- __ BranchFalseShortF(&done);
-
- // double_exponent may not contain the exponent value if the input was a
- // smi. We set it with exponent value before bailing out.
- __ bind(&bail_out);
- __ mtc1(exponent, single_scratch);
- __ cvt_d_w(double_exponent, single_scratch);
-
- // Returning or bailing out.
- __ push(ra);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- }
- __ pop(ra);
- __ MovFromFloatResult(double_result);
-
- __ bind(&done);
- __ Ret();
-}
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 6826fef162..ee0e5238b8 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -11,6 +11,8 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/mips64/constants-mips64.h"
#include "src/objects-inl.h"
@@ -47,8 +49,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -210,8 +210,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
- __ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
+ __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -2669,7 +2670,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
}
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
- Label out_of_range, only_low, negate, done;
+ Label done;
Register result_reg = t0;
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
@@ -2718,7 +2719,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Lw(input_high,
MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
- Label normal_exponent, restore_sign;
+ Label normal_exponent;
// Extract the biased exponent in result.
__ Ext(result_reg, input_high, HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
@@ -2797,106 +2798,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = a2;
- const DoubleRegister double_base = f2;
- const DoubleRegister double_exponent = f4;
- const DoubleRegister double_result = f0;
- const DoubleRegister double_scratch = f6;
- const FPURegister single_scratch = f8;
- const Register scratch = t1;
- const Register scratch2 = a7;
-
- Label call_runtime, done, int_exponent;
-
- Label int_exponent_convert;
- // Detect integer exponents stored as double.
- __ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, kScratchReg,
- double_scratch, scratch2, kCheckForInexactConversion);
- // scratch2 == 0 means there was no conversion error.
- __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
-
- __ push(ra);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch2);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- }
- __ pop(ra);
- __ MovFromFloatResult(double_result);
- __ jmp(&done);
-
- __ bind(&int_exponent_convert);
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
-
- // Get two copies of exponent in the registers scratch and exponent.
- // Exponent has previously been stored into scratch as untagged integer.
- __ mov(exponent, scratch);
-
- __ mov_d(double_scratch, double_base); // Back up base.
- __ Move(double_result, 1.0);
-
- // Get absolute value of exponent.
- Label positive_exponent, bail_out;
- __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
- __ Dsubu(scratch, zero_reg, scratch);
- // Check when Dsubu overflows and we get negative result
- // (happens only when input is MIN_INT).
- __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
- __ bind(&positive_exponent);
- __ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
- Operand(zero_reg));
-
- Label while_true, no_carry, loop_end;
- __ bind(&while_true);
-
- __ And(scratch2, scratch, 1);
-
- __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
- __ mul_d(double_result, double_result, double_scratch);
- __ bind(&no_carry);
-
- __ dsra(scratch, scratch, 1);
-
- __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
- __ mul_d(double_scratch, double_scratch, double_scratch);
-
- __ Branch(&while_true);
-
- __ bind(&loop_end);
-
- __ Branch(&done, ge, exponent, Operand(zero_reg));
- __ Move(double_scratch, 1.0);
- __ div_d(double_result, double_scratch, double_result);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ CompareF64(EQ, double_result, kDoubleRegZero);
- __ BranchFalseShortF(&done);
-
- // double_exponent may not contain the exponent value if the input was a
- // smi. We set it with exponent value before bailing out.
- __ bind(&bail_out);
- __ mtc1(exponent, single_scratch);
- __ cvt_d_w(double_exponent, single_scratch);
-
- // Returning or bailing out.
- __ push(ra);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- }
- __ pop(ra);
- __ MovFromFloatResult(double_result);
-
- __ bind(&done);
- __ Ret();
-}
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
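
All three Generate_MathPowInternal stubs deleted above (ia32, mips, mips64) shared the same integer-exponent core: exponentiation by squaring, with a negative exponent handled as a reciprocal and a bailout to the runtime when that reciprocal underflows to zero, since x^-y == (1/x)^y does not hold for subnormals. A direct C++ transcription of that loop:

    // Sketch of the deleted stubs' integer-exponent path; the real code bails
    // out to power_double_double when the negative-exponent result is 0.0.
    double PowIntExponent(double base, int exponent) {
      double result = 1.0;
      double scratch = base;  // running square of the base
      unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                : static_cast<unsigned>(exponent);
      for (; e != 0; e >>= 1) {
        if (e & 1) result *= scratch;  // multiply in this bit's power
        scratch *= scratch;
      }
      if (exponent < 0) {
        result = 1.0 / result;
        // if (result == 0.0): subnormal risk -> fall back to the runtime pow
      }
      return result;
    }
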
diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq
index a04b034085..2557b082a7 100644
--- a/deps/v8/src/builtins/object-fromentries.tq
+++ b/deps/v8/src/builtins/object-fromentries.tq
@@ -11,7 +11,7 @@ namespace object {
const elements: FixedArray =
Cast<FixedArray>(array.elements) otherwise IfSlow;
const length: Smi = array.length;
- const result: JSObject = AllocateEmptyJSObject();
+ const result: JSObject = NewJSObject();
for (let k: Smi = 0; k < length; ++k) {
const value: Object = array::LoadElementOrUndefined(elements, k);
@@ -22,7 +22,7 @@ namespace object {
// Symbol.toPrimitive, toString, and valueOf, which could
// invalidate assumptions about the iterable.
if (Is<JSReceiver>(pair.key)) goto IfSlow;
- CreateDataProperty(result, pair.key, pair.value);
+ FastCreateDataProperty(result, pair.key, pair.value);
}
return result;
}
@@ -41,7 +41,7 @@ namespace object {
return ObjectFromEntriesFastCase(iterable) otherwise IfSlow;
}
label IfSlow {
- const result: JSObject = AllocateEmptyJSObject();
+ const result: JSObject = NewJSObject();
const fastIteratorResultMap: Map =
Cast<Map>(LoadNativeContext(context)[ITERATOR_RESULT_MAP_INDEX])
otherwise unreachable;
@@ -55,7 +55,7 @@ namespace object {
iterator::IteratorValue(step, fastIteratorResultMap);
const pair: KeyValuePair =
collections::LoadKeyValuePair(iteratorValue);
- CreateDataProperty(result, pair.key, pair.value);
+ FastCreateDataProperty(result, pair.key, pair.value);
}
return result;
} catch (e) deferred {
@@ -63,7 +63,7 @@ namespace object {
}
}
label Throw deferred {
- ThrowTypeError(context, kNotIterable);
+ ThrowTypeError(kNotIterable);
}
}
} // namespace object
diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq
deleted file mode 100644
index 5cdcfd83b8..0000000000
--- a/deps/v8/src/builtins/object.tq
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-namespace object {
- macro AllocateEmptyJSObject(implicit context: Context)(): JSObject {
- const objectFunction: JSFunction = GetObjectFunction();
- const map: Map = Cast<Map>(objectFunction.prototype_or_initial_map)
- otherwise unreachable;
- return AllocateJSObjectFromMap(map);
- }
-}
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 58419b1ccc..36254a6776 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -11,6 +11,8 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
@@ -45,7 +47,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
@@ -92,7 +93,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
- Label post_instantiation_deopt_entry;
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
@@ -215,8 +215,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
- __ TestBitMask(r7, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
- __ bne(&not_create_implicit_receiver, cr0);
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
+ __ JumpIfIsInRange(r7, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -2373,112 +2374,160 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r6 : new target (passed through to callee)
// -----------------------------------
- Label invoke, dont_adapt_arguments, stack_overflow;
-
- Label enough, too_few;
+ Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
__ cmpli(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ beq(&dont_adapt_arguments);
- __ cmp(r3, r5);
- __ blt(&too_few);
-
- { // Enough parameters: actual >= expected
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r5, r8, &stack_overflow);
-
- // Calculate copy start address into r3 and copy end address into r7.
- // r3: actual number of arguments as a smi
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- __ SmiToPtrArrayOffset(r3, r3);
- __ add(r3, r3, fp);
- // adjust for return address and receiver
- __ addi(r3, r3, Operand(2 * kPointerSize));
- __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
- __ sub(r7, r3, r7);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r3: copy start address
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- // r7: copy end address
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
+ __ TestBitMask(r7, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
+ r0);
+ __ bne(&skip_adapt_arguments, cr0);
- Label copy;
- __ bind(&copy);
- __ LoadP(r0, MemOperand(r3, 0));
- __ push(r0);
- __ cmp(r3, r7); // Compare before moving to next argument.
- __ subi(r3, r3, Operand(kPointerSize));
- __ bne(&copy);
+ // -------------------------------------------
+ // Adapt arguments.
+ // -------------------------------------------
+ {
+ Label under_application, over_application, invoke;
+ __ cmp(r3, r5);
+ __ blt(&under_application);
- __ b(&invoke);
- }
+ // Enough parameters: actual >= expected
+ __ bind(&over_application);
+ {
+ EnterArgumentsAdaptorFrame(masm);
+ Generate_StackOverflowCheck(masm, r5, r8, &stack_overflow);
+
+ // Calculate copy start address into r3 and copy end address into r7.
+ // r3: actual number of arguments as a smi
+ // r4: function
+ // r5: expected number of arguments
+ // r6: new target (passed through to callee)
+ __ SmiToPtrArrayOffset(r3, r3);
+ __ add(r3, r3, fp);
+ // adjust for return address and receiver
+ __ addi(r3, r3, Operand(2 * kPointerSize));
+ __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+ __ sub(r7, r3, r7);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r3: copy start address
+ // r4: function
+ // r5: expected number of arguments
+ // r6: new target (passed through to callee)
+ // r7: copy end address
+
+ Label copy;
+ __ bind(&copy);
+ __ LoadP(r0, MemOperand(r3, 0));
+ __ push(r0);
+ __ cmp(r3, r7); // Compare before moving to next argument.
+ __ subi(r3, r3, Operand(kPointerSize));
+ __ bne(&copy);
+
+ __ b(&invoke);
+ }
- { // Too few parameters: Actual < expected
- __ bind(&too_few);
+ // Too few parameters: Actual < expected
+ __ bind(&under_application);
+ {
+ EnterArgumentsAdaptorFrame(masm);
+ Generate_StackOverflowCheck(masm, r5, r8, &stack_overflow);
+
+ // Calculate copy start address into r0 and copy end address is fp.
+ // r3: actual number of arguments as a smi
+ // r4: function
+ // r5: expected number of arguments
+ // r6: new target (passed through to callee)
+ __ SmiToPtrArrayOffset(r3, r3);
+ __ add(r3, r3, fp);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r3: copy start address
+ // r4: function
+ // r5: expected number of arguments
+ // r6: new target (passed through to callee)
+ Label copy;
+ __ bind(&copy);
+ // Adjust load for return address and receiver.
+ __ LoadP(r0, MemOperand(r3, 2 * kPointerSize));
+ __ push(r0);
+ __ cmp(r3, fp); // Compare before moving to next argument.
+ __ subi(r3, r3, Operand(kPointerSize));
+ __ bne(&copy);
+
+ // Fill the remaining expected arguments with undefined.
+ // r4: function
+ // r5: expected number of arguments
+ // r6: new target (passed through to callee)
+ __ LoadRoot(r0, RootIndex::kUndefinedValue);
+ __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+ __ sub(r7, fp, r7);
+ // Adjust for frame.
+ __ subi(r7, r7,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
+
+ Label fill;
+ __ bind(&fill);
+ __ push(r0);
+ __ cmp(sp, r7);
+ __ bne(&fill);
+ }
- EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r5, r8, &stack_overflow);
+ // Call the entry point.
+ __ bind(&invoke);
+ __ mr(r3, r5);
+ // r3 : expected number of arguments
+ // r4 : function (passed through to callee)
+ // r6 : new target (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ CallCodeObject(r5);
- // Calculate copy start address into r0 and copy end address is fp.
- // r3: actual number of arguments as a smi
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- __ SmiToPtrArrayOffset(r3, r3);
- __ add(r3, r3, fp);
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
+ masm->pc_offset());
- // Copy the arguments (including the receiver) to the new stack frame.
- // r3: copy start address
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
- // Adjust load for return address and receiver.
- __ LoadP(r0, MemOperand(r3, 2 * kPointerSize));
- __ push(r0);
- __ cmp(r3, fp); // Compare before moving to next argument.
- __ subi(r3, r3, Operand(kPointerSize));
- __ bne(&copy);
-
- // Fill the remaining expected arguments with undefined.
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- __ LoadRoot(r0, RootIndex::kUndefinedValue);
- __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
- __ sub(r7, fp, r7);
- // Adjust for frame.
- __ subi(r7, r7,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r0);
- __ cmp(sp, r7);
- __ bne(&fill);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ mr(r3, r5);
- // r3 : expected number of arguments
- // r4 : function (passed through to callee)
- // r6 : new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ CallCodeObject(r5);
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ blr();
+ }
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+ // -------------------------------------------
+ // Skip adapt arguments.
+ // -------------------------------------------
+ __ bind(&skip_adapt_arguments);
+ {
+ // The callee cannot observe the actual arguments, so it's safe to just
+ // pass the expected arguments by massaging the stack appropriately. See
+ // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
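+ // Illustrative JS (for a callee whose shared function info has the bit
+ // set, e.g. one that never observes its actual arguments): calling
+ // `function f(a, b) {}` as `f(1, 2, 3)` only needs the extra argument
+ // dropped, and `f(1)` only needs one undefined pushed; neither case
+ // needs an arguments adaptor frame.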
+ Label under_application, over_application;
+ __ cmp(r3, r5);
+ __ blt(&under_application);
+
+ __ bind(&over_application);
+ {
+ // Remove superfluous parameters from the stack.
+ __ sub(r7, r3, r5);
+ __ mr(r3, r5);
+ __ ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
+ __ add(sp, sp, r7);
+ __ b(&dont_adapt_arguments);
+ }
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ blr();
+ __ bind(&under_application);
+ {
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ LoadRoot(r7, RootIndex::kUndefinedValue);
+ __ bind(&fill);
+ __ addi(r3, r3, Operand(1));
+ __ push(r7);
+ __ cmp(r3, r5);
+ __ blt(&fill);
+ __ b(&dont_adapt_arguments);
+ }
+ }
// -------------------------------------------
// Don't adapt arguments.
@@ -2823,100 +2872,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = r5;
- const DoubleRegister double_base = d1;
- const DoubleRegister double_exponent = d2;
- const DoubleRegister double_result = d3;
- const DoubleRegister double_scratch = d0;
- const Register scratch = r11;
- const Register scratch2 = r10;
-
- Label call_runtime, done, int_exponent;
-
- // Detect integer exponents stored as double.
- __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2, double_scratch);
- __ beq(&int_exponent);
-
- __ mflr(r0);
- __ push(r0);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- }
- __ pop(r0);
- __ mtlr(r0);
- __ MovFromFloatResult(double_result);
- __ b(&done);
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
-
- // Get two copies of exponent in the registers scratch and exponent.
- // Exponent has previously been stored into scratch as untagged integer.
- __ mr(exponent, scratch);
-
- __ fmr(double_scratch, double_base); // Back up base.
- __ li(scratch2, Operand(1));
- __ ConvertIntToDouble(scratch2, double_result);
-
- // Get absolute value of exponent.
- __ cmpi(scratch, Operand::Zero());
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ neg(scratch2, scratch);
- __ isel(lt, scratch, scratch2, scratch);
- } else {
- Label positive_exponent;
- __ bge(&positive_exponent);
- __ neg(scratch, scratch);
- __ bind(&positive_exponent);
- }
-
- Label while_true, no_carry, loop_end;
- __ bind(&while_true);
- __ andi(scratch2, scratch, Operand(1));
- __ beq(&no_carry, cr0);
- __ fmul(double_result, double_result, double_scratch);
- __ bind(&no_carry);
- __ ShiftRightImm(scratch, scratch, Operand(1), SetRC);
- __ beq(&loop_end, cr0);
- __ fmul(double_scratch, double_scratch, double_scratch);
- __ b(&while_true);
- __ bind(&loop_end);
-
- __ cmpi(exponent, Operand::Zero());
- __ bge(&done);
-
- __ li(scratch2, Operand(1));
- __ ConvertIntToDouble(scratch2, double_scratch);
- __ fdiv(double_result, double_scratch, double_result);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ fcmpu(double_result, kDoubleRegZero);
- __ bne(&done);
- // double_exponent may not contain the exponent value if the input was a
- // smi. We set it with the exponent value before bailing out.
- __ ConvertIntToDouble(exponent, double_exponent);
-
- // Returning or bailing out.
- __ mflr(r0);
- __ push(r0);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- }
- __ pop(r0);
- __ mtlr(r0);
- __ MovFromFloatResult(double_result);
-
- __ bind(&done);
- __ Ret();
-}
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
@@ -3098,32 +3053,23 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- cp : kTargetContext
- // -- r4 : kApiFunctionAddress
- // -- r5 : kArgc
- // --
+ // -- cp : context
+ // -- r4 : api function address
+ // -- r5 : arguments count (not including the receiver)
+ // -- r6 : call data
+ // -- r3 : holder
// -- sp[0] : last argument
// -- ...
// -- sp[(argc - 1) * 4] : first argument
// -- sp[(argc + 0) * 4] : receiver
- // -- sp[(argc + 1) * 4] : kHolder
- // -- sp[(argc + 2) * 4] : kCallData
// -----------------------------------
Register api_function_address = r4;
Register argc = r5;
+ Register call_data = r6;
+ Register holder = r3;
Register scratch = r7;
- Register index = r8; // For indexing MemOperands.
-
- DCHECK(!AreAliased(api_function_address, argc, scratch, index));
-
- // Stack offsets (without argc).
- static constexpr int kReceiverOffset = 0;
- static constexpr int kHolderOffset = kReceiverOffset + 1;
- static constexpr int kCallDataOffset = kHolderOffset + 1;
-
- // Extra stack arguments are: the receiver, kHolder, kCallData.
- static constexpr int kExtraStackArgumentCount = 3;
+ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
typedef FunctionCallbackArguments FCA;
@@ -3149,26 +3095,22 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
__ subi(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
// kHolder.
- __ addi(index, argc, Operand(FCA::kArgsLength + kHolderOffset));
- __ ShiftLeftImm(ip, index, Operand(kPointerSizeLog2));
- __ LoadPX(scratch, MemOperand(sp, ip));
- __ StoreP(scratch, MemOperand(sp, 0 * kPointerSize));
+ __ StoreP(holder, MemOperand(sp, 0 * kPointerSize));
// kIsolate.
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ StoreP(scratch, MemOperand(sp, 1 * kPointerSize));
- // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ // kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
__ StoreP(scratch, MemOperand(sp, 3 * kPointerSize));
- __ StoreP(scratch, MemOperand(sp, 5 * kPointerSize));
// kData.
- __ addi(index, argc, Operand(FCA::kArgsLength + kCallDataOffset));
- __ ShiftLeftImm(ip, index, Operand(kPointerSizeLog2));
- __ LoadPX(scratch, MemOperand(sp, ip));
- __ StoreP(scratch, MemOperand(sp, 4 * kPointerSize));
+ __ StoreP(call_data, MemOperand(sp, 4 * kPointerSize));
+
+ // kNewTarget.
+ __ StoreP(scratch, MemOperand(sp, 5 * kPointerSize));
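+
+ // At this point all FunctionCallbackArguments slots are initialized:
+ // sp[0] kHolder, sp[1] kIsolate, sp[2] kReturnValueDefaultValue,
+ // sp[3] kReturnValue, sp[4] kData, sp[5] kNewTarget.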
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
@@ -3207,7 +3149,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
__ mov(scratch,
- Operand((FCA::kArgsLength + kExtraStackArgumentCount) * kPointerSize));
+ Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
__ ShiftLeftImm(ip, argc, Operand(kPointerSizeLog2));
__ add(scratch, scratch, ip);
__ StoreP(scratch,
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 2179e7bcac..3134a3f43e 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -11,6 +11,8 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
@@ -45,7 +47,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
@@ -92,7 +93,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
- Label post_instantiation_deopt_entry;
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- r3 : constructor function
@@ -209,8 +209,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
- __ bne(&not_create_implicit_receiver);
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
+ __ JumpIfIsInRange(r6, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -2427,111 +2428,160 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r5 : new target (passed through to callee)
// -----------------------------------
- Label invoke, dont_adapt_arguments, stack_overflow;
-
- Label enough, too_few;
+ Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
__ tmll(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ b(Condition(1), &dont_adapt_arguments);
- __ CmpLogicalP(r2, r4);
- __ blt(&too_few);
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
+ __ tmlh(r6,
+ Operand(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask >>
+ 16));
+ __ bne(&skip_adapt_arguments);
- { // Enough parameters: actual >= expected
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
+ // -------------------------------------------
+ // Adapt arguments.
+ // -------------------------------------------
+ {
+ Label under_application, over_application, invoke;
+ __ CmpP(r2, r4);
+ __ blt(&under_application);
- // Calculate copy start address into r2 and copy end address into r6.
- // r2: actual number of arguments as a smi
- // r3: function
- // r4: expected number of arguments
- // r5: new target (passed through to callee)
- __ SmiToPtrArrayOffset(r2, r2);
- __ AddP(r2, fp);
- // adjust for return address and receiver
- __ AddP(r2, r2, Operand(2 * kPointerSize));
- __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
- __ SubP(r6, r2, r6);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r2: copy start address
- // r3: function
- // r4: expected number of arguments
- // r5: new target (passed through to callee)
- // r6: copy end address
-
- Label copy;
- __ bind(&copy);
- __ LoadP(r0, MemOperand(r2, 0));
- __ push(r0);
- __ CmpP(r2, r6); // Compare before moving to next argument.
- __ lay(r2, MemOperand(r2, -kPointerSize));
- __ bne(&copy);
-
- __ b(&invoke);
- }
+ // Enough parameters: actual >= expected
+ __ bind(&over_application);
+ {
+ EnterArgumentsAdaptorFrame(masm);
+ Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
+
+ // Calculate copy start address into r2 and copy end address into r6.
+ // r2: actual number of arguments as a smi
+ // r3: function
+ // r4: expected number of arguments
+ // r5: new target (passed through to callee)
+ __ SmiToPtrArrayOffset(r2, r2);
+ __ AddP(r2, fp);
+ // adjust for return address and receiver
+ __ AddP(r2, r2, Operand(2 * kPointerSize));
+ __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+ __ SubP(r6, r2, r6);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r2: copy start address
+ // r3: function
+ // r4: expected number of arguments
+ // r5: new target (passed through to callee)
+ // r6: copy end address
+
+ Label copy;
+ __ bind(&copy);
+ __ LoadP(r0, MemOperand(r2, 0));
+ __ push(r0);
+ __ CmpP(r2, r6); // Compare before moving to next argument.
+ __ lay(r2, MemOperand(r2, -kPointerSize));
+ __ bne(&copy);
+
+ __ b(&invoke);
+ }
- { // Too few parameters: Actual < expected
- __ bind(&too_few);
+ // Too few parameters: Actual < expected
+ __ bind(&under_application);
+ {
+ EnterArgumentsAdaptorFrame(masm);
+ Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
+
+ // Calculate copy start address into r0 and copy end address is fp.
+ // r2: actual number of arguments as a smi
+ // r3: function
+ // r4: expected number of arguments
+ // r5: new target (passed through to callee)
+ __ SmiToPtrArrayOffset(r2, r2);
+ __ lay(r2, MemOperand(r2, fp));
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r2: copy start address
+ // r3: function
+ // r4: expected number of arguments
+ // r5: new target (passed through to callee)
+ Label copy;
+ __ bind(&copy);
+ // Adjust load for return address and receiver.
+ __ LoadP(r0, MemOperand(r2, 2 * kPointerSize));
+ __ push(r0);
+ __ CmpP(r2, fp); // Compare before moving to next argument.
+ __ lay(r2, MemOperand(r2, -kPointerSize));
+ __ bne(&copy);
+
+ // Fill the remaining expected arguments with undefined.
+ // r3: function
+ // r4: expected number of arguments
+ __ LoadRoot(r0, RootIndex::kUndefinedValue);
+ __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+ __ SubP(r6, fp, r6);
+ // Adjust for frame.
+ __ SubP(r6, r6,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
+
+ Label fill;
+ __ bind(&fill);
+ __ push(r0);
+ __ CmpP(sp, r6);
+ __ bne(&fill);
+ }
- EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
+ // Call the entry point.
+ __ bind(&invoke);
+ __ LoadRR(r2, r4);
+ // r2 : expected number of arguments
+ // r3 : function (passed through to callee)
+ // r5 : new target (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ CallCodeObject(r4);
- // Calculate copy start address into r0 and copy end address is fp.
- // r2: actual number of arguments as a smi
- // r3: function
- // r4: expected number of arguments
- // r5: new target (passed through to callee)
- __ SmiToPtrArrayOffset(r2, r2);
- __ lay(r2, MemOperand(r2, fp));
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
+ masm->pc_offset());
- // Copy the arguments (including the receiver) to the new stack frame.
- // r2: copy start address
- // r3: function
- // r4: expected number of arguments
- // r5: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
- // Adjust load for return address and receiver.
- __ LoadP(r0, MemOperand(r2, 2 * kPointerSize));
- __ push(r0);
- __ CmpP(r2, fp); // Compare before moving to next argument.
- __ lay(r2, MemOperand(r2, -kPointerSize));
- __ bne(&copy);
-
- // Fill the remaining expected arguments with undefined.
- // r3: function
- // r4: expected number of arguments
- __ LoadRoot(r0, RootIndex::kUndefinedValue);
- __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
- __ SubP(r6, fp, r6);
- // Adjust for frame.
- __ SubP(r6, r6,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r0);
- __ CmpP(sp, r6);
- __ bne(&fill);
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
}
- // Call the entry point.
- __ bind(&invoke);
- __ LoadRR(r2, r4);
- // r2 : expected number of arguments
- // r3 : function (passed through to callee)
- // r5 : new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ CallCodeObject(r4);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+ // -------------------------------------------
+ // Skip adapt arguments.
+ // -------------------------------------------
+ __ bind(&skip_adapt_arguments);
+ {
+ // The callee cannot observe the actual arguments, so it's safe to just
+ // pass the expected arguments by massaging the stack appropriately. See
+ // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
+ Label under_application, over_application;
+ __ CmpP(r2, r4);
+ __ blt(&under_application);
+
+ __ bind(&over_application);
+ {
+ // Remove superfluous parameters from the stack.
+ __ SubP(r6, r2, r4);
+ __ lgr(r2, r4);
+ __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
+ __ lay(sp, MemOperand(sp, r6));
+ __ b(&dont_adapt_arguments);
+ }
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Ret();
+ __ bind(&under_application);
+ {
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ LoadRoot(r6, RootIndex::kUndefinedValue);
+ __ bind(&fill);
+ __ AddP(r2, r2, Operand(1));
+ __ push(r6);
+ __ CmpP(r2, r4);
+ __ blt(&fill);
+ __ b(&dont_adapt_arguments);
+ }
+ }
// -------------------------------------------
// Don't adapt arguments.
@@ -2870,97 +2920,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = r4;
- const DoubleRegister double_base = d1;
- const DoubleRegister double_exponent = d2;
- const DoubleRegister double_result = d3;
- const DoubleRegister double_scratch = d0;
- const Register scratch = r1;
- const Register scratch2 = r9;
-
- Label call_runtime, done, int_exponent;
-
- // Detect integer exponents stored as double.
- __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2, double_scratch);
- __ beq(&int_exponent, Label::kNear);
-
- __ push(r14);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- }
- __ pop(r14);
- __ MovFromFloatResult(double_result);
- __ b(&done);
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
-
- // Get two copies of exponent in the registers scratch and exponent.
- // Exponent has previously been stored into scratch as untagged integer.
- __ LoadRR(exponent, scratch);
-
- __ ldr(double_scratch, double_base); // Back up base.
- __ LoadImmP(scratch2, Operand(1));
- __ ConvertIntToDouble(double_result, scratch2);
-
- // Get absolute value of exponent.
- Label positive_exponent;
- __ CmpP(scratch, Operand::Zero());
- __ bge(&positive_exponent, Label::kNear);
- __ LoadComplementRR(scratch, scratch);
- __ bind(&positive_exponent);
-
- Label while_true, no_carry, loop_end;
- __ bind(&while_true);
- __ mov(scratch2, Operand(1));
- __ AndP(scratch2, scratch);
- __ beq(&no_carry, Label::kNear);
- __ mdbr(double_result, double_scratch);
- __ bind(&no_carry);
- __ ShiftRightP(scratch, scratch, Operand(1));
- __ LoadAndTestP(scratch, scratch);
- __ beq(&loop_end, Label::kNear);
- __ mdbr(double_scratch, double_scratch);
- __ b(&while_true);
- __ bind(&loop_end);
-
- __ CmpP(exponent, Operand::Zero());
- __ bge(&done);
-
- // get 1/double_result:
- __ ldr(double_scratch, double_result);
- __ LoadImmP(scratch2, Operand(1));
- __ ConvertIntToDouble(double_result, scratch2);
- __ ddbr(double_result, double_scratch);
-
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ lzdr(kDoubleRegZero);
- __ cdbr(double_result, kDoubleRegZero);
- __ bne(&done, Label::kNear);
- // double_exponent may not contain the exponent value if the input was a
- // smi. We set it with the exponent value before bailing out.
- __ ConvertIntToDouble(double_exponent, exponent);
-
- // Returning or bailing out.
- __ push(r14);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
- }
- __ pop(r14);
- __ MovFromFloatResult(double_result);
-
- __ bind(&done);
- __ Ret();
-}
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
@@ -3131,32 +3090,23 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- cp : kTargetContext
- // -- r3 : kApiFunctionAddress
- // -- r4 : kArgc
- // --
+ // -- cp : context
+ // -- r3 : api function address
+ // -- r4 : arguments count (not including the receiver)
+ // -- r5 : call data
+ // -- r2 : holder
// -- sp[0] : last argument
// -- ...
// -- sp[(argc - 1) * 4] : first argument
// -- sp[(argc + 0) * 4] : receiver
- // -- sp[(argc + 1) * 4] : kHolder
- // -- sp[(argc + 2) * 4] : kCallData
// -----------------------------------
Register api_function_address = r3;
Register argc = r4;
+ Register call_data = r5;
+ Register holder = r2;
Register scratch = r6;
- Register index = r7; // For indexing MemOperands.
-
- DCHECK(!AreAliased(api_function_address, argc, scratch, index));
-
- // Stack offsets (without argc).
- static constexpr int kReceiverOffset = 0;
- static constexpr int kHolderOffset = kReceiverOffset + 1;
- static constexpr int kCallDataOffset = kHolderOffset + 1;
-
- // Extra stack arguments are: the receiver, kHolder, kCallData.
- static constexpr int kExtraStackArgumentCount = 3;
+ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
typedef FunctionCallbackArguments FCA;
@@ -3182,26 +3132,22 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
__ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kPointerSize)));
// kHolder.
- __ AddP(index, argc, Operand(FCA::kArgsLength + kHolderOffset));
- __ ShiftLeftP(r1, index, Operand(kPointerSizeLog2));
- __ LoadP(scratch, MemOperand(sp, r1));
- __ StoreP(scratch, MemOperand(sp, 0 * kPointerSize));
+ __ StoreP(holder, MemOperand(sp, 0 * kPointerSize));
// kIsolate.
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ StoreP(scratch, MemOperand(sp, 1 * kPointerSize));
- // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ // kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
__ StoreP(scratch, MemOperand(sp, 3 * kPointerSize));
- __ StoreP(scratch, MemOperand(sp, 5 * kPointerSize));
// kData.
- __ AddP(index, argc, Operand(FCA::kArgsLength + kCallDataOffset));
- __ ShiftLeftP(r1, index, Operand(kPointerSizeLog2));
- __ LoadP(scratch, MemOperand(sp, r1));
- __ StoreP(scratch, MemOperand(sp, 4 * kPointerSize));
+ __ StoreP(call_data, MemOperand(sp, 4 * kPointerSize));
+
+ // kNewTarget.
+ __ StoreP(scratch, MemOperand(sp, 5 * kPointerSize));
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
@@ -3241,7 +3187,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
__ mov(scratch,
- Operand((FCA::kArgsLength + kExtraStackArgumentCount) * kPointerSize));
+ Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
__ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2));
__ AddP(scratch, r1);
__ StoreP(scratch,
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index cf7aa704f2..23e03a705d 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -8,8 +8,8 @@
#include "src/builtins/builtins.h"
#include "src/code-events.h"
#include "src/compiler/code-assembler.h"
-
#include "src/handles-inl.h"
+#include "src/heap/heap-inl.h" // For MemoryAllocator::code_range.
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-generator.h"
@@ -42,7 +42,7 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
CHECK(!options.isolate_independent_code);
CHECK(!options.use_pc_relative_calls_and_jumps);
- if (!isolate->ShouldLoadConstantsFromRootList() ||
+ if (!isolate->IsGeneratingEmbeddedBuiltins() ||
!Builtins::IsIsolateIndependent(builtin_index)) {
return options;
}
@@ -116,17 +116,16 @@ Code BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index,
}
CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ masm.GetCode(isolate, &desc, MacroAssembler::kNoSafepointTable,
+ handler_table_offset);
static constexpr bool kIsNotTurbofanned = false;
static constexpr int kStackSlots = 0;
- static constexpr int kSafepointTableOffset = 0;
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::BUILTIN, masm.CodeObject(), builtin_index,
MaybeHandle<ByteArray>(), DeoptimizationData::Empty(isolate), kMovable,
- kIsNotTurbofanned, kStackSlots, kSafepointTableOffset,
- handler_table_offset);
+ kIsNotTurbofanned, kStackSlots);
PostBuildProfileAndTracing(isolate, *code, s_name);
return *code;
}
@@ -183,7 +182,7 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
CodeAssemblerGenerator generator,
CallDescriptors::Key interface_descriptor,
- const char* name, int result_size) {
+ const char* name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@@ -196,7 +195,6 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
// and this construction just queries the details from the descriptors table.
CallInterfaceDescriptor descriptor(interface_descriptor);
// Ensure descriptor is already initialized.
- DCHECK_EQ(result_size, descriptor.GetReturnCount());
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
compiler::CodeAssemblerState state(
isolate, &zone, descriptor, Code::BUILTIN, name,
@@ -269,8 +267,8 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
flush_icache = true;
}
if (flush_icache) {
- Assembler::FlushICache(code->raw_instruction_start(),
- code->raw_instruction_size());
+ FlushInstructionCache(code->raw_instruction_start(),
+ code->raw_instruction_size());
}
}
}
@@ -318,22 +316,23 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
code = BuildWithCodeStubAssemblerJS( \
isolate, index, &Builtins::Generate_##Name, Argc, #Name); \
AddBuiltin(builtins, index++, code);
-#define BUILD_TFC(Name, InterfaceDescriptor, result_size) \
- code = BuildWithCodeStubAssemblerCS( \
- isolate, index, &Builtins::Generate_##Name, \
- CallDescriptors::InterfaceDescriptor, #Name, result_size); \
+#define BUILD_TFC(Name, InterfaceDescriptor) \
+ /* Return size is from the provided CallInterfaceDescriptor. */ \
+ code = BuildWithCodeStubAssemblerCS( \
+ isolate, index, &Builtins::Generate_##Name, \
+ CallDescriptors::InterfaceDescriptor, #Name); \
AddBuiltin(builtins, index++, code);
#define BUILD_TFS(Name, ...) \
/* Return size for generic TF builtins (stub linkage) is always 1. */ \
code = \
BuildWithCodeStubAssemblerCS(isolate, index, &Builtins::Generate_##Name, \
- CallDescriptors::Name, #Name, 1); \
+ CallDescriptors::Name, #Name); \
AddBuiltin(builtins, index++, code);
-#define BUILD_TFH(Name, InterfaceDescriptor) \
- /* Return size for IC builtins/handlers is always 1. */ \
- code = BuildWithCodeStubAssemblerCS( \
- isolate, index, &Builtins::Generate_##Name, \
- CallDescriptors::InterfaceDescriptor, #Name, 1); \
+#define BUILD_TFH(Name, InterfaceDescriptor) \
+ /* Return size for IC builtins/handlers is always 1. */ \
+ code = BuildWithCodeStubAssemblerCS( \
+ isolate, index, &Builtins::Generate_##Name, \
+ CallDescriptors::InterfaceDescriptor, #Name); \
AddBuiltin(builtins, index++, code);
#define BUILD_BCH(Name, OperandScale, Bytecode) \
diff --git a/deps/v8/src/builtins/string-endswith.tq b/deps/v8/src/builtins/string-endswith.tq
new file mode 100644
index 0000000000..16405d4c12
--- /dev/null
+++ b/deps/v8/src/builtins/string-endswith.tq
@@ -0,0 +1,85 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace string {
+ macro TryFastStringCompareSequence(
+ string: String, searchStr: String, start: Number,
+ searchLength: Smi): Boolean labels Slow {
+ const directString = Cast<DirectString>(string) otherwise Slow;
+ const directSearchStr = Cast<DirectString>(searchStr) otherwise Slow;
+ const stringIndexSmi: Smi = Cast<Smi>(start) otherwise Slow;
+
+ let searchIndex: intptr = 0;
+ let stringIndex = Convert<intptr>(stringIndexSmi);
+ const searchLengthInteger = Convert<intptr>(searchLength);
+
+ while (searchIndex < searchLengthInteger) {
+ if (StringCharCodeAt(directSearchStr, searchIndex) !=
+ StringCharCodeAt(directString, stringIndex)) {
+ return False;
+ }
+
+ searchIndex++;
+ stringIndex++;
+ }
+ return True;
+ }
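+ // For example, TryFastStringCompareSequence('abcde', 'cd', 2, 2) returns
+ // True; if either string is indirect (e.g. a SlicedString) or start is
+ // not a Smi, the macro bails out to the Slow label instead.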
+
+ // https://tc39.github.io/ecma262/#sec-string.prototype.endswith
+ transitioning javascript builtin StringPrototypeEndsWith(
+ context: Context, receiver: Object, ...arguments): Boolean {
+ const searchString: Object = arguments[0];
+ const endPosition: Object = arguments[1];
+
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ const object: Object = RequireObjectCoercible(receiver);
+
+ // 2. Let S be ? ToString(O).
+ const string: String = ToString_Inline(context, object);
+
+ // 3. Let isRegExp be ? IsRegExp(searchString).
+ // 4. If isRegExp is true, throw a TypeError exception.
+ if (IsRegExp(searchString)) {
+ ThrowTypeError(kFirstArgumentNotRegExp, 'String.prototype.endsWith');
+ }
+
+ // 5. Let searchStr be ? ToString(searchString).
+ const searchStr: String = ToString_Inline(context, searchString);
+
+ // 6. Let len be the length of S.
+ const len: Number = string.length_smi;
+
+ // 7. If endPosition is undefined, let pos be len,
+ // else let pos be ? ToInteger(endPosition).
+ const pos: Number = (endPosition == Undefined) ?
+ len :
+ ToInteger_Inline(context, endPosition);
+
+ // 8. Let end be min(max(pos, 0), len).
+ const end: Number = NumberMin(NumberMax(pos, 0), len);
+
+ // 9. Let searchLength be the length of searchStr.
+ const searchLength: Smi = searchStr.length_smi;
+
+ // 10. Let start be end - searchLength.
+ let start = end - searchLength;
+
+ // 11. If start is less than 0, return false.
+ if (start < 0) return False;
+
+ // 12. If the sequence of code units of S starting at start of length
+ // searchLength is the same as the full code unit sequence of searchStr,
+ // return true.
+ // 13. Otherwise, return false.
+ try {
+ // Fast Path: If both strings are direct and relevant indices are Smis.
+ return TryFastStringCompareSequence(
+ string, searchStr, start, searchLength) otherwise Slow;
+ }
+ label Slow {
+ // Slow Path: If either of the strings is indirect, bail into the runtime.
+ return StringCompareSequence(context, string, searchStr, start);
+ }
+ }
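+
+ // Usage: 'JavaScript'.endsWith('Script') is true, and
+ // 'JavaScript'.endsWith('Java', 4) is true because only the first four
+ // code units are considered.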
+}
diff --git a/deps/v8/src/builtins/string-startswith.tq b/deps/v8/src/builtins/string-startswith.tq
new file mode 100644
index 0000000000..1f885a2afd
--- /dev/null
+++ b/deps/v8/src/builtins/string-startswith.tq
@@ -0,0 +1,71 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-regexp-gen.h'
+
+namespace string {
+ extern macro RegExpBuiltinsAssembler::IsRegExp(implicit context:
+ Context)(Object): bool;
+
+ // TODO(ryzokuken): Add RequireObjectCoercible to base.tq and update callsites
+ macro RequireObjectCoercible(implicit context: Context)(argument: Object):
+ Object {
+ if (IsNullOrUndefined(argument)) {
+ ThrowTypeError(kCalledOnNullOrUndefined, 'String.prototype.startsWith');
+ }
+ return argument;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-string.prototype.startswith
+ transitioning javascript builtin StringPrototypeStartsWith(
+ context: Context, receiver: Object, ...arguments): Boolean {
+ const searchString: Object = arguments[0];
+ const position: Object = arguments[1];
+
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ const object: Object = RequireObjectCoercible(receiver);
+
+ // 2. Let S be ? ToString(O).
+ const string: String = ToString_Inline(context, object);
+
+ // 3. Let isRegExp be ? IsRegExp(searchString).
+ // 4. If isRegExp is true, throw a TypeError exception.
+ if (IsRegExp(searchString)) {
+ ThrowTypeError(kFirstArgumentNotRegExp, 'String.prototype.startsWith');
+ }
+
+ // 5. Let searchStr be ? ToString(searchString).
+ const searchStr: String = ToString_Inline(context, searchString);
+
+ // 6. Let pos be ? ToInteger(position).
+ const pos: Number = ToInteger_Inline(context, position);
+
+ // 7. Assert: If position is undefined, then pos is 0.
+ // 8. Let len be the length of S.
+ const len: Number = string.length_smi;
+
+ // 9. Let start be min(max(pos, 0), len).
+ const start: Number = NumberMin(NumberMax(pos, 0), len);
+
+ // 10. Let searchLength be the length of searchStr.
+ const searchLength: Smi = searchStr.length_smi;
+
+ // 11. If searchLength + start is greater than len, return false.
+ if (searchLength + start > len) return False;
+
+ // 12. If the sequence of code units of S starting at start of length
+ // searchLength is the same as the full code unit sequence of searchStr,
+ // return true.
+ // 13. Otherwise, return false.
+ try {
+ // Fast Path: If both strings are direct and relevant indices are Smis.
+ return TryFastStringCompareSequence(
+ string, searchStr, start, searchLength) otherwise Slow;
+ }
+ label Slow {
+ // Slow Path: If either of the strings is indirect, bail into the runtime.
+ return StringCompareSequence(context, string, searchStr, start);
+ }
+ }
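+
+ // Usage: 'JavaScript'.startsWith('Java') is true, and
+ // 'JavaScript'.startsWith('Script', 4) is true because matching begins
+ // at position 4.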
+}
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 64d9930815..04630dc295 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -2,23 +2,92 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-namespace typed_array {
- extern builtin TypedArrayInitialize(implicit context: Context)(
- JSTypedArray, PositiveSmi, PositiveSmi, Boolean, JSReceiver): void;
+#include 'src/builtins/builtins-constructor-gen.h'
- extern macro TypedArrayBuiltinsAssembler::ByteLengthIsValid(Number): bool;
- extern macro TypedArrayBuiltinsAssembler::CallCMemcpy(
- RawPtr, RawPtr, uintptr): void;
+namespace typed_array_createtypedarray {
+ extern builtin IterableToListMayPreserveHoles(Context, Object, Callable):
+ JSArray;
+ extern macro ConstructorBuiltinsAssembler::EmitFastNewObject(
+ implicit context: Context)(JSFunction, JSReceiver): JSTypedArray;
+ extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
+ implicit context: Context)(JSTypedArray, uintptr): JSArrayBuffer;
+ extern macro TypedArrayBuiltinsAssembler::AllocateOnHeapElements(
+ Map, intptr, Number): FixedTypedArrayBase;
+ extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor(
+ implicit context: Context)(JSTypedArray): JSFunction;
+ extern macro TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(JSArrayBuffer):
+ bool;
+ extern macro TypedArrayBuiltinsAssembler::SetupTypedArray(
+ JSTypedArray, Smi, uintptr, uintptr): void;
+
+ extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)(
+ Map, String): never;
extern runtime TypedArrayCopyElements(Context, JSTypedArray, Object, Number):
void;
+ macro CalculateTotalElementsByteSize(byteLength: intptr): intptr {
+ return (kFixedTypedArrayBaseHeaderSize + kObjectAlignmentMask +
+ byteLength) &
+ ~kObjectAlignmentMask;
+ }
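+ // e.g. with kObjectAlignmentMask == 7 (8-byte object alignment), a
+ // byteLength of 10 gives header + 10 rounded up to the next multiple
+ // of 8.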
+
+ transitioning macro TypedArrayInitialize(implicit context: Context)(
+ initialize: constexpr bool, typedArray: JSTypedArray, length: PositiveSmi,
+ elementsInfo: typed_array::TypedArrayElementsInfo,
+ bufferConstructor: JSReceiver): uintptr {
+ const byteLength = elementsInfo.CalculateByteLength(length)
+ otherwise ThrowRangeError(kInvalidArrayBufferLength);
+ const byteLengthNum = Convert<Number>(byteLength);
+ const defaultConstructor = GetArrayBufferFunction();
+
+ try {
+ if (bufferConstructor != defaultConstructor) {
+ goto AttachOffHeapBuffer(ConstructWithTarget(
+ defaultConstructor, bufferConstructor, byteLengthNum));
+ }
+
+ if (byteLength > V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP) goto AllocateOffHeap;
+
+ AllocateEmptyOnHeapBuffer(typedArray, byteLength);
+
+ const totalSize =
+ CalculateTotalElementsByteSize(Convert<intptr>(byteLength));
+ const elements =
+ AllocateOnHeapElements(elementsInfo.map, totalSize, length);
+ typedArray.elements = elements;
+
+ if constexpr (initialize) {
+ const backingStore = LoadFixedTypedArrayOnHeapBackingStore(elements);
+ typed_array::CallCMemset(backingStore, 0, byteLength);
+ }
+ }
+ label AllocateOffHeap {
+ if constexpr (initialize) {
+ goto AttachOffHeapBuffer(Construct(defaultConstructor, byteLengthNum));
+ } else {
+ goto AttachOffHeapBuffer(Call(
+ context, GetArrayBufferNoInitFunction(), Undefined, byteLengthNum));
+ }
+ }
+ label AttachOffHeapBuffer(bufferObj: Object) {
+ const buffer = Cast<JSArrayBuffer>(bufferObj) otherwise unreachable;
+ const byteOffset: uintptr = 0;
+ typedArray.AttachOffHeapBuffer(
+ buffer, elementsInfo.map, length, byteOffset);
+ }
+
+ const byteOffset: uintptr = 0;
+ SetupTypedArray(typedArray, length, byteOffset, byteLength);
+
+ return byteLength;
+ }
+
// 22.2.4.2 TypedArray ( length )
// ES #sec-typedarray-length
- macro ConstructByLength(implicit context: Context)(
- typedArray: JSTypedArray, length: Object, elementSize: Smi): void {
- const positiveElementSize: PositiveSmi =
- Cast<PositiveSmi>(elementSize) otherwise unreachable;
+ transitioning macro ConstructByLength(implicit context: Context)(
+ typedArray: JSTypedArray, length: Object,
+ elementsInfo: typed_array::TypedArrayElementsInfo): void {
const convertedLength: Number =
ToInteger_Inline(context, length, kTruncateMinusZero);
// The maximum length of a TypedArray is MaxSmi().
@@ -26,42 +95,39 @@ namespace typed_array {
// representation (which uses Smis).
// TODO(7881): support larger-than-smi typed array lengths
const positiveLength: PositiveSmi = Cast<PositiveSmi>(convertedLength)
- otherwise ThrowRangeError(context, kInvalidTypedArrayLength, length);
- const defaultConstructor: JSFunction = GetArrayBufferFunction();
- const initialize: Boolean = True;
+ otherwise ThrowRangeError(kInvalidTypedArrayLength, length);
+ const defaultConstructor: Constructor = GetArrayBufferFunction();
+ const initialize: constexpr bool = true;
TypedArrayInitialize(
- typedArray, positiveLength, positiveElementSize, initialize,
+ initialize, typedArray, positiveLength, elementsInfo,
defaultConstructor);
}
// 22.2.4.4 TypedArray ( object )
// ES #sec-typedarray-object
- macro ConstructByArrayLike(implicit context: Context)(
+ transitioning macro ConstructByArrayLike(implicit context: Context)(
typedArray: JSTypedArray, arrayLike: HeapObject, initialLength: Object,
- elementSize: Smi, bufferConstructor: JSReceiver): void {
- const positiveElementSize: PositiveSmi =
- Cast<PositiveSmi>(elementSize) otherwise unreachable;
+ elementsInfo: typed_array::TypedArrayElementsInfo,
+ bufferConstructor: JSReceiver): void {
// The caller has looked up length on arrayLike, which is observable.
const length: PositiveSmi = ToSmiLength(initialLength)
- otherwise ThrowRangeError(context, kInvalidTypedArrayLength, initialLength);
- const initialize: Boolean = False;
- TypedArrayInitialize(
- typedArray, length, positiveElementSize, initialize, bufferConstructor);
+ otherwise ThrowRangeError(kInvalidTypedArrayLength, initialLength);
+ const initialize: constexpr bool = false;
+ const byteLength = TypedArrayInitialize(
+ initialize, typedArray, length, elementsInfo, bufferConstructor);
try {
const src: JSTypedArray = Cast<JSTypedArray>(arrayLike) otherwise IfSlow;
if (IsDetachedBuffer(src.buffer)) {
- ThrowTypeError(context, kDetachedOperation, 'Construct');
+ ThrowTypeError(kDetachedOperation, 'Construct');
- } else if (src.elements_kind != typedArray.elements_kind) {
+ } else if (src.elements_kind != elementsInfo.kind) {
goto IfSlow;
} else if (length > 0) {
- const byteLength: Number = SmiMul(length, elementSize);
- assert(ByteLengthIsValid(byteLength));
- CallCMemcpy(
- typedArray.data_ptr, src.data_ptr, Convert<uintptr>(byteLength));
+ assert(byteLength <= kTypedArrayMaxByteLength);
+ typed_array::CallCMemcpy(typedArray.data_ptr, src.data_ptr, byteLength);
}
}
label IfSlow deferred {
@@ -70,4 +136,253 @@ namespace typed_array {
}
}
}
+
+ // 22.2.4.4 TypedArray ( object )
+ // ES #sec-typedarray-object
+ transitioning macro ConstructByIterable(implicit context: Context)(
+ typedArray: JSTypedArray, iterable: JSReceiver, iteratorFn: Callable,
+ elementsInfo: typed_array::TypedArrayElementsInfo): never
+ labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
+ const array: JSArray =
+ IterableToListMayPreserveHoles(context, iterable, iteratorFn);
+ goto IfConstructByArrayLike(array, array.length, GetArrayBufferFunction());
+ }
+
+ // 22.2.4.3 TypedArray ( typedArray )
+ // ES #sec-typedarray-typedarray
+ transitioning macro ConstructByTypedArray(implicit context: Context)(
+ typedArray: JSTypedArray, srcTypedArray: JSTypedArray,
+ elementsInfo: typed_array::TypedArrayElementsInfo): never
+ labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
+ let bufferConstructor: JSReceiver = GetArrayBufferFunction();
+ const srcBuffer: JSArrayBuffer = srcTypedArray.buffer;
+ // TODO(petermarshall): Throw on detached typedArray.
+ let length: Smi = IsDetachedBuffer(srcBuffer) ? 0 : srcTypedArray.length;
+
+ // The spec requires that constructing a typed array using a SAB-backed
+ // typed array use the ArrayBuffer constructor, not the species constructor.
+ // See https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
+ if (!IsSharedArrayBuffer(srcBuffer)) {
+ bufferConstructor = SpeciesConstructor(srcBuffer, bufferConstructor);
+ // TODO(petermarshall): Throw on detached typedArray.
+ if (IsDetachedBuffer(srcBuffer)) length = 0;
+ }
+ goto IfConstructByArrayLike(srcTypedArray, length, bufferConstructor);
+ }
+
+ // 22.2.4.5 TypedArray ( buffer, byteOffset, length )
+ // ES #sec-typedarray-buffer-byteoffset-length
+ macro ConstructByArrayBuffer(implicit context: Context)(
+ typedArray: JSTypedArray, buffer: JSArrayBuffer, byteOffset: Object,
+ length: Object, elementsInfo: typed_array::TypedArrayElementsInfo): void {
+ try {
+ let offset: uintptr = 0;
+ if (byteOffset != Undefined) {
+ // 6. Let offset be ? ToIndex(byteOffset).
+ offset = TryNumberToUintPtr(
+ ToInteger_Inline(context, byteOffset, kTruncateMinusZero))
+ otherwise goto IfInvalidOffset;
+
+ // 7. If offset modulo elementSize ≠ 0, throw a RangeError exception.
+ if (elementsInfo.IsUnaligned(offset)) {
+ goto IfInvalidAlignment('start offset');
+ }
+ }
+
+ let newLength: PositiveSmi = 0;
+ let newByteLength: uintptr;
+ // 8. If length is present and length is not undefined, then
+ if (length != Undefined) {
+ // a. Let newLength be ? ToIndex(length).
+ newLength = ToSmiIndex(length) otherwise IfInvalidLength;
+ }
+
+ // 9. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ if (IsDetachedBuffer(buffer)) {
+ ThrowTypeError(kDetachedOperation, 'Construct');
+ }
+
+ // 10. Let bufferByteLength be buffer.[[ArrayBufferByteLength]].
+ const bufferByteLength: uintptr = buffer.byte_length;
+
+ // 11. If length is either not present or undefined, then
+ if (length == Undefined) {
+ // a. If bufferByteLength modulo elementSize ≠ 0, throw a RangeError
+ // exception.
+ if (elementsInfo.IsUnaligned(bufferByteLength)) {
+ goto IfInvalidAlignment('byte length');
+ }
+
+ // b. Let newByteLength be bufferByteLength - offset.
+ // c. If newByteLength < 0, throw a RangeError exception.
+ if (bufferByteLength < offset) goto IfInvalidOffset;
+
+ // Spec step 16 length calculated here to avoid recalculating the length
+ // in the step 12 branch.
+ newByteLength = bufferByteLength - offset;
+ newLength = elementsInfo.CalculateLength(newByteLength)
+ otherwise IfInvalidOffset;
+
+ // 12. Else,
+ } else {
+ // a. Let newByteLength be newLength Ɨ elementSize.
+ newByteLength = elementsInfo.CalculateByteLength(newLength)
+ otherwise IfInvalidByteLength;
+
+ // b. If offset + newByteLength > bufferByteLength, throw a RangeError
+ // exception.
+ if ((bufferByteLength < newByteLength) ||
+ (offset > bufferByteLength - newByteLength))
+ goto IfInvalidLength;
+ }
+
+ SetupTypedArray(typedArray, newLength, offset, newByteLength);
+ typedArray.AttachOffHeapBuffer(
+ buffer, elementsInfo.map, newLength, offset);
+ }
+ label IfInvalidAlignment(problemString: String) deferred {
+ ThrowInvalidTypedArrayAlignment(typedArray.map, problemString);
+ }
+ label IfInvalidByteLength deferred {
+ ThrowRangeError(kInvalidArrayBufferLength);
+ }
+ label IfInvalidLength deferred {
+ ThrowRangeError(kInvalidTypedArrayLength, length);
+ }
+ label IfInvalidOffset deferred {
+ ThrowRangeError(kInvalidOffset, byteOffset);
+ }
+ }
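+ // For example, `new Uint32Array(buffer, 8, 4)` (with a sufficiently
+ // large, non-detached buffer) reaches SetupTypedArray with offset 8,
+ // newLength 4 and newByteLength 16.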
+
+ transitioning macro ConstructByJSReceiver(implicit context: Context)(
+ array: JSTypedArray, obj: JSReceiver,
+ elementsInfo: typed_array::TypedArrayElementsInfo): never
+ labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
+ try {
+ const iteratorMethod: Object =
+ GetIteratorMethod(obj) otherwise IfIteratorUndefined;
+ const iteratorFn: Callable = Cast<Callable>(iteratorMethod)
+ otherwise ThrowTypeError(kIteratorSymbolNonCallable);
+ ConstructByIterable(array, obj, iteratorFn, elementsInfo)
+ otherwise IfConstructByArrayLike;
+ }
+ label IfIteratorUndefined {
+ const lengthObj: Object = GetProperty(obj, kLengthString);
+ const length: Smi = ToSmiLength(lengthObj)
+ otherwise goto IfInvalidLength(lengthObj);
+ goto IfConstructByArrayLike(obj, length, GetArrayBufferFunction());
+ }
+ label IfInvalidLength(length: Object) {
+ ThrowRangeError(kInvalidTypedArrayLength, length);
+ }
+ }
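+ // e.g. `new Uint8Array(new Set([1, 2, 3]))` takes the iterator path,
+ // while `new Uint8Array({length: 2, 0: 7, 1: 8})` falls back to the
+ // array-like path via IfIteratorUndefined.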
+
+ // 22.2.4 The TypedArray Constructors
+ // ES #sec-typedarray-constructors
+ transitioning builtin CreateTypedArray(
+ context: Context, target: JSFunction, newTarget: JSReceiver, arg1: Object,
+ arg2: Object, arg3: Object): JSTypedArray {
+ assert(IsConstructor(target));
+ // 4. Let O be ? AllocateTypedArray(constructorName, NewTarget,
+ // "%TypedArrayPrototype%").
+ const array: JSTypedArray = EmitFastNewObject(target, newTarget);
+ // We need to set the byte_offset / byte_length to some sane values
+ // to keep the heap verifier happy.
+ // TODO(bmeurer): Fix this initialization to not use EmitFastNewObject,
+ // which causes the problem, since it puts Undefined into all slots of
+ // the object even though that doesn't make any sense for these fields.
+ array.byte_offset = 0;
+ array.byte_length = 0;
+
+ // 5. Let elementSize be the Number value of the Element Size value in Table
+ // 56 for constructorName.
+ const elementsInfo: typed_array::TypedArrayElementsInfo =
+ typed_array::GetTypedArrayElementsInfo(array);
+
+ try {
+ typeswitch (arg1) {
+ case (length: Smi): {
+ goto IfConstructByLength(length);
+ }
+ case (buffer: JSArrayBuffer): {
+ ConstructByArrayBuffer(array, buffer, arg2, arg3, elementsInfo);
+ }
+ case (typedArray: JSTypedArray): {
+ ConstructByTypedArray(array, typedArray, elementsInfo)
+ otherwise IfConstructByArrayLike;
+ }
+ case (obj: JSReceiver): {
+ ConstructByJSReceiver(array, obj, elementsInfo)
+ otherwise IfConstructByArrayLike;
+ }
+ // The first argument was a number or fell through and is treated as
+ // a number. https://tc39.github.io/ecma262/#sec-typedarray-length
+ case (lengthObj: HeapObject): {
+ goto IfConstructByLength(lengthObj);
+ }
+ }
+ }
+ label IfConstructByLength(length: Object) {
+ ConstructByLength(array, length, elementsInfo);
+ }
+ label IfConstructByArrayLike(
+ arrayLike: HeapObject, length: Object, bufferConstructor: JSReceiver) {
+ ConstructByArrayLike(
+ array, arrayLike, length, elementsInfo, bufferConstructor);
+ }
+ return array;
+ }
+
+ transitioning macro TypedArraySpeciesCreate(implicit context: Context)(
+ methodName: constexpr string, numArgs: constexpr int31,
+ exemplar: JSTypedArray, arg0: Object, arg1: Object,
+ arg2: Object): JSTypedArray {
+ const defaultConstructor = GetDefaultConstructor(exemplar);
+
+ try {
+ if (!IsPrototypeTypedArrayPrototype(exemplar.map)) goto IfSlow;
+ if (IsTypedArraySpeciesProtectorCellInvalid()) goto IfSlow;
+
+ const typedArray = CreateTypedArray(
+ context, defaultConstructor, defaultConstructor, arg0, arg1, arg2);
+
+ // It is assumed that the CreateTypedArray builtin does not produce a
+ // typed array that fails ValidateTypedArray.
+ assert(!IsDetachedBuffer(typedArray.buffer));
+
+ return typedArray;
+ }
+ label IfSlow deferred {
+ const constructor =
+ Cast<Constructor>(SpeciesConstructor(exemplar, defaultConstructor))
+ otherwise unreachable;
+
+ // TODO(pwong): Simplify and remove numArgs when varargs are supported in
+ // macros.
+ let newObj: Object = Undefined;
+ if constexpr (numArgs == 1) {
+ newObj = Construct(constructor, arg0);
+ } else {
+ assert(numArgs == 3);
+ newObj = Construct(constructor, arg0, arg1, arg2);
+ }
+
+ return typed_array::ValidateTypedArray(context, newObj, methodName);
+ }
+ }
+
+ transitioning macro TypedArraySpeciesCreateByLength(implicit context:
+ Context)(
+ methodName: constexpr string, exemplar: JSTypedArray,
+ length: Smi): JSTypedArray {
+ assert(Is<PositiveSmi>(length));
+ const numArgs: constexpr int31 = 1;
+ const typedArray: JSTypedArray = TypedArraySpeciesCreate(
+ methodName, numArgs, exemplar, length, Undefined, Undefined);
+ if (typedArray.length < length) deferred {
+ ThrowTypeError(kTypedArrayTooShort);
+ }
+
+ return typedArray;
+ }
}
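
The deferred IfSlow path above follows the spec's SpeciesConstructor lookup. A behavioral sketch in plain TypeScript, assuming `typeof x === "function"` as an approximation of IsConstructor; this is not a V8 API.

// Sketch of SpeciesConstructor: consult constructor[Symbol.species] on the
// exemplar, falling back to the default constructor.
function speciesConstructorSketch(
    exemplar: object, defaultCtor: Function): Function {
  const ctor = (exemplar as { constructor?: unknown }).constructor;
  if (ctor === undefined) return defaultCtor;
  if (typeof ctor !== "object" && typeof ctor !== "function") {
    throw new TypeError("object's constructor is not an object");
  }
  const species = (ctor as Record<symbol, unknown>)[Symbol.species];
  if (species === undefined || species === null) return defaultCtor;
  if (typeof species === "function") return species;
  throw new TypeError("@@species is not a constructor");
}
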
diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq
new file mode 100644
index 0000000000..d73f21efa1
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-filter.tq
@@ -0,0 +1,79 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace typed_array_filter {
+ const kBuiltinName: constexpr string = '%TypedArray%.prototype.filter';
+
+ extern runtime TypedArrayCopyElements(Context, JSTypedArray, Object, Number):
+ void;
+
+ // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.filter
+ transitioning javascript builtin TypedArrayPrototypeFilter(
+ context: Context, receiver: Object, ...arguments): Object {
+ // arguments[0] = callback
+ // arguments[1] = thisArg
+ try {
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise ThrowTypeError(kNotTypedArray, kBuiltinName);
+ const src = typed_array::EnsureAttached(array) otherwise IsDetached;
+
+ // 3. Let len be O.[[ArrayLength]].
+ const len: Smi = src.length;
+
+ // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ const callbackfn = Cast<Callable>(arguments[0])
+ otherwise ThrowTypeError(kCalledNonCallable, arguments[0]);
+
+ // 5. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: Object = arguments[1];
+
+ // 6. Let kept be a new empty List.
+ let kept = growable_fixed_array::NewGrowableFixedArray();
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(src);
+
+ // 7. Let k be 0.
+ // 8. Let captured be 0.
+ // 9. Repeat, while k < len
+ for (let k: Smi = 0; k < len; k++) {
+ witness.Recheck() otherwise IsDetached;
+
+ // a. Let Pk be ! ToString(k).
+ // b. Let kValue be ? Get(O, Pk).
+ const value: Object = witness.Load(k);
+
+ // c. Let selected be ToBoolean(? Call(callbackfn, T, « kValue, k, O
+ // »)).
+ const selected: Object =
+ Call(context, callbackfn, thisArg, value, k, witness.GetStable());
+
+ // d. If selected is true, then
+ // i. Append kValue to the end of kept.
+ // ii. Increase captured by 1.
+ if (BranchIfToBooleanIsTrue(selected)) kept.Push(value);
+
+ // e. Increase k by 1.
+ }
+
+ // 10. Let A be ? TypedArraySpeciesCreate(O, captured).
+ const lengthSmi: Smi = Convert<Smi>(kept.length);
+ const typedArray: JSTypedArray =
+ typed_array_createtypedarray::TypedArraySpeciesCreateByLength(
+ kBuiltinName, array, lengthSmi);
+
+ // 11. Let n be 0.
+ // 12. For each element e of kept, do
+ // a. Perform ! Set(A, ! ToString(n), e, true).
+ // b. Increment n by 1.
+ TypedArrayCopyElements(context, typedArray, kept.ToJSArray(), lengthSmi);
+
+ // 13. Return A.
+ return typedArray;
+ }
+ label IsDetached deferred {
+ ThrowTypeError(kDetachedOperation, kBuiltinName);
+ }
+ }
+}
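
The shape of this builtin (collect first, then species-create with the final count and copy) can be sketched in plain TypeScript. Float64Array stands in for an arbitrary element kind; the helper is illustrative, not V8 internals.

// Sketch of the filter strategy: gather selected values in a growable list,
// then allocate the result at its final length and copy once.
function typedArrayFilterSketch(
    src: Float64Array,
    callbackfn: (value: number, index: number, array: Float64Array) => unknown,
    thisArg?: unknown): Float64Array {
  // Steps 6-9: collect the selected values.
  const kept: number[] = [];
  for (let k = 0; k < src.length; k++) {
    const value = src[k];
    if (callbackfn.call(thisArg, value, k, src)) kept.push(value);
  }
  // Steps 10-12: corresponds to TypedArraySpeciesCreateByLength followed by
  // TypedArrayCopyElements.
  const result = new Float64Array(kept.length);
  result.set(kept);
  return result;
}
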
diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq
new file mode 100644
index 0000000000..49ed0a67c0
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-foreach.tq
@@ -0,0 +1,50 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array_foreach {
+ const kBuiltinName: constexpr string = '%TypedArray%.prototype.forEach';
+
+ transitioning macro ForEachAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ thisArg: Object): Object {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ const length: Smi = Convert<Smi>(array.length);
+ for (let k: Smi = 0; k < length; k++) {
+ // BUG(4895): We should throw on detached buffers rather than simply exit.
+ witness.Recheck() otherwise break;
+ const value: Object = witness.Load(k);
+ Call(context, callbackfn, thisArg, value, k, witness.GetStable());
+ }
+ return Undefined;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.foreach
+ transitioning javascript builtin
+ TypedArrayPrototypeForEach(implicit context: Context)(
+ receiver: Object, ...arguments): Object {
+ // arguments[0] = callback
+ // arguments[1] = this_arg.
+
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const thisArg = arguments[1];
+ return ForEachAllElements(uarray, callbackfn, thisArg);
+ }
+ label NotCallable deferred {
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
+ }
+ label NotTypedArray deferred {
+ ThrowTypeError(kNotTypedArray, kBuiltinName);
+ }
+ label IsDetached deferred {
+ ThrowTypeError(kDetachedOperation, kBuiltinName);
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq
new file mode 100644
index 0000000000..232f2fc570
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-reduce.tq
@@ -0,0 +1,59 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array_reduce {
+ const kBuiltinName: constexpr string = '%TypedArray%.prototype.reduce';
+
+ transitioning macro ReduceAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ initialValue: Object): Object {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ const length: Smi = Convert<Smi>(witness.Get().length);
+ let accumulator = initialValue;
+ for (let k: Smi = 0; k < length; k++) {
+ // BUG(4895): We should throw on detached buffers rather than simply exit.
+ witness.Recheck() otherwise break;
+ const value: Object = witness.Load(k);
+ if (accumulator == Hole) {
+ accumulator = value;
+ } else {
+ accumulator = Call(
+ context, callbackfn, Undefined, accumulator, value, k,
+ witness.GetStable());
+ }
+ }
+ if (accumulator == Hole) {
+ ThrowTypeError(kReduceNoInitial, kBuiltinName);
+ }
+ return accumulator;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduce
+ transitioning javascript builtin
+ TypedArrayPrototypeReduce(implicit context: Context)(
+ receiver: Object, ...arguments): Object {
+ // arguments[0] = callback
+ // arguments[1] = initialValue.
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const initialValue = arguments.length >= 2 ? arguments[1] : Hole;
+ return ReduceAllElements(uarray, callbackfn, initialValue);
+ }
+ label NotCallable deferred {
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
+ }
+ label NotTypedArray deferred {
+ ThrowTypeError(kNotTypedArray, kBuiltinName);
+ }
+ label IsDetached deferred {
+ ThrowTypeError(kDetachedOperation, kBuiltinName);
+ }
+ }
+}
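
The Hole comparison above implements the "no initial value" protocol. A sketch of the same accumulator logic in TypeScript, with a unique symbol standing in for the Hole sentinel; the function name and signature are illustrative only.

// Sketch of ReduceAllElements: a sentinel distinguishes "no initialValue
// supplied" from a legitimate undefined initial value.
const HOLE = Symbol("hole");

function typedArrayReduceSketch(
    array: Float64Array,
    callbackfn: (acc: unknown, v: number, k: number, o: Float64Array) => unknown,
    ...initial: unknown[]): unknown {
  let accumulator: unknown = initial.length > 0 ? initial[0] : HOLE;
  for (let k = 0; k < array.length; k++) {
    const value = array[k];
    accumulator = accumulator === HOLE
        ? value  // first element becomes the accumulator
        : callbackfn(accumulator, value, k, array);
  }
  if (accumulator === HOLE) {
    throw new TypeError("Reduce of empty array with no initial value");
  }
  return accumulator;
}
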
diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq
new file mode 100644
index 0000000000..3aa9511a06
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-reduceright.tq
@@ -0,0 +1,60 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array_reduceright {
+ const kBuiltinName: constexpr string = '%TypedArray%.prototype.reduceRight';
+
+ transitioning macro ReduceRightAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ initialValue: Object): Object {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ const length: Smi = Convert<Smi>(array.length);
+ let accumulator = initialValue;
+ for (let k: Smi = length - 1; k >= 0; k--) {
+ // BUG(4895): We should throw on detached buffers rather than simply exit.
+ witness.Recheck() otherwise break;
+ const value: Object = witness.Load(k);
+ if (accumulator == Hole) {
+ accumulator = value;
+ } else {
+ accumulator = Call(
+ context, callbackfn, Undefined, accumulator, value, k,
+ witness.GetStable());
+ }
+ }
+ if (accumulator == Hole) {
+ ThrowTypeError(kReduceNoInitial, kBuiltinName);
+ }
+ return accumulator;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright
+ transitioning javascript builtin
+ TypedArrayPrototypeReduceRight(implicit context: Context)(
+ receiver: Object, ...arguments): Object {
+ // arguments[0] = callback
+ // arguments[1] = initialValue.
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const initialValue = arguments.length >= 2 ? arguments[1] : Hole;
+
+ return ReduceRightAllElements(uarray, callbackfn, initialValue);
+ }
+ label NotCallable deferred {
+ ThrowTypeError(kCalledNonCallable, arguments[0]);
+ }
+ label NotTypedArray deferred {
+ ThrowTypeError(kNotTypedArray, kBuiltinName);
+ }
+ label IsDetached deferred {
+ ThrowTypeError(kDetachedOperation, kBuiltinName);
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq
new file mode 100644
index 0000000000..f45654b71e
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-slice.tq
@@ -0,0 +1,107 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array_slice {
+ const kBuiltinName: constexpr string = '%TypedArray%.prototype.slice';
+
+ extern macro TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsSlice(
+ JSTypedArray, JSTypedArray, intptr, intptr): void;
+
+ macro FastCopy(
+ src: typed_array::AttachedJSTypedArray, dest: JSTypedArray, k: intptr,
+ count: PositiveSmi) labels IfSlow {
+ GotoIfForceSlowPath() otherwise IfSlow;
+
+ const srcKind: ElementsKind = src.elements_kind;
+ const destInfo = typed_array::GetTypedArrayElementsInfo(dest);
+
+ // dest could have a different element type from src, or share the same
+ // buffer with src, because of a custom species constructor. Only when the
+ // types of src and dest match and they do not share a buffer can memmove
+ // be used.
+ if (srcKind != destInfo.kind) goto IfSlow;
+ if (BitcastTaggedToWord(dest.buffer) == BitcastTaggedToWord(src.buffer)) {
+ goto IfSlow;
+ }
+
+ const countBytes: uintptr =
+ destInfo.CalculateByteLength(count) otherwise unreachable;
+ const startOffset: uintptr =
+ destInfo.CalculateByteLength(Convert<PositiveSmi>(k))
+ otherwise unreachable;
+ const srcPtr: RawPtr = src.data_ptr + Convert<intptr>(startOffset);
+
+ assert(countBytes <= dest.byte_length);
+ assert(countBytes <= src.byte_length - startOffset);
+
+ typed_array::CallCMemmove(dest.data_ptr, srcPtr, countBytes);
+ }
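
The eligibility test above reduces to two predicates. A trivial TypeScript sketch, assuming prototype identity as a proxy for matching elements kinds; illustrative only.

// Sketch of FastCopy's guard: memmove only when the element kinds match and
// the views do not alias the same buffer (a species constructor can break
// either assumption).
function canUseMemmove(src: ArrayBufferView, dest: ArrayBufferView): boolean {
  const sameKind = Object.getPrototypeOf(src) === Object.getPrototypeOf(dest);
  const sameBuffer = src.buffer === dest.buffer;
  return sameKind && !sameBuffer;
}
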
+
+ macro SlowCopy(implicit context: Context)(
+ src: JSTypedArray, dest: JSTypedArray, k: intptr, final: intptr) {
+ if (typed_array::IsBigInt64ElementsKind(src.elements_kind) !=
+ typed_array::IsBigInt64ElementsKind(dest.elements_kind))
+ deferred {
+ ThrowTypeError(kBigIntMixedTypes);
+ }
+
+ CallCCopyTypedArrayElementsSlice(src, dest, k, final);
+ }
+
+ // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.slice
+ transitioning javascript builtin TypedArrayPrototypeSlice(
+ context: Context, receiver: Object, ...arguments): Object {
+ // arguments[0] = start
+ // arguments[1] = end
+
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ const src: JSTypedArray =
+ typed_array::ValidateTypedArray(context, receiver, kBuiltinName);
+
+ // 3. Let len be O.[[ArrayLength]].
+ const len = Convert<intptr>(src.length);
+
+ // 4. Let relativeStart be ? ToInteger(start).
+ // 5. If relativeStart < 0, let k be max((len + relativeStart), 0);
+ // else let k be min(relativeStart, len).
+ const start = arguments[0];
+ const k: intptr =
+ start != Undefined ? ConvertToRelativeIndex(start, len) : 0;
+
+ // 6. If end is undefined, let relativeEnd be len;
+ // else let relativeEnd be ? ToInteger(end).
+ // 7. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
+ // else let final be min(relativeEnd, len).
+ const end = arguments[1];
+ const final: intptr =
+ end != Undefined ? ConvertToRelativeIndex(end, len) : len;
+
+ // 8. Let count be max(final - k, 0).
+ const count = Convert<PositiveSmi>(IntPtrMax(final - k, 0));
+
+ // 9. Let A be ? TypedArraySpeciesCreate(O, « count »).
+ const dest: JSTypedArray =
+ typed_array_createtypedarray::TypedArraySpeciesCreateByLength(
+ kBuiltinName, src, count);
+
+ if (count > 0) {
+ try {
+ const srcAttached = typed_array::EnsureAttached(src)
+ otherwise IfDetached;
+ FastCopy(srcAttached, dest, k, count) otherwise IfSlow;
+ }
+ label IfDetached deferred {
+ ThrowTypeError(kDetachedOperation, kBuiltinName);
+ }
+ label IfSlow deferred {
+ SlowCopy(src, dest, k, final);
+ }
+ }
+
+ return dest;
+ }
+}
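
ConvertToRelativeIndex, used for both start and end above, performs the spec's clamping arithmetic (steps 4-7). A sketch of just that math in TypeScript; Math.trunc approximates ToInteger, and the helper name mirrors the Torque macro only for readability.

// Sketch of the relative-index clamping used by slice.
function convertToRelativeIndexSketch(relative: number, len: number): number {
  relative = Math.trunc(relative);  // ? ToInteger(relative)
  return relative < 0 ? Math.max(len + relative, 0) : Math.min(relative, len);
}

// Example: for len = 10, start = -3 gives k = 7; end = undefined gives
// final = len = 10; so count = max(final - k, 0) = 3.
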
diff --git a/deps/v8/src/builtins/typed-array-subarray.tq b/deps/v8/src/builtins/typed-array-subarray.tq
new file mode 100644
index 0000000000..54b945f44e
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-subarray.tq
@@ -0,0 +1,63 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace typed_array_subarray {
+ // ES %TypedArray%.prototype.subarray
+ transitioning javascript builtin TypedArrayPrototypeSubArray(
+ context: Context, receiver: Object, ...arguments): JSTypedArray {
+ const methodName: constexpr string = '%TypedArray%.prototype.subarray';
+
+ // 1. Let O be the this value.
+ // 3. If O does not have a [[TypedArrayName]] internal slot, throw a
+ // TypeError exception.
+ const source = Cast<JSTypedArray>(receiver)
+ otherwise ThrowTypeError(kIncompatibleMethodReceiver, methodName);
+
+ // 5. Let buffer be O.[[ViewedArrayBuffer]].
+ const buffer = typed_array::GetBuffer(source);
+
+ // 6. Let srcLength be O.[[ArrayLength]].
+ const srcLength = Convert<intptr>(source.length);
+
+ // 7. Let relativeBegin be ? ToInteger(begin).
+ // 8. If relativeBegin < 0, let beginIndex be max((srcLength +
+ // relativeBegin), 0); else let beginIndex be min(relativeBegin,
+ // srcLength).
+ const arg0 = arguments[0];
+ const begin: intptr =
+ arg0 != Undefined ? ConvertToRelativeIndex(arg0, srcLength) : 0;
+
+ // 9. If end is undefined, let relativeEnd be srcLength;
+ // else, let relativeEnd be ? ToInteger(end).
+ // 10. If relativeEnd < 0, let endIndex be max((srcLength + relativeEnd),
+ // 0); else let endIndex be min(relativeEnd, srcLength).
+ const arg1 = arguments[1];
+ const end: intptr =
+ arg1 != Undefined ? ConvertToRelativeIndex(arg1, srcLength) : srcLength;
+
+ // 11. Let newLength be max(endIndex - beginIndex, 0).
+ const newLength = Convert<PositiveSmi>(IntPtrMax(end - begin, 0));
+
+ // 12. Let constructorName be the String value of O.[[TypedArrayName]].
+ // 13. Let elementSize be the Number value of the Element Size value
+ // specified in Table 52 for constructorName.
+ const elementsInfo = typed_array::GetTypedArrayElementsInfo(source);
+
+ // 14. Let srcByteOffset be O.[[ByteOffset]].
+ const srcByteOffset: uintptr = source.byte_offset;
+
+ // 15. Let beginByteOffset be srcByteOffset + beginIndex × elementSize.
+ const beginByteOffset = srcByteOffset +
+ elementsInfo.CalculateByteLength(Convert<PositiveSmi>(begin))
+ otherwise ThrowRangeError(kInvalidArrayBufferLength);
+
+ // 16. Let argumentsList be « buffer, beginByteOffset, newLength ».
+ const beginByteOffsetNum = Convert<Number>(beginByteOffset);
+
+ // 17. Return ? TypedArraySpeciesCreate(O, argumentsList).
+ const numArgs: constexpr int31 = 3;
+ return typed_array_createtypedarray::TypedArraySpeciesCreate(
+ methodName, numArgs, source, buffer, beginByteOffsetNum, newLength);
+ }
+}
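
Steps 14-16 above are plain offset arithmetic: the new view starts at the source's byte offset plus the begin index scaled by the element size. A one-line TypeScript sketch, with an illustrative function name.

// Sketch of the subarray byte-offset computation.
function subarrayByteOffset(
    srcByteOffset: number, beginIndex: number, bytesPerElement: number): number {
  return srcByteOffset + beginIndex * bytesPerElement;
}

// E.g. for an Int32Array view at byte offset 8, subarray(3) begins at
// 8 + 3 * 4 = 20 bytes into the underlying buffer.
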
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 278e844966..607c9d225f 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -5,10 +5,54 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
+ struct TypedArrayElementsInfo {
+ // Calculates the number of bytes required for a specified number of elements.
+ CalculateByteLength(lengthSmi: PositiveSmi): uintptr labels IfInvalid {
+ const length = Convert<uintptr>(lengthSmi);
+ const byteLength = length << this.sizeLog2;
+ // If an overflow occurred, the byte length exceeds
+ // JSArrayBuffer::kMaxByteLength and is invalid.
+ if (byteLength >>> this.sizeLog2 != length) goto IfInvalid;
+ return byteLength;
+ }
+
+ // Calculates the maximum number of elements supported by a specified number
+ // of bytes.
+ CalculateLength(byteLength: uintptr): PositiveSmi labels IfInvalid {
+ return TryUintPtrToPositiveSmi(byteLength >>> this.sizeLog2)
+ otherwise IfInvalid;
+ }
+
+ // Determines whether `bytes` (a byte offset or length) is not evenly
+ // divisible by the element size.
+ IsUnaligned(bytes: uintptr): bool {
+ // Exploits the fact that the element size is a power of 2. Whether there
+ // is a remainder (i.e. `bytes` is unaligned) can be determined efficiently
+ // with bit masking. The shift is safe as sizeLog2 can be 3 at most (see
+ // ElementsKindToShiftSize).
+ return (bytes & ((1 << this.sizeLog2) - 1)) != 0;
+ }
+
+ sizeLog2: uintptr;
+ map: Map;
+ kind: ElementsKind;
+ }
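
Both bit tricks in this struct can be modeled in TypeScript by treating uintptr as a 64-bit value via BigInt: the shift may wrap, and wrapping is detected by shifting back and comparing; alignment is a mask test. This is an illustrative model, not V8 code.

// Sketch of CalculateByteLength's overflow check and IsUnaligned's mask.
const UINTPTR_MASK = (1n << 64n) - 1n;  // model a 64-bit uintptr

function calculateByteLengthSketch(
    length: bigint, sizeLog2: bigint): bigint | null {
  const byteLength = (length << sizeLog2) & UINTPTR_MASK;  // may wrap
  if ((byteLength >> sizeLog2) !== length) return null;    // overflow: invalid
  return byteLength;
}

function isUnalignedSketch(bytes: bigint, sizeLog2: bigint): boolean {
  // Element size is a power of two, so any set bit below sizeLog2 means
  // `bytes` is not evenly divisible by it.
  return (bytes & ((1n << sizeLog2) - 1n)) !== 0n;
}
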
extern runtime TypedArraySortFast(Context, Object): JSTypedArray;
extern macro TypedArrayBuiltinsAssembler::ValidateTypedArray(
Context, Object, constexpr string): JSTypedArray;
+ extern macro TypedArrayBuiltinsAssembler::CallCMemcpy(
+ RawPtr, RawPtr, uintptr): void;
+ extern macro TypedArrayBuiltinsAssembler::CallCMemmove(
+ RawPtr, RawPtr, uintptr): void;
+ extern macro TypedArrayBuiltinsAssembler::CallCMemset(
+ RawPtr, intptr, uintptr): void;
+ extern macro TypedArrayBuiltinsAssembler::GetBuffer(
+ implicit context: Context)(JSTypedArray): JSArrayBuffer;
+ extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
+ JSTypedArray): TypedArrayElementsInfo;
+ extern macro TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
+ ElementsKind): bool;
extern macro LoadFixedTypedArrayElementAsTagged(
RawPtr, Smi, constexpr ElementsKind, constexpr ParameterMode): Object;
extern macro StoreFixedTypedArrayElementFromTagged(
@@ -24,10 +68,87 @@ namespace typed_array {
// would try to check this through an assert(Is<>), so the test
// is bypassed in this specialization.
UnsafeCast<LoadFn>(implicit context: Context)(o: Object): LoadFn {
- return %RawObjectCast<LoadFn>(o);
+ return %RawDownCast<LoadFn>(o);
}
UnsafeCast<StoreFn>(implicit context: Context)(o: Object): StoreFn {
- return %RawObjectCast<StoreFn>(o);
+ return %RawDownCast<StoreFn>(o);
+ }
+
+ // AttachedJSTypedArray guards that the array's buffer is not detached.
+ transient type AttachedJSTypedArray extends JSTypedArray;
+
+ macro EnsureAttached(array: JSTypedArray): AttachedJSTypedArray
+ labels Detached {
+ if (IsDetachedBuffer(array.buffer)) goto Detached;
+ return %RawDownCast<AttachedJSTypedArray>(array);
+ }
+
+ struct AttachedJSTypedArrayWitness {
+ Get(): AttachedJSTypedArray {
+ return this.unstable;
+ }
+
+ GetStable(): JSTypedArray {
+ return this.stable;
+ }
+
+ Recheck() labels Detached {
+ if (IsDetachedBuffer(this.stable.buffer)) goto Detached;
+ this.unstable = %RawDownCast<AttachedJSTypedArray>(this.stable);
+ }
+
+ Load(implicit context: Context)(k: Smi): Object {
+ const lf: LoadFn = this.loadfn;
+ return lf(context, this.unstable, k);
+ }
+
+ stable: JSTypedArray;
+ unstable: AttachedJSTypedArray;
+ loadfn: LoadFn;
+ }
+
+ macro NewAttachedJSTypedArrayWitness(array: AttachedJSTypedArray):
+ AttachedJSTypedArrayWitness {
+ const kind = array.elements_kind;
+ return AttachedJSTypedArrayWitness{
+ array,
+ array,
+ GetLoadFnForElementsKind(kind)
+ };
+ }
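
The witness pattern above keeps a stable reference for identity (handed to user callbacks) and re-validates attachment after every callback that could detach the buffer. A rough TypeScript analogue follows; genuine detachment is only observable through V8 internals, so the check here is an approximation based on a detached ArrayBuffer reporting byteLength 0.

// Sketch of the attached-witness pattern: Recheck() after user code runs.
class AttachedWitnessSketch {
  private readonly originalByteLength: number;

  constructor(private readonly stable: Float64Array) {
    this.originalByteLength = stable.buffer.byteLength;
  }

  getStable(): Float64Array {
    return this.stable;
  }

  // Corresponds to Recheck(): throw if the buffer was detached. A drop from
  // a nonzero original byte length to zero signals detachment.
  recheck(): void {
    if (this.originalByteLength !== 0 && this.stable.buffer.byteLength === 0) {
      throw new TypeError("buffer was detached");
    }
  }

  load(k: number): number {
    return this.stable[k];
  }
}
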
+
+ macro GetLoadFnForElementsKind(elementsKind: ElementsKind): LoadFn {
+ if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) {
+ if (elementsKind == INT32_ELEMENTS) {
+ return LoadFixedElement<FixedInt32Array>;
+ } else if (elementsKind == FLOAT32_ELEMENTS) {
+ return LoadFixedElement<FixedFloat32Array>;
+ } else if (elementsKind == FLOAT64_ELEMENTS) {
+ return LoadFixedElement<FixedFloat64Array>;
+ } else if (elementsKind == UINT8_CLAMPED_ELEMENTS) {
+ return LoadFixedElement<FixedUint8ClampedArray>;
+ } else if (elementsKind == BIGUINT64_ELEMENTS) {
+ return LoadFixedElement<FixedBigUint64Array>;
+ } else if (elementsKind == BIGINT64_ELEMENTS) {
+ return LoadFixedElement<FixedBigInt64Array>;
+ } else {
+ unreachable;
+ }
+ } else {
+ if (elementsKind == UINT8_ELEMENTS) {
+ return LoadFixedElement<FixedUint8Array>;
+ } else if (elementsKind == INT8_ELEMENTS) {
+ return LoadFixedElement<FixedInt8Array>;
+ } else if (elementsKind == UINT16_ELEMENTS) {
+ return LoadFixedElement<FixedUint16Array>;
+ } else if (elementsKind == INT16_ELEMENTS) {
+ return LoadFixedElement<FixedInt16Array>;
+ } else if (elementsKind == UINT32_ELEMENTS) {
+ return LoadFixedElement<FixedUint32Array>;
+ } else {
+ unreachable;
+ }
+ }
}
macro KindForArrayType<T: type>(): constexpr ElementsKind;
@@ -81,16 +202,17 @@ namespace typed_array {
return Undefined;
}
- transitioning macro CallCompareWithDetachedCheck(
- context: Context, array: JSTypedArray, comparefn: Callable, a: Object,
- b: Object): Number
- labels Detached {
+ transitioning macro CallCompare(
+ implicit context: Context, array: JSTypedArray,
+ comparefn: Callable)(a: Object, b: Object): Number {
// a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
const v: Number =
ToNumber_Inline(context, Call(context, comparefn, Undefined, a, b));
// b. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- if (IsDetachedBuffer(array.buffer)) goto Detached;
+ if (IsDetachedBuffer(array.buffer)) {
+ ThrowTypeError(kDetachedOperation, '%TypedArray%.prototype.sort');
+ }
// c. If v is NaN, return +0.
if (NumberIsNaN(v)) return 0;
@@ -99,174 +221,56 @@ namespace typed_array {
return v;
}
- // InsertionSort is used for smaller arrays.
- transitioning macro TypedArrayInsertionSort(
- context: Context, array: JSTypedArray, fromArg: Smi, toArg: Smi,
- comparefn: Callable, load: LoadFn, store: StoreFn)
- labels Detached {
- let from: Smi = fromArg;
- let to: Smi = toArg;
-
- if (IsDetachedBuffer(array.buffer)) goto Detached;
-
- for (let i: Smi = from + 1; i < to; ++i) {
- const element: Object = load(context, array, i);
- let j: Smi = i - 1;
- for (; j >= from; --j) {
- const tmp: Object = load(context, array, j);
- const order: Number = CallCompareWithDetachedCheck(
- context, array, comparefn, tmp, element) otherwise Detached;
- if (order > 0) {
- store(context, array, j + 1, tmp);
+ // Merges two sorted runs [from, middle) and [middle, to)
+ // from "source" into "target".
+ transitioning macro
+ TypedArrayMerge(
+ implicit context: Context, array: JSTypedArray, comparefn: Callable)(
+ source: FixedArray, from: Smi, middle: Smi, to: Smi, target: FixedArray) {
+ let left: Smi = from;
+ let right: Smi = middle;
+
+ for (let targetIndex: Smi = from; targetIndex < to; ++targetIndex) {
+ if (left < middle && right >= to) {
+ // If the left run has elements, but the right does not, we take
+ // from the left.
+ target.objects[targetIndex] = source.objects[left++];
+ } else if (left < middle) {
+ // If both have elements, we need to compare.
+ const leftElement: Object = source.objects[left];
+ const rightElement: Object = source.objects[right];
+ if (CallCompare(leftElement, rightElement) <= 0) {
+ target.objects[targetIndex] = leftElement;
+ left++;
} else {
- break;
+ target.objects[targetIndex] = rightElement;
+ right++;
}
+ } else {
+ // No elements remain on the left, but the right still has some, so
+ // we take from the right.
+ assert(left == middle);
+ target.objects[targetIndex] = source.objects[right++];
}
- store(context, array, j + 1, element);
}
}
- transitioning macro TypedArrayQuickSortImpl(
- context: Context, array: JSTypedArray, fromArg: Smi, toArg: Smi,
- comparefn: Callable, load: LoadFn, store: StoreFn)
- labels Detached {
- let from: Smi = fromArg;
- let to: Smi = toArg;
-
- while (to - from > 1) {
- if (to - from <= 10) {
- // TODO(szuend): Investigate InsertionSort removal.
- // Currently it does not make any difference when the
- // benchmarks are run locally.
- TypedArrayInsertionSort(
- context, array, from, to, comparefn, load, store)
- otherwise Detached;
- break;
- }
+ transitioning builtin
+ TypedArrayMergeSort(
+ implicit context: Context, array: JSTypedArray, comparefn: Callable)(
+ source: FixedArray, from: Smi, to: Smi, target: FixedArray): Object {
+ assert(to - from > 1);
+ const middle: Smi = from + ((to - from) >> 1);
- // TODO(szuend): Check if a more involved thirdIndex calculation is
- // worth it for very large arrays.
- const thirdIndex: Smi = from + ((to - from) >> 1);
-
- if (IsDetachedBuffer(array.buffer)) goto Detached;
-
- // Find a pivot as the median of first, last and middle element.
- let v0: Object = load(context, array, from);
- let v1: Object = load(context, array, to - 1);
- let v2: Object = load(context, array, thirdIndex);
-
- const c01: Number = CallCompareWithDetachedCheck(
- context, array, comparefn, v0, v1) otherwise Detached;
- if (c01 > 0) {
- // v1 < v0, so swap them.
- let tmp: Object = v0;
- v0 = v1;
- v1 = tmp;
- }
- // v0 <= v1.
- const c02: Number = CallCompareWithDetachedCheck(
- context, array, comparefn, v0, v2) otherwise Detached;
- if (c02 >= 0) {
- // v2 <= v0 <= v1.
- const tmp: Object = v0;
- v0 = v2;
- v2 = v1;
- v1 = tmp;
- } else {
- // v0 <= v1 && v0 < v2.
- const c12: Number = CallCompareWithDetachedCheck(
- context, array, comparefn, v1, v2) otherwise Detached;
- if (c12 > 0) {
- // v0 <= v2 < v1.
- const tmp: Object = v1;
- v1 = v2;
- v2 = tmp;
- }
- }
+ // On the next recursion step, source becomes target and vice versa.
+ // This avoids copying the relevant range from the original array
+ // into a work array on each recursion step.
+ if (middle - from > 1) TypedArrayMergeSort(target, from, middle, source);
+ if (to - middle > 1) TypedArrayMergeSort(target, middle, to, source);
- // v0 <= v1 <= v2.
- store(context, array, from, v0);
- store(context, array, to - 1, v2);
-
- const pivot: Object = v1;
- let lowEnd: Smi = from + 1; // Upper bound of elems lower than pivot.
- let highStart: Smi = to - 1; // Lower bound of elems greater than pivot.
-
- let lowEndValue: Object = load(context, array, lowEnd);
- store(context, array, thirdIndex, lowEndValue);
- store(context, array, lowEnd, pivot);
-
- // From lowEnd to idx are elements equal to pivot.
- // From idx to highStart are elements that haven't been compared yet.
- for (let idx: Smi = lowEnd + 1; idx < highStart; idx++) {
- let element: Object = load(context, array, idx);
- let order: Number = CallCompareWithDetachedCheck(
- context, array, comparefn, element, pivot) otherwise Detached;
-
- if (order < 0) {
- lowEndValue = load(context, array, lowEnd);
- store(context, array, idx, lowEndValue);
- store(context, array, lowEnd, element);
- lowEnd++;
- } else if (order > 0) {
- let breakFor: bool = false;
-
- while (order > 0) {
- highStart--;
- if (highStart == idx) {
- breakFor = true;
- break;
- }
-
- const topElement: Object = load(context, array, highStart);
- order = CallCompareWithDetachedCheck(
- context, array, comparefn, topElement, pivot)
- otherwise Detached;
- }
-
- if (breakFor) {
- break;
- }
-
- const highStartValue: Object = load(context, array, highStart);
- store(context, array, idx, highStartValue);
- store(context, array, highStart, element);
-
- if (order < 0) {
- element = load(context, array, idx);
-
- lowEndValue = load(context, array, lowEnd);
- store(context, array, idx, lowEndValue);
- store(context, array, lowEnd, element);
- lowEnd++;
- }
- }
- }
+ TypedArrayMerge(source, from, middle, to, target);
- if ((to - highStart) < (lowEnd - from)) {
- TypedArrayQuickSort(
- context, array, highStart, to, comparefn, load, store);
- to = lowEnd;
- } else {
- TypedArrayQuickSort(
- context, array, from, lowEnd, comparefn, load, store);
- from = highStart;
- }
- }
- }
-
- transitioning builtin TypedArrayQuickSort(
- context: Context, array: JSTypedArray, from: Smi, to: Smi,
- comparefn: Callable, load: LoadFn, store: StoreFn): JSTypedArray {
- try {
- TypedArrayQuickSortImpl(context, array, from, to, comparefn, load, store)
- otherwise Detached;
- }
- label Detached {
- ThrowTypeError(
- context, kDetachedOperation, '%TypedArray%.prototype.sort');
- }
- return array;
+ return Undefined;
}
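
The recursion above relies on both work arrays starting out as copies of the data, so the two buffers can swap source/target roles at each level with no extra copying. A compact TypeScript sketch of the same scheme; the comparator here is synchronous, whereas the Torque version re-checks for detachment after each user comparator call.

// Sketch of TypedArrayMergeSort/TypedArrayMerge: sort [from, to) of `source`
// into `target`, alternating buffer roles per recursion level. Precondition:
// both arrays initially hold the same contents.
function mergeSortInto(
    source: number[], from: number, to: number, target: number[],
    cmp: (a: number, b: number) => number): void {
  if (to - from <= 1) return;
  const middle = from + ((to - from) >> 1);
  // On the next step, target becomes source and vice versa.
  mergeSortInto(target, from, middle, source, cmp);
  mergeSortInto(target, middle, to, source, cmp);
  // Merge the two sorted runs of `source` into `target`.
  let left = from, right = middle;
  for (let i = from; i < to; i++) {
    if (left < middle &&
        (right >= to || cmp(source[left], source[right]) <= 0)) {
      target[i] = source[left++];
    } else {
      target[i] = source[right++];
    }
  }
}

// Usage mirrors the driver in the sort builtin below: both work arrays start
// as copies of the data, and the sorted result ends up in the first one.
//   const work1 = data.slice(), work2 = data.slice();
//   mergeSortInto(work2, 0, data.length, work1, (a, b) => a - b);
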
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort
@@ -277,7 +281,7 @@ namespace typed_array {
const comparefnObj: Object =
arguments.length > 0 ? arguments[0] : Undefined;
if (comparefnObj != Undefined && !TaggedIsCallable(comparefnObj)) {
- ThrowTypeError(context, kBadSortComparisonFunction, comparefnObj);
+ ThrowTypeError(kBadSortComparisonFunction, comparefnObj);
}
// 2. Let obj be the this value.
@@ -296,62 +300,77 @@ namespace typed_array {
// 4. Let len be obj.[[ArrayLength]].
const len: Smi = array.length;
- try {
- const comparefn: Callable =
- Cast<Callable>(comparefnObj) otherwise CastError;
- let loadfn: LoadFn;
- let storefn: StoreFn;
-
- let elementsKind: ElementsKind = array.elements_kind;
-
- if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) {
- if (elementsKind == INT32_ELEMENTS) {
- loadfn = LoadFixedElement<FixedInt32Array>;
- storefn = StoreFixedElement<FixedInt32Array>;
- } else if (elementsKind == FLOAT32_ELEMENTS) {
- loadfn = LoadFixedElement<FixedFloat32Array>;
- storefn = StoreFixedElement<FixedFloat32Array>;
- } else if (elementsKind == FLOAT64_ELEMENTS) {
- loadfn = LoadFixedElement<FixedFloat64Array>;
- storefn = StoreFixedElement<FixedFloat64Array>;
- } else if (elementsKind == UINT8_CLAMPED_ELEMENTS) {
- loadfn = LoadFixedElement<FixedUint8ClampedArray>;
- storefn = StoreFixedElement<FixedUint8ClampedArray>;
- } else if (elementsKind == BIGUINT64_ELEMENTS) {
- loadfn = LoadFixedElement<FixedBigUint64Array>;
- storefn = StoreFixedElement<FixedBigUint64Array>;
- } else if (elementsKind == BIGINT64_ELEMENTS) {
- loadfn = LoadFixedElement<FixedBigInt64Array>;
- storefn = StoreFixedElement<FixedBigInt64Array>;
- } else {
- unreachable;
- }
+ // Arrays of length 1 or less are considered sorted.
+ if (len < 2) return array;
+
+ const comparefn: Callable =
+ Cast<Callable>(comparefnObj) otherwise unreachable;
+ let loadfn: LoadFn;
+ let storefn: StoreFn;
+
+ let elementsKind: ElementsKind = array.elements_kind;
+
+ if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) {
+ if (elementsKind == INT32_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedInt32Array>;
+ storefn = StoreFixedElement<FixedInt32Array>;
+ } else if (elementsKind == FLOAT32_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedFloat32Array>;
+ storefn = StoreFixedElement<FixedFloat32Array>;
+ } else if (elementsKind == FLOAT64_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedFloat64Array>;
+ storefn = StoreFixedElement<FixedFloat64Array>;
+ } else if (elementsKind == UINT8_CLAMPED_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedUint8ClampedArray>;
+ storefn = StoreFixedElement<FixedUint8ClampedArray>;
+ } else if (elementsKind == BIGUINT64_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedBigUint64Array>;
+ storefn = StoreFixedElement<FixedBigUint64Array>;
+ } else if (elementsKind == BIGINT64_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedBigInt64Array>;
+ storefn = StoreFixedElement<FixedBigInt64Array>;
} else {
- if (elementsKind == UINT8_ELEMENTS) {
- loadfn = LoadFixedElement<FixedUint8Array>;
- storefn = StoreFixedElement<FixedUint8Array>;
- } else if (elementsKind == INT8_ELEMENTS) {
- loadfn = LoadFixedElement<FixedInt8Array>;
- storefn = StoreFixedElement<FixedInt8Array>;
- } else if (elementsKind == UINT16_ELEMENTS) {
- loadfn = LoadFixedElement<FixedUint16Array>;
- storefn = StoreFixedElement<FixedUint16Array>;
- } else if (elementsKind == INT16_ELEMENTS) {
- loadfn = LoadFixedElement<FixedInt16Array>;
- storefn = StoreFixedElement<FixedInt16Array>;
- } else if (elementsKind == UINT32_ELEMENTS) {
- loadfn = LoadFixedElement<FixedUint32Array>;
- storefn = StoreFixedElement<FixedUint32Array>;
- } else {
- unreachable;
- }
+ unreachable;
+ }
+ } else {
+ if (elementsKind == UINT8_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedUint8Array>;
+ storefn = StoreFixedElement<FixedUint8Array>;
+ } else if (elementsKind == INT8_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedInt8Array>;
+ storefn = StoreFixedElement<FixedInt8Array>;
+ } else if (elementsKind == UINT16_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedUint16Array>;
+ storefn = StoreFixedElement<FixedUint16Array>;
+ } else if (elementsKind == INT16_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedInt16Array>;
+ storefn = StoreFixedElement<FixedInt16Array>;
+ } else if (elementsKind == UINT32_ELEMENTS) {
+ loadfn = LoadFixedElement<FixedUint32Array>;
+ storefn = StoreFixedElement<FixedUint32Array>;
+ } else {
+ unreachable;
}
-
- TypedArrayQuickSort(context, array, 0, len, comparefn, loadfn, storefn);
}
- label CastError {
- unreachable;
+
+ // Prepare the two work arrays. All numbers are converted to tagged
+ // objects first and merge-sorted between the two FixedArrays.
+ // The result is then written back into the JSTypedArray.
+ const work1: FixedArray = AllocateZeroedFixedArray(Convert<intptr>(len));
+ const work2: FixedArray = AllocateZeroedFixedArray(Convert<intptr>(len));
+
+ for (let i: Smi = 0; i < len; ++i) {
+ const element: Object = loadfn(context, array, i);
+ work1.objects[i] = element;
+ work2.objects[i] = element;
}
+
+ TypedArrayMergeSort(work2, 0, len, work1);
+
+ // work1 contains the sorted numbers. Write them back.
+ for (let i: Smi = 0; i < len; ++i)
+ storefn(context, array, i, work1.objects[i]);
+
return array;
}
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 51ed934869..11bb9ca44a 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -11,6 +11,8 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
#include "src/objects/cell.h"
@@ -61,7 +63,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Push(rdi);
__ CallRuntime(function_id, 1);
- __ movp(rcx, rax);
+ __ movq(rcx, rax);
// Restore target function and new target.
__ Pop(rdx);
@@ -96,11 +98,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
// Set up pointer to last argument.
- __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
- __ movp(rcx, rax);
+ __ movq(rcx, rax);
// ----------- S t a t e -------------
// -- rax: number of arguments (untagged)
// -- rdi: constructor function
@@ -113,9 +115,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
__ jmp(&entry);
__ bind(&loop);
- __ Push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(rbx, rcx, times_system_pointer_size, 0));
__ bind(&entry);
- __ decp(rcx);
+ __ decq(rcx);
__ j(greater_equal, &loop, Label::kNear);
// Call the function.
@@ -126,9 +128,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION);
// Restore context from the frame.
- __ movp(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+ __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ movp(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
+ __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
@@ -136,7 +138,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
- __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
+ __ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
__ ret(0);
@@ -150,13 +152,13 @@ void Generate_StackOverflowCheck(
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
__ LoadRoot(kScratchRegister, RootIndex::kRealStackLimit);
- __ movp(scratch, rsp);
+ __ movq(scratch, rsp);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
- __ subp(scratch, kScratchRegister);
- __ sarp(scratch, Immediate(kSystemPointerSizeLog2));
+ __ subq(scratch, kScratchRegister);
+ __ sarq(scratch, Immediate(kSystemPointerSizeLog2));
// Check if the arguments will overflow the stack.
- __ cmpp(scratch, num_args);
+ __ cmpq(scratch, num_args);
// Signed comparison.
__ j(less_equal, stack_overflow, stack_overflow_distance);
}
@@ -194,15 +196,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
-
__ LoadTaggedPointerField(
- rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
- __ testl(FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset),
- Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
- __ j(not_zero, &not_create_implicit_receiver, Label::kNear);
+ rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
+ __ JumpIfIsInRange(rbx, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver, Label::kNear);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
@@ -246,11 +245,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -----------------------------------
// Restore constructor function and argument count.
- __ movp(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
+ __ movq(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
__ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
// Set up pointer to last argument.
- __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Check if we have enough stack space to push all arguments.
// Argument count in rax. Clobbers rcx.
@@ -260,7 +259,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&stack_overflow);
// Restore context from the frame.
- __ movp(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+ __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kThrowStackOverflow);
// This should be unreachable.
__ int3();
@@ -269,7 +268,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Copy arguments and receiver to the expression stack.
Label loop, entry;
- __ movp(rcx, rax);
+ __ movq(rcx, rax);
// ----------- S t a t e -------------
// -- rax: number of arguments (untagged)
// -- rdx: new target
@@ -284,9 +283,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -----------------------------------
__ jmp(&entry, Label::kNear);
__ bind(&loop);
- __ Push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(rbx, rcx, times_system_pointer_size, 0));
__ bind(&entry);
- __ decp(rcx);
+ __ decq(rcx);
__ j(greater_equal, &loop, Label::kNear);
// Call the function.
@@ -307,7 +306,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
masm->pc_offset());
// Restore context from the frame.
- __ movp(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+ __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
// of the receiver and use the result; see ECMA-262 section 13.2.2-7
@@ -336,18 +335,18 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ movp(rax, Operand(rsp, 0 * kSystemPointerSize));
+ __ movq(rax, Operand(rsp, 0 * kSystemPointerSize));
__ JumpIfRoot(rax, RootIndex::kTheHoleValue, &do_throw, Label::kNear);
__ bind(&leave_frame);
// Restore the arguments count.
- __ movp(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
+ __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
- __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
+ __ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
__ ret(0);
}
@@ -381,13 +380,13 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
__ pushq(rbp);
- __ movp(rbp, rsp);
+ __ movq(rbp, rsp);
// Push the stack frame type.
__ Push(Immediate(StackFrame::TypeToMarker(type)));
// Reserve a slot for the context. It is filled after the root register has
// been set up.
- __ subp(rsp, Immediate(kSystemPointerSize));
+ __ subq(rsp, Immediate(kSystemPointerSize));
// Save callee-saved registers (X64/X32/Win64 calling conventions).
__ pushq(r12);
__ pushq(r13);
@@ -401,7 +400,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
#ifdef _WIN64
// On Win64 XMM6-XMM15 are callee-save.
- __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
@@ -420,7 +419,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Initialize the root register.
// C calling convention. The first argument is passed in arg_reg_1.
- __ movp(kRootRegister, arg_reg_1);
+ __ movq(kRootRegister, arg_reg_1);
}
// Save copies of the top frame descriptor on the stack.
@@ -436,16 +435,16 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
IsolateAddressId::kContextAddress, masm->isolate());
__ Load(kScratchRegister, context_address);
static constexpr int kOffsetToContextSlot = -2 * kSystemPointerSize;
- __ movp(Operand(rbp, kOffsetToContextSlot), kScratchRegister);
+ __ movq(Operand(rbp, kOffsetToContextSlot), kScratchRegister);
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp = ExternalReference::Create(
IsolateAddressId::kJSEntrySPAddress, masm->isolate());
__ Load(rax, js_entry_sp);
- __ testp(rax, rax);
+ __ testq(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ movp(rax, rbp);
+ __ movq(rax, rbp);
__ Store(js_entry_sp, rax);
Label cont;
__ jmp(&cont);
@@ -486,10 +485,10 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
__ Pop(rbx);
- __ cmpp(rbx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ cmpq(rbx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
__ Move(kScratchRegister, js_entry_sp);
- __ movp(Operand(kScratchRegister, 0), Immediate(0));
+ __ movq(Operand(kScratchRegister, 0), Immediate(0));
__ bind(&not_outermost_js_2);
// Restore the top frame descriptor from the stack.
@@ -511,7 +510,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
__ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
__ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
- __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
#endif
__ popq(rbx);
@@ -524,7 +523,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ popq(r14);
__ popq(r13);
__ popq(r12);
- __ addp(rsp, Immediate(2 * kSystemPointerSize)); // remove markers
+ __ addq(rsp, Immediate(2 * kSystemPointerSize)); // remove markers
// Restore frame pointer and return.
__ popq(rbp);
@@ -583,7 +582,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r8 : argc
// r9 : argv
- __ movp(rdi, arg_reg_3);
+ __ movq(rdi, arg_reg_3);
__ Move(rdx, arg_reg_2);
// rdi : function
// rdx : new_target
@@ -597,7 +596,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
- __ movp(rsi, masm->ExternalReferenceAsOperand(context_address));
+ __ movq(rsi, masm->ExternalReferenceAsOperand(context_address));
// Push the function and the receiver onto the stack.
__ Push(rdi);
@@ -605,14 +604,14 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
#ifdef _WIN64
// Load the previous frame pointer to access C arguments on stack
- __ movp(kScratchRegister, Operand(rbp, 0));
+ __ movq(kScratchRegister, Operand(rbp, 0));
// Load the number of arguments and setup pointer to the arguments.
- __ movp(rax, Operand(kScratchRegister, EntryFrameConstants::kArgcOffset));
- __ movp(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+ __ movq(rax, Operand(kScratchRegister, EntryFrameConstants::kArgcOffset));
+ __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
#else // _WIN64
// Load the number of arguments and setup pointer to the arguments.
- __ movp(rax, r8);
- __ movp(rbx, r9);
+ __ movq(rax, r8);
+ __ movq(rbx, r9);
#endif // _WIN64
// Current stack contents:
@@ -646,11 +645,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Set(rcx, 0); // Set loop variable to 0.
__ jmp(&entry, Label::kNear);
__ bind(&loop);
- __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ movq(kScratchRegister, Operand(rbx, rcx, times_system_pointer_size, 0));
__ Push(Operand(kScratchRegister, 0)); // dereference handle
- __ addp(rcx, Immediate(1));
+ __ addq(rcx, Immediate(1));
__ bind(&entry);
- __ cmpp(rcx, rax);
+ __ cmpq(rcx, rax);
__ j(not_equal, &loop, Label::kNear);
// Invoke the builtin code.
@@ -677,7 +676,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
// arg_reg_2: microtask_queue
- __ movp(RunMicrotasksDescriptor::MicrotaskQueueRegister(), arg_reg_2);
+ __ movq(RunMicrotasksDescriptor::MicrotaskQueueRegister(), arg_reg_2);
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
@@ -688,12 +687,9 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ CmpObjectType(sfi_data, INTERPRETER_DATA_TYPE, scratch1);
__ j(not_equal, &done, Label::kNear);
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? scratch1 : no_reg;
__ LoadTaggedPointerField(
- sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset),
- decompr_scratch_for_debug);
+ sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ bind(&done);
}
@@ -715,15 +711,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
Register decompr_scratch2 = COMPRESS_POINTERS_BOOL ? r12 : no_reg;
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Load suspended function and context.
__ LoadTaggedPointerField(
- rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset),
- decompr_scratch_for_debug);
- __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset),
- decompr_scratch_for_debug);
+ rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -739,7 +731,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference::debug_suspended_generator_address(masm->isolate());
Operand debug_suspended_generator_operand =
masm->ExternalReferenceAsOperand(debug_suspended_generator);
- __ cmpp(rdx, debug_suspended_generator_operand);
+ __ cmpq(rdx, debug_suspended_generator_operand);
__ j(equal, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
@@ -754,8 +746,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Push receiver.
__ PushTaggedPointerField(
- FieldOperand(rdx, JSGeneratorObject::kReceiverOffset), decompr_scratch1,
- decompr_scratch_for_debug);
+ FieldOperand(rdx, JSGeneratorObject::kReceiverOffset), decompr_scratch1);
// ----------- S t a t e -------------
// -- rax : return address
@@ -767,14 +758,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Copy the function arguments from the generator object's register file.
__ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
+ rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
__ LoadTaggedPointerField(
- rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset),
- decompr_scratch_for_debug);
+ rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
@@ -785,7 +774,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ j(greater_equal, &done_loop, Label::kNear);
__ PushTaggedAnyField(
FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
- decompr_scratch1, decompr_scratch2, decompr_scratch_for_debug);
+ decompr_scratch1, decompr_scratch2);
__ addl(r9, Immediate(1));
__ jmp(&loop);
@@ -795,11 +784,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
__ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
+ rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
- rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset),
- decompr_scratch_for_debug);
+ rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, rcx, kScratchRegister);
__ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
@@ -809,16 +796,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
__ PushReturnAddressFrom(rax);
__ LoadTaggedPointerField(
- rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
+ rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(rax, FieldOperand(
rax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
- decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ JumpCodeObject(rcx);
}
@@ -832,8 +817,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(rdx);
__ LoadTaggedPointerField(
- rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset),
- decompr_scratch_for_debug);
+ rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);
@@ -844,8 +828,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(rdx);
__ LoadTaggedPointerField(
- rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset),
- decompr_scratch_for_debug);
+ rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
}
__ jmp(&stepping_prepared);
@@ -866,7 +849,7 @@ static void ReplaceClosureCodeWithOptimizedCode(
// Store the optimized code in the closure.
__ StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
optimized_code);
- __ movp(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
+ __ movq(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
@@ -877,7 +860,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register return_pc = scratch2;
// Get the arguments + receiver count.
- __ movp(args_count,
+ __ movq(args_count,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ movl(args_count,
FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
@@ -887,7 +870,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Drop receiver + arguments.
__ PopReturnAddressTo(return_pc);
- __ addp(rsp, args_count);
+ __ addq(rsp, args_count);
__ PushReturnAddressFrom(return_pc);
}
@@ -921,13 +904,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register closure = rdi;
Register optimized_code_entry = scratch1;
Register decompr_scratch = COMPRESS_POINTERS_BOOL ? scratch2 : no_reg;
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? scratch3 : no_reg;
__ LoadAnyTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset),
- decompr_scratch, decompr_scratch_for_debug);
+ decompr_scratch);
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
@@ -979,8 +960,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch2,
- FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset),
- decompr_scratch_for_debug);
+ FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ testl(
FieldOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
@@ -1036,16 +1016,16 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Load the next bytecode and update table to the wide scaled table.
__ incl(bytecode_offset);
- __ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
- __ addp(bytecode_size_table,
+ __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
+ __ addq(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ incl(bytecode_offset);
- __ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
- __ addp(bytecode_size_table,
+ __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
+ __ addq(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ bind(&process_bytecode);
@@ -1059,7 +1039,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
#undef JUMP_IF_EQUAL
// Otherwise, load the size of the current bytecode and advance the offset.
- __ addl(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
+ __ addl(bytecode_offset,
+ Operand(bytecode_size_table, bytecode, times_int_size, 0));
}
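
Besides the movzxbp/addp renames, this hunk replaces the hard-coded times_4 scale with times_int_size for the size-table lookup. The surrounding logic: Wide and ExtraWide prefix bytecodes select a scaled copy of the per-bytecode size table before the size of the current bytecode is added to the offset. A sketch of that flow in plain C++, with made-up constants and table contents:

```cpp
#include <cstdint>

// Hypothetical bytecode set: two prefixes plus one real bytecode whose
// operand width grows under the Wide/ExtraWide prefixes.
constexpr int kBytecodeCount = 3;
constexpr uint8_t kWide = 0, kExtraWide = 1, kAdd = 2;
constexpr int32_t kSizeTable[3 * kBytecodeCount] = {
    1, 1, 2,   // single-byte operands
    1, 1, 3,   // wide (2-byte) operands
    1, 1, 5};  // extra-wide (4-byte) operands

static int AdvanceOffset(const uint8_t* bytecodes, int offset) {
  const int32_t* table = kSizeTable;
  uint8_t bytecode = bytecodes[offset];
  if (bytecode == kWide) {
    bytecode = bytecodes[++offset];  // incl + movzxbq
    table += kBytecodeCount;         // addq: switch to the wide table
  } else if (bytecode == kExtraWide) {
    bytecode = bytecodes[++offset];
    table += 2 * kBytecodeCount;     // switch to the extra-wide table
  }
  return offset + table[bytecode];   // addl(offset, table[bytecode])
}
```
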
// Generate code for entering a JS function with the interpreter.
@@ -1079,18 +1060,14 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = rdi;
Register feedback_vector = rbx;
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadTaggedPointerField(
- rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
+ rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
kInterpreterBytecodeArrayRegister,
- FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset),
- decompr_scratch_for_debug);
+ FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
kScratchRegister);
@@ -1102,11 +1079,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ LoadTaggedPointerField(
- feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset),
- decompr_scratch_for_debug);
+ feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
- FieldOperand(feedback_vector, Cell::kValueOffset),
- decompr_scratch_for_debug);
+ FieldOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
@@ -1127,7 +1102,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ pushq(rbp); // Caller's frame pointer.
- __ movp(rbp, rsp);
+ __ movq(rbp, rsp);
__ Push(rsi); // Callee's context.
__ Push(rdi); // Callee's JS function.
@@ -1137,7 +1112,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Immediate(BytecodeArray::kNoAgeBytecodeAge));
// Load initial bytecode offset.
- __ movp(kInterpreterBytecodeOffsetRegister,
+ __ movq(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Push bytecode array and Smi tagged bytecode offset.
@@ -1153,8 +1128,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
- __ movp(rax, rsp);
- __ subp(rax, rcx);
+ __ movq(rax, rsp);
+ __ subq(rax, rcx);
__ CompareRoot(rax, RootIndex::kRealStackLimit);
__ j(above_equal, &ok, Label::kNear);
__ CallRuntime(Runtime::kThrowStackOverflow);
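
This is the interpreter's frame-size stack check: subtract the required frame size from rsp and require the result to stay at or above the real stack limit. Expressed as a boolean predicate (a sketch; wraparound at the very bottom of the address space is ignored here):

```cpp
#include <cstdint>

// Hypothetical model of the stack check: the whole register frame must
// fit above the "real" stack limit before anything is pushed.
static bool FrameFits(uintptr_t sp, uintptr_t frame_size_in_bytes,
                      uintptr_t real_stack_limit) {
  // movq rax, rsp; subq rax, rcx; CompareRoot(rax, kRealStackLimit)
  return sp - frame_size_in_bytes >= real_stack_limit;  // j(above_equal, &ok)
}
```
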
@@ -1170,7 +1145,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rax);
// Continue loop if not done.
__ bind(&loop_check);
- __ subp(rcx, Immediate(kSystemPointerSize));
+ __ subq(rcx, Immediate(kSystemPointerSize));
__ j(greater_equal, &loop_header, Label::kNear);
}
@@ -1183,7 +1158,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ testl(rax, rax);
__ j(zero, &no_incoming_new_target_or_generator_register, Label::kNear);
- __ movp(Operand(rbp, rax, times_pointer_size, 0), rdx);
+ __ movq(Operand(rbp, rax, times_system_pointer_size, 0), rdx);
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator with undefined.
@@ -1196,11 +1171,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
- __ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ movp(
- kJavaScriptCallCodeStartRegister,
- Operand(kInterpreterDispatchTableRegister, r11, times_pointer_size, 0));
+ __ movq(kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, r11,
+ times_system_pointer_size, 0));
__ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
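
The dispatch sequence above fetches the current bytecode with a zero-extending byte load, indexes the dispatch table with it, and calls the handler. The scale factor is now the explicit times_system_pointer_size rather than times_pointer_size. Conceptually (a sketch with hypothetical types):

```cpp
#include <cstdint>

// Hypothetical model of interpreter dispatch: the bytecode value indexes
// a table of handler entry points.
using BytecodeHandler = void (*)();

static void Dispatch(const uint8_t* bytecode_array, int offset,
                     BytecodeHandler const* dispatch_table) {
  uint8_t bytecode = bytecode_array[offset];  // movzxbq r11, [array + offset]
  dispatch_table[bytecode]();                 // call [table + r11 * 8]
}
```
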
@@ -1208,16 +1183,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// or the interpreter tail calling a builtin and then a dispatch.
// Get bytecode array and bytecode offset from the stack frame.
- __ movp(kInterpreterBytecodeArrayRegister,
+ __ movq(kInterpreterBytecodeArrayRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ movp(kInterpreterBytecodeOffsetRegister,
+ __ movq(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
- __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx,
@@ -1240,18 +1215,18 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register scratch) {
// Find the address of the last argument.
__ Move(scratch, num_args);
- __ shlp(scratch, Immediate(kSystemPointerSizeLog2));
- __ negp(scratch);
- __ addp(scratch, start_address);
+ __ shlq(scratch, Immediate(kSystemPointerSizeLog2));
+ __ negq(scratch);
+ __ addq(scratch, start_address);
// Push the arguments.
Label loop_header, loop_check;
__ j(always, &loop_check, Label::kNear);
__ bind(&loop_header);
__ Push(Operand(start_address, 0));
- __ subp(start_address, Immediate(kSystemPointerSize));
+ __ subq(start_address, Immediate(kSystemPointerSize));
__ bind(&loop_check);
- __ cmpp(start_address, scratch);
+ __ cmpq(start_address, scratch);
__ j(greater, &loop_header, Label::kNear);
}
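
Generate_InterpreterPushArgs computes the address one slot past the last argument, then pushes from start_address downward until it reaches that bound. The same loop as a sketch over a vector-based stack (names hypothetical):

```cpp
#include <cstdint>
#include <vector>

// Hypothetical model: arguments live in a block ending at start_address
// and growing toward lower addresses; push num_args of them.
static void PushArgs(std::vector<uint64_t>& stack,
                     const uint64_t* start_address, uint64_t num_args) {
  const uint64_t* end = start_address - num_args;        // shlq/negq/addq
  for (const uint64_t* p = start_address; p > end; --p)  // cmpq + j(greater)
    stack.push_back(*p);                                 // Push(Operand(p, 0))
}
```
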
@@ -1384,31 +1359,26 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
-
// If the SFI function_data is an InterpreterData, the function will have a
// custom copy of the interpreter entry trampoline for profiling. If so,
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
- __ movp(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
- rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
+ rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
- rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset),
- decompr_scratch_for_debug);
+ rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
__ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
__ j(not_equal, &builtin_trampoline, Label::kNear);
- __ movp(rbx,
+ __ movq(rbx,
FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
- __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addq(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(&trampoline_loaded, Label::kNear);
__ bind(&builtin_trampoline);
// TODO(jgruber): Replace this by a lookup in the builtin entry table.
- __ movp(rbx,
+ __ movq(rbx,
__ ExternalReferenceAsOperand(
ExternalReference::
address_of_interpreter_entry_trampoline_instruction_start(
@@ -1416,7 +1386,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
kScratchRegister));
__ bind(&trampoline_loaded);
- __ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value()));
+ __ addq(rbx, Immediate(interpreter_entry_return_pc_offset->value()));
__ Push(rbx);
// Initialize dispatch table register.
@@ -1425,7 +1395,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the bytecode array pointer from the frame.
- __ movp(kInterpreterBytecodeArrayRegister,
+ __ movq(kInterpreterBytecodeArrayRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
@@ -1439,31 +1409,31 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ movp(kInterpreterBytecodeOffsetRegister,
+ __ movq(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ movp(
- kJavaScriptCallCodeStartRegister,
- Operand(kInterpreterDispatchTableRegister, r11, times_pointer_size, 0));
+ __ movq(kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, r11,
+ times_system_pointer_size, 0));
__ jmp(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
- __ movp(kInterpreterBytecodeArrayRegister,
+ __ movq(kInterpreterBytecodeArrayRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ movp(kInterpreterBytecodeOffsetRegister,
+ __ movq(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Load the current bytecode.
- __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
// Advance to the next bytecode.
@@ -1474,7 +1444,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(rbx, kInterpreterBytecodeOffsetRegister);
- __ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
+ __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
Generate_InterpreterEnterBytecode(masm);
@@ -1497,7 +1467,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve argument count for later compare.
- __ movp(rcx, rax);
+ __ movq(rcx, rax);
// Push the number of arguments to the callee.
__ SmiTag(rax, rax);
__ Push(rax);
@@ -1512,7 +1482,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
for (int j = 0; j < 4; ++j) {
Label over;
if (j < 3) {
- __ cmpp(rcx, Immediate(j));
+ __ cmpq(rcx, Immediate(j));
__ j(not_equal, &over, Label::kNear);
}
for (int i = j - 1; i >= 0; --i) {
@@ -1540,8 +1510,8 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
scope.GenerateLeaveFrame();
__ PopReturnAddressTo(rbx);
- __ incp(rcx);
- __ leap(rsp, Operand(rsp, rcx, times_pointer_size, 0));
+ __ incq(rcx);
+ __ leaq(rsp, Operand(rsp, rcx, times_system_pointer_size, 0));
__ PushReturnAddressFrom(rbx);
__ ret(0);
@@ -1554,10 +1524,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular JS by re-calling the function
// which has been reset to the compile lazy builtin.
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
- __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
- decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ JumpCodeObject(rcx);
}
@@ -1623,7 +1590,7 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), rax.code());
- __ movp(rax, Operand(rsp, kPCOnStackSize));
+ __ movq(rax, Operand(rsp, kPCOnStackSize));
__ ret(1 * kSystemPointerSize); // Remove rax.
}
@@ -1644,20 +1611,21 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Label no_arg_array, no_this_arg;
StackArgumentsAccessor args(rsp, rax);
__ LoadRoot(rdx, RootIndex::kUndefinedValue);
- __ movp(rbx, rdx);
- __ movp(rdi, args.GetReceiverOperand());
- __ testp(rax, rax);
+ __ movq(rbx, rdx);
+ __ movq(rdi, args.GetReceiverOperand());
+ __ testq(rax, rax);
__ j(zero, &no_this_arg, Label::kNear);
{
- __ movp(rdx, args.GetArgumentOperand(1));
- __ cmpp(rax, Immediate(1));
+ __ movq(rdx, args.GetArgumentOperand(1));
+ __ cmpq(rax, Immediate(1));
__ j(equal, &no_arg_array, Label::kNear);
- __ movp(rbx, args.GetArgumentOperand(2));
+ __ movq(rbx, args.GetArgumentOperand(2));
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
__ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
+ __ leaq(rsp,
+ Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize));
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
}
@@ -1707,19 +1675,19 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
{
Label done;
- __ testp(rax, rax);
+ __ testq(rax, rax);
__ j(not_zero, &done, Label::kNear);
__ PopReturnAddressTo(rbx);
__ PushRoot(RootIndex::kUndefinedValue);
__ PushReturnAddressFrom(rbx);
- __ incp(rax);
+ __ incq(rax);
__ bind(&done);
}
// 2. Get the callable to call (passed as receiver) from the stack.
{
StackArgumentsAccessor args(rsp, rax);
- __ movp(rdi, args.GetReceiverOperand());
+ __ movq(rdi, args.GetReceiverOperand());
}
// 3. Shift arguments and return address one slot down on the stack
@@ -1727,15 +1695,15 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// the original first argument the new receiver.
{
Label loop;
- __ movp(rcx, rax);
+ __ movq(rcx, rax);
StackArgumentsAccessor args(rsp, rcx);
__ bind(&loop);
- __ movp(rbx, args.GetArgumentOperand(1));
- __ movp(args.GetArgumentOperand(0), rbx);
- __ decp(rcx);
+ __ movq(rbx, args.GetArgumentOperand(1));
+ __ movq(args.GetArgumentOperand(0), rbx);
+ __ decq(rcx);
__ j(not_zero, &loop); // While non-zero.
__ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
- __ decp(rax); // One fewer argument (first argument is new receiver).
+ __ decq(rax); // One fewer argument (first argument is new receiver).
}
// 4. Call the callable.
@@ -1761,19 +1729,20 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Label done;
StackArgumentsAccessor args(rsp, rax);
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
- __ movp(rdx, rdi);
- __ movp(rbx, rdi);
- __ cmpp(rax, Immediate(1));
+ __ movq(rdx, rdi);
+ __ movq(rbx, rdi);
+ __ cmpq(rax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ movp(rdi, args.GetArgumentOperand(1)); // target
+ __ movq(rdi, args.GetArgumentOperand(1)); // target
__ j(equal, &done, Label::kNear);
- __ movp(rdx, args.GetArgumentOperand(2)); // thisArgument
- __ cmpp(rax, Immediate(3));
+ __ movq(rdx, args.GetArgumentOperand(2)); // thisArgument
+ __ cmpq(rax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ movp(rbx, args.GetArgumentOperand(3)); // argumentsList
+ __ movq(rbx, args.GetArgumentOperand(3)); // argumentsList
__ bind(&done);
__ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
+ __ leaq(rsp,
+ Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize));
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
}
@@ -1812,20 +1781,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Label done;
StackArgumentsAccessor args(rsp, rax);
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
- __ movp(rdx, rdi);
- __ movp(rbx, rdi);
- __ cmpp(rax, Immediate(1));
+ __ movq(rdx, rdi);
+ __ movq(rbx, rdi);
+ __ cmpq(rax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ movp(rdi, args.GetArgumentOperand(1)); // target
- __ movp(rdx, rdi); // new.target defaults to target
+ __ movq(rdi, args.GetArgumentOperand(1)); // target
+ __ movq(rdx, rdi); // new.target defaults to target
__ j(equal, &done, Label::kNear);
- __ movp(rbx, args.GetArgumentOperand(2)); // argumentsList
- __ cmpp(rax, Immediate(3));
+ __ movq(rbx, args.GetArgumentOperand(2)); // argumentsList
+ __ cmpq(rax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ movp(rdx, args.GetArgumentOperand(3)); // new.target
+ __ movq(rdx, args.GetArgumentOperand(3)); // new.target
__ bind(&done);
__ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
+ __ leaq(rsp,
+ Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize));
__ PushRoot(RootIndex::kUndefinedValue);
__ PushReturnAddressFrom(rcx);
}
@@ -1857,15 +1827,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
- Label generic_array_code;
if (FLAG_debug_code) {
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Initial map for the builtin InternalArray functions should be maps.
__ LoadTaggedPointerField(
- rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset),
- decompr_scratch_for_debug);
+ rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will detect both a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
@@ -1883,7 +1849,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ pushq(rbp);
- __ movp(rbp, rsp);
+ __ movq(rbp, rsp);
// Store the arguments adaptor context sentinel.
__ Push(Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -1902,16 +1868,16 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack. Number is a Smi.
- __ movp(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
// Leave the frame.
- __ movp(rsp, rbp);
+ __ movq(rsp, rbp);
__ popq(rbp);
// Remove caller arguments from the stack.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
- __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
+ __ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
}
@@ -1923,94 +1889,133 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- rdi : function (passed through to callee)
// -----------------------------------
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
-
- Label invoke, dont_adapt_arguments, stack_overflow, enough, too_few;
- __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
+ __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
- __ cmpp(rax, rbx);
- __ j(less, &too_few);
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testl(
+ FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
+ Immediate(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask));
+ __ j(not_zero, &skip_adapt_arguments);
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
+ // -------------------------------------------
+ // Adapt arguments.
+ // -------------------------------------------
+ {
EnterArgumentsAdaptorFrame(masm);
- // The registers rcx and r8 will be modified. The register rbx is only read.
Generate_StackOverflowCheck(masm, rbx, rcx, &stack_overflow);
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ leap(rax, Operand(rbp, rax, times_pointer_size, offset));
- __ Set(r8, -1); // account for receiver
+ Label under_application, over_application, invoke;
+ __ cmpq(rax, rbx);
+ __ j(less, &under_application, Label::kNear);
- Label copy;
- __ bind(&copy);
- __ incp(r8);
- __ Push(Operand(rax, 0));
- __ subp(rax, Immediate(kSystemPointerSize));
- __ cmpp(r8, rbx);
- __ j(less, &copy);
- __ jmp(&invoke);
- }
+ // Enough parameters: Actual >= expected.
+ __ bind(&over_application);
+ {
+ // Copy receiver and all expected arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ leaq(r8, Operand(rbp, rax, times_system_pointer_size, offset));
+ __ Set(rax, -1); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ incq(rax);
+ __ Push(Operand(r8, 0));
+ __ subq(r8, Immediate(kSystemPointerSize));
+ __ cmpq(rax, rbx);
+ __ j(less, &copy);
+ __ jmp(&invoke, Label::kNear);
+ }
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
+ // Too few parameters: Actual < expected.
+ __ bind(&under_application);
+ {
+ // Copy receiver and all actual arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ leaq(r9, Operand(rbp, rax, times_system_pointer_size, offset));
+ __ Set(r8, -1); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ incq(r8);
+ __ Push(Operand(r9, 0));
+ __ subq(r9, Immediate(kSystemPointerSize));
+ __ cmpq(r8, rax);
+ __ j(less, &copy);
+
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
+ __ bind(&fill);
+ __ incq(rax);
+ __ Push(kScratchRegister);
+ __ cmpq(rax, rbx);
+ __ j(less, &fill);
+ }
- EnterArgumentsAdaptorFrame(masm);
- // The registers rcx and r8 will be modified. The register rbx is only read.
- Generate_StackOverflowCheck(masm, rbx, rcx, &stack_overflow);
+ // Call the entry point.
+ __ bind(&invoke);
+ // rax : expected number of arguments
+ // rdx : new target (passed through to callee)
+ // rdi : function (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
+ __ CallCodeObject(rcx);
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ leap(rdi, Operand(rbp, rax, times_pointer_size, offset));
- __ Set(r8, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incp(r8);
- __ Push(Operand(rdi, 0));
- __ subp(rdi, Immediate(kSystemPointerSize));
- __ cmpp(r8, rax);
- __ j(less, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ incp(r8);
- __ Push(kScratchRegister);
- __ cmpp(r8, rbx);
- __ j(less, &fill);
-
- // Restore function pointer.
- __ movp(rdi, Operand(rbp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ movp(rax, rbx);
- // rax : expected number of arguments
- // rdx : new target (passed through to callee)
- // rdi : function (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
- decompr_scratch_for_debug);
- __ CallCodeObject(rcx);
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
+ masm->pc_offset());
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+ // Leave frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ ret(0);
+ }
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
+ // -------------------------------------------
+ // Skip adapt arguments.
+ // -------------------------------------------
+ __ bind(&skip_adapt_arguments);
+ {
+ // The callee cannot observe the actual arguments, so it's safe to just
+ // pass the expected arguments by massaging the stack appropriately. See
+ // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
+ Label under_application, over_application, invoke;
+ __ PopReturnAddressTo(rcx);
+ __ cmpq(rax, rbx);
+ __ j(less, &under_application, Label::kNear);
+
+ __ bind(&over_application);
+ {
+ // Remove superfluous parameters from the stack.
+ __ xchgq(rax, rbx);
+ __ subq(rbx, rax);
+ __ leaq(rsp, Operand(rsp, rbx, times_system_pointer_size, 0));
+ __ jmp(&invoke, Label::kNear);
+ }
+
+ __ bind(&under_application);
+ {
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
+ __ bind(&fill);
+ __ incq(rax);
+ __ Push(kScratchRegister);
+ __ cmpq(rax, rbx);
+ __ j(less, &fill);
+ }
+
+ __ bind(&invoke);
+ __ PushReturnAddressFrom(rcx);
+ }
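
This new skip_adapt_arguments path is the substantive change in the trampoline: when the SharedFunctionInfo flags say the callee can never observe its actual arguments, the stack is massaged in place instead of building an adaptor frame. A rough C++ model of that massaging, with the stack as a vector whose back is the top and all names hypothetical:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

static const uint64_t kUndefined = 0;  // stand-in for the undefined value

// Hypothetical sketch of the "skip adapt arguments" fast path: make the
// callee see exactly `expected` arguments without an adaptor frame.
static void SkipAdaptArguments(std::vector<uint64_t>& args,
                               size_t actual, size_t expected) {
  if (actual >= expected) {
    // Over-application: remove superfluous parameters from the stack.
    args.resize(args.size() - (actual - expected));
  } else {
    // Under-application: fill remaining slots with undefined values.
    for (size_t i = actual; i < expected; i++) args.push_back(kUndefined);
  }
}
```
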
// -------------------------------------------
- // Dont adapt arguments.
+ // Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
- decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ JumpCodeObject(rcx);
__ bind(&stack_overflow);
@@ -2034,16 +2039,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -----------------------------------
Register scratch = r11;
Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r12 : no_reg;
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
if (masm->emit_debug_code()) {
// Allow rbx to be a FixedArray, or a FixedDoubleArray if rcx == 0.
Label ok, fail;
__ AssertNotSmi(rbx);
Register map = r9;
- __ LoadTaggedPointerField(map, FieldOperand(rbx, HeapObject::kMapOffset),
- decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(map, FieldOperand(rbx, HeapObject::kMapOffset));
__ CmpInstanceType(map, FIXED_ARRAY_TYPE);
__ j(equal, &ok);
__ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
@@ -2073,7 +2075,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ LoadAnyTaggedField(
value,
FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
- decompr_scratch, decompr_scratch_for_debug);
+ decompr_scratch);
__ CompareRoot(value, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
__ LoadRoot(value, RootIndex::kUndefinedValue);
@@ -2104,15 +2106,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -- rcx : start index (to support rest parameters)
// -----------------------------------
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
-
// Check if new.target has a [[Construct]] internal method.
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
- __ LoadTaggedPointerField(rbx, FieldOperand(rdx, HeapObject::kMapOffset),
- decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
@@ -2128,18 +2126,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
- __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ cmpp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
+ __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ cmpq(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &arguments_adaptor, Label::kNear);
{
- __ movp(r8, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(r8, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
- r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
+ r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(
r8, FieldOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
- __ movp(rbx, rbp);
+ __ movq(rbx, rbp);
}
__ jmp(&arguments_done, Label::kNear);
__ bind(&arguments_adaptor);
@@ -2187,8 +2184,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
StackArgumentsAccessor args(rsp, rax);
__ AssertFunction(rdi);
@@ -2197,8 +2192,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ LoadTaggedPointerField(
- rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
+ rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
@@ -2212,8 +2206,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset),
- decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
@@ -2233,7 +2226,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(rcx);
} else {
Label convert_to_object, convert_receiver;
- __ movp(rcx, args.GetReceiverOperand());
+ __ movq(rcx, args.GetReceiverOperand());
__ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
@@ -2260,22 +2253,21 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ SmiTag(rax, rax);
__ Push(rax);
__ Push(rdi);
- __ movp(rax, rcx);
+ __ movq(rax, rcx);
__ Push(rsi);
__ Call(BUILTIN_CODE(masm->isolate(), ToObject),
RelocInfo::CODE_TARGET);
__ Pop(rsi);
- __ movp(rcx, rax);
+ __ movq(rcx, rax);
__ Pop(rdi);
__ Pop(rax);
__ SmiUntag(rax, rax);
}
__ LoadTaggedPointerField(
- rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
+ rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ movp(args.GetReceiverOperand(), rcx);
+ __ movq(args.GetReceiverOperand(), rcx);
}
__ bind(&done_convert);
@@ -2312,14 +2304,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -----------------------------------
Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Load [[BoundArguments]] into rcx and length of that into rbx.
Label no_bound_arguments;
__ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset),
- decompr_scratch_for_debug);
+ rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ testl(rbx, rbx);
__ j(zero, &no_bound_arguments);
@@ -2335,15 +2324,15 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ leap(kScratchRegister, Operand(rbx, times_pointer_size, 0));
- __ subp(rsp, kScratchRegister);
+ __ leaq(kScratchRegister, Operand(rbx, times_system_pointer_size, 0));
+ __ subq(rsp, kScratchRegister);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
__ CompareRoot(rsp, RootIndex::kRealStackLimit);
__ j(above_equal, &done, Label::kNear);
// Restore the stack pointer.
- __ leap(rsp, Operand(rsp, rbx, times_pointer_size, 0));
+ __ leaq(rsp, Operand(rsp, rbx, times_system_pointer_size, 0));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2359,10 +2348,12 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label loop;
__ Set(rcx, 0);
- __ leap(rbx, Operand(rsp, rbx, times_pointer_size, 0));
+ __ leaq(rbx, Operand(rsp, rbx, times_system_pointer_size, 0));
__ bind(&loop);
- __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ movp(Operand(rsp, rcx, times_pointer_size, 0), kScratchRegister);
+ __ movq(kScratchRegister,
+ Operand(rbx, rcx, times_system_pointer_size, 0));
+ __ movq(Operand(rsp, rcx, times_system_pointer_size, 0),
+ kScratchRegister);
__ incl(rcx);
__ cmpl(rcx, rax);
__ j(less, &loop);
@@ -2372,8 +2363,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label loop;
__ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset),
- decompr_scratch_for_debug);
+ rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ bind(&loop);
// Instead of doing decl(rbx) here subtract kTaggedSize from the header
@@ -2383,8 +2373,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ LoadAnyTaggedField(r12,
FieldOperand(rcx, rbx, times_tagged_size,
FixedArray::kHeaderSize - kTaggedSize),
- decompr_scratch, decompr_scratch_for_debug);
- __ movp(Operand(rsp, rax, times_pointer_size, 0), r12);
+ decompr_scratch);
+ __ movq(Operand(rsp, rax, times_system_pointer_size, 0), r12);
__ leal(rax, Operand(rax, 1));
__ decl(rbx);
__ j(greater, &loop);
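
Generate_PushBoundArguments reserves space, slides the return address and existing arguments down, and copies the function's [[BoundArguments]] into the gap just above the receiver. A rough C++ model (vector back = stack top; slot counts simplified, names hypothetical):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

static void PushBoundArguments(std::vector<uint64_t>& stack,
                               size_t moved_slots,  // ret addr + arguments
                               const std::vector<uint64_t>& bound) {
  size_t old_size = stack.size();
  stack.resize(old_size + bound.size());  // subq(rsp, kScratchRegister)
  // Relocate arguments and return address to the new stack top.
  for (size_t i = 0; i < moved_slots; i++) {
    stack[old_size + bound.size() - 1 - i] = stack[old_size - 1 - i];
  }
  // Copy [[BoundArguments]] into the gap, just above the receiver.
  for (size_t i = 0; i < bound.size(); i++) {
    stack[old_size - moved_slots + i] = bound[i];
  }
}
```
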
@@ -2409,23 +2399,20 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(rdi);
Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Patch the receiver to [[BoundThis]].
StackArgumentsAccessor args(rsp, rax);
__ LoadAnyTaggedField(rbx,
FieldOperand(rdi, JSBoundFunction::kBoundThisOffset),
- decompr_scratch, decompr_scratch_for_debug);
- __ movp(args.GetReceiverOperand(), rbx);
+ decompr_scratch);
+ __ movq(args.GetReceiverOperand(), rbx);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadTaggedPointerField(
- rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset),
- decompr_scratch_for_debug);
+ rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
@@ -2462,7 +2449,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// not we raise an exception).
// Overwrite the original receiver with the (original) target.
- __ movp(args.GetReceiverOperand(), rdi);
+ __ movq(args.GetReceiverOperand(), rdi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
__ Jump(masm->isolate()->builtins()->CallFunction(
@@ -2488,17 +2475,13 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ AssertConstructor(rdi);
__ AssertFunction(rdi);
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
-
// Calling convention for function specific ConstructStubs require
// rbx to contain either an AllocationSite or undefined.
__ LoadRoot(rbx, RootIndex::kUndefinedValue);
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
+ rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
@@ -2518,27 +2501,22 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ AssertConstructor(rdi);
__ AssertBoundFunction(rdi);
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
-
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
{
Label done;
- __ cmpp(rdi, rdx);
+ __ cmpq(rdi, rdx);
__ j(not_equal, &done, Label::kNear);
__ LoadTaggedPointerField(
- rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset),
- decompr_scratch_for_debug);
+ rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&done);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadTaggedPointerField(
- rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset),
- decompr_scratch_for_debug);
+ rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2552,16 +2530,12 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// -----------------------------------
StackArgumentsAccessor args(rsp, rax);
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
-
// Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(rdi, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(rcx, FieldOperand(rdi, HeapObject::kMapOffset),
- decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor);
@@ -2585,7 +2559,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
// Overwrite the original receiver with the (original) target.
- __ movp(args.GetReceiverOperand(), rdi);
+ __ movq(args.GetReceiverOperand(), rdi);
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, rdi);
__ Jump(masm->isolate()->builtins()->CallFunction(),
@@ -2601,8 +2575,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
- __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2613,7 +2587,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
Label skip;
// If the code object is null, just return to the caller.
- __ testp(rax, rax);
+ __ testq(rax, rax);
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
@@ -2633,7 +2607,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
- __ leap(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
+ __ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
// Overwrite the return address on the stack.
__ movq(StackOperandForReturnAddress(0), rax);
@@ -2663,7 +2637,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
arraysize(wasm::kFpParamRegisters),
"frame size mismatch");
- __ subp(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
+ __ subq(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
int offset = 0;
for (DoubleRegister reg : wasm::kFpParamRegisters) {
__ movdqu(Operand(rsp, offset), reg);
@@ -2675,13 +2649,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Push the function index as second argument.
__ Push(r11);
// Load the correct CEntry builtin from the instance object.
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
__ LoadTaggedPointerField(
- rcx,
- FieldOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset),
- decompr_scratch_for_debug);
+ rcx, FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(kContextRegister, Smi::zero());
@@ -2695,7 +2665,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ movdqu(reg, Operand(rsp, offset));
}
DCHECK_EQ(0, offset);
- __ addp(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
+ __ addq(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
for (Register reg : base::Reversed(wasm::kGpParamRegisters)) {
__ Pop(reg);
}
@@ -2749,7 +2719,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
DCHECK(!builtin_exit_frame);
__ EnterApiExitFrame(arg_stack_space);
// Move argc into r14 (argv is already in r15).
- __ movp(r14, rax);
+ __ movq(r14, rax);
} else {
__ EnterExitFrame(
arg_stack_space, save_doubles == kSaveFPRegs,
@@ -2772,16 +2742,16 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
if (result_size <= kMaxRegisterResultSize) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax), or a register pair (rax, rdx).
- __ movp(kCCallArg0, r14); // argc.
- __ movp(kCCallArg1, r15); // argv.
+ __ movq(kCCallArg0, r14); // argc.
+ __ movq(kCCallArg1, r15); // argv.
__ Move(kCCallArg2, ExternalReference::isolate_address(masm->isolate()));
} else {
DCHECK_LE(result_size, 2);
// Pass a pointer to the result location as the first argument.
- __ leap(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
+ __ leaq(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
// Pass a pointer to the Arguments object as the second argument.
- __ movp(kCCallArg1, r14); // argc.
- __ movp(kCCallArg2, r15); // argv.
+ __ movq(kCCallArg1, r14); // argc.
+ __ movq(kCCallArg2, r15); // argv.
__ Move(kCCallArg3, ExternalReference::isolate_address(masm->isolate()));
}
__ call(rbx);
@@ -2809,7 +2779,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
masm->ExternalReferenceAsOperand(pending_exception_address);
- __ cmpp(r14, pending_exception_operand);
+ __ cmpq(r14, pending_exception_operand);
__ j(equal, &okay, Label::kNear);
__ int3();
__ bind(&okay);
@@ -2838,24 +2808,24 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ movp(arg_reg_1, Immediate(0)); // argc.
- __ movp(arg_reg_2, Immediate(0)); // argv.
+ __ movq(arg_reg_1, Immediate(0)); // argc.
+ __ movq(arg_reg_2, Immediate(0)); // argv.
__ Move(arg_reg_3, ExternalReference::isolate_address(masm->isolate()));
__ PrepareCallCFunction(3);
__ CallCFunction(find_handler, 3);
}
// Retrieve the handler context, SP and FP.
- __ movp(rsi,
+ __ movq(rsi,
masm->ExternalReferenceAsOperand(pending_handler_context_address));
- __ movp(rsp, masm->ExternalReferenceAsOperand(pending_handler_sp_address));
- __ movp(rbp, masm->ExternalReferenceAsOperand(pending_handler_fp_address));
+ __ movq(rsp, masm->ExternalReferenceAsOperand(pending_handler_sp_address));
+ __ movq(rbp, masm->ExternalReferenceAsOperand(pending_handler_fp_address));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (rsi == 0) for non-JS frames.
Label skip;
- __ testp(rsi, rsi);
+ __ testq(rsi, rsi);
__ j(zero, &skip, Label::kNear);
- __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
__ bind(&skip);
// Reset the masking register. This is done independent of the underlying
@@ -2865,7 +2835,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
- __ movp(rdi,
+ __ movq(rdi,
masm->ExternalReferenceAsOperand(pending_handler_entrypoint_address));
__ jmp(rdi);
}
@@ -2874,7 +2844,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label check_negative, process_64_bits, done;
// Account for return address and saved regs.
- const int kArgumentOffset = 4 * kRegisterSize;
+ const int kArgumentOffset = 4 * kSystemPointerSize;
MemOperand mantissa_operand(MemOperand(rsp, kArgumentOffset));
MemOperand exponent_operand(
@@ -2935,135 +2905,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ ret(0);
}
-void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- const Register exponent = rdx;
- const Register scratch = rcx;
- const XMMRegister double_result = xmm3;
- const XMMRegister double_base = xmm2;
- const XMMRegister double_exponent = xmm1;
- const XMMRegister double_scratch = xmm4;
-
- Label call_runtime, done, exponent_not_smi, int_exponent;
-
- // Save 1 in double_result - we need this several times later on.
- __ movp(scratch, Immediate(1));
- __ Cvtlsi2sd(double_result, scratch);
-
- Label fast_power, try_arithmetic_simplification;
- // Detect integer exponents stored as double.
- __ DoubleToI(exponent, double_exponent, double_scratch,
- &try_arithmetic_simplification, &try_arithmetic_simplification);
- __ jmp(&int_exponent);
-
- __ bind(&try_arithmetic_simplification);
- __ Cvttsd2si(exponent, double_exponent);
- // Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmpl(exponent, Immediate(0x1));
- __ j(overflow, &call_runtime);
-
- // Using FPU instructions to calculate power.
- Label fast_power_failed;
- __ bind(&fast_power);
- __ fnclex(); // Clear flags to catch exceptions later.
- // Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ subp(rsp, Immediate(kDoubleSize));
- __ Movsd(Operand(rsp, 0), double_exponent);
- __ fld_d(Operand(rsp, 0)); // E
- __ Movsd(Operand(rsp, 0), double_base);
- __ fld_d(Operand(rsp, 0)); // B, E
-
- // Exponent is in st(1) and base is in st(0)
- // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
- // FYL2X calculates st(1) * log2(st(0))
- __ fyl2x(); // X
- __ fld(0); // X, X
- __ frndint(); // rnd(X), X
- __ fsub(1); // rnd(X), X-rnd(X)
- __ fxch(1); // X - rnd(X), rnd(X)
- // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
- __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
- __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
- // FSCALE calculates st(0) * 2^st(1)
- __ fscale(); // 2^X, rnd(X)
- __ fstp(1);
- // Bail out to runtime in case of exceptions in the status word.
- __ fnstsw_ax();
- __ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
- __ j(not_zero, &fast_power_failed, Label::kNear);
- __ fstp_d(Operand(rsp, 0));
- __ Movsd(double_result, Operand(rsp, 0));
- __ addp(rsp, Immediate(kDoubleSize));
- __ jmp(&done);
-
- __ bind(&fast_power_failed);
- __ fninit();
- __ addp(rsp, Immediate(kDoubleSize));
- __ jmp(&call_runtime);
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
- const XMMRegister double_scratch2 = double_exponent;
- // Back up exponent as we need to check if exponent is negative later.
- __ movp(scratch, exponent); // Back up exponent.
- __ Movsd(double_scratch, double_base); // Back up base.
- __ Movsd(double_scratch2, double_result); // Load double_exponent with 1.
-
- // Get absolute value of exponent.
- Label no_neg, while_true, while_false;
- __ testl(scratch, scratch);
- __ j(positive, &no_neg, Label::kNear);
- __ negl(scratch);
- __ bind(&no_neg);
-
- __ j(zero, &while_false, Label::kNear);
- __ shrl(scratch, Immediate(1));
- // Above condition means CF==0 && ZF==0. This means that the
- // bit that has been shifted out is 0 and the result is not 0.
- __ j(above, &while_true, Label::kNear);
- __ Movsd(double_result, double_scratch);
- __ j(zero, &while_false, Label::kNear);
-
- __ bind(&while_true);
- __ shrl(scratch, Immediate(1));
- __ Mulsd(double_scratch, double_scratch);
- __ j(above, &while_true, Label::kNear);
- __ Mulsd(double_result, double_scratch);
- __ j(not_zero, &while_true);
-
- __ bind(&while_false);
- // If the exponent is negative, return 1/result.
- __ testl(exponent, exponent);
- __ j(greater, &done);
- __ Divsd(double_scratch2, double_result);
- __ Movsd(double_result, double_scratch2);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ Xorpd(double_scratch2, double_scratch2);
- __ Ucomisd(double_scratch2, double_result);
- // double_exponent aliased as double_scratch2 has already been overwritten
- // and may not have contained the exponent value in the first place when the
- // input was a smi. We reset it with exponent value before bailing out.
- __ j(not_equal, &done);
- __ Cvtlsi2sd(double_exponent, exponent);
-
- // Returning or bailing out.
- __ bind(&call_runtime);
- // Move base to the correct argument register. Exponent is already in xmm1.
- __ Movsd(xmm0, double_base);
- DCHECK(double_exponent == xmm1);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(2);
- __ CallCFunction(ExternalReference::power_double_double_function(), 2);
- }
- // Return value is in xmm0.
- __ Movsd(double_result, xmm0);
-
- __ bind(&done);
- __ ret(0);
-}
-
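
Generate_MathPowInternal is deleted wholesale here. Its integer-exponent path was plain exponentiation by squaring: shift the exponent right each step, square the running base, and multiply it into the result whenever the shifted-out bit is 1. A C++ sketch of that loop for reference (a simplification: the removed code also bailed out to the runtime for NaN exponents and subnormal results):

```cpp
#include <cmath>
#include <cstdlib>

// Square-and-multiply sketch of the removed integer-exponent path.
static double PowIntExponent(double base, int exponent) {
  unsigned e = static_cast<unsigned>(std::abs(exponent));
  double result = 1.0;    // "Save 1 in double_result"
  double scratch = base;  // running base^(2^k)
  while (e != 0) {
    if (e & 1) result *= scratch;  // the bit shifted out was 1
    scratch *= scratch;            // Mulsd(double_scratch, double_scratch)
    e >>= 1;
  }
  // If the exponent is negative, return 1/result.
  return exponent < 0 ? 1.0 / result : result;
}
```
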
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -3072,17 +2913,13 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// -- rsp[8] : last argument
// -----------------------------------
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
__ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset),
- decompr_scratch_for_debug);
+ rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will detect both a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
@@ -3092,12 +2929,11 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// Figure out the right elements kind
__ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset),
- decompr_scratch_for_debug);
+ rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
- __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ DecodeField<Map::ElementsKindBits>(rcx);
@@ -3106,7 +2942,7 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
__ Assert(equal, AbortReason::kInvalidElementsKindForInternalPackedArray);
// No arguments should be passed.
- __ testp(rax, rax);
+ __ testq(rax, rax);
__ Assert(zero, AbortReason::kWrongNumberOfArgumentsForInternalPackedArray);
}
@@ -3137,7 +2973,6 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
- Label write_back;
Isolate* isolate = masm->isolate();
Factory* factory = isolate->factory();
@@ -3157,8 +2992,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
Register prev_limit_reg = rbx;
Register base_reg = r15;
__ Move(base_reg, next_address);
- __ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
- __ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ __ movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
+ __ movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
__ addl(Operand(base_reg, kLevelOffset), Immediate(1));
if (FLAG_log_timer_events) {
@@ -3200,21 +3035,21 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
}
// Load the value from ReturnValue
- __ movp(rax, return_value_operand);
+ __ movq(rax, return_value_operand);
__ bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ subl(Operand(base_reg, kLevelOffset), Immediate(1));
- __ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
- __ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ __ movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
+ __ cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
__ j(not_equal, &delete_allocated_handles);
// Leave the API exit frame.
__ bind(&leave_exit_frame);
if (stack_space_operand != nullptr) {
DCHECK_EQ(stack_space, 0);
- __ movp(rbx, *stack_space_operand);
+ __ movq(rbx, *stack_space_operand);
}
__ LeaveApiExitFrame();
@@ -3230,11 +3065,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
Register map = rcx;
__ JumpIfSmi(return_value, &ok, Label::kNear);
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
__ LoadTaggedPointerField(map,
- FieldOperand(return_value, HeapObject::kMapOffset),
- decompr_scratch_for_debug);
+ FieldOperand(return_value, HeapObject::kMapOffset));
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);
@@ -3278,12 +3110,12 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
- __ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
- __ movp(prev_limit_reg, rax);
+ __ movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
+ __ movq(prev_limit_reg, rax);
__ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
__ LoadAddress(rax, ExternalReference::delete_handle_scope_extensions());
__ call(rax);
- __ movp(rax, prev_limit_reg);
+ __ movq(rax, prev_limit_reg);
__ jmp(&leave_exit_frame);
}
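
The HandleScope bookkeeping around the API call follows a save/restore protocol: record next and limit, bump the level, and on the way out restore next, drop the level, and delete extensions if the callback grew the scope past the saved limit. A hypothetical C++ model of that protocol (not V8's actual HandleScope implementation):

```cpp
#include <cstdint>

struct HandleScopeData {
  uintptr_t* next;
  uintptr_t* limit;
  int level;
};

static void DeleteExtensions(HandleScopeData*) { /* free extra blocks */ }

template <typename Callback>
static void CallWithHandleScope(HandleScopeData* data, Callback callback) {
  uintptr_t* prev_next = data->next;    // movq prev_next_address_reg, [kNextOffset]
  uintptr_t* prev_limit = data->limit;  // movq prev_limit_reg, [kLimitOffset]
  data->level++;                        // addl [kLevelOffset], 1

  callback();

  data->level--;                        // subl [kLevelOffset], 1
  data->next = prev_next;               // movq [kNextOffset], prev_next
  if (data->limit != prev_limit) {      // cmpq prev_limit, [kLimitOffset]
    data->limit = prev_limit;
    DeleteExtensions(data);             // delete_handle_scope_extensions
  }
}
```
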
@@ -3298,31 +3130,25 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rsi : kTargetContext
- // -- rdx : kApiFunctionAddress
- // -- rcx : kArgc
- // --
+ // -- rsi : context
+ // -- rdx : api function address
+ // -- rcx : arguments count (not including the receiver)
+ // -- rbx : call data
+ // -- rdi : holder
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -- ...
// -- rsp[argc * 8] : first argument
// -- rsp[(argc + 1) * 8] : receiver
- // -- rsp[(argc + 2) * 8] : kHolder
- // -- rsp[(argc + 3) * 8] : kCallData
// -----------------------------------
Register api_function_address = rdx;
Register argc = rcx;
+ Register call_data = rbx;
+ Register holder = rdi;
- DCHECK(!AreAliased(api_function_address, argc, kScratchRegister));
-
- // Stack offsets (without argc).
- static constexpr int kReceiverOffset = kSystemPointerSize;
- static constexpr int kHolderOffset = kReceiverOffset + kSystemPointerSize;
- static constexpr int kCallDataOffset = kHolderOffset + kSystemPointerSize;
-
- // Extra stack arguments are: the receiver, kHolder, kCallData.
- static constexpr int kExtraStackArgumentCount = 3;
+ DCHECK(!AreAliased(api_function_address, argc, holder, call_data,
+ kScratchRegister));
typedef FunctionCallbackArguments FCA;
@@ -3348,41 +3174,20 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// rsp[5 * kSystemPointerSize]: kData
// rsp[6 * kSystemPointerSize]: undefined (kNewTarget)
- // Reserve space on the stack.
- __ subp(rsp, Immediate(FCA::kArgsLength * kSystemPointerSize));
-
- // Return address (the old stack location is overwritten later on).
- __ movp(kScratchRegister,
- Operand(rsp, FCA::kArgsLength * kSystemPointerSize));
- __ movp(Operand(rsp, 0 * kSystemPointerSize), kScratchRegister);
-
- // kHolder.
- __ movp(kScratchRegister,
- Operand(rsp, argc, times_pointer_size,
- FCA::kArgsLength * kSystemPointerSize + kHolderOffset));
- __ movp(Operand(rsp, 1 * kSystemPointerSize), kScratchRegister);
-
- // kIsolate.
- __ Move(kScratchRegister,
- ExternalReference::isolate_address(masm->isolate()));
- __ movp(Operand(rsp, 2 * kSystemPointerSize), kScratchRegister);
-
- // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ __ PopReturnAddressTo(rax);
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
- __ movp(Operand(rsp, 3 * kSystemPointerSize), kScratchRegister);
- __ movp(Operand(rsp, 4 * kSystemPointerSize), kScratchRegister);
- __ movp(Operand(rsp, 6 * kSystemPointerSize), kScratchRegister);
-
- // kData.
- __ movp(kScratchRegister,
- Operand(rsp, argc, times_pointer_size,
- FCA::kArgsLength * kSystemPointerSize + kCallDataOffset));
- __ movp(Operand(rsp, 5 * kSystemPointerSize), kScratchRegister);
+ __ Push(kScratchRegister);
+ __ Push(call_data);
+ __ Push(kScratchRegister);
+ __ Push(kScratchRegister);
+ __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
+ __ Push(holder);
+ __ PushReturnAddressFrom(rax);
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
Register scratch = rbx;
- __ leap(scratch, Operand(rsp, 1 * kSystemPointerSize));
+ __ leaq(scratch, Operand(rsp, 1 * kSystemPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -3390,24 +3195,23 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
__ EnterApiExitFrame(kApiStackSpace);
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
- __ movp(StackSpaceOperand(0), scratch);
+ __ movq(StackSpaceOperand(0), scratch);
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
- __ leap(scratch, Operand(scratch, argc, times_pointer_size,
+ __ leaq(scratch, Operand(scratch, argc, times_system_pointer_size,
(FCA::kArgsLength - 1) * kSystemPointerSize));
- __ movp(StackSpaceOperand(1), scratch);
+ __ movq(StackSpaceOperand(1), scratch);
// FunctionCallbackInfo::length_.
- __ movp(StackSpaceOperand(2), argc);
+ __ movq(StackSpaceOperand(2), argc);
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
__ leaq(kScratchRegister,
- Operand(argc, times_pointer_size,
- (FCA::kArgsLength + kExtraStackArgumentCount) *
- kSystemPointerSize));
- __ movp(StackSpaceOperand(3), kScratchRegister);
+ Operand(argc, times_system_pointer_size,
+ (FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
+ __ movq(StackSpaceOperand(3), kScratchRegister);
Register arguments_arg = arg_reg_1;
Register callback_arg = arg_reg_2;
@@ -3417,7 +3221,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(api_function_address != arguments_arg);
// v8::InvocationCallback's argument.
- __ leap(arguments_arg, StackSpaceOperand(0));
+ __ leaq(arguments_arg, StackSpaceOperand(0));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
@@ -3446,11 +3250,9 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Register scratch = rax;
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
Register decompr_scratch2 = COMPRESS_POINTERS_BOOL ? r12 : no_reg;
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
DCHECK(!AreAliased(receiver, holder, callback, scratch, decompr_scratch1,
- decompr_scratch2, decompr_scratch_for_debug));
+ decompr_scratch2));
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
@@ -3467,8 +3269,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ PopReturnAddressTo(scratch);
__ Push(receiver);
__ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset),
- decompr_scratch1, decompr_scratch2,
- decompr_scratch_for_debug);
+ decompr_scratch1, decompr_scratch2);
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
__ Push(kScratchRegister); // return value
__ Push(kScratchRegister); // return value default
@@ -3476,7 +3277,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ Push(holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
__ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset),
- decompr_scratch1, decompr_scratch_for_debug);
+ decompr_scratch1);
__ PushReturnAddressFrom(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
@@ -3486,19 +3287,19 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
const int kArgStackSpace = 1;
// Load address of v8::PropertyAccessorInfo::args_ array.
- __ leap(scratch, Operand(rsp, 2 * kSystemPointerSize));
+ __ leaq(scratch, Operand(rsp, 2 * kSystemPointerSize));
__ EnterApiExitFrame(kArgStackSpace);
// Create v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
Operand info_object = StackSpaceOperand(0);
- __ movp(info_object, scratch);
+ __ movq(info_object, scratch);
- __ leap(name_arg, Operand(scratch, -kSystemPointerSize));
+ __ leaq(name_arg, Operand(scratch, -kSystemPointerSize));
// The context register (rsi) has been saved in EnterApiExitFrame and
// could be used to pass arguments.
- __ leap(accessor_info_arg, info_object);
+ __ leaq(accessor_info_arg, info_object);
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
@@ -3508,9 +3309,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
DCHECK(api_function_address != accessor_info_arg);
DCHECK(api_function_address != name_arg);
__ LoadTaggedPointerField(
- scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset),
- decompr_scratch_for_debug);
- __ movp(api_function_address,
+ scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ movq(api_function_address,
FieldOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip the prologue, return address, and name handle.
diff --git a/deps/v8/src/code-comments.cc b/deps/v8/src/code-comments.cc
index 6e64eb7fa1..12b9f7c3a8 100644
--- a/deps/v8/src/code-comments.cc
+++ b/deps/v8/src/code-comments.cc
@@ -45,11 +45,11 @@ const char* CodeCommentsIterator::GetComment() const {
}
uint32_t CodeCommentsIterator::GetCommentSize() const {
- return *reinterpret_cast<uint32_t*>(current_entry_ + kOffsetToCommentSize);
+ return ReadUnalignedValue<uint32_t>(current_entry_ + kOffsetToCommentSize);
}
uint32_t CodeCommentsIterator::GetPCOffset() const {
- return *reinterpret_cast<uint32_t*>(current_entry_ + kOffsetToPCOffset);
+ return ReadUnalignedValue<uint32_t>(current_entry_ + kOffsetToPCOffset);
}
void CodeCommentsIterator::Next() {
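The change above matters because a code-comment entry's fields are not guaranteed to be 4-byte aligned, and reading them through a reinterpret_cast'ed uint32_t* is undefined behavior (and can fault on strict-alignment targets). A minimal standalone sketch of a portable unaligned read in the same spirit — this helper is illustrative, not V8's actual ReadUnalignedValue:

#include <cstdint>
#include <cstring>
#include <iostream>

template <typename V>
V ReadUnaligned(const uint8_t* p) {
  V value;
  // memcpy is the portable way to load from a possibly misaligned address.
  std::memcpy(&value, p, sizeof(V));
  return value;
}

int main() {
  uint8_t buffer[8] = {0};
  const uint32_t expected = 0xDEADBEEF;
  std::memcpy(buffer + 1, &expected, sizeof(expected));  // misaligned by 1
  std::cout << std::hex << ReadUnaligned<uint32_t>(buffer + 1) << "\n";
}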
diff --git a/deps/v8/src/code-desc.cc b/deps/v8/src/code-desc.cc
new file mode 100644
index 0000000000..f66b73f0a0
--- /dev/null
+++ b/deps/v8/src/code-desc.cc
@@ -0,0 +1,73 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-desc.h"
+
+#include "src/assembler-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+void CodeDesc::Initialize(CodeDesc* desc, Assembler* assembler,
+ int safepoint_table_offset, int handler_table_offset,
+ int constant_pool_offset, int code_comments_offset,
+ int reloc_info_offset) {
+ desc->buffer = assembler->buffer_start();
+ desc->buffer_size = assembler->buffer_size();
+ desc->instr_size = assembler->instruction_size();
+
+ desc->code_comments_offset = code_comments_offset;
+ desc->code_comments_size = desc->instr_size - code_comments_offset;
+
+ desc->constant_pool_offset = constant_pool_offset;
+ desc->constant_pool_size = desc->code_comments_offset - constant_pool_offset;
+
+ desc->handler_table_offset = handler_table_offset;
+ desc->handler_table_size = desc->constant_pool_offset - handler_table_offset;
+
+ desc->safepoint_table_offset = safepoint_table_offset;
+ desc->safepoint_table_size =
+ desc->handler_table_offset - safepoint_table_offset;
+
+ desc->reloc_offset = reloc_info_offset;
+ desc->reloc_size = desc->buffer_size - reloc_info_offset;
+
+ desc->unwinding_info_size = 0;
+ desc->unwinding_info = nullptr;
+
+ desc->origin = assembler;
+
+ CodeDesc::Verify(desc);
+}
+
+#ifdef DEBUG
+// static
+void CodeDesc::Verify(const CodeDesc* desc) {
+ // Zero-size code objects upset the system.
+ DCHECK_GT(desc->instr_size, 0);
+ DCHECK_NOT_NULL(desc->buffer);
+
+ // Instruction area layout invariants.
+ DCHECK_GE(desc->safepoint_table_size, 0);
+ DCHECK_EQ(desc->safepoint_table_size + desc->safepoint_table_offset,
+ desc->handler_table_offset);
+ DCHECK_GE(desc->handler_table_size, 0);
+ DCHECK_EQ(desc->handler_table_size + desc->handler_table_offset,
+ desc->constant_pool_offset);
+ DCHECK_GE(desc->constant_pool_size, 0);
+ DCHECK_EQ(desc->constant_pool_size + desc->constant_pool_offset,
+ desc->code_comments_offset);
+ DCHECK_GE(desc->code_comments_size, 0);
+ DCHECK_EQ(desc->code_comments_size + desc->code_comments_offset,
+ desc->instr_size);
+
+ DCHECK_GE(desc->reloc_offset, 0);
+ DCHECK_GE(desc->reloc_size, 0);
+ DCHECK_GE(desc->unwinding_info_size, 0);
+}
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/code-desc.h b/deps/v8/src/code-desc.h
new file mode 100644
index 0000000000..4da4ee395c
--- /dev/null
+++ b/deps/v8/src/code-desc.h
@@ -0,0 +1,83 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODE_DESC_H_
+#define V8_CODE_DESC_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// A CodeDesc describes a buffer holding instructions and relocation
+// information. The instructions start at the beginning of the buffer
+// and grow forward, the relocation information starts at the end of
+// the buffer and grows backward. Inlined metadata sections may exist
+// at the end of the instructions.
+//
+// ā”‚<--------------- buffer_size ----------------------------------->ā”‚
+// ā”‚<---------------- instr_size ------------->ā”‚ ā”‚<-reloc_size->ā”‚
+// ā”œā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”¼ā”€ā”€ā”€ā”€ā”€ā”€ā”¼ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”¤
+// ā”‚ instructions ā”‚ data ā”‚ free ā”‚ reloc info ā”‚
+// ā”œā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”“ā”€ā”€ā”€ā”€ā”€ā”€ā”“ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”˜
+
+// TODO(jgruber): Add a single chokepoint for specifying the instruction area
+// layout (i.e. the order of inlined metadata fields).
+// TODO(jgruber): Systematically maintain inlined metadata offsets and sizes
+// to simplify CodeDesc initialization.
+
+class CodeDesc {
+ public:
+ static void Initialize(CodeDesc* desc, Assembler* assembler,
+ int safepoint_table_offset, int handler_table_offset,
+ int constant_pool_offset, int code_comments_offset,
+ int reloc_info_offset);
+
+#ifdef DEBUG
+ static void Verify(const CodeDesc* desc);
+#else
+ inline static void Verify(const CodeDesc* desc) {}
+#endif
+
+ public:
+ byte* buffer = nullptr;
+ int buffer_size = 0;
+
+ // The instruction area contains executable code plus inlined metadata.
+
+ int instr_size = 0;
+
+ // Metadata packed into the instructions area.
+
+ int safepoint_table_offset = 0;
+ int safepoint_table_size = 0;
+
+ int handler_table_offset = 0;
+ int handler_table_size = 0;
+
+ int constant_pool_offset = 0;
+ int constant_pool_size = 0;
+
+ int code_comments_offset = 0;
+ int code_comments_size = 0;
+
+ // Relocation info is located at the end of the buffer and not part of the
+ // instructions area.
+
+ int reloc_offset = 0;
+ int reloc_size = 0;
+
+ // Unwinding information.
+ // TODO(jgruber,mstarzinger): Pack this into the inlined metadata section.
+
+ byte* unwinding_info = nullptr;
+ int unwinding_info_size = 0;
+
+ Assembler* origin = nullptr;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODE_DESC_H_
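The layout diagram in this header encodes the invariant that CodeDesc::Verify enforces: the safepoint table, handler table, constant pool, and code comments tile the instruction area back-to-back, so each section's size is simply the distance to the next section's offset. A minimal sketch of that arithmetic, using made-up example offsets rather than anything V8 computes:

#include <cassert>

struct Sections {
  int instr_size;
  int safepoint_table_offset;
  int handler_table_offset;
  int constant_pool_offset;
  int code_comments_offset;
};

int main() {
  // Hypothetical offsets into a 128-byte instruction area.
  Sections s{128, 64, 80, 96, 112};
  int safepoint_size = s.handler_table_offset - s.safepoint_table_offset;
  int handler_size = s.constant_pool_offset - s.handler_table_offset;
  int pool_size = s.code_comments_offset - s.constant_pool_offset;
  int comments_size = s.instr_size - s.code_comments_offset;
  // Contiguity: each offset plus its size lands exactly on the next offset.
  assert(s.safepoint_table_offset + safepoint_size == s.handler_table_offset);
  assert(s.handler_table_offset + handler_size == s.constant_pool_offset);
  assert(s.constant_pool_offset + pool_size == s.code_comments_offset);
  assert(s.code_comments_offset + comments_size == s.instr_size);
}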
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 7303f44f2b..3c52fb0752 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -8,6 +8,7 @@
#include "src/builtins/builtins-descriptors.h"
#include "src/ic/ic.h"
#include "src/objects-inl.h"
+#include "src/objects/allocation-site-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/code-reference.cc b/deps/v8/src/code-reference.cc
index 941e69f36d..4c8077754d 100644
--- a/deps/v8/src/code-reference.cc
+++ b/deps/v8/src/code-reference.cc
@@ -4,6 +4,7 @@
#include "src/code-reference.h"
+#include "src/code-desc.h"
#include "src/globals.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
@@ -50,7 +51,7 @@ struct CodeDescOps {
const CodeDesc* code_desc;
Address constant_pool() const {
- return instruction_start() + code_desc->constant_pool_offset();
+ return instruction_start() + code_desc->constant_pool_offset;
}
Address instruction_start() const {
return reinterpret_cast<Address>(code_desc->buffer);
@@ -60,14 +61,14 @@ struct CodeDescOps {
}
int instruction_size() const { return code_desc->instr_size; }
const byte* relocation_start() const {
- return code_desc->buffer + code_desc->buffer_size - code_desc->reloc_size;
+ return code_desc->buffer + code_desc->reloc_offset;
}
const byte* relocation_end() const {
return code_desc->buffer + code_desc->buffer_size;
}
int relocation_size() const { return code_desc->reloc_size; }
Address code_comments() const {
- return instruction_start() + code_desc->code_comments_size;
+ return instruction_start() + code_desc->code_comments_offset;
}
};
} // namespace
@@ -87,14 +88,14 @@ struct CodeDescOps {
} \
}
-DISPATCH(Address, constant_pool);
-DISPATCH(Address, instruction_start);
-DISPATCH(Address, instruction_end);
-DISPATCH(int, instruction_size);
-DISPATCH(const byte*, relocation_start);
-DISPATCH(const byte*, relocation_end);
-DISPATCH(int, relocation_size);
-DISPATCH(Address, code_comments);
+DISPATCH(Address, constant_pool)
+DISPATCH(Address, instruction_start)
+DISPATCH(Address, instruction_end)
+DISPATCH(int, instruction_size)
+DISPATCH(const byte*, relocation_start)
+DISPATCH(const byte*, relocation_end)
+DISPATCH(int, relocation_size)
+DISPATCH(Address, code_comments)
#undef DISPATCH
diff --git a/deps/v8/src/code-reference.h b/deps/v8/src/code-reference.h
index 7dce2e1857..314618afcb 100644
--- a/deps/v8/src/code-reference.h
+++ b/deps/v8/src/code-reference.h
@@ -12,7 +12,7 @@ namespace v8 {
namespace internal {
class Code;
-struct CodeDesc;
+class CodeDesc;
namespace wasm {
class WasmCode;
@@ -59,7 +59,7 @@ class CodeReference {
Handle<Code> js_code_;
};
- DISALLOW_NEW_AND_DELETE();
+ DISALLOW_NEW_AND_DELETE()
};
ASSERT_TRIVIALLY_COPYABLE(CodeReference);
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 314ad411e1..74e5423693 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -8,6 +8,8 @@
#include "src/counters.h"
#include "src/frames-inl.h"
#include "src/frames.h"
+#include "src/function-kind.h"
+#include "src/heap/heap-inl.h" // For Page/MemoryChunk. TODO(jkummerow): Drop.
#include "src/objects/api-callbacks.h"
#include "src/objects/cell.h"
#include "src/objects/descriptor-array.h"
@@ -148,10 +150,13 @@ void CodeStubAssembler::Check(const NodeGenerator& condition_body,
}
void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
- Label ok(this);
- GotoIf(condition, &ok);
- DebugBreak();
- Goto(&ok);
+ Label ok(this), not_ok(this, Label::kDeferred);
+ Branch(condition, &ok, &not_ok);
+ BIND(&not_ok);
+ {
+ DebugBreak();
+ Goto(&ok);
+ }
BIND(&ok);
}
@@ -244,7 +249,7 @@ TNode<Object> CodeStubAssembler::NoContextConstant() {
std::declval<Heap>().rootAccessorName())>::type>::type>( \
LoadRoot(RootIndex::k##rootIndexName)); \
}
-HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
+HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
@@ -255,7 +260,7 @@ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type>( \
LoadRoot(RootIndex::k##rootIndexName)); \
}
-HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
+HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
@@ -267,7 +272,7 @@ HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
SloppyTNode<Object> value) { \
return WordNotEqual(value, name##Constant()); \
}
-HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST);
+HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
#undef HEAP_CONSTANT_TEST
Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
@@ -1197,6 +1202,12 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address,
limit_address);
#elif defined(V8_HOST_ARCH_64_BIT)
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): Consider using aligned allocations once the
+ // allocation alignment inconsistency is fixed. For now we keep using
+ // unaligned access since both x64 and arm64 architectures (where pointer
+ // compression is supported) allow unaligned access to doubles and full words.
+#endif // V8_COMPRESS_POINTERS
// Allocation on 64 bit machine is naturally double aligned
return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address,
limit_address);
@@ -1216,6 +1227,12 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags) {
Comment("Allocate");
bool const new_space = !(flags & kPretenured);
+ if (!(flags & kAllowLargeObjectAllocation)) {
+ intptr_t size_constant;
+ if (ToIntPtrConstant(size_in_bytes, size_constant)) {
+ CHECK_LE(size_constant, kMaxRegularHeapObjectSize);
+ }
+ }
if (!(flags & kDoubleAlignment) && !(flags & kAllowLargeObjectAllocation)) {
return OptimizedAllocate(size_in_bytes, new_space
? PretenureFlag::NOT_TENURED
@@ -1225,18 +1242,19 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
new_space
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
- DCHECK_EQ(kTaggedSize,
+ DCHECK_EQ(kSystemPointerSize,
ExternalReference::new_space_allocation_limit_address(isolate())
.address() -
ExternalReference::new_space_allocation_top_address(isolate())
.address());
- DCHECK_EQ(kTaggedSize,
+ DCHECK_EQ(kSystemPointerSize,
ExternalReference::old_space_allocation_limit_address(isolate())
.address() -
ExternalReference::old_space_allocation_top_address(isolate())
.address());
- TNode<IntPtrT> limit_address = IntPtrAdd(
- ReinterpretCast<IntPtrT>(top_address), IntPtrConstant(kTaggedSize));
+ TNode<IntPtrT> limit_address =
+ IntPtrAdd(ReinterpretCast<IntPtrT>(top_address),
+ IntPtrConstant(kSystemPointerSize));
if (flags & kDoubleAlignment) {
return AllocateRawDoubleAligned(size_in_bytes, flags,
@@ -1395,23 +1413,6 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
}
}
-TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32Root(
- RootIndex root_index) {
- Node* isolate_root =
- ExternalConstant(ExternalReference::isolate_root(isolate()));
- int offset = IsolateData::root_slot_offset(root_index);
- if (SmiValuesAre32Bits()) {
-#if V8_TARGET_LITTLE_ENDIAN
- offset += 4;
-#endif
- return UncheckedCast<Int32T>(
- Load(MachineType::Int32(), isolate_root, IntPtrConstant(offset)));
- } else {
- return SmiToInt32(
- Load(MachineType::AnyTagged(), isolate_root, IntPtrConstant(offset)));
- }
-}
-
void CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
if (SmiValuesAre32Bits()) {
int zero_offset = offset + 4;
@@ -1437,7 +1438,8 @@ TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
}
TNode<Map> CodeStubAssembler::LoadMap(SloppyTNode<HeapObject> object) {
- return UncheckedCast<Map>(LoadObjectField(object, HeapObject::kMapOffset));
+ return UncheckedCast<Map>(LoadObjectField(object, HeapObject::kMapOffset,
+ MachineType::TaggedPointer()));
}
TNode<Int32T> CodeStubAssembler::LoadInstanceType(
@@ -1963,10 +1965,14 @@ void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
TNode<FixedArray> object, Node* index_node, int additional_offset,
- ParameterMode parameter_mode, LoadSensitivity needs_poisoning) {
+ ParameterMode parameter_mode, LoadSensitivity needs_poisoning,
+ CheckBounds check_bounds) {
CSA_ASSERT(this, IsFixedArraySubclass(object));
CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object));
- FixedArrayBoundsCheck(object, index_node, additional_offset, parameter_mode);
+ if (NeedsBoundsCheck(check_bounds)) {
+ FixedArrayBoundsCheck(object, index_node, additional_offset,
+ parameter_mode);
+ }
TNode<MaybeObject> element =
LoadArrayElement(object, FixedArray::kHeaderSize, index_node,
additional_offset, parameter_mode, needs_poisoning);
@@ -2002,6 +2008,27 @@ TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayBackingStore(
IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
}
+TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayOnHeapBackingStore(
+ TNode<FixedTypedArrayBase> typed_array) {
+ // This is a specialized method of retrieving the backing store pointer for
+ // an on-heap allocated typed array buffer. An on-heap buffer's backing
+ // store is a fixed offset from the pointer to a typed array's elements. See
+ // TypedArrayBuiltinsAssembler::AllocateOnHeapElements().
+ TNode<WordT> backing_store =
+ IntPtrAdd(BitcastTaggedToWord(typed_array),
+ IntPtrConstant(
+ FixedTypedArrayBase::ExternalPointerValueForOnHeapArray()));
+
+#ifdef DEBUG
+ // Verify that this is an on heap backing store.
+ TNode<RawPtrT> expected_backing_store_pointer =
+ LoadFixedTypedArrayBackingStore(typed_array);
+ CSA_ASSERT(this, WordEqual(backing_store, expected_backing_store_pointer));
+#endif
+
+ return UncheckedCast<RawPtrT>(backing_store);
+}
+
Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
Node* data_pointer, Node* offset) {
if (Is64()) {
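The new helper above is fast because an on-heap typed array stores its data inline: the backing store address is a fixed distance from the elements object itself, so it can be produced with one add instead of loading external_pointer and combining it with base_pointer. A hedged plain-C++ sketch of the idea; the offset constant is a hypothetical placeholder, not the real value of ExternalPointerValueForOnHeapArray():

#include <cstdint>

// Hypothetical fixed distance from the elements object to its inline data.
constexpr uintptr_t kOnHeapBackingStoreOffset = 16;

inline uint8_t* OnHeapBackingStore(uintptr_t elements_object_address) {
  // A single pointer-sized add; no memory loads required.
  return reinterpret_cast<uint8_t*>(elements_object_address +
                                    kOnHeapBackingStoreOffset);
}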
@@ -2790,10 +2817,12 @@ void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
void CodeStubAssembler::StoreFixedDoubleArrayElement(
TNode<FixedDoubleArray> object, Node* index_node, TNode<Float64T> value,
- ParameterMode parameter_mode) {
+ ParameterMode parameter_mode, CheckBounds check_bounds) {
CSA_ASSERT(this, IsFixedDoubleArray(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
- FixedArrayBoundsCheck(object, index_node, 0, parameter_mode);
+ if (NeedsBoundsCheck(check_bounds)) {
+ FixedArrayBoundsCheck(object, index_node, 0, parameter_mode);
+ }
Node* offset =
ElementOffsetFromIndex(index_node, PACKED_DOUBLE_ELEMENTS, parameter_mode,
FixedArray::kHeaderSize - kHeapObjectTag);
@@ -3284,100 +3313,34 @@ TNode<String> CodeStubAssembler::AllocateSlicedTwoByteString(
offset);
}
-TNode<String> CodeStubAssembler::AllocateConsString(RootIndex map_root_index,
- TNode<Uint32T> length,
- TNode<String> first,
- TNode<String> second,
- AllocationFlags flags) {
- DCHECK(map_root_index == RootIndex::kConsOneByteStringMap ||
- map_root_index == RootIndex::kConsStringMap);
- Node* result = Allocate(ConsString::kSize, flags);
- DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
- StoreMapNoWriteBarrier(result, map_root_index);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
- MachineRepresentation::kWord32);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
- Int32Constant(String::kEmptyHashField),
- MachineRepresentation::kWord32);
- bool const new_space = !(flags & kPretenured);
- if (new_space) {
- StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first,
- MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, second,
- MachineRepresentation::kTagged);
- } else {
- StoreObjectField(result, ConsString::kFirstOffset, first);
- StoreObjectField(result, ConsString::kSecondOffset, second);
- }
- return CAST(result);
-}
-
-TNode<String> CodeStubAssembler::AllocateOneByteConsString(
- TNode<Uint32T> length, TNode<String> first, TNode<String> second,
- AllocationFlags flags) {
- return AllocateConsString(RootIndex::kConsOneByteStringMap, length, first,
- second, flags);
-}
-
-TNode<String> CodeStubAssembler::AllocateTwoByteConsString(
- TNode<Uint32T> length, TNode<String> first, TNode<String> second,
- AllocationFlags flags) {
- return AllocateConsString(RootIndex::kConsStringMap, length, first, second,
- flags);
-}
-
-TNode<String> CodeStubAssembler::NewConsString(TNode<Uint32T> length,
- TNode<String> left,
- TNode<String> right,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::AllocateConsString(TNode<Uint32T> length,
+ TNode<String> left,
+ TNode<String> right) {
// The string resulting from the addition can be a cons string.
Comment("Allocating ConsString");
Node* left_instance_type = LoadInstanceType(left);
Node* right_instance_type = LoadInstanceType(right);
- // Compute intersection and difference of instance types.
- Node* anded_instance_types =
- Word32And(left_instance_type, right_instance_type);
- Node* xored_instance_types =
- Word32Xor(left_instance_type, right_instance_type);
-
- // We create a one-byte cons string if
- // 1. both strings are one-byte, or
- // 2. at least one of the strings is two-byte, but happens to contain only
- // one-byte characters.
- // To do this, we check
- // 1. if both strings are one-byte, or if the one-byte data hint is set in
- // both strings, or
- // 2. if one of the strings has the one-byte data hint set and the other
- // string is one-byte.
+ // Determine the resulting ConsString map to use depending on whether
+ // either {left} or {right} has two-byte encoding.
STATIC_ASSERT(kOneByteStringTag != 0);
- STATIC_ASSERT(kOneByteDataHintTag != 0);
- Label one_byte_map(this);
- Label two_byte_map(this);
- TVARIABLE(String, result);
- Label done(this, &result);
- GotoIf(IsSetWord32(anded_instance_types,
- kStringEncodingMask | kOneByteDataHintTag),
- &one_byte_map);
- Branch(Word32NotEqual(Word32And(xored_instance_types,
- Int32Constant(kStringEncodingMask |
- kOneByteDataHintMask)),
- Int32Constant(kOneByteStringTag | kOneByteDataHintTag)),
- &two_byte_map, &one_byte_map);
-
- BIND(&one_byte_map);
- Comment("One-byte ConsString");
- result = AllocateOneByteConsString(length, left, right, flags);
- Goto(&done);
-
- BIND(&two_byte_map);
- Comment("Two-byte ConsString");
- result = AllocateTwoByteConsString(length, left, right, flags);
- Goto(&done);
-
- BIND(&done);
-
- return result.value();
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ Node* combined_instance_type =
+ Word32And(left_instance_type, right_instance_type);
+ TNode<Map> result_map = CAST(Select<Object>(
+ IsSetWord32(combined_instance_type, kStringEncodingMask),
+ [=] { return LoadRoot(RootIndex::kConsOneByteStringMap); },
+ [=] { return LoadRoot(RootIndex::kConsStringMap); }));
+ Node* result = AllocateInNewSpace(ConsString::kSize);
+ StoreMapNoWriteBarrier(result, result_map);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
+ MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
+ Int32Constant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, left);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, right);
+ return CAST(result);
}
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
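The AllocateConsString rewrite above drops the old one-byte-data-hint heuristic and selects the map purely from the encoding bits: since kTwoByteStringTag == 0 and kOneByteStringTag != 0, AND-ing the two instance types leaves the encoding bit set only when both inputs are one-byte. A minimal sketch with illustrative bit values (not V8's real instance-type constants):

#include <cstdint>
#include <iostream>

constexpr uint32_t kStringEncodingMask = 1u << 3;  // illustrative bit
constexpr uint32_t kOneByteStringTag = 1u << 3;
constexpr uint32_t kTwoByteStringTag = 0;

const char* ConsMapFor(uint32_t left_type, uint32_t right_type) {
  // One-byte result iff both inputs carry the one-byte encoding bit.
  uint32_t combined = left_type & right_type;
  return (combined & kStringEncodingMask) ? "ConsOneByteStringMap"
                                          : "ConsStringMap";
}

int main() {
  std::cout << ConsMapFor(kOneByteStringTag, kOneByteStringTag) << "\n";
  std::cout << ConsMapFor(kOneByteStringTag, kTwoByteStringTag) << "\n";
}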
@@ -3597,11 +3560,11 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
const std::function<void(Node*, Label*, Label*)>& key_compare,
Variable* entry_start_position, Label* entry_found, Label* not_found) {
// Get the index of the bucket.
- Node* const number_of_buckets = SmiUntag(CAST(LoadFixedArrayElement(
+ Node* const number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
CAST(table), CollectionType::NumberOfBucketsIndex())));
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
- Node* const first_entry = SmiUntag(CAST(LoadFixedArrayElement(
+ Node* const first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
CAST(table), bucket,
CollectionType::HashTableStartIndex() * kTaggedSize)));
@@ -3626,9 +3589,9 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
UintPtrLessThan(
var_entry.value(),
SmiUntag(SmiAdd(
- CAST(LoadFixedArrayElement(
+ CAST(UnsafeLoadFixedArrayElement(
CAST(table), CollectionType::NumberOfElementsIndex())),
- CAST(LoadFixedArrayElement(
+ CAST(UnsafeLoadFixedArrayElement(
CAST(table),
CollectionType::NumberOfDeletedElementsIndex()))))));
@@ -3639,7 +3602,7 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
number_of_buckets);
// Load the key from the entry.
- Node* const candidate_key = LoadFixedArrayElement(
+ Node* const candidate_key = UnsafeLoadFixedArrayElement(
CAST(table), entry_start,
CollectionType::HashTableStartIndex() * kTaggedSize);
@@ -3647,7 +3610,7 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
BIND(&continue_next_entry);
// Load the index of the next entry in the bucket chain.
- var_entry.Bind(SmiUntag(CAST(LoadFixedArrayElement(
+ var_entry.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement(
CAST(table), entry_start,
(CollectionType::HashTableStartIndex() + CollectionType::kChainOffset) *
kTaggedSize))));
@@ -3832,9 +3795,10 @@ TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
IntPtrConstant(JSArray::kMaxFastArrayLength)));
}
-TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
- TNode<Map> array_map, TNode<Smi> length, Node* allocation_site) {
- Comment("begin allocation of JSArray without elements");
+TNode<JSArray> CodeStubAssembler::AllocateJSArray(
+ TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length,
+ Node* allocation_site) {
+ Comment("begin allocation of JSArray passing in elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
int base_size = JSArray::kSize;
@@ -3843,7 +3807,10 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
}
TNode<IntPtrT> size = IntPtrConstant(base_size);
- return AllocateUninitializedJSArray(array_map, length, allocation_site, size);
+ TNode<JSArray> result =
+ AllocateUninitializedJSArray(array_map, length, allocation_site, size);
+ StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, elements);
+ return result;
}
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
@@ -3855,81 +3822,99 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
- int base_size = JSArray::kSize;
- if (allocation_site != nullptr) base_size += AllocationMemento::kSize;
+ TVARIABLE(JSArray, array);
+ TVARIABLE(FixedArrayBase, elements);
- const int elements_offset = base_size;
+ if (IsIntPtrOrSmiConstantZero(capacity, capacity_mode)) {
+ TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
+ array = AllocateJSArray(array_map, empty_array, length, allocation_site);
+ return {array.value(), empty_array};
+ }
- // Compute space for elements
- base_size += FixedArray::kHeaderSize;
- TNode<IntPtrT> size =
- ElementOffsetFromIndex(capacity, kind, capacity_mode, base_size);
+ Label out(this), empty(this), nonempty(this);
- TVARIABLE(JSArray, array);
- TVARIABLE(FixedArrayBase, elements);
+ Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
+ &empty, &nonempty);
- Label out(this);
+ BIND(&empty);
+ {
+ TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
+ array = AllocateJSArray(array_map, empty_array, length, allocation_site);
+ elements = empty_array;
+ Goto(&out);
+ }
- // For very large arrays in which the requested allocation exceeds the
- // maximal size of a regular heap object, we cannot use the allocation
- // folding trick. Instead, we first allocate the elements in large object
- // space, and then allocate the JSArray (and possibly the allocation memento)
- // in new space.
- if (allocation_flags & kAllowLargeObjectAllocation) {
- Label next(this);
- GotoIf(IsRegularHeapObjectSize(size), &next);
+ BIND(&nonempty);
+ {
+ int base_size = JSArray::kSize;
+ if (allocation_site != nullptr) base_size += AllocationMemento::kSize;
- CSA_CHECK(this, IsValidFastJSArrayCapacity(capacity, capacity_mode));
+ const int elements_offset = base_size;
- // Allocate and initialize the elements first. Full initialization is needed
- // because the upcoming JSArray allocation could trigger GC.
- elements =
- AllocateFixedArray(kind, capacity, capacity_mode, allocation_flags);
+ // Compute space for elements
+ base_size += FixedArray::kHeaderSize;
+ TNode<IntPtrT> size =
+ ElementOffsetFromIndex(capacity, kind, capacity_mode, base_size);
- if (IsDoubleElementsKind(kind)) {
- FillFixedDoubleArrayWithZero(CAST(elements.value()),
- ParameterToIntPtr(capacity, capacity_mode));
- } else {
- FillFixedArrayWithSmiZero(CAST(elements.value()),
- ParameterToIntPtr(capacity, capacity_mode));
+ // For very large arrays in which the requested allocation exceeds the
+ // maximal size of a regular heap object, we cannot use the allocation
+ // folding trick. Instead, we first allocate the elements in large object
+ // space, and then allocate the JSArray (and possibly the allocation
+ // memento) in new space.
+ if (allocation_flags & kAllowLargeObjectAllocation) {
+ Label next(this);
+ GotoIf(IsRegularHeapObjectSize(size), &next);
+
+ CSA_CHECK(this, IsValidFastJSArrayCapacity(capacity, capacity_mode));
+
+ // Allocate and initialize the elements first. Full initialization is
+ // needed because the upcoming JSArray allocation could trigger GC.
+ elements =
+ AllocateFixedArray(kind, capacity, capacity_mode, allocation_flags);
+
+ if (IsDoubleElementsKind(kind)) {
+ FillFixedDoubleArrayWithZero(CAST(elements.value()),
+ ParameterToIntPtr(capacity, capacity_mode));
+ } else {
+ FillFixedArrayWithSmiZero(CAST(elements.value()),
+ ParameterToIntPtr(capacity, capacity_mode));
+ }
+
+ // The JSArray and possibly allocation memento next. Note that
+ // allocation_flags are *not* passed on here and the resulting JSArray
+ // will always be in new space.
+ array =
+ AllocateJSArray(array_map, elements.value(), length, allocation_site);
+
+ Goto(&out);
+
+ BIND(&next);
}
- // The JSArray and possibly allocation memento next. Note that
- // allocation_flags are *not* passed on here and the resulting JSArray will
- // always be in new space.
- array = AllocateUninitializedJSArrayWithoutElements(array_map, length,
- allocation_site);
- StoreObjectFieldNoWriteBarrier(array.value(), JSObject::kElementsOffset,
- elements.value());
+ // Fold all objects into a single new space allocation.
+ array =
+ AllocateUninitializedJSArray(array_map, length, allocation_site, size);
+ elements = UncheckedCast<FixedArrayBase>(
+ InnerAllocate(array.value(), elements_offset));
+ StoreObjectFieldNoWriteBarrier(array.value(), JSObject::kElementsOffset,
+ elements.value());
+
+ // Setup elements object.
+ STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
+ RootIndex elements_map_index = IsDoubleElementsKind(kind)
+ ? RootIndex::kFixedDoubleArrayMap
+ : RootIndex::kFixedArrayMap;
+ DCHECK(RootsTable::IsImmortalImmovable(elements_map_index));
+ StoreMapNoWriteBarrier(elements.value(), elements_map_index);
+
+ TNode<Smi> capacity_smi = ParameterToTagged(capacity, capacity_mode);
+ CSA_ASSERT(this, SmiGreaterThan(capacity_smi, SmiConstant(0)));
+ StoreObjectFieldNoWriteBarrier(elements.value(), FixedArray::kLengthOffset,
+ capacity_smi);
Goto(&out);
-
- BIND(&next);
}
- // Fold all objects into a single new space allocation.
- array =
- AllocateUninitializedJSArray(array_map, length, allocation_site, size);
- elements = UncheckedCast<FixedArrayBase>(
- InnerAllocate(array.value(), elements_offset));
-
- StoreObjectFieldNoWriteBarrier(array.value(), JSObject::kElementsOffset,
- elements.value());
-
- // Setup elements object.
- STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
- RootIndex elements_map_index = IsDoubleElementsKind(kind)
- ? RootIndex::kFixedDoubleArrayMap
- : RootIndex::kFixedArrayMap;
- DCHECK(RootsTable::IsImmortalImmovable(elements_map_index));
- StoreMapNoWriteBarrier(elements.value(), elements_map_index);
-
- TNode<Smi> capacity_smi = ParameterToTagged(capacity, capacity_mode);
- CSA_ASSERT(this, SmiGreaterThan(capacity_smi, SmiConstant(0)));
- StoreObjectFieldNoWriteBarrier(elements.value(), FixedArray::kLengthOffset,
- capacity_smi);
- Goto(&out);
-
BIND(&out);
return {array.value(), elements.value()};
}
@@ -3964,63 +3949,25 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
TNode<JSArray> array;
TNode<FixedArrayBase> elements;
- int capacity_as_constant;
- if (IsIntPtrOrSmiConstantZero(capacity, capacity_mode)) {
- // Array is empty. Use the shared empty fixed array instead of allocating a
- // new one.
- array = AllocateUninitializedJSArrayWithoutElements(array_map, length,
- allocation_site);
- StoreObjectFieldRoot(array, JSArray::kElementsOffset,
- RootIndex::kEmptyFixedArray);
- } else if (TryGetIntPtrOrSmiConstantValue(capacity, &capacity_as_constant,
- capacity_mode)) {
- CHECK_GT(capacity_as_constant, 0);
- // Allocate both array and elements object, and initialize the JSArray.
- std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- kind, array_map, length, allocation_site, capacity, capacity_mode,
- allocation_flags);
- // Fill in the elements with holes.
- FillFixedArrayWithValue(kind, elements,
- IntPtrOrSmiConstant(0, capacity_mode), capacity,
- RootIndex::kTheHoleValue, capacity_mode);
- } else {
- Label out(this), empty(this), nonempty(this);
- TVARIABLE(JSArray, var_array);
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ kind, array_map, length, allocation_site, capacity, capacity_mode,
+ allocation_flags);
- Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
- &empty, &nonempty);
+ Label out(this), nonempty(this);
- BIND(&empty);
- {
- // Array is empty. Use the shared empty fixed array instead of allocating
- // a new one.
- var_array = AllocateUninitializedJSArrayWithoutElements(array_map, length,
- allocation_site);
- StoreObjectFieldRoot(var_array.value(), JSArray::kElementsOffset,
- RootIndex::kEmptyFixedArray);
- Goto(&out);
- }
-
- BIND(&nonempty);
- {
- // Allocate both array and elements object, and initialize the JSArray.
- TNode<JSArray> array;
- std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- kind, array_map, length, allocation_site, capacity, capacity_mode,
- allocation_flags);
- var_array = array;
- // Fill in the elements with holes.
- FillFixedArrayWithValue(kind, elements,
- IntPtrOrSmiConstant(0, capacity_mode), capacity,
- RootIndex::kTheHoleValue, capacity_mode);
- Goto(&out);
- }
+ Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
+ &out, &nonempty);
- BIND(&out);
- array = var_array.value();
+ BIND(&nonempty);
+ {
+ FillFixedArrayWithValue(kind, elements,
+ IntPtrOrSmiConstant(0, capacity_mode), capacity,
+ RootIndex::kTheHoleValue, capacity_mode);
+ Goto(&out);
}
+ BIND(&out);
return array;
}
@@ -4035,13 +3982,12 @@ Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array,
Node* native_context = LoadNativeContext(context);
TNode<Map> array_map = LoadJSArrayElementsMap(elements_kind, native_context);
- Node* new_elements = ExtractFixedArray(
+ TNode<FixedArrayBase> new_elements = ExtractFixedArray(
LoadElements(array), begin, count, capacity,
ExtractFixedArrayFlag::kAllFixedArrays, mode, nullptr, elements_kind);
- TNode<Object> result = AllocateUninitializedJSArrayWithoutElements(
- array_map, ParameterToTagged(count, mode), allocation_site);
- StoreObjectField(result, JSObject::kElementsOffset, new_elements);
+ TNode<Object> result = AllocateJSArray(
+ array_map, new_elements, ParameterToTagged(count, mode), allocation_site);
return result;
}
@@ -4105,9 +4051,8 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
TNode<Map> array_map =
LoadJSArrayElementsMap(var_elements_kind.value(), native_context);
- TNode<Object> result = AllocateUninitializedJSArrayWithoutElements(
- array_map, CAST(length), allocation_site);
- StoreObjectField(result, JSObject::kElementsOffset, var_new_elements.value());
+ TNode<Object> result = AllocateJSArray(
+ array_map, CAST(var_new_elements.value()), CAST(length), allocation_site);
return result;
}
@@ -4869,8 +4814,9 @@ void CodeStubAssembler::CopyFixedArrayElements(
doubles_to_objects_conversion ||
(barrier_mode == UPDATE_WRITE_BARRIER && IsObjectElementsKind(to_kind));
bool element_offset_matches =
- !needs_write_barrier && (Is64() || IsDoubleElementsKind(from_kind) ==
- IsDoubleElementsKind(to_kind));
+ !needs_write_barrier &&
+ (kTaggedSize == kDoubleSize ||
+ IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind));
Node* double_hole =
Is64() ? ReinterpretCast<UintPtrT>(Int64Constant(kHoleNanInt64))
: ReinterpretCast<UintPtrT>(Int32Constant(kHoleNanLower32));
@@ -5671,20 +5617,28 @@ TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(
return result.value();
}
-TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
- TNode<Number> value) {
+TNode<UintPtrT> CodeStubAssembler::TryNumberToUintPtr(TNode<Number> value,
+ Label* if_negative) {
TVARIABLE(UintPtrT, result);
Label done(this, &result);
Branch(TaggedIsSmi(value),
[&] {
TNode<Smi> value_smi = CAST(value);
- CSA_SLOW_ASSERT(this, SmiLessThan(SmiConstant(-1), value_smi));
+ if (if_negative == nullptr) {
+ CSA_SLOW_ASSERT(this, SmiLessThan(SmiConstant(-1), value_smi));
+ } else {
+ GotoIfNot(TaggedIsPositiveSmi(value), if_negative);
+ }
result = UncheckedCast<UintPtrT>(SmiToIntPtr(value_smi));
Goto(&done);
},
[&] {
TNode<HeapNumber> value_hn = CAST(value);
- result = ChangeFloat64ToUintPtr(LoadHeapNumberValue(value_hn));
+ TNode<Float64T> value = LoadHeapNumberValue(value_hn);
+ if (if_negative != nullptr) {
+ GotoIf(Float64LessThan(value, Float64Constant(0.0)), if_negative);
+ }
+ result = ChangeFloat64ToUintPtr(value);
Goto(&done);
});
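The hunk above generalizes ChangeNonnegativeNumberToUintPtr into TryNumberToUintPtr: when the caller supplies an if_negative label, negative inputs branch there instead of being ruled out by a slow assert. A rough plain-C++ equivalent of the contract for the heap-number path, with std::optional standing in for the CSA label (illustrative only):

#include <cstdint>
#include <optional>

std::optional<uintptr_t> TryNumberToUintPtr(double value) {
  // With a bailout available, negative values take it instead of asserting.
  if (value < 0.0) return std::nullopt;
  return static_cast<uintptr_t>(value);
}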
@@ -6085,12 +6039,6 @@ TNode<BoolT> CodeStubAssembler::IsOneByteStringInstanceType(
Int32Constant(kOneByteStringTag));
}
-TNode<BoolT> CodeStubAssembler::HasOnlyOneByteChars(
- TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
- return IsSetWord32(instance_type, kStringEncodingMask | kOneByteDataHintMask);
-}
-
TNode<BoolT> CodeStubAssembler::IsSequentialStringInstanceType(
SloppyTNode<Int32T> instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
@@ -6391,6 +6339,34 @@ TNode<BoolT> CodeStubAssembler::IsSymbol(SloppyTNode<HeapObject> object) {
return IsSymbolMap(LoadMap(object));
}
+TNode<BoolT> CodeStubAssembler::IsInternalizedStringInstanceType(
+ TNode<Int32T> instance_type) {
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ return Word32Equal(
+ Word32And(instance_type,
+ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
+ Int32Constant(kStringTag | kInternalizedTag));
+}
+
+TNode<BoolT> CodeStubAssembler::IsUniqueName(TNode<HeapObject> object) {
+ TNode<Int32T> instance_type = LoadInstanceType(object);
+ return Select<BoolT>(
+ IsInternalizedStringInstanceType(instance_type),
+ [=] { return Int32TrueConstant(); },
+ [=] { return IsSymbolInstanceType(instance_type); });
+}
+
+TNode<BoolT> CodeStubAssembler::IsUniqueNameNoIndex(TNode<HeapObject> object) {
+ TNode<Int32T> instance_type = LoadInstanceType(object);
+ return Select<BoolT>(
+ IsInternalizedStringInstanceType(instance_type),
+ [=] {
+ return IsSetWord32(LoadNameHashField(CAST(object)),
+ Name::kIsNotArrayIndexMask);
+ },
+ [=] { return IsSymbolInstanceType(instance_type); });
+}
+
TNode<BoolT> CodeStubAssembler::IsBigIntInstanceType(
SloppyTNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, BIGINT_TYPE);
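IsInternalizedStringInstanceType above leans on kNotInternalizedTag being nonzero so that a single AND-and-compare tests "is a string" and "is internalized" at once; IsUniqueName then only needs a symbol check for the other branch. A standalone sketch of the mask trick with illustrative tag values (not V8's actual instance-type constants):

#include <cstdint>

// Illustrative layout: one bit means "not a string", another "not internalized".
constexpr uint32_t kIsNotStringMask = 0x80;
constexpr uint32_t kStringTag = 0x00;
constexpr uint32_t kIsNotInternalizedMask = 0x40;
constexpr uint32_t kInternalizedTag = 0x00;

inline bool IsInternalizedStringInstanceType(uint32_t instance_type) {
  // Both properties checked with one AND and one compare.
  return (instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
         (kStringTag | kInternalizedTag);
}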
@@ -6714,7 +6690,7 @@ TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) {
// cache already.
Label if_entryisundefined(this, Label::kDeferred),
if_entryisnotundefined(this);
- Node* entry = LoadFixedArrayElement(cache, code_index);
+ Node* entry = UnsafeLoadFixedArrayElement(cache, code_index);
Branch(IsUndefined(entry), &if_entryisundefined, &if_entryisnotundefined);
BIND(&if_entryisundefined);
@@ -7166,8 +7142,7 @@ void CodeStubAssembler::MaybeDerefIndirectStrings(Variable* var_left,
}
TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
- TNode<String> right,
- AllocationFlags flags) {
+ TNode<String> right) {
TVARIABLE(String, result);
Label check_right(this), runtime(this, Label::kDeferred), cons(this),
done(this, &result), done_native(this, &result);
@@ -7203,7 +7178,7 @@ TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
&non_cons);
result =
- NewConsString(new_length, var_left.value(), var_right.value(), flags);
+ AllocateConsString(new_length, var_left.value(), var_right.value());
Goto(&done_native);
BIND(&non_cons);
@@ -7394,7 +7369,8 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
WordAnd(word_hash, WordSar(mask, SmiShiftBitsConstant()));
// Cache entry's key must be a heap number
- Node* number_key = LoadFixedArrayElement(CAST(number_string_cache), index);
+ Node* number_key =
+ UnsafeLoadFixedArrayElement(CAST(number_string_cache), index);
GotoIf(TaggedIsSmi(number_key), &runtime);
GotoIfNot(IsHeapNumber(number_key), &runtime);
@@ -7407,8 +7383,8 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
GotoIfNot(Word32Equal(high, high_compare), &runtime);
// Heap number match, return value from cache entry.
- result = CAST(
- LoadFixedArrayElement(CAST(number_string_cache), index, kTaggedSize));
+ result = CAST(UnsafeLoadFixedArrayElement(CAST(number_string_cache), index,
+ kTaggedSize));
Goto(&done);
}
@@ -7417,13 +7393,13 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
// Load the smi key, make sure it matches the smi we're looking for.
Node* smi_index = BitcastWordToTagged(
WordAnd(WordShl(BitcastTaggedToWord(smi_input.value()), one), mask));
- Node* smi_key = LoadFixedArrayElement(CAST(number_string_cache), smi_index,
- 0, SMI_PARAMETERS);
+ Node* smi_key = UnsafeLoadFixedArrayElement(CAST(number_string_cache),
+ smi_index, 0, SMI_PARAMETERS);
GotoIf(WordNotEqual(smi_key, smi_input.value()), &runtime);
// Smi match, return value from cache entry.
- result = CAST(LoadFixedArrayElement(CAST(number_string_cache), smi_index,
- kTaggedSize, SMI_PARAMETERS));
+ result = CAST(UnsafeLoadFixedArrayElement(
+ CAST(number_string_cache), smi_index, kTaggedSize, SMI_PARAMETERS));
Goto(&done);
}
@@ -8371,6 +8347,7 @@ void CodeStubAssembler::NameDictionaryLookup(
DCHECK_IMPLIES(mode == kFindInsertionIndex,
inlined_probes == 0 && if_found == nullptr);
Comment("NameDictionaryLookup");
+ CSA_ASSERT(this, IsUniqueName(unique_name));
TNode<IntPtrT> capacity = SmiUntag(GetCapacity<Dictionary>(dictionary));
TNode<WordT> mask = IntPtrSub(capacity, IntPtrConstant(1));
@@ -8385,7 +8362,8 @@ void CodeStubAssembler::NameDictionaryLookup(
TNode<IntPtrT> index = EntryToIndex<Dictionary>(entry);
*var_name_index = index;
- TNode<HeapObject> current = CAST(LoadFixedArrayElement(dictionary, index));
+ TNode<HeapObject> current =
+ CAST(UnsafeLoadFixedArrayElement(dictionary, index));
GotoIf(WordEqual(current, undefined), if_not_found);
current = LoadName<Dictionary>(current);
GotoIf(WordEqual(current, unique_name), if_found);
@@ -8496,7 +8474,7 @@ void CodeStubAssembler::NumberDictionaryLookup(
TNode<IntPtrT> entry = var_entry->value();
TNode<IntPtrT> index = EntryToIndex<NumberDictionary>(entry);
- Node* current = LoadFixedArrayElement(dictionary, index);
+ Node* current = UnsafeLoadFixedArrayElement(dictionary, index);
GotoIf(WordEqual(current, undefined), if_not_found);
Label next_probe(this);
{
@@ -8686,6 +8664,7 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
std::is_base_of<DescriptorArray, Array>::value,
"T must be a descendant of FixedArray or a WeakFixedArray");
Comment("LookupLinear");
+ CSA_ASSERT(this, IsUniqueName(unique_name));
TNode<IntPtrT> first_inclusive = IntPtrConstant(Array::ToKeyIndex(0));
TNode<IntPtrT> factor = IntPtrConstant(Array::kEntrySize);
TNode<IntPtrT> last_exclusive = IntPtrAdd(
@@ -8873,7 +8852,8 @@ void CodeStubAssembler::DescriptorArrayForEach(
void CodeStubAssembler::ForEachEnumerableOwnProperty(
TNode<Context> context, TNode<Map> map, TNode<JSObject> object,
- const ForEachKeyValueFunction& body, Label* bailout) {
+ ForEachEnumerationMode mode, const ForEachKeyValueFunction& body,
+ Label* bailout) {
TNode<Int32T> type = LoadMapInstanceType(map);
TNode<Uint32T> bit_field3 = EnsureOnlyHasSimpleProperties(map, type, bailout);
@@ -8882,17 +8862,48 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
DecodeWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3);
TVARIABLE(BoolT, var_stable, Int32TrueConstant());
- VariableList list({&var_stable}, zone());
+
+ TVARIABLE(BoolT, var_has_symbol, Int32FalseConstant());
+ // false - iterate only string properties, true - iterate only symbol
+ // properties.
+ TVARIABLE(BoolT, var_name_filter, Int32FalseConstant());
+ VariableList list({&var_stable, &var_has_symbol, &var_name_filter}, zone());
+ Label descriptor_array_loop(this,
+ {&var_stable, &var_has_symbol, &var_name_filter});
+
+ Goto(&descriptor_array_loop);
+ BIND(&descriptor_array_loop);
DescriptorArrayForEach(
list, Unsigned(Int32Constant(0)), nof_descriptors,
- [=, &var_stable](TNode<IntPtrT> descriptor_key_index) {
+ [=, &var_stable, &var_has_symbol,
+ &var_name_filter](TNode<IntPtrT> descriptor_key_index) {
TNode<Name> next_key =
LoadKeyByKeyIndex(descriptors, descriptor_key_index);
TVARIABLE(Object, var_value, SmiConstant(0));
Label callback(this), next_iteration(this);
+ if (mode == kEnumerationOrder) {
+ // |next_key| is either a string or a symbol.
+ // Skip strings or symbols depending on the var_name_filter value.
+ Label if_string(this), if_symbol(this), if_name_ok(this);
+
+ Branch(IsSymbol(next_key), &if_symbol, &if_string);
+ BIND(&if_symbol);
+ {
+ var_has_symbol = Int32TrueConstant();
+ // Process the symbol property when |var_name_filter| is true.
+ Branch(var_name_filter.value(), &if_name_ok, &next_iteration);
+ }
+ BIND(&if_string);
+ {
+ CSA_ASSERT(this, IsString(next_key));
+ // Process the string property when |var_name_filter| is false.
+ Branch(var_name_filter.value(), &next_iteration, &if_name_ok);
+ }
+ BIND(&if_name_ok);
+ }
{
TVARIABLE(Map, var_map);
TVARIABLE(HeapObject, var_meta_storage);
@@ -8985,9 +8996,19 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
Goto(&next_iteration);
}
}
-
BIND(&next_iteration);
});
+
+ if (mode == kEnumerationOrder) {
+ Label done(this);
+ GotoIf(var_name_filter.value(), &done);
+ GotoIfNot(var_has_symbol.value(), &done);
+ // All string properties are processed, now process symbol properties.
+ var_name_filter = Int32TrueConstant();
+ Goto(&descriptor_array_loop);
+
+ BIND(&done);
+ }
}
void CodeStubAssembler::DescriptorLookup(
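In kEnumerationOrder mode, the loop above makes up to two passes over the descriptor array: a first pass visiting only string-named properties while recording whether any symbol was seen, then, only if one was, a second pass visiting only symbols. A plain-C++ sketch of that control flow, with symbol_pass standing in for var_name_filter (illustrative, not CSA code):

#include <functional>
#include <string>
#include <vector>

struct Key {
  std::string name;
  bool is_symbol;
};

void ForEachInEnumerationOrder(const std::vector<Key>& keys,
                               const std::function<void(const Key&)>& body) {
  bool has_symbol = false;
  bool symbol_pass = false;  // plays the role of var_name_filter
  do {
    for (const Key& key : keys) {
      if (key.is_symbol) {
        has_symbol = true;
        if (!symbol_pass) continue;  // strings-only pass: skip symbols
      } else if (symbol_pass) {
        continue;  // symbols-only pass: skip strings
      }
      body(key);
    }
    if (symbol_pass || !has_symbol) break;
    symbol_pass = true;  // strings done; run again for symbols
  } while (true);
}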
@@ -9054,6 +9075,7 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
Label* if_not_found) {
CSA_ASSERT(this, IsSimpleObjectMap(map));
+ CSA_ASSERT(this, IsUniqueNameNoIndex(unique_name));
TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
Label if_isfastmap(this), if_isslowmap(this);
@@ -9116,6 +9138,7 @@ void CodeStubAssembler::TryHasOwnProperty(Node* object, Node* map,
Label* if_not_found,
Label* if_bailout) {
Comment("TryHasOwnProperty");
+ CSA_ASSERT(this, IsUniqueNameNoIndex(CAST(unique_name)));
TVARIABLE(HeapObject, var_meta_storage);
TVARIABLE(IntPtrT, var_name_index);
@@ -9147,6 +9170,14 @@ Node* CodeStubAssembler::GetMethod(Node* context, Node* object,
return method;
}
+TNode<Object> CodeStubAssembler::GetIteratorMethod(
+ TNode<Context> context, TNode<HeapObject> heap_obj,
+ Label* if_iteratorundefined) {
+ return CAST(GetMethod(context, heap_obj,
+ isolate()->factory()->iterator_symbol(),
+ if_iteratorundefined));
+}
+
void CodeStubAssembler::LoadPropertyFromFastObject(
Node* object, Node* map, TNode<DescriptorArray> descriptors,
Node* name_index, Variable* var_details, Variable* var_value) {
@@ -9406,6 +9437,7 @@ void CodeStubAssembler::TryGetOwnProperty(
Label* if_bailout, GetOwnPropertyMode mode) {
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
Comment("TryGetOwnProperty");
+ CSA_ASSERT(this, IsUniqueNameNoIndex(CAST(unique_name)));
TVARIABLE(HeapObject, var_meta_storage);
TVARIABLE(IntPtrT, var_entry);
@@ -9534,7 +9566,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
GotoIfNot(UintPtrLessThan(intptr_index, length), &if_oob);
- TNode<Object> element = LoadFixedArrayElement(elements, intptr_index);
+ TNode<Object> element = UnsafeLoadFixedArrayElement(elements, intptr_index);
TNode<Oddball> the_hole = TheHoleConstant();
Branch(WordEqual(element, the_hole), if_not_found, if_found);
}
@@ -9957,57 +9989,6 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector,
BIND(&end);
}
-Node* CodeStubAssembler::GetLanguageMode(
- TNode<SharedFunctionInfo> shared_function_info, Node* context) {
- VARIABLE(var_language_mode, MachineRepresentation::kTaggedSigned,
- SmiConstant(LanguageMode::kStrict));
- Label language_mode_determined(this), language_mode_sloppy(this);
-
- // Get the language mode from SFI
- TNode<Uint32T> closure_is_strict =
- DecodeWord32<SharedFunctionInfo::IsStrictBit>(LoadObjectField(
- shared_function_info, SharedFunctionInfo::kFlagsOffset,
- MachineType::Uint32()));
- // It is already strict, we need not check context's language mode.
- GotoIf(closure_is_strict, &language_mode_determined);
-
- // SFI::LanguageMode is sloppy, check if context has a stricter mode.
- TNode<ScopeInfo> scope_info =
- CAST(LoadObjectField(context, Context::kScopeInfoOffset));
- // If no flags field assume sloppy
- GotoIf(SmiLessThanOrEqual(LoadFixedArrayBaseLength(scope_info),
- SmiConstant(ScopeInfo::Fields::kFlags)),
- &language_mode_sloppy);
- TNode<Smi> flags = CAST(LoadFixedArrayElement(
- scope_info, SmiConstant(ScopeInfo::Fields::kFlags)));
- TNode<Uint32T> context_is_strict =
- DecodeWord32<ScopeInfo::LanguageModeField>(SmiToInt32(flags));
- GotoIf(context_is_strict, &language_mode_determined);
- Goto(&language_mode_sloppy);
-
- // Both Context::ScopeInfo::LanguageMode and SFI::LanguageMode are sloppy.
- BIND(&language_mode_sloppy);
- var_language_mode.Bind(SmiConstant(LanguageMode::kSloppy));
- Goto(&language_mode_determined);
-
- BIND(&language_mode_determined);
- return var_language_mode.value();
-}
-
-Node* CodeStubAssembler::GetLanguageMode(TNode<JSFunction> closure,
- Node* context) {
- TNode<SharedFunctionInfo> sfi =
- CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
- return GetLanguageMode(sfi, context);
-}
-
-Node* CodeStubAssembler::GetLanguageMode(TNode<FeedbackVector> vector,
- Node* context) {
- TNode<SharedFunctionInfo> sfi =
- CAST(LoadObjectField(vector, FeedbackVector::kSharedFunctionInfoOffset));
- return GetLanguageMode(sfi, context);
-}
-
void CodeStubAssembler::ReportFeedbackUpdate(
SloppyTNode<FeedbackVector> feedback_vector, SloppyTNode<IntPtrT> slot_id,
const char* reason) {
@@ -10091,8 +10072,9 @@ TNode<IntPtrT> CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
return var_intptr_key.value();
}
-Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
- Node* value, Label* bailout) {
+Node* CodeStubAssembler::EmitKeyedSloppyArguments(
+ Node* receiver, Node* key, Node* value, Label* bailout,
+ ArgumentsAccessMode access_mode) {
// Mapped arguments are actual arguments. Unmapped arguments are values added
// to the arguments object after it was created for the call. Mapped arguments
// are stored in the context at indexes given by elements[key + 2]. Unmapped
@@ -10119,8 +10101,6 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
// index into the context array given at elements[0]. Return the value at
// context[t].
- bool is_load = value == nullptr;
-
GotoIfNot(TaggedIsSmi(key), bailout);
key = SmiUntag(key);
GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout);
@@ -10129,8 +10109,11 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
TNode<IntPtrT> elements_length = LoadAndUntagFixedArrayBaseLength(elements);
VARIABLE(var_result, MachineRepresentation::kTagged);
- if (!is_load) {
+ if (access_mode == ArgumentsAccessMode::kStore) {
var_result.Bind(value);
+ } else {
+ DCHECK(access_mode == ArgumentsAccessMode::kLoad ||
+ access_mode == ArgumentsAccessMode::kHas);
}
Label if_mapped(this), if_unmapped(this), end(this, &var_result);
Node* intptr_two = IntPtrConstant(2);
@@ -10146,10 +10129,14 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
{
TNode<IntPtrT> mapped_index_intptr = SmiUntag(CAST(mapped_index));
TNode<Context> the_context = CAST(LoadFixedArrayElement(elements, 0));
- if (is_load) {
+ if (access_mode == ArgumentsAccessMode::kLoad) {
Node* result = LoadContextElement(the_context, mapped_index_intptr);
CSA_ASSERT(this, WordNotEqual(result, TheHoleConstant()));
var_result.Bind(result);
+ } else if (access_mode == ArgumentsAccessMode::kHas) {
+ CSA_ASSERT(this, Word32BinaryNot(IsTheHole(LoadContextElement(
+ the_context, mapped_index_intptr))));
+ var_result.Bind(TrueConstant());
} else {
StoreContextElement(the_context, mapped_index_intptr, value);
}
@@ -10166,17 +10153,31 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
TNode<IntPtrT> backing_store_length =
LoadAndUntagFixedArrayBaseLength(backing_store);
- GotoIf(UintPtrGreaterThanOrEqual(key, backing_store_length), bailout);
-
- // The key falls into unmapped range.
- if (is_load) {
+ if (access_mode == ArgumentsAccessMode::kHas) {
+ Label out_of_bounds(this);
+ GotoIf(UintPtrGreaterThanOrEqual(key, backing_store_length),
+ &out_of_bounds);
Node* result = LoadFixedArrayElement(backing_store, key);
- GotoIf(WordEqual(result, TheHoleConstant()), bailout);
- var_result.Bind(result);
+ var_result.Bind(
+ SelectBooleanConstant(WordNotEqual(result, TheHoleConstant())));
+ Goto(&end);
+
+ BIND(&out_of_bounds);
+ var_result.Bind(FalseConstant());
+ Goto(&end);
} else {
- StoreFixedArrayElement(backing_store, key, value);
+ GotoIf(UintPtrGreaterThanOrEqual(key, backing_store_length), bailout);
+
+ // The key falls into unmapped range.
+ if (access_mode == ArgumentsAccessMode::kLoad) {
+ Node* result = LoadFixedArrayElement(backing_store, key);
+ GotoIf(WordEqual(result, TheHoleConstant()), bailout);
+ var_result.Bind(result);
+ } else {
+ StoreFixedArrayElement(backing_store, key, value);
+ }
+ Goto(&end);
}
- Goto(&end);
}
BIND(&end);
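
The mapped/unmapped split described in the comment at the top of EmitKeyedSloppyArguments, and the behaviour of the new kHas mode, can be modelled in a few lines. A hedged sketch (toy types and a flattened layout; V8 packs all of this into a single elements array):

    #include <cassert>
    #include <cstddef>
    #include <optional>
    #include <vector>

    // Toy layout for sloppy arguments, mirroring the comment above: V8 stores
    // the context at slot 0, the unmapped backing store at slot 1, and the
    // per-argument context indexes at slots key + 2; here they are separate
    // fields for readability.
    struct SloppyArguments {
      std::vector<std::optional<int>> context;        // mapped storage
      std::vector<std::optional<int>> backing_store;  // unmapped storage
      std::vector<std::optional<size_t>> mapping;     // "elements[key + 2]"
    };

    // ArgumentsAccessMode::kLoad: mapped lookup first, then backing store.
    std::optional<int> Load(const SloppyArguments& a, size_t key) {
      if (key < a.mapping.size() && a.mapping[key]) {
        return a.context[*a.mapping[key]];  // mapped: read the context slot
      }
      if (key >= a.backing_store.size()) return std::nullopt;
      return a.backing_store[key];          // unmapped: may be a hole
    }

    // ArgumentsAccessMode::kHas: mapped entries are present by construction,
    // so only the unmapped range needs a bounds/hole test (cf. the kHas
    // branches added above, which answer false instead of bailing out).
    bool Has(const SloppyArguments& a, size_t key) {
      if (key < a.mapping.size() && a.mapping[key]) return true;
      return key < a.backing_store.size() && a.backing_store[key].has_value();
    }

    int main() {
      SloppyArguments a;
      a.context = {std::nullopt, 10};  // context slot 1 holds argument 0
      a.mapping = {std::optional<size_t>(1), std::nullopt};
      a.backing_store = {std::nullopt, 20, std::nullopt};
      assert(*Load(a, 0) == 10 && Has(a, 0));   // mapped
      assert(*Load(a, 1) == 20 && Has(a, 1));   // unmapped
      assert(!Load(a, 2) && !Has(a, 2));        // hole (deleted argument)
      assert(!Load(a, 9) && !Has(a, 9));        // out of bounds
    }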
@@ -10703,9 +10704,16 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
TNode<IntPtrT> page_flags =
UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), object_page,
IntPtrConstant(Page::kFlagsOffset)));
- GotoIf(WordEqual(WordAnd(page_flags,
- IntPtrConstant(MemoryChunk::kIsInNewSpaceMask)),
- IntPtrConstant(0)),
+ GotoIf(WordEqual(
+ WordAnd(page_flags,
+ IntPtrConstant(MemoryChunk::kIsInYoungGenerationMask)),
+ IntPtrConstant(0)),
+ &no_memento_found);
+ // TODO(ulan): Support allocation memento for a large object by allocating
+ // an additional word for the memento after the large object.
+ GotoIf(WordNotEqual(WordAnd(page_flags,
+ IntPtrConstant(MemoryChunk::kIsLargePageMask)),
+ IntPtrConstant(0)),
&no_memento_found);
}
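
The rewritten check above now requires two page-flag tests: the page must be in the young generation and must not be a large-object page. A compact sketch of the same bit tests (mask values invented; V8's MemoryChunk defines the real ones):

    #include <cassert>
    #include <cstdint>

    // Illustrative page-flag masks, not V8's actual bit assignments.
    constexpr uintptr_t kIsInYoungGenerationMask = 1u << 0;
    constexpr uintptr_t kIsLargePageMask = 1u << 1;

    // Mirrors the control flow above: mementos are only looked for on pages
    // that are in the young generation AND are not large-object pages.
    bool MayHaveMemento(uintptr_t page_flags) {
      if ((page_flags & kIsInYoungGenerationMask) == 0) return false;
      if ((page_flags & kIsLargePageMask) != 0) return false;
      return true;
    }

    int main() {
      assert(MayHaveMemento(kIsInYoungGenerationMask));
      assert(!MayHaveMemento(0));  // old-generation page
      assert(!MayHaveMemento(kIsInYoungGenerationMask | kIsLargePageMask));
    }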
@@ -10791,7 +10799,7 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
// an initial write barrier backed store makes this pointer strong until the
// next GC, and allocation sites are designed to survive several GCs anyway.
StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
- StoreNoWriteBarrier(MachineRepresentation::kTagged, site_list, site);
+ StoreFullTaggedNoWriteBarrier(site_list, site);
StoreFeedbackVectorSlot(feedback_vector, slot, site, UPDATE_WRITE_BARRIER, 0,
SMI_PARAMETERS);
@@ -10973,63 +10981,63 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison(
TVARIABLE(Float64T, var_left_float);
TVARIABLE(Float64T, var_right_float);
- Branch(TaggedIsSmi(left),
- [&] {
- TNode<Smi> smi_left = CAST(left);
-
- Branch(TaggedIsSmi(right),
- [&] {
- TNode<Smi> smi_right = CAST(right);
-
- // Both {left} and {right} are Smi, so just perform a fast
- // Smi comparison.
- switch (op) {
- case Operation::kEqual:
- BranchIfSmiEqual(smi_left, smi_right, if_true,
- if_false);
- break;
- case Operation::kLessThan:
- BranchIfSmiLessThan(smi_left, smi_right, if_true,
- if_false);
- break;
- case Operation::kLessThanOrEqual:
- BranchIfSmiLessThanOrEqual(smi_left, smi_right, if_true,
- if_false);
- break;
- case Operation::kGreaterThan:
- BranchIfSmiLessThan(smi_right, smi_left, if_true,
- if_false);
- break;
- case Operation::kGreaterThanOrEqual:
- BranchIfSmiLessThanOrEqual(smi_right, smi_left, if_true,
- if_false);
- break;
- default:
- UNREACHABLE();
- }
- },
- [&] {
- CSA_ASSERT(this, IsHeapNumber(right));
- var_left_float = SmiToFloat64(smi_left);
- var_right_float = LoadHeapNumberValue(right);
- Goto(&do_float_comparison);
- });
- },
- [&] {
- CSA_ASSERT(this, IsHeapNumber(left));
- var_left_float = LoadHeapNumberValue(left);
-
- Branch(TaggedIsSmi(right),
- [&] {
- var_right_float = SmiToFloat64(right);
- Goto(&do_float_comparison);
- },
- [&] {
- CSA_ASSERT(this, IsHeapNumber(right));
- var_right_float = LoadHeapNumberValue(right);
- Goto(&do_float_comparison);
- });
- });
+ Branch(
+ TaggedIsSmi(left),
+ [&] {
+ TNode<Smi> smi_left = CAST(left);
+
+ Branch(
+ TaggedIsSmi(right),
+ [&] {
+ TNode<Smi> smi_right = CAST(right);
+
+ // Both {left} and {right} are Smi, so just perform a fast
+ // Smi comparison.
+ switch (op) {
+ case Operation::kEqual:
+ BranchIfSmiEqual(smi_left, smi_right, if_true, if_false);
+ break;
+ case Operation::kLessThan:
+ BranchIfSmiLessThan(smi_left, smi_right, if_true, if_false);
+ break;
+ case Operation::kLessThanOrEqual:
+ BranchIfSmiLessThanOrEqual(smi_left, smi_right, if_true,
+ if_false);
+ break;
+ case Operation::kGreaterThan:
+ BranchIfSmiLessThan(smi_right, smi_left, if_true, if_false);
+ break;
+ case Operation::kGreaterThanOrEqual:
+ BranchIfSmiLessThanOrEqual(smi_right, smi_left, if_true,
+ if_false);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ },
+ [&] {
+ CSA_ASSERT(this, IsHeapNumber(right));
+ var_left_float = SmiToFloat64(smi_left);
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
+ });
+ },
+ [&] {
+ CSA_ASSERT(this, IsHeapNumber(left));
+ var_left_float = LoadHeapNumberValue(left);
+
+ Branch(
+ TaggedIsSmi(right),
+ [&] {
+ var_right_float = SmiToFloat64(right);
+ Goto(&do_float_comparison);
+ },
+ [&] {
+ CSA_ASSERT(this, IsHeapNumber(right));
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
+ });
+ });
BIND(&do_float_comparison);
{
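
This hunk is pure re-indentation, but the switch it reformats is worth a note: only Equal/LessThan/LessThanOrEqual Smi primitives exist, and the GreaterThan cases reuse them with swapped operands. A standalone illustration of that reduction:

    #include <cassert>

    enum class Operation { kEqual, kLessThan, kLessThanOrEqual,
                           kGreaterThan, kGreaterThanOrEqual };

    // a > b is b < a, and a >= b is b <= a, so three primitives suffice.
    bool Compare(Operation op, int left, int right) {
      switch (op) {
        case Operation::kEqual:              return left == right;
        case Operation::kLessThan:           return left < right;
        case Operation::kLessThanOrEqual:    return left <= right;
        case Operation::kGreaterThan:        return right < left;    // swapped
        case Operation::kGreaterThanOrEqual: return right <= left;   // swapped
      }
      return false;  // unreachable
    }

    int main() {
      assert(Compare(Operation::kGreaterThan, 2, 1));
      assert(!Compare(Operation::kGreaterThan, 1, 2));
      assert(Compare(Operation::kGreaterThanOrEqual, 2, 2));
    }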
@@ -13094,36 +13102,34 @@ CodeStubArguments::CodeStubArguments(
arguments_(),
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
Node* offset = assembler_->ElementOffsetFromIndex(
- argc_, PACKED_ELEMENTS, param_mode,
+ argc_, SYSTEM_POINTER_ELEMENTS, param_mode,
(StandardFrameConstants::kFixedSlotCountAboveFp - 1) *
kSystemPointerSize);
- arguments_ = assembler_->UncheckedCast<RawPtr<Object>>(
- assembler_->IntPtrAdd(fp_, offset));
+ arguments_ =
+ assembler_->UncheckedCast<WordT>(assembler_->IntPtrAdd(fp_, offset));
}
TNode<Object> CodeStubArguments::GetReceiver() const {
DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
- return assembler_->UncheckedCast<Object>(
- assembler_->Load(MachineType::AnyTagged(), arguments_,
- assembler_->IntPtrConstant(kSystemPointerSize)));
+ return assembler_->UncheckedCast<Object>(assembler_->LoadFullTagged(
+ arguments_, assembler_->IntPtrConstant(kSystemPointerSize)));
}
void CodeStubArguments::SetReceiver(TNode<Object> object) const {
DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
- assembler_->StoreNoWriteBarrier(
- MachineRepresentation::kTagged, arguments_,
- assembler_->IntPtrConstant(kSystemPointerSize), object);
+ assembler_->StoreFullTaggedNoWriteBarrier(
+ arguments_, assembler_->IntPtrConstant(kSystemPointerSize), object);
}
-TNode<RawPtr<Object>> CodeStubArguments::AtIndexPtr(
+TNode<WordT> CodeStubArguments::AtIndexPtr(
Node* index, CodeStubAssembler::ParameterMode mode) const {
typedef compiler::Node Node;
Node* negated_index = assembler_->IntPtrOrSmiSub(
assembler_->IntPtrOrSmiConstant(0, mode), index, mode);
- Node* offset = assembler_->ElementOffsetFromIndex(negated_index,
- PACKED_ELEMENTS, mode, 0);
- return assembler_->UncheckedCast<RawPtr<Object>>(assembler_->IntPtrAdd(
- assembler_->UncheckedCast<IntPtrT>(arguments_), offset));
+ Node* offset = assembler_->ElementOffsetFromIndex(
+ negated_index, SYSTEM_POINTER_ELEMENTS, mode, 0);
+ return assembler_->IntPtrAdd(assembler_->UncheckedCast<IntPtrT>(arguments_),
+ offset);
}
TNode<Object> CodeStubArguments::AtIndex(
@@ -13132,7 +13138,7 @@ TNode<Object> CodeStubArguments::AtIndex(
CSA_ASSERT(assembler_,
assembler_->UintPtrOrSmiLessThan(index, GetLength(mode), mode));
return assembler_->UncheckedCast<Object>(
- assembler_->Load(MachineType::AnyTagged(), AtIndexPtr(index, mode)));
+ assembler_->LoadFullTagged(AtIndexPtr(index, mode)));
}
TNode<Object> CodeStubArguments::AtIndex(int index) const {
@@ -13752,7 +13758,13 @@ void CodeStubAssembler::GotoIfInitialPrototypePropertiesModified(
}
}
+TNode<String> CodeStubAssembler::TaggedToDirectString(TNode<Object> value,
+ Label* fail) {
+ ToDirectStringAssembler to_direct(state(), value);
+ to_direct.TryToDirect(fail);
+ to_direct.PointerToData(fail);
+ return CAST(value);
+}
+
} // namespace internal
-// TODO(petermarshall): Remove. This is a workaround for crbug.com/v8/8719
-namespace {} // namespace
} // namespace v8
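
On the TaggedToDirectString helper added above: a "direct" string is one whose characters can be addressed as a flat buffer, so cons/sliced/thin strings must be unwrapped (or fail) first. A toy model of that distinction, assuming nothing about V8's real string hierarchy:

    #include <cassert>
    #include <memory>
    #include <string>
    #include <variant>

    struct SeqString { std::string data; };  // flat, directly addressable
    struct ConsString;                       // deferred concatenation
    using String = std::variant<SeqString, std::shared_ptr<ConsString>>;
    struct ConsString { String left, right; };

    // Counterpart of TryToDirect + PointerToData: hand out a data pointer
    // only when the string is already flat; otherwise take the |fail| path.
    // (The real assembler can also dereference one level of indirection.)
    const char* TryGetDirectData(const String& s) {
      if (auto* seq = std::get_if<SeqString>(&s)) return seq->data.c_str();
      return nullptr;  // corresponds to jumping to |fail|
    }

    int main() {
      String flat = SeqString{"abc"};
      String cons = std::make_shared<ConsString>(
          ConsString{SeqString{"ab"}, SeqString{"c"}});
      assert(TryGetDirectData(flat) != nullptr);
      assert(TryGetDirectData(cons) == nullptr);
    }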
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 4dfd176eaa..86cc275c14 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -322,6 +322,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<Smi>(value);
}
+ TNode<String> TaggedToDirectString(TNode<Object> value, Label* fail);
+
TNode<Number> TaggedToNumber(TNode<Object> value, Label* fail) {
GotoIfNot(IsNumber(value), fail);
return UncheckedCast<Number>(value);
@@ -338,6 +340,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<JSArray>(heap_object);
}
+ TNode<JSArrayBuffer> HeapObjectToJSArrayBuffer(TNode<HeapObject> heap_object,
+ Label* fail) {
+ GotoIfNot(IsJSArrayBuffer(heap_object), fail);
+ return UncheckedCast<JSArrayBuffer>(heap_object);
+ }
+
TNode<JSArray> TaggedToFastJSArray(TNode<Context> context,
TNode<Object> value, Label* fail) {
GotoIf(TaggedIsSmi(value), fail);
@@ -396,6 +404,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
uintptr_t ConstexprUintPtrShl(uintptr_t a, int32_t b) { return a << b; }
uintptr_t ConstexprUintPtrShr(uintptr_t a, int32_t b) { return a >> b; }
+ intptr_t ConstexprIntPtrAdd(intptr_t a, intptr_t b) { return a + b; }
+ uintptr_t ConstexprUintPtrAdd(uintptr_t a, uintptr_t b) { return a + b; }
+ intptr_t ConstexprWordNot(intptr_t a) { return ~a; }
+ uintptr_t ConstexprWordNot(uintptr_t a) { return ~a; }
TNode<Object> NoContextConstant();
@@ -662,10 +674,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
template <class... TArgs>
+ TNode<JSReceiver> ConstructWithTarget(TNode<Context> context,
+ TNode<JSReceiver> target,
+ TNode<JSReceiver> new_target,
+ TArgs... args) {
+ return CAST(ConstructJSWithTarget(CodeFactory::Construct(isolate()),
+ context, target, new_target,
+ implicit_cast<TNode<Object>>(args)...));
+ }
+ template <class... TArgs>
TNode<JSReceiver> Construct(TNode<Context> context,
TNode<JSReceiver> new_target, TArgs... args) {
- return CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
- new_target, implicit_cast<TNode<Object>>(args)...));
+ return ConstructWithTarget(context, new_target, new_target, args...);
}
template <class A, class F, class G>
@@ -819,8 +839,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> LoadAndUntagToWord32ObjectField(Node* object, int offset);
// Load a SMI and untag it.
TNode<IntPtrT> LoadAndUntagSmi(Node* base, int index);
- // Load a SMI root, untag it, and convert to Word32.
- TNode<Int32T> LoadAndUntagToWord32Root(RootIndex root_index);
TNode<MaybeObject> LoadMaybeWeakObjectField(SloppyTNode<HeapObject> object,
int offset) {
@@ -995,13 +1013,34 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe,
+ CheckBounds check_bounds = CheckBounds::kAlways);
- TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object,
- TNode<IntPtrT> index,
- LoadSensitivity needs_poisoning) {
+ // This doesn't emit a bounds-check. As part of the security-performance
+ // tradeoff, only use it if it is performance critical.
+ TNode<Object> UnsafeLoadFixedArrayElement(
+ TNode<FixedArray> object, Node* index, int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ return LoadFixedArrayElement(object, index, additional_offset,
+ parameter_mode, needs_poisoning,
+ CheckBounds::kDebugOnly);
+ }
+
+ TNode<Object> LoadFixedArrayElement(
+ TNode<FixedArray> object, TNode<IntPtrT> index,
+ LoadSensitivity needs_poisoning,
+ CheckBounds check_bounds = CheckBounds::kAlways) {
return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS,
- needs_poisoning);
+ needs_poisoning, check_bounds);
+ }
+ // This doesn't emit a bounds-check. As part of the security-performance
+ // tradeoff, only use it if it is performance critical.
+ TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object,
+ TNode<IntPtrT> index,
+ LoadSensitivity needs_poisoning) {
+ return LoadFixedArrayElement(object, index, needs_poisoning,
+ CheckBounds::kDebugOnly);
}
TNode<Object> LoadFixedArrayElement(
@@ -1018,6 +1057,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
additional_offset, INTPTR_PARAMETERS,
needs_poisoning);
}
+ // This doesn't emit a bounds-check. As part of the security-performance
+ // tradeoff, only use it if it is performance critical.
+ TNode<Object> UnsafeLoadFixedArrayElement(
+ TNode<FixedArray> object, int index, int additional_offset = 0,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ return LoadFixedArrayElement(object, IntPtrConstant(index),
+ additional_offset, INTPTR_PARAMETERS,
+ needs_poisoning, CheckBounds::kDebugOnly);
+ }
TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object,
TNode<Smi> index) {
return LoadFixedArrayElement(object, index, 0, SMI_PARAMETERS);
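
The Load/Unsafe pairs introduced above all follow one pattern: checked by default, with an Unsafe variant that merely downgrades the check to debug builds via CheckBounds::kDebugOnly. A minimal sketch of the same API shape in plain C++ (names illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    enum class CheckBounds { kAlways, kDebugOnly };

    int LoadElement(const std::vector<int>& array, size_t index,
                    CheckBounds check_bounds = CheckBounds::kAlways) {
      if (check_bounds == CheckBounds::kAlways) {
        if (index >= array.size()) {
          std::fprintf(stderr, "bounds check failed\n");
          std::abort();
        }
      } else {
        assert(index < array.size());  // compiled out in release builds
      }
      return array[index];
    }

    // Only for callers that have already proven the index in range.
    int UnsafeLoadElement(const std::vector<int>& array, size_t index) {
      return LoadElement(array, index, CheckBounds::kDebugOnly);
    }

    int main() {
      std::vector<int> a = {1, 2, 3};
      size_t i = 2;
      if (i < a.size()) {  // caller-side guard, as in TryLookupElement above
        assert(UnsafeLoadElement(a, i) == 3);
      }
      assert(LoadElement(a, 0) == 1);
    }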
@@ -1113,6 +1161,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
MachineType machine_type = MachineType::Float64());
TNode<RawPtrT> LoadFixedTypedArrayBackingStore(
TNode<FixedTypedArrayBase> typed_array);
+ TNode<RawPtrT> LoadFixedTypedArrayOnHeapBackingStore(
+ TNode<FixedTypedArrayBase> typed_array);
Node* LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
@@ -1174,13 +1224,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BytecodeArray> LoadSharedFunctionInfoBytecodeArray(
SloppyTNode<SharedFunctionInfo> shared);
- TNode<Int32T> LoadSharedFunctionInfoFormalParameterCount(
- TNode<SharedFunctionInfo> function) {
- return TNode<Int32T>::UncheckedCast(LoadObjectField(
- function, SharedFunctionInfo::kFormalParameterCountOffset,
- MachineType::Uint16()));
- }
-
void StoreObjectByteNoWriteBarrier(TNode<HeapObject> object, int offset,
TNode<Word32T> value);
@@ -1205,6 +1248,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
StoreObjectFieldNoWriteBarrier(object, offset, value,
MachineRepresentationOf<T>::value);
}
+ template <class T = Object>
+ void StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object, int offset,
+ TNode<T> value) {
+ StoreObjectFieldNoWriteBarrier(object, offset, value,
+ MachineRepresentationOf<T>::value);
+ }
// Store the Map of an HeapObject.
void StoreMap(Node* object, Node* map);
@@ -1214,14 +1263,33 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Store an array element to a FixedArray.
void StoreFixedArrayElement(
TNode<FixedArray> object, int index, SloppyTNode<Object> value,
- WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ CheckBounds check_bounds = CheckBounds::kAlways) {
return StoreFixedArrayElement(object, IntPtrConstant(index), value,
- barrier_mode);
+ barrier_mode, 0, INTPTR_PARAMETERS,
+ check_bounds);
+ }
+ // This doesn't emit a bounds-check. As part of the security-performance
+ // tradeoff, only use it if it is performance critical.
+ void UnsafeStoreFixedArrayElement(
+ TNode<FixedArray> object, int index, SloppyTNode<Object> value,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
+ return StoreFixedArrayElement(object, index, value, barrier_mode,
+ CheckBounds::kDebugOnly);
}
void StoreFixedArrayElement(TNode<FixedArray> object, int index,
- TNode<Smi> value) {
+ TNode<Smi> value,
+ CheckBounds check_bounds = CheckBounds::kAlways) {
return StoreFixedArrayElement(object, IntPtrConstant(index), value,
- SKIP_WRITE_BARRIER);
+ SKIP_WRITE_BARRIER, 0, INTPTR_PARAMETERS,
+ check_bounds);
+ }
+ // This doesn't emit a bounds-check. As part of the security-performance
+ // tradeoff, only use it if it is performance critical.
+ void UnsafeStoreFixedArrayElement(TNode<FixedArray> object, int index,
+ TNode<Smi> value) {
+ return StoreFixedArrayElement(object, index, value,
+ CheckBounds::kDebugOnly);
}
void StoreJSArrayLength(TNode<JSArray> array, TNode<Smi> length);
@@ -1237,12 +1305,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<FixedArray> array, Node* index, SloppyTNode<Object> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS) {
- FixedArrayBoundsCheck(array, index, additional_offset, parameter_mode);
+ ParameterMode parameter_mode = INTPTR_PARAMETERS,
+ CheckBounds check_bounds = CheckBounds::kAlways) {
+ if (NeedsBoundsCheck(check_bounds)) {
+ FixedArrayBoundsCheck(array, index, additional_offset, parameter_mode);
+ }
StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode,
additional_offset, parameter_mode);
}
+ // This doesn't emit a bounds-check. As part of the security-performance
+ // tradeoff, only use it if it is performance critical.
+ void UnsafeStoreFixedArrayElement(
+ TNode<FixedArray> array, Node* index, SloppyTNode<Object> value,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS) {
+ return StoreFixedArrayElement(array, index, value, barrier_mode,
+ additional_offset, parameter_mode,
+ CheckBounds::kDebugOnly);
+ }
+
void StorePropertyArrayElement(
TNode<PropertyArray> array, Node* index, SloppyTNode<Object> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
@@ -1262,10 +1345,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> value) {
StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER, 0);
}
+ void StoreFixedArrayElement(TNode<FixedArray> array, TNode<Smi> index,
+ TNode<Smi> value) {
+ StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER, 0,
+ SMI_PARAMETERS);
+ }
void StoreFixedDoubleArrayElement(
TNode<FixedDoubleArray> object, Node* index, TNode<Float64T> value,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ ParameterMode parameter_mode = INTPTR_PARAMETERS,
+ CheckBounds check_bounds = CheckBounds::kAlways);
+ // This doesn't emit a bounds-check. As part of the security-performance
+ // tradeoff, only use it if it is performance critical.
+ void UnsafeStoreFixedDoubleArrayElement(
+ TNode<FixedDoubleArray> object, Node* index, TNode<Float64T> value,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS) {
+ return StoreFixedDoubleArrayElement(object, index, value, parameter_mode,
+ CheckBounds::kDebugOnly);
+ }
void StoreFixedDoubleArrayElementSmi(TNode<FixedDoubleArray> object,
TNode<Smi> index,
@@ -1366,26 +1463,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<String> parent,
TNode<Smi> offset);
- // Allocate a one-byte ConsString with the given length, first and second
- // parts. |length| is expected to be tagged, and |first| and |second| are
- // expected to be one-byte strings.
- TNode<String> AllocateOneByteConsString(TNode<Uint32T> length,
- TNode<String> first,
- TNode<String> second,
- AllocationFlags flags = kNone);
- // Allocate a two-byte ConsString with the given length, first and second
- // parts. |length| is expected to be tagged, and |first| and |second| are
- // expected to be two-byte strings.
- TNode<String> AllocateTwoByteConsString(TNode<Uint32T> length,
- TNode<String> first,
- TNode<String> second,
- AllocationFlags flags = kNone);
-
// Allocate an appropriate one- or two-byte ConsString with the first and
// second parts specified by |left| and |right|.
- TNode<String> NewConsString(TNode<Uint32T> length, TNode<String> left,
- TNode<String> right,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateConsString(TNode<Uint32T> length, TNode<String> left,
+ TNode<String> right);
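
The consolidation above folds the one-byte/two-byte AllocateConsString variants into a single entry point, which presumably selects the result encoding itself: a cons can be one-byte only if both halves are. A sketch of that selection rule (enum invented):

    #include <cassert>

    enum class Encoding { kOneByte, kTwoByte };

    Encoding ConsEncoding(Encoding left, Encoding right) {
      return (left == Encoding::kOneByte && right == Encoding::kOneByte)
                 ? Encoding::kOneByte
                 : Encoding::kTwoByte;
    }

    int main() {
      assert(ConsEncoding(Encoding::kOneByte, Encoding::kOneByte) ==
             Encoding::kOneByte);
      assert(ConsEncoding(Encoding::kOneByte, Encoding::kTwoByte) ==
             Encoding::kTwoByte);
    }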
TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
TNode<NameDictionary> AllocateNameDictionary(
@@ -1439,9 +1520,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsValidFastJSArrayCapacity(Node* capacity,
ParameterMode capacity_mode);
- // Allocate a JSArray without elements and initialize the header fields.
- TNode<JSArray> AllocateUninitializedJSArrayWithoutElements(
- TNode<Map> array_map, TNode<Smi> length, Node* allocation_site = nullptr);
//
// Allocate and return a JSArray with initialized header fields and its
// uninitialized elements.
@@ -1473,6 +1551,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
INTPTR_PARAMETERS);
}
+ // Allocate a JSArray and initialize the header fields.
+ TNode<JSArray> AllocateJSArray(TNode<Map> array_map,
+ TNode<FixedArrayBase> elements,
+ TNode<Smi> length,
+ Node* allocation_site = nullptr);
+
enum class HoleConversionMode { kDontConvert, kConvertToUndefined };
// Clone a fast JSArray |array| into a new fast JSArray.
// |convert_holes| tells the function to convert holes into undefined or not.
@@ -1506,6 +1590,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
fixed_array_map);
}
+ TNode<FixedArray> AllocateUninitializedFixedArray(intptr_t capacity) {
+ return UncheckedCast<FixedArray>(AllocateFixedArray(
+ PACKED_ELEMENTS, IntPtrConstant(capacity), AllocationFlag::kNone));
+ }
+
TNode<FixedArray> AllocateZeroedFixedArray(TNode<IntPtrT> capacity) {
TNode<FixedArray> result = UncheckedCast<FixedArray>(
AllocateFixedArray(PACKED_ELEMENTS, capacity,
@@ -1532,6 +1621,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return result;
}
+ TNode<FixedDoubleArray> AllocateFixedDoubleArrayWithHoles(
+ TNode<IntPtrT> capacity, AllocationFlags flags) {
+ TNode<FixedDoubleArray> result = UncheckedCast<FixedDoubleArray>(
+ AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity, flags));
+ FillFixedArrayWithValue(PACKED_DOUBLE_ELEMENTS, result, IntPtrConstant(0),
+ capacity, RootIndex::kTheHoleValue);
+ return result;
+ }
+
Node* AllocatePropertyArray(Node* capacity,
ParameterMode mode = INTPTR_PARAMETERS,
AllocationFlags flags = kNone);
@@ -1661,12 +1759,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<FixedDoubleArray>(base);
}
- TNode<FixedArray> HeapObjectToSloppyArgumentsElements(TNode<HeapObject> base,
- Label* cast_fail) {
+ TNode<SloppyArgumentsElements> HeapObjectToSloppyArgumentsElements(
+ TNode<HeapObject> base, Label* cast_fail) {
GotoIf(WordNotEqual(LoadMap(base),
LoadRoot(RootIndex::kSloppyArgumentsElementsMap)),
cast_fail);
- return UncheckedCast<FixedArray>(base);
+ return UncheckedCast<SloppyArgumentsElements>(base);
}
TNode<Int32T> ConvertElementsKindToInt(TNode<Int32T> elements_kind) {
@@ -1895,7 +1993,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Number> ChangeUintPtrToTagged(TNode<UintPtrT> value);
TNode<Uint32T> ChangeNumberToUint32(TNode<Number> value);
TNode<Float64T> ChangeNumberToFloat64(SloppyTNode<Number> value);
- TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(TNode<Number> value);
+ TNode<UintPtrT> TryNumberToUintPtr(TNode<Number> value, Label* if_negative);
+ TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(TNode<Number> value) {
+ return TryNumberToUintPtr(value, nullptr);
+ }
void TaggedToNumeric(Node* context, Node* value, Label* done,
Variable* var_numeric);
@@ -2040,7 +2141,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsNullOrUndefined(SloppyTNode<Object> object);
TNode<BoolT> IsNumberDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsOneByteStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> HasOnlyOneByteChars(TNode<Int32T> instance_type);
TNode<BoolT> IsPrimitiveInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsPrivateSymbol(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPromiseCapability(SloppyTNode<HeapObject> object);
@@ -2073,6 +2173,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsString(SloppyTNode<HeapObject> object);
TNode<BoolT> IsSymbolInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsSymbol(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsInternalizedStringInstanceType(TNode<Int32T> instance_type);
+ TNode<BoolT> IsUniqueName(TNode<HeapObject> object);
+ TNode<BoolT> IsUniqueNameNoIndex(TNode<HeapObject> object);
TNode<BoolT> IsUndetectableMap(SloppyTNode<Map> map);
TNode<BoolT> IsNotWeakFixedArraySubclass(SloppyTNode<HeapObject> object);
TNode<BoolT> IsZeroOrContext(SloppyTNode<Object> object);
@@ -2088,6 +2191,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid();
TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
+ TNode<BoolT> IsMockArrayBufferAllocatorFlag() {
+ TNode<Word32T> flag_value = UncheckedCast<Word32T>(Load(
+ MachineType::Uint8(),
+ ExternalConstant(
+ ExternalReference::address_of_mock_arraybuffer_allocator_flag())));
+ return Word32NotEqual(Word32And(flag_value, Int32Constant(0xFF)),
+ Int32Constant(0));
+ }
+
// True iff |object| is a Smi or a HeapNumber.
TNode<BoolT> IsNumber(SloppyTNode<Object> object);
// True iff |object| is a Smi or a HeapNumber or a BigInt.
@@ -2159,7 +2271,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Return a new string object produced by concatenating |first| with |second|.
TNode<String> StringAdd(Node* context, TNode<String> first,
- TNode<String> second, AllocationFlags flags = kNone);
+ TNode<String> second);
// Check if |string| is an indirect (thin or flat cons) string type that can
// be dereferenced by DerefIndirectString.
@@ -2392,6 +2504,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Various building blocks for stubs doing property lookups.
// |if_notinternalized| is optional; |if_bailout| will be used by default.
+ // Note: If |key| does not yet have a hash, |if_notinternalized| will be
+ // taken even if |key| is an array index. |if_keyisunique| will never be
+ // taken for array indices.
void TryToName(Node* key, Label* if_keyisindex, Variable* var_index,
Label* if_keyisunique, Variable* var_unique, Label* if_bailout,
Label* if_notinternalized = nullptr);
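
For the TryToName contract documented above, the index/unique-name split hinges on whether the key is a canonical array-index string. A rough standalone version of just that classification (V8 additionally handles Smis, heap numbers, symbols and cached hash bits):

    #include <cassert>
    #include <cstdint>
    #include <optional>
    #include <string>

    // Returns the array index if |key| is a canonical index string,
    // otherwise nullopt (the unique-name path). Max index is 2^32 - 2.
    std::optional<uint32_t> TryParseArrayIndex(const std::string& key) {
      if (key.empty() || key.size() > 10) return std::nullopt;
      if (key.size() > 1 && key[0] == '0') return std::nullopt;  // "01" is a name
      uint64_t value = 0;
      for (char c : key) {
        if (c < '0' || c > '9') return std::nullopt;
        value = value * 10 + static_cast<uint64_t>(c - '0');
      }
      if (value > 4294967294ULL) return std::nullopt;
      return static_cast<uint32_t>(value);
    }

    int main() {
      assert(*TryParseArrayIndex("0") == 0);
      assert(*TryParseArrayIndex("42") == 42);
      assert(!TryParseArrayIndex("01"));          // non-canonical: name path
      assert(!TryParseArrayIndex("foo"));         // name path
      assert(!TryParseArrayIndex("4294967295"));  // 2^32 - 1 is not an index
    }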
@@ -2505,7 +2620,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <class Dictionary>
TNode<Smi> GetCapacity(TNode<Dictionary> dictionary) {
- return CAST(LoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex));
+ return CAST(
+ UnsafeLoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex));
}
template <class Dictionary>
@@ -2578,7 +2694,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum GetOwnPropertyMode { kCallJSGetter, kReturnAccessorPair };
// Tries to get {object}'s own {unique_name} property value. If the property
// is an accessor then it also calls a getter. If the property is a double
- // field it re-wraps value in an immutable heap number.
+ // field it re-wraps the value in an immutable heap number. {unique_name}
+ // must be
+ // a unique name (Symbol or InternalizedString) that is not an array index.
void TryGetOwnProperty(Node* context, Node* receiver, Node* object, Node* map,
Node* instance_type, Node* unique_name,
Label* if_found, Variable* var_value,
@@ -2617,6 +2734,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* GetMethod(Node* context, Node* object, Handle<Name> name,
Label* if_null_or_undefined);
+ TNode<Object> GetIteratorMethod(TNode<Context> context,
+ TNode<HeapObject> heap_obj,
+ Label* if_iteratorundefined);
+
template <class... TArgs>
TNode<Object> CallBuiltin(Builtins::Name id, SloppyTNode<Object> context,
TArgs... args) {
@@ -2740,11 +2861,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Update the type feedback vector.
void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
- // Returns the stricter of the Context::ScopeInfo::LanguageMode and
- // the language mode on the SFI.
- Node* GetLanguageMode(TNode<SharedFunctionInfo> sfi, Node* context);
- Node* GetLanguageMode(TNode<JSFunction> closure, Node* context);
- Node* GetLanguageMode(TNode<FeedbackVector> vector, Node* context);
// Report that there was a feedback update, performing any tasks that should
// be done after a feedback update.
@@ -2766,16 +2882,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Map> LoadReceiverMap(SloppyTNode<Object> receiver);
+ enum class ArgumentsAccessMode { kLoad, kStore, kHas };
+ // Emits a keyed sloppy arguments existence check. Returns whether the key
+ // is in the arguments.
+ Node* HasKeyedSloppyArguments(Node* receiver, Node* key, Label* bailout) {
+ return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout,
+ ArgumentsAccessMode::kHas);
+ }
+
// Emits keyed sloppy arguments load. Returns the loaded value.
Node* LoadKeyedSloppyArguments(Node* receiver, Node* key, Label* bailout) {
- return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout);
+ return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout,
+ ArgumentsAccessMode::kLoad);
}
// Emits keyed sloppy arguments store.
void StoreKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
Label* bailout) {
DCHECK_NOT_NULL(value);
- EmitKeyedSloppyArguments(receiver, key, value, bailout);
+ EmitKeyedSloppyArguments(receiver, key, value, bailout,
+ ArgumentsAccessMode::kStore);
}
// Loads script context from the script context table.
@@ -3093,6 +3219,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool ConstexprBoolNot(bool value) { return !value; }
bool ConstexprInt31Equal(int31_t a, int31_t b) { return a == b; }
+ bool ConstexprInt31GreaterThanEqual(int31_t a, int31_t b) { return a >= b; }
uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; }
void PerformStackCheck(TNode<Context> context);
@@ -3215,11 +3342,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
typedef std::function<void(TNode<Name> key, TNode<Object> value)>
ForEachKeyValueFunction;
+ enum ForEachEnumerationMode {
+ // String and then Symbol properties according to the spec
+ // ES#sec-object.assign
+ kEnumerationOrder,
+ // Order of property addition
+ kPropertyAdditionOrder,
+ };
+
// For each JSObject property (in DescriptorArray order), check if the key is
// enumerable, and if so, load the value from the receiver and evaluate the
// closure.
void ForEachEnumerableOwnProperty(TNode<Context> context, TNode<Map> map,
TNode<JSObject> object,
+ ForEachEnumerationMode mode,
const ForEachKeyValueFunction& body,
Label* bailout);
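
On ForEachEnumerationMode above: kEnumerationOrder follows the spec ordering used by Object.assign, namely all string keys first, then all symbol keys, each group in property-addition order. A toy reordering that makes the difference concrete (integer-index ordering omitted for brevity):

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy key type: either a string name or a symbol, tagged by kind.
    struct Key {
      bool is_symbol;
      std::string name;
    };

    // kEnumerationOrder: strings before symbols. kPropertyAdditionOrder
    // would simply return |addition_order| unchanged.
    std::vector<Key> EnumerationOrder(const std::vector<Key>& addition_order) {
      std::vector<Key> result;
      for (const Key& k : addition_order)
        if (!k.is_symbol) result.push_back(k);
      for (const Key& k : addition_order)
        if (k.is_symbol) result.push_back(k);
      return result;
    }

    int main() {
      std::vector<Key> keys = {{true, "@@tag"}, {false, "a"}, {false, "b"}};
      std::vector<Key> ordered = EnumerationOrder(keys);
      assert(!ordered[0].is_symbol && ordered[0].name == "a");
      assert(!ordered[1].is_symbol && ordered[1].name == "b");
      assert(ordered[2].is_symbol);
    }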
@@ -3273,16 +3409,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Emits a keyed sloppy arguments load, store, or existence check, depending
// on |access_mode|. Returns the loaded value for loads, |value| for stores,
// and a boolean for existence checks.
Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
- Label* bailout);
+ Label* bailout,
+ ArgumentsAccessMode access_mode);
TNode<String> AllocateSlicedString(RootIndex map_root_index,
TNode<Uint32T> length,
TNode<String> parent, TNode<Smi> offset);
- TNode<String> AllocateConsString(RootIndex map_root_index,
- TNode<Uint32T> length, TNode<String> first,
- TNode<String> second, AllocationFlags flags);
-
// Allocate a MutableHeapNumber without initializing its value.
TNode<MutableHeapNumber> AllocateMutableHeapNumber();
@@ -3360,9 +3493,10 @@ class CodeStubArguments {
// further with passing all the JS arguments as is.
void SetReceiver(TNode<Object> object) const;
- TNode<RawPtr<Object>> AtIndexPtr(
- Node* index, CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS) const;
+ // Computes the address of the index'th argument.
+ TNode<WordT> AtIndexPtr(Node* index,
+ CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS) const;
// |index| is zero-based and does not include the receiver
TNode<Object> AtIndex(Node* index,
@@ -3419,7 +3553,7 @@ class CodeStubArguments {
CodeStubAssembler::ParameterMode argc_mode_;
ReceiverMode receiver_mode_;
Node* argc_;
- TNode<RawPtr<Object>> arguments_;
+ TNode<WordT> arguments_;
Node* fp_;
};
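
On the CodeStubArguments changes above: arguments_ now carries a raw word pointing at the first argument slot, and AtIndexPtr negates the index because successive arguments live at decreasing addresses while the receiver sits one slot above. A toy memory model of that addressing (layout invented for the sketch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Word-sized slots; higher array index means higher address here.
      // Invented layout: ... arg2 arg1 arg0 receiver ...
      intptr_t slots[6] = {0, 30, 20, 10, 99 /* receiver */, 0};
      intptr_t* arguments = &slots[3];  // arguments_ points at arg0

      // AtIndexPtr: argument i sits at a lower address, hence the negated
      // index in the code above (offset = -i * kSystemPointerSize).
      auto AtIndexPtr = [&](intptr_t i) { return arguments - i; };
      assert(*AtIndexPtr(0) == 10);
      assert(*AtIndexPtr(2) == 30);

      // GetReceiver loads one slot above arguments_ (+kSystemPointerSize).
      assert(*(arguments + 1) == 99);
    }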
@@ -3472,7 +3606,7 @@ class ToDirectStringAssembler : public CodeStubAssembler {
const Flags flags_;
};
-DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
+DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-tracer.h b/deps/v8/src/code-tracer.h
index 3ed07be77e..efa3dee8f0 100644
--- a/deps/v8/src/code-tracer.h
+++ b/deps/v8/src/code-tracer.h
@@ -9,6 +9,7 @@
#include "src/flags.h"
#include "src/globals.h"
#include "src/utils.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index affb1ddd37..08c815787f 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -15,6 +15,7 @@
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/task-utils.h"
+#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 7d0440f598..b84949acf7 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -130,11 +130,14 @@ void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job) {
CompilationJob::Status status = job->ExecuteJob();
USE(status); // Prevent an unused-variable error.
- // The function may have already been optimized by OSR. Simply continue.
- // Use a mutex to make sure that functions marked for install
- // are always also queued.
- base::MutexGuard access_output_queue_(&output_queue_mutex_);
- output_queue_.push(job);
+ {
+ // The function may have already been optimized by OSR. Simply continue.
+ // Use a mutex to make sure that functions marked for install
+ // are always also queued.
+ base::MutexGuard access_output_queue_(&output_queue_mutex_);
+ output_queue_.push(job);
+ }
+
isolate_->stack_guard()->RequestInstallCode();
}
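
The dispatcher fix above just introduces an inner scope so the MutexGuard is destroyed, releasing output_queue_mutex_, before RequestInstallCode() runs. The equivalent shape with std::lock_guard:

    #include <cassert>
    #include <mutex>
    #include <queue>

    std::mutex output_queue_mutex;
    std::queue<int> output_queue;

    void RequestInstallCode() { /* should not run under the queue lock */ }

    void CompileNext(int job) {
      {
        std::lock_guard<std::mutex> access_output_queue(output_queue_mutex);
        output_queue.push(job);
      }  // guard destroyed here; lock released
      RequestInstallCode();
    }

    int main() {
      CompileNext(1);
      assert(output_queue.size() == 1);
    }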
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index a4a89d13ee..ea471e97d1 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -22,7 +22,7 @@
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
#include "src/globals.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/log-inl.h"
@@ -40,6 +40,7 @@
#include "src/snapshot/code-serializer.h"
#include "src/unoptimized-compilation-info.h"
#include "src/vm-state-inl.h"
+#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
namespace v8 {
namespace internal {
@@ -412,6 +413,8 @@ void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
if (literal->dont_optimize_reason() != BailoutReason::kNoReason) {
shared_info->DisableOptimization(literal->dont_optimize_reason());
}
+ shared_info->set_is_safe_to_skip_arguments_adaptor(
+ literal->SafeToSkipArgumentsAdaptor());
}
CompilationJob::Status FinalizeUnoptimizedCompilationJob(
@@ -755,6 +758,15 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
+ // If code was pending optimization for testing, remove the strong root
+ // that was preventing the bytecode from being flushed between marking and
+ // optimization.
+ if (isolate->heap()->pending_optimize_for_test_bytecode() ==
+ shared->GetBytecodeArray()) {
+ isolate->heap()->SetPendingOptimizeForTestBytecode(
+ ReadOnlyRoots(isolate).undefined_value());
+ }
+
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeCache(function, osr_offset)
.ToHandle(&cached_code)) {
@@ -1118,6 +1130,89 @@ bool Compiler::ParseAndAnalyze(ParseInfo* parse_info,
return Compiler::Analyze(parse_info);
}
+// static
+bool Compiler::CollectSourcePositions(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_info) {
+ DCHECK(shared_info->is_compiled());
+ DCHECK(shared_info->HasBytecodeArray());
+ DCHECK(!shared_info->GetBytecodeArray()->HasSourcePositionTable());
+
+ // TODO(v8:8510): Push the CLEAR_EXCEPTION flag or something like it down into
+ // the parser so it aborts without setting a pending exception, which then
+ // gets thrown. This would avoid the situation where potentially we'd reparse
+ // several times (running out of stack each time) before hitting this limit.
+ if (GetCurrentStackPosition() < isolate->stack_guard()->real_climit())
+ return false;
+
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+ DCHECK(!isolate->has_pending_exception());
+ VMState<BYTECODE_COMPILER> state(isolate);
+ PostponeInterruptsScope postpone(isolate);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kCompileCollectSourcePositions);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CollectSourcePositions");
+ HistogramTimerScope timer(isolate->counters()->collect_source_positions());
+
+ // Set up parse info.
+ ParseInfo parse_info(isolate, shared_info);
+ parse_info.set_lazy_compile();
+ parse_info.set_collect_source_positions();
+ if (FLAG_allow_natives_syntax) parse_info.set_allow_natives_syntax();
+
+ // Parse and update ParseInfo with the results.
+ if (!parsing::ParseAny(&parse_info, shared_info, isolate)) {
+ return FailWithPendingException(
+ isolate, &parse_info, Compiler::ClearExceptionFlag::CLEAR_EXCEPTION);
+ }
+
+ // Generate the unoptimized bytecode.
+ // TODO(v8:8510): Consider forcing preparsing of inner functions to avoid
+ // wasting time fully parsing them when they won't ever be used.
+ UnoptimizedCompilationJobList inner_function_jobs;
+ std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
+ GenerateUnoptimizedCode(&parse_info, isolate->allocator(),
+ &inner_function_jobs));
+ if (!outer_function_job) {
+ return FailWithPendingException(
+ isolate, &parse_info, Compiler::ClearExceptionFlag::CLEAR_EXCEPTION);
+ }
+
+ DCHECK(outer_function_job->compilation_info()->collect_source_positions());
+
+ // TODO(v8:8510) Avoid re-allocating bytecode array/constant pool and
+ // re-internalizeing the ast values. Maybe we could use the
+ // unoptimized_compilation_flag to signal that all we need is the source
+ // position table (and we could do the DCHECK that the bytecode array is the
+ // same in the bytecode-generator, by comparing the real bytecode array on the
+ // SFI with the off-heap bytecode array).
+
+ // Internalize ast values onto the heap.
+ parse_info.ast_value_factory()->Internalize(isolate);
+
+ {
+ // Allocate scope infos for the literal.
+ DeclarationScope::AllocateScopeInfos(&parse_info, isolate);
+ CHECK_EQ(outer_function_job->FinalizeJob(shared_info, isolate),
+ CompilationJob::SUCCEEDED);
+ }
+
+ // Update the source position table on the original bytecode.
+ Handle<BytecodeArray> bytecode =
+ handle(shared_info->GetBytecodeArray(), isolate);
+ DCHECK(bytecode->IsBytecodeEqual(
+ *outer_function_job->compilation_info()->bytecode_array()));
+ DCHECK(outer_function_job->compilation_info()->has_bytecode_array());
+ bytecode->set_source_position_table(outer_function_job->compilation_info()
+ ->bytecode_array()
+ ->SourcePositionTable());
+
+ DCHECK(!isolate->has_pending_exception());
+ DCHECK(shared_info->is_compiled_scope().is_compiled());
+ return true;
+}
+
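
The new Compiler::CollectSourcePositions implements lazy source positions: bytecode ships without a position table, and the table is rebuilt on demand by re-parsing and re-compiling, relying on compilation being deterministic. A toy version of that recompute-and-graft flow (all types invented):

    #include <cassert>
    #include <optional>
    #include <string>

    struct BytecodeArray {
      std::string bytecode;
      std::optional<std::string> source_position_table;
    };

    BytecodeArray Compile(const std::string& source, bool collect_positions) {
      BytecodeArray result;
      result.bytecode = "bc(" + source + ")";
      if (collect_positions) result.source_position_table = "spt(" + source + ")";
      return result;
    }

    // Counterpart of CollectSourcePositions: recompile with positions, check
    // the bytecode matches, and graft the new table onto the cached array.
    bool CollectSourcePositions(const std::string& source, BytecodeArray* cached) {
      BytecodeArray recompiled = Compile(source, /*collect_positions=*/true);
      assert(recompiled.bytecode == cached->bytecode);  // must be deterministic
      cached->source_position_table = recompiled.source_position_table;
      return true;
    }

    int main() {
      BytecodeArray bc = Compile("f", /*collect_positions=*/false);
      assert(!bc.source_position_table);
      CollectSourcePositions("f", &bc);
      assert(bc.source_position_table);
    }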
bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope) {
@@ -1761,7 +1856,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
Handle<SharedFunctionInfo> inner_result;
if (CodeSerializer::Deserialize(isolate, cached_data, source,
origin_options)
- .ToHandle(&inner_result)) {
+ .ToHandle(&inner_result) &&
+ inner_result->is_compiled()) {
// Promote to per-isolate compilation cache.
is_compiled_scope = inner_result->is_compiled_scope();
DCHECK(is_compiled_scope.is_compiled());
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index e33d9fdf04..b370cdebf5 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -64,6 +64,12 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
IsCompiledScope* is_compiled_scope);
static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
+ // Collect source positions for a function that has already been compiled to
+ // bytecode, but for which source positions were not collected (e.g. because
+ // they were not immediately needed).
+ static bool CollectSourcePositions(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared);
+
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
CompileForLiveEdit(ParseInfo* parse_info, Isolate* isolate);
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 8665788162..39beced3f3 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -9,6 +9,7 @@ sigurds@chromium.org
tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
+mslekova@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 9d24c08dde..b21f499b1b 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -22,10 +22,9 @@ namespace internal {
namespace compiler {
// static
-FieldAccess AccessBuilder::ForExternalTaggedValue() {
- FieldAccess access = {kUntaggedBase, 0,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
+FieldAccess AccessBuilder::ForExternalIntPtr() {
+ FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), Type::Any(), MachineType::IntPtr(),
kNoWriteBarrier};
return access;
}
@@ -915,6 +914,16 @@ ElementAccess AccessBuilder::ForFixedArrayElement(
}
// static
+ElementAccess AccessBuilder::ForStackArgument() {
+ ElementAccess access = {
+ kUntaggedBase,
+ CommonFrameConstants::kFixedFrameSizeAboveFp - kSystemPointerSize,
+ Type::NonInternal(), MachineType::Pointer(),
+ WriteBarrierKind::kNoWriteBarrier};
+ return access;
+}
+
+// static
ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
ElementAccess access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
TypeCache::Get()->kFloat64, MachineType::Float64(),
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 56bc5afe89..ef8d8c5e4c 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -24,8 +24,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// ===========================================================================
// Access to external values (based on external references).
- // Provides access to a tagged field identified by an external reference.
- static FieldAccess ForExternalTaggedValue();
+ // Provides access to an intptr field identified by an external reference.
+ static FieldAccess ForExternalIntPtr();
// Provides access to an uint8 field identified by an external reference.
static FieldAccess ForExternalUint8Value();
@@ -283,6 +283,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
ElementsKind kind,
LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ // Provides access to stack arguments.
+ static ElementAccess ForStackArgument();
+
// Provides access to FixedDoubleArray elements.
static ElementAccess ForFixedDoubleArrayElement();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 4e67c35cdf..ac46eeb07b 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -24,21 +24,6 @@ namespace compiler {
namespace {
-bool CanInlineElementAccess(Handle<Map> map) {
- if (!map->IsJSObjectMap()) return false;
- if (map->is_access_check_needed()) return false;
- if (map->has_indexed_interceptor()) return false;
- ElementsKind const elements_kind = map->elements_kind();
- if (IsFastElementsKind(elements_kind)) return true;
- if (IsFixedTypedArrayElementsKind(elements_kind) &&
- elements_kind != BIGUINT64_ELEMENTS &&
- elements_kind != BIGINT64_ELEMENTS) {
- return true;
- }
- return false;
-}
-
-
bool CanInlinePropertyAccess(Handle<Map> map) {
// We can inline property access to prototypes of all primitives, except
// the special Oddball ones that have no wrapper counterparts (i.e. Null,
@@ -63,6 +48,8 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
return os << "Store";
case AccessMode::kStoreInLiteral:
return os << "StoreInLiteral";
+ case AccessMode::kHas:
+ return os << "Has";
}
UNREACHABLE();
}
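
The new AccessMode::kHas matters because a HasProperty check can be answered from object metadata alone; unlike kLoad it never has to invoke an accessor (see the kHas branch in ComputeAccessorDescriptorAccessInfo later in this file). A toy demonstration of why that is observably cheaper:

    #include <cassert>
    #include <functional>
    #include <map>
    #include <string>

    struct Property {
      std::function<int()> getter;  // may be expensive or have side effects
    };

    bool Has(const std::map<std::string, Property>& obj,
             const std::string& key) {
      return obj.count(key) != 0;  // never runs the getter
    }

    int Load(const std::map<std::string, Property>& obj,
             const std::string& key) {
      return obj.at(key).getter();  // kLoad must call the getter
    }

    int main() {
      int calls = 0;
      std::map<std::string, Property> obj;
      obj["x"] = Property{[&] { ++calls; return 7; }};
      assert(Has(obj, "x") && calls == 0);  // 'in' check: no getter call
      assert(Load(obj, "x") == 7 && calls == 1);
    }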
@@ -71,7 +58,9 @@ ElementAccessInfo::ElementAccessInfo() = default;
ElementAccessInfo::ElementAccessInfo(MapHandles const& receiver_maps,
ElementsKind elements_kind)
- : elements_kind_(elements_kind), receiver_maps_(receiver_maps) {}
+ : elements_kind_(elements_kind), receiver_maps_(receiver_maps) {
+ CHECK(!receiver_maps.empty());
+}
// static
PropertyAccessInfo PropertyAccessInfo::NotFound(MapHandles const& receiver_maps,
@@ -175,6 +164,7 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
if (this->field_index_.GetFieldAccessStubKey() ==
that->field_index_.GetFieldAccessStubKey()) {
switch (access_mode) {
+ case AccessMode::kHas:
case AccessMode::kLoad: {
if (this->field_representation_ != that->field_representation_) {
if (!IsAnyTagged(this->field_representation_) ||
@@ -249,19 +239,15 @@ Handle<Cell> PropertyAccessInfo::export_cell() const {
AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
CompilationDependencies* dependencies,
- Handle<Context> native_context, Zone* zone)
+ Zone* zone)
: broker_(broker),
dependencies_(dependencies),
- native_context_(native_context),
- isolate_(native_context->GetIsolate()),
type_cache_(TypeCache::Get()),
- zone_(zone) {
- DCHECK(native_context->IsNativeContext());
-}
-
+ zone_(zone) {}
bool AccessInfoFactory::ComputeElementAccessInfo(
- Handle<Map> map, AccessMode access_mode, ElementAccessInfo* access_info) {
+ Handle<Map> map, AccessMode access_mode,
+ ElementAccessInfo* access_info) const {
// Check if it is safe to inline element access for the {map}.
if (!CanInlineElementAccess(map)) return false;
ElementsKind const elements_kind = map->elements_kind();
@@ -270,54 +256,50 @@ bool AccessInfoFactory::ComputeElementAccessInfo(
}
bool AccessInfoFactory::ComputeElementAccessInfos(
- MapHandles const& maps, AccessMode access_mode,
- ZoneVector<ElementAccessInfo>* access_infos) {
- if (access_mode == AccessMode::kLoad) {
+ FeedbackNexus nexus, MapHandles const& maps, AccessMode access_mode,
+ ZoneVector<ElementAccessInfo>* access_infos) const {
+ ProcessedFeedback processed(broker()->zone());
+ ProcessFeedbackMapsForElementAccess(isolate(), maps, &processed);
+
+ if (FLAG_concurrent_inlining) {
+ if (broker()->HasFeedback(nexus)) {
+ // We have already processed the feedback for this nexus during
+ // serialization. Use that data instead of the data computed above.
+ ProcessedFeedback const& preprocessed =
+ broker()->GetOrCreateFeedback(nexus);
+ TRACE_BROKER(broker(),
+ "ComputeElementAccessInfos: using preprocessed feedback "
+ << "(slot " << nexus.slot() << " of "
+ << Brief(*nexus.vector_handle()) << "; "
+ << preprocessed.receiver_maps.size() << "/"
+ << preprocessed.transitions.size() << " vs "
+ << processed.receiver_maps.size() << "/"
+ << processed.transitions.size() << ").\n");
+ processed.receiver_maps = preprocessed.receiver_maps;
+ processed.transitions = preprocessed.transitions;
+ } else {
+ TRACE_BROKER(broker(),
+ "ComputeElementAccessInfos: missing preprocessed feedback "
+ << "(slot " << nexus.slot() << " of "
+ << Brief(*nexus.vector_handle()) << ").\n");
+ }
+ }
+
+ if (processed.receiver_maps.empty()) return false;
+
+ if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
// For polymorphic loads of similar elements kinds (i.e. all tagged or all
// double), always use the "worst case" code without a transition. This is
// much faster than transitioning the elements to the worst case, trading a
// TransitionElementsKind for a CheckMaps, avoiding mutation of the array.
ElementAccessInfo access_info;
- if (ConsolidateElementLoad(maps, &access_info)) {
+ if (ConsolidateElementLoad(processed, &access_info)) {
access_infos->push_back(access_info);
return true;
}
}
- // Collect possible transition targets.
- MapHandles possible_transition_targets;
- possible_transition_targets.reserve(maps.size());
- for (Handle<Map> map : maps) {
- if (Map::TryUpdate(isolate(), map).ToHandle(&map)) {
- if (CanInlineElementAccess(map) &&
- IsFastElementsKind(map->elements_kind()) &&
- GetInitialFastElementsKind() != map->elements_kind()) {
- possible_transition_targets.push_back(map);
- }
- }
- }
-
- // Separate the actual receiver maps and the possible transition sources.
- MapHandles receiver_maps;
- receiver_maps.reserve(maps.size());
- MapTransitionList transitions(maps.size());
- for (Handle<Map> map : maps) {
- if (Map::TryUpdate(isolate(), map).ToHandle(&map)) {
- // Don't generate elements kind transitions from stable maps.
- Map transition_target = map->is_stable()
- ? Map()
- : map->FindElementsKindTransitionedMap(
- isolate(), possible_transition_targets);
- if (transition_target.is_null()) {
- receiver_maps.push_back(map);
- } else {
- transitions.push_back(
- std::make_pair(map, handle(transition_target, isolate())));
- }
- }
- }
-
- for (Handle<Map> receiver_map : receiver_maps) {
+ for (Handle<Map> receiver_map : processed.receiver_maps) {
// Compute the element access information.
ElementAccessInfo access_info;
if (!ComputeElementAccessInfo(receiver_map, access_mode, &access_info)) {
@@ -325,9 +307,9 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
}
// Collect the possible transitions for the {receiver_map}.
- for (auto transition : transitions) {
+ for (auto transition : processed.transitions) {
if (transition.second.is_identical_to(receiver_map)) {
- access_info.transitions().push_back(transition);
+ access_info.AddTransitionSource(transition.first);
}
}
@@ -337,24 +319,139 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
return true;
}
+bool AccessInfoFactory::ComputeDataFieldAccessInfo(
+ Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder,
+ int number, AccessMode access_mode, PropertyAccessInfo* access_info) const {
+ DCHECK_NE(number, DescriptorArray::kNotFound);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ PropertyDetails const details = descriptors->GetDetails(number);
+ int index = descriptors->GetFieldIndex(number);
+ Representation details_representation = details.representation();
+ FieldIndex field_index =
+ FieldIndex::ForPropertyIndex(*map, index, details_representation);
+ Type field_type = Type::NonInternal();
+ MachineRepresentation field_representation = MachineRepresentation::kTagged;
+ MaybeHandle<Map> field_map;
+ if (details_representation.IsSmi()) {
+ field_type = Type::SignedSmall();
+ field_representation = MachineRepresentation::kTaggedSigned;
+ } else if (details_representation.IsDouble()) {
+ field_type = type_cache_->kFloat64;
+ field_representation = MachineRepresentation::kFloat64;
+ } else if (details_representation.IsHeapObject()) {
+ // Extract the field type from the property details (make sure its
+ // representation is TaggedPointer to reflect the heap object case).
+ field_representation = MachineRepresentation::kTaggedPointer;
+ Handle<FieldType> descriptors_field_type(descriptors->GetFieldType(number),
+ isolate());
+ if (descriptors_field_type->IsNone()) {
+ // Store is not safe if the field type was cleared.
+ if (access_mode == AccessMode::kStore) return false;
+
+ // The field type was cleared by the GC, so we don't know anything
+ // about the contents now.
+ } else if (descriptors_field_type->IsClass()) {
+ MapRef map_ref(broker(), map);
+ map_ref.SerializeOwnDescriptors(); // TODO(neis): Remove later.
+ dependencies()->DependOnFieldType(map_ref, number);
+ // Remember the field map, and try to infer a useful type.
+ Handle<Map> map(descriptors_field_type->AsClass(), isolate());
+ field_type = Type::For(MapRef(broker(), map));
+ field_map = MaybeHandle<Map>(map);
+ }
+ }
+ *access_info = PropertyAccessInfo::DataField(
+ details.constness(), MapHandles{receiver_map}, field_index,
+ field_representation, field_type, field_map, holder);
+ return true;
+}
+
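
The representation dispatch in ComputeDataFieldAccessInfo above reduces to a small mapping from a property's storage representation to the machine representation the compiler uses for the field. Condensed as a function (enumerators are stand-ins for V8's Representation/MachineRepresentation):

    #include <cassert>

    enum class Representation { kSmi, kDouble, kHeapObject, kTagged };
    enum class MachineRepresentation {
      kTaggedSigned, kFloat64, kTaggedPointer, kTagged
    };

    MachineRepresentation FieldRepresentation(Representation r) {
      switch (r) {
        case Representation::kSmi:
          return MachineRepresentation::kTaggedSigned;
        case Representation::kDouble:
          return MachineRepresentation::kFloat64;
        case Representation::kHeapObject:
          return MachineRepresentation::kTaggedPointer;
        case Representation::kTagged:
          return MachineRepresentation::kTagged;
      }
      return MachineRepresentation::kTagged;  // unreachable
    }

    int main() {
      assert(FieldRepresentation(Representation::kDouble) ==
             MachineRepresentation::kFloat64);
    }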
+bool AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
+ Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map,
+ MaybeHandle<JSObject> holder, int number, AccessMode access_mode,
+ PropertyAccessInfo* access_info) const {
+ DCHECK_NE(number, DescriptorArray::kNotFound);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ SLOW_DCHECK(number == descriptors->Search(*name, *map));
+ if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
+ DCHECK(map->is_prototype_map());
+ Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(map->prototype_info()),
+ isolate());
+ Handle<JSModuleNamespace> module_namespace(
+ JSModuleNamespace::cast(proto_info->module_namespace()), isolate());
+ Handle<Cell> cell(
+ Cell::cast(module_namespace->module()->exports()->Lookup(
+ ReadOnlyRoots(isolate()), name, Smi::ToInt(name->GetHash()))),
+ isolate());
+ if (cell->value()->IsTheHole(isolate())) {
+ // This module has not been fully initialized yet.
+ return false;
+ }
+ *access_info =
+ PropertyAccessInfo::ModuleExport(MapHandles{receiver_map}, cell);
+ return true;
+ }
+ if (access_mode == AccessMode::kHas) {
+ // HasProperty checks don't call getters/setters; existence is sufficient.
+ *access_info = PropertyAccessInfo::AccessorConstant(
+ MapHandles{receiver_map}, Handle<Object>(), holder);
+ return true;
+ }
+ Handle<Object> accessors(descriptors->GetStrongValue(number), isolate());
+ if (!accessors->IsAccessorPair()) return false;
+ Handle<Object> accessor(access_mode == AccessMode::kLoad
+ ? Handle<AccessorPair>::cast(accessors)->getter()
+ : Handle<AccessorPair>::cast(accessors)->setter(),
+ isolate());
+ if (!accessor->IsJSFunction()) {
+ CallOptimization optimization(isolate(), accessor);
+ if (!optimization.is_simple_api_call()) return false;
+ if (optimization.IsCrossContextLazyAccessorPair(
+ *broker()->native_context().object(), *map)) {
+ return false;
+ }
+
+ CallOptimization::HolderLookup lookup;
+ holder = optimization.LookupHolderOfExpectedType(receiver_map, &lookup);
+ if (lookup == CallOptimization::kHolderNotFound) return false;
+ DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
+ holder.is_null());
+ DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound, !holder.is_null());
+ if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
+ }
+ if (access_mode == AccessMode::kLoad) {
+ Handle<Name> cached_property_name;
+ if (FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), accessor)
+ .ToHandle(&cached_property_name)) {
+ if (ComputePropertyAccessInfo(map, cached_property_name, access_mode,
+ access_info)) {
+ return true;
+ }
+ }
+ }
+ *access_info = PropertyAccessInfo::AccessorConstant(MapHandles{receiver_map},
+ accessor, holder);
+ return true;
+}
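
// Illustrative sketch (not part of the patch): the accessor selection in the
// AccessorPair branch above -- loads take the getter, stores the setter. The
// "AccessorPair" here is a plain toy struct, not V8's heap object.
#include <cassert>
#include <string>

enum class AccessMode { kLoad, kStore };

struct AccessorPair {
  std::string getter;
  std::string setter;
};

const std::string& AccessorFor(const AccessorPair& pair, AccessMode mode) {
  return mode == AccessMode::kLoad ? pair.getter : pair.setter;
}

int main() {
  AccessorPair pair{"get x", "set x"};
  assert(AccessorFor(pair, AccessMode::kStore) == "set x");
  return 0;
}
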
bool AccessInfoFactory::ComputePropertyAccessInfo(
Handle<Map> map, Handle<Name> name, AccessMode access_mode,
- PropertyAccessInfo* access_info) {
+ PropertyAccessInfo* access_info) const {
CHECK(name->IsUniqueName());
+ if (access_mode == AccessMode::kHas && !map->IsJSReceiverMap()) return false;
+
// Check if it is safe to inline property access for the {map}.
if (!CanInlinePropertyAccess(map)) return false;
- // Compute the receiver type.
- Handle<Map> receiver_map = map;
-
// We support fast inline cases for certain JSObject getters.
- if (access_mode == AccessMode::kLoad &&
+ if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) &&
LookupSpecialFieldAccessor(map, name, access_info)) {
return true;
}
+ // Remember the receiver map. We use {map} as loop variable.
+ Handle<Map> receiver_map = map;
MaybeHandle<JSObject> holder;
do {
// Lookup the named property on the {map}.
@@ -368,62 +465,23 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
if (details.IsReadOnly()) {
return false;
}
- // Check for store to data property on a prototype.
if (details.kind() == kData && !holder.is_null()) {
- // Store to property not found on the receiver but on a prototype, we
- // need to transition to a new data property.
- // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
+ // This is a store to a property not found on the receiver but on a
+ // prototype. According to ES6 section 9.1.9 [[Set]], we need to
+ // create a new data property on the receiver. We can still optimize
+ // if such a transition already exists.
return LookupTransition(receiver_map, name, holder, access_info);
}
}
if (details.location() == kField) {
if (details.kind() == kData) {
- int index = descriptors->GetFieldIndex(number);
- Representation details_representation = details.representation();
- FieldIndex field_index =
- FieldIndex::ForPropertyIndex(*map, index, details_representation);
- Type field_type = Type::NonInternal();
- MachineRepresentation field_representation =
- MachineRepresentation::kTagged;
- MaybeHandle<Map> field_map;
- if (details_representation.IsSmi()) {
- field_type = Type::SignedSmall();
- field_representation = MachineRepresentation::kTaggedSigned;
- } else if (details_representation.IsDouble()) {
- field_type = type_cache_->kFloat64;
- field_representation = MachineRepresentation::kFloat64;
- } else if (details_representation.IsHeapObject()) {
- // Extract the field type from the property details (make sure its
- // representation is TaggedPointer to reflect the heap object case).
- field_representation = MachineRepresentation::kTaggedPointer;
- Handle<FieldType> descriptors_field_type(
- descriptors->GetFieldType(number), isolate());
- if (descriptors_field_type->IsNone()) {
- // Store is not safe if the field type was cleared.
- if (access_mode == AccessMode::kStore) return false;
-
- // The field type was cleared by the GC, so we don't know anything
- // about the contents now.
- } else if (descriptors_field_type->IsClass()) {
- MapRef map_ref(broker(), map);
- map_ref.SerializeOwnDescriptors(); // TODO(neis): Remove later.
- dependencies()->DependOnFieldType(map_ref, number);
- // Remember the field map, and try to infer a useful type.
- Handle<Map> map(descriptors_field_type->AsClass(), isolate());
- field_type = Type::For(MapRef(broker(), map));
- field_map = MaybeHandle<Map>(map);
- }
- }
- *access_info = PropertyAccessInfo::DataField(
- details.constness(), MapHandles{receiver_map}, field_index,
- field_representation, field_type, field_map, holder);
- return true;
+ return ComputeDataFieldAccessInfo(receiver_map, map, holder, number,
+ access_mode, access_info);
} else {
DCHECK_EQ(kAccessor, details.kind());
// TODO(turbofan): Add support for general accessors?
return false;
}
-
} else {
DCHECK_EQ(kDescriptor, details.location());
if (details.kind() == kData) {
@@ -434,71 +492,16 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return true;
} else {
DCHECK_EQ(kAccessor, details.kind());
- if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
- DCHECK(map->is_prototype_map());
- Handle<PrototypeInfo> proto_info(
- PrototypeInfo::cast(map->prototype_info()), isolate());
- Handle<JSModuleNamespace> module_namespace(
- JSModuleNamespace::cast(proto_info->module_namespace()),
- isolate());
- Handle<Cell> cell(
- Cell::cast(module_namespace->module()->exports()->Lookup(
- ReadOnlyRoots(isolate()), name,
- Smi::ToInt(name->GetHash()))),
- isolate());
- if (cell->value()->IsTheHole(isolate())) {
- // This module has not been fully initialized yet.
- return false;
- }
- *access_info = PropertyAccessInfo::ModuleExport(
- MapHandles{receiver_map}, cell);
- return true;
- }
- Handle<Object> accessors(descriptors->GetStrongValue(number),
- isolate());
- if (!accessors->IsAccessorPair()) return false;
- Handle<Object> accessor(
- access_mode == AccessMode::kLoad
- ? Handle<AccessorPair>::cast(accessors)->getter()
- : Handle<AccessorPair>::cast(accessors)->setter(),
- isolate());
- if (!accessor->IsJSFunction()) {
- CallOptimization optimization(isolate(), accessor);
- if (!optimization.is_simple_api_call()) return false;
- if (optimization.IsCrossContextLazyAccessorPair(*native_context_,
- *map)) {
- return false;
- }
-
- CallOptimization::HolderLookup lookup;
- holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &lookup);
- if (lookup == CallOptimization::kHolderNotFound) return false;
- DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
- holder.is_null());
- DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound,
- !holder.is_null());
- if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
- }
- if (access_mode == AccessMode::kLoad) {
- Handle<Name> cached_property_name;
- if (FunctionTemplateInfo::TryGetCachedPropertyName(isolate(),
- accessor)
- .ToHandle(&cached_property_name)) {
- if (ComputePropertyAccessInfo(map, cached_property_name,
- access_mode, access_info)) {
- return true;
- }
- }
- }
- *access_info = PropertyAccessInfo::AccessorConstant(
- MapHandles{receiver_map}, accessor, holder);
- return true;
+ return ComputeAccessorDescriptorAccessInfo(receiver_map, name, map,
+ holder, number,
+ access_mode, access_info);
}
}
UNREACHABLE();
}
+ // The property wasn't found on {map}. Look on the prototype if appropriate.
+
// Don't search on the prototype chain for special indices in case of
// integer indexed exotic objects (see ES6 section 9.4.5).
if (map->IsJSTypedArrayMap() && name->IsString() &&
@@ -506,7 +509,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return false;
}
- // Don't search on the prototype when storing in literals
+ // Don't search on the prototype when storing in literals.
if (access_mode == AccessMode::kStoreInLiteral) {
return LookupTransition(receiver_map, name, holder, access_info);
}
@@ -519,7 +522,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
Handle<JSFunction> constructor;
- if (Map::GetConstructorFunction(map, native_context())
+ if (Map::GetConstructorFunction(map, broker()->native_context().object())
.ToHandle(&constructor)) {
map = handle(constructor->initial_map(), isolate());
DCHECK(map->prototype()->IsJSObject());
@@ -554,7 +557,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
bool AccessInfoFactory::ComputePropertyAccessInfo(
MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
- PropertyAccessInfo* access_info) {
+ PropertyAccessInfo* access_info) const {
ZoneVector<PropertyAccessInfo> access_infos(zone());
if (ComputePropertyAccessInfos(maps, name, access_mode, &access_infos) &&
access_infos.size() == 1) {
@@ -566,23 +569,27 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
bool AccessInfoFactory::ComputePropertyAccessInfos(
MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
- ZoneVector<PropertyAccessInfo>* access_infos) {
+ ZoneVector<PropertyAccessInfo>* access_infos) const {
+ ZoneVector<PropertyAccessInfo> infos(zone());
+ infos.reserve(maps.size());
for (Handle<Map> map : maps) {
- if (Map::TryUpdate(isolate(), map).ToHandle(&map)) {
- PropertyAccessInfo access_info;
- if (!ComputePropertyAccessInfo(map, name, access_mode, &access_info)) {
- return false;
- }
- // Try to merge the {access_info} with an existing one.
- bool merged = false;
- for (PropertyAccessInfo& other_info : *access_infos) {
- if (other_info.Merge(&access_info, access_mode, zone())) {
- merged = true;
- break;
- }
+ PropertyAccessInfo access_info;
+ if (!ComputePropertyAccessInfo(map, name, access_mode, &access_info)) {
+ return false;
+ }
+ infos.push_back(access_info);
+ }
+
+ // Merge as many as possible and push into {access_infos}.
+ for (auto it = infos.begin(), end = infos.end(); it != end; ++it) {
+ bool merged = false;
+ for (auto ot = it + 1; ot != end; ++ot) {
+ if (ot->Merge(&(*it), access_mode, zone())) {
+ merged = true;
+ break;
}
- if (!merged) access_infos->push_back(access_info);
}
+ if (!merged) access_infos->push_back(*it);
}
return true;
}
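
// Illustrative sketch (not part of the patch): the compute-then-merge shape
// that ComputePropertyAccessInfos now uses. Every element first gets computed
// (failing fast if any map is unsupported), then each entry tries to fold
// itself into a *later* entry; only entries without a merge partner survive.
// Here "merge" is reduced to equality-absorption on ints.
#include <cassert>
#include <vector>

std::vector<int> MergeAll(const std::vector<int>& infos) {
  std::vector<int> result;
  for (auto it = infos.begin(), end = infos.end(); it != end; ++it) {
    bool merged = false;
    for (auto ot = it + 1; ot != end; ++ot) {
      if (*ot == *it) {  // A later entry absorbs this one.
        merged = true;
        break;
      }
    }
    if (!merged) result.push_back(*it);
  }
  return result;
}

int main() {
  // Duplicates fold into their last occurrence: {1, 2, 1, 3} -> {2, 1, 3}.
  assert((MergeAll({1, 2, 1, 3}) == std::vector<int>{2, 1, 3}));
  return 0;
}
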
@@ -610,12 +617,16 @@ Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
} // namespace
-bool AccessInfoFactory::ConsolidateElementLoad(MapHandles const& maps,
- ElementAccessInfo* access_info) {
- if (maps.empty()) return false;
- InstanceType instance_type = maps.front()->instance_type();
- ElementsKind elements_kind = maps.front()->elements_kind();
- for (Handle<Map> map : maps) {
+bool AccessInfoFactory::ConsolidateElementLoad(
+ ProcessedFeedback const& processed, ElementAccessInfo* access_info) const {
+ CHECK(!processed.receiver_maps.empty());
+
+  // We want to look at each map, but the maps are split across
+  // {processed.receiver_maps} and {processed.transitions}.
+
+ InstanceType instance_type = processed.receiver_maps.front()->instance_type();
+ ElementsKind elements_kind = processed.receiver_maps.front()->elements_kind();
+ auto processMap = [&](Handle<Map> map) {
if (!CanInlineElementAccess(map) || map->instance_type() != instance_type) {
return false;
}
@@ -623,16 +634,31 @@ bool AccessInfoFactory::ConsolidateElementLoad(MapHandles const& maps,
.To(&elements_kind)) {
return false;
}
+ return true;
+ };
+
+ for (Handle<Map> map : processed.receiver_maps) {
+ if (!processMap(map)) return false;
+ }
+
+ MapHandles maps(processed.receiver_maps.begin(),
+ processed.receiver_maps.end());
+ for (auto& pair : processed.transitions) {
+ if (!processMap(pair.first) || !processMap(pair.second)) return false;
+ maps.push_back(pair.first);
+ maps.push_back(pair.second);
}
+ // {maps} may now contain duplicate entries, but that shouldn't matter.
+
*access_info = ElementAccessInfo(maps, elements_kind);
return true;
}
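
// Illustrative sketch (not part of the patch): the shape of
// ConsolidateElementLoad above -- one predicate applied to maps from two
// sources, with an early bail-out. ElementsKind generalization is reduced
// here to taking the max of small integers; negative values stand in for
// maps that fail CanInlineElementAccess.
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

bool Consolidate(const std::vector<int>& receiver_kinds,
                 const std::vector<std::pair<int, int>>& transitions,
                 int* out_kind) {
  if (receiver_kinds.empty()) return false;
  int kind = receiver_kinds.front();
  auto process = [&](int k) {
    if (k < 0) return false;   // stands in for !CanInlineElementAccess
    kind = std::max(kind, k);  // stands in for GeneralizeElementsKind
    return true;
  };
  for (int k : receiver_kinds)
    if (!process(k)) return false;
  for (const auto& pair : transitions)
    if (!process(pair.first) || !process(pair.second)) return false;
  *out_kind = kind;
  return true;
}

int main() {
  int kind = 0;
  assert(Consolidate({1, 2}, {{2, 3}}, &kind) && kind == 3);
  return 0;
}
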
bool AccessInfoFactory::LookupSpecialFieldAccessor(
- Handle<Map> map, Handle<Name> name, PropertyAccessInfo* access_info) {
+ Handle<Map> map, Handle<Name> name, PropertyAccessInfo* access_info) const {
// Check for String::length field accessor.
if (map->IsStringMap()) {
- if (Name::Equals(isolate(), name, factory()->length_string())) {
+ if (Name::Equals(isolate(), name, isolate()->factory()->length_string())) {
*access_info = PropertyAccessInfo::StringLength(MapHandles{map});
return true;
}
@@ -644,7 +670,8 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
Type field_type = Type::NonInternal();
MachineRepresentation field_representation = MachineRepresentation::kTagged;
if (map->IsJSArrayMap()) {
- DCHECK(Name::Equals(isolate(), factory()->length_string(), name));
+ DCHECK(
+ Name::Equals(isolate(), isolate()->factory()->length_string(), name));
// The JSArray::length property is a smi in the range
// [0, FixedDoubleArray::kMaxLength] in case of fast double
// elements, a smi in the range [0, FixedArray::kMaxLength]
@@ -669,10 +696,9 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
return false;
}
-
-bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
- MaybeHandle<JSObject> holder,
- PropertyAccessInfo* access_info) {
+bool AccessInfoFactory::LookupTransition(
+ Handle<Map> map, Handle<Name> name, MaybeHandle<JSObject> holder,
+ PropertyAccessInfo* access_info) const {
// Check if the {map} has a data transition with the given {name}.
Map transition =
TransitionsAccessor(isolate(), map).SearchTransition(*name, kData, NONE);
@@ -729,8 +755,6 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
}
-Factory* AccessInfoFactory::factory() const { return isolate()->factory(); }
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 4673ce9306..c1b87909d1 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -8,6 +8,7 @@
#include <iosfwd>
#include "src/compiler/types.h"
+#include "src/feedback-vector.h"
#include "src/field-index.h"
#include "src/machine-type.h"
#include "src/objects.h"
@@ -26,16 +27,14 @@ namespace compiler {
class CompilationDependencies;
class Type;
class TypeCache;
+struct ProcessedFeedback;
// Whether we are loading a property or storing to a property.
// For a store during literal creation, do not walk up the prototype chain.
-enum class AccessMode { kLoad, kStore, kStoreInLiteral };
+enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
std::ostream& operator<<(std::ostream&, AccessMode);
-// Mapping of transition source to transition target.
-typedef std::vector<std::pair<Handle<Map>, Handle<Map>>> MapTransitionList;
-
// This class encapsulates all information required to access a certain element.
class ElementAccessInfo final {
public:
@@ -45,13 +44,17 @@ class ElementAccessInfo final {
ElementsKind elements_kind() const { return elements_kind_; }
MapHandles const& receiver_maps() const { return receiver_maps_; }
- MapTransitionList& transitions() { return transitions_; }
- MapTransitionList const& transitions() const { return transitions_; }
+ MapHandles const& transition_sources() const { return transition_sources_; }
+
+ void AddTransitionSource(Handle<Map> map) {
+ CHECK_EQ(receiver_maps_.size(), 1);
+ transition_sources_.push_back(map);
+ }
private:
ElementsKind elements_kind_;
MapHandles receiver_maps_;
- MapTransitionList transitions_;
+ MapHandles transition_sources_;
};
// This class encapsulates all information required to access a certain
@@ -144,44 +147,49 @@ class PropertyAccessInfo final {
class AccessInfoFactory final {
public:
AccessInfoFactory(JSHeapBroker* broker, CompilationDependencies* dependencies,
-
- Handle<Context> native_context, Zone* zone);
+ Zone* zone);
bool ComputeElementAccessInfo(Handle<Map> map, AccessMode access_mode,
- ElementAccessInfo* access_info);
- bool ComputeElementAccessInfos(MapHandles const& maps, AccessMode access_mode,
- ZoneVector<ElementAccessInfo>* access_infos);
+ ElementAccessInfo* access_info) const;
+ bool ComputeElementAccessInfos(
+ FeedbackNexus nexus, MapHandles const& maps, AccessMode access_mode,
+ ZoneVector<ElementAccessInfo>* access_infos) const;
+
bool ComputePropertyAccessInfo(Handle<Map> map, Handle<Name> name,
AccessMode access_mode,
- PropertyAccessInfo* access_info);
+ PropertyAccessInfo* access_info) const;
bool ComputePropertyAccessInfo(MapHandles const& maps, Handle<Name> name,
AccessMode access_mode,
- PropertyAccessInfo* access_info);
- bool ComputePropertyAccessInfos(MapHandles const& maps, Handle<Name> name,
- AccessMode access_mode,
- ZoneVector<PropertyAccessInfo>* access_infos);
+ PropertyAccessInfo* access_info) const;
+ bool ComputePropertyAccessInfos(
+ MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
+ ZoneVector<PropertyAccessInfo>* access_infos) const;
private:
- bool ConsolidateElementLoad(MapHandles const& maps,
- ElementAccessInfo* access_info);
+ bool ConsolidateElementLoad(ProcessedFeedback const& processed,
+ ElementAccessInfo* access_info) const;
bool LookupSpecialFieldAccessor(Handle<Map> map, Handle<Name> name,
- PropertyAccessInfo* access_info);
+ PropertyAccessInfo* access_info) const;
bool LookupTransition(Handle<Map> map, Handle<Name> name,
MaybeHandle<JSObject> holder,
- PropertyAccessInfo* access_info);
+ PropertyAccessInfo* access_info) const;
+ bool ComputeDataFieldAccessInfo(Handle<Map> receiver_map, Handle<Map> map,
+ MaybeHandle<JSObject> holder, int number,
+ AccessMode access_mode,
+ PropertyAccessInfo* access_info) const;
+ bool ComputeAccessorDescriptorAccessInfo(
+ Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map,
+ MaybeHandle<JSObject> holder, int number, AccessMode access_mode,
+ PropertyAccessInfo* access_info) const;
CompilationDependencies* dependencies() const { return dependencies_; }
JSHeapBroker* broker() const { return broker_; }
- Factory* factory() const;
- Isolate* isolate() const { return isolate_; }
- Handle<Context> native_context() const { return native_context_; }
+ Isolate* isolate() const { return broker()->isolate(); }
Zone* zone() const { return zone_; }
JSHeapBroker* const broker_;
CompilationDependencies* const dependencies_;
- Handle<Context> const native_context_;
- Isolate* const isolate_;
- TypeCache const* type_cache_;
+ TypeCache const* const type_cache_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(AccessInfoFactory);
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 9d353050fd..ba35077957 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -1051,11 +1051,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Log10:
ASSEMBLE_IEEE754_UNOP(log10);
break;
- case kIeee754Float64Pow: {
- __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
- __ vmov(d0, d2);
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
break;
- }
case kIeee754Float64Sin:
ASSEMBLE_IEEE754_UNOP(sin);
break;
@@ -3051,8 +3049,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
- int shrink_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3064,16 +3062,16 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
- if (shrink_slots > 0) {
+ if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
- if (info()->IsWasm() && shrink_slots > 128) {
+ if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -3083,22 +3081,19 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((shrink_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+ if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
__ ldr(scratch, FieldMemOperand(
kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ ldr(scratch, MemOperand(scratch));
- __ add(scratch, scratch, Operand(shrink_slots * kSystemPointerSize));
+ __ add(scratch, scratch, Operand(required_slots * kSystemPointerSize));
__ cmp(sp, scratch);
__ b(cs, &done);
}
- __ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset));
- __ Move(cp, Smi::zero());
- __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r2);
+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple,
@@ -3111,11 +3106,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Skip callee-saved and return slots, which are pushed below.
- shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -= frame()->GetReturnSlotCount();
- shrink_slots -= 2 * base::bits::CountPopulation(saves_fp);
- if (shrink_slots > 0) {
- __ sub(sp, sp, Operand(shrink_slots * kSystemPointerSize));
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= frame()->GetReturnSlotCount();
+ required_slots -= 2 * base::bits::CountPopulation(saves_fp);
+ if (required_slots > 0) {
+ __ sub(sp, sp, Operand(required_slots * kSystemPointerSize));
}
}
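
// Illustrative sketch (not part of the patch): the large-frame stack check
// emitted above, in plain C++. The frame fits only if sp is still above the
// real stack limit plus the frame size; a frame at least as large as the
// whole stack traps unconditionally, which also keeps the addition from
// overflowing. Parameter names are simplified stand-ins.
#include <cassert>
#include <cstdint>

bool FrameFits(uintptr_t sp, uintptr_t real_stack_limit,
               uintptr_t frame_size, uintptr_t max_stack_size) {
  if (frame_size >= max_stack_size) return false;  // trap unconditionally
  return sp >= real_stack_limit + frame_size;      // the cs/hs branch above
}

int main() {
  assert(FrameFits(/*sp=*/4096, /*limit=*/1024, /*frame=*/512, /*max=*/8192));
  assert(!FrameFits(4096, 4000, 512, 8192));
  return 0;
}
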
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 9890b58e3d..3022ab019f 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -565,7 +565,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ Ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ LoadTaggedPointerField(
+ scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Ldr(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
Label not_deoptimized;
@@ -693,12 +694,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Check the function's context matches the context argument.
UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
- __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(
+ temp, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Ldr(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(x2,
+ FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(x2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -856,7 +859,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = new (zone()) OutOfLineRecordWrite(
this, object, index, value, scratch0, scratch1, mode,
DetermineStubCallMode(), &unwinding_info_writer_);
- __ Str(value, MemOperand(object, index));
+ __ StoreTaggedField(value, MemOperand(object, index));
__ CheckPageFlagSet(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
ool->entry());
@@ -918,10 +921,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Log10:
ASSEMBLE_IEEE754_UNOP(log10);
break;
- case kIeee754Float64Pow: {
- __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
break;
- }
case kIeee754Float64Sin:
ASSEMBLE_IEEE754_UNOP(sin);
break;
@@ -1555,6 +1557,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ldr(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
+ case kArm64LdrDecompressTaggedSigned:
+ __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kArm64LdrDecompressTaggedPointer:
+ __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kArm64LdrDecompressAnyTagged:
+ __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
case kArm64Str:
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
@@ -1576,6 +1590,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrQ:
__ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
break;
+ case kArm64StrCompressTagged:
+ __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
+ break;
case kArm64DsbIsb:
__ Dsb(FullSystem, BarrierAll);
__ Isb();
@@ -2385,8 +2402,8 @@ void CodeGenerator::AssembleConstructFrame() {
// The frame has been previously padded in CodeGenerator::FinishFrame().
DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
- int shrink_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
call_descriptor->CalleeSavedRegisters());
@@ -2417,11 +2434,11 @@ void CodeGenerator::AssembleConstructFrame() {
// to allocate the remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
- if (info()->IsWasm() && shrink_slots > 128) {
+ if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -2430,14 +2447,14 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if (shrink_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
UseScratchRegisterScope scope(tasm());
Register scratch = scope.AcquireX();
__ Ldr(scratch, FieldMemOperand(
kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ Ldr(scratch, MemOperand(scratch));
- __ Add(scratch, scratch, shrink_slots * kSystemPointerSize);
+ __ Add(scratch, scratch, required_slots * kSystemPointerSize);
__ Cmp(sp, scratch);
__ B(hs, &done);
}
@@ -2453,10 +2470,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ Str(kWasmInstanceRegister,
MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
}
- __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset));
- __ Mov(cp, Smi::zero());
- __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, x2);
+
+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple,
@@ -2468,9 +2483,9 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Skip callee-saved slots, which are pushed below.
- shrink_slots -= saves.Count();
- shrink_slots -= saves_fp.Count();
- shrink_slots -= returns;
+ required_slots -= saves.Count();
+ required_slots -= saves_fp.Count();
+ required_slots -= returns;
// Build remainder of frame, including accounting for and filling-in
// frame-specific header information, i.e. claiming the extra slot that
@@ -2479,16 +2494,17 @@ void CodeGenerator::AssembleConstructFrame() {
switch (call_descriptor->kind()) {
case CallDescriptor::kCallJSFunction:
if (call_descriptor->PushArgumentCount()) {
- __ Claim(shrink_slots + 1); // Claim extra slot for argc.
+ __ Claim(required_slots + 1); // Claim extra slot for argc.
__ Str(kJavaScriptCallArgCountRegister,
MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset));
} else {
- __ Claim(shrink_slots);
+ __ Claim(required_slots);
}
break;
case CallDescriptor::kCallCodeObject: {
UseScratchRegisterScope temps(tasm());
- __ Claim(shrink_slots + 1); // Claim extra slot for frame type marker.
+ __ Claim(required_slots +
+ 1); // Claim extra slot for frame type marker.
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -2496,7 +2512,8 @@ void CodeGenerator::AssembleConstructFrame() {
} break;
case CallDescriptor::kCallWasmFunction: {
UseScratchRegisterScope temps(tasm());
- __ Claim(shrink_slots + 2); // Claim extra slots for marker + instance.
+ __ Claim(required_slots +
+ 2); // Claim extra slots for marker + instance.
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -2506,11 +2523,14 @@ void CodeGenerator::AssembleConstructFrame() {
} break;
case CallDescriptor::kCallWasmImportWrapper: {
UseScratchRegisterScope temps(tasm());
- __ ldr(kJSFunctionRegister,
- FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
- __ ldr(kWasmInstanceRegister,
- FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
- __ Claim(shrink_slots + 2); // Claim extra slots for marker + instance.
+ __ LoadTaggedPointerField(
+ kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ LoadTaggedPointerField(
+ kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Claim(required_slots +
+ 2); // Claim extra slots for marker + instance.
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -2519,7 +2539,7 @@ void CodeGenerator::AssembleConstructFrame() {
MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
} break;
case CallDescriptor::kCallAddress:
- __ Claim(shrink_slots);
+ __ Claim(required_slots);
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 6627b4e6a1..accf8b6621 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -158,7 +158,11 @@ namespace compiler {
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
+ V(Arm64LdrDecompressTaggedSigned) \
+ V(Arm64LdrDecompressTaggedPointer) \
+ V(Arm64LdrDecompressAnyTagged) \
V(Arm64Str) \
+ V(Arm64StrCompressTagged) \
V(Arm64DsbIsb) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 57f23b31fb..4c810ab13a 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -294,6 +294,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ldrsw:
case kArm64LdrW:
case kArm64Ldr:
+ case kArm64LdrDecompressTaggedSigned:
+ case kArm64LdrDecompressTaggedPointer:
+ case kArm64LdrDecompressAnyTagged:
case kArm64Peek:
return kIsLoadOperation;
@@ -307,6 +310,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Strh:
case kArm64StrW:
case kArm64Str:
+ case kArm64StrCompressTagged:
case kArm64DsbIsb:
return kHasSideEffect;
@@ -415,6 +419,9 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Ror32:
return 1;
+ case kArm64LdrDecompressTaggedSigned:
+ case kArm64LdrDecompressTaggedPointer:
+ case kArm64LdrDecompressAnyTagged:
case kArm64Ldr:
case kArm64LdrD:
case kArm64LdrS:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index bb2f5c7af2..e4312de895 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -620,9 +620,24 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArm64LdrW;
immediate_mode = kLoadStoreImm32;
break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kArm64LdrDecompressTaggedSigned;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ opcode = kArm64LdrDecompressTaggedPointer;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case MachineRepresentation::kTagged:
+ opcode = kArm64LdrDecompressAnyTagged;
+ immediate_mode = kLoadStoreImm32;
+ break;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
+#endif
case MachineRepresentation::kWord64:
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
@@ -726,9 +741,18 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kArm64StrW;
immediate_mode = kLoadStoreImm32;
break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTagged:
+ opcode = kArm64StrCompressTagged;
+ immediate_mode = kLoadStoreImm32;
+ break;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
+#endif
case MachineRepresentation::kWord64:
opcode = kArm64Str;
immediate_mode = kLoadStoreImm64;
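
// Illustrative sketch (not part of the patch): what the new decompress and
// compress opcodes conceptually do under V8_COMPRESS_POINTERS -- tagged
// values are stored as 32-bit offsets from a per-heap base, and loads re-add
// the base. This is a simplified model, not V8's actual encoding.
#include <cassert>
#include <cstdint>

uint32_t CompressTagged(uint64_t base, uint64_t tagged) {
  return static_cast<uint32_t>(tagged - base);  // cf. kArm64StrCompressTagged
}

uint64_t DecompressTaggedPointer(uint64_t base, uint32_t compressed) {
  return base + compressed;  // cf. kArm64LdrDecompressTaggedPointer
}

int main() {
  const uint64_t base = 0x0000100000000000ull;
  const uint64_t ptr = base + 0x1234;
  assert(DecompressTaggedPointer(base, CompressTagged(base, ptr)) == ptr);
  return 0;
}
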
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index 2685b109d9..567d7920a9 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -144,9 +144,9 @@ class InstructionOperandConverter {
Constant ToConstant(InstructionOperand* op) {
if (op->IsImmediate()) {
- return gen_->code()->GetImmediate(ImmediateOperand::cast(op));
+ return gen_->instructions()->GetImmediate(ImmediateOperand::cast(op));
}
- return gen_->code()->GetConstant(
+ return gen_->instructions()->GetConstant(
ConstantOperand::cast(op)->virtual_register());
}
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 47d416030c..6214293659 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -43,19 +43,19 @@ class CodeGenerator::JumpTable final : public ZoneObject {
CodeGenerator::CodeGenerator(
Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* code, OptimizedCompilationInfo* info, Isolate* isolate,
- base::Optional<OsrHelper> osr_helper, int start_source_position,
- JumpOptimizationInfo* jump_opt, PoisoningMitigationLevel poisoning_level,
- const AssemblerOptions& options, int32_t builtin_index,
- std::unique_ptr<AssemblerBuffer> buffer)
+ InstructionSequence* instructions, OptimizedCompilationInfo* info,
+ Isolate* isolate, base::Optional<OsrHelper> osr_helper,
+ int start_source_position, JumpOptimizationInfo* jump_opt,
+ PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
+ int32_t builtin_index, std::unique_ptr<AssemblerBuffer> buffer)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
linkage_(linkage),
- code_(code),
+ instructions_(instructions),
unwinding_info_writer_(zone()),
info_(info),
- labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
+ labels_(zone()->NewArray<Label>(instructions->InstructionBlockCount())),
current_block_(RpoNumber::Invalid()),
start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
@@ -80,7 +80,7 @@ CodeGenerator::CodeGenerator(
poisoning_level_(poisoning_level),
block_starts_(zone()),
instr_starts_(zone()) {
- for (int i = 0; i < code->InstructionBlockCount(); ++i) {
+ for (int i = 0; i < instructions->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
CreateFrameAccessState(frame);
@@ -130,6 +130,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
return kSuccess;
}
+void CodeGenerator::MaybeEmitOutOfLineConstantPool() {
+ tasm()->MaybeEmitOutOfLineConstantPool();
+}
+
void CodeGenerator::AssembleCode() {
OptimizedCompilationInfo* info = this->info();
@@ -143,8 +147,8 @@ void CodeGenerator::AssembleCode() {
}
// Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
- if (FLAG_debug_code & (info->code_kind() == Code::OPTIMIZED_FUNCTION ||
- info->code_kind() == Code::BYTECODE_HANDLER)) {
+ if (FLAG_debug_code && (info->code_kind() == Code::OPTIMIZED_FUNCTION ||
+ info->code_kind() == Code::BYTECODE_HANDLER)) {
tasm()->RecordComment("-- Prologue: check code start register --");
AssembleCodeStartRegisterCheck();
}
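
// Illustrative sketch (not part of the patch): the operator bug fixed above.
// Bitwise & between a flag and a condition happens to behave for plain bools,
// but silently breaks as soon as the flag is a wider integer whose low bit is
// clear; && is the intended logical test. The int flag below is a
// hypothetical value chosen to expose the difference.
#include <cassert>

int main() {
  int flag = 2;  // "enabled", but its low bit is 0
  bool cond = true;
  assert((flag & cond) == 0);  // bitwise: 2 & 1 == 0, check never fires
  assert(flag && cond);        // logical: correctly true
  return 0;
}
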
@@ -182,15 +186,15 @@ void CodeGenerator::AssembleCode() {
}
unwinding_info_writer_.SetNumberOfInstructionBlocks(
- code()->InstructionBlockCount());
+ instructions()->InstructionBlockCount());
if (info->trace_turbo_json_enabled()) {
- block_starts_.assign(code()->instruction_blocks().size(), -1);
- instr_starts_.assign(code()->instructions().size(), -1);
+ block_starts_.assign(instructions()->instruction_blocks().size(), -1);
+ instr_starts_.assign(instructions()->instructions().size(), -1);
}
// Assemble instructions in assembly order.
- for (const InstructionBlock* block : code()->ao_blocks()) {
+ for (const InstructionBlock* block : instructions()->ao_blocks()) {
// Align loop headers on vendor recommended boundaries.
if (block->ShouldAlign() && !tasm()->jump_optimization_info()) {
tasm()->CodeTargetAlign();
@@ -244,7 +248,7 @@ void CodeGenerator::AssembleCode() {
}
if (result_ != kSuccess) return;
unwinding_info_writer_.EndInstructionBlock(block);
- }
+ }
// Assemble all out-of-line code.
if (ools_) {
@@ -277,6 +281,10 @@ void CodeGenerator::AssembleCode() {
if (result_ != kSuccess) return;
}
+ // TODO(jgruber): Move all inlined metadata generation into a new,
+ // architecture-independent version of FinishCode. Currently, this includes
+ // the safepoint table, handler table, constant pool, and code comments, in
+ // that order.
FinishCode();
// Emit the jump tables.
@@ -289,8 +297,8 @@ void CodeGenerator::AssembleCode() {
}
// The PerfJitLogger logs code up until here, excluding the safepoint
- // table. Resolve the unwinding info now so it is aware of the same code size
- // as reported by perf.
+ // table. Resolve the unwinding info now so it is aware of the same code
+ // size as reported by perf.
unwinding_info_writer_.Finish(tasm()->pc_offset());
safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
@@ -305,6 +313,7 @@ void CodeGenerator::AssembleCode() {
}
}
+ tasm()->MaybeEmitOutOfLineConstantPool();
tasm()->FinalizeJumpOptimizationInfo();
result_ = kSuccess;
@@ -315,9 +324,9 @@ void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
// instruction. If yes, then perform the masking based on the flags.
if (block->PredecessorCount() != 1) return;
RpoNumber pred_rpo = (block->predecessors())[0];
- const InstructionBlock* pred = code()->InstructionBlockAt(pred_rpo);
+ const InstructionBlock* pred = instructions()->InstructionBlockAt(pred_rpo);
if (pred->code_start() == pred->code_end()) return;
- Instruction* instr = code()->InstructionAt(pred->code_end() - 1);
+ Instruction* instr = instructions()->InstructionAt(pred->code_end() - 1);
FlagsMode mode = FlagsModeField::decode(instr->opcode());
switch (mode) {
case kFlags_branch_and_poison: {
@@ -386,7 +395,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
// Allocate and install the code.
CodeDesc desc;
- tasm()->GetCode(isolate(), &desc);
+ tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
if (unwinding_info_writer_.eh_frame_writer()) {
unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
}
@@ -394,8 +403,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
MaybeHandle<Code> maybe_code = isolate()->factory()->TryNewCode(
desc, info()->code_kind(), Handle<Object>(), info()->builtin_index(),
source_positions, deopt_data, kMovable, true,
- frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset(),
- handler_table_offset_);
+ frame()->GetTotalFrameSlotCount());
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
@@ -414,10 +422,10 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
}
bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
- return code()
+ return instructions()
->InstructionBlockAt(current_block_)
->ao_number()
- .IsNext(code()->InstructionBlockAt(block)->ao_number());
+ .IsNext(instructions()->InstructionBlockAt(block)->ao_number());
}
void CodeGenerator::RecordSafepoint(ReferenceMap* references,
@@ -461,7 +469,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
if (info()->trace_turbo_json_enabled()) {
instr_starts_[i] = tasm()->pc_offset();
}
- Instruction* instr = code()->InstructionAt(i);
+ Instruction* instr = instructions()->InstructionAt(i);
CodeGenResult result = AssembleInstruction(instr, block);
if (result != kSuccess) return result;
}
@@ -629,7 +637,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
if (adjust_stack) AssembleTailCallAfterGap(instr, first_unused_stack_slot);
DCHECK_IMPLIES(
block->must_deconstruct_frame(),
- instr != code()->InstructionAt(block->last_instruction_index()) ||
+ instr != instructions()->InstructionAt(block->last_instruction_index()) ||
instr->IsRet() || instr->IsJump());
if (instr->IsJump() && block->must_deconstruct_frame()) {
AssembleDeconstructFrame();
@@ -701,7 +709,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
SourcePosition source_position = SourcePosition::Unknown();
if (instr->IsNop() && instr->AreMovesRedundant()) return;
- if (!code()->GetSourcePosition(instr, &source_position)) return;
+ if (!instructions()->GetSourcePosition(instr, &source_position)) return;
AssembleSourcePosition(source_position);
}
@@ -891,7 +899,7 @@ DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
Instruction* instr, size_t frame_state_offset) {
InstructionOperandConverter i(this, instr);
int const state_id = i.InputInt32(frame_state_offset);
- return code()->GetDeoptimizationEntry(state_id);
+ return instructions()->GetDeoptimizationEntry(state_id);
}
DeoptimizeKind CodeGenerator::GetDeoptimizationKind(
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 4ab5dc11d5..5be89d29f5 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -89,7 +89,7 @@ class DeoptimizationLiteral {
class CodeGenerator final : public GapResolver::Assembler {
public:
explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* code,
+ InstructionSequence* instructions,
OptimizedCompilationInfo* info, Isolate* isolate,
base::Optional<OsrHelper> osr_helper,
int start_source_position,
@@ -108,7 +108,7 @@ class CodeGenerator final : public GapResolver::Assembler {
OwnedVector<trap_handler::ProtectedInstructionData>
GetProtectedInstructions();
- InstructionSequence* code() const { return code_; }
+ InstructionSequence* instructions() const { return instructions_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
const Frame* frame() const { return frame_access_state_->frame(); }
Isolate* isolate() const { return isolate_; }
@@ -134,6 +134,7 @@ class CodeGenerator final : public GapResolver::Assembler {
Zone* zone() const { return zone_; }
TurboAssembler* tasm() { return &tasm_; }
+ SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
size_t GetSafepointTableOffset() const { return safepoints_.GetCodeOffset(); }
size_t GetHandlerTableOffset() const { return handler_table_offset_; }
@@ -308,6 +309,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int first_unused_stack_slot);
void FinishCode();
+ void MaybeEmitOutOfLineConstantPool();
// ===========================================================================
// ============== Architecture-specific gap resolver methods. ================
@@ -400,7 +402,7 @@ class CodeGenerator final : public GapResolver::Assembler {
Isolate* isolate_;
FrameAccessState* frame_access_state_;
Linkage* const linkage_;
- InstructionSequence* const code_;
+ InstructionSequence* const instructions_;
UnwindingInfoWriter unwinding_info_writer_;
OptimizedCompilationInfo* const info_;
Label* const labels_;
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 9dc6e50e4e..d53194284c 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -982,20 +982,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Log10:
ASSEMBLE_IEEE754_UNOP(log10);
break;
- case kIeee754Float64Pow: {
- // TODO(bmeurer): Improve integration of the stub.
- if (i.InputDoubleRegister(1) != xmm2) {
- __ movaps(xmm2, i.InputDoubleRegister(0));
- __ movaps(xmm1, i.InputDoubleRegister(1));
- } else {
- __ movaps(xmm0, i.InputDoubleRegister(0));
- __ movaps(xmm1, xmm2);
- __ movaps(xmm2, xmm0);
- }
- __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
- __ movaps(i.OutputDoubleRegister(), xmm3);
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
break;
- }
case kIeee754Float64Sin:
ASSEMBLE_IEEE754_UNOP(sin);
break;
@@ -1262,7 +1251,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kSSEFloat32Max: {
- Label compare_nan, compare_swap, done_compare;
+ Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
__ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
@@ -1288,7 +1277,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEFloat64Max: {
- Label compare_nan, compare_swap, done_compare;
+ Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
__ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
@@ -4023,7 +4012,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
Label* const table = AddJumpTable(cases, case_count);
__ cmp(input, Immediate(case_count));
__ j(above_equal, GetLabel(i.InputRpo(1)));
- __ jmp(Operand::JumpTable(input, times_4, table));
+ __ jmp(Operand::JumpTable(input, times_system_pointer_size, table));
}
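
// Illustrative sketch (not part of the patch): the bounds-checked jump table
// emitted above, modeled as a table of function pointers indexed by a
// pointer-size-scaled offset. The cmp / j above_equal pair becomes an
// explicit range check with a default target.
#include <cassert>

using Handler = int (*)();

int CaseA() { return 1; }
int CaseB() { return 2; }
int Default() { return -1; }

int Dispatch(unsigned input) {
  static const Handler table[] = {CaseA, CaseB};
  const unsigned case_count = sizeof(table) / sizeof(table[0]);
  if (input >= case_count) return Default();  // j above_equal
  return table[input]();  // jmp [table + input * pointer size]
}

int main() {
  assert(Dispatch(1) == 2);
  assert(Dispatch(7) == -1);
  return 0;
}
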
// The calling convention for JSFunctions on IA32 passes arguments on the
@@ -4197,8 +4186,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -4210,13 +4199,13 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (shrink_slots > 0) {
+ if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
- if (info()->IsWasm() && shrink_slots > 128) {
+ if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -4226,22 +4215,21 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if (shrink_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
Register scratch = esi;
__ push(scratch);
__ mov(scratch,
FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ mov(scratch, Operand(scratch, 0));
- __ add(scratch, Immediate(shrink_slots * kSystemPointerSize));
+ __ add(scratch, Immediate(required_slots * kSystemPointerSize));
__ cmp(esp, scratch);
__ pop(scratch);
__ j(above_equal, &done);
}
- __ mov(ecx, FieldOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset));
- __ Move(esi, Smi::zero());
- __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, ecx);
+
+ __ wasm_call(wasm::WasmCode::kWasmStackOverflow,
+ RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
@@ -4250,10 +4238,10 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Skip callee-saved and return slots, which are created below.
- shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -= frame()->GetReturnSlotCount();
- if (shrink_slots > 0) {
- __ sub(esp, Immediate(shrink_slots * kSystemPointerSize));
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= frame()->GetReturnSlotCount();
+ if (required_slots > 0) {
+ __ sub(esp, Immediate(required_slots * kSystemPointerSize));
}
}
@@ -4318,7 +4306,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
Register pop_reg = g.ToRegister(pop);
Register scratch_reg = pop_reg == ecx ? edx : ecx;
__ pop(scratch_reg);
- __ lea(esp, Operand(esp, pop_reg, times_4, static_cast<int>(pop_size)));
+ __ lea(esp, Operand(esp, pop_reg, times_system_pointer_size,
+ static_cast<int>(pop_size)));
__ jmp(scratch_reg);
}
}
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 1e241a8ae9..2740412383 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -72,10 +72,11 @@ class IA32OperandGenerator final : public OperandGenerator {
// really have to this here, then we need to find a way to put this
// information on the HeapConstant node already.
#if 0
- // Constants in new space cannot be used as immediates in V8 because
- // the GC does not scan code objects when collecting the new generation.
+ // Constants in young generation cannot be used as immediates in V8
+ // because the GC does not scan code objects when collecting the young
+ // generation.
Handle<HeapObject> value = HeapConstantOf(node->op());
- return !Heap::InNewSpace(*value);
+ return !Heap::InYoungGeneration(*value);
#else
return false;
#endif
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 760f9ffd88..5988a882d7 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -195,7 +195,8 @@ class UnallocatedOperand final : public InstructionOperand {
: UnallocatedOperand(virtual_register) {
DCHECK(policy == FIXED_SLOT);
value_ |= BasicPolicyField::encode(policy);
- value_ |= static_cast<int64_t>(index) << FixedSlotIndexField::kShift;
+ value_ |= static_cast<uint64_t>(static_cast<int64_t>(index))
+ << FixedSlotIndexField::kShift;
DCHECK(this->fixed_slot_index() == index);
}
@@ -306,7 +307,7 @@ class UnallocatedOperand final : public InstructionOperand {
return LifetimeField::decode(value_) == USED_AT_START;
}
- INSTRUCTION_OPERAND_CASTS(UnallocatedOperand, UNALLOCATED);
+ INSTRUCTION_OPERAND_CASTS(UnallocatedOperand, UNALLOCATED)
// The encoding used for UnallocatedOperand operands depends on the policy
// that is
@@ -369,7 +370,7 @@ class ConstantOperand : public InstructionOperand {
return InstructionOperand::New(zone, ConstantOperand(virtual_register));
}
- INSTRUCTION_OPERAND_CASTS(ConstantOperand, CONSTANT);
+ INSTRUCTION_OPERAND_CASTS(ConstantOperand, CONSTANT)
STATIC_ASSERT(KindField::kSize == 3);
class VirtualRegisterField : public BitField64<uint32_t, 3, 32> {};
@@ -382,7 +383,8 @@ class ImmediateOperand : public InstructionOperand {
explicit ImmediateOperand(ImmediateType type, int32_t value)
: InstructionOperand(IMMEDIATE) {
value_ |= TypeField::encode(type);
- value_ |= static_cast<int64_t>(value) << ValueField::kShift;
+ value_ |= static_cast<uint64_t>(static_cast<int64_t>(value))
+ << ValueField::kShift;
}
ImmediateType type() const { return TypeField::decode(value_); }
@@ -401,7 +403,7 @@ class ImmediateOperand : public InstructionOperand {
return InstructionOperand::New(zone, ImmediateOperand(type, value));
}
- INSTRUCTION_OPERAND_CASTS(ImmediateOperand, IMMEDIATE);
+ INSTRUCTION_OPERAND_CASTS(ImmediateOperand, IMMEDIATE)
STATIC_ASSERT(KindField::kSize == 3);
class TypeField : public BitField64<ImmediateType, 3, 1> {};
@@ -420,7 +422,8 @@ class LocationOperand : public InstructionOperand {
DCHECK(IsSupportedRepresentation(rep));
value_ |= LocationKindField::encode(location_kind);
value_ |= RepresentationField::encode(rep);
- value_ |= static_cast<int64_t>(index) << IndexField::kShift;
+ value_ |= static_cast<uint64_t>(static_cast<int64_t>(index))
+ << IndexField::kShift;
}
int index() const {
@@ -518,7 +521,7 @@ class V8_EXPORT_PRIVATE ExplicitOperand
return InstructionOperand::New(zone, ExplicitOperand(kind, rep, index));
}
- INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT);
+ INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT)
};
class AllocatedOperand : public LocationOperand {
@@ -531,7 +534,7 @@ class AllocatedOperand : public LocationOperand {
return InstructionOperand::New(zone, AllocatedOperand(kind, rep, index));
}
- INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
+ INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED)
};
#undef INSTRUCTION_OPERAND_CASTS
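
// Illustrative sketch (not part of the patch): why the operand encodings
// above now cast through uint64_t before shifting. Left-shifting a negative
// signed value is undefined behavior in C++; converting to unsigned first
// makes the shift well-defined two's-complement bit packing. EncodeIndex is
// a hypothetical helper mirroring the pattern in the diff.
#include <cassert>
#include <cstdint>

uint64_t EncodeIndex(int32_t index, int shift) {
  // Sign-extend to 64 bits, then reinterpret as unsigned before shifting.
  return static_cast<uint64_t>(static_cast<int64_t>(index)) << shift;
}

int main() {
  // A negative index keeps its sign-extended bit pattern in the field.
  assert(EncodeIndex(-1, 35) == 0xFFFFFFF800000000ull);
  return 0;
}
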
@@ -973,6 +976,11 @@ class RpoNumber final {
return other.index_ == this->index_ + 1;
}
+ RpoNumber Next() const {
+ DCHECK(IsValid());
+ return RpoNumber(index_ + 1);
+ }
+
// Comparison operators.
bool operator==(RpoNumber other) const { return index_ == other.index_; }
bool operator!=(RpoNumber other) const { return index_ != other.index_; }
diff --git a/deps/v8/src/compiler/backend/live-range-separator.cc b/deps/v8/src/compiler/backend/live-range-separator.cc
index f0173e6ed7..baa4ff1e73 100644
--- a/deps/v8/src/compiler/backend/live-range-separator.cc
+++ b/deps/v8/src/compiler/backend/live-range-separator.cc
@@ -57,11 +57,11 @@ void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
}
void SetSlotUse(TopLevelLiveRange* range) {
- range->set_has_slot_use(false);
+ range->reset_slot_use();
for (const UsePosition* pos = range->first_pos();
!range->has_slot_use() && pos != nullptr; pos = pos->next()) {
if (pos->type() == UsePositionType::kRequiresSlot) {
- range->set_has_slot_use(true);
+ range->register_slot_use(TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
}
}
}
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index af726bd065..835c7d0eb6 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -970,10 +970,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Log2:
ASSEMBLE_IEEE754_UNOP(log2);
break;
- case kIeee754Float64Pow: {
- __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
break;
- }
case kIeee754Float64Sin:
ASSEMBLE_IEEE754_UNOP(sin);
break;
@@ -3173,11 +3172,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
MipsOperandConverter i(this, instr);
- Label done;
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
- Label false_value;
DCHECK_NE(0u, instr->OutputCount());
Register result = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
@@ -3402,8 +3399,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3415,7 +3412,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
@@ -3424,11 +3421,11 @@ void CodeGenerator::AssembleConstructFrame() {
const int returns = frame()->GetReturnSlotCount();
// Skip callee-saved and return slots, which are pushed below.
- shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -= 2 * base::bits::CountPopulation(saves_fpu);
- shrink_slots -= returns;
- if (shrink_slots > 0) {
- __ Subu(sp, sp, Operand(shrink_slots * kSystemPointerSize));
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= 2 * base::bits::CountPopulation(saves_fpu);
+ required_slots -= returns;
+ if (required_slots > 0) {
+ __ Subu(sp, sp, Operand(required_slots * kSystemPointerSize));
}
// Save callee-saved FPU registers.
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 8788fa7ee3..013880ae2a 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -948,10 +948,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Log10:
ASSEMBLE_IEEE754_UNOP(log10);
break;
- case kIeee754Float64Pow: {
- __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
break;
- }
case kIeee754Float64Sin:
ASSEMBLE_IEEE754_UNOP(sin);
break;
@@ -3319,11 +3318,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
MipsOperandConverter i(this, instr);
- Label done;
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
- Label false_value;
DCHECK_NE(0u, instr->OutputCount());
Register result = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
@@ -3561,8 +3558,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3574,7 +3571,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
@@ -3583,11 +3580,11 @@ void CodeGenerator::AssembleConstructFrame() {
const int returns = frame()->GetReturnSlotCount();
// Skip callee-saved and return slots, which are pushed below.
- shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -= base::bits::CountPopulation(saves_fpu);
- shrink_slots -= returns;
- if (shrink_slots > 0) {
- __ Dsubu(sp, sp, Operand(shrink_slots * kSystemPointerSize));
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= base::bits::CountPopulation(saves_fpu);
+ required_slots -= returns;
+ if (required_slots > 0) {
+ __ Dsubu(sp, sp, Operand(required_slots * kSystemPointerSize));
}
if (saves_fpu != 0) {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 95e52452d7..eec5d955ff 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -201,6 +201,8 @@ struct ExtendingLoadMatcher {
DCHECK(m.IsWord64Sar());
if (m.left().IsLoad() && m.right().Is(32) &&
selector_->CanCover(m.node(), m.left().node())) {
+ DCHECK_EQ(selector_->GetEffectiveLevel(node),
+ selector_->GetEffectiveLevel(m.left().node()));
MachineRepresentation rep =
LoadRepresentationOf(m.left().node()->op()).representation();
DCHECK_EQ(3, ElementSizeLog2Of(rep));
@@ -1367,7 +1369,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
if (CanCover(node, value)) {
switch (value->opcode()) {
case IrOpcode::kWord64Sar: {
- if (TryEmitExtendingLoad(this, value, node)) {
+ if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+ TryEmitExtendingLoad(this, value, node)) {
return;
} else {
Int64BinopMatcher m(value);
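The new CanCoverTransitively guard (and the effect-level DCHECK above it) protects the extending-load peephole: a 64-bit load consumed only as Word64Sar(x, 32) and then truncated can be replaced by a single 32-bit load of the upper word, but only if no effectful node sits between the load, the shift, and the truncation, since the fold effectively moves the load across them. Schematically (assuming little-endian layout, as on mips64el):

// TruncateInt64ToInt32(Word64Sar(Load64[base + k], 32))
//   ==>  Load32[base + k + 4]   // upper half of the 64-bit word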
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index b74834df17..b861c3d026 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -11,6 +11,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/double.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/macro-assembler.h"
#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
@@ -1154,6 +1155,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ and_(i.OutputRegister(), i.InputRegister(0),
kSpeculationPoisonRegister);
break;
+ case kPPC_Peek: {
+ // The incoming value is 0-based, but we need a 1-based value.
+ int reverse_slot = i.InputInt32(0) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(i.OutputDoubleRegister(), MemOperand(fp, offset), r0);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ LoadFloat32(i.OutputFloatRegister(), MemOperand(fp, offset), r0);
+ }
+ } else {
+ __ LoadP(i.OutputRegister(), MemOperand(fp, offset), r0);
+ }
+ break;
+ }
case kPPC_And:
if (HasRegisterInput(instr, 1)) {
__ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -1554,11 +1573,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Log10:
ASSEMBLE_IEEE754_UNOP(log10);
break;
- case kIeee754Float64Pow: {
- __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
- __ Move(d1, d3);
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
break;
- }
case kPPC_Neg:
__ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
break;
@@ -2324,8 +2341,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -2336,7 +2353,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
@@ -2346,8 +2363,8 @@ void CodeGenerator::AssembleConstructFrame() {
~kConstantPoolRegister.bit()
: call_descriptor->CalleeSavedRegisters();
- if (shrink_slots > 0) {
- if (info()->IsWasm() && shrink_slots > 128) {
+ if (required_slots > 0) {
+ if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -2357,24 +2374,19 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((shrink_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+ if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
Register scratch = ip;
__ LoadP(
scratch,
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ LoadP(scratch, MemOperand(scratch), r0);
- __ Add(scratch, scratch, shrink_slots * kSystemPointerSize, r0);
+ __ Add(scratch, scratch, required_slots * kSystemPointerSize, r0);
__ cmpl(sp, scratch);
__ bge(&done);
}
- __ LoadP(r5,
- FieldMemOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset),
- r0);
- __ Move(cp, Smi::zero());
- __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r5);
+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple,
@@ -2387,11 +2399,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Skip callee-saved and return slots, which are pushed below.
- shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -= frame()->GetReturnSlotCount();
- shrink_slots -= (kDoubleSize / kSystemPointerSize) *
- base::bits::CountPopulation(saves_fp);
- __ Add(sp, sp, -shrink_slots * kSystemPointerSize, r0);
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= frame()->GetReturnSlotCount();
+ required_slots -= (kDoubleSize / kSystemPointerSize) *
+ base::bits::CountPopulation(saves_fp);
+ __ Add(sp, sp, -required_slots * kSystemPointerSize, r0);
}
// Save callee-saved Double registers.
@@ -2469,7 +2481,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
-void CodeGenerator::FinishCode() { __ EmitConstantPool(); }
+void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
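The new kPPC_Peek opcode reads multi-value call results back out of caller frame slots. The selector hands over a 0-based running slot index, and the code generator converts it to a 1-based reverse slot before mapping it through FrameSlotToFPOffset. A standalone sketch of just that index arithmetic (illustrative helper, not a V8 function):

// Map the selector's 0-based result index to the frame slot that
// FrameSlotToFPOffset expects.
int PeekSlot(int reverse_slot_zero_based, int total_frame_slots) {
  int reverse_slot = reverse_slot_zero_based + 1;  // 0-based -> 1-based
  return total_frame_slots - reverse_slot;
}

Note in EmitPrepareResults below that reverse_slot advances by GetSizeInPointers() even for results without a node, so alignment holes keep later results on the correct slot.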
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 1c241711b9..8491acba4b 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -12,6 +12,7 @@ namespace compiler {
// PPC-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(PPC_Peek) \
V(PPC_And) \
V(PPC_AndComplement) \
V(PPC_Or) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 423dd7ac99..7ed0d1d585 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -121,6 +121,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_LoadWord64:
case kPPC_LoadFloat32:
case kPPC_LoadDouble:
+ case kPPC_AtomicLoadUint8:
+ case kPPC_AtomicLoadUint16:
+ case kPPC_AtomicLoadWord32:
+ case kPPC_AtomicLoadWord64:
+ case kPPC_Peek:
return kIsLoadOperation;
case kPPC_StoreWord8:
@@ -134,12 +139,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_StoreToStackSlot:
return kHasSideEffect;
- case kPPC_AtomicLoadUint8:
- case kPPC_AtomicLoadUint16:
- case kPPC_AtomicLoadWord32:
- case kPPC_AtomicLoadWord64:
- return kIsLoadOperation;
-
case kPPC_AtomicStoreUint8:
case kPPC_AtomicStoreUint16:
case kPPC_AtomicStoreWord32:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 9dcae4d465..d5b93e86d0 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -2232,7 +2232,24 @@ void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
- // TODO(John): Port.
+ PPCOperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!call_descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ Emit(kPPC_Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
}
void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 883e0001a7..758b4031dc 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -8,8 +8,10 @@
#include "src/assembler-inl.h"
#include "src/base/adapters.h"
+#include "src/base/small-vector.h"
#include "src/compiler/linkage.h"
#include "src/string-stream.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
@@ -120,14 +122,15 @@ class LiveRangeBoundArray {
bool ShouldInitialize() { return start_ == nullptr; }
void Initialize(Zone* zone, TopLevelLiveRange* range) {
- length_ = range->GetChildCount();
+ size_t max_child_count = range->GetMaxChildCount();
- start_ = zone->NewArray<LiveRangeBound>(length_);
+ start_ = zone->NewArray<LiveRangeBound>(max_child_count);
+ length_ = 0;
LiveRangeBound* curr = start_;
// Normally, spilled ranges do not need connecting moves, because the spill
// location has been assigned at definition. For ranges spilled in deferred
// blocks, that is not the case, so we need to connect the spilled children.
- for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
+ for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr, ++length_) {
new (curr) LiveRangeBound(i, i->spilled());
}
}
@@ -389,7 +392,8 @@ LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
splitting_pointer_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
- RepresentationField::encode(rep);
+ RepresentationField::encode(rep) |
+ ControlFlowRegisterHint::encode(kUnassignedRegister);
}
void LiveRange::VerifyPositions() const {
@@ -427,6 +431,34 @@ void LiveRange::UnsetAssignedRegister() {
bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
}
+void LiveRange::AttachToNext() {
+ DCHECK_NOT_NULL(next_);
+ DCHECK_NE(TopLevel()->last_child_covers_, next_);
+ last_interval_->set_next(next_->first_interval());
+ next_->first_interval_ = nullptr;
+ last_interval_ = next_->last_interval_;
+ next_->last_interval_ = nullptr;
+ if (first_pos() == nullptr) {
+ first_pos_ = next_->first_pos();
+ } else {
+ UsePosition* ptr = first_pos_;
+ while (ptr->next() != nullptr) {
+ ptr = ptr->next();
+ }
+ ptr->set_next(next_->first_pos());
+ }
+ next_->first_pos_ = nullptr;
+ LiveRange* old_next = next_;
+ next_ = next_->next_;
+ old_next->next_ = nullptr;
+}
+
+void LiveRange::Unspill() {
+ DCHECK(spilled());
+ set_spilled(false);
+ bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
+}
+
void LiveRange::Spill() {
DCHECK(!spilled());
DCHECK(!TopLevel()->HasNoSpillType());
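AttachToNext undoes a split by splicing the next sibling's use intervals and use positions back onto this range; both are singly-linked lists, so the splice is an append-at-tail. A generic sketch of the list operation (types are illustrative):

struct Use {
  int pos;
  Use* next;
};

// Append the donor list onto the receiver, as AttachToNext does for
// use positions (and analogously for use intervals).
Use* Splice(Use* receiver, Use* donor) {
  if (receiver == nullptr) return donor;
  Use* p = receiver;
  while (p->next != nullptr) p = p->next;
  p->next = donor;
  return receiver;
}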
@@ -512,6 +544,7 @@ bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
bool LiveRange::IsTopLevel() const { return top_level_ == this; }
InstructionOperand LiveRange::GetAssignedOperand() const {
+ DCHECK(!IsEmpty());
if (HasRegisterAssigned()) {
DCHECK(!spilled());
return AllocatedOperand(LocationOperand::REGISTER, representation(),
@@ -692,10 +725,26 @@ bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
LifetimePosition start = Start();
LifetimePosition other_start = other->Start();
if (start == other_start) {
+ // Prefer the register that has a controlflow hint, to make sure it gets
+ // allocated first. This allows the control-flow-aware allocation to
+ // just put ranges back into the queue without other ranges interfering.
+ if (controlflow_hint() < other->controlflow_hint()) {
+ return true;
+ }
+ // The other has a smaller hint.
+ if (other->controlflow_hint() != kUnassignedRegister) {
+ return false;
+ }
+ // No hint, use first use position.
UsePosition* pos = first_pos();
- if (pos == nullptr) return false;
UsePosition* other_pos = other->first_pos();
+ // To make the order total, handle the case where both positions are null.
+ if (pos == other_pos) return TopLevel()->vreg() < other->TopLevel()->vreg();
+ if (pos == nullptr) return false;
if (other_pos == nullptr) return true;
+ // To make the order total, handle the case where both positions are equal.
+ if (pos->pos() == other_pos->pos())
+ return TopLevel()->vreg() < other->TopLevel()->vreg();
return pos->pos() < other_pos->pos();
}
return start < other_start;
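The vreg tie-breaks added to ShouldBeAllocatedBefore make the order total over distinct ranges, which matters because the unhandled ranges now live in an ordered set that must support erasing a specific element: with equivalent keys, erase could hit the wrong range. A reduced sketch of the comparator shape (fields simplified to ints):

#include <set>

struct R { int start, first_use, vreg; };

struct Order {
  bool operator()(const R& a, const R& b) const {
    if (a.start != b.start) return a.start < b.start;
    if (a.first_use != b.first_use) return a.first_use < b.first_use;
    return a.vreg < b.vreg;  // vregs are unique, so no two distinct ranges tie
  }
};

// std::set<R, Order>::erase(r) now removes exactly r, never a sibling
// that merely starts at the same position.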
@@ -820,6 +869,7 @@ TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
spilled_in_deferred_blocks_(false),
spill_start_index_(kMaxInt),
last_pos_(nullptr),
+ last_child_covers_(this),
splinter_(nullptr),
has_preassigned_slot_(false) {
bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
@@ -1033,8 +1083,7 @@ void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
TopLevel()->UpdateParentForAllChildren(TopLevel());
TopLevel()->UpdateSpillRangePostMerge(other);
- TopLevel()->set_has_slot_use(TopLevel()->has_slot_use() ||
- other->has_slot_use());
+ TopLevel()->register_slot_use(other->slot_use_kind());
#if DEBUG
Verify();
@@ -1050,6 +1099,15 @@ void TopLevelLiveRange::VerifyChildrenInOrder() const {
}
}
+LiveRange* TopLevelLiveRange::GetChildCovers(LifetimePosition pos) {
+ LiveRange* child = last_child_covers_;
+ while (child != nullptr && child->End() <= pos) {
+ child = child->next();
+ }
+ last_child_covers_ = child;
+ return !child || !child->Covers(pos) ? nullptr : child;
+}
+
void TopLevelLiveRange::Verify() const {
VerifyChildrenInOrder();
for (const LiveRange* child = this; child != nullptr; child = child->next()) {
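GetChildCovers resumes its walk from last_child_covers_, so repeated queries are amortized linear in the total number of children, under the assumption that callers ask for non-decreasing positions (linear scan proceeds forward). A standalone sketch of the forward-only cache, simplified in that the real Covers also consults use intervals:

struct Child {
  int start, end;  // [start, end)
  Child* next;
};

Child* GetChildCovers(Child** cache, int pos) {
  Child* c = *cache;
  while (c != nullptr && c->end <= pos) c = c->next;  // resume, never rewind
  *cache = c;                                         // remember progress
  return (c != nullptr && c->start <= pos) ? c : nullptr;
}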
@@ -1214,6 +1272,21 @@ void LinearScanAllocator::PrintRangeRow(std::ostream& os,
os << std::setw(3) << toplevel->vreg()
<< (toplevel->IsSplinter() ? "s:" : ": ");
+ const char* kind_string;
+ switch (toplevel->spill_type()) {
+ case TopLevelLiveRange::SpillType::kSpillRange:
+ kind_string = "ss";
+ break;
+ case TopLevelLiveRange::SpillType::kDeferredSpillRange:
+ kind_string = "sd";
+ break;
+ case TopLevelLiveRange::SpillType::kSpillOperand:
+ kind_string = "so";
+ break;
+ default:
+ kind_string = "s?";
+ }
+
for (const LiveRange* range = toplevel; range != nullptr;
range = range->next()) {
for (UseInterval* interval = range->first_interval(); interval != nullptr;
@@ -1230,7 +1303,7 @@ void LinearScanAllocator::PrintRangeRow(std::ostream& os,
int max_prefix_length = std::min(length + 1, kMaxPrefixLength);
int prefix;
if (range->spilled()) {
- prefix = snprintf(buffer, max_prefix_length, "|ss");
+ prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string);
} else {
const char* reg_name;
if (range->assigned_register() == kUnassignedRegister) {
@@ -1254,7 +1327,7 @@ void LinearScanAllocator::PrintRangeRow(std::ostream& os,
void LinearScanAllocator::PrintRangeOverview(std::ostream& os) {
PrintBlockRow(os, code()->instruction_blocks());
- for (auto toplevel : data()->fixed_live_ranges()) {
+ for (auto const toplevel : data()->fixed_live_ranges()) {
if (toplevel == nullptr) continue;
PrintRangeRow(os, toplevel);
}
@@ -1413,7 +1486,9 @@ RegisterAllocationData::RegisterAllocationData(
assigned_registers_(nullptr),
assigned_double_registers_(nullptr),
virtual_register_count_(code->VirtualRegisterCount()),
- preassigned_slot_ranges_(zone) {
+ preassigned_slot_ranges_(zone),
+ spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone),
+ zone) {
if (!kSimpleFPAliasing) {
fixed_float_live_ranges_.resize(this->config()->num_float_registers(),
nullptr);
@@ -1557,7 +1632,8 @@ bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
}
SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
- TopLevelLiveRange* range) {
+ TopLevelLiveRange* range, SpillMode spill_mode) {
+ using SpillType = TopLevelLiveRange::SpillType;
DCHECK(!range->HasSpillOperand());
SpillRange* spill_range = range->GetAllocatedSpillRange();
@@ -1565,7 +1641,13 @@ SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
DCHECK(!range->IsSplinter());
spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
}
- range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
+ if (spill_mode == SpillMode::kSpillDeferred &&
+ (range->spill_type() != SpillType::kSpillRange)) {
+ DCHECK(FLAG_turbo_control_flow_aware_allocation);
+ range->set_spill_type(SpillType::kDeferredSpillRange);
+ } else {
+ range->set_spill_type(SpillType::kSpillRange);
+ }
int spill_range_index =
range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
@@ -1577,6 +1659,7 @@ SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
TopLevelLiveRange* range) {
+ DCHECK(FLAG_turbo_preprocess_ranges);
DCHECK(!range->HasSpillOperand());
DCHECK(!range->IsSplinter());
SpillRange* spill_range =
@@ -2224,7 +2307,15 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int vreg = unalloc->virtual_register();
live->Add(vreg);
if (unalloc->HasSlotPolicy()) {
- data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true);
+ if (FLAG_turbo_control_flow_aware_allocation) {
+ data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
+ block->IsDeferred()
+ ? TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
+ : TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
+ } else {
+ data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
+ TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
+ }
}
}
Use(block_start_position, use_pos, input);
@@ -2488,7 +2579,12 @@ void LiveRangeBuilder::BuildLiveRanges() {
if (range == nullptr) continue;
// Give slots to all ranges with a non fixed slot use.
if (range->has_slot_use() && range->HasNoSpillType()) {
- data()->AssignSpillRangeToLiveRange(range);
+ SpillMode spill_mode =
+ range->slot_use_kind() ==
+ TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
+ ? SpillMode::kSpillDeferred
+ : SpillMode::kSpillAtDefinition;
+ data()->AssignSpillRangeToLiveRange(range, spill_mode);
}
// TODO(bmeurer): This is a horrible hack to make sure that for constant
// live ranges, every use requires the constant to be in a register.
@@ -2515,7 +2611,8 @@ void LiveRangeBuilder::BuildLiveRanges() {
int slot_id = preassigned.second;
SpillRange* spill = range->HasSpillRange()
? range->GetSpillRange()
- : data()->AssignSpillRangeToLiveRange(range);
+ : data()->AssignSpillRangeToLiveRange(
+ range, SpillMode::kSpillAtDefinition);
spill->set_assigned_slot(slot_id);
}
#ifdef DEBUG
@@ -2744,8 +2841,10 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
TopLevelLiveRange* range = data()->live_ranges()[i];
if (!CanProcessRange(range)) continue;
+ // Only treat a range as defined by a memory operand if we are guaranteed to
+ // spill it or it already has a spill operand.
if (range->HasNoSpillType() ||
- (range->HasSpillRange() && !range->has_slot_use())) {
+ (range->HasSpillRange() && !range->has_non_deferred_slot_use())) {
continue;
}
LifetimePosition start = range->Start();
@@ -2765,7 +2864,7 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
// If the range already has a spill operand and it doesn't need a
// register immediately, split it and spill the first part of the range.
if (pos == nullptr) {
- Spill(range);
+ Spill(range, SpillMode::kSpillAtDefinition);
} else if (pos->pos() > range->Start().NextStart()) {
// Do not spill live range eagerly if use position that can benefit from
// the register is too close to the start of live range.
@@ -2778,7 +2877,7 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
SplitRangeAt(range, split_pos);
- Spill(range);
+ Spill(range, SpillMode::kSpillAtDefinition);
}
}
}
@@ -2884,14 +2983,28 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
return pos;
}
-void RegisterAllocator::Spill(LiveRange* range) {
+void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
DCHECK(!range->spilled());
+ DCHECK(spill_mode == SpillMode::kSpillAtDefinition ||
+ GetInstructionBlock(code(), range->Start())->IsDeferred());
TopLevelLiveRange* first = range->TopLevel();
- TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id());
+ TRACE("Spilling live range %d:%d mode %d\n", first->vreg(),
+ range->relative_id(), spill_mode);
+ TRACE("Starting spill type is %d\n", static_cast<int>(first->spill_type()));
if (first->HasNoSpillType()) {
- data()->AssignSpillRangeToLiveRange(first);
- }
+ TRACE("New spill range needed");
+ data()->AssignSpillRangeToLiveRange(first, spill_mode);
+ }
+ // Upgrade the spillmode, in case this was only spilled in deferred code so
+ // far.
+ if ((spill_mode == SpillMode::kSpillAtDefinition) &&
+ (first->spill_type() ==
+ TopLevelLiveRange::SpillType::kDeferredSpillRange)) {
+ TRACE("Upgrading\n");
+ first->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
+ }
+ TRACE("Final spill type is %d\n", static_cast<int>(first->spill_type()));
range->Spill();
}
@@ -2913,12 +3026,398 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
inactive_live_ranges().reserve(8);
}
+void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) {
+ if (range->next() != nullptr && range->next()->ShouldRecombine()) {
+ LiveRange* to_remove = range->next();
+ TRACE("Recombining %d:%d with %d\n", range->TopLevel()->vreg(),
+ range->relative_id(), to_remove->relative_id());
+
+ // Remove the range from unhandled, as attaching it will change its
+ // state and hence ordering in the unhandled set.
+ auto removed_cnt = unhandled_live_ranges().erase(to_remove);
+ DCHECK_EQ(removed_cnt, 1);
+ USE(removed_cnt);
+
+ range->AttachToNext();
+ } else if (range->next() != nullptr) {
+ TRACE("No recombine for %d:%d to %d\n", range->TopLevel()->vreg(),
+ range->relative_id(), range->next()->relative_id());
+ }
+}
+
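Note the order of operations in MaybeUndoPreviousSplit: the sibling is erased from the unhandled set before AttachToNext mutates it, because attaching changes the fields the set's comparator reads, and mutating an element's ordering key while it sits in an ordered container corrupts the container. The general pattern, in miniature:

#include <set>

int main() {
  std::multiset<int> keys{1, 5, 9};
  auto it = keys.find(5);
  int v = *it;
  keys.erase(it);      // remove first...
  keys.insert(v + 7);  // ...then "mutate" by reinserting under the new key
}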
+void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
+ LifetimePosition position,
+ SpillMode spill_mode) {
+ for (auto it = active_live_ranges().begin();
+ it != active_live_ranges().end();) {
+ LiveRange* active_range = *it;
+ TopLevelLiveRange* toplevel = (*it)->TopLevel();
+ auto found = to_be_live.find({toplevel, kUnassignedRegister});
+ if (found == to_be_live.end()) {
+ // This range is not contained in {to_be_live}, so spill it.
+ // Fixed registers are exempt from this. They might have been
+ // added from inactive at the block boundary, but we know that
+ // they cannot conflict, as they are built before register
+ // allocation starts. It would be algorithmically fine to split
+ // them and reschedule, but the code does not allow us to do this.
+ if (toplevel->IsFixed()) {
+ TRACE("Keeping reactivated fixed range for %s\n",
+ RegisterName(toplevel->assigned_register()));
+ ++it;
+ } else {
+ // When spilling a previously spilled/reloaded range, we add back the
+ // tail that we might have split off when we reloaded/spilled it
+ // previously. Otherwise we might keep generating small split-offs.
+ MaybeUndoPreviousSplit(active_range);
+ TRACE("Putting back %d:%d\n", toplevel->vreg(),
+ active_range->relative_id());
+ LiveRange* split = SplitRangeAt(active_range, position);
+ DCHECK_NE(split, active_range);
+
+ // Make sure we revisit this range once it has a use that requires
+ // a register.
+ UsePosition* next_use = split->NextRegisterPosition(position);
+ if (next_use != nullptr) {
+ // Move to the start of the gap before the use so that we have space
+ // to perform the potential reload. Otherwise, do not spill but add
+ // to unhandled for reallocation.
+ LifetimePosition revisit_at = next_use->pos().FullStart();
+ TRACE("Next use at %d\n", revisit_at.value());
+ if (!data()->IsBlockBoundary(revisit_at)) {
+ // Leave some space so we have enough gap room.
+ revisit_at = revisit_at.PrevStart().FullStart();
+ }
+ // If this range became live right at the block boundary that we are
+ // currently processing, we do not need to split it. Instead move it
+ // to unhandled right away.
+ if (position < revisit_at) {
+ LiveRange* third_part = SplitRangeAt(split, revisit_at);
+ DCHECK_NE(split, third_part);
+ Spill(split, spill_mode);
+ TRACE("Marking %d:%d to recombine\n", toplevel->vreg(),
+ third_part->relative_id());
+ third_part->SetRecombine();
+ AddToUnhandled(third_part);
+ } else {
+ AddToUnhandled(split);
+ }
+ } else {
+ Spill(split, spill_mode);
+ }
+ it = ActiveToHandled(it);
+ }
+ } else {
+ // This range is contained in {to_be_live}, so we can keep it.
+ int expected_register = (*found).expected_register;
+ to_be_live.erase(found);
+ if (expected_register == active_range->assigned_register()) {
+ // Was live and in the correct register; simply pass through.
+ TRACE("Keeping %d:%d in %s\n", toplevel->vreg(),
+ active_range->relative_id(),
+ RegisterName(active_range->assigned_register()));
+ ++it;
+ } else {
+ // Was live but in the wrong register. Split and schedule for
+ // allocation.
+ TRACE("Scheduling %d:%d\n", toplevel->vreg(),
+ active_range->relative_id());
+ LiveRange* split = SplitRangeAt(active_range, position);
+ split->set_controlflow_hint(expected_register);
+ AddToUnhandled(split);
+ it = ActiveToHandled(it);
+ }
+ }
+ }
+}
+
+LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range,
+ int reg) {
+ // We know the register is currently free but it might be in
+ // use by a currently inactive range. So we might not be able
+ // to reload for the full distance. In such case, split here.
+ // TODO(herhut):
+ // It might be better if we could use the normal unhandled queue and
+ // give reloading registers precedence. That way we would compute the
+ // intersection for the entire future.
+ LifetimePosition new_end = range->End();
+ for (const auto inactive : inactive_live_ranges()) {
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (inactive->assigned_register() != reg) continue;
+ } else {
+ bool conflict = inactive->assigned_register() == reg;
+ if (!conflict) {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(range->representation(), reg,
+ inactive->representation(),
+ &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases-- && !conflict) {
+ int aliased_reg = alias_base_index + aliases;
+ if (aliased_reg == reg) {
+ conflict = true;
+ }
+ }
+ }
+ if (!conflict) continue;
+ }
+ for (auto interval = inactive->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ if (interval->start() > new_end) break;
+ if (interval->end() <= range->Start()) continue;
+ if (new_end > interval->start()) new_end = interval->start();
+ }
+ }
+ if (new_end != range->End()) {
+ TRACE("Found new end for %d:%d at %d\n", range->TopLevel()->vreg(),
+ range->relative_id(), new_end.value());
+ LiveRange* tail = SplitRangeAt(range, new_end);
+ AddToUnhandled(tail);
+ }
+ SetLiveRangeAssignedRegister(range, reg);
+ return range;
+}
+
+void LinearScanAllocator::ReloadLiveRanges(RangeWithRegisterSet& to_be_live,
+ LifetimePosition position) {
+ // Assumption: All ranges in {to_be_live} are currently spilled and there are
+ // no conflicting registers in the active ranges.
+ // The former is ensured by SpillNotLiveRanges; the latter holds by
+ // construction of the to_be_live set.
+ for (RangeWithRegister range_with_register : to_be_live) {
+ TopLevelLiveRange* range = range_with_register.range;
+ int reg = range_with_register.expected_register;
+ LiveRange* to_resurrect = range->GetChildCovers(position);
+ if (to_resurrect == nullptr) {
+ // While the range was live until the end of the predecessor block, it is
+ // not live in this block. Either there is a lifetime gap or the range
+ // died.
+ TRACE("No candidate for %d at %d\n", range->vreg(), position.value());
+ } else {
+ // We might be resurrecting a range that we previously spilled until its
+ // next use. In such cases, we have to unsplit it before processing, as
+ // otherwise we might get register changes from one range to the other
+ // in the middle of blocks.
+ // If there is a gap between this range and the next, we can just keep
+ // it as a register change won't hurt.
+ MaybeUndoPreviousSplit(to_resurrect);
+ if (to_resurrect->Start() == position) {
+ // This range already starts at this block. It might have been spilled,
+ // so we have to unspill it. Otherwise, it is already in the unhandled
+ // queue waiting for processing.
+ DCHECK(!to_resurrect->HasRegisterAssigned());
+ TRACE("Reload %d:%d starting at %d itself\n", range->vreg(),
+ to_resurrect->relative_id(), position.value());
+ if (to_resurrect->spilled()) {
+ to_resurrect->Unspill();
+ to_resurrect->set_controlflow_hint(reg);
+ AddToUnhandled(to_resurrect);
+ } else {
+ // Assign the preassigned register if we know it. Otherwise, there is
+ // nothing to do, as the range is already in unhandled.
+ if (reg != kUnassignedRegister) {
+ auto erased_cnt = unhandled_live_ranges().erase(to_resurrect);
+ DCHECK_EQ(erased_cnt, 1);
+ USE(erased_cnt);
+ // We know that there is no conflict with active ranges, so just
+ // assign the register to the range.
+ to_resurrect = AssignRegisterOnReload(to_resurrect, reg);
+ AddToActive(to_resurrect);
+ }
+ }
+ } else {
+ // This range was spilled before. We have to split it and schedule the
+ // second part for allocation (or assign the register if we know).
+ DCHECK(to_resurrect->spilled());
+ LiveRange* split = SplitRangeAt(to_resurrect, position);
+ TRACE("Reload %d:%d starting at %d as %d\n", range->vreg(),
+ to_resurrect->relative_id(), split->Start().value(),
+ split->relative_id());
+ DCHECK_NE(split, to_resurrect);
+ if (reg != kUnassignedRegister) {
+ // We know that there is no conflict with active ranges, so just
+ // assign the register to the range.
+ split = AssignRegisterOnReload(split, reg);
+ AddToActive(split);
+ } else {
+ // Let normal register assignment find a suitable register.
+ split->set_controlflow_hint(reg);
+ AddToUnhandled(split);
+ }
+ }
+ }
+ }
+}
+
+RpoNumber LinearScanAllocator::ChooseOneOfTwoPredecessorStates(
+ InstructionBlock* current_block, LifetimePosition boundary) {
+ using SmallRangeVector =
+ base::SmallVector<TopLevelLiveRange*,
+ RegisterConfiguration::kMaxRegisters>;
+ // Pick the state that would generate the least spill/reloads.
+ // Compute vectors of ranges with imminent use for both sides.
+ // As GetChildCovers is cached, it is cheaper to repeatedly
+ // call it rather than compute a shared set first.
+ auto& left = data()->GetSpillState(current_block->predecessors()[0]);
+ auto& right = data()->GetSpillState(current_block->predecessors()[1]);
+ SmallRangeVector left_used;
+ for (const auto item : left) {
+ LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
+ if (at_next_block != nullptr &&
+ at_next_block->NextUsePositionRegisterIsBeneficial(boundary) !=
+ nullptr) {
+ left_used.emplace_back(item->TopLevel());
+ }
+ }
+ SmallRangeVector right_used;
+ for (const auto item : right) {
+ LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
+ if (at_next_block != nullptr &&
+ at_next_block->NextUsePositionRegisterIsBeneficial(boundary) !=
+ nullptr) {
+ right_used.emplace_back(item->TopLevel());
+ }
+ }
+ if (left_used.empty() && right_used.empty()) {
+ // There are no beneficial register uses. Look at any use at
+ // all. We do not account for all uses, like flowing into a phi.
+ // So we just look at ranges still being live.
+ TRACE("Looking at only uses\n");
+ for (const auto item : left) {
+ LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
+ if (at_next_block != nullptr &&
+ at_next_block->NextUsePosition(boundary) != nullptr) {
+ left_used.emplace_back(item->TopLevel());
+ }
+ }
+ for (const auto item : right) {
+ LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
+ if (at_next_block != nullptr &&
+ at_next_block->NextUsePosition(boundary) != nullptr) {
+ right_used.emplace_back(item->TopLevel());
+ }
+ }
+ }
+ // Now left_used and right_used contain the ranges that matter.
+ // Count which side has more of them; that side wins the vote.
+ TRACE("Vote went %zu vs %zu\n", left_used.size(), right_used.size());
+ return left_used.size() > right_used.size()
+ ? current_block->predecessors()[0]
+ : current_block->predecessors()[1];
+}
+
+void LinearScanAllocator::ComputeStateFromManyPredecessors(
+ InstructionBlock* current_block, RangeWithRegisterSet* to_be_live) {
+ struct Vote {
+ size_t count;
+ int used_registers[RegisterConfiguration::kMaxRegisters];
+ };
+ ZoneMap<TopLevelLiveRange*, Vote> counts(data()->allocation_zone());
+ int deferred_blocks = 0;
+ for (RpoNumber pred : current_block->predecessors()) {
+ if (!ConsiderBlockForControlFlow(current_block, pred)) {
+ // Back edges of a loop count as deferred here too.
+ deferred_blocks++;
+ continue;
+ }
+ const auto& pred_state = data()->GetSpillState(pred);
+ for (LiveRange* range : pred_state) {
+ // A backwards-working spill heuristic might have stolen the register
+ // since we stored this state, so the range may have lost it. Ignore those.
+ if (!range->HasRegisterAssigned()) continue;
+ TopLevelLiveRange* toplevel = range->TopLevel();
+ auto previous = counts.find(toplevel);
+ if (previous == counts.end()) {
+ auto result = counts.emplace(std::make_pair(toplevel, Vote{1, {0}}));
+ CHECK(result.second);
+ result.first->second.used_registers[range->assigned_register()]++;
+ } else {
+ previous->second.count++;
+ previous->second.used_registers[range->assigned_register()]++;
+ }
+ }
+ }
+
+ // Choose the live ranges from the majority.
+ const size_t majority =
+ (current_block->PredecessorCount() + 2 - deferred_blocks) / 2;
+ bool taken_registers[RegisterConfiguration::kMaxRegisters] = {0};
+ auto assign_to_live = [this, counts, majority](
+ std::function<bool(TopLevelLiveRange*)> filter,
+ RangeWithRegisterSet* to_be_live,
+ bool* taken_registers) {
+ for (const auto& val : counts) {
+ if (!filter(val.first)) continue;
+ if (val.second.count >= majority) {
+ int register_max = 0;
+ int reg = kUnassignedRegister;
+ for (int idx = 0; idx < RegisterConfiguration::kMaxRegisters; idx++) {
+ int uses = val.second.used_registers[idx];
+ if (uses == 0) continue;
+ if (uses > register_max) {
+ reg = idx;
+ register_max = val.second.used_registers[idx];
+ } else if (taken_registers[reg] && uses == register_max) {
+ reg = idx;
+ }
+ }
+ if (taken_registers[reg]) {
+ reg = kUnassignedRegister;
+ } else {
+ taken_registers[reg] = true;
+ }
+ to_be_live->emplace(val.first, reg);
+ TRACE("Reset %d as live due vote %zu in %s\n",
+ val.first->TopLevel()->vreg(), val.second.count,
+ reg == kUnassignedRegister ? "unassigned" : RegisterName(reg));
+ }
+ }
+ };
+ // First round, process fixed registers, as these have precedence.
+ // There is only one fixed range per register, so we cannot have
+ // conflicts.
+ assign_to_live([](TopLevelLiveRange* r) { return r->IsFixed(); }, to_be_live,
+ taken_registers);
+ // Second round, process the rest.
+ assign_to_live([](TopLevelLiveRange* r) { return !r->IsFixed(); }, to_be_live,
+ taken_registers);
+}
+
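The majority threshold above counts only predecessors that contributed state (deferred blocks and back edges are excluded), and the +2 turns integer division into a strict majority: for n considered predecessors, (n + 2) / 2 == n / 2 + 1. A check of that identity, using a hypothetical helper that mirrors the expression in the code:

constexpr size_t MajorityThreshold(size_t predecessors, size_t deferred) {
  return (predecessors + 2 - deferred) / 2;
}

// 5 predecessors, 1 deferred: 4 votes possible, 3 needed, a strict majority.
static_assert(MajorityThreshold(5, 1) == 3, "strict majority of 4 votes");
static_assert(MajorityThreshold(4, 0) == 3, "strict majority of 4 votes");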
+bool LinearScanAllocator::ConsiderBlockForControlFlow(
+ InstructionBlock* current_block, RpoNumber predecessor) {
+ // We ignore predecessors on back edges when looking for control flow effects,
+ // as those lie in the future of allocation and we have no data yet. Also,
+ // deferred blocks are ignored on deferred-to-non-deferred boundaries, as we
+ // do not want them to influence allocation of non-deferred code.
+ return (predecessor < current_block->rpo_number()) &&
+ (current_block->IsDeferred() ||
+ !code()->InstructionBlockAt(predecessor)->IsDeferred());
+}
+
+bool LinearScanAllocator::BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
+ const InstructionBlock* block) {
+ if (block->IsDeferred()) return true;
+ if (block->PredecessorCount() == 0) return true;
+ bool pred_is_deferred = false;
+ for (auto pred : block->predecessors()) {
+ if (pred.IsNext(block->rpo_number())) {
+ pred_is_deferred = code()->InstructionBlockAt(pred)->IsDeferred();
+ break;
+ }
+ }
+ return !pred_is_deferred;
+}
+
void LinearScanAllocator::AllocateRegisters() {
DCHECK(unhandled_live_ranges().empty());
DCHECK(active_live_ranges().empty());
DCHECK(inactive_live_ranges().empty());
SplitAndSpillRangesDefinedByMemoryOperand();
+ data()->ResetSpillState();
+
+ if (FLAG_trace_alloc) {
+ PrintRangeOverview(std::cout);
+ }
const size_t live_ranges_size = data()->live_ranges().size();
for (TopLevelLiveRange* range : data()->live_ranges()) {
@@ -2951,16 +3450,180 @@ void LinearScanAllocator::AllocateRegisters() {
}
}
- while (!unhandled_live_ranges().empty()) {
- LiveRange* current = *unhandled_live_ranges().begin();
- unhandled_live_ranges().erase(unhandled_live_ranges().begin());
- LifetimePosition position = current->Start();
+ RpoNumber last_block = RpoNumber::FromInt(0);
+ RpoNumber max_blocks =
+ RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
+ LifetimePosition next_block_boundary =
+ LifetimePosition::InstructionFromInstructionIndex(
+ data()
+ ->code()
+ ->InstructionBlockAt(last_block)
+ ->last_instruction_index())
+ .NextFullStart();
+ SpillMode spill_mode = SpillMode::kSpillAtDefinition;
+
+ // Process all ranges. We also need to ensure that we have seen all block
+ // boundaries. Linear scan might have assigned and spilled ranges before
+ // reaching the last block and hence we would ignore control flow effects for
+ // those. Not only does this produce a potentially bad assignment, it also
+ // breaks with the invariant that we undo spills that happen in deferred code
+ // when crossing a deferred/non-deferred boundary.
+ while (
+ !unhandled_live_ranges().empty() ||
+ (FLAG_turbo_control_flow_aware_allocation && last_block < max_blocks)) {
+ LiveRange* current = unhandled_live_ranges().empty()
+ ? nullptr
+ : *unhandled_live_ranges().begin();
+ LifetimePosition position =
+ current ? current->Start() : next_block_boundary;
#ifdef DEBUG
allocation_finger_ = position;
#endif
+ if (FLAG_turbo_control_flow_aware_allocation) {
+ // Splintering is not supported.
+ CHECK(!FLAG_turbo_preprocess_ranges);
+ // Check whether we just moved across a block boundary. This will trigger
+ // for the first range that is past the current boundary.
+ if (position >= next_block_boundary) {
+ TRACE("Processing boundary at %d leaving %d\n",
+ next_block_boundary.value(), last_block.ToInt());
+
+ // Forward state to before block boundary
+ LifetimePosition end_of_block = next_block_boundary.PrevStart().End();
+ ForwardStateTo(end_of_block);
+
+ // Remember this state.
+ InstructionBlock* current_block = data()->code()->GetInstructionBlock(
+ next_block_boundary.ToInstructionIndex());
+
+ // Store current spill state (as the state at end of block). For
+ // simplicity, we store the active ranges, i.e., the live ranges that
+ // are not spilled.
+ data()->RememberSpillState(last_block, active_live_ranges());
+
+ // Only reset the state if this was not a direct fallthrough. Otherwise
+ // control flow resolution will get confused (it does not expect changes
+ // across fallthrough edges).
+ bool fallthrough = (current_block->PredecessorCount() == 1) &&
+ current_block->predecessors()[0].IsNext(
+ current_block->rpo_number());
+
+ spill_mode = current_block->IsDeferred()
+ ? SpillMode::kSpillDeferred
+ : SpillMode::kSpillAtDefinition;
+
+ if (!fallthrough) {
+#ifdef DEBUG
+ // Allow allocation at current position.
+ allocation_finger_ = next_block_boundary;
+#endif
+
+ // We are currently at next_block_boundary - 1. Move the state to the
+ // actual block boundary position. In particular, we have to
+ // reactivate inactive ranges so that they get rescheduled for
+ // allocation if they were not live at the predecessors.
+ ForwardStateTo(next_block_boundary);
+
+ RangeWithRegisterSet to_be_live(data()->allocation_zone());
+
+ // If we end up deciding to use the state of the immediate
+ // predecessor, it is better not to perform a change. It would lead to
+ // the same outcome anyway.
+ // This may never happen on boundaries between deferred and
+ // non-deferred code, as we rely on explicit respill to ensure we
+ // spill at definition.
+ bool no_change_required = false;
+
+ auto pick_state_from = [this, current_block](
+ RpoNumber pred,
+ RangeWithRegisterSet* to_be_live) -> bool {
+ TRACE("Using information from B%d\n", pred.ToInt());
+ // If this is a fall-through that is not across a deferred
+ // boundary, there is nothing to do.
+ bool is_noop = pred.IsNext(current_block->rpo_number());
+ if (!is_noop) {
+ auto& spill_state = data()->GetSpillState(pred);
+ TRACE("Not a fallthrough. Adding %zu elements...\n",
+ spill_state.size());
+ for (const auto range : spill_state) {
+ // Filter out ranges whose register was stolen by a backwards-working
+ // spill heuristic. These have been spilled after the fact, so ignore
+ // them.
+ if (!range->HasRegisterAssigned()) continue;
+ to_be_live->emplace(range);
+ }
+ }
+ return is_noop;
+ };
+
+ // Multiple cases here:
+ // 1) We have a single predecessor => this is a control flow split, so
+ // just restore the predecessor state.
+ // 2) We have two predecessors => this is a conditional, so break ties
+ // based on forward uses, trying to benefit the same branch if in
+ // doubt (make one path fast).
+ // 3) We have many predecessors => this is a switch. Compute union
+ // based on majority, break ties by looking forward.
+ if (current_block->PredecessorCount() == 1) {
+ TRACE("Single predecessor for B%d\n",
+ current_block->rpo_number().ToInt());
+ no_change_required =
+ pick_state_from(current_block->predecessors()[0], &to_be_live);
+ } else if (current_block->PredecessorCount() == 2) {
+ TRACE("Two predecessors for B%d\n",
+ current_block->rpo_number().ToInt());
+ // If one of the branches does not contribute any information,
+ // e.g. because it is deferred or a back edge, we can short cut
+ // here right away.
+ RpoNumber chosen_predecessor = RpoNumber::Invalid();
+ if (!ConsiderBlockForControlFlow(
+ current_block, current_block->predecessors()[0])) {
+ chosen_predecessor = current_block->predecessors()[1];
+ } else if (!ConsiderBlockForControlFlow(
+ current_block, current_block->predecessors()[1])) {
+ chosen_predecessor = current_block->predecessors()[0];
+ } else {
+ chosen_predecessor = ChooseOneOfTwoPredecessorStates(
+ current_block, next_block_boundary);
+ }
+ no_change_required =
+ pick_state_from(chosen_predecessor, &to_be_live);
+
+ } else {
+ // Merge at the end of, e.g., a switch.
+ ComputeStateFromManyPredecessors(current_block, &to_be_live);
+ }
+
+ if (!no_change_required) {
+ SpillNotLiveRanges(to_be_live, next_block_boundary, spill_mode);
+ ReloadLiveRanges(to_be_live, next_block_boundary);
+ }
+
+ // TODO(herhut): Check removal.
+ // Now forward to the current position.
+ ForwardStateTo(next_block_boundary);
+ }
+ // Update block information
+ last_block = current_block->rpo_number();
+ next_block_boundary = LifetimePosition::InstructionFromInstructionIndex(
+ current_block->last_instruction_index())
+ .NextFullStart();
+
+ // We might have created new unhandled live ranges, so cycle around the
+ // loop to make sure we pick the topmost range in unhandled for
+ // processing.
+ continue;
+ }
+ }
+
+ DCHECK_NOT_NULL(current);
+
TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
current->relative_id(), position.value());
+ // Now we can erase current, as we are sure to process it.
+ unhandled_live_ranges().erase(unhandled_live_ranges().begin());
+
if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
continue;
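The loop above now has two event sources: range starts from the unhandled set and block boundaries, whichever comes first. Boundaries must be visited even after the last range is allocated, which is why the loop keeps running on last_block < max_blocks alone. A skeleton of the control flow, condensed from the hunk (control-flow-aware mode only):

// while (!unhandled.empty() || last_block < max_blocks) {
//   current  = unhandled.empty() ? nullptr : *unhandled.begin();
//   position = current ? current->Start() : next_block_boundary;
//   if (position >= next_block_boundary) {
//     // spill/reload reconciliation against predecessor states,
//     // then advance last_block / next_block_boundary
//     continue;  // re-pick the top of unhandled
//   }
//   unhandled.erase(unhandled.begin());
//   ProcessCurrentRange(current, spill_mode);
// }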
@@ -2968,7 +3631,7 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(!current->HasRegisterAssigned() && !current->spilled());
- ProcessCurrentRange(current);
+ ProcessCurrentRange(current, spill_mode);
}
if (FLAG_trace_alloc) {
@@ -2977,12 +3640,13 @@ void LinearScanAllocator::AllocateRegisters() {
}
bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
+ DCHECK(!FLAG_turbo_control_flow_aware_allocation);
DCHECK(range->TopLevel()->IsSplinter());
// If we can spill the whole range, great. Otherwise, split above the
// first use needing a register and spill the top part.
const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
if (next_reg == nullptr) {
- Spill(range);
+ Spill(range, SpillMode::kSpillAtDefinition);
return true;
} else if (range->FirstHintPosition() == nullptr) {
// If there was no hint, but we have a use position requiring a
@@ -2991,7 +3655,7 @@ bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
} else if (next_reg->pos().PrevStart() > range->Start()) {
LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
AddToUnhandled(tail);
- Spill(range);
+ Spill(range, SpillMode::kSpillAtDefinition);
return true;
}
return false;
@@ -3107,6 +3771,19 @@ void LinearScanAllocator::ForwardStateTo(LifetimePosition position) {
}
}
+int LinearScanAllocator::LastDeferredInstructionIndex(InstructionBlock* start) {
+ DCHECK(start->IsDeferred());
+ RpoNumber last_block =
+ RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
+ while ((start->rpo_number() < last_block)) {
+ InstructionBlock* next =
+ code()->InstructionBlockAt(start->rpo_number().Next());
+ if (!next->IsDeferred()) break;
+ start = next;
+ }
+ return start->last_instruction_index();
+}
+
void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
int* num_regs, int* num_codes,
const int** codes) const {
@@ -3224,16 +3901,18 @@ void LinearScanAllocator::FindFreeRegistersForRange(
//
// - a phi. The same analysis as in the case of the input constraint applies.
//
-void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
+void LinearScanAllocator::ProcessCurrentRange(LiveRange* current,
+ SpillMode spill_mode) {
EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
free_until_pos;
FindFreeRegistersForRange(current, free_until_pos);
if (!TryAllocatePreferredReg(current, free_until_pos)) {
if (current->TopLevel()->IsSplinter()) {
+ DCHECK(!FLAG_turbo_control_flow_aware_allocation);
if (TrySplitAndSpillSplinter(current)) return;
}
if (!TryAllocateFreeReg(current, free_until_pos)) {
- AllocateBlockedReg(current);
+ AllocateBlockedReg(current, spill_mode);
}
}
if (current->HasRegisterAssigned()) {
@@ -3244,7 +3923,8 @@ void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
bool LinearScanAllocator::TryAllocatePreferredReg(
LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
int hint_register;
- if (current->FirstHintPosition(&hint_register) != nullptr ||
+ if (current->RegisterFromControlFlow(&hint_register) ||
+ current->FirstHintPosition(&hint_register) != nullptr ||
current->RegisterFromBundle(&hint_register)) {
TRACE(
"Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
@@ -3286,21 +3966,24 @@ int LinearScanAllocator::PickRegisterThatIsAvailableLongest(
// clobbered after the call except for the argument registers, which are
// set before the call. Hence, the argument registers always get ignored,
// as their available time is shorter.
- int reg = hint_reg == kUnassignedRegister ? codes[0] : hint_reg;
+ int reg = (hint_reg == kUnassignedRegister) ? codes[0] : hint_reg;
+ int current_free = -1;
for (int i = 0; i < num_codes; ++i) {
int code = codes[i];
// Prefer registers that have no fixed uses to avoid blocking later hints.
// We use the first register that has no fixed uses to ensure we use
// byte addressable registers in ia32 first.
int candidate_free = free_until_pos[code].ToInstructionIndex();
- int current_free = free_until_pos[reg].ToInstructionIndex();
- if (candidate_free > current_free ||
+ TRACE("Register %s in free until %d\n", RegisterName(code), candidate_free);
+ if ((candidate_free > current_free) ||
(candidate_free == current_free && reg != hint_reg &&
- data()->HasFixedUse(current->representation(), reg) &&
- !data()->HasFixedUse(current->representation(), code))) {
+ (data()->HasFixedUse(current->representation(), reg) &&
+ !data()->HasFixedUse(current->representation(), code)))) {
reg = code;
+ current_free = candidate_free;
}
}
+
return reg;
}
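After this rewrite, the selection is a plain running-maximum scan over free_until_pos, seeded with current_free = -1 so the first examined register always becomes the provisional pick; the hint and fixed-use preferences only act as tie-breaks. Stripped to the core (ignoring those tie-breaks):

int PickLongestFree(const int* free_until, const int* codes, int num_codes,
                    int initial) {
  int reg = initial;
  int current_free = -1;  // nothing examined yet
  for (int i = 0; i < num_codes; ++i) {
    int candidate_free = free_until[codes[i]];
    if (candidate_free > current_free) {
      reg = codes[i];
      current_free = candidate_free;
    }
  }
  return reg;
}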
@@ -3308,7 +3991,8 @@ bool LinearScanAllocator::TryAllocateFreeReg(
LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
// Compute register hint, if such exists.
int hint_reg = kUnassignedRegister;
- current->FirstHintPosition(&hint_reg) != nullptr ||
+ current->RegisterFromControlFlow(&hint_reg) ||
+ current->FirstHintPosition(&hint_reg) != nullptr ||
current->RegisterFromBundle(&hint_reg);
int reg =
@@ -3341,12 +4025,13 @@ bool LinearScanAllocator::TryAllocateFreeReg(
return true;
}
-void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
+void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
+ SpillMode spill_mode) {
UsePosition* register_use = current->NextRegisterPosition(current->Start());
if (register_use == nullptr) {
// There is no use in the current live range that requires a register.
// We can just spill it.
- Spill(current);
+ Spill(current, spill_mode);
return;
}
@@ -3441,7 +4126,8 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
// Compute register hint if it exists.
int hint_reg = kUnassignedRegister;
- register_use->HintRegister(&hint_reg) ||
+ current->RegisterFromControlFlow(&hint_reg) ||
+ register_use->HintRegister(&hint_reg) ||
current->RegisterFromBundle(&hint_reg);
int reg = PickRegisterThatIsAvailableLongest(current, hint_reg, use_pos);
@@ -3450,18 +4136,33 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
// spill until there. The gap position will then fit the fill move.
if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
register_use->pos())) {
- SpillBetween(current, current->Start(), register_use->pos());
+ SpillBetween(current, current->Start(), register_use->pos(), spill_mode);
return;
}
}
+ // When in deferred spilling mode, avoid stealing registers beyond the
+ // current deferred region. This is required because we might otherwise
+ // spill an inactive range whose start lies outside deferred code, and it
+ // would never be reloaded.
+ LifetimePosition new_end = current->End();
+ if (spill_mode == SpillMode::kSpillDeferred) {
+ InstructionBlock* deferred_block =
+ code()->GetInstructionBlock(current->Start().ToInstructionIndex());
+ new_end = Min(new_end, LifetimePosition::GapFromInstructionIndex(
+ LastDeferredInstructionIndex(deferred_block)));
+ }
+
// We couldn't spill until the next register use. Split before the register
// is blocked, if applicable.
- if (block_pos[reg] < current->End()) {
+ if (block_pos[reg] < new_end) {
// Register becomes blocked before the current range end. Split before that
// position.
- LiveRange* tail =
- SplitBetween(current, current->Start(), block_pos[reg].Start());
+ new_end = block_pos[reg].Start();
+ }
+
+ // Split at the new end if we found one.
+ if (new_end != current->End()) {
+ LiveRange* tail = SplitBetween(current, current->Start(), new_end);
AddToUnhandled(tail);
}
@@ -3474,10 +4175,11 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
// This register was not free. Thus we need to find and spill
// parts of active and inactive live regions that use the same register
// at the same lifetime positions as current.
- SplitAndSpillIntersecting(current);
+ SplitAndSpillIntersecting(current, spill_mode);
}
-void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
+void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
+ SpillMode spill_mode) {
DCHECK(current->HasRegisterAssigned());
int reg = current->assigned_register();
LifetimePosition split_pos = current->Start();
@@ -3499,9 +4201,13 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
}
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
+ // TODO(herhut): Be more clever here as long as we do not move split_pos
+ // out of deferred code.
+ LifetimePosition spill_pos = spill_mode == SpillMode::kSpillDeferred
+ ? split_pos
+ : FindOptimalSpillingPos(range, split_pos);
if (next_pos == nullptr) {
- SpillAfter(range, spill_pos);
+ SpillAfter(range, spill_pos, spill_mode);
} else {
// When spilling between spill_pos and next_pos ensure that the range
// remains spilled at least until the start of the current live range.
@@ -3513,7 +4219,8 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
// current live-range is larger than their end.
DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
next_pos->pos()));
- SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+ SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos(),
+ spill_mode);
}
it = ActiveToHandled(it);
}
@@ -3544,10 +4251,10 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
if (next_intersection.IsValid()) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == nullptr) {
- SpillAfter(range, split_pos);
+ SpillAfter(range, split_pos, spill_mode);
} else {
next_intersection = Min(next_intersection, next_pos->pos());
- SpillBetween(range, split_pos, next_intersection);
+ SpillBetween(range, split_pos, next_intersection, spill_mode);
}
it = InactiveToHandled(it);
} else {
@@ -3598,29 +4305,33 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
if (pos == nullptr) {
- Spill(range);
+ Spill(range, SpillMode::kSpillAtDefinition);
return true;
} else if (pos->pos() > range->Start().NextStart()) {
- SpillBetween(range, range->Start(), pos->pos());
+ SpillBetween(range, range->Start(), pos->pos(),
+ SpillMode::kSpillAtDefinition);
return true;
}
return false;
}
-void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos,
+ SpillMode spill_mode) {
LiveRange* second_part = SplitRangeAt(range, pos);
- Spill(second_part);
+ Spill(second_part, spill_mode);
}
void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
- LifetimePosition end) {
- SpillBetweenUntil(range, start, start, end);
+ LifetimePosition end,
+ SpillMode spill_mode) {
+ SpillBetweenUntil(range, start, start, end, spill_mode);
}
void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
LifetimePosition start,
LifetimePosition until,
- LifetimePosition end) {
+ LifetimePosition end,
+ SpillMode spill_mode) {
CHECK(start < end);
LiveRange* second_part = SplitRangeAt(range, start);
@@ -3628,17 +4339,34 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
// The split result intersects with [start, end[.
// Split it at position between ]start+1, end[, spill the middle part
// and put the rest to unhandled.
- LifetimePosition third_part_end = end.PrevStart().End();
+
+ // Make sure that the third part always starts after the start of the
+ // second part, as that likely is the current position of the register
+ // allocator and we cannot add ranges to unhandled that start before
+ // the current position.
+ LifetimePosition split_start = Max(second_part->Start().End(), until);
+
+ // If end is an actual use (which it typically is) we have to split
+ // so that there is a gap before it, leaving space for moving the
+ // value into its position.
+ // However, if we have no choice, split right where asked.
+ LifetimePosition third_part_end = Max(split_start, end.PrevStart().End());
+ // Instead of splitting right after or even before the block boundary,
+ // split on the boundary to avoid extra moves.
if (data()->IsBlockBoundary(end.Start())) {
- third_part_end = end.Start();
+ third_part_end = Max(split_start, end.Start());
}
- LiveRange* third_part = SplitBetween(
- second_part, Max(second_part->Start().End(), until), third_part_end);
- DCHECK(third_part != second_part);
+ LiveRange* third_part =
+ SplitBetween(second_part, split_start, third_part_end);
- Spill(second_part);
AddToUnhandled(third_part);
+ // The third part can coincide with the second part, even though we
+ // checked start < end above, as we fiddle with the end location. However,
+ // we are guaranteed to be at or after until, so this is fine.
+ if (third_part != second_part) {
+ Spill(second_part, spill_mode);
+ }
} else {
// The split result does not intersect with [start, end[.
// Nothing to spill. Just put it to unhandled as whole.
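
The three clamped positions in SpillBetweenUntil interact subtly. The sketch below restates just the position arithmetic with ints standing in for LifetimePosition, so the invariants (the third part never starts before `until`, and never before the second part) are easy to check; `end - 1` approximates end.PrevStart().End(), the last position that still leaves a gap for the reload move:

#include <algorithm>

struct SplitPoints { int split_start; int third_part_end; };

SplitPoints ComputeSplitPoints(int second_part_start, int until, int end,
                               bool end_is_block_boundary) {
  // The third part must not start before the allocator's current position.
  int split_start = std::max(second_part_start, until);
  // Prefer leaving a gap before `end`; if that would precede split_start,
  // split right at split_start instead.
  int third_part_end = std::max(split_start, end - 1);
  if (end_is_block_boundary) {
    // Split exactly on the boundary to avoid extra moves.
    third_part_end = std::max(split_start, end);
  }
  return {split_start, third_part_end};
}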
@@ -3671,6 +4399,34 @@ void SpillSlotLocator::LocateSpillSlots() {
OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
+void OperandAssigner::DecideSpillingMode() {
+ if (FLAG_turbo_control_flow_aware_allocation) {
+ for (auto range : data()->live_ranges()) {
+ int max_blocks = data()->code()->InstructionBlockCount();
+ if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks()) {
+ // If the range is spilled only in deferred blocks and starts in
+ // a non-deferred block, we transition its representation here so
+ // that the LiveRangeConnector processes them correctly. If,
+ // however, they start in a deferred block, we uograde them to
+ // spill at definition, as that definition is in a deferred block
+ // anyway. While this is an optimization, the code in LiveRangeConnector
+ // relies on it!
+ if (GetInstructionBlock(data()->code(), range->Start())->IsDeferred()) {
+ TRACE("Live range %d is spilled and alive in deferred code only\n",
+ range->vreg());
+ range->TransitionRangeToSpillAtDefinition();
+ } else {
+ TRACE(
+ "Live range %d is spilled deferred code only but alive outside\n",
+ range->vreg());
+ range->TransitionRangeToDeferredSpill(data()->allocation_zone(),
+ max_blocks);
+ }
+ }
+ }
+ }
+}
+
void OperandAssigner::AssignSpillSlots() {
for (auto range : data()->live_ranges()) {
if (range != nullptr && range->get_bundle() != nullptr) {
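
DecideSpillingMode thus sorts every deferred-only range into one of two buckets. A condensed form of the decision, under the assumption that the range has already been classified as kDeferredSpillRange:

enum class SpillType { kSpillRange, kDeferredSpillRange };

// Pick the final representation for a range currently marked
// kDeferredSpillRange: a definition that is itself in deferred code may as
// well spill at definition (and LiveRangeConnector relies on that), while a
// definition in regular code keeps the per-block deferred spill.
SpillType FinalSpillType(bool starts_in_deferred_block) {
  return starts_in_deferred_block ? SpillType::kSpillRange
                                  : SpillType::kDeferredSpillRange;
}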
@@ -3925,6 +4681,7 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
LifetimePosition block_end =
LifetimePosition::GapFromInstructionIndex(block->code_end());
const LiveRange* current = result.cur_cover_;
+ // TODO(herhut): This is not the successor if we have control flow!
const LiveRange* successor = current->next();
if (current->End() < block_end &&
(successor == nullptr || successor->spilled())) {
@@ -3945,6 +4702,9 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
pred_block->IsDeferred()) {
// The spill location should be defined in pred_block, so add
// pred_block to the list of blocks requiring a spill operand.
+ TRACE("Adding B%d to list of spill blocks for %d\n",
+ pred_block->rpo_number().ToInt(),
+ current->TopLevel()->vreg());
current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
pred_block->rpo_number().ToInt());
}
@@ -4152,6 +4912,8 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
RpoNumber spill_block_number = spill_block->rpo_number();
if (done_moves.find(std::make_pair(
spill_block_number, range->vreg())) == done_moves.end()) {
+ TRACE("Spilling deferred spill for range %d at B%d\n", range->vreg(),
+ spill_block_number.ToInt());
data()->AddGapMove(spill_block->first_instruction_index(),
Instruction::GapPosition::START, pred_op,
spill_operand);
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index 6eae9f7682..7e2a197700 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -332,7 +332,26 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
void set_assigned_register(int reg);
void UnsetAssignedRegister();
+ bool ShouldRecombine() const { return RecombineField::decode(bits_); }
+
+ void SetRecombine() { bits_ = RecombineField::update(bits_, true); }
+ void set_controlflow_hint(int reg) {
+ bits_ = ControlFlowRegisterHint::update(bits_, reg);
+ }
+ int controlflow_hint() const {
+ return ControlFlowRegisterHint::decode(bits_);
+ }
+ bool RegisterFromControlFlow(int* reg) {
+ int hint = controlflow_hint();
+ if (hint != kUnassignedRegister) {
+ *reg = hint;
+ return true;
+ }
+ return false;
+ }
bool spilled() const { return SpilledField::decode(bits_); }
+ void AttachToNext();
+ void Unspill();
void Spill();
RegisterKind kind() const;
@@ -448,8 +467,11 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
void VerifyIntervals() const;
typedef BitField<bool, 0, 1> SpilledField;
- typedef BitField<int32_t, 6, 6> AssignedRegisterField;
- typedef BitField<MachineRepresentation, 12, 8> RepresentationField;
+ // Bits [1,7[ are used by TopLevelLiveRange.
+ typedef BitField<int32_t, 7, 6> AssignedRegisterField;
+ typedef BitField<MachineRepresentation, 13, 8> RepresentationField;
+ typedef BitField<bool, 21, 1> RecombineField;
+ typedef BitField<uint8_t, 22, 6> ControlFlowRegisterHint;
// Unique among children and splinters of the same virtual register.
int relative_id_;
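
The repacked bit layout is easiest to audit spelled out. The positions below are read directly off the BitField declarations in this diff (the old layout had AssignedRegisterField at bit 6); the control-flow hint's unassigned sentinel is assumed here to be 63, the 6-bit maximum, rather than V8's actual kUnassignedRegister constant:

#include <cstdint>

// Bit layout of LiveRange::bits_ after this change:
//   bit 0      SpilledField
//   bits 1-2   HasSlotUseField         (TopLevelLiveRange, now 2 bits wide)
//   bit 3      IsPhiField              (TopLevelLiveRange)
//   bit 4      IsNonLoopPhiField       (TopLevelLiveRange)
//   bits 5-6   SpillTypeField          (TopLevelLiveRange)
//   bits 7-12  AssignedRegisterField
//   bits 13-20 RepresentationField
//   bit 21     RecombineField
//   bits 22-27 ControlFlowRegisterHint
constexpr int kHintShift = 22;
constexpr uint32_t kHintMask = 0x3Fu << kHintShift;
constexpr int kUnassignedSentinel = 0x3F;  // assumption: 6-bit all-ones

static_assert(kHintShift + 6 <= 32, "all fields fit the 32-bit bits_ word");

// Equivalent of RegisterFromControlFlow: report a hint only if one was set.
inline bool HintFromBits(uint32_t bits, int* reg) {
  int hint = static_cast<int>((bits & kHintMask) >> kHintShift);
  if (hint == kUnassignedSentinel) return false;
  *reg = hint;
  return true;
}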
@@ -555,10 +577,23 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
bits_ = IsNonLoopPhiField::update(bits_, value);
}
- bool has_slot_use() const { return HasSlotUseField::decode(bits_); }
- void set_has_slot_use(bool value) {
- bits_ = HasSlotUseField::update(bits_, value);
+ enum SlotUseKind { kNoSlotUse, kDeferredSlotUse, kGeneralSlotUse };
+
+ bool has_slot_use() const {
+ return slot_use_kind() > SlotUseKind::kNoSlotUse;
+ }
+
+ bool has_non_deferred_slot_use() const {
+ return slot_use_kind() == SlotUseKind::kGeneralSlotUse;
+ }
+
+ void reset_slot_use() {
+ bits_ = HasSlotUseField::update(bits_, SlotUseKind::kNoSlotUse);
}
+ void register_slot_use(SlotUseKind value) {
+ bits_ = HasSlotUseField::update(bits_, Max(slot_use_kind(), value));
+ }
+ SlotUseKind slot_use_kind() const { return HasSlotUseField::decode(bits_); }
// Add a new interval or a new use position to this live range.
void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
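
register_slot_use folds new observations in with Max, which makes the three-value kind a small lattice: once a general slot use is seen, it can never be demoted back to deferred-only. In miniature:

#include <algorithm>

enum SlotUseKind { kNoSlotUse, kDeferredSlotUse, kGeneralSlotUse };

// Monotone join: recording a use only ever moves up the lattice.
inline SlotUseKind RegisterSlotUse(SlotUseKind current, SlotUseKind incoming) {
  return std::max(current, incoming);
}

inline bool HasSlotUse(SlotUseKind k) { return k > kNoSlotUse; }
inline bool HasNonDeferredSlotUse(SlotUseKind k) {
  return k == kGeneralSlotUse;
}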
@@ -580,7 +615,24 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
// Spill range management.
void SetSpillRange(SpillRange* spill_range);
- enum class SpillType { kNoSpillType, kSpillOperand, kSpillRange };
+
+ // Encodes whether a range is also available from a memory location:
+ // kNoSpillType: not available in a memory location.
+ // kSpillOperand: computed in a memory location at range start.
+ // kSpillRange: copied (spilled) to memory location at range start.
+ // kDeferredSpillRange: copied (spilled) to memory location at entry
+ // to deferred blocks that have a use from memory.
+ //
+ // Ranges either start out at kSpillOperand, which is also their final
+ // state, or kNoSpillType. When spilled only in deferred code, a range
+ // ends up with kDeferredSpillRange, while when spilled in regular code,
+ // a range will be tagged as kSpillRange.
+ enum class SpillType {
+ kNoSpillType,
+ kSpillOperand,
+ kSpillRange,
+ kDeferredSpillRange
+ };
void set_spill_type(SpillType value) {
bits_ = SpillTypeField::update(bits_, value);
}
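
The enum order is load-bearing: kDeferredSpillRange is placed after kSpillRange so the two spill-range flavours form a contiguous tail, which is what lets HasSpillRange() and the DCHECK_GE in GetSpillRange() below get away with a single comparison. Restated on its own:

enum class SpillType {
  kNoSpillType, kSpillOperand, kSpillRange, kDeferredSpillRange
};

// Both spill-range flavours sit at or above kSpillRange...
inline bool HasSpillRange(SpillType t) { return t >= SpillType::kSpillRange; }
// ...while the non-deferred case can still be singled out exactly.
inline bool HasGeneralSpillRange(SpillType t) {
  return t == SpillType::kSpillRange;
}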
@@ -596,7 +648,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
}
SpillRange* GetSpillRange() const {
- DCHECK_EQ(SpillType::kSpillRange, spill_type());
+ DCHECK_GE(spill_type(), SpillType::kSpillRange);
return spill_range_;
}
bool HasNoSpillType() const {
@@ -605,8 +657,10 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
bool HasSpillOperand() const {
return spill_type() == SpillType::kSpillOperand;
}
- bool HasSpillRange() const { return spill_type() == SpillType::kSpillRange; }
-
+ bool HasSpillRange() const { return spill_type() >= SpillType::kSpillRange; }
+ bool HasGeneralSpillRange() const {
+ return spill_type() == SpillType::kSpillRange;
+ }
AllocatedOperand GetSpillRangeOperand() const;
void RecordSpillLocation(Zone* zone, int gap_index,
@@ -628,6 +682,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
// deferred blocks. If so, we insert here spills for non-spilled ranges
// with slot use positions.
void TreatAsSpilledInDeferredBlock(Zone* zone, int total_block_count) {
+ DCHECK(!FLAG_turbo_control_flow_aware_allocation);
spill_start_index_ = -1;
spilled_in_deferred_blocks_ = true;
spill_move_insertion_locations_ = nullptr;
@@ -635,9 +690,24 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
new (zone) BitVector(total_block_count, zone);
}
- void CommitSpillInDeferredBlocks(RegisterAllocationData* data,
- const InstructionOperand& spill_operand,
- BitVector* necessary_spill_points);
+ // Updates internal data structures to reflect that this range is not
+ // spilled at definition but instead spilled in some blocks only.
+ void TransitionRangeToDeferredSpill(Zone* zone, int total_block_count) {
+ DCHECK(FLAG_turbo_control_flow_aware_allocation);
+ spill_start_index_ = -1;
+ spill_move_insertion_locations_ = nullptr;
+ list_of_blocks_requiring_spill_operands_ =
+ new (zone) BitVector(total_block_count, zone);
+ }
+
+ // Promotes this range to spill at definition if it was marked for spilling
+ // in deferred blocks before.
+ void TransitionRangeToSpillAtDefinition() {
+ DCHECK_NOT_NULL(spill_move_insertion_locations_);
+ if (spill_type() == SpillType::kDeferredSpillRange) {
+ set_spill_type(SpillType::kSpillRange);
+ }
+ }
TopLevelLiveRange* splintered_from() const { return splintered_from_; }
bool IsSplinter() const { return splintered_from_ != nullptr; }
@@ -654,15 +724,18 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
void Verify() const;
void VerifyChildrenInOrder() const;
-
+ LiveRange* GetChildCovers(LifetimePosition pos);
int GetNextChildId() {
return IsSplinter() ? splintered_from()->GetNextChildId()
: ++last_child_id_;
}
- int GetChildCount() const { return last_child_id_ + 1; }
+ int GetMaxChildCount() const { return last_child_id_ + 1; }
bool IsSpilledOnlyInDeferredBlocks() const {
+ if (FLAG_turbo_control_flow_aware_allocation) {
+ return spill_type() == SpillType::kDeferredSpillRange;
+ }
return spilled_in_deferred_blocks_;
}
@@ -698,12 +771,13 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
}
private:
+ friend class LiveRange;
void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
- typedef BitField<bool, 1, 1> HasSlotUseField;
- typedef BitField<bool, 2, 1> IsPhiField;
- typedef BitField<bool, 3, 1> IsNonLoopPhiField;
- typedef BitField<SpillType, 4, 2> SpillTypeField;
+ typedef BitField<SlotUseKind, 1, 2> HasSlotUseField;
+ typedef BitField<bool, 3, 1> IsPhiField;
+ typedef BitField<bool, 4, 1> IsNonLoopPhiField;
+ typedef BitField<SpillType, 5, 2> SpillTypeField;
int vreg_;
int last_child_id_;
@@ -724,6 +798,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
bool spilled_in_deferred_blocks_;
int spill_start_index_;
UsePosition* last_pos_;
+ LiveRange* last_child_covers_;
TopLevelLiveRange* splinter_;
bool has_preassigned_slot_;
@@ -782,6 +857,9 @@ class SpillRange final : public ZoneObject {
class RegisterAllocationData final : public ZoneObject {
public:
+ // Encodes whether a spill happens in deferred code (kSpillDeferred) or
+ // regular code (kSpillAtDefinition).
+ enum SpillMode { kSpillAtDefinition, kSpillDeferred };
class PhiMapValue : public ZoneObject {
public:
PhiMapValue(PhiInstruction* phi, const InstructionBlock* block, Zone* zone);
@@ -871,7 +949,8 @@ class RegisterAllocationData final : public ZoneObject {
TopLevelLiveRange* NewLiveRange(int index, MachineRepresentation rep);
TopLevelLiveRange* NextLiveRange(MachineRepresentation rep);
- SpillRange* AssignSpillRangeToLiveRange(TopLevelLiveRange* range);
+ SpillRange* AssignSpillRangeToLiveRange(TopLevelLiveRange* range,
+ SpillMode spill_mode);
SpillRange* CreateSpillRangeForLiveRange(TopLevelLiveRange* range);
MoveOperands* AddGapMove(int index, Instruction::GapPosition position,
@@ -900,6 +979,18 @@ class RegisterAllocationData final : public ZoneObject {
return preassigned_slot_ranges_;
}
+ void RememberSpillState(RpoNumber block,
+ const ZoneVector<LiveRange*>& state) {
+ spill_state_[block.ToSize()] = state;
+ }
+
+ ZoneVector<LiveRange*>& GetSpillState(RpoNumber block) {
+ auto& result = spill_state_[block.ToSize()];
+ return result;
+ }
+
+ void ResetSpillState() { spill_state_.clear(); }
+
private:
int GetNextLiveRangeId();
@@ -924,6 +1015,7 @@ class RegisterAllocationData final : public ZoneObject {
BitVector* fixed_fp_register_use_;
int virtual_register_count_;
RangesWithPreassignedSlots preassigned_slot_ranges_;
+ ZoneVector<ZoneVector<LiveRange*>> spill_state_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
};
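
The new spill_state_ member is a per-block snapshot store keyed by RPO number. A stand-in with std::vector in place of the zone containers shows the shape (LiveRange is left opaque; the real code sizes the outer vector from the instruction sequence):

#include <cstddef>
#include <vector>

struct LiveRange;  // opaque here

class SpillStateCache {
 public:
  explicit SpillStateCache(size_t block_count) : spill_state_(block_count) {}

  void Remember(size_t rpo, const std::vector<LiveRange*>& state) {
    spill_state_[rpo] = state;
  }
  std::vector<LiveRange*>& Get(size_t rpo) { return spill_state_[rpo]; }
  void Reset() { spill_state_.clear(); }

 private:
  std::vector<std::vector<LiveRange*>> spill_state_;
};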
@@ -968,6 +1060,8 @@ class LiveRangeBuilder final : public ZoneObject {
RegisterAllocationData* data);
private:
+ using SpillMode = RegisterAllocationData::SpillMode;
+
RegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
Zone* allocation_zone() const { return data()->allocation_zone(); }
@@ -1017,7 +1111,6 @@ class LiveRangeBuilder final : public ZoneObject {
InstructionOperand* operand) {
Use(block_start, position, operand, nullptr, UsePositionHintType::kNone);
}
-
RegisterAllocationData* const data_;
ZoneMap<InstructionOperand*, UsePosition*> phi_hints_;
@@ -1042,6 +1135,7 @@ class RegisterAllocator : public ZoneObject {
RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
protected:
+ using SpillMode = RegisterAllocationData::SpillMode;
RegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
RegisterKind mode() const { return mode_; }
@@ -1085,7 +1179,7 @@ class RegisterAllocator : public ZoneObject {
LifetimePosition FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end);
- void Spill(LiveRange* range);
+ void Spill(LiveRange* range, SpillMode spill_mode);
// If we are trying to spill a range inside the loop try to
// hoist spill position out to the point just before the loop.
@@ -1118,6 +1212,42 @@ class LinearScanAllocator final : public RegisterAllocator {
void AllocateRegisters();
private:
+ struct RangeWithRegister {
+ TopLevelLiveRange* range;
+ int expected_register;
+ struct Hash {
+ size_t operator()(const RangeWithRegister item) const {
+ return item.range->vreg();
+ }
+ };
+ struct Equals {
+ bool operator()(const RangeWithRegister one,
+ const RangeWithRegister two) const {
+ return one.range == two.range;
+ }
+ };
+
+ explicit RangeWithRegister(LiveRange* a_range)
+ : range(a_range->TopLevel()),
+ expected_register(a_range->assigned_register()) {}
+ RangeWithRegister(TopLevelLiveRange* toplevel, int reg)
+ : range(toplevel), expected_register(reg) {}
+ };
+
+ using RangeWithRegisterSet =
+ ZoneUnorderedSet<RangeWithRegister, RangeWithRegister::Hash,
+ RangeWithRegister::Equals>;
+
+ void MaybeUndoPreviousSplit(LiveRange* range);
+ void SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
+ LifetimePosition position, SpillMode spill_mode);
+ LiveRange* AssignRegisterOnReload(LiveRange* range, int reg);
+ void ReloadLiveRanges(RangeWithRegisterSet& to_be_live,
+ LifetimePosition position);
+
+ bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
+ const InstructionBlock* block);
+
struct LiveRangeOrdering {
bool operator()(const LiveRange* a, const LiveRange* b) const {
return a->ShouldBeAllocatedBefore(b);
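
Note the asymmetry in RangeWithRegister: expected_register participates in neither the hash nor the equality, so the set is keyed purely on the top-level range, and re-inserting the same range with a different expected register leaves the original entry in place. The same shape with std::unordered_set standing in for ZoneUnorderedSet:

#include <cstddef>
#include <unordered_set>

struct TopLevelLiveRange {
  int vreg_;
  int vreg() const { return vreg_; }
};

struct RangeWithRegister {
  TopLevelLiveRange* range;
  int expected_register;
  struct Hash {
    size_t operator()(const RangeWithRegister& item) const {
      return static_cast<size_t>(item.range->vreg());
    }
  };
  struct Equals {
    bool operator()(const RangeWithRegister& a,
                    const RangeWithRegister& b) const {
      return a.range == b.range;  // expected_register is ignored on purpose
    }
  };
};

using RangeWithRegisterSet =
    std::unordered_set<RangeWithRegister, RangeWithRegister::Hash,
                       RangeWithRegister::Equals>;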
@@ -1147,6 +1277,17 @@ class LinearScanAllocator final : public RegisterAllocator {
void ForwardStateTo(LifetimePosition position);
+ int LastDeferredInstructionIndex(InstructionBlock* start);
+
+ // Helper methods for choosing state after control flow events.
+
+ bool ConsiderBlockForControlFlow(InstructionBlock* current_block,
+ RpoNumber predecessor);
+ RpoNumber ChooseOneOfTwoPredecessorStates(InstructionBlock* current_block,
+ LifetimePosition boundary);
+ void ComputeStateFromManyPredecessors(InstructionBlock* current_block,
+ RangeWithRegisterSet* to_be_live);
+
// Helper methods for allocating registers.
bool TryReuseSpillForPhi(TopLevelLiveRange* range);
int PickRegisterThatIsAvailableLongest(
@@ -1160,23 +1301,23 @@ class LinearScanAllocator final : public RegisterAllocator {
int* num_codes, const int** codes) const;
void FindFreeRegistersForRange(LiveRange* range,
Vector<LifetimePosition> free_until_pos);
- void ProcessCurrentRange(LiveRange* current);
- void AllocateBlockedReg(LiveRange* range);
+ void ProcessCurrentRange(LiveRange* current, SpillMode spill_mode);
+ void AllocateBlockedReg(LiveRange* range, SpillMode spill_mode);
bool TrySplitAndSpillSplinter(LiveRange* range);
// Spill the given life range after position pos.
- void SpillAfter(LiveRange* range, LifetimePosition pos);
+ void SpillAfter(LiveRange* range, LifetimePosition pos, SpillMode spill_mode);
// Spill the given life range after position [start] and up to position [end].
void SpillBetween(LiveRange* range, LifetimePosition start,
- LifetimePosition end);
+ LifetimePosition end, SpillMode spill_mode);
// Spill the given life range after position [start] and up to position [end].
// Range is guaranteed to be spilled at least until position [until].
void SpillBetweenUntil(LiveRange* range, LifetimePosition start,
- LifetimePosition until, LifetimePosition end);
-
- void SplitAndSpillIntersecting(LiveRange* range);
+ LifetimePosition until, LifetimePosition end,
+ SpillMode spill_mode);
+ void SplitAndSpillIntersecting(LiveRange* range, SpillMode spill_mode);
void PrintRangeRow(std::ostream& os, const TopLevelLiveRange* toplevel);
@@ -1216,10 +1357,13 @@ class OperandAssigner final : public ZoneObject {
public:
explicit OperandAssigner(RegisterAllocationData* data);
- // Phase 5: assign spill splots.
+ // Phase 5: final decision on spilling mode.
+ void DecideSpillingMode();
+
+ // Phase 6: assign spill slots.
void AssignSpillSlots();
- // Phase 6: commit assignment.
+ // Phase 7: commit assignment.
void CommitAssignment();
private:
@@ -1234,7 +1378,7 @@ class ReferenceMapPopulator final : public ZoneObject {
public:
explicit ReferenceMapPopulator(RegisterAllocationData* data);
- // Phase 7: compute values for pointer maps.
+ // Phase 8: compute values for pointer maps.
void PopulateReferenceMaps();
private:
@@ -1259,11 +1403,11 @@ class LiveRangeConnector final : public ZoneObject {
public:
explicit LiveRangeConnector(RegisterAllocationData* data);
- // Phase 8: reconnect split ranges with moves, when the control flow
+ // Phase 9: reconnect split ranges with moves, when the control flow
// between the ranges is trivial (no branches).
void ConnectRanges(Zone* local_zone);
- // Phase 9: insert moves to connect ranges across basic blocks, when the
+ // Phase 10: insert moves to connect ranges across basic blocks, when the
// control flow between them cannot be trivially resolved, such as joining
// branches.
void ResolveControlFlow(Zone* local_zone);
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 757576dd85..cbca355ca8 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -10,6 +10,7 @@
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/macro-assembler.h"
#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
@@ -1636,6 +1637,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
__ AndP(i.InputRegister(0), kSpeculationPoisonRegister);
break;
+ case kS390_Peek: {
+ // The incoming value is 0-based, but we need a 1-based value.
+ int reverse_slot = i.InputInt32(0) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ LoadFloat32(i.OutputFloatRegister(), MemOperand(fp, offset));
+ }
+ } else {
+ __ LoadP(i.OutputRegister(), MemOperand(fp, offset));
+ }
+ break;
+ }
case kS390_Abs32:
// TODO(john.yan): zero-ext
__ lpr(i.OutputRegister(0), i.InputRegister(0));
@@ -2066,11 +2085,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Log10:
ASSEMBLE_IEEE754_UNOP(log10);
break;
- case kIeee754Float64Pow: {
- __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
- __ Move(d1, d3);
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
break;
- }
case kS390_Neg32:
__ lcr(i.OutputRegister(), i.InputRegister(0));
CHECK_AND_ZERO_EXT_OUTPUT(1);
@@ -2639,7 +2656,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
- Label two, unaligned, done;
+ Label two, done;
__ la(r1, MemOperand(base, index));
__ tmll(r1, Operand(3));
__ b(Condition(2), &two);
@@ -3026,8 +3043,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -3038,15 +3055,15 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
const RegList saves = call_descriptor->CalleeSavedRegisters();
- if (shrink_slots > 0) {
- if (info()->IsWasm() && shrink_slots > 128) {
+ if (required_slots > 0) {
+ if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -3056,22 +3073,19 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((shrink_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+ if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
Register scratch = r1;
__ LoadP(
scratch,
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ LoadP(scratch, MemOperand(scratch));
- __ AddP(scratch, scratch, Operand(shrink_slots * kSystemPointerSize));
+ __ AddP(scratch, scratch, Operand(required_slots * kSystemPointerSize));
__ CmpLogicalP(sp, scratch);
__ bge(&done);
}
- __ LoadP(r4, FieldMemOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset));
- __ Move(cp, Smi::zero());
- __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r4);
+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple,
@@ -3084,11 +3098,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Skip callee-saved and return slots, which are pushed below.
- shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -= frame()->GetReturnSlotCount();
- shrink_slots -= (kDoubleSize / kSystemPointerSize) *
- base::bits::CountPopulation(saves_fp);
- __ lay(sp, MemOperand(sp, -shrink_slots * kSystemPointerSize));
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= frame()->GetReturnSlotCount();
+ required_slots -= (kDoubleSize / kSystemPointerSize) *
+ base::bits::CountPopulation(saves_fp);
+ __ lay(sp, MemOperand(sp, -required_slots * kSystemPointerSize));
}
// Save callee-saved Double registers.
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 1f9408ee47..80039d06eb 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -12,6 +12,7 @@ namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(S390_Peek) \
V(S390_Abs32) \
V(S390_Abs64) \
V(S390_And32) \
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index 052784ca79..d8f2d83c4f 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -152,6 +152,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadReverse16:
case kS390_LoadReverse32:
case kS390_LoadReverse64:
+ case kS390_Peek:
return kIsLoadOperation;
case kS390_StoreWord8:
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index b0afed0bd1..7b121e0c58 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -37,7 +37,7 @@ enum class OperandMode : uint32_t {
};
typedef base::Flags<OperandMode, uint32_t> OperandModes;
-DEFINE_OPERATORS_FOR_FLAGS(OperandModes);
+DEFINE_OPERATORS_FOR_FLAGS(OperandModes)
OperandModes immediateModeMask =
OperandMode::kShift32Imm | OperandMode::kShift64Imm |
OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
@@ -562,7 +562,7 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
FlagsContinuation cont; \
Visit##type1##type2##Op(selector, node, opcode, operand_mode, &cont); \
}
-VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS);
+VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS)
#undef DECLARE_VISIT_HELPER_FUNCTIONS
#undef VISIT_OP_LIST_32
#undef VISIT_OP_LIST
@@ -1544,10 +1544,10 @@ static inline bool TryMatchDoubleConstructFromInsert(
Visit##type##BinOp(this, node, op, mode); \
}
-WORD32_BIN_OP_LIST(DECLARE_BIN_OP);
-WORD32_UNARY_OP_LIST(DECLARE_UNARY_OP);
-FLOAT_UNARY_OP_LIST(DECLARE_UNARY_OP);
-FLOAT_BIN_OP_LIST(DECLARE_BIN_OP);
+WORD32_BIN_OP_LIST(DECLARE_BIN_OP)
+WORD32_UNARY_OP_LIST(DECLARE_UNARY_OP)
+FLOAT_UNARY_OP_LIST(DECLARE_UNARY_OP)
+FLOAT_BIN_OP_LIST(DECLARE_BIN_OP)
#if V8_TARGET_ARCH_S390X
WORD64_UNARY_OP_LIST(DECLARE_UNARY_OP)
@@ -2632,7 +2632,24 @@ void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
- // TODO(John): Port.
+ S390OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!call_descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ Emit(kS390_Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
}
void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index bcb37e1b46..d6eb940595 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -201,7 +201,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
zone_(gen->zone()) {}
void Generate() final {
- __ subp(rsp, Immediate(kDoubleSize));
+ __ subq(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
@@ -214,7 +214,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
}
__ movl(result_, MemOperand(rsp, 0));
- __ addp(rsp, Immediate(kDoubleSize));
+ __ addq(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kDoubleSize);
}
@@ -250,7 +250,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, zero,
exit());
- __ leap(scratch1_, operand_);
+ __ leaq(scratch1_, operand_);
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
@@ -592,7 +592,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
Label done;
// Check if current frame is an arguments adaptor frame.
- __ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
+ __ cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &done, Label::kNear);
@@ -708,8 +708,8 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// bits cleared if we are speculatively executing the wrong PC.
__ ComputeCodeStartAddress(rbx);
__ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
- __ cmpp(kJavaScriptCallCodeStartRegister, rbx);
- __ movp(rbx, Immediate(-1));
+ __ cmpq(kJavaScriptCallCodeStartRegister, rbx);
+ __ movq(rbx, Immediate(-1));
__ cmovq(equal, kSpeculationPoisonRegister, rbx);
}
@@ -1015,7 +1015,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = new (zone())
OutOfLineRecordWrite(this, object, operand, value, scratch0, scratch1,
mode, DetermineStubCallMode());
- __ movp(operand, value);
+ __ StoreTaggedField(operand, value);
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
@@ -1084,13 +1084,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Log10:
ASSEMBLE_IEEE754_UNOP(log10);
break;
- case kIeee754Float64Pow: {
- // TODO(bmeurer): Improve integration of the stub.
- __ Movsd(xmm2, xmm0);
- __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
- __ Movsd(xmm0, xmm3);
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
break;
- }
case kIeee754Float64Sin:
ASSEMBLE_IEEE754_UNOP(sin);
break;
@@ -1396,7 +1392,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEFloat32Max: {
- Label compare_nan, compare_swap, done_compare;
+ Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
__ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
@@ -1451,7 +1447,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEFloat64Max: {
- Label compare_nan, compare_swap, done_compare;
+ Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
__ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
@@ -1931,21 +1927,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kX64MovqDecompressTaggedSigned: {
CHECK(instr->HasOutput());
- __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand(),
- DEBUG_BOOL ? i.TempRegister(0) : no_reg);
+ __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
break;
}
case kX64MovqDecompressTaggedPointer: {
CHECK(instr->HasOutput());
- __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand(),
- DEBUG_BOOL ? i.TempRegister(0) : no_reg);
+ __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
break;
}
case kX64MovqDecompressAnyTagged: {
CHECK(instr->HasOutput());
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand(),
- i.TempRegister(0),
- DEBUG_BOOL ? i.TempRegister(1) : no_reg);
+ i.TempRegister(0));
+ break;
+ }
+ case kX64MovqCompressTagged: {
+ CHECK(!instr->HasOutput());
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ StoreTaggedField(operand, i.InputImmediate(index));
+ } else {
+ __ StoreTaggedField(operand, i.InputRegister(index));
+ }
break;
}
case kX64Movq:
@@ -3674,8 +3678,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(pc_base);
}
- int shrink_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3687,16 +3691,16 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
+ required_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
- if (shrink_slots > 0) {
+ if (required_slots > 0) {
DCHECK(frame_access_state()->has_frame());
- if (info()->IsWasm() && shrink_slots > 128) {
+ if (info()->IsWasm() && required_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
// overflow check before we construct the frame. Otherwise we may not
// have enough space on the stack to call the runtime for the stack
@@ -3706,20 +3710,19 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if (shrink_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
__ movq(kScratchRegister,
FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ movq(kScratchRegister, Operand(kScratchRegister, 0));
- __ addq(kScratchRegister, Immediate(shrink_slots * kSystemPointerSize));
+ __ addq(kScratchRegister,
+ Immediate(required_slots * kSystemPointerSize));
__ cmpq(rsp, kScratchRegister);
__ j(above_equal, &done);
}
- __ LoadTaggedPointerField(
- rcx, FieldOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset));
- __ Move(rsi, Smi::zero());
- __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, rcx);
+
+ __ near_call(wasm::WasmCode::kWasmStackOverflow,
+ RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
@@ -3728,12 +3731,12 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Skip callee-saved and return slots, which are created below.
- shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -= base::bits::CountPopulation(saves_fp) *
- (kQuadWordSize / kSystemPointerSize);
- shrink_slots -= frame()->GetReturnSlotCount();
- if (shrink_slots > 0) {
- __ subq(rsp, Immediate(shrink_slots * kSystemPointerSize));
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= base::bits::CountPopulation(saves_fp) *
+ (kQuadWordSize / kSystemPointerSize);
+ required_slots -= frame()->GetReturnSlotCount();
+ if (required_slots > 0) {
+ __ subq(rsp, Immediate(required_slots * kSystemPointerSize));
}
}
@@ -3741,7 +3744,7 @@ void CodeGenerator::AssembleConstructFrame() {
const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
const int stack_size = saves_fp_count * kQuadWordSize;
// Adjust the stack pointer.
- __ subp(rsp, Immediate(stack_size));
+ __ subq(rsp, Immediate(stack_size));
// Store the registers on the stack.
int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
@@ -3793,7 +3796,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
slot_idx++;
}
// Adjust the stack pointer.
- __ addp(rsp, Immediate(stack_size));
+ __ addq(rsp, Immediate(stack_size));
}
unwinding_info_writer_.MarkBlockWillExit();
@@ -3844,7 +3847,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
switch (src.type()) {
case Constant::kInt32: {
if (RelocInfo::IsWasmReference(src.rmode())) {
- __ movq(dst, src.ToInt64(), src.rmode());
+ __ movq(dst, Immediate64(src.ToInt64(), src.rmode()));
} else {
int32_t value = src.ToInt32();
if (value == 0) {
@@ -3857,7 +3860,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kInt64:
if (RelocInfo::IsWasmReference(src.rmode())) {
- __ movq(dst, src.ToInt64(), src.rmode());
+ __ movq(dst, Immediate64(src.ToInt64(), src.rmode()));
} else {
__ Set(dst, src.ToInt64());
}
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 59f9a45ecf..fe4cc6bcdf 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -135,6 +135,7 @@ namespace compiler {
V(X64MovqDecompressTaggedSigned) \
V(X64MovqDecompressTaggedPointer) \
V(X64MovqDecompressAnyTagged) \
+ V(X64MovqCompressTagged) \
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 2764a44078..1f7c4bce10 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -306,6 +306,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64MovqDecompressTaggedSigned:
case kX64MovqDecompressTaggedPointer:
case kX64MovqDecompressAnyTagged:
+ case kX64MovqCompressTagged:
case kX64Movq:
case kX64Movsd:
case kX64Movss:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 3f5fe12051..098b35f1b5 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -69,7 +69,10 @@ class X64OperandGenerator final : public OperandGenerator {
case kX64Push:
case kX64Cmp:
case kX64Test:
- return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
+ // When pointer compression is enabled, 64-bit memory operands can't be
+ // used for tagged values.
+ return rep == MachineRepresentation::kWord64 ||
+ (!COMPRESS_POINTERS_BOOL && IsAnyTagged(rep));
case kX64And32:
case kX64Or32:
case kX64Xor32:
@@ -77,7 +80,10 @@ class X64OperandGenerator final : public OperandGenerator {
case kX64Sub32:
case kX64Cmp32:
case kX64Test32:
- return rep == MachineRepresentation::kWord32;
+ // When pointer compression is enabled, 32-bit memory operands can be
+ // used for tagged values.
+ return rep == MachineRepresentation::kWord32 ||
+ (COMPRESS_POINTERS_BOOL && IsAnyTagged(rep));
case kX64Cmp16:
case kX64Test16:
return rep == MachineRepresentation::kWord16;
@@ -280,6 +286,9 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
+#ifdef V8_COMPRESS_POINTERS
+ return kX64MovqCompressTagged;
+#endif
case MachineRepresentation::kWord64:
return kX64Movq;
break;
@@ -319,19 +328,10 @@ void InstructionSelector::VisitLoad(Node* node) {
ArchOpcode opcode = GetLoadOpcode(load_rep);
size_t temp_count = 0;
- InstructionOperand temps[2];
-#ifdef V8_COMPRESS_POINTERS
- if (opcode == kX64MovqDecompressAnyTagged) {
- temps[temp_count++] = g.TempRegister();
- }
-#ifdef DEBUG
- if (opcode == kX64MovqDecompressTaggedSigned ||
- opcode == kX64MovqDecompressTaggedPointer ||
- opcode == kX64MovqDecompressAnyTagged) {
+ InstructionOperand temps[1];
+ if (COMPRESS_POINTERS_BOOL && opcode == kX64MovqDecompressAnyTagged) {
temps[temp_count++] = g.TempRegister();
}
-#endif // DEBUG
-#endif // V8_COMPRESS_POINTERS
DCHECK_LE(temp_count, arraysize(temps));
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand inputs[3];
@@ -1692,6 +1692,15 @@ InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
return kX64Cmp16;
}
break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTagged:
+ // When pointer compression is enabled, the lower 32 bits uniquely
+ // identify a tagged value.
+ if (opcode == kX64Cmp) return kX64Cmp32;
+ break;
+#endif
default:
break;
}
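
The narrowing case added above condenses to a simple rule: with compressed pointers, a tagged value occupies 32 bits in memory, so a full-width tagged compare can be narrowed to the 32-bit opcode. In isolation, with one enumerator standing in for all three tagged representations:

enum ArchOpcode { kX64Cmp, kX64Cmp32 };
enum class MachineRepresentation { kWord32, kWord64, kTagged };

ArchOpcode TryNarrowTaggedCmp(ArchOpcode opcode, MachineRepresentation rep,
                              bool compress_pointers) {
  // The lower 32 bits uniquely identify a compressed tagged value.
  if (compress_pointers && rep == MachineRepresentation::kTagged &&
      opcode == kX64Cmp) {
    return kX64Cmp32;
  }
  return opcode;
}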
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 53868038e7..8789f683f8 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -18,6 +18,7 @@
#include "src/objects/js-generator.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/smi.h"
+#include "src/objects/template-objects-inl.h"
#include "src/vector-slot-pair.h"
namespace v8 {
@@ -550,6 +551,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
state_values_cache_(jsgraph),
source_positions_(source_positions),
start_position_(shared_info->StartPosition(), inlining_id),
+ shared_info_(shared_info),
native_context_(native_context) {}
Node* BytecodeGraphBuilder::GetFunctionClosure() {
@@ -1696,8 +1698,8 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() {
// It's not observable when the template object is created, so we
// can just create it eagerly during graph building and bake in
// the JSArray constant here.
- cached_value =
- TemplateObjectDescription::CreateTemplateObject(isolate(), description);
+ cached_value = TemplateObjectDescription::GetTemplateObject(
+ isolate(), native_context(), description, shared_info(), slot.ToInt());
nexus.vector()->Set(slot, *cached_value);
} else {
cached_value =
@@ -2168,7 +2170,7 @@ void BytecodeGraphBuilder::BuildHoleCheckAndThrow(
bytecode_iterator().current_offset()));
Node* node;
const Operator* op = javascript()->CallRuntime(runtime_id);
- if (runtime_id == Runtime::kThrowReferenceError) {
+ if (runtime_id == Runtime::kThrowAccessedUninitializedVariable) {
DCHECK_NOT_NULL(name);
node = NewNode(op, name);
} else {
@@ -2190,7 +2192,8 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
jsgraph()->TheHoleConstant());
Node* name = jsgraph()->Constant(
handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
- BuildHoleCheckAndThrow(check_for_hole, Runtime::kThrowReferenceError, name);
+ BuildHoleCheckAndThrow(check_for_hole,
+ Runtime::kThrowAccessedUninitializedVariable, name);
}
void BytecodeGraphBuilder::VisitThrowSuperNotCalledIfHole() {
@@ -2539,7 +2542,9 @@ void BytecodeGraphBuilder::VisitTestIn() {
Node* object = environment()->LookupAccumulator();
Node* key =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* node = NewNode(javascript()->HasProperty(), object, key);
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
+ Node* node = NewNode(javascript()->HasProperty(feedback), object, key);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2804,7 +2809,7 @@ void BytecodeGraphBuilder::VisitDebugger() {
// We cannot create a graph from the debugger copy of the bytecode array.
#define DEBUG_BREAK(Name, ...) \
void BytecodeGraphBuilder::Visit##Name() { UNREACHABLE(); }
-DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
#undef DEBUG_BREAK
void BytecodeGraphBuilder::VisitIncBlockCounter() {
@@ -3481,7 +3486,9 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (has_control) ++input_count_with_deps;
if (has_effect) ++input_count_with_deps;
Node** buffer = EnsureInputBufferSize(input_count_with_deps);
- memcpy(buffer, value_inputs, kSystemPointerSize * value_input_count);
+ if (value_input_count > 0) {
+ memcpy(buffer, value_inputs, kSystemPointerSize * value_input_count);
+ }
Node** current_input = buffer + value_input_count;
if (has_context) {
*current_input++ = OperatorProperties::NeedsExactContext(op)
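
The added guard in MakeNode matters because passing a null pointer to memcpy is undefined behaviour even with a zero count, and operators with zero value inputs hand in exactly that. The rule in isolation:

#include <cstring>

// memcpy's pointer arguments must be valid even when count == 0, so a
// zero-input node (value_inputs == nullptr) must skip the call entirely.
inline void CopyInputs(void** buffer, void* const* value_inputs, int count) {
  if (count > 0) {
    std::memcpy(buffer, value_inputs, sizeof(void*) * count);
  }
}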
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index eaa43c6816..99d9b64766 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -371,6 +371,8 @@ class BytecodeGraphBuilder {
needs_eager_checkpoint_ = value;
}
+ Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+
Handle<Context> native_context() const { return native_context_; }
#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
@@ -432,6 +434,8 @@ class BytecodeGraphBuilder {
SourcePosition const start_position_;
+ Handle<SharedFunctionInfo> const shared_info_;
+
// The native context for which we optimize.
Handle<Context> const native_context_;
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 4b1c211084..3a75c09327 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -951,6 +951,18 @@ Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset,
return raw_assembler()->Load(rep, base, offset, needs_poisoning);
}
+Node* CodeAssembler::LoadFullTagged(Node* base,
+ LoadSensitivity needs_poisoning) {
+ return BitcastWordToTagged(
+ Load(MachineType::Pointer(), base, needs_poisoning));
+}
+
+Node* CodeAssembler::LoadFullTagged(Node* base, Node* offset,
+ LoadSensitivity needs_poisoning) {
+ return BitcastWordToTagged(
+ Load(MachineType::Pointer(), base, offset, needs_poisoning));
+}
+
Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
return raw_assembler()->AtomicLoad(rep, base, offset);
}
@@ -972,7 +984,7 @@ TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
ExternalConstant(ExternalReference::isolate_root(isolate()));
int offset = IsolateData::root_slot_offset(root_index);
return UncheckedCast<Object>(
- Load(MachineType::AnyTagged(), isolate_root, IntPtrConstant(offset)));
+ LoadFullTagged(isolate_root, IntPtrConstant(offset)));
}
Node* CodeAssembler::Store(Node* base, Node* value) {
@@ -1007,6 +1019,18 @@ Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
return raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
}
+Node* CodeAssembler::StoreFullTaggedNoWriteBarrier(Node* base,
+ Node* tagged_value) {
+ return StoreNoWriteBarrier(MachineType::PointerRepresentation(), base,
+ BitcastTaggedToWord(tagged_value));
+}
+
+Node* CodeAssembler::StoreFullTaggedNoWriteBarrier(Node* base, Node* offset,
+ Node* tagged_value) {
+ return StoreNoWriteBarrier(MachineType::PointerRepresentation(), base, offset,
+ BitcastTaggedToWord(tagged_value));
+}
+
Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
Node* offset, Node* value, Node* value_high) {
return raw_assembler()->AtomicStore(rep, base, offset, value, value_high);
@@ -1019,12 +1043,12 @@ Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
return raw_assembler()->Atomic##name(type, base, offset, value, \
value_high); \
}
-ATOMIC_FUNCTION(Exchange);
-ATOMIC_FUNCTION(Add);
-ATOMIC_FUNCTION(Sub);
-ATOMIC_FUNCTION(And);
-ATOMIC_FUNCTION(Or);
-ATOMIC_FUNCTION(Xor);
+ATOMIC_FUNCTION(Exchange)
+ATOMIC_FUNCTION(Add)
+ATOMIC_FUNCTION(Sub)
+ATOMIC_FUNCTION(And)
+ATOMIC_FUNCTION(Or)
+ATOMIC_FUNCTION(Xor)
#undef ATOMIC_FUNCTION
Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base,
@@ -1041,8 +1065,8 @@ Node* CodeAssembler::StoreRoot(RootIndex root_index, Node* value) {
Node* isolate_root =
ExternalConstant(ExternalReference::isolate_root(isolate()));
int offset = IsolateData::root_slot_offset(root_index);
- return StoreNoWriteBarrier(MachineRepresentation::kTagged, isolate_root,
- IntPtrConstant(offset), value);
+ return StoreFullTaggedNoWriteBarrier(isolate_root, IntPtrConstant(offset),
+ value);
}
Node* CodeAssembler::Retain(Node* value) {
@@ -1738,7 +1762,9 @@ void CodeAssemblerLabel::Bind(AssemblerDebugInfo debug_info) {
<< "\n# previous: " << *label_->block();
FATAL("%s", str.str().c_str());
}
- state_->raw_assembler_->SetSourcePosition(debug_info.file, debug_info.line);
+ if (FLAG_enable_source_at_csa_bind) {
+ state_->raw_assembler_->SetSourcePosition(debug_info.file, debug_info.line);
+ }
state_->raw_assembler_->Bind(label_, debug_info);
UpdateVariablesAfterBind();
}
@@ -1820,6 +1846,8 @@ void CodeAssemblerParameterizedLabelBase::AddInputs(std::vector<Node*> inputs) {
if (!phi_nodes_.empty()) {
DCHECK_EQ(inputs.size(), phi_nodes_.size());
for (size_t i = 0; i < inputs.size(); ++i) {
+ // We use {nullptr} as a sentinel for an uninitialized value.
+ if (phi_nodes_[i] == nullptr) continue;
state_->raw_assembler_->AppendPhiInput(phi_nodes_[i], inputs[i]);
}
} else {
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 4f63ea3198..5d5cb5e00d 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -22,6 +22,7 @@
#include "src/objects/data-handler.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-array-buffer.h"
+#include "src/objects/js-collection.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object.h"
#include "src/objects/oddball.h"
@@ -40,6 +41,7 @@ class BigInt;
class CallInterfaceDescriptor;
class Callable;
class Factory;
+class FinalizationGroupCleanupJobTask;
class InterpreterData;
class Isolate;
class JSAsyncFunctionObject;
@@ -56,10 +58,9 @@ class JSRelativeTimeFormat;
class JSSegmentIterator;
class JSSegmenter;
class JSV8BreakIterator;
-class JSWeakCell;
class JSWeakCollection;
-class JSWeakFactory;
-class JSWeakFactoryCleanupIterator;
+class JSFinalizationGroup;
+class JSFinalizationGroupCleanupIterator;
class JSWeakMap;
class JSWeakRef;
class JSWeakSet;
@@ -70,7 +71,7 @@ class PromiseReaction;
class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
class WasmDebugInfo;
-class WeakFactoryCleanupJobTask;
+class WeakCell;
class Zone;
template <typename T>
@@ -272,6 +273,16 @@ enum class ObjectType {
#undef ENUM_ELEMENT
#undef ENUM_STRUCT_ELEMENT
+enum class CheckBounds { kAlways, kDebugOnly };
+inline bool NeedsBoundsCheck(CheckBounds check_bounds) {
+ switch (check_bounds) {
+ case CheckBounds::kAlways:
+ return true;
+ case CheckBounds::kDebugOnly:
+ return DEBUG_BOOL;
+ }
+}
+
class AccessCheckNeeded;
class BigIntWrapper;
class ClassBoilerplate;
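
NeedsBoundsCheck turns the new enum into a compile-time-foldable predicate, so callers can request checks that exist only in debug builds. A hypothetical caller (LoadElement and kDebugBool are stand-ins; V8 spells the latter DEBUG_BOOL):

#include <cassert>

enum class CheckBounds { kAlways, kDebugOnly };

#ifdef DEBUG
constexpr bool kDebugBool = true;
#else
constexpr bool kDebugBool = false;
#endif

constexpr bool NeedsBoundsCheck(CheckBounds check_bounds) {
  return check_bounds == CheckBounds::kAlways ? true : kDebugBool;
}

// Hot paths pass kDebugOnly: the check compiles away entirely in release.
int LoadElement(const int* array, int length, int index, CheckBounds check) {
  if (NeedsBoundsCheck(check)) {
    assert(0 <= index && index < length);
  }
  return array[index];
}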
@@ -917,6 +928,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* Load(MachineType rep, Node* base, Node* offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
Node* AtomicLoad(MachineType rep, Node* base, Node* offset);
+ // Load an uncompressed tagged value from a (most likely off-JS-heap) memory
+ // location.
+ Node* LoadFullTagged(
+ Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Node* LoadFullTagged(
+ Node* base, Node* offset,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
// Load a value from the root array.
TNode<Object> LoadRoot(RootIndex root_index);
@@ -927,6 +945,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset,
Node* value);
+ // Stores an uncompressed tagged value to a (most likely off-JS-heap) memory
+ // location without a write barrier.
+ Node* StoreFullTaggedNoWriteBarrier(Node* base, Node* tagged_value);
+ Node* StoreFullTaggedNoWriteBarrier(Node* base, Node* offset,
+ Node* tagged_value);
+
// Optimized memory operations that map to Turbofan simplified nodes.
TNode<HeapObject> OptimizedAllocate(TNode<IntPtrT> size,
PretenureFlag pretenure);
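The full-tagged accessors above exist so that a tagged value kept in a full-width (uncompressed) slot, typically off the JS heap, is moved as a whole machine word. A hedged usage sketch (hypothetical helper, assuming the declarations above):

Node* RoundTripThroughExternalSlot(CodeAssembler* ca, Node* base, Node* offset,
                                   Node* value) {
  // Store as a full word, no write barrier (the slot is not heap-scanned).
  ca->StoreFullTaggedNoWriteBarrier(base, offset, value);
  // Load the word back and reinterpret it as a tagged value.
  return ca->LoadFullTagged(base, offset);
}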
@@ -1066,6 +1090,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return Unsigned(
IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<RawPtrT> RawPtrAdd(TNode<RawPtrT> left, TNode<IntPtrT> right) {
+ return ReinterpretCast<RawPtrT>(IntPtrAdd(left, right));
+ }
+ TNode<RawPtrT> RawPtrAdd(TNode<IntPtrT> left, TNode<RawPtrT> right) {
+ return ReinterpretCast<RawPtrT>(IntPtrAdd(left, right));
+ }
TNode<WordT> WordShl(SloppyTNode<WordT> value, int shift);
TNode<WordT> WordShr(SloppyTNode<WordT> value, int shift);
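RawPtrAdd above is a thin typed wrapper: both overloads reinterpret the IntPtrAdd result back to RawPtrT, so either operand may carry the pointer type. Sketch (hypothetical helper):

TNode<RawPtrT> AdvanceByBytes(CodeAssembler* ca, TNode<RawPtrT> p,
                              TNode<IntPtrT> bytes) {
  return ca->RawPtrAdd(p, bytes);  // equivalent to ca->RawPtrAdd(bytes, p)
}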
@@ -1275,16 +1305,22 @@ class V8_EXPORT_PRIVATE CodeAssembler {
}
template <class... TArgs>
- Node* ConstructJS(Callable const& callable, Node* context, Node* new_target,
- TArgs... args) {
+ Node* ConstructJSWithTarget(Callable const& callable, Node* context,
+ Node* target, Node* new_target, TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
Node* arity = Int32Constant(argc);
Node* receiver = LoadRoot(RootIndex::kUndefinedValue);
// Construct(target, new_target, arity, receiver, arguments...)
- return CallStub(callable, context, new_target, new_target, arity, receiver,
+ return CallStub(callable, context, target, new_target, arity, receiver,
args...);
}
+ template <class... TArgs>
+ Node* ConstructJS(Callable const& callable, Node* context, Node* new_target,
+ TArgs... args) {
+ return ConstructJSWithTarget(callable, context, new_target, new_target,
+ args...);
+ }
Node* CallCFunctionN(Signature<MachineType>* signature, int input_count,
Node* const* inputs);
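Note on the refactor above: ConstructJSWithTarget decouples the callee from new.target, which a Reflect.construct-style caller needs; the retained ConstructJS is now sugar for the common case. In other words (a gloss, not patch code):

// ConstructJS(callable, context, target, args...)
//   == ConstructJSWithTarget(callable, context, target, target, args...)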
@@ -1553,6 +1589,10 @@ class CodeAssemblerLabel {
std::map<CodeAssemblerVariable::Impl*, std::vector<Node*>,
CodeAssemblerVariable::ImplComparator>
variable_merges_;
+
+ // Cannot be copied because the destructor explicitly calls the destructor
+ // of the underlying {RawMachineLabel}; only one pointer may refer to it.
+ DISALLOW_COPY_AND_ASSIGN(CodeAssemblerLabel);
};
class CodeAssemblerParameterizedLabelBase {
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 2421d7d43b..f9d05ef851 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -883,7 +883,7 @@ struct CommonOperatorGlobalCache final {
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(CommonOperatorGlobalCache,
- GetCommonOperatorGlobalCache);
+ GetCommonOperatorGlobalCache)
}
CommonOperatorBuilder::CommonOperatorBuilder(Zone* zone)
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 4b3e684c51..efcce684b8 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -6,13 +6,15 @@
#include "src/handles-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/allocation-site-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
-CompilationDependencies::CompilationDependencies(Isolate* isolate, Zone* zone)
- : zone_(zone), dependencies_(zone), isolate_(isolate) {}
+CompilationDependencies::CompilationDependencies(JSHeapBroker* broker,
+ Zone* zone)
+ : zone_(zone), broker_(broker), dependencies_(zone) {}
class CompilationDependencies::Dependency : public ZoneObject {
public:
@@ -164,24 +166,17 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the type.
FieldTypeDependency(const MapRef& owner, int descriptor,
- const ObjectRef& type, PropertyConstness constness)
- : owner_(owner),
- descriptor_(descriptor),
- type_(type),
- constness_(constness) {
+ const ObjectRef& type)
+ : owner_(owner), descriptor_(descriptor), type_(type) {
DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
DCHECK(type_.equals(owner_.GetFieldType(descriptor_)));
- DCHECK_EQ(constness_, owner_.GetPropertyDetails(descriptor_).constness());
}
bool IsValid() const override {
DisallowHeapAllocation no_heap_allocation;
Handle<Map> owner = owner_.object();
Handle<Object> type = type_.object();
- return *type == owner->instance_descriptors()->GetFieldType(descriptor_) &&
- constness_ == owner->instance_descriptors()
- ->GetDetails(descriptor_)
- .constness();
+ return *type == owner->instance_descriptors()->GetFieldType(descriptor_);
}
void Install(const MaybeObjectHandle& code) override {
@@ -194,7 +189,34 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
MapRef owner_;
int descriptor_;
ObjectRef type_;
- PropertyConstness constness_;
+};
+
+class FieldConstnessDependency final
+ : public CompilationDependencies::Dependency {
+ public:
+ FieldConstnessDependency(const MapRef& owner, int descriptor)
+ : owner_(owner), descriptor_(descriptor) {
+ DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
+ DCHECK_EQ(PropertyConstness::kConst,
+ owner_.GetPropertyDetails(descriptor_).constness());
+ }
+
+ bool IsValid() const override {
+ DisallowHeapAllocation no_heap_allocation;
+ Handle<Map> owner = owner_.object();
+ return PropertyConstness::kConst ==
+ owner->instance_descriptors()->GetDetails(descriptor_).constness();
+ }
+
+ void Install(const MaybeObjectHandle& code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
+ DependentCode::kFieldOwnerGroup);
+ }
+
+ private:
+ MapRef owner_;
+ int descriptor_;
};
class GlobalPropertyDependency final
@@ -361,15 +383,37 @@ PretenureFlag CompilationDependencies::DependOnPretenureMode(
return mode;
}
+PropertyConstness CompilationDependencies::DependOnFieldConstness(
+ const MapRef& map, int descriptor) {
+ MapRef owner = map.FindFieldOwner(descriptor);
+ PropertyConstness constness =
+ owner.GetPropertyDetails(descriptor).constness();
+ if (constness == PropertyConstness::kMutable) return constness;
+
+ // If the map can have fast elements transitions, then the field can only
+ // be considered constant if the map does not transition.
+ if (Map::CanHaveFastTransitionableElementsKind(map.instance_type())) {
+ // If the map can already transition away, let us report the field as
+ // mutable.
+ if (!map.is_stable()) {
+ return PropertyConstness::kMutable;
+ }
+ DependOnStableMap(map);
+ }
+
+ DCHECK_EQ(constness, PropertyConstness::kConst);
+ dependencies_.push_front(new (zone_)
+ FieldConstnessDependency(owner, descriptor));
+ return PropertyConstness::kConst;
+}
+
void CompilationDependencies::DependOnFieldType(const MapRef& map,
int descriptor) {
MapRef owner = map.FindFieldOwner(descriptor);
ObjectRef type = owner.GetFieldType(descriptor);
- PropertyConstness constness =
- owner.GetPropertyDetails(descriptor).constness();
DCHECK(type.equals(map.GetFieldType(descriptor)));
- dependencies_.push_front(
- new (zone_) FieldTypeDependency(owner, descriptor, type, constness));
+ dependencies_.push_front(new (zone_)
+ FieldTypeDependency(owner, descriptor, type));
}
void CompilationDependencies::DependOnGlobalProperty(
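A hedged sketch of how a consumer might use the new DependOnFieldConstness (variable names hypothetical): only a kConst result records a dependency, so kMutable results must not be relied upon.

PropertyConstness constness =
    dependencies->DependOnFieldConstness(map_ref, descriptor);
if (constness == PropertyConstness::kConst) {
  // The field may be treated as constant; dependent code deoptimizes if
  // the field is ever reconfigured to mutable.
}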
@@ -380,8 +424,46 @@ void CompilationDependencies::DependOnGlobalProperty(
GlobalPropertyDependency(cell, type, read_only));
}
-void CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
+bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
+ if (cell.value().AsSmi() != Isolate::kProtectorValid) return false;
dependencies_.push_front(new (zone_) ProtectorDependency(cell));
+ return true;
+}
+
+bool CompilationDependencies::DependOnArrayBufferDetachingProtector() {
+ return DependOnProtector(PropertyCellRef(
+ broker_,
+ broker_->isolate()->factory()->array_buffer_detaching_protector()));
+}
+
+bool CompilationDependencies::DependOnArrayIteratorProtector() {
+ return DependOnProtector(PropertyCellRef(
+ broker_, broker_->isolate()->factory()->array_iterator_protector()));
+}
+
+bool CompilationDependencies::DependOnArraySpeciesProtector() {
+ return DependOnProtector(PropertyCellRef(
+ broker_, broker_->isolate()->factory()->array_species_protector()));
+}
+
+bool CompilationDependencies::DependOnNoElementsProtector() {
+ return DependOnProtector(PropertyCellRef(
+ broker_, broker_->isolate()->factory()->no_elements_protector()));
+}
+
+bool CompilationDependencies::DependOnPromiseHookProtector() {
+ return DependOnProtector(PropertyCellRef(
+ broker_, broker_->isolate()->factory()->promise_hook_protector()));
+}
+
+bool CompilationDependencies::DependOnPromiseSpeciesProtector() {
+ return DependOnProtector(PropertyCellRef(
+ broker_, broker_->isolate()->factory()->promise_species_protector()));
+}
+
+bool CompilationDependencies::DependOnPromiseThenProtector() {
+ return DependOnProtector(PropertyCellRef(
+ broker_, broker_->isolate()->factory()->promise_then_protector()));
}
void CompilationDependencies::DependOnElementsKind(
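Because DependOnProtector now reports validity, callers can choose between a graceful bailout and asserting; both patterns appear in the js-call-reducer.cc hunks below. Sketch with a hypothetical deps pointer:

if (!deps->DependOnNoElementsProtector()) {
  return NoChange();  // Protector already invalidated: skip the fast path.
}
// ...or, where validity was established earlier and must still hold:
// if (!deps->DependOnNoElementsProtector()) UNREACHABLE();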
@@ -431,7 +513,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
// these cases, because once the code gets executed it will do a stack check
// that triggers its deoptimization.
if (FLAG_stress_gc_during_compilation) {
- isolate_->heap()->PreciseCollectAllGarbage(
+ broker_->isolate()->heap()->PreciseCollectAllGarbage(
Heap::kNoGCFlags, GarbageCollectionReason::kTesting,
kGCCallbackFlagForced);
}
@@ -447,8 +529,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
namespace {
// This function expects to never see a JSProxy.
-void DependOnStablePrototypeChain(JSHeapBroker* broker,
- CompilationDependencies* deps, MapRef map,
+void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
const JSObjectRef& last_prototype) {
while (true) {
map.SerializePrototype();
@@ -461,19 +542,18 @@ void DependOnStablePrototypeChain(JSHeapBroker* broker,
} // namespace
void CompilationDependencies::DependOnStablePrototypeChains(
- JSHeapBroker* broker, std::vector<Handle<Map>> const& receiver_maps,
- const JSObjectRef& holder) {
+ std::vector<Handle<Map>> const& receiver_maps, const JSObjectRef& holder) {
// Determine actual holder and perform prototype chain checks.
for (auto map : receiver_maps) {
- MapRef receiver_map(broker, map);
+ MapRef receiver_map(broker_, map);
if (receiver_map.IsPrimitiveMap()) {
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
base::Optional<JSFunctionRef> constructor =
- broker->native_context().GetConstructorFunction(receiver_map);
+ broker_->native_context().GetConstructorFunction(receiver_map);
if (constructor.has_value()) receiver_map = constructor->initial_map();
}
- DependOnStablePrototypeChain(broker, this, receiver_map, holder);
+ DependOnStablePrototypeChain(this, receiver_map, holder);
}
}
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index 1a6760f867..f4fcb58c34 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -28,7 +28,7 @@ class SlackTrackingPrediction {
// Collects and installs dependencies of the code that is being generated.
class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
public:
- CompilationDependencies(Isolate* isolate, Zone* zone);
+ CompilationDependencies(JSHeapBroker* broker, Zone* zone);
V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code);
@@ -55,12 +55,31 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// field is identified by the arguments.
void DependOnFieldType(const MapRef& map, int descriptor);
+ // Return a field's constness and, if kConst, record the assumption that it
+ // remains kConst. The field is identified by the arguments.
+ //
+ // For arrays, arguments objects and value wrappers, only consider the field
+ // kConst if the map is stable (and register a stability dependency in that
+ // case). This is to ensure that fast elements kind transitions cannot be
+ // used to mutate fields without deoptimization of the dependent code.
+ PropertyConstness DependOnFieldConstness(const MapRef& map, int descriptor);
+
// Record the assumption that neither {cell}'s {CellType} changes, nor the
// {IsReadOnly()} flag of {cell}'s {PropertyDetails}.
void DependOnGlobalProperty(const PropertyCellRef& cell);
- // Record the assumption that the protector remains valid.
- void DependOnProtector(const PropertyCellRef& cell);
+ // Return the validity of the given protector and, if true, record the
+ // assumption that the protector remains valid.
+ bool DependOnProtector(const PropertyCellRef& cell);
+
+ // Convenience wrappers around {DependOnProtector}.
+ bool DependOnArrayBufferDetachingProtector();
+ bool DependOnArrayIteratorProtector();
+ bool DependOnArraySpeciesProtector();
+ bool DependOnNoElementsProtector();
+ bool DependOnPromiseHookProtector();
+ bool DependOnPromiseSpeciesProtector();
+ bool DependOnPromiseThenProtector();
// Record the assumption that {site}'s {ElementsKind} doesn't change.
void DependOnElementsKind(const AllocationSiteRef& site);
@@ -68,8 +87,7 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// Depend on the stability of (the maps of) all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.
void DependOnStablePrototypeChains(
- JSHeapBroker* broker, std::vector<Handle<Map>> const& receiver_maps,
- const JSObjectRef& holder);
+ std::vector<Handle<Map>> const& receiver_maps, const JSObjectRef& holder);
// Like DependOnElementsKind but also applies to all nested allocation sites.
void DependOnElementsKinds(const AllocationSiteRef& site);
@@ -89,9 +107,9 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
class Dependency;
private:
- Zone* zone_;
+ Zone* const zone_;
+ JSHeapBroker* const broker_;
ZoneForwardList<Dependency*> dependencies_;
- Isolate* isolate_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 02e5d10574..fb850093c8 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -963,6 +963,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTransitionElementsKind:
LowerTransitionElementsKind(node);
break;
+ case IrOpcode::kLoadMessage:
+ result = LowerLoadMessage(node);
+ break;
+ case IrOpcode::kStoreMessage:
+ LowerStoreMessage(node);
+ break;
case IrOpcode::kLoadFieldByIndex:
result = LowerLoadFieldByIndex(node);
break;
@@ -972,6 +978,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kLoadDataViewElement:
result = LowerLoadDataViewElement(node);
break;
+ case IrOpcode::kLoadStackArgument:
+ result = LowerLoadStackArgument(node);
+ break;
case IrOpcode::kStoreTypedElement:
LowerStoreTypedElement(node);
break;
@@ -1538,7 +1547,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
}
Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
- ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op()).maps();
+ ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
size_t const map_count = maps.size();
Node* value = node->InputAt(0);
@@ -2055,11 +2064,30 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
Node* frame_state) {
Node* index = node->InputAt(0);
Node* limit = node->InputAt(1);
- const CheckParameters& params = CheckParametersOf(node->op());
+ const CheckBoundsParameters& params = CheckBoundsParametersOf(node->op());
Node* check = __ Uint32LessThan(index, limit);
- __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, params.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ switch (params.mode()) {
+ case CheckBoundsParameters::kDeoptOnOutOfBounds:
+ __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
+ params.check_parameters().feedback(), check,
+ frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ break;
+ case CheckBoundsParameters::kAbortOnOutOfBounds: {
+ auto if_abort = __ MakeDeferredLabel();
+ auto done = __ MakeLabel();
+
+ __ Branch(check, &done, &if_abort);
+
+ __ Bind(&if_abort);
+ __ Unreachable();
+ __ Goto(&done);
+
+ __ Bind(&done);
+ break;
+ }
+ }
+
return index;
}
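Summarizing the two lowering modes above (a gloss, not patch code):

//   kDeoptOnOutOfBounds: if (!Uint32LessThan(index, limit))
//                          Deoptimize(kOutOfBounds);   // leaves via deopt
//   kAbortOnOutOfBounds: if (!Uint32LessThan(index, limit))
//                          Unreachable();              // aborts, no deopt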
@@ -4103,6 +4131,20 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
__ Bind(&done);
}
+Node* EffectControlLinearizer::LowerLoadMessage(Node* node) {
+ Node* offset = node->InputAt(0);
+ Node* object_pattern =
+ __ LoadField(AccessBuilder::ForExternalIntPtr(), offset);
+ return __ BitcastWordToTagged(object_pattern);
+}
+
+void EffectControlLinearizer::LowerStoreMessage(Node* node) {
+ Node* offset = node->InputAt(0);
+ Node* object = node->InputAt(1);
+ Node* object_pattern = __ BitcastTaggedToWord(object);
+ __ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern);
+}
+
Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
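The LowerLoadMessage/LowerStoreMessage pair above round-trips a tagged pointer through an off-heap intptr-typed slot, which is why no write barrier is involved (a gloss, not patch code):

//   store: word = BitcastTaggedToWord(object); slot<intptr> = word;
//   load:  word = slot<intptr>; object = BitcastWordToTagged(word);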
@@ -4302,6 +4344,16 @@ Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerLoadStackArgument(Node* node) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ Node* argument =
+ __ LoadElement(AccessBuilder::ForStackArgument(), base, index);
+
+ return __ BitcastWordToTagged(argument);
+}
+
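For reference: AccessBuilder::ForStackArgument() centralizes the untagged stack access that escape analysis previously assembled by hand (see the removed ElementAccess in the escape-analysis-reducer.cc hunk below), i.e. roughly:

//   header_size    = CommonFrameConstants::kFixedFrameSizeAboveFp
//                    - kSystemPointerSize;  // so (length - index) lands right
//   base_is_tagged = kUntaggedBase, no write barrier;
//   presumably loaded as a raw word now, hence the BitcastWordToTagged above.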
void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
@@ -4559,7 +4611,7 @@ void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
Node* float_value =
__ LoadField(AccessBuilder::ForHeapNumberValue(), value);
__ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
- index, float_value);
+ index, __ Float64SilenceNaN(float_value));
__ Goto(&done);
}
}
@@ -4625,7 +4677,7 @@ void EffectControlLinearizer::LowerTransitionAndStoreNumberElement(Node* node) {
Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
__ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements, index,
- value);
+ __ Float64SilenceNaN(value));
}
void EffectControlLinearizer::LowerTransitionAndStoreNonNumberElement(
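Why the Float64SilenceNaN in the two stores above: holey FixedDoubleArray storage reserves a specific NaN bit pattern to represent the hole, so arbitrary NaN payloads must be canonicalized to a quiet NaN before landing in the backing store (our gloss of the rationale). In plain C++ terms, the effect is roughly:

#include <cmath>
#include <limits>

double SilenceNaNSketch(double v) {
  // Any NaN collapses to the canonical quiet NaN; other values pass through.
  return std::isnan(v) ? std::numeric_limits<double>::quiet_NaN() : v;
}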
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index af3cba6083..cd7f04b41d 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -163,8 +163,11 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
void LowerTransitionElementsKind(Node* node);
Node* LowerLoadFieldByIndex(Node* node);
+ Node* LowerLoadMessage(Node* node);
Node* LowerLoadTypedElement(Node* node);
Node* LowerLoadDataViewElement(Node* node);
+ Node* LowerLoadStackArgument(Node* node);
+ void LowerStoreMessage(Node* node);
void LowerStoreTypedElement(Node* node);
void LowerStoreDataViewElement(Node* node);
void LowerStoreSignedSmallElement(Node* node);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index ffe49d1e67..688460abee 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -313,18 +313,6 @@ void EscapeAnalysisReducer::Finalize() {
NodeProperties::SetType(arguments_elements_state, Type::OtherInternal());
ReplaceWithValue(node, arguments_elements_state);
- ElementAccess stack_access;
- stack_access.base_is_tagged = BaseTaggedness::kUntaggedBase;
- // Reduce base address by {kSystemPointerSize} such that (length - index)
- // resolves to the right position.
- stack_access.header_size =
- CommonFrameConstants::kFixedFrameSizeAboveFp - kSystemPointerSize;
- stack_access.type = Type::NonInternal();
- stack_access.machine_type = MachineType::AnyTagged();
- stack_access.write_barrier_kind = WriteBarrierKind::kNoWriteBarrier;
- const Operator* load_stack_op =
- jsgraph()->simplified()->LoadElement(stack_access);
-
for (Node* load : loads) {
switch (load->opcode()) {
case IrOpcode::kLoadElement: {
@@ -338,7 +326,8 @@ void EscapeAnalysisReducer::Finalize() {
TypeCache::Get()->kArgumentsLengthType);
NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
NodeProperties::ReplaceValueInput(load, offset, 1);
- NodeProperties::ChangeOp(load, load_stack_op);
+ NodeProperties::ChangeOp(
+ load, jsgraph()->simplified()->LoadStackArgument());
break;
}
case IrOpcode::kLoadField: {
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 8b7c599891..5ee6e7de4f 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -754,7 +754,7 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
current->Get(map_field).To(&object_map)) {
if (object_map) {
current->SetReplacement(LowerCompareMapsWithoutLoad(
- object_map, CompareMapsParametersOf(op).maps(), jsgraph));
+ object_map, CompareMapsParametersOf(op), jsgraph));
break;
} else {
// If the variable has no value, we have not reached the fixed-point
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 45392c068b..c8e4517f66 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -39,7 +39,8 @@ namespace compiler {
V(BitcastFloat64ToInt64) \
V(Float64Abs) \
V(Word32ReverseBytes) \
- V(Word64ReverseBytes)
+ V(Word64ReverseBytes) \
+ V(Float64SilenceNaN)
#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
V(WordShl) \
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 76c5313329..36e837f563 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -26,6 +26,7 @@
#include "src/optimized-compilation-info.h"
#include "src/ostreams.h"
#include "src/source-position.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index d16e38a458..4bc55c3abe 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -944,9 +944,10 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) {
Node* etrue = effect;
Node* vtrue;
{
+ // TODO(magardn): collect feedback so this can be optimized
vtrue = etrue = if_true =
- graph()->NewNode(javascript()->HasProperty(), target, key, context,
- frame_state, etrue, if_true);
+ graph()->NewNode(javascript()->HasProperty(VectorSlotPair()), target,
+ key, context, frame_state, etrue, if_true);
}
// Rewire potential exception edges.
@@ -1070,10 +1071,7 @@ Reduction JSCallReducer::ReduceArrayForEach(
return NoChange();
}
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -1089,9 +1087,9 @@ Reduction JSCallReducer::ReduceArrayForEach(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
effect, control);
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
+ original_length};
+ const int stack_parameters = arraysize(checkpoint_params);
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
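Aside on the recurring std::vector-to-array change in these hunks: arraysize() yields a compile-time element count, so each reduction saves a heap allocation. Roughly (a sketch of the idea; V8's actual helper lives in its base macros):

#include <cstddef>

template <typename T, std::size_t N>
constexpr std::size_t ArraysizeSketch(T (&)[N]) {
  return N;  // deduced from the array type, evaluated at compile time
}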
@@ -1111,7 +1109,7 @@ Reduction JSCallReducer::ReduceArrayForEach(
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
- Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
continue_test, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
@@ -1257,10 +1255,7 @@ Reduction JSCallReducer::ReduceArrayReduce(
}
};
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -1287,13 +1282,12 @@ Reduction JSCallReducer::ReduceArrayReduce(
Builtins::Name builtin_lazy =
left ? Builtins::kArrayReduceLoopLazyDeoptContinuation
: Builtins::kArrayReduceRightLoopLazyDeoptContinuation;
- const std::vector<Node*> checkpoint_params(
- {receiver, fncallback, k, original_length,
- jsgraph()->UndefinedConstant()});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, k, original_length,
+ jsgraph()->UndefinedConstant()};
+ const int stack_parameters = arraysize(checkpoint_params);
check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, builtin_lazy, node->InputAt(0), context,
- checkpoint_params.data(), stack_parameters - 1, outer_frame_state,
+ &checkpoint_params[0], stack_parameters - 1, outer_frame_state,
ContinuationFrameStateMode::LAZY);
}
Node* check_fail = nullptr;
@@ -1315,13 +1309,12 @@ Reduction JSCallReducer::ReduceArrayReduce(
Builtins::Name builtin_eager =
left ? Builtins::kArrayReducePreLoopEagerDeoptContinuation
: Builtins::kArrayReduceRightPreLoopEagerDeoptContinuation;
- const std::vector<Node*> checkpoint_params(
- {receiver, fncallback, original_length});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, original_length};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* find_first_element_frame_state =
CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, builtin_eager, node->InputAt(0), context,
- checkpoint_params.data(), stack_parameters, outer_frame_state,
+ &checkpoint_params[0], stack_parameters, outer_frame_state,
ContinuationFrameStateMode::EAGER);
Node* vloop = k = WireInLoopStart(k, &control, &effect);
@@ -1374,7 +1367,7 @@ Reduction JSCallReducer::ReduceArrayReduce(
: graph()->NewNode(simplified()->NumberLessThanOrEqual(),
jsgraph()->ZeroConstant(), k);
- Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
continue_test, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
@@ -1385,12 +1378,12 @@ Reduction JSCallReducer::ReduceArrayReduce(
Builtins::Name builtin_eager =
left ? Builtins::kArrayReduceLoopEagerDeoptContinuation
: Builtins::kArrayReduceRightLoopEagerDeoptContinuation;
- const std::vector<Node*> checkpoint_params(
- {receiver, fncallback, k, original_length, curloop});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, k, original_length,
+ curloop};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, builtin_eager, node->InputAt(0), context,
- checkpoint_params.data(), stack_parameters, outer_frame_state,
+ &checkpoint_params[0], stack_parameters, outer_frame_state,
ContinuationFrameStateMode::EAGER);
effect =
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
@@ -1431,12 +1424,12 @@ Reduction JSCallReducer::ReduceArrayReduce(
Builtins::Name builtin_lazy =
left ? Builtins::kArrayReduceLoopLazyDeoptContinuation
: Builtins::kArrayReduceRightLoopLazyDeoptContinuation;
- const std::vector<Node*> checkpoint_params(
- {receiver, fncallback, next_k, original_length, curloop});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, next_k, original_length,
+ curloop};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, builtin_lazy, node->InputAt(0), context,
- checkpoint_params.data(), stack_parameters - 1, outer_frame_state,
+ &checkpoint_params[0], stack_parameters - 1, outer_frame_state,
ContinuationFrameStateMode::LAZY);
next_cur = control = effect =
@@ -1526,12 +1519,9 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
}
if (IsHoleyElementsKind(kind)) {
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
-
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->array_species_protector()));
+ if (!dependencies()->DependOnArraySpeciesProtector()) UNREACHABLE();
Node* array_constructor = jsgraph()->Constant(
native_context().GetInitialJSArrayMap(kind).GetConstructor());
@@ -1550,6 +1540,13 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
effect, control);
+ // If the array length >= kMaxFastArrayLength, then CreateArray
+ // will create a dictionary. We should deopt in this case, and make sure
+ // not to attempt inlining again.
+ original_length = effect = graph()->NewNode(
+ simplified()->CheckBounds(p.feedback()), original_length,
+ jsgraph()->Constant(JSArray::kMaxFastArrayLength), effect, control);
+
// Even though {JSCreateArray} is not marked as {kNoThrow}, we can elide the
// exceptional projections because it cannot throw with the given parameters.
Node* a = control = effect = graph()->NewNode(
@@ -1557,9 +1554,9 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
array_constructor, array_constructor, original_length, context,
outer_frame_state, effect, control);
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, a, k, original_length});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg,
+ a, k, original_length};
+ const int stack_parameters = arraysize(checkpoint_params);
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
@@ -1579,7 +1576,7 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
- Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
continue_test, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
@@ -1729,12 +1726,9 @@ Reduction JSCallReducer::ReduceArrayFilter(
const ElementsKind packed_kind = GetPackedElementsKind(kind);
if (IsHoleyElementsKind(kind)) {
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
-
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->array_species_protector()));
+ if (!dependencies()->DependOnArraySpeciesProtector()) UNREACHABLE();
MapRef initial_map = native_context().GetInitialJSArrayMap(packed_kind);
@@ -1781,9 +1775,9 @@ Reduction JSCallReducer::ReduceArrayFilter(
// checkpoint_params yet, but that's okay because it'll never be called.
// Therefore, "to" is mentioned twice, once standing in for the k_value
// value.
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, a, k, original_length, to, to});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, a,
+ k, original_length, to, to};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayFilterLoopLazyDeoptContinuation,
@@ -1801,7 +1795,7 @@ Reduction JSCallReducer::ReduceArrayFilter(
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
- Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
continue_test, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
@@ -1809,9 +1803,9 @@ Reduction JSCallReducer::ReduceArrayFilter(
control = if_true;
{
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, a, k, original_length, to});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, a,
+ k, original_length, to};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayFilterLoopEagerDeoptContinuation,
@@ -1866,9 +1860,9 @@ Reduction JSCallReducer::ReduceArrayFilter(
{
// This frame state is dealt with by hand in
// Builtins::kArrayFilterLoopLazyDeoptContinuation.
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, a, k, original_length, element, to});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, a,
+ k, original_length, element, to};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayFilterLoopLazyDeoptContinuation,
@@ -1895,10 +1889,10 @@ Reduction JSCallReducer::ReduceArrayFilter(
// point in this case. This is safe, because re-evaluating a [ToBoolean]
// coercion is safe.
{
- std::vector<Node*> checkpoint_params({receiver, fncallback, this_arg, a, k,
- original_length, element, to,
- callback_value});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg,
+ a, k, original_length,
+ element, to, callback_value};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayFilterLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
@@ -1997,10 +1991,7 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
return NoChange();
}
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -2016,9 +2007,9 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
effect, control);
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
+ original_length};
+ const int stack_parameters = arraysize(checkpoint_params);
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
@@ -2044,7 +2035,7 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
Node* continue_branch = graph()->NewNode(
- common()->Branch(BranchHint::kTrue), continue_test, control);
+ common()->Branch(BranchHint::kNone), continue_test, control);
control = graph()->NewNode(common()->IfTrue(), continue_branch);
if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
}
@@ -2091,11 +2082,10 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
// Call the callback.
Node* callback_value = nullptr;
{
- std::vector<Node*> call_checkpoint_params({receiver, fncallback, this_arg,
- next_k, original_length,
- if_found_return_value});
- const int call_stack_parameters =
- static_cast<int>(call_checkpoint_params.size());
+ Node* call_checkpoint_params[] = {receiver, fncallback,
+ this_arg, next_k,
+ original_length, if_found_return_value};
+ const int call_stack_parameters = arraysize(call_checkpoint_params);
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, after_callback_lazy_continuation_builtin,
@@ -2317,12 +2307,9 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
}
if (IsHoleyElementsKind(kind)) {
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
-
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->array_species_protector()));
+ if (!dependencies()->DependOnArraySpeciesProtector()) UNREACHABLE();
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -2351,9 +2338,9 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
// This frame state doesn't ever call the deopt continuation; it's only
// necessary to specify a continuation in order to handle the exceptional
// case.
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
+ original_length};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayEveryLoopLazyDeoptContinuation,
@@ -2369,7 +2356,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
- Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
continue_test, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
@@ -2377,9 +2364,9 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
control = if_true;
{
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
+ original_length};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayEveryLoopEagerDeoptContinuation,
@@ -2433,9 +2420,9 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
{
// This frame state is dealt with by hand in
// Builtins::kArrayEveryLoopLazyDeoptContinuation.
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
+ original_length};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayEveryLoopLazyDeoptContinuation,
@@ -2565,23 +2552,36 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
return NoChange();
}
- Handle<Map> map;
- if (!NodeProperties::GetMapWitness(broker(), node).ToHandle(&map))
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
+ &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ ElementsKind kind;
+ if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
return NoChange();
+ }
- MapRef receiver_map(broker(), map);
- if (!receiver_map.supports_fast_array_iteration()) return NoChange();
+ if (IsHoleyElementsKind(kind)) {
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ }
- ElementsKind const elements_kind = receiver_map.elements_kind();
- if (IsHoleyElementsKind(elements_kind)) {
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
}
- Callable const callable =
- search_variant == SearchVariant::kIndexOf
- ? GetCallableForArrayIndexOf(elements_kind, isolate())
- : GetCallableForArrayIncludes(elements_kind, isolate());
+ Callable const callable = search_variant == SearchVariant::kIndexOf
+ ? GetCallableForArrayIndexOf(kind, isolate())
+ : GetCallableForArrayIncludes(kind, isolate());
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
@@ -2589,9 +2589,6 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
// The stub expects the following arguments: the receiver array, its elements,
// the search_element, the array length, and the index to start searching
// from.
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
Node* elements = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
effect, control);
@@ -2599,8 +2596,8 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(elements_kind)),
- receiver, effect, control);
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
Node* new_from_index = jsgraph()->ZeroConstant();
if (node->op()->ValueInputCount() >= 4) {
Node* from_index = NodeProperties::GetValueInput(node, 3);
@@ -2664,12 +2661,9 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
}
if (IsHoleyElementsKind(kind)) {
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
-
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->array_species_protector()));
+ if (!dependencies()->DependOnArraySpeciesProtector()) UNREACHABLE();
Node* k = jsgraph()->ZeroConstant();
@@ -2698,9 +2692,9 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
// This frame state doesn't ever call the deopt continuation; it's only
// necessary to specify a continuation in order to handle the exceptional
// case.
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
+ original_length};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArraySomeLoopLazyDeoptContinuation,
@@ -2721,7 +2715,7 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
- Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
continue_test, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
@@ -2729,9 +2723,9 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
control = if_true;
{
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
+ original_length};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArraySomeLoopEagerDeoptContinuation,
@@ -2785,9 +2779,9 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
{
// This frame state is dealt with by hand in
// Builtins::kArrayEveryLoopLazyDeoptContinuation.
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
+ original_length};
+ const int stack_parameters = arraysize(checkpoint_params);
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArraySomeLoopLazyDeoptContinuation,
@@ -2866,68 +2860,116 @@ Reduction JSCallReducer::ReduceCallApiFunction(
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int const argc = static_cast<int>(p.arity()) - 2;
- Node* target = NodeProperties::GetValueInput(node, 0);
- Node* receiver =
- (p.convert_mode() == ConvertReceiverMode::kNullOrUndefined)
- ? jsgraph()->Constant(native_context().global_proxy_object())
- : NodeProperties::GetValueInput(node, 1);
+ Node* global_proxy =
+ jsgraph()->Constant(native_context().global_proxy_object());
+ Node* receiver = (p.convert_mode() == ConvertReceiverMode::kNullOrUndefined)
+ ? global_proxy
+ : NodeProperties::GetValueInput(node, 1);
+ Node* holder;
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // See if we can optimize this API call to {shared}.
Handle<FunctionTemplateInfo> function_template_info(
FunctionTemplateInfo::cast(shared.object()->function_data()), isolate());
+ CallOptimization call_optimization(isolate(), function_template_info);
+ if (!call_optimization.is_simple_api_call()) return NoChange();
- // Infer the {receiver} maps, and check if we can inline the API function
- // callback based on those.
+ // Try to infer the {receiver} maps from the graph.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSObjectMap() ||
- (!function_template_info->accept_any_receiver() &&
- receiver_map.is_access_check_needed())) {
- return NoChange();
- }
- // In case of unreliable {receiver} information, the {receiver_maps}
- // must all be stable in order to consume the information.
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- if (!receiver_map.is_stable()) return NoChange();
- }
- }
-
- // See if we can constant-fold the compatible receiver checks.
- CallOptimization call_optimization(isolate(), function_template_info);
- if (!call_optimization.is_simple_api_call()) return NoChange();
- CallOptimization::HolderLookup lookup;
- Handle<JSObject> api_holder =
- call_optimization.LookupHolderOfExpectedType(receiver_maps[0], &lookup);
- if (lookup == CallOptimization::kHolderNotFound) return NoChange();
- for (size_t i = 1; i < receiver_maps.size(); ++i) {
- CallOptimization::HolderLookup lookupi;
- Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
- receiver_maps[i], &lookupi);
- if (lookup != lookupi) return NoChange();
- if (!api_holder.is_identical_to(holder)) return NoChange();
- }
-
- // Install stability dependencies for unreliable {receiver_maps}.
- if (result == NodeProperties::kUnreliableReceiverMaps) {
+ if (result != NodeProperties::kNoReceiverMaps) {
+ // Check that all {receiver_maps} are actually JSReceiver maps and
+ // that the {function_template_info} accepts them without access
+ // checks (even if "access check needed" is set for {receiver}).
+ //
+ // Note that we don't need to know the concrete {receiver} maps here,
+ // meaning it's fine if the {receiver_maps} are unreliable, and we also
+ // don't need to install any stability dependencies, since the only
+ // relevant information regarding the {receiver} is the Map::constructor
+ // field on the root map (which is different from the JavaScript exposed
+ // "constructor" property) and that field cannot change.
+ //
+ // So if we know that {receiver} had a certain constructor at some point
+ // in the past (i.e. it had a certain map), then this constructor is going
+ // to be the same later, since this information cannot change with map
+ // transitions.
+ //
+ // The same is true for the instance type, e.g. we still know that the
+ // instance type is JSObject even if that information is unreliable, and
+ // the "access check needed" bit, which also cannot change later.
for (Handle<Map> map : receiver_maps) {
MapRef receiver_map(broker(), map);
- dependencies()->DependOnStableMap(receiver_map);
+ if (!receiver_map.IsJSReceiverMap() ||
+ (receiver_map.is_access_check_needed() &&
+ !function_template_info->accept_any_receiver())) {
+ return NoChange();
+ }
}
- }
- // Load the {target}s context.
- Node* context = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
- effect, control);
+ // See if we can constant-fold the compatible receiver checks.
+ CallOptimization::HolderLookup lookup;
+ Handle<JSObject> api_holder =
+ call_optimization.LookupHolderOfExpectedType(receiver_maps[0], &lookup);
+ if (lookup == CallOptimization::kHolderNotFound) return NoChange();
+ for (size_t i = 1; i < receiver_maps.size(); ++i) {
+ CallOptimization::HolderLookup lookupi;
+ Handle<JSObject> holderi = call_optimization.LookupHolderOfExpectedType(
+ receiver_maps[i], &lookupi);
+ if (lookup != lookupi) return NoChange();
+ if (!api_holder.is_identical_to(holderi)) return NoChange();
+ }
+
+ // Determine the appropriate holder for the {lookup}.
+ holder = lookup == CallOptimization::kHolderFound
+ ? jsgraph()->HeapConstant(api_holder)
+ : receiver;
+ } else if (function_template_info->accept_any_receiver() &&
+ function_template_info->signature()->IsUndefined(isolate())) {
+ // We haven't found any {receiver_maps}, but we might still be able to
+ // optimize the API call depending on the {function_template_info}.
+ // If the API function accepts any kind of {receiver}, we only need to
+ // ensure that the {receiver} is actually a JSReceiver at this point,
+ // and also pass that as the {holder}. There are two independent bits
+ // here:
+ //
+ // a. When the "accept any receiver" bit is set, it means we don't
+ // need to perform access checks, even if the {receiver}'s map
+ // has the "needs access check" bit set.
+ // b. When the {function_template_info} has no signature, we don't
+ // need to do the compatible receiver check, since all receivers
+ // are considered compatible at that point, and the {receiver}
+ //      will be passed as the {holder}.
+ //
+ receiver = holder = effect =
+ graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
+ receiver, global_proxy, effect, control);
+ } else {
+ // We don't have enough information to eliminate the access check
+ // and/or the compatible receiver check, so use the generic builtin
+ // that does those checks dynamically. This is still significantly
+ // faster than the generic call sequence.
+ Builtins::Name builtin_name =
+ !function_template_info->accept_any_receiver()
+ ? (function_template_info->signature()->IsUndefined(isolate())
+ ? Builtins::kCallFunctionTemplate_CheckAccess
+ : Builtins::
+ kCallFunctionTemplate_CheckAccessAndCompatibleReceiver)
+ : Builtins::kCallFunctionTemplate_CheckCompatibleReceiver;
+ Callable callable = Builtins::CallableFor(isolate(), builtin_name);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->ReplaceInput(1, jsgraph()->HeapConstant(function_template_info));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ return Changed(node);
+ }
- // CallApiCallbackStub's register arguments: code, target, call data, holder,
- // function address.
// TODO(turbofan): Consider introducing a JSCallApiCallback operator for
// this and lower it during JSGenericLowering, and unify this with the
// JSNativeContextSpecialization::InlineApiCall method a bit.
@@ -2937,25 +2979,18 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Callable call_api_callback = CodeFactory::CallApiCallback(isolate());
CallInterfaceDescriptor cid = call_api_callback.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), cid,
- cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
+ graph()->zone(), cid, argc + 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState);
ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
- Node* holder = lookup == CallOptimization::kHolderFound
- ? jsgraph()->HeapConstant(api_holder)
- : receiver;
ExternalReference function_reference = ExternalReference::Create(
&api_function, ExternalReference::DIRECT_API_CALL);
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(call_api_callback.code()));
- node->ReplaceInput(1, context);
- node->InsertInput(graph()->zone(), 2,
- jsgraph()->ExternalConstant(function_reference));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(argc));
- node->InsertInput(graph()->zone(), 4, jsgraph()->Constant(data));
- node->InsertInput(graph()->zone(), 5, holder);
- node->ReplaceInput(6, receiver);
- node->RemoveInput(7 + argc); // Remove context input.
+ node->ReplaceInput(1, jsgraph()->ExternalConstant(function_reference));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(data));
+ node->InsertInput(graph()->zone(), 4, holder);
+ node->ReplaceInput(5, receiver); // Update receiver input.
node->ReplaceInput(8 + argc, effect); // Update effect input.
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
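The generic fallback above selects among three builtins; tabulated for reference (as implemented in this hunk):

//   accept_any_receiver  signature   chosen builtin
//   false                undefined   kCallFunctionTemplate_CheckAccess
//   false                present     kCallFunctionTemplate_CheckAccessAndCompatibleReceiver
//   true                 present     kCallFunctionTemplate_CheckCompatibleReceiver
// (accept_any_receiver with an undefined signature takes the ConvertReceiver
// fast path earlier and never reaches this fallback.)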
@@ -3094,8 +3129,7 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// that no one messed with the %ArrayIteratorPrototype%.next method.
if (node->opcode() == IrOpcode::kJSCallWithSpread ||
node->opcode() == IrOpcode::kJSConstructWithSpread) {
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->array_iterator_protector()));
+ if (!dependencies()->DependOnArrayIteratorProtector()) UNREACHABLE();
}
// Remove the {arguments_list} input from the {node}.
@@ -3332,12 +3366,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (!p.feedback().IsValid()) return NoChange();
FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
if (nexus.IsUninitialized()) {
- if (flags() & kBailoutOnUninitialized) {
- // Introduce a SOFT deopt if the call {node} wasn't executed so far.
- return ReduceSoftDeoptimize(
- node, DeoptimizeReason::kInsufficientTypeFeedbackForCall);
- }
- return NoChange();
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForCall);
}
base::Optional<HeapObjectRef> feedback =
@@ -3752,13 +3782,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
if (p.feedback().IsValid()) {
FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
if (nexus.IsUninitialized()) {
- if (flags() & kBailoutOnUninitialized) {
- // Introduce a SOFT deopt if the construct {node} wasn't executed so
- // far.
- return ReduceSoftDeoptimize(
- node, DeoptimizeReason::kInsufficientTypeFeedbackForConstruct);
- }
- return NoChange();
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForConstruct);
}
base::Optional<HeapObjectRef> feedback =
@@ -4304,18 +4329,21 @@ Reduction JSCallReducer::ReduceReturnReceiver(Node* node) {
Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
DeoptimizeReason reason) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
- frame_state, effect, control);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- node->TrimInputCount(0);
- NodeProperties::ChangeOp(node, common()->Dead());
- return Changed(node);
+ if (flags() & kBailoutOnUninitialized) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
+ return Changed(node);
+ }
+ return NoChange();
}
// ES6 section 22.1.3.18 Array.prototype.push ( )
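
Centralizing the kBailoutOnUninitialized test inside ReduceSoftDeoptimize (above) lets every call site request a soft deopt unconditionally: when bailouts are disabled, the helper now answers NoChange() itself instead of each caller re-checking the flag. The resulting call-site shape, restated from ReduceJSCall:

    if (nexus.IsUninitialized()) {
      return ReduceSoftDeoptimize(
          node, DeoptimizeReason::kInsufficientTypeFeedbackForCall);
    }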
@@ -4343,8 +4371,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
return NoChange();
}
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
// If the {receiver_maps} information is not reliable, we need
// to check that the {receiver} still has one of these maps.
@@ -4448,8 +4475,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
return NoChange();
}
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
// If the {receiver_maps} information is not reliable, we need
// to check that the {receiver} still has one of these maps.
@@ -4558,8 +4584,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
return NoChange();
}
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
// If the {receiver_maps} information is not reliable, we need
// to check that the {receiver} still has one of these maps.
@@ -4770,15 +4795,10 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
}
}
- // Install code dependency on the Array[@@species] protector.
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->array_species_protector()));
-
- // Install code dependency on the array protector for holey arrays.
if (can_be_holey) {
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
+ if (!dependencies()->DependOnArraySpeciesProtector()) UNREACHABLE();
// If we have unreliable maps, we need a map check, as there might be
// side-effects caused by the evaluation of the {node}s parameters.
@@ -4931,8 +4951,7 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// Install code dependency on the array protector for holey arrays.
if (IsHoleyElementsKind(elements_kind)) {
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
// Load the (current) {iterated_object} from the {iterator}.
@@ -4949,12 +4968,7 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
if (IsFixedTypedArrayElementsKind(elements_kind)) {
// See if we can skip the detaching check.
- if (isolate()->IsArrayBufferDetachingIntact()) {
- // Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets detached.
- dependencies()->DependOnProtector(PropertyCellRef(
- broker(), factory()->array_buffer_detaching_protector()));
- } else {
+ if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
// Bail out if the {iterated_object}s JSArrayBuffer was detached.
Node* buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
@@ -5013,7 +5027,7 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// Check whether {index} is within the valid range for the {iterated_object}.
Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ graph()->NewNode(common()->Branch(BranchHint::kNone), check, control);
Node* done_true;
Node* value_true;
@@ -5355,7 +5369,7 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
Node* check0 =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+ graph()->NewNode(common()->Branch(BranchHint::kNone), check0, control);
Node* etrue0 = effect;
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
@@ -5490,13 +5504,11 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
if (!FLAG_experimental_inline_promise_constructor) return NoChange();
- if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
// Only handle builtins Promises, not subclasses.
if (target != new_target) return NoChange();
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_hook_protector()));
+ if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
SharedFunctionInfoRef promise_shared =
native_context().promise_function().shared();
@@ -5648,10 +5660,7 @@ Reduction JSCallReducer::ReducePromiseInternalConstructor(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
// Check that promises aren't being observed through (debug) hooks.
- if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
-
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_hook_protector()));
+ if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
// Create a new pending promise.
Node* value = effect =
@@ -5720,12 +5729,6 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Check that the Promise.then protector is intact. This protector guards
- // that all JSPromise instances whose [[Prototype]] is the initial
- // %PromisePrototype% yield the initial %PromisePrototype%.then method
- // when looking up "then".
- if (!isolate()->IsPromiseThenLookupChainIntact()) return NoChange();
-
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
@@ -5746,8 +5749,11 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
}
}
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_then_protector()));
+ // Check that the Promise.then protector is intact. This protector guards
+ // that all JSPromise instances whose [[Prototype]] is the initial
+ // %PromisePrototype% yield the initial %PromisePrototype%.then method
+ // when looking up "then".
+ if (!dependencies()->DependOnPromiseThenProtector()) return NoChange();
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -5790,21 +5796,6 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
return NoChange();
}
- // Check that promises aren't being observed through (debug) hooks.
- if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
-
- // Check that the Promise#then protector is intact. This protector guards
- // that all JSPromise instances whose [[Prototype]] is the initial
- // %PromisePrototype% yield the initial %PromisePrototype%.then method
- // when looking up "then".
- if (!isolate()->IsPromiseThenLookupChainIntact()) return NoChange();
-
- // Also check that the @@species protector is intact, which guards the
- // lookup of "constructor" on JSPromise instances, whoch [[Prototype]] is
- // the initial %PromisePrototype%, and the Symbol.species lookup on the
- // %PromisePrototype%.
- if (!isolate()->IsPromiseSpeciesLookupChainIntact()) return NoChange();
-
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
@@ -5825,12 +5816,20 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
}
}
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_hook_protector()));
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_then_protector()));
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_species_protector()));
+ // Check that promises aren't being observed through (debug) hooks.
+ if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
+
+ // Check that the Promise#then protector is intact. This protector guards
+ // that all JSPromise instances whose [[Prototype]] is the initial
+ // %PromisePrototype% yield the initial %PromisePrototype%.then method
+ // when looking up "then".
+ if (!dependencies()->DependOnPromiseThenProtector()) return NoChange();
+
+ // Also check that the @@species protector is intact, which guards the
+ // lookup of "constructor" on JSPromise instances, whoch [[Prototype]] is
+ // the initial %PromisePrototype%, and the Symbol.species lookup on the
+ // %PromisePrototype%.
+ if (!dependencies()->DependOnPromiseSpeciesProtector()) return NoChange();
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -5951,15 +5950,6 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
- // Check that promises aren't being observed through (debug) hooks.
- if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
-
- // Check if the @@species protector is intact. The @@species protector
- // guards the "constructor" lookup on all JSPromise instances and the
- // initial Promise.prototype, as well as the Symbol.species lookup on
- // the Promise constructor.
- if (!isolate()->IsPromiseSpeciesLookupChainIntact()) return NoChange();
-
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult infer_receiver_maps_result =
@@ -5982,10 +5972,14 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
}
}
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_hook_protector()));
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_species_protector()));
+ // Check that promises aren't being observed through (debug) hooks.
+ if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
+
+ // Check if the @@species protector is intact. The @@species protector
+ // guards the "constructor" lookup on all JSPromise instances and the
+ // initial Promise.prototype, as well as the Symbol.species lookup on
+ // the Promise constructor.
+ if (!dependencies()->DependOnPromiseSpeciesProtector()) return NoChange();
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -6676,12 +6670,7 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
receiver, effect, control);
// See if we can skip the detaching check.
- if (isolate()->IsArrayBufferDetachingIntact()) {
- // Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets detached.
- dependencies()->DependOnProtector(PropertyCellRef(
- broker(), factory()->array_buffer_detaching_protector()));
- } else {
+ if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
// Check whether {receiver}s JSArrayBuffer was detached.
Node* buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
@@ -6825,12 +6814,7 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- if (isolate()->IsArrayBufferDetachingIntact()) {
- // Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets detached.
- dependencies()->DependOnProtector(PropertyCellRef(
- broker(), factory()->array_buffer_detaching_protector()));
- } else {
+ if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
// Bail out if the {buffer} was detached.
Node* buffer_bit_field = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
@@ -7017,8 +7001,8 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Compute property access info for "exec" on {resolution}.
PropertyAccessInfo ai_exec;
- AccessInfoFactory access_info_factory(
- broker(), dependencies(), native_context().object(), graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), dependencies(),
+ graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
MapHandles(regexp_maps.begin(), regexp_maps.end()),
factory()->exec_string(), AccessMode::kLoad, &ai_exec)) {
@@ -7044,19 +7028,18 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Protect the prototype chain from changes.
dependencies()->DependOnStablePrototypeChains(
- broker(), ai_exec.receiver_maps(), JSObjectRef(broker(), holder));
+ ai_exec.receiver_maps(), JSObjectRef(broker(), holder));
// Protect the exec method change in the holder.
Handle<Object> exec_on_proto;
- Handle<Map> holder_map(holder->map(), isolate());
- Handle<DescriptorArray> descriptors(holder_map->instance_descriptors(),
- isolate());
+ MapRef holder_map(broker(), handle(holder->map(), isolate()));
+ Handle<DescriptorArray> descriptors(
+ holder_map.object()->instance_descriptors(), isolate());
int descriptor_index =
- descriptors->Search(*(factory()->exec_string()), *holder_map);
+ descriptors->Search(*(factory()->exec_string()), *holder_map.object());
CHECK_NE(descriptor_index, DescriptorArray::kNotFound);
-
- dependencies()->DependOnFieldType(MapRef(broker(), holder_map),
- descriptor_index);
+ holder_map.SerializeOwnDescriptors();
+ dependencies()->DependOnFieldType(holder_map, descriptor_index);
} else {
return NoChange();
}
@@ -7067,7 +7050,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
Handle<JSObject> holder;
if (ai_exec.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- broker(), ai_exec.receiver_maps(), JSObjectRef(broker(), holder));
+ ai_exec.receiver_maps(), JSObjectRef(broker(), holder));
}
if (need_map_check) {
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index db64b984fb..c88144788c 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -241,8 +241,6 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
if (!p.feedback().IsValid()) {
- node->InsertInput(
- zone(), 3, jsgraph()->SmiConstant(static_cast<int>(p.language_mode())));
ReplaceWithRuntimeCall(node, Runtime::kSetNamedProperty);
return;
}
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index d4805d278a..5628e0d337 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -11,6 +11,7 @@
#include "src/compiler/graph-reducer.h"
#include "src/compiler/per-isolate-compiler-cache.h"
#include "src/objects-inl.h"
+#include "src/objects/allocation-site-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/instance-type-inl.h"
@@ -310,6 +311,7 @@ class JSFunctionData : public JSObjectData {
JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSFunction> object);
+ bool has_feedback_vector() const { return has_feedback_vector_; }
bool has_initial_map() const { return has_initial_map_; }
bool has_prototype() const { return has_prototype_; }
bool PrototypeRequiresRuntimeLookup() const {
@@ -323,12 +325,14 @@ class JSFunctionData : public JSObjectData {
MapData* initial_map() const { return initial_map_; }
ObjectData* prototype() const { return prototype_; }
SharedFunctionInfoData* shared() const { return shared_; }
+ FeedbackVectorData* feedback_vector() const { return feedback_vector_; }
int initial_map_instance_size_with_min_slack() const {
CHECK(serialized_);
return initial_map_instance_size_with_min_slack_;
}
private:
+ bool has_feedback_vector_;
bool has_initial_map_;
bool has_prototype_;
bool PrototypeRequiresRuntimeLookup_;
@@ -340,6 +344,7 @@ class JSFunctionData : public JSObjectData {
MapData* initial_map_ = nullptr;
ObjectData* prototype_ = nullptr;
SharedFunctionInfoData* shared_ = nullptr;
+ FeedbackVectorData* feedback_vector_ = nullptr;
int initial_map_instance_size_with_min_slack_;
};
@@ -833,6 +838,7 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object)
JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSFunction> object)
: JSObjectData(broker, storage, object),
+ has_feedback_vector_(object->has_feedback_vector()),
has_initial_map_(object->has_prototype_slot() &&
object->has_initial_map()),
has_prototype_(object->has_prototype_slot() && object->has_prototype()),
@@ -851,11 +857,16 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(initial_map_);
DCHECK_NULL(prototype_);
DCHECK_NULL(shared_);
+ DCHECK_NULL(feedback_vector_);
context_ = broker->GetOrCreateData(function->context())->AsContext();
native_context_ =
broker->GetOrCreateData(function->native_context())->AsNativeContext();
shared_ = broker->GetOrCreateData(function->shared())->AsSharedFunctionInfo();
+ feedback_vector_ = has_feedback_vector()
+ ? broker->GetOrCreateData(function->feedback_vector())
+ ->AsFeedbackVector()
+ : nullptr;
initial_map_ = has_initial_map()
? broker->GetOrCreateData(function->initial_map())->AsMap()
: nullptr;
@@ -1138,7 +1149,8 @@ class SharedFunctionInfoData : public HeapObjectData {
int builtin_id() const { return builtin_id_; }
BytecodeArrayData* GetBytecodeArray() const { return GetBytecodeArray_; }
- void SetSerializedForCompilation(FeedbackVectorRef feedback);
+ void SetSerializedForCompilation(JSHeapBroker* broker,
+ FeedbackVectorRef feedback);
bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
#define DECL_ACCESSOR(type, name) \
type name() const { return name##_; }
@@ -1177,8 +1189,10 @@ SharedFunctionInfoData::SharedFunctionInfoData(
}
void SharedFunctionInfoData::SetSerializedForCompilation(
- FeedbackVectorRef feedback) {
+ JSHeapBroker* broker, FeedbackVectorRef feedback) {
CHECK(serialized_for_compilation_.insert(feedback.object()).second);
+ TRACE(broker, "Set function " << object() << " with " << feedback.object()
+ << " as serialized for compilation.");
}
bool SharedFunctionInfoData::IsSerializedForCompilation(
@@ -1439,7 +1453,7 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
elements_object->map() == ReadOnlyRoots(isolate).fixed_cow_array_map();
if (empty_or_cow) {
// We need to make sure copy-on-write elements are tenured.
- if (Heap::InNewSpace(*elements_object)) {
+ if (ObjectInYoungGeneration(*elements_object)) {
elements_object = isolate->factory()->CopyAndTenureFixedCOWArray(
Handle<FixedArray>::cast(elements_object));
boilerplate->set_elements(*elements_object);
@@ -1558,7 +1572,8 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone)
current_zone_(broker_zone),
refs_(new (zone())
RefsMap(kMinimalRefsBucketCount, AddressMatcher(), zone())),
- array_and_object_prototypes_(zone()) {
+ array_and_object_prototypes_(zone()),
+ feedback_(zone()) {
// Note that this initialization of the refs_ pointer with the minimal
// initial capacity is redundant in the normal use case (concurrent
// compilation enabled, standard objects to be serialized), as the map
@@ -1567,9 +1582,9 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone)
TRACE(this, "Constructing heap broker.");
}
-std::ostream& JSHeapBroker::Trace() const {
- std::cout << "[" << this << "] " << std::string(trace_indentation_ * 2, ' ');
- return std::cout;
+std::ostream& JSHeapBroker::Trace() {
+ return trace_out_ << "[" << this << "] "
+ << std::string(trace_indentation_ * 2, ' ');
}
void JSHeapBroker::StartSerializing() {
@@ -1924,8 +1939,8 @@ ScriptContextTableRef::lookup(const NameRef& name) const {
if (!name.IsString()) return {};
ScriptContextTable::LookupResult lookup_result;
auto table = object();
- if (!ScriptContextTable::Lookup(broker()->isolate(), table,
- name.AsString().object(), &lookup_result)) {
+ if (!ScriptContextTable::Lookup(broker()->isolate(), *table,
+ *name.AsString().object(), &lookup_result)) {
return {};
}
Handle<Context> script_context = ScriptContextTable::GetContext(
@@ -2017,7 +2032,7 @@ void JSObjectRef::EnsureElementsTenured() {
AllowHeapAllocation allow_heap_allocation;
Handle<FixedArrayBase> object_elements = elements().object();
- if (Heap::InNewSpace(*object_elements)) {
+ if (ObjectInYoungGeneration(*object_elements)) {
// If we would like to pretenure a fixed cow array, we must ensure that
// the array is already in old space, otherwise we'll create too many
// old-to-new-space pointers (overflowing the store buffer).
@@ -2234,14 +2249,16 @@ BIMODAL_ACCESSOR(JSBoundFunction, FixedArray, bound_arguments)
BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_offset)
-BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
+BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
+BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
BIMODAL_ACCESSOR(JSFunction, Context, context)
BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
+BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
BIMODAL_ACCESSOR_C(JSTypedArray, bool, is_on_heap)
BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length_value)
@@ -2410,26 +2427,26 @@ bool ObjectRef::BooleanValue() const {
return IsSmi() ? (AsSmi() != 0) : data()->AsHeapObject()->boolean_value();
}
-double ObjectRef::OddballToNumber() const {
+Maybe<double> ObjectRef::OddballToNumber() const {
OddballType type = AsHeapObject().map().oddball_type();
switch (type) {
case OddballType::kBoolean: {
ObjectRef true_ref(broker(),
broker()->isolate()->factory()->true_value());
- return this->equals(true_ref) ? 1 : 0;
+ return this->equals(true_ref) ? Just(1.0) : Just(0.0);
break;
}
case OddballType::kUndefined: {
- return std::numeric_limits<double>::quiet_NaN();
+ return Just(std::numeric_limits<double>::quiet_NaN());
break;
}
case OddballType::kNull: {
- return 0;
+ return Just(0.0);
break;
}
default: {
- UNREACHABLE();
+ return Nothing<double>();
break;
}
}
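
ObjectRef::OddballToNumber now reports failure through Maybe<double> instead of hitting UNREACHABLE() on unexpected oddball types. A hypothetical caller sketch (the oddball_ref name is illustrative, not from this patch):

    double number;
    if (!oddball_ref.OddballToNumber().To(&number)) {
      return NoChange();  // oddballs other than boolean/undefined/null
    }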
@@ -2480,7 +2497,7 @@ ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
case JSHeapBroker::kRetired:
UNREACHABLE();
}
- CHECK_NOT_NULL(data_);
+ CHECK_WITH_MSG(data_ != nullptr, "Object is not known to the heap broker");
}
namespace {
@@ -2675,10 +2692,15 @@ void JSFunctionRef::Serialize() {
data()->AsJSFunction()->Serialize(broker());
}
+bool JSFunctionRef::IsSerializedForCompilation() const {
+ return shared().IsSerializedForCompilation(feedback_vector());
+}
+
void SharedFunctionInfoRef::SetSerializedForCompilation(
FeedbackVectorRef feedback) {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsSharedFunctionInfo()->SetSerializedForCompilation(feedback);
+ data()->AsSharedFunctionInfo()->SetSerializedForCompilation(broker(),
+ feedback);
}
bool SharedFunctionInfoRef::IsSerializedForCompilation(
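
JSFunctionRef::IsSerializedForCompilation (added above) pairs the function's SharedFunctionInfo with its own FeedbackVector, so callers can ask one question rather than threading the vector through by hand. A hypothetical guard in a reducer, assuming a JSFunctionRef named function is in scope:

    if (function.has_feedback_vector() &&
        !function.IsSerializedForCompilation()) {
      return NoChangeBecauseOfMissingData(broker(), __FUNCTION__, __LINE__);
    }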
@@ -2735,6 +2757,66 @@ void JSBoundFunctionRef::Serialize() {
data()->AsJSBoundFunction()->Serialize(broker());
}
+bool CanInlineElementAccess(Handle<Map> map) {
+ if (!map->IsJSObjectMap()) return false;
+ if (map->is_access_check_needed()) return false;
+ if (map->has_indexed_interceptor()) return false;
+ ElementsKind const elements_kind = map->elements_kind();
+ if (IsFastElementsKind(elements_kind)) return true;
+ if (IsFixedTypedArrayElementsKind(elements_kind) &&
+ elements_kind != BIGUINT64_ELEMENTS &&
+ elements_kind != BIGINT64_ELEMENTS) {
+ return true;
+ }
+ return false;
+}
+
+bool JSHeapBroker::HasFeedback(FeedbackNexus const& nexus) const {
+ return feedback_.find(nexus) != feedback_.end();
+}
+
+ProcessedFeedback& JSHeapBroker::GetOrCreateFeedback(
+ FeedbackNexus const& nexus) {
+ auto it = feedback_.find(nexus);
+ if (it != feedback_.end()) return it->second;
+ auto insertion = feedback_.insert({nexus, ProcessedFeedback(zone())});
+ CHECK(insertion.second);
+ return insertion.first->second;
+}
+
+void ProcessFeedbackMapsForElementAccess(Isolate* isolate,
+ MapHandles const& maps,
+ ProcessedFeedback* processed) {
+ DCHECK(processed->receiver_maps.empty());
+ DCHECK(processed->transitions.empty());
+
+ // Collect possible transition targets.
+ MapHandles possible_transition_targets;
+ possible_transition_targets.reserve(maps.size());
+ for (Handle<Map> map : maps) {
+ if (CanInlineElementAccess(map) &&
+ IsFastElementsKind(map->elements_kind()) &&
+ GetInitialFastElementsKind() != map->elements_kind()) {
+ possible_transition_targets.push_back(map);
+ }
+ }
+
+ // Separate the actual receiver maps and the possible transition sources.
+ for (Handle<Map> map : maps) {
+ // Don't generate elements kind transitions from stable maps.
+ Map transition_target = map->is_stable()
+ ? Map()
+ : map->FindElementsKindTransitionedMap(
+ isolate, possible_transition_targets);
+ if (transition_target.is_null()) {
+ processed->receiver_maps.push_back(map);
+ } else {
+ processed->transitions.emplace_back(map,
+ handle(transition_target, isolate));
+ }
+ }
+}
+
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
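
HasFeedback and GetOrCreateFeedback (added above) give the broker a per-nexus cache of processed feedback, keyed by the (feedback vector, slot) pair via FeedbackNexusHash/FeedbackNexusEqual from the header. A consumer would typically populate an entry exactly once, along these lines (a sketch, assuming nexus and maps are in scope):

    if (!broker()->HasFeedback(nexus)) {
      ProcessedFeedback& processed = broker()->GetOrCreateFeedback(nexus);
      ProcessFeedbackMapsForElementAccess(isolate(), maps, &processed);
    }
    // Subsequent lookups reuse the cached receiver_maps and transitions.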
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 0108575013..4b87d81ea1 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -8,11 +8,14 @@
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
#include "src/compiler/refs-map.h"
+#include "src/feedback-vector.h"
+#include "src/function-kind.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/objects/builtin-function-id.h"
#include "src/objects/instance-type.h"
+#include "src/ostreams.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -119,7 +122,7 @@ class ObjectRef {
bool IsNullOrUndefined() const;
bool BooleanValue() const;
- double OddballToNumber() const;
+ Maybe<double> OddballToNumber() const;
Isolate* isolate() const;
@@ -230,11 +233,13 @@ class JSFunctionRef : public JSObjectRef {
using JSObjectRef::JSObjectRef;
Handle<JSFunction> object() const;
+ bool has_feedback_vector() const;
bool has_initial_map() const;
bool has_prototype() const;
bool PrototypeRequiresRuntimeLookup() const;
void Serialize();
+ bool IsSerializedForCompilation() const;
// The following are available only after calling Serialize().
ObjectRef prototype() const;
@@ -242,6 +247,7 @@ class JSFunctionRef : public JSObjectRef {
ContextRef context() const;
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
+ FeedbackVectorRef feedback_vector() const;
int InitialMapInstanceSizeWithMinSlack() const;
};
@@ -521,7 +527,8 @@ class ScopeInfoRef : public HeapObjectRef {
V(bool, HasBuiltinId) \
V(BuiltinFunctionId, builtin_function_id) \
V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray)
+ V(bool, HasBytecodeArray) \
+ V(bool, is_safe_to_skip_arguments_adaptor)
class SharedFunctionInfoRef : public HeapObjectRef {
public:
@@ -609,9 +616,18 @@ class InternalizedStringRef : public StringRef {
static const uint32_t kNotAnArrayIndex = -1; // 2^32-1 is not a valid index.
};
+struct ProcessedFeedback {
+ ZoneVector<Handle<Map>> receiver_maps;
+ ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions;
+
+ explicit ProcessedFeedback(Zone* zone)
+ : receiver_maps(zone), transitions(zone) {}
+};
+
class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
public:
JSHeapBroker(Isolate* isolate, Zone* broker_zone);
+
void SetNativeContextRef();
void SerializeStandardObjects();
@@ -638,8 +654,10 @@ class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
// %ObjectPrototype%.
bool IsArrayOrObjectPrototype(const JSObjectRef& object) const;
- std::ostream& Trace() const;
+ bool HasFeedback(FeedbackNexus const& nexus) const;
+ ProcessedFeedback& GetOrCreateFeedback(FeedbackNexus const& nexus);
+ std::ostream& Trace();
void IncrementTracingIndentation();
void DecrementTracingIndentation();
@@ -651,6 +669,18 @@ class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
void SerializeShareableObjects();
void CollectArrayAndObjectPrototypes();
+ struct FeedbackNexusHash {
+ size_t operator()(FeedbackNexus const& nexus) const {
+ return base::hash_combine(nexus.vector_handle().location(), nexus.slot());
+ }
+ };
+ struct FeedbackNexusEqual {
+ bool operator()(FeedbackNexus const& lhs, FeedbackNexus const& rhs) const {
+ return lhs.vector_handle().equals(rhs.vector_handle()) &&
+ lhs.slot() == rhs.slot();
+ }
+ };
+
Isolate* const isolate_;
Zone* const broker_zone_;
Zone* current_zone_;
@@ -659,10 +689,13 @@ class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
ZoneUnorderedSet<Handle<JSObject>, Handle<JSObject>::hash,
Handle<JSObject>::equal_to>
array_and_object_prototypes_;
-
BrokerMode mode_ = kDisabled;
+ StdoutStream trace_out_;
unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_;
+ ZoneUnorderedMap<FeedbackNexus, ProcessedFeedback, FeedbackNexusHash,
+ FeedbackNexusEqual>
+ feedback_;
static const size_t kMinimalRefsBucketCount = 8; // must be power of 2
static const size_t kInitialRefsBucketCount = 1024; // must be power of 2
@@ -679,6 +712,13 @@ class Reduction;
Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
const char* function, int line);
+// Miscellaneous definitions that should be moved elsewhere once concurrent
+// compilation is finished.
+bool CanInlineElementAccess(Handle<Map> map);
+void ProcessFeedbackMapsForElementAccess(Isolate* isolate,
+ MapHandles const& maps,
+ ProcessedFeedback* processed);
+
#define TRACE_BROKER(broker, x) \
do { \
if (FLAG_trace_heap_broker) broker->Trace() << x << '\n'; \
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 1e701113d2..e289757ebb 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -114,22 +114,21 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
break;
}
case IrOpcode::kMapGuard: {
- ZoneHandleSet<Map> const maps = MapGuardMapsOf(node->op()).maps();
+ ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
for (Handle<Map> map : maps) {
MapRef(broker(), map);
}
break;
}
case IrOpcode::kCheckMaps: {
- ZoneHandleSet<Map> const maps = CheckMapsParametersOf(node->op()).maps();
+ ZoneHandleSet<Map> const& maps = CheckMapsParametersOf(node->op()).maps();
for (Handle<Map> map : maps) {
MapRef(broker(), map);
}
break;
}
case IrOpcode::kCompareMaps: {
- ZoneHandleSet<Map> const maps =
- CompareMapsParametersOf(node->op()).maps();
+ ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
for (Handle<Map> map : maps) {
MapRef(broker(), map);
}
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 12ca2d42ff..1168c7f38d 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -408,14 +408,6 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
return NoChange();
}
- // Function contains break points.
- if (shared_info->HasBreakInfo()) {
- TRACE("Not inlining %s into %s because callee may contain break points\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
- }
-
// To ensure inlining always terminates, we have an upper limit on inlining
// the nested calls.
int nesting_level = 0;
@@ -457,6 +449,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
return NoChange();
}
+ if (info_->is_source_positions_enabled()) {
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(), shared_info);
+ }
+
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
// We shall not bailout from inlining if we got here.
@@ -465,10 +461,6 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
info_->shared_info()->DebugName()->ToCString().get(),
(exception_target != nullptr) ? " (inside try-block)" : "");
- // Get the bytecode array.
- Handle<BytecodeArray> bytecode_array =
- handle(shared_info->GetBytecodeArray(), isolate());
-
// Determine the targets feedback vector and its context.
Node* context;
Handle<FeedbackVector> feedback_vector;
@@ -485,6 +477,9 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
}
+ Handle<BytecodeArray> bytecode_array =
+ handle(shared_info->GetBytecodeArray(), isolate());
+
// Remember that we inlined this function.
int inlining_id = info_->AddInlinedFunction(
shared_info, bytecode_array, source_positions_->GetSourcePosition(node));
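
Two ordering details in the inliner hunks above: the HasBreakInfo bailout is gone, and the BytecodeArray handle is now taken only after the optional EnsureSourcePositionsAvailable call. Presumably (an assumption, not stated in the patch) collecting source positions lazily may allocate or touch the SharedFunctionInfo, so the handle is created afterwards:

    if (info_->is_source_positions_enabled()) {
      SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(), shared_info);
    }
    Handle<BytecodeArray> bytecode_array =
        handle(shared_info->GetBytecodeArray(), isolate());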
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index c78970f0c9..8a3fab5e6e 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -106,6 +106,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSLoadNamed(node);
case IrOpcode::kJSStoreNamed:
return ReduceJSStoreNamed(node);
+ case IrOpcode::kJSHasProperty:
+ return ReduceJSHasProperty(node);
case IrOpcode::kJSLoadProperty:
return ReduceJSLoadProperty(node);
case IrOpcode::kJSStoreProperty:
@@ -208,7 +210,7 @@ bool IsStringConstant(JSHeapBroker* broker, Node* node) {
HeapObjectMatcher matcher(node);
return matcher.HasValue() && matcher.Ref(broker).IsString();
}
-}
+} // namespace
Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
Node* node) {
@@ -219,11 +221,8 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
- // Install a code dependency on the promise hook protector cell.
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_hook_protector()));
+ if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
// Create the promise for the async function.
Node* promise = effect =
@@ -252,11 +251,8 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionReject(
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
- // Install a code dependency on the promise hook protector cell.
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_hook_protector()));
+ if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
// Load the promise from the {async_function_object}.
Node* promise = effect = graph()->NewNode(
@@ -291,11 +287,8 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve(
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
- // Install a code dependency on the promise hook protector cell.
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_hook_protector()));
+ if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
// Load the promise from the {async_function_object}.
Node* promise = effect = graph()->NewNode(
@@ -405,8 +398,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Compute property access info for @@hasInstance on the constructor.
PropertyAccessInfo access_info;
- AccessInfoFactory access_info_factory(
- broker(), dependencies(), native_context().object(), graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), dependencies(),
+ graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
receiver_map, factory()->has_instance_symbol(), AccessMode::kLoad,
&access_info)) {
@@ -426,7 +419,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- broker(), access_info.receiver_maps(), JSObjectRef(broker(), holder));
+ access_info.receiver_maps(), JSObjectRef(broker(), holder));
}
// Monomorphic property access.
@@ -473,12 +466,15 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
*(factory()->has_instance_symbol()), *(holder_map.object()));
CHECK_NE(descriptor_index, DescriptorArray::kNotFound);
holder_map.SerializeOwnDescriptors();
- dependencies()->DependOnFieldType(holder_map, descriptor_index);
+ if (dependencies()->DependOnFieldConstness(
+ holder_map, descriptor_index) != PropertyConstness::kConst) {
+ return NoChange();
+ }
}
if (found_on_proto) {
dependencies()->DependOnStablePrototypeChains(
- broker(), access_info.receiver_maps(), JSObjectRef(broker(), holder));
+ access_info.receiver_maps(), JSObjectRef(broker(), holder));
}
DCHECK(constant->IsCallable());
@@ -571,7 +567,6 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
}
}
DCHECK_IMPLIES(all, !none);
- DCHECK_IMPLIES(none, !all);
if (all) return kIsInPrototypeChain;
if (none) return kIsNotInPrototypeChain;
@@ -663,10 +658,6 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (!isolate()->IsPromiseHookProtectorIntact()) {
- return NoChange();
- }
-
// Check if the {constructor} is the %Promise% function.
HeapObjectMatcher m(constructor);
if (!m.HasValue() ||
@@ -686,9 +677,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
if (value_map->IsJSPromiseMap()) return NoChange();
}
- // Install a code dependency on the promise hook protector cell.
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->promise_hook_protector()));
+ if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
// Create a %Promise% instance and resolve it with {value}.
Node* promise = effect =
@@ -726,8 +715,8 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
// Compute property access info for "then" on {resolution}.
PropertyAccessInfo access_info;
- AccessInfoFactory access_info_factory(
- broker(), dependencies(), native_context().object(), graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), dependencies(),
+ graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
MapHandles(resolution_maps.begin(), resolution_maps.end()),
factory()->then_string(), AccessMode::kLoad, &access_info)) {
@@ -743,7 +732,7 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- broker(), access_info.receiver_maps(), JSObjectRef(broker(), holder));
+ access_info.receiver_maps(), JSObjectRef(broker(), holder));
}
// Add stability dependencies on the {resolution_maps}.
@@ -797,9 +786,6 @@ FieldAccess ForPropertyCellValue(MachineRepresentation representation,
Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* node, Node* receiver, Node* value, Handle<Name> name,
AccessMode access_mode, Node* index) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
// Lookup on the global object. We only deal with own data properties
// of the global object here (represented as PropertyCell).
LookupIterator it(isolate(), global_object(), name, LookupIterator::OWN);
@@ -807,9 +793,25 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
if (it.state() != LookupIterator::DATA) return NoChange();
if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
Handle<PropertyCell> property_cell = it.GetPropertyCell();
- PropertyDetails property_details = property_cell->property_details();
+ return ReduceGlobalAccess(node, receiver, value, name, access_mode, index,
+ property_cell);
+}
+
+Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
+ Node* node, Node* receiver, Node* value, Handle<Name> name,
+ AccessMode access_mode, Node* index, Handle<PropertyCell> property_cell) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
Handle<Object> property_cell_value(property_cell->value(), isolate());
+ if (property_cell_value.is_identical_to(factory()->the_hole_value())) {
+ // The property cell is no longer valid.
+ return NoChange();
+ }
+
+ PropertyDetails property_details = property_cell->property_details();
PropertyCellType property_cell_type = property_details.cell_type();
+ DCHECK_EQ(kData, property_details.kind());
// We have additional constraints for stores.
if (access_mode == AccessMode::kStore) {
@@ -827,6 +829,13 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
return NoChange();
}
}
+ } else if (access_mode == AccessMode::kHas) {
+ // Has checks cannot take the fast path used by loads when these
+ // conditions hold, so the node is left for generic lowering.
+ if ((property_details.IsConfigurable() || !property_details.IsReadOnly()) &&
+ property_details.cell_type() != PropertyCellType::kConstant &&
+ property_details.cell_type() != PropertyCellType::kUndefined)
+ return NoChange();
}
// Ensure that {index} matches the specified {name} (if {index} is given).
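
ReduceGlobalAccess now also serves AccessMode::kHas (backing the new kJSHasProperty case): a has check never needs the cell's value, so wherever a load would constant-fold the property cell's value, the has check folds straight to true. The shared folding pattern, restated from the surrounding hunks:

    value = access_mode == AccessMode::kHas
                ? jsgraph()->TrueConstant()
                : jsgraph()->Constant(property_cell_value);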
@@ -845,11 +854,13 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
check, effect, control);
}
- if (access_mode == AccessMode::kLoad) {
+ if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
// Load from non-configurable, read-only data property on the global
// object can be constant-folded, even without deoptimization support.
if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
- value = jsgraph()->Constant(property_cell_value);
+ value = access_mode == AccessMode::kHas
+ ? jsgraph()->TrueConstant()
+ : jsgraph()->Constant(property_cell_value);
} else {
// Record a code dependency on the cell if we can benefit from the
// additional feedback, or the global property is configurable (i.e.
@@ -863,10 +874,14 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// Load from constant/undefined global property can be constant-folded.
if (property_details.cell_type() == PropertyCellType::kConstant ||
property_details.cell_type() == PropertyCellType::kUndefined) {
- value = jsgraph()->Constant(property_cell_value);
+ value = access_mode == AccessMode::kHas
+ ? jsgraph()->TrueConstant()
+ : jsgraph()->Constant(property_cell_value);
CHECK(
!property_cell_value.is_identical_to(factory()->the_hole_value()));
} else {
+ DCHECK_NE(AccessMode::kHas, access_mode);
+
// Load from constant type cell can benefit from type feedback.
MaybeHandle<Map> map;
Type property_cell_value_type = Type::NonInternal();
@@ -986,58 +1001,104 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
- NameRef name(broker(), LoadGlobalParametersOf(node->op()).name());
Node* effect = NodeProperties::GetEffectInput(node);
- // Try to lookup the name on the script context table first (lexical scoping).
- base::Optional<ScriptContextTableRef::LookupResult> result =
- native_context().script_context_table().lookup(name);
- if (result) {
- ObjectRef contents = result->context.get(result->index);
- if (contents.IsHeapObject() &&
- contents.AsHeapObject().map().oddball_type() == OddballType::kHole) {
- return NoChange();
+ LoadGlobalParameters const& p = LoadGlobalParametersOf(node->op());
+ if (!p.feedback().IsValid()) return NoChange();
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ DCHECK(nexus.kind() == FeedbackSlotKind::kLoadGlobalInsideTypeof ||
+ nexus.kind() == FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
+ if (nexus.ic_state() != MONOMORPHIC || nexus.GetFeedback()->IsCleared()) {
+ return NoChange();
+ }
+ Handle<Object> feedback(nexus.GetFeedback()->GetHeapObjectOrSmi(), isolate());
+
+ if (feedback->IsSmi()) {
+ // The wanted name belongs to a script-scope variable and the feedback tells
+ // us where to find its value.
+
+ int number = feedback->Number();
+ int const script_context_index =
+ FeedbackNexus::ContextIndexBits::decode(number);
+ int const context_slot_index = FeedbackNexus::SlotIndexBits::decode(number);
+ bool const immutable = FeedbackNexus::ImmutabilityBit::decode(number);
+ Handle<Context> context = ScriptContextTable::GetContext(
+ isolate(), native_context().script_context_table().object(),
+ script_context_index);
+
+ {
+ ObjectRef contents(broker(),
+ handle(context->get(context_slot_index), isolate()));
+ CHECK(!contents.equals(ObjectRef(broker(), factory()->the_hole_value())));
}
- Node* context = jsgraph()->Constant(result->context);
+
+ Node* context_constant = jsgraph()->Constant(context);
Node* value = effect = graph()->NewNode(
- javascript()->LoadContext(0, result->index, result->immutable), context,
- effect);
+ javascript()->LoadContext(0, context_slot_index, immutable),
+ context_constant, effect);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
- // Lookup the {name} on the global object instead.
- return ReduceGlobalAccess(node, nullptr, nullptr, name.object(),
- AccessMode::kLoad);
+ CHECK(feedback->IsPropertyCell());
+ // The wanted name belongs (or did belong) to a property on the global object
+ // and the feedback is the cell holding its value.
+ return ReduceGlobalAccess(node, nullptr, nullptr, p.name(), AccessMode::kLoad,
+ nullptr, Handle<PropertyCell>::cast(feedback));
}
Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
- NameRef name(broker(), StoreGlobalParametersOf(node->op()).name());
Node* value = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Try to lookup the name on the script context table first (lexical scoping).
- base::Optional<ScriptContextTableRef::LookupResult> result =
- native_context().script_context_table().lookup(name);
- if (result) {
- ObjectRef contents = result->context.get(result->index);
- if ((contents.IsHeapObject() &&
- contents.AsHeapObject().map().oddball_type() == OddballType::kHole) ||
- result->immutable) {
- return NoChange();
+ StoreGlobalParameters const& p = StoreGlobalParametersOf(node->op());
+ if (!p.feedback().IsValid()) return NoChange();
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ DCHECK(nexus.kind() == FeedbackSlotKind::kStoreGlobalSloppy ||
+ nexus.kind() == FeedbackSlotKind::kStoreGlobalStrict);
+ if (nexus.ic_state() != MONOMORPHIC || nexus.GetFeedback()->IsCleared()) {
+ return NoChange();
+ }
+ Handle<Object> feedback(nexus.GetFeedback()->GetHeapObjectOrSmi(), isolate());
+
+ if (feedback->IsSmi()) {
+ // The wanted name belongs to a script-scope variable and the feedback tells
+ // us where to find its value.
+
+ int const script_context_index =
+ FeedbackNexus::ContextIndexBits::decode(feedback->Number());
+ int const context_slot_index =
+ FeedbackNexus::SlotIndexBits::decode(feedback->Number());
+ bool const immutable =
+ FeedbackNexus::ImmutabilityBit::decode(feedback->Number());
+ Handle<Context> context = ScriptContextTable::GetContext(
+ isolate(), native_context().script_context_table().object(),
+ script_context_index);
+
+ if (immutable) return NoChange();
+
+ {
+ ObjectRef contents(broker(),
+ handle(context->get(context_slot_index), isolate()));
+ CHECK(!contents.equals(ObjectRef(broker(), factory()->the_hole_value())));
}
- Node* context = jsgraph()->Constant(result->context);
- effect = graph()->NewNode(javascript()->StoreContext(0, result->index),
- value, context, effect, control);
+
+ Node* context_constant = jsgraph()->Constant(context);
+ effect = graph()->NewNode(javascript()->StoreContext(0, context_slot_index),
+ value, context_constant, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
- // Lookup the {name} on the global object instead.
- return ReduceGlobalAccess(node, nullptr, value, name.object(),
- AccessMode::kStore);
+ CHECK(feedback->IsPropertyCell());
+ // The wanted name belongs (or did belong) to a property on the global object
+ // and the feedback is the cell holding its value.
+ return ReduceGlobalAccess(node, nullptr, value, p.name(), AccessMode::kStore,
+ nullptr, Handle<PropertyCell>::cast(feedback));
}
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
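
Both global reducers above now consult the feedback slot directly instead of probing the script context table by name. A Smi payload means the name resolved to a script-scope variable, with its location bit-packed into the Smi; the decode pattern shared by the load and store paths:

    int raw = feedback->Number();
    int script_context_index = FeedbackNexus::ContextIndexBits::decode(raw);
    int context_slot_index = FeedbackNexus::SlotIndexBits::decode(raw);
    bool immutable = FeedbackNexus::ImmutabilityBit::decode(raw);

A PropertyCell payload instead means a (former) own property of the global object, and is handed to ReduceGlobalAccess explicitly.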
@@ -1047,7 +1108,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
node->opcode() == IrOpcode::kJSStoreNamed ||
node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty ||
- node->opcode() == IrOpcode::kJSStoreNamedOwn);
+ node->opcode() == IrOpcode::kJSStoreNamedOwn ||
+ node->opcode() == IrOpcode::kJSHasProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
@@ -1072,8 +1134,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// Compute property access infos for the receiver maps.
- AccessInfoFactory access_info_factory(
- broker(), dependencies(), native_context().object(), graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), dependencies(),
+ graph()->zone());
ZoneVector<PropertyAccessInfo> access_infos(zone());
if (!access_info_factory.ComputePropertyAccessInfos(
receiver_maps, name, access_mode, &access_infos)) {
@@ -1329,12 +1391,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
return NoChange();
} else if (receiver_maps.empty()) {
- if (flags() & kBailoutOnUninitialized) {
- return ReduceSoftDeoptimize(
- node,
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
- }
- return NoChange();
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
}
DCHECK(!nexus.IsUninitialized());
@@ -1387,7 +1445,6 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
AccessMode::kLoad);
}
-
Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
@@ -1416,224 +1473,340 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
AccessMode::kStoreInLiteral);
}
+Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
+ Node* node, Node* index, Node* value, AccessMode access_mode,
+ KeyedAccessLoadMode load_mode) {
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Strings are immutable in JavaScript.
+ if (access_mode == AccessMode::kStore) return NoChange();
+
+ // Ensure that the {receiver} is actually a String.
+ receiver = effect = graph()->NewNode(
+ simplified()->CheckString(VectorSlotPair()), receiver, effect, control);
+
+ // Determine the {receiver} length.
+ Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Load the single character string from {receiver} or yield undefined
+ // if the {index} is out of bounds (depending on the {load_mode}).
+ value = BuildIndexedStringLoad(receiver, index, length, &effect, &control,
+ load_mode);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
Reduction JSNativeContextSpecialization::ReduceElementAccess(
- Node* node, Node* index, Node* value, MapHandles const& receiver_maps,
- AccessMode access_mode, KeyedAccessLoadMode load_mode,
- KeyedAccessStoreMode store_mode) {
+ Node* node, Node* index, Node* value, FeedbackNexus const& nexus,
+ MapHandles const& receiver_maps, AccessMode access_mode,
+ KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty ||
- node->opcode() == IrOpcode::kJSStoreInArrayLiteral);
+ node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
+ node->opcode() == IrOpcode::kJSHasProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- // Check for keyed access to strings.
if (HasOnlyStringMaps(receiver_maps)) {
- // Strings are immutable in JavaScript.
- if (access_mode == AccessMode::kStore) return NoChange();
-
- // Ensure that the {receiver} is actually a String.
- receiver = effect = graph()->NewNode(
- simplified()->CheckString(VectorSlotPair()), receiver, effect, control);
-
- // Determine the {receiver} length.
- Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
+ return ReduceElementAccessOnString(node, index, value, access_mode,
+ load_mode);
+ }
- // Load the single character string from {receiver} or yield undefined
- // if the {index} is out of bounds (depending on the {load_mode}).
- value = BuildIndexedStringLoad(receiver, index, length, &effect, &control,
- load_mode);
- } else {
- // Retrieve the native context from the given {node}.
- // Compute element access infos for the receiver maps.
- AccessInfoFactory access_info_factory(
- broker(), dependencies(), native_context().object(), graph()->zone());
- ZoneVector<ElementAccessInfo> access_infos(zone());
- if (!access_info_factory.ComputeElementAccessInfos(
- receiver_maps, access_mode, &access_infos)) {
- return NoChange();
- }
+ // Compute element access infos for the receiver maps.
+ AccessInfoFactory access_info_factory(broker(), dependencies(),
+ graph()->zone());
+ ZoneVector<ElementAccessInfo> access_infos(zone());
+ if (!access_info_factory.ComputeElementAccessInfos(
+ nexus, receiver_maps, access_mode, &access_infos)) {
+ return NoChange();
+ }
- // Nothing to do if we have no non-deprecated maps.
- if (access_infos.empty()) {
- return ReduceSoftDeoptimize(
- node,
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
- }
+ // Nothing to do if we have no non-deprecated maps.
+ if (access_infos.empty()) {
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
+ }
- // For holey stores or growing stores, we need to check that the prototype
- // chain contains no setters for elements, and we need to guard those checks
- // via code dependencies on the relevant prototype maps.
- if (access_mode == AccessMode::kStore) {
- // TODO(turbofan): We could have a fast path here, that checks for the
- // common case of Array or Object prototype only and therefore avoids
- // the zone allocation of this vector.
- ZoneVector<Handle<Map>> prototype_maps(zone());
- for (ElementAccessInfo const& access_info : access_infos) {
- for (Handle<Map> receiver_map : access_info.receiver_maps()) {
- // If the {receiver_map} has a prototype and its elements backing
- // store is either holey, or we have a potentially growing store,
- // then we need to check that all prototypes have stable maps with
- // fast elements (and we need to guard against changes to that below).
- if (IsHoleyOrDictionaryElementsKind(receiver_map->elements_kind()) ||
- IsGrowStoreMode(store_mode)) {
- // Make sure all prototypes are stable and have fast elements.
- for (Handle<Map> map = receiver_map;;) {
- Handle<Object> map_prototype(map->prototype(), isolate());
- if (map_prototype->IsNull(isolate())) break;
- if (!map_prototype->IsJSObject()) return NoChange();
- map = handle(Handle<JSObject>::cast(map_prototype)->map(),
- isolate());
- if (!map->is_stable()) return NoChange();
- if (!IsFastElementsKind(map->elements_kind())) return NoChange();
- prototype_maps.push_back(map);
- }
+ // For holey stores or growing stores, we need to check that the prototype
+ // chain contains no setters for elements, and we need to guard those checks
+ // via code dependencies on the relevant prototype maps.
+ if (access_mode == AccessMode::kStore) {
+    // TODO(turbofan): We could have a fast path here that checks for the
+ // common case of Array or Object prototype only and therefore avoids
+ // the zone allocation of this vector.
+ ZoneVector<Handle<Map>> prototype_maps(zone());
+ for (ElementAccessInfo const& access_info : access_infos) {
+ for (Handle<Map> receiver_map : access_info.receiver_maps()) {
+ // If the {receiver_map} has a prototype and its elements backing
+ // store is either holey, or we have a potentially growing store,
+ // then we need to check that all prototypes have stable maps with
+ // fast elements (and we need to guard against changes to that below).
+ if (IsHoleyOrDictionaryElementsKind(receiver_map->elements_kind()) ||
+ IsGrowStoreMode(store_mode)) {
+ // Make sure all prototypes are stable and have fast elements.
+ for (Handle<Map> map = receiver_map;;) {
+ Handle<Object> map_prototype(map->prototype(), isolate());
+ if (map_prototype->IsNull(isolate())) break;
+ if (!map_prototype->IsJSObject()) return NoChange();
+ map =
+ handle(Handle<JSObject>::cast(map_prototype)->map(), isolate());
+ if (!map->is_stable()) return NoChange();
+ if (!IsFastElementsKind(map->elements_kind())) return NoChange();
+ prototype_maps.push_back(map);
}
}
}
+ }
- // Install dependencies on the relevant prototype maps.
- for (Handle<Map> prototype_map : prototype_maps) {
- dependencies()->DependOnStableMap(MapRef(broker(), prototype_map));
+ // Install dependencies on the relevant prototype maps.
+ for (Handle<Map> prototype_map : prototype_maps) {
+ dependencies()->DependOnStableMap(MapRef(broker(), prototype_map));
+ }
+ } else if (access_mode == AccessMode::kHas) {
+ // If we have any fast arrays, we need to check and depend on
+ // NoElementsProtector.
+ for (ElementAccessInfo const& access_info : access_infos) {
+ if (IsFastElementsKind(access_info.elements_kind())) {
+ if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
+ dependencies()->DependOnProtector(
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
+ break;
}
}
+ }
- // Ensure that {receiver} is a heap object.
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
- receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
+ // Ensure that {receiver} is a heap object.
+ PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
+ receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
+
+ // Check for the monomorphic case.
+ if (access_infos.size() == 1) {
+ ElementAccessInfo access_info = access_infos.front();
+
+ // Perform possible elements kind transitions.
+ Handle<Map> const transition_target = access_info.receiver_maps().front();
+ for (auto transition_source : access_info.transition_sources()) {
+ DCHECK_EQ(access_info.receiver_maps().size(), 1);
+ effect = graph()->NewNode(
+ simplified()->TransitionElementsKind(ElementsTransition(
+ IsSimpleMapChangeTransition(transition_source->elements_kind(),
+ transition_target->elements_kind())
+ ? ElementsTransition::kFastTransition
+ : ElementsTransition::kSlowTransition,
+ transition_source, transition_target)),
+ receiver, effect, control);
+ }
+
+ // TODO(turbofan): The effect/control linearization will not find a
+ // FrameState after the StoreField or Call that is generated for the
+ // elements kind transition above. This is because those operators
+  // don't have the kNoWrite flag on them, even though they are not
+ // observable by JavaScript.
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ // Perform map check on the {receiver}.
+ access_builder.BuildCheckMaps(receiver, &effect, control,
+ access_info.receiver_maps());
+
+ // Access the actual element.
+ ValueEffectControl continuation =
+ BuildElementAccess(receiver, index, value, effect, control, access_info,
+ access_mode, load_mode, store_mode);
+ value = continuation.value();
+ effect = continuation.effect();
+ control = continuation.control();
+ } else {
+ // The final states for every polymorphic branch. We join them with
+ // Merge+Phi+EffectPhi at the bottom.
+ ZoneVector<Node*> values(zone());
+ ZoneVector<Node*> effects(zone());
+ ZoneVector<Node*> controls(zone());
- // Check for the monomorphic case.
- if (access_infos.size() == 1) {
- ElementAccessInfo access_info = access_infos.front();
+ // Generate code for the various different element access patterns.
+ Node* fallthrough_control = control;
+ for (size_t j = 0; j < access_infos.size(); ++j) {
+ ElementAccessInfo const& access_info = access_infos[j];
+ Node* this_receiver = receiver;
+ Node* this_value = value;
+ Node* this_index = index;
+ Node* this_effect = effect;
+ Node* this_control = fallthrough_control;
// Perform possible elements kind transitions.
- for (auto transition : access_info.transitions()) {
- Handle<Map> const transition_source = transition.first;
- Handle<Map> const transition_target = transition.second;
- effect = graph()->NewNode(
+ Handle<Map> const transition_target = access_info.receiver_maps().front();
+ for (auto transition_source : access_info.transition_sources()) {
+ DCHECK_EQ(access_info.receiver_maps().size(), 1);
+ this_effect = graph()->NewNode(
simplified()->TransitionElementsKind(ElementsTransition(
IsSimpleMapChangeTransition(transition_source->elements_kind(),
transition_target->elements_kind())
? ElementsTransition::kFastTransition
: ElementsTransition::kSlowTransition,
transition_source, transition_target)),
- receiver, effect, control);
+ receiver, this_effect, this_control);
}
- // TODO(turbofan): The effect/control linearization will not find a
- // FrameState after the StoreField or Call that is generated for the
- // elements kind transition above. This is because those operators
- // don't have the kNoWrite flag on it, even though they are not
- // observable by JavaScript.
- effect = graph()->NewNode(common()->Checkpoint(), frame_state, effect,
- control);
+ // Perform map check(s) on {receiver}.
+ MapHandles const& receiver_maps = access_info.receiver_maps();
+ if (j == access_infos.size() - 1) {
+        // Last map check on the fallthrough control path; do a
+ // conditional eager deoptimization exit here.
+ access_builder.BuildCheckMaps(receiver, &this_effect, this_control,
+ receiver_maps);
+ fallthrough_control = nullptr;
+ } else {
+ // Explicitly branch on the {receiver_maps}.
+ ZoneHandleSet<Map> maps;
+ for (Handle<Map> map : receiver_maps) {
+ maps.insert(map, graph()->zone());
+ }
+ Node* check = this_effect =
+ graph()->NewNode(simplified()->CompareMaps(maps), receiver,
+ this_effect, fallthrough_control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
- // Perform map check on the {receiver}.
- access_builder.BuildCheckMaps(receiver, &effect, control,
- access_info.receiver_maps());
+ // Introduce a MapGuard to learn from this on the effect chain.
+ this_effect = graph()->NewNode(simplified()->MapGuard(maps), receiver,
+ this_effect, this_control);
+ }
// Access the actual element.
- ValueEffectControl continuation =
- BuildElementAccess(receiver, index, value, effect, control,
- access_info, access_mode, load_mode, store_mode);
- value = continuation.value();
- effect = continuation.effect();
- control = continuation.control();
+ ValueEffectControl continuation = BuildElementAccess(
+ this_receiver, this_index, this_value, this_effect, this_control,
+ access_info, access_mode, load_mode, store_mode);
+ values.push_back(continuation.value());
+ effects.push_back(continuation.effect());
+ controls.push_back(continuation.control());
+ }
+
+ DCHECK_NULL(fallthrough_control);
+
+ // Generate the final merge point for all (polymorphic) branches.
+ int const control_count = static_cast<int>(controls.size());
+ if (control_count == 0) {
+ value = effect = control = jsgraph()->Dead();
+ } else if (control_count == 1) {
+ value = values.front();
+ effect = effects.front();
+ control = controls.front();
} else {
- // The final states for every polymorphic branch. We join them with
- // Merge+Phi+EffectPhi at the bottom.
- ZoneVector<Node*> values(zone());
- ZoneVector<Node*> effects(zone());
- ZoneVector<Node*> controls(zone());
-
- // Generate code for the various different element access patterns.
- Node* fallthrough_control = control;
- for (size_t j = 0; j < access_infos.size(); ++j) {
- ElementAccessInfo const& access_info = access_infos[j];
- Node* this_receiver = receiver;
- Node* this_value = value;
- Node* this_index = index;
- Node* this_effect = effect;
- Node* this_control = fallthrough_control;
-
- // Perform possible elements kind transitions.
- for (auto transition : access_info.transitions()) {
- Handle<Map> const transition_source = transition.first;
- Handle<Map> const transition_target = transition.second;
- this_effect = graph()->NewNode(
- simplified()->TransitionElementsKind(
- ElementsTransition(IsSimpleMapChangeTransition(
- transition_source->elements_kind(),
- transition_target->elements_kind())
- ? ElementsTransition::kFastTransition
- : ElementsTransition::kSlowTransition,
- transition_source, transition_target)),
- receiver, this_effect, this_control);
- }
+ control = graph()->NewNode(common()->Merge(control_count), control_count,
+ &controls.front());
+ values.push_back(control);
+ value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, control_count),
+ control_count + 1, &values.front());
+ effects.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(control_count),
+ control_count + 1, &effects.front());
+ }
+ }
- // Perform map check(s) on {receiver}.
- MapHandles const& receiver_maps = access_info.receiver_maps();
- if (j == access_infos.size() - 1) {
- // Last map check on the fallthrough control path, do a
- // conditional eager deoptimization exit here.
- access_builder.BuildCheckMaps(receiver, &this_effect, this_control,
- receiver_maps);
- fallthrough_control = nullptr;
- } else {
- // Explicitly branch on the {receiver_maps}.
- ZoneHandleSet<Map> maps;
- for (Handle<Map> map : receiver_maps) {
- maps.insert(map, graph()->zone());
- }
- Node* check = this_effect =
- graph()->NewNode(simplified()->CompareMaps(maps), receiver,
- this_effect, fallthrough_control);
- Node* branch =
- graph()->NewNode(common()->Branch(), check, fallthrough_control);
- fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
- // Introduce a MapGuard to learn from this on the effect chain.
- this_effect = graph()->NewNode(simplified()->MapGuard(maps), receiver,
- this_effect, this_control);
- }
+Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant(
+ Node* node, Node* index, FeedbackNexus const& nexus, AccessMode access_mode,
+ KeyedAccessLoadMode load_mode) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kJSLoadProperty);
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
- // Access the actual element.
- ValueEffectControl continuation = BuildElementAccess(
- this_receiver, this_index, this_value, this_effect, this_control,
- access_info, access_mode, load_mode, store_mode);
- values.push_back(continuation.value());
- effects.push_back(continuation.effect());
- controls.push_back(continuation.control());
- }
+ HeapObjectMatcher mreceiver(receiver);
+ HeapObjectRef receiver_ref = mreceiver.Ref(broker()).AsHeapObject();
+ if (receiver_ref.map().oddball_type() == OddballType::kHole ||
+ receiver_ref.map().oddball_type() == OddballType::kNull ||
+ receiver_ref.map().oddball_type() == OddballType::kUndefined ||
+ (receiver_ref.map().IsString() && access_mode == AccessMode::kHas)) {
+ return NoChange();
+ }
- DCHECK_NULL(fallthrough_control);
+ // Check whether we're accessing a known element on the {receiver}
+ // that is non-configurable, non-writable (e.g. the {receiver} was
+ // frozen using Object.freeze).
+ NumberMatcher mindex(index);
+ if (mindex.IsInteger() && mindex.IsInRange(0.0, kMaxUInt32 - 1.0)) {
+ LookupIterator it(isolate(), receiver_ref.object(),
+ static_cast<uint32_t>(mindex.Value()),
+ LookupIterator::OWN);
+ if (it.state() == LookupIterator::DATA) {
+ if (it.IsReadOnly() && !it.IsConfigurable()) {
+ // We can safely constant-fold the {index} access to {receiver},
+ // since the element is non-configurable, non-writable and thus
+ // cannot change anymore.
+ Node* value = access_mode == AccessMode::kHas
+ ? jsgraph()->TrueConstant()
+ : jsgraph()->Constant(it.GetDataValue());
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
- // Generate the final merge point for all (polymorphic) branches.
- int const control_count = static_cast<int>(controls.size());
- if (control_count == 0) {
- value = effect = control = jsgraph()->Dead();
- } else if (control_count == 1) {
- value = values.front();
- effect = effects.front();
- control = controls.front();
- } else {
- control = graph()->NewNode(common()->Merge(control_count),
- control_count, &controls.front());
- values.push_back(control);
- value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, control_count),
- control_count + 1, &values.front());
- effects.push_back(control);
- effect = graph()->NewNode(common()->EffectPhi(control_count),
- control_count + 1, &effects.front());
+ // Check if the {receiver} is a known constant with a copy-on-write
+ // backing store, and whether {index} is within the appropriate
+ // bounds. In that case we can constant-fold the access and only
+ // check that the {elements} didn't change. This is sufficient as
+ // the backing store of a copy-on-write JSArray is defensively
+ // copied whenever the length or the elements (might) change.
+ //
+ // What's interesting here is that we don't need to map check the
+      // {receiver}, since JSArrays will always have their elements in
+ // the backing store.
+ if (receiver_ref.IsJSArray()) {
+ Handle<JSArray> array = receiver_ref.AsJSArray().object();
+ if (array->elements()->IsCowArray()) {
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+ Handle<FixedArray> array_elements(FixedArray::cast(array->elements()),
+ isolate());
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), elements,
+ jsgraph()->HeapConstant(array_elements));
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kCowArrayElementsChanged),
+ check, effect, control);
+ Node* value = access_mode == AccessMode::kHas
+ ? jsgraph()->TrueConstant()
+ : jsgraph()->Constant(it.GetDataValue());
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
}
}
}
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ // For constant Strings we can eagerly strength-reduce the keyed
+ // accesses using the known length, which doesn't change.
+ if (receiver_ref.IsString() && access_mode != AccessMode::kHas) {
+ // We can only assume that the {index} is a valid array index if the
+    // IC is in element access mode and not MEGAMORPHIC; otherwise there's
+ // no guard for the bounds check below.
+ if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
+ // Ensure that {index} is less than {receiver} length.
+ Node* length = jsgraph()->Constant(receiver_ref.AsString().length());
+
+ // Load the single character string from {receiver} or yield
+ // undefined if the {index} is out of bounds (depending on the
+ // {load_mode}).
+ Node* value = BuildIndexedStringLoad(receiver, index, length, &effect,
+ &control, load_mode);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
+
+ return NoChange();
}
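A sketch of the accesses the new ReduceKeyedLoadFromHeapConstant can constant-fold (illustrative JavaScript; the names are hypothetical):

    const frozen = Object.freeze([1, 2, 3]);
    frozen[0];  // non-configurable, non-writable element: folds to 1
    "abc"[1];   // constant string: the known length 3 guards the bounds check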
Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
@@ -1641,94 +1814,17 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
AccessMode access_mode, KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
- node->opcode() == IrOpcode::kJSStoreProperty);
+ node->opcode() == IrOpcode::kJSStoreProperty ||
+ node->opcode() == IrOpcode::kJSHasProperty);
+
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Optimize the case where we load from a constant {receiver}.
- if (access_mode == AccessMode::kLoad) {
- HeapObjectMatcher mreceiver(receiver);
- if (mreceiver.HasValue()) {
- HeapObjectRef receiver_ref = mreceiver.Ref(broker()).AsHeapObject();
- if (receiver_ref.map().oddball_type() != OddballType::kHole &&
- receiver_ref.map().oddball_type() != OddballType::kNull &&
- receiver_ref.map().oddball_type() != OddballType::kUndefined) {
- // Check whether we're accessing a known element on the {receiver}
- // that is non-configurable, non-writable (i.e. the {receiver} was
- // frozen using Object.freeze).
- NumberMatcher mindex(index);
- if (mindex.IsInteger() && mindex.IsInRange(0.0, kMaxUInt32 - 1.0)) {
- LookupIterator it(isolate(), receiver_ref.object(),
- static_cast<uint32_t>(mindex.Value()),
- LookupIterator::OWN);
- if (it.state() == LookupIterator::DATA) {
- if (it.IsReadOnly() && !it.IsConfigurable()) {
- // We can safely constant-fold the {index} access to {receiver},
- // since the element is non-configurable, non-writable and thus
- // cannot change anymore.
- value = jsgraph()->Constant(it.GetDataValue());
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
-
- // Check if the {receiver} is a known constant with a copy-on-write
- // backing store, and whether {index} is within the appropriate
- // bounds. In that case we can constant-fold the access and only
- // check that the {elements} didn't change. This is sufficient as
- // the backing store of a copy-on-write JSArray is defensively
- // copied whenever the length or the elements (might) change.
- //
- // What's interesting here is that we don't need to map check the
- // {receiver}, since JSArray's will always have their elements in
- // the backing store.
- if (receiver_ref.IsJSArray()) {
- Handle<JSArray> array = receiver_ref.AsJSArray().object();
- if (array->elements()->IsCowArray()) {
- Node* elements = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSObjectElements()),
- receiver, effect, control);
- Handle<FixedArray> array_elements(
- FixedArray::cast(array->elements()), isolate());
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(), elements,
- jsgraph()->HeapConstant(array_elements));
- effect = graph()->NewNode(
- simplified()->CheckIf(
- DeoptimizeReason::kCowArrayElementsChanged),
- check, effect, control);
- value = jsgraph()->Constant(it.GetDataValue());
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- }
- }
- // For constant Strings we can eagerly strength-reduce the keyed
- // accesses using the known length, which doesn't change.
- if (receiver_ref.IsString()) {
- // We can only assume that the {index} is a valid array index if the
- // IC is in element access mode and not MEGAMORPHIC, otherwise there's
- // no guard for the bounds check below.
- if (nexus.ic_state() != MEGAMORPHIC &&
- nexus.GetKeyType() == ELEMENT) {
- // Ensure that {index} is less than {receiver} length.
- Node* length =
- jsgraph()->Constant(receiver_ref.AsString().length());
-
- // Load the single character string from {receiver} or yield
- // undefined if the {index} is out of bounds (depending on the
- // {load_mode}).
- value = BuildIndexedStringLoad(receiver, index, length, &effect,
- &control, load_mode);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- }
- }
+ if (access_mode == AccessMode::kLoad &&
+ receiver->opcode() == IrOpcode::kHeapConstant) {
+ Reduction reduction = ReduceKeyedLoadFromHeapConstant(
+ node, index, nexus, access_mode, load_mode);
+ if (reduction.Changed()) return reduction;
}
// Extract receiver maps from the {nexus}.
@@ -1736,12 +1832,8 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
return NoChange();
} else if (receiver_maps.empty()) {
- if (flags() & kBailoutOnUninitialized) {
- return ReduceSoftDeoptimize(
- node,
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
- }
- return NoChange();
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
}
DCHECK(!nexus.IsUninitialized());
@@ -1765,7 +1857,7 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
}
// Check if we have feedback for a named access.
- Name name = nexus.FindFirstName();
+ Name name = nexus.GetName();
if (!name.is_null()) {
return ReduceNamedAccess(node, value, receiver_maps,
handle(name, isolate()), access_mode, index);
@@ -1783,38 +1875,47 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
}
// Try to lower the element access based on the {receiver_maps}.
- return ReduceElementAccess(node, index, value, receiver_maps, access_mode,
- load_mode, store_mode);
+ return ReduceElementAccess(node, index, value, nexus, receiver_maps,
+ access_mode, load_mode, store_mode);
}
Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
Node* node, DeoptimizeReason reason) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
- frame_state, effect, control);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- node->TrimInputCount(0);
- NodeProperties::ChangeOp(node, common()->Dead());
- return Changed(node);
+ if (flags() & kBailoutOnUninitialized) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
+ return Changed(node);
+ }
+ return NoChange();
}
-Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
+Reduction JSNativeContextSpecialization::ReduceJSHasProperty(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSHasProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* name = NodeProperties::GetValueInput(node, 1);
+ Node* index = NodeProperties::GetValueInput(node, 1);
Node* value = jsgraph()->Dead();
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // We can optimize a property load if it's being used inside a for..in,
- // so for code like this:
- //
+ // Extract receiver maps from the has property IC using the FeedbackNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ // Try to lower the keyed access based on the {nexus}.
+ return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kHas,
+ STANDARD_LOAD, STANDARD_STORE);
+}
+
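The new kHas path corresponds to the JavaScript `in` operator; an illustrative example of keyed feedback reaching ReduceKeyedAccess with AccessMode::kHas:

    const arr = [1, 2, 3];
    1 in arr;        // element feedback, handled by ReduceElementAccess
    'length' in {};  // named feedback, handled by ReduceNamedAccess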
+Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
+ Node* node) {
+ // We can optimize a property load if it's being used inside a for..in:
// for (name in receiver) {
// value = receiver[name];
// ...
@@ -1851,64 +1952,81 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
// Also note that it's safe to look through the {JSToObject}, since the
// [[Get]] operation does an implicit ToObject anyway, and these operations
// are not observable.
- if (name->opcode() == IrOpcode::kJSForInNext) {
- ForInMode const mode = ForInModeOf(name->op());
- if (mode == ForInMode::kUseEnumCacheKeysAndIndices) {
- Node* object = NodeProperties::GetValueInput(name, 0);
- Node* enumerator = NodeProperties::GetValueInput(name, 2);
- Node* index = NodeProperties::GetValueInput(name, 3);
- if (object->opcode() == IrOpcode::kJSToObject) {
- object = NodeProperties::GetValueInput(object, 0);
- }
- if (object == receiver) {
- // No need to repeat the map check if we can prove that there's no
- // observable side effect between {effect} and {name].
- if (!NodeProperties::NoObservableSideEffectBetween(effect, name)) {
- // Check that the {receiver} map is still valid.
- Node* receiver_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, effect, control);
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
- receiver_map, enumerator);
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kWrongMap), check, effect,
- control);
- }
- // Load the enum cache indices from the {cache_type}.
- Node* descriptor_array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapDescriptors()),
- enumerator, effect, control);
- Node* enum_cache = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForDescriptorArrayEnumCache()),
- descriptor_array, effect, control);
- Node* enum_indices = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForEnumCacheIndices()),
- enum_cache, effect, control);
-
- // Ensure that the {enum_indices} are valid.
- Node* check = graph()->NewNode(
- simplified()->BooleanNot(),
- graph()->NewNode(simplified()->ReferenceEqual(), enum_indices,
- jsgraph()->EmptyFixedArrayConstant()));
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kWrongEnumIndices), check,
- effect, control);
+ DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* name = NodeProperties::GetValueInput(node, 1);
+ DCHECK_EQ(IrOpcode::kJSForInNext, name->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
- // Determine the index from the {enum_indices}.
- index = effect = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForFixedArrayElement(PACKED_SMI_ELEMENTS)),
- enum_indices, index, effect, control);
+ if (ForInModeOf(name->op()) != ForInMode::kUseEnumCacheKeysAndIndices) {
+ return NoChange();
+ }
- // Load the actual field value.
- Node* value = effect = graph()->NewNode(
- simplified()->LoadFieldByIndex(), receiver, index, effect, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
+ Node* object = NodeProperties::GetValueInput(name, 0);
+ Node* enumerator = NodeProperties::GetValueInput(name, 2);
+ Node* index = NodeProperties::GetValueInput(name, 3);
+ if (object->opcode() == IrOpcode::kJSToObject) {
+ object = NodeProperties::GetValueInput(object, 0);
+ }
+ if (object != receiver) return NoChange();
+
+ // No need to repeat the map check if we can prove that there's no
+  // observable side effect between {effect} and {name}.
+ if (!NodeProperties::NoObservableSideEffectBetween(effect, name)) {
+ // Check that the {receiver} map is still valid.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
+ enumerator);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongMap),
+ check, effect, control);
+ }
+
+  // Load the enum cache indices from the {enumerator}.
+ Node* descriptor_array = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapDescriptors()), enumerator,
+ effect, control);
+ Node* enum_cache = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForDescriptorArrayEnumCache()),
+ descriptor_array, effect, control);
+ Node* enum_indices = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForEnumCacheIndices()), enum_cache,
+ effect, control);
+
+ // Ensure that the {enum_indices} are valid.
+ Node* check = graph()->NewNode(
+ simplified()->BooleanNot(),
+ graph()->NewNode(simplified()->ReferenceEqual(), enum_indices,
+ jsgraph()->EmptyFixedArrayConstant()));
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongEnumIndices), check, effect,
+ control);
+
+ // Determine the index from the {enum_indices}.
+ index = effect = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement(PACKED_SMI_ELEMENTS)),
+ enum_indices, index, effect, control);
+
+ // Load the actual field value.
+ Node* value = effect = graph()->NewNode(simplified()->LoadFieldByIndex(),
+ receiver, index, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
+ PropertyAccess const& p = PropertyAccessOf(node->op());
+ Node* name = NodeProperties::GetValueInput(node, 1);
+
+ if (name->opcode() == IrOpcode::kJSForInNext) {
+ Reduction reduction = ReduceJSLoadPropertyWithEnumeratedKey(node);
+ if (reduction.Changed()) return reduction;
}
// Extract receiver maps from the keyed load IC using the FeedbackNexus.
@@ -1919,6 +2037,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode();
// Try to lower the keyed access based on the {nexus}.
+ Node* value = jsgraph()->Dead();
return ReduceKeyedAccess(node, name, value, nexus, AccessMode::kLoad,
load_mode, STANDARD_STORE);
}
@@ -2047,16 +2166,17 @@ Node* JSNativeContextSpecialization::InlineApiCall(
// Add CallApiCallbackStub's register argument as well.
Node* context = jsgraph()->Constant(native_context());
Node* inputs[11] = {
- code, context, function_reference, jsgraph()->Constant(argc), data,
- holder, receiver};
- int index = 7 + argc;
+ code, function_reference, jsgraph()->Constant(argc), data, holder,
+ receiver};
+ int index = 6 + argc;
+ inputs[index++] = context;
inputs[index++] = frame_state;
inputs[index++] = *effect;
inputs[index++] = *control;
// This needs to stay here because of the edge case described in
// http://crbug.com/675648.
if (value != nullptr) {
- inputs[7] = value;
+ inputs[6] = value;
}
return *effect = *control =
@@ -2073,7 +2193,7 @@ JSNativeContextSpecialization::BuildPropertyLoad(
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- broker(), access_info.receiver_maps(), JSObjectRef(broker(), holder));
+ access_info.receiver_maps(), JSObjectRef(broker(), holder));
}
// Generate the actual property access.
@@ -2103,6 +2223,22 @@ JSNativeContextSpecialization::BuildPropertyLoad(
}
JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::BuildPropertyTest(
+ Node* effect, Node* control, PropertyAccessInfo const& access_info) {
+ // Determine actual holder and perform prototype chain checks.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ dependencies()->DependOnStablePrototypeChains(
+ access_info.receiver_maps(), JSObjectRef(broker(), holder));
+ }
+
+ Node* value = access_info.IsNotFound() ? jsgraph()->FalseConstant()
+ : jsgraph()->TrueConstant();
+
+ return ValueEffectControl(value, effect, control);
+}
+
+JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyAccess(
Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
Node* control, Handle<Name> name, ZoneVector<Node*>* if_exceptions,
@@ -2116,6 +2252,8 @@ JSNativeContextSpecialization::BuildPropertyAccess(
return BuildPropertyStore(receiver, value, context, frame_state, effect,
control, name, if_exceptions, access_info,
access_mode);
+ case AccessMode::kHas:
+ return BuildPropertyTest(effect, control, access_info);
}
UNREACHABLE();
return ValueEffectControl();
@@ -2132,7 +2270,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
if (access_info.holder().ToHandle(&holder)) {
DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
dependencies()->DependOnStablePrototypeChains(
- broker(), access_info.receiver_maps(), JSObjectRef(broker(), holder));
+ access_info.receiver_maps(), JSObjectRef(broker(), holder));
}
DCHECK(!access_info.IsNotFound());
@@ -2341,7 +2479,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
DCHECK_EQ(MONOMORPHIC, nexus.ic_state());
- Map map = nexus.FindFirstMap();
+ Map map = nexus.GetFirstMap();
if (map.is_null()) {
// Maps are weakly held in the type feedback vector, we may not have one.
return NoChange();
@@ -2356,8 +2494,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
isolate());
PropertyAccessInfo access_info;
- AccessInfoFactory access_info_factory(
- broker(), dependencies(), native_context().object(), graph()->zone());
+ AccessInfoFactory access_info_factory(broker(), dependencies(),
+ graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
receiver_map, cached_name, AccessMode::kStoreInLiteral,
&access_info)) {
@@ -2418,12 +2556,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
return NoChange();
} else if (receiver_maps.empty()) {
- if (flags() & kBailoutOnUninitialized) {
- return ReduceSoftDeoptimize(
- node,
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
- }
- return NoChange();
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
}
DCHECK(!nexus.IsUninitialized());
DCHECK_EQ(ELEMENT, nexus.GetKeyType());
@@ -2431,7 +2565,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
if (nexus.ic_state() == MEGAMORPHIC) return NoChange();
// Try to lower the element access based on the {receiver_maps}.
- return ReduceElementAccess(node, index, value, receiver_maps,
+ return ReduceElementAccess(node, index, value, nexus, receiver_maps,
AccessMode::kStoreInLiteral, STANDARD_LOAD,
store_mode);
}
@@ -2488,7 +2622,6 @@ JSNativeContextSpecialization::BuildElementAccess(
Node* receiver, Node* index, Node* value, Node* effect, Node* control,
ElementAccessInfo const& access_info, AccessMode access_mode,
KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
-
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
@@ -2554,12 +2687,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// See if we can skip the detaching check.
- if (isolate()->IsArrayBufferDetachingIntact()) {
- // Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets detached.
- dependencies()->DependOnProtector(PropertyCellRef(
- broker(), factory()->array_buffer_detaching_protector()));
- } else {
+ if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
// Deopt if the {buffer} was detached.
// Note: A detached buffer leads to megamorphic feedback.
Node* buffer_bit_field = effect = graph()->NewNode(
@@ -2588,7 +2716,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// below are performed on unsigned values, which means that all the
// Negative32 values are treated as out-of-bounds.
index = graph()->NewNode(simplified()->NumberToUint32(), index);
- } else {
+ } else if (access_mode != AccessMode::kHas) {
// Check that the {index} is in the valid range for the {receiver}.
index = effect =
graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
@@ -2693,6 +2821,13 @@ JSNativeContextSpecialization::BuildElementAccess(
}
break;
}
+ case AccessMode::kHas:
+ // For has property on a typed array, all we need is a bounds check.
+ value = effect =
+ graph()->NewNode(simplified()->SpeculativeNumberLessThan(
+ NumberOperationHint::kSignedSmall),
+ index, length, effect, control);
+ break;
}
} else {
// Load the elements for the {receiver}.
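For typed arrays, the kHas case above reduces to the bounds check alone; an illustrative sketch:

    const ta = new Int32Array(4);
    2 in ta;  // true: 2 < ta.length
    9 in ta;  // false: out of bounds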
@@ -2739,7 +2874,8 @@ JSNativeContextSpecialization::BuildElementAccess(
index = effect = graph()->NewNode(
simplified()->CheckBounds(VectorSlotPair()), index,
jsgraph()->Constant(Smi::kMaxValue), effect, control);
- } else {
+ } else if (access_mode != AccessMode::kHas ||
+ load_mode != LOAD_IGNORE_OUT_OF_BOUNDS) {
// Check that the {index} is in the valid range for the {receiver}.
index = effect =
graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
@@ -2859,6 +2995,78 @@ JSNativeContextSpecialization::BuildElementAccess(
effect, control);
}
}
+ } else if (access_mode == AccessMode::kHas) {
+      // For packed arrays with NoElementsProtector valid, a bounds check
+ // is equivalent to HasProperty.
+ value = effect = graph()->NewNode(simplified()->SpeculativeNumberLessThan(
+ NumberOperationHint::kSignedSmall),
+ index, length, effect, control);
+ if (IsHoleyElementsKind(elements_kind)) {
+ // If the index is in bounds, do a load and hole check.
+
+ Node* branch = graph()->NewNode(common()->Branch(), value, control);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = jsgraph()->FalseConstant();
+
+ element_access.type =
+ Type::Union(element_type, Type::Hole(), graph()->zone());
+
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ element_access.machine_type = MachineType::AnyTagged();
+ }
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+
+ Node* checked = etrue =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ length, etrue, if_true);
+
+ Node* element = etrue =
+ graph()->NewNode(simplified()->LoadElement(element_access),
+ elements, checked, etrue, if_true);
+
+ Node* vtrue;
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+            // Check whether the {element} is the hole; if so, the
+            // property does not exist.
+ vtrue = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ } else {
+ vtrue =
+ graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ }
+
+ // has == !IsHole
+ vtrue = graph()->NewNode(simplified()->BooleanNot(), vtrue);
+ } else {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ // Bailout if we see the hole.
+ etrue = graph()->NewNode(simplified()->CheckNotTaggedHole(),
+ element, etrue, if_true);
+ } else {
+ etrue = graph()->NewNode(
+ simplified()->CheckFloat64Hole(
+ CheckFloat64HoleMode::kNeverReturnHole, VectorSlotPair()),
+ element, etrue, if_true);
+ }
+
+ vtrue = jsgraph()->TrueConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
} else {
DCHECK(access_mode == AccessMode::kStore ||
access_mode == AccessMode::kStoreInLiteral);
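An illustrative sketch of the holey-array kHas logic above (assuming the array prototype chain holds no elements, which the NoElementsProtector dependency guarantees):

    const a = [0, , 2];  // holey elements; slot 1 holds the hole
    0 in a;              // true: in bounds and not the hole
    1 in a;              // false: the loaded element is the hole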
@@ -2968,10 +3176,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
Node* receiver, Node* index, Node* length, Node** effect, Node** control,
KeyedAccessLoadMode load_mode) {
if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
- isolate()->IsNoElementsProtectorIntact()) {
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
-
+ dependencies()->DependOnNoElementsProtector()) {
// Ensure that the {index} is a valid String length.
index = *effect = graph()->NewNode(
simplified()->CheckBounds(VectorSlotPair()), index,
@@ -3118,17 +3323,26 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
}
// Check if the array prototype chain is intact.
- if (!isolate()->IsNoElementsProtectorIntact()) return false;
+ return dependencies()->DependOnNoElementsProtector();
+}
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
- return true;
+namespace {
+void TryUpdateThenDropDeprecated(Isolate* isolate, MapHandles* maps) {
+ for (auto it = maps->begin(); it != maps->end();) {
+ if (Map::TryUpdate(isolate, *it).ToHandle(&*it)) {
+ DCHECK(!(*it)->is_deprecated());
+ ++it;
+ } else {
+ it = maps->erase(it);
+ }
+ }
}
+} // namespace
bool JSNativeContextSpecialization::ExtractReceiverMaps(
Node* receiver, Node* effect, FeedbackNexus const& nexus,
MapHandles* receiver_maps) {
- DCHECK_EQ(0, receiver_maps->size());
+ DCHECK(receiver_maps->empty());
if (nexus.IsUninitialized()) return true;
// See if we can infer a concrete type for the {receiver}. Solely relying on
@@ -3139,7 +3353,7 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
bool use_inference =
!IsKeyedStoreICKind(kind) && !IsStoreInArrayLiteralICKind(kind);
if (use_inference && InferReceiverMaps(receiver, effect, receiver_maps)) {
- // We can assume that {receiver} still has the inferred {receiver_maps}.
+ TryUpdateThenDropDeprecated(isolate(), receiver_maps);
return true;
}
}
@@ -3159,6 +3373,7 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
}),
receiver_maps->end());
}
+ TryUpdateThenDropDeprecated(isolate(), receiver_maps);
return true;
}
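A sketch of why TryUpdateThenDropDeprecated is needed (illustrative; `Point` is a hypothetical constructor):

    function Point(x) { this.x = x; }
    const p = new Point(1);    // map M1, x stored as a Smi
    const q = new Point(1.5);  // field generalized to Double; M1 deprecated
    // Feedback may still hold M1; Map::TryUpdate migrates it to the
    // current map, and maps that cannot be updated are dropped.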
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index fb8ee9b616..9c6cbdcb6e 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -84,6 +84,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSStoreGlobal(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSStoreNamed(Node* node);
+ Reduction ReduceJSHasProperty(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
Reduction ReduceJSStoreNamedOwn(Node* node);
@@ -92,6 +93,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSToObject(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
+ FeedbackNexus const& nexus,
MapHandles const& receiver_maps,
AccessMode access_mode,
KeyedAccessLoadMode load_mode,
@@ -112,10 +114,22 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
Handle<Name> name, AccessMode access_mode,
Node* index = nullptr);
+ Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
+ Handle<Name> name, AccessMode access_mode,
+ Node* index, Handle<PropertyCell> property_cell);
+ Reduction ReduceKeyedLoadFromHeapConstant(Node* node, Node* index,
+ FeedbackNexus const& nexus,
+ AccessMode access_mode,
+ KeyedAccessLoadMode load_mode);
+ Reduction ReduceElementAccessOnString(Node* node, Node* index, Node* value,
+ AccessMode access_mode,
+ KeyedAccessLoadMode load_mode);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
Reduction ReduceJSToString(Node* node);
+ Reduction ReduceJSLoadPropertyWithEnumeratedKey(Node* node);
+
const StringConstantBase* CreateDelayedStringConstant(Node* node);
// A triple of nodes that represents a continuation.
@@ -158,6 +172,9 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
PropertyAccessInfo const& access_info,
AccessMode access_mode);
+ ValueEffectControl BuildPropertyTest(Node* effect, Node* control,
+ PropertyAccessInfo const& access_info);
+
// Helpers for accessor inlining.
Node* InlinePropertyGetterCall(Node* receiver, Node* context,
Node* frame_state, Node** effect,
@@ -206,9 +223,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
MapHandles* receiver_maps);
// Try to infer maps for the given {receiver} at the current {effect}.
- // If maps are returned then you can be sure that the {receiver} definitely
- // has one of the returned maps at this point in the program (identified
- // by {effect}).
bool InferReceiverMaps(Node* receiver, Node* effect,
MapHandles* receiver_maps);
// Try to infer a root map for the {receiver} independent of the current
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index f2a2e7c924..e207a034eb 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -282,7 +282,8 @@ bool operator!=(PropertyAccess const& lhs, PropertyAccess const& rhs) {
PropertyAccess const& PropertyAccessOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kJSLoadProperty ||
+ DCHECK(op->opcode() == IrOpcode::kJSHasProperty ||
+ op->opcode() == IrOpcode::kJSLoadProperty ||
op->opcode() == IrOpcode::kJSStoreProperty);
return OpParameter<PropertyAccess>(op);
}
@@ -624,7 +625,6 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(CreateTypedArray, Operator::kNoProperties, 5, 1) \
V(CreateObject, Operator::kNoProperties, 1, 1) \
V(ObjectIsArray, Operator::kNoProperties, 1, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
V(ForInEnumerate, Operator::kNoProperties, 1, 1) \
@@ -722,8 +722,7 @@ struct JSOperatorGlobalCache final {
};
namespace {
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(JSOperatorGlobalCache,
- GetJSOperatorGlobalCache);
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(JSOperatorGlobalCache, GetJSOperatorGlobalCache)
}
JSOperatorBuilder::JSOperatorBuilder(Zone* zone)
@@ -951,6 +950,15 @@ const Operator* JSOperatorBuilder::LoadProperty(
access); // parameter
}
+const Operator* JSOperatorBuilder::HasProperty(VectorSlotPair const& feedback) {
+ PropertyAccess access(LanguageMode::kSloppy, feedback);
+ return new (zone()) Operator1<PropertyAccess>( // --
+ IrOpcode::kJSHasProperty, Operator::kNoProperties, // opcode
+ "JSHasProperty", // name
+ 2, 1, 1, 1, 1, 2, // counts
+ access); // parameter
+}
+
const Operator* JSOperatorBuilder::InstanceOf(VectorSlotPair const& feedback) {
FeedbackParameter parameter(feedback);
return new (zone()) Operator1<FeedbackParameter>( // --
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 16eaf615d9..773f5ddcbb 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -789,7 +789,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* DeleteProperty();
- const Operator* HasProperty();
+ const Operator* HasProperty(VectorSlotPair const& feedback);
const Operator* GetSuperConstructor();
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 9be71cbd27..452609a4d5 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -536,12 +536,16 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
// JSAdd("", x:primitive) => JSToString(x)
NodeProperties::ReplaceValueInputs(node, r.right());
NodeProperties::ChangeOp(node, javascript()->ToString());
+ NodeProperties::SetType(
+ node, Type::Intersect(r.type(), Type::String(), graph()->zone()));
Reduction const reduction = ReduceJSToString(node);
return reduction.Changed() ? reduction : Changed(node);
} else if (r.RightInputIs(empty_string_type_)) {
// JSAdd(x:primitive, "") => JSToString(x)
NodeProperties::ReplaceValueInputs(node, r.left());
NodeProperties::ChangeOp(node, javascript()->ToString());
+ NodeProperties::SetType(
+ node, Type::Intersect(r.type(), Type::String(), graph()->zone()));
Reduction const reduction = ReduceJSToString(node);
return reduction.Changed() ? reduction : Changed(node);
}
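A sketch of the JSAdd strength reduction being retyped here (x must be typed as a primitive for the matcher to fire):

    "" + x;  // rewritten to JSToString(x); the node is now typed String
    x + "";  // the mirrored right-input case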
@@ -959,8 +963,9 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
}
if (input_type.IsHeapConstant()) {
HeapObjectRef input_value = input_type.AsHeapConstant()->Ref();
- if (input_value.map().oddball_type() != OddballType::kNone) {
- return Replace(jsgraph()->Constant(input_value.OddballToNumber()));
+ double value;
+ if (input_value.OddballToNumber().To(&value)) {
+ return Replace(jsgraph()->Constant(value));
}
}
if (input_type.Is(Type::Number())) {
@@ -1611,7 +1616,7 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
Reduction JSTypedLowering::ReduceJSCall(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int const arity = static_cast<int>(p.arity() - 2);
+ int arity = static_cast<int>(p.arity() - 2);
ConvertReceiverMode convert_mode = p.convert_mode();
Node* target = NodeProperties::GetValueInput(node, 0);
Type target_type = NodeProperties::GetType(target);
@@ -1669,21 +1674,52 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
- Node* argument_count = jsgraph()->Constant(arity);
if (NeedsArgumentAdaptorFrame(shared, arity)) {
- // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
- Callable callable = CodeFactory::ArgumentAdaptor(isolate());
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, argument_count);
- node->InsertInput(
- graph()->zone(), 4,
- jsgraph()->Constant(shared.internal_formal_parameter_count()));
- NodeProperties::ChangeOp(
- node, common()->Call(Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 1 + arity, flags)));
+ // Check if it's safe to skip the arguments adaptor for {shared},
+      // that is, whether the target function cannot observe the
+      // actual arguments anyway. Details can be found in the document at
+ // https://bit.ly/v8-faster-calls-with-arguments-mismatch and
+ // on the tracking bug at https://crbug.com/v8/8895
+ if (shared.is_safe_to_skip_arguments_adaptor()) {
+ // Currently we only support skipping arguments adaptor frames
+      // for strict mode functions, since the legacy Function.arguments
+      // accessor is still available in sloppy mode.
+ DCHECK_EQ(LanguageMode::kStrict, shared.language_mode());
+
+ // Massage the arguments to match the expected number of arguments.
+ int expected_argument_count = shared.internal_formal_parameter_count();
+ for (; arity > expected_argument_count; --arity) {
+ node->RemoveInput(arity + 1);
+ }
+ for (; arity < expected_argument_count; ++arity) {
+ node->InsertInput(graph()->zone(), arity + 2,
+ jsgraph()->UndefinedConstant());
+ }
+
+ // Patch {node} to a direct call.
+ node->InsertInput(graph()->zone(), arity + 2, new_target);
+ node->InsertInput(graph()->zone(), arity + 3,
+ jsgraph()->Constant(arity));
+ NodeProperties::ChangeOp(node,
+ common()->Call(Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, 1 + arity,
+ flags | CallDescriptor::kCanUseRoots)));
+ } else {
+ // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
+ Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(
+ graph()->zone(), 4,
+ jsgraph()->Constant(shared.internal_formal_parameter_count()));
+ NodeProperties::ChangeOp(
+ node,
+ common()->Call(Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(), 1 + arity, flags)));
+ }
} else if (shared.HasBuiltinId() &&
Builtins::HasCppImplementation(shared.builtin_id())) {
// Patch {node} to a direct CEntry call.
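An illustrative call that the new adaptor-skipping path above can patch directly (assuming f is safe to skip, i.e. strict and never observing its actual arguments):

    'use strict';
    function f(a, b) { return a; }
    f(1, 2, 3);  // extra argument dropped at the call site
    f(1);        // padded with undefined to match the formal parameter count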
@@ -1701,12 +1737,12 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 0, stub_code); // Code object.
node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, argument_count);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
// Patch {node} to a direct call.
node->InsertInput(graph()->zone(), arity + 2, new_target);
- node->InsertInput(graph()->zone(), arity + 3, argument_count);
+ node->InsertInput(graph()->zone(), arity + 3, jsgraph()->Constant(arity));
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + arity,
@@ -1986,8 +2022,7 @@ Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
ExternalReference const ref =
ExternalReference::address_of_pending_message_obj(isolate());
node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
- NodeProperties::ChangeOp(
- node, simplified()->LoadField(AccessBuilder::ForExternalTaggedValue()));
+ NodeProperties::ChangeOp(node, simplified()->LoadMessage());
return Changed(node);
}
@@ -1998,8 +2033,7 @@ Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
node->ReplaceInput(1, value);
- NodeProperties::ChangeOp(
- node, simplified()->StoreField(AccessBuilder::ForExternalTaggedValue()));
+ NodeProperties::ChangeOp(node, simplified()->StoreMessage());
return Changed(node);
}
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 0dc28e0f77..6a7bdd90ab 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -119,6 +119,17 @@ int CallDescriptor::GetStackParameterDelta(
return stack_param_delta;
}
+int CallDescriptor::GetTaggedParameterSlots() const {
+ int result = 0;
+ for (size_t i = 0; i < InputCount(); ++i) {
+ LinkageLocation operand = GetInputLocation(i);
+ if (!operand.IsRegister() && operand.GetType().IsTagged()) {
+ ++result;
+ }
+ }
+ return result;
+}
+
bool CallDescriptor::CanTailCall(const Node* node) const {
return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 0be3053274..5ebfedf4fd 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -153,7 +153,9 @@ class LinkageLocation {
LinkageLocation(LocationType type, int32_t location,
MachineType machine_type) {
bit_field_ = TypeField::encode(type) |
- ((location << LocationField::kShift) & LocationField::kMask);
+ // {location} can be -1 (ANY_REGISTER).
+ ((static_cast<uint32_t>(location) << LocationField::kShift) &
+ LocationField::kMask);
machine_type_ = machine_type;
}
@@ -315,6 +317,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final
int GetStackParameterDelta(const CallDescriptor* tail_caller) const;
+ int GetTaggedParameterSlots() const;
+
bool CanTailCall(const Node* call) const;
int CalculateFixedFrameSize() const;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 4e89ab6b9c..8bad89a7f7 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -4,6 +4,7 @@
#include "src/compiler/load-elimination.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
@@ -112,9 +113,13 @@ Reduction LoadElimination::Reduce(Node* node) {
case IrOpcode::kTransitionElementsKind:
return ReduceTransitionElementsKind(node);
case IrOpcode::kLoadField:
- return ReduceLoadField(node);
+ return ReduceLoadField(node, FieldAccessOf(node->op()));
case IrOpcode::kStoreField:
- return ReduceStoreField(node);
+ return ReduceStoreField(node, FieldAccessOf(node->op()));
+ case IrOpcode::kStoreMessage:
+ return ReduceStoreField(node, AccessBuilder::ForExternalIntPtr());
+ case IrOpcode::kLoadMessage:
+ return ReduceLoadField(node, AccessBuilder::ForExternalIntPtr());
case IrOpcode::kLoadElement:
return ReduceLoadElement(node);
case IrOpcode::kStoreElement:
@@ -622,7 +627,7 @@ void LoadElimination::AbstractStateForEffectNodes::Set(
}
Reduction LoadElimination::ReduceMapGuard(Node* node) {
- ZoneHandleSet<Map> const maps = MapGuardMapsOf(node->op()).maps();
+ ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
@@ -637,7 +642,7 @@ Reduction LoadElimination::ReduceMapGuard(Node* node) {
}
Reduction LoadElimination::ReduceCheckMaps(Node* node) {
- ZoneHandleSet<Map> const maps = CheckMapsParametersOf(node->op()).maps();
+ ZoneHandleSet<Map> const& maps = CheckMapsParametersOf(node->op()).maps();
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
@@ -652,7 +657,7 @@ Reduction LoadElimination::ReduceCheckMaps(Node* node) {
}
Reduction LoadElimination::ReduceCompareMaps(Node* node) {
- ZoneHandleSet<Map> const maps = CompareMapsParametersOf(node->op()).maps();
+ ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
@@ -675,7 +680,7 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- // Check if the {elements} already have the fixed array map.
+ // Check if the {elements} already have the fixed array map.
ZoneHandleSet<Map> elements_maps;
ZoneHandleSet<Map> fixed_array_maps(factory()->fixed_array_map());
if (state->LookupMaps(elements, &elements_maps) &&
@@ -784,8 +789,8 @@ Reduction LoadElimination::ReduceTransitionAndStoreElement(Node* node) {
return UpdateState(node, state);
}
-Reduction LoadElimination::ReduceLoadField(Node* node) {
- FieldAccess const& access = FieldAccessOf(node->op());
+Reduction LoadElimination::ReduceLoadField(Node* node,
+ FieldAccess const& access) {
Node* object = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -833,8 +838,8 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
return UpdateState(node, state);
}
-Reduction LoadElimination::ReduceStoreField(Node* node) {
- FieldAccess const& access = FieldAccessOf(node->op());
+Reduction LoadElimination::ReduceStoreField(Node* node,
+ FieldAccess const& access) {
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const new_value = NodeProperties::GetValueInput(node, 1);
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -1070,6 +1075,25 @@ Reduction LoadElimination::UpdateState(Node* node, AbstractState const* state) {
return NoChange();
}
+LoadElimination::AbstractState const*
+LoadElimination::ComputeLoopStateForStoreField(
+ Node* current, LoadElimination::AbstractState const* state,
+ FieldAccess const& access) const {
+ Node* const object = NodeProperties::GetValueInput(current, 0);
+ if (access.offset == HeapObject::kMapOffset) {
+ // Invalidate what we know about the {object}'s map.
+ state = state->KillMaps(object, zone());
+ } else {
+ int field_index = FieldIndexOf(access);
+ if (field_index < 0) {
+ state = state->KillFields(object, access.name, zone());
+ } else {
+ state = state->KillField(object, field_index, access.name, zone());
+ }
+ }
+ return state;
+}
+
LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
Node* node, AbstractState const* state) const {
Node* const control = NodeProperties::GetControlInput(node);
@@ -1126,23 +1150,14 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
MaybeHandle<Name>(), zone());
break;
}
- case IrOpcode::kStoreField: {
- FieldAccess const& access = FieldAccessOf(current->op());
- Node* const object = NodeProperties::GetValueInput(current, 0);
- if (access.offset == HeapObject::kMapOffset) {
- // Invalidate what we know about the {object}s map.
- state = state->KillMaps(object, zone());
- } else {
- int field_index = FieldIndexOf(access);
- if (field_index < 0) {
- state = state->KillFields(object, access.name, zone());
- } else {
- state =
- state->KillField(object, field_index, access.name, zone());
- }
- }
+ case IrOpcode::kStoreField:
+ state = ComputeLoopStateForStoreField(current, state,
+ FieldAccessOf(current->op()));
+ break;
+ case IrOpcode::kStoreMessage:
+ state = ComputeLoopStateForStoreField(
+ current, state, AccessBuilder::ForExternalIntPtr());
break;
- }
case IrOpcode::kStoreElement: {
Node* const object = NodeProperties::GetValueInput(current, 0);
Node* const index = NodeProperties::GetValueInput(current, 1);
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 8fa31be074..e18c3a7602 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -243,20 +243,25 @@ class V8_EXPORT_PRIVATE LoadElimination final
Reduction ReduceEnsureWritableFastElements(Node* node);
Reduction ReduceMaybeGrowFastElements(Node* node);
Reduction ReduceTransitionElementsKind(Node* node);
- Reduction ReduceLoadField(Node* node);
- Reduction ReduceStoreField(Node* node);
+ Reduction ReduceLoadField(Node* node, FieldAccess const& access);
+ Reduction ReduceStoreField(Node* node, FieldAccess const& access);
Reduction ReduceLoadElement(Node* node);
Reduction ReduceStoreElement(Node* node);
Reduction ReduceTransitionAndStoreElement(Node* node);
Reduction ReduceStoreTypedElement(Node* node);
Reduction ReduceEffectPhi(Node* node);
Reduction ReduceStart(Node* node);
+ Reduction ReduceStoreMessage(Node* node);
+ Reduction ReduceLoadMessage(Node* node);
Reduction ReduceOtherNode(Node* node);
Reduction UpdateState(Node* node, AbstractState const* state);
AbstractState const* ComputeLoopState(Node* node,
AbstractState const* state) const;
+ AbstractState const* ComputeLoopStateForStoreField(
+ Node* current, LoadElimination::AbstractState const* state,
+ FieldAccess const& access) const;
AbstractState const* UpdateStateForPhi(AbstractState const* state,
Node* effect_phi, Node* phi);
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 751cdacca6..a2fa6a5bb7 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -563,7 +563,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Pow: {
Float64BinopMatcher m(node);
if (m.IsFoldable()) {
- return ReplaceFloat64(Pow(m.left().Value(), m.right().Value()));
+ return ReplaceFloat64(
+ base::ieee754::pow(m.left().Value(), m.right().Value()));
} else if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
return ReplaceFloat64(1.0);
} else if (m.right().Is(-2.0)) { // x ** -2.0 => 1 / (x * x)
@@ -1395,8 +1396,7 @@ namespace {
bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
if (m.HasValue()) {
double v = m.Value();
- float fv = static_cast<float>(v);
- return static_cast<double>(fv) == v;
+ return DoubleToFloat32(v) == v;
}
return false;
}
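
The representability check keeps its round-trip logic, now routed through the DoubleToFloat32 helper: a double is exactly representable as a float iff narrowing and widening returns the identical value. The same test in plain C++:

#include <iostream>

// A double is exactly representable as a float iff the narrow/widen round
// trip is an identity. (NaN fails the == test, which is harmless here:
// the fold simply doesn't fire for it.)
bool IsRepresentableAsFloat32(double v) {
  return static_cast<double>(static_cast<float>(v)) == v;
}

int main() {
  std::cout << IsRepresentableAsFloat32(0.5) << "\n";   // 1: exact in binary32
  std::cout << IsRepresentableAsFloat32(0.1) << "\n";   // 0: re-rounds
  std::cout << IsRepresentableAsFloat32(1e39) << "\n";  // 0: overflows to inf
}
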
@@ -1461,7 +1461,7 @@ Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64RoundDown, node->opcode());
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) {
- return ReplaceFloat64(Floor(m.Value()));
+ return ReplaceFloat64(std::floor(m.Value()));
}
return NoChange();
}
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index d740ff6f72..e16f2588d5 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -818,7 +818,7 @@ struct CommentOperator : public Operator1<const char*> {
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(MachineOperatorGlobalCache,
- GetMachineOperatorGlobalCache);
+ GetMachineOperatorGlobalCache)
}
MachineOperatorBuilder::MachineOperatorBuilder(
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 91a19891db..ca9a2668d3 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -97,6 +97,8 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
return VisitStoreElement(node, state);
case IrOpcode::kStoreField:
return VisitStoreField(node, state);
+ case IrOpcode::kStore:
+ return VisitStore(node, state);
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kComment:
@@ -110,7 +112,6 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
- case IrOpcode::kStore:
case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
@@ -458,6 +459,20 @@ void MemoryOptimizer::VisitStoreField(Node* node,
EnqueueUses(node, state);
}
+void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStore, node->opcode());
+ StoreRepresentation representation = StoreRepresentationOf(node->op());
+ Node* object = node->InputAt(0);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ object, state, representation.write_barrier_kind());
+ if (write_barrier_kind != representation.write_barrier_kind()) {
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ representation.representation(), write_barrier_kind)));
+ }
+ EnqueueUses(node, state);
+}
+
void MemoryOptimizer::VisitOtherEffect(Node* node,
AllocationState const* state) {
EnqueueUses(node, state);
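
Handling kStore in the memory optimizer lets raw stores benefit from the same write-barrier elision as StoreField: when the allocation state proves the target object was just allocated in new space, the requested barrier can be dropped. A condensed sketch of that decision under an invented allocation-state model:

#include <iostream>

enum class WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };

// Invented allocation-state model; V8 tracks which objects the current
// code region has freshly allocated in new space.
struct AllocationState {
  bool object_is_fresh_young_allocation;
};

// Mirrors the shape of ComputeWriteBarrierKind: a store into an object we
// just allocated cannot create an old-to-new pointer the GC is unaware of,
// so the requested barrier can be dropped.
WriteBarrierKind ComputeWriteBarrierKind(const AllocationState& state,
                                         WriteBarrierKind requested) {
  if (state.object_is_fresh_young_allocation) {
    return WriteBarrierKind::kNoWriteBarrier;
  }
  return requested;
}

int main() {
  AllocationState fresh{true};
  AllocationState unknown{false};
  bool elided =
      ComputeWriteBarrierKind(fresh, WriteBarrierKind::kFullWriteBarrier) ==
      WriteBarrierKind::kNoWriteBarrier;
  bool kept =
      ComputeWriteBarrierKind(unknown, WriteBarrierKind::kFullWriteBarrier) ==
      WriteBarrierKind::kFullWriteBarrier;
  std::cout << elided << " " << kept << "\n";  // 1 1
}
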
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index 31c04e5f2f..c0010ed0ee 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -117,6 +117,7 @@ class MemoryOptimizer final {
void VisitLoadField(Node*, AllocationState const*);
void VisitStoreElement(Node*, AllocationState const*);
void VisitStoreField(Node*, AllocationState const*);
+ void VisitStore(Node*, AllocationState const*);
void VisitOtherEffect(Node*, AllocationState const*);
Node* ComputeIndex(ElementAccess const&, Node*);
diff --git a/deps/v8/src/compiler/node-cache.cc b/deps/v8/src/compiler/node-cache.cc
index 6b9c8dc07d..78d7eccbb3 100644
--- a/deps/v8/src/compiler/node-cache.cc
+++ b/deps/v8/src/compiler/node-cache.cc
@@ -112,11 +112,13 @@ void NodeCache<Key, Hash, Pred>::GetCachedNodes(ZoneVector<Node*>* nodes) {
// -----------------------------------------------------------------------------
// Instantiations
-template class V8_EXPORT_PRIVATE NodeCache<int32_t>;
-template class V8_EXPORT_PRIVATE NodeCache<int64_t>;
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) NodeCache<int32_t>;
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) NodeCache<int64_t>;
-template class V8_EXPORT_PRIVATE NodeCache<RelocInt32Key>;
-template class V8_EXPORT_PRIVATE NodeCache<RelocInt64Key>;
+template class EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) NodeCache<RelocInt32Key>;
+template class EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) NodeCache<RelocInt64Key>;
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/node-cache.h b/deps/v8/src/compiler/node-cache.h
index 72b5fdf2f3..6a70212f47 100644
--- a/deps/v8/src/compiler/node-cache.h
+++ b/deps/v8/src/compiler/node-cache.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_NODE_CACHE_H_
#define V8_COMPILER_NODE_CACHE_H_
+#include "src/base/export-template.h"
#include "src/base/functional.h"
#include "src/base/macros.h"
@@ -27,7 +28,7 @@ class Node;
// nodes such as constants, parameters, etc.
template <typename Key, typename Hash = base::hash<Key>,
typename Pred = std::equal_to<Key> >
-class V8_EXPORT_PRIVATE NodeCache final {
+class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) NodeCache final {
public:
explicit NodeCache(unsigned max = 256)
: entries_(nullptr), size_(0), max_(max) {}
@@ -77,6 +78,17 @@ typedef Int32NodeCache IntPtrNodeCache;
typedef Int64NodeCache IntPtrNodeCache;
#endif
+// Explicit instantiation declarations.
+extern template class EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) NodeCache<int32_t>;
+extern template class EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) NodeCache<int64_t>;
+
+extern template class EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) NodeCache<RelocInt32Key>;
+extern template class EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) NodeCache<RelocInt64Key>;
+
} // namespace compiler
} // namespace internal
} // namespace v8
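
The NodeCache change pairs extern-template declarations in the header with explicit instantiation definitions in the .cc file, so all translation units link against one exported instantiation instead of each emitting its own. The macro-free core of the idiom, as a single-file sketch (EXPORT_TEMPLATE_DECLARE/DEFINE additionally attach visibility attributes):

#include <iostream>

// Header side: the template plus explicit instantiation *declarations*.
template <typename T>
class NodeCacheLike {
 public:
  T Get() const { return T(42); }
};
extern template class NodeCacheLike<int>;
extern template class NodeCacheLike<long>;

// .cc side: the explicit instantiation *definitions* (EXPORT_TEMPLATE_DEFINE).
// Exactly one translation unit provides these; all others reuse the symbols
// instead of instantiating the template themselves.
template class NodeCacheLike<int>;
template class NodeCacheLike<long>;

int main() {
  NodeCacheLike<int> cache;
  std::cout << cache.Get() << "\n";  // 42
}
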
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 375a420dd4..d81d84eca3 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -427,7 +427,7 @@ enum class AddressOption : uint8_t {
};
typedef base::Flags<AddressOption, uint8_t> AddressOptions;
-DEFINE_OPERATORS_FOR_FLAGS(AddressOptions);
+DEFINE_OPERATORS_FOR_FLAGS(AddressOptions)
template <class AddMatcher>
struct BaseWithIndexAndDisplacementMatcher {
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 8e3421c1a0..a769fba563 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -13,7 +13,6 @@
#include "src/compiler/verifier.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
-#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
@@ -392,7 +391,7 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
case IrOpcode::kMapGuard: {
Node* const object = GetValueInput(effect, 0);
if (IsSame(receiver, object)) {
- *maps_return = MapGuardMapsOf(effect->op()).maps();
+ *maps_return = MapGuardMapsOf(effect->op());
return result;
}
break;
@@ -511,20 +510,6 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
}
// static
-MaybeHandle<Map> NodeProperties::GetMapWitness(JSHeapBroker* broker,
- Node* node) {
- ZoneHandleSet<Map> maps;
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker, receiver, effect, &maps);
- if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
- return maps[0];
- }
- return MaybeHandle<Map>();
-}
-
-// static
bool NodeProperties::HasInstanceTypeWitness(JSHeapBroker* broker,
Node* receiver, Node* effect,
InstanceType instance_type) {
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index df50a1d90a..0a04c52b5d 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -155,7 +155,6 @@ class V8_EXPORT_PRIVATE NodeProperties final {
JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return);
- static MaybeHandle<Map> GetMapWitness(JSHeapBroker* broker, Node* node);
static bool HasInstanceTypeWitness(JSHeapBroker* broker, Node* receiver,
Node* effect, InstanceType instance_type);
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 3576c9b589..1da13e5786 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -25,7 +25,7 @@ void Node::OutOfLineInputs::ExtractFrom(Use* old_use_ptr, Node** old_input_ptr,
// Extract the inputs from the old use and input pointers and copy them
// to this out-of-line-storage.
Use* new_use_ptr = reinterpret_cast<Use*>(this) - 1;
- Node** new_input_ptr = inputs_;
+ Node** new_input_ptr = inputs();
for (int current = 0; current < count; current++) {
new_use_ptr->bit_field_ =
Use::InputIndexField::encode(current) | Use::InlineField::encode(false);
@@ -72,20 +72,21 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
has_extensible_inputs ? input_count + kMaxInlineCapacity : input_count;
OutOfLineInputs* outline = OutOfLineInputs::New(zone, capacity);
- // Allocate node.
- void* node_buffer = zone->New(sizeof(Node));
+ // Allocate node, with space for OutOfLineInputs pointer.
+ void* node_buffer = zone->New(sizeof(Node) + sizeof(OutOfLineInputs*));
node = new (node_buffer) Node(id, op, kOutlineMarker, 0);
- node->inputs_.outline_ = outline;
+ node->set_outline_inputs(outline);
outline->node_ = node;
outline->count_ = input_count;
- input_ptr = outline->inputs_;
+ input_ptr = outline->inputs();
use_ptr = reinterpret_cast<Use*>(outline);
is_inline = false;
} else {
- // Allocate node with inline inputs.
- int capacity = input_count;
+ // Allocate node with inline inputs. Capacity must be at least 1 so that
+ // an OutOfLineInputs pointer can be stored when inputs are added later.
+ int capacity = std::max(1, input_count);
if (has_extensible_inputs) {
const int max = kMaxInlineCapacity;
capacity = std::min(input_count + 3, max);
@@ -97,7 +98,7 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
reinterpret_cast<void*>(raw_buffer + capacity * sizeof(Use));
node = new (node_buffer) Node(id, op, input_count, capacity);
- input_ptr = node->inputs_.inline_;
+ input_ptr = node->inline_inputs();
use_ptr = reinterpret_cast<Use*>(node);
is_inline = true;
}
@@ -119,8 +120,8 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
Node* Node::Clone(Zone* zone, NodeId id, const Node* node) {
int const input_count = node->InputCount();
Node* const* const inputs = node->has_inline_inputs()
- ? node->inputs_.inline_
- : node->inputs_.outline_->inputs_;
+ ? node->inline_inputs()
+ : node->outline_inputs()->inputs();
Node* const clone = New(zone, id, node->op(), input_count, inputs, false);
clone->set_type(node->type());
return clone;
@@ -158,16 +159,16 @@ void Node::AppendInput(Zone* zone, Node* new_to) {
outline->node_ = this;
outline->ExtractFrom(GetUsePtr(0), GetInputPtr(0), input_count);
bit_field_ = InlineCountField::update(bit_field_, kOutlineMarker);
- inputs_.outline_ = outline;
+ set_outline_inputs(outline);
} else {
// Use the current out-of-line inputs.
- outline = inputs_.outline_;
+ outline = outline_inputs();
if (input_count >= outline->capacity_) {
// Out of space in the out-of-line inputs.
outline = OutOfLineInputs::New(zone, input_count * 2 + 3);
outline->node_ = this;
outline->ExtractFrom(GetUsePtr(0), GetInputPtr(0), input_count);
- inputs_.outline_ = outline;
+ set_outline_inputs(outline);
}
}
outline->count_++;
@@ -247,7 +248,7 @@ void Node::TrimInputCount(int new_input_count) {
if (has_inline_inputs()) {
bit_field_ = InlineCountField::update(bit_field_, new_input_count);
} else {
- inputs_.outline_->count_ = new_input_count;
+ outline_inputs()->count_ = new_input_count;
}
}
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 2ddd33ff31..b4d92e895a 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -60,7 +60,7 @@ class V8_EXPORT_PRIVATE Node final {
int InputCount() const {
return has_inline_inputs() ? InlineCountField::decode(bit_field_)
- : inputs_.outline_->count_;
+ : outline_inputs()->count_;
}
#ifdef DEBUG
@@ -170,7 +170,9 @@ class V8_EXPORT_PRIVATE Node final {
Node* node_;
int count_;
int capacity_;
- Node* inputs_[1];
+
+ // Inputs are allocated right behind the OutOfLineInputs instance.
+ inline Node** inputs();
static OutOfLineInputs* New(Zone* zone, int capacity);
void ExtractFrom(Use* use_ptr, Node** input_ptr, int count);
@@ -189,8 +191,8 @@ class V8_EXPORT_PRIVATE Node final {
int index = input_index();
Use* start = this + 1 + index;
Node** inputs = is_inline_use()
- ? reinterpret_cast<Node*>(start)->inputs_.inline_
- : reinterpret_cast<OutOfLineInputs*>(start)->inputs_;
+ ? reinterpret_cast<Node*>(start)->inline_inputs()
+ : reinterpret_cast<OutOfLineInputs*>(start)->inputs();
return &inputs[index];
}
@@ -239,17 +241,29 @@ class V8_EXPORT_PRIVATE Node final {
Node(NodeId id, const Operator* op, int inline_count, int inline_capacity);
+ inline Address inputs_location() const;
+
+ Node** inline_inputs() const {
+ return reinterpret_cast<Node**>(inputs_location());
+ }
+ OutOfLineInputs* outline_inputs() const {
+ return *reinterpret_cast<OutOfLineInputs**>(inputs_location());
+ }
+ void set_outline_inputs(OutOfLineInputs* outline) {
+ *reinterpret_cast<OutOfLineInputs**>(inputs_location()) = outline;
+ }
+
Node* const* GetInputPtrConst(int input_index) const {
- return has_inline_inputs() ? &(inputs_.inline_[input_index])
- : &inputs_.outline_->inputs_[input_index];
+ return has_inline_inputs() ? &(inline_inputs()[input_index])
+ : &(outline_inputs()->inputs()[input_index]);
}
Node** GetInputPtr(int input_index) {
- return has_inline_inputs() ? &(inputs_.inline_[input_index])
- : &inputs_.outline_->inputs_[input_index];
+ return has_inline_inputs() ? &(inline_inputs()[input_index])
+ : &(outline_inputs()->inputs()[input_index]);
}
Use* GetUsePtr(int input_index) {
Use* ptr = has_inline_inputs() ? reinterpret_cast<Use*>(this)
- : reinterpret_cast<Use*>(inputs_.outline_);
+ : reinterpret_cast<Use*>(outline_inputs());
return &ptr[-1 - input_index];
}
@@ -287,11 +301,6 @@ class V8_EXPORT_PRIVATE Node final {
Mark mark_;
uint32_t bit_field_;
Use* first_use_;
- union {
- // Inline storage for inputs or out-of-line storage.
- Node* inline_[1];
- OutOfLineInputs* outline_;
- } inputs_;
friend class Edge;
friend class NodeMarkerBase;
@@ -300,6 +309,14 @@ class V8_EXPORT_PRIVATE Node final {
DISALLOW_COPY_AND_ASSIGN(Node);
};
+Address Node::inputs_location() const {
+ return reinterpret_cast<Address>(this) + sizeof(Node);
+}
+
+Node** Node::OutOfLineInputs::inputs() {
+ return reinterpret_cast<Node**>(reinterpret_cast<Address>(this) +
+ sizeof(Node::OutOfLineInputs));
+}
std::ostream& operator<<(std::ostream& os, const Node& n);
@@ -402,21 +419,21 @@ bool Node::IsDead() const {
Node::InputEdges Node::input_edges() {
int inline_count = InlineCountField::decode(bit_field_);
if (inline_count != kOutlineMarker) {
- return InputEdges(inputs_.inline_, reinterpret_cast<Use*>(this) - 1,
+ return InputEdges(inline_inputs(), reinterpret_cast<Use*>(this) - 1,
inline_count);
} else {
- return InputEdges(inputs_.outline_->inputs_,
- reinterpret_cast<Use*>(inputs_.outline_) - 1,
- inputs_.outline_->count_);
+ return InputEdges(outline_inputs()->inputs(),
+ reinterpret_cast<Use*>(outline_inputs()) - 1,
+ outline_inputs()->count_);
}
}
Node::Inputs Node::inputs() const {
int inline_count = InlineCountField::decode(bit_field_);
if (inline_count != kOutlineMarker) {
- return Inputs(inputs_.inline_, inline_count);
+ return Inputs(inline_inputs(), inline_count);
} else {
- return Inputs(inputs_.outline_->inputs_, inputs_.outline_->count_);
+ return Inputs(outline_inputs()->inputs(), outline_inputs()->count_);
}
}
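
The union of inline storage and an out-of-line pointer is gone: inputs now live directly behind the Node object, and inputs_location() is simply this-plus-sizeof(Node). That is also why inline capacity is now at least 1, so a later-added OutOfLineInputs pointer always has a slot to land in. A self-contained sketch of the trailing-allocation pattern, using plain malloc instead of V8's Zone:

#include <cstdlib>
#include <cstring>
#include <iostream>
#include <new>

struct Obj {
  int count;

  // Payload lives immediately behind the object, like Node::inputs_location()
  // returning this-plus-sizeof(Node).
  int* payload() { return reinterpret_cast<int*>(this + 1); }

  // One allocation covers the header plus the trailing payload slots.
  static Obj* New(int count) {
    void* raw = std::malloc(sizeof(Obj) + count * sizeof(int));
    Obj* obj = new (raw) Obj{count};
    std::memset(obj->payload(), 0, count * sizeof(int));
    return obj;
  }
};

int main() {
  Obj* obj = Obj::New(3);
  obj->payload()[2] = 7;
  std::cout << obj->payload()[2] << "\n";  // 7
  std::free(obj);
}
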
diff --git a/deps/v8/src/compiler/opcodes.cc b/deps/v8/src/compiler/opcodes.cc
index 2a8e01a26d..c465422d34 100644
--- a/deps/v8/src/compiler/opcodes.cc
+++ b/deps/v8/src/compiler/opcodes.cc
@@ -26,6 +26,8 @@ char const* const kMnemonics[] = {
// static
char const* IrOpcode::Mnemonic(Value value) {
+ DCHECK_LE(0, static_cast<int>(value));
+ DCHECK_LE(static_cast<int>(value), IrOpcode::Value::kLast);
size_t const n = std::min<size_t>(value, arraysize(kMnemonics) - 1);
return kMnemonics[n];
}
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 706248bd04..ee2773fdc8 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -402,10 +402,13 @@
V(LoadFieldByIndex) \
V(LoadField) \
V(LoadElement) \
+ V(LoadMessage) \
V(LoadTypedElement) \
V(LoadDataViewElement) \
+ V(LoadStackArgument) \
V(StoreField) \
V(StoreElement) \
+ V(StoreMessage) \
V(StoreTypedElement) \
V(StoreDataViewElement) \
V(StoreSignedSmallElement) \
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 3a4212e3d5..30b4d6efca 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -136,7 +136,7 @@ class PipelineData {
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
broker_ = new (info_->zone()) JSHeapBroker(isolate_, info_->zone());
dependencies_ =
- new (info_->zone()) CompilationDependencies(isolate_, info_->zone());
+ new (info_->zone()) CompilationDependencies(broker_, info_->zone());
}
// For WebAssembly compile entry point.
@@ -535,7 +535,7 @@ class PipelineImpl final {
std::unique_ptr<AssemblerBuffer> buffer = {});
// Step D. Run the code finalization pass.
- MaybeHandle<Code> FinalizeCode();
+ MaybeHandle<Code> FinalizeCode(bool retire_broker = true);
// Step E. Install any code dependencies.
bool CommitDependencies(Handle<Code> code);
@@ -930,6 +930,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
compilation_info()->MarkAsFunctionContextSpecializing();
}
+ if (compilation_info()->is_source_positions_enabled()) {
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(
+ isolate, compilation_info()->shared_info());
+ }
+
data_.set_start_source_position(
compilation_info()->shared_info()->StartPosition());
@@ -1603,7 +1608,7 @@ struct InstructionSelectionPhase {
? InstructionSelector::kEnableScheduling
: InstructionSelector::kDisableScheduling,
!data->isolate() || data->isolate()->serializer_enabled() ||
- data->isolate()->ShouldLoadConstantsFromRootList()
+ data->isolate()->IsGeneratingEmbeddedBuiltins()
? InstructionSelector::kDisableRootsRelativeAddressing
: InstructionSelector::kEnableRootsRelativeAddressing,
data->info()->GetPoisoningMitigationLevel(),
@@ -1716,6 +1721,14 @@ struct LocateSpillSlotsPhase {
}
};
+struct DecideSpillingModePhase {
+ static const char* phase_name() { return "decide spilling mode"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ OperandAssigner assigner(data->register_allocation_data());
+ assigner.DecideSpillingMode();
+ }
+};
struct AssignSpillSlotsPhase {
static const char* phase_name() { return "assign spill slots"; }
@@ -2197,13 +2210,14 @@ wasm::WasmCode* Pipeline::GenerateCodeForWasmNativeStub(
CodeGenerator* code_generator = pipeline.code_generator();
CodeDesc code_desc;
- code_generator->tasm()->GetCode(nullptr, &code_desc);
+ code_generator->tasm()->GetCode(
+ nullptr, &code_desc, code_generator->safepoint_table_builder(),
+ static_cast<int>(code_generator->GetHandlerTableOffset()));
wasm::WasmCode* code = native_module->AddCode(
wasm::WasmCode::kAnonymousFuncIndex, code_desc,
code_generator->frame()->GetTotalFrameSlotCount(),
- code_generator->GetSafepointTableOffset(),
- code_generator->GetHandlerTableOffset(),
+ call_descriptor->GetTaggedParameterSlots(),
code_generator->GetProtectedInstructions(),
code_generator->GetSourcePositionTable(),
static_cast<wasm::WasmCode::Kind>(wasm_kind), wasm::WasmCode::kOther);
@@ -2290,12 +2304,17 @@ MaybeHandle<Code> Pipeline::GenerateCodeForWasmHeapStub(
// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
- OptimizedCompilationInfo* info, Isolate* isolate) {
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ JSHeapBroker** out_broker) {
ZoneStats zone_stats(isolate->allocator());
std::unique_ptr<PipelineStatistics> pipeline_statistics(
CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
&zone_stats));
PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
+ if (out_broker != nullptr) {
+ *out_broker = data.broker();
+ }
+
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
@@ -2305,7 +2324,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
pipeline.AssembleCode(&linkage);
Handle<Code> code;
- if (pipeline.FinalizeCode().ToHandle(&code) &&
+ if (pipeline.FinalizeCode(out_broker == nullptr).ToHandle(&code) &&
pipeline.CommitDependencies(code)) {
return code;
}
@@ -2438,12 +2457,13 @@ void Pipeline::GenerateCodeForWasmFunction(
auto result = base::make_unique<wasm::WasmCompilationResult>();
CodeGenerator* code_generator = pipeline.code_generator();
- code_generator->tasm()->GetCode(nullptr, &result->code_desc);
+ code_generator->tasm()->GetCode(
+ nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
+ static_cast<int>(code_generator->GetHandlerTableOffset()));
result->instr_buffer = instruction_buffer->ReleaseBuffer();
result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
- result->safepoint_table_offset = code_generator->GetSafepointTableOffset();
- result->handler_table_offset = code_generator->GetHandlerTableOffset();
+ result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
result->source_positions = code_generator->GetSourcePositionTable();
result->protected_instructions = code_generator->GetProtectedInstructions();
@@ -2454,7 +2474,7 @@ void Pipeline::GenerateCodeForWasmFunction(
std::stringstream disassembler_stream;
Disassembler::Decode(
nullptr, &disassembler_stream, result->code_desc.buffer,
- result->code_desc.buffer + result->safepoint_table_offset,
+ result->code_desc.buffer + result->code_desc.instr_size,
CodeReference(&result->code_desc));
for (auto const c : disassembler_stream.str()) {
json_of << AsEscapedUC16ForJSON(c);
@@ -2703,9 +2723,9 @@ std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
return out;
}
-MaybeHandle<Code> PipelineImpl::FinalizeCode() {
+MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
PipelineData* data = this->data_;
- if (data->broker()) {
+ if (data->broker() && retire_broker) {
data->broker()->Retire();
}
Run<FinalizeCodePhase>();
@@ -2863,8 +2883,8 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<MergeSplintersPhase>();
}
+ Run<DecideSpillingModePhase>();
Run<AssignSpillSlotsPhase>();
-
Run<CommitAssignmentPhase>();
// TODO(chromium:725559): remove this check once
@@ -2875,12 +2895,13 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
}
Run<PopulateReferenceMapsPhase>();
+
Run<ConnectRangesPhase>();
+
Run<ResolveControlFlowPhase>();
if (FLAG_turbo_move_optimization) {
Run<OptimizeMovesPhase>();
}
-
Run<LocateSpillSlotsPhase>();
TraceSequence(info(), data, "after register allocation");
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 8c56911023..f5993afc8c 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -32,6 +32,7 @@ namespace compiler {
class CallDescriptor;
class Graph;
class InstructionSequence;
+class JSHeapBroker;
class MachineGraph;
class NodeOriginTable;
class Schedule;
@@ -80,8 +81,11 @@ class Pipeline : public AllStatic {
// ---------------------------------------------------------------------------
// Run the pipeline on JavaScript bytecode and generate code.
+ // If requested, hands out the heap broker, which is allocated
+ // in {info}'s zone.
static MaybeHandle<Code> GenerateCodeForTesting(
- OptimizedCompilationInfo* info, Isolate* isolate);
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ JSHeapBroker** out_broker = nullptr);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index ee06867cd1..9f1d4d79cf 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -220,7 +220,11 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
MapRef map(broker(),
handle(it.GetHolder<HeapObject>()->map(), isolate()));
map.SerializeOwnDescriptors(); // TODO(neis): Remove later.
- dependencies()->DependOnFieldType(map, it.GetFieldDescriptorIndex());
+ if (dependencies()->DependOnFieldConstness(
+ map, it.GetFieldDescriptorIndex()) !=
+ PropertyConstness::kConst) {
+ return nullptr;
+ }
}
return value;
}
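
The guard above replaces a plain DependOnFieldType with a constness query: the constant-field fast path is only taken when the dependency system can commit to PropertyConstness::kConst, and returns nullptr (falling back to a regular load) otherwise. A toy version of that bail-out shape, with invented stand-ins for the dependency call:

#include <iostream>
#include <optional>

enum class PropertyConstness { kMutable, kConst };

// Invented stand-in: recording the dependency yields the constness the
// optimized code may assume (violations trigger a deopt in V8).
PropertyConstness DependOnFieldConstness(bool field_ever_reassigned) {
  return field_ever_reassigned ? PropertyConstness::kMutable
                               : PropertyConstness::kConst;
}

// Mirrors the new guard in TryBuildLoadConstantDataField: embed the field's
// current value only if the field may be treated as const; otherwise bail
// out so the caller emits an ordinary field load.
std::optional<int> TryBuildConstantLoad(int current_value,
                                        bool field_ever_reassigned) {
  if (DependOnFieldConstness(field_ever_reassigned) !=
      PropertyConstness::kConst) {
    return std::nullopt;
  }
  return current_value;
}

int main() {
  std::cout << TryBuildConstantLoad(7, false).value_or(-1) << "\n";  // 7
  std::cout << TryBuildConstantLoad(7, true).value_or(-1) << "\n";   // -1
}
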
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 73a7701926..190b42ba24 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -564,7 +564,7 @@ void RawMachineAssembler::Unreachable() {
current_block_ = nullptr;
}
-void RawMachineAssembler::Comment(std::string msg) {
+void RawMachineAssembler::Comment(const std::string& msg) {
size_t length = msg.length() + 1;
char* zone_buffer = zone()->NewArray<char>(length);
MemCopy(zone_buffer, msg.c_str(), length);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 11aefe570c..1c7ba465ee 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -236,12 +236,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
DCHECK_NULL(value_high); \
return AddNode(machine()->Word32Atomic##name(rep), base, index, value); \
}
- ATOMIC_FUNCTION(Exchange);
- ATOMIC_FUNCTION(Add);
- ATOMIC_FUNCTION(Sub);
- ATOMIC_FUNCTION(And);
- ATOMIC_FUNCTION(Or);
- ATOMIC_FUNCTION(Xor);
+ ATOMIC_FUNCTION(Exchange)
+ ATOMIC_FUNCTION(Add)
+ ATOMIC_FUNCTION(Sub)
+ ATOMIC_FUNCTION(And)
+ ATOMIC_FUNCTION(Or)
+ ATOMIC_FUNCTION(Xor)
#undef ATOMIC_FUNCTION
#undef VALUE_HALVES
@@ -496,18 +496,18 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
: prefix##32##name(a, b); \
}
- INTPTR_BINOP(Int, Add);
- INTPTR_BINOP(Int, AddWithOverflow);
- INTPTR_BINOP(Int, Sub);
- INTPTR_BINOP(Int, SubWithOverflow);
- INTPTR_BINOP(Int, Mul);
- INTPTR_BINOP(Int, Div);
- INTPTR_BINOP(Int, LessThan);
- INTPTR_BINOP(Int, LessThanOrEqual);
- INTPTR_BINOP(Word, Equal);
- INTPTR_BINOP(Word, NotEqual);
- INTPTR_BINOP(Int, GreaterThanOrEqual);
- INTPTR_BINOP(Int, GreaterThan);
+ INTPTR_BINOP(Int, Add)
+ INTPTR_BINOP(Int, AddWithOverflow)
+ INTPTR_BINOP(Int, Sub)
+ INTPTR_BINOP(Int, SubWithOverflow)
+ INTPTR_BINOP(Int, Mul)
+ INTPTR_BINOP(Int, Div)
+ INTPTR_BINOP(Int, LessThan)
+ INTPTR_BINOP(Int, LessThanOrEqual)
+ INTPTR_BINOP(Word, Equal)
+ INTPTR_BINOP(Word, NotEqual)
+ INTPTR_BINOP(Int, GreaterThanOrEqual)
+ INTPTR_BINOP(Int, GreaterThan)
#undef INTPTR_BINOP
@@ -517,10 +517,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
: prefix##32##name(a, b); \
}
- UINTPTR_BINOP(Uint, LessThan);
- UINTPTR_BINOP(Uint, LessThanOrEqual);
- UINTPTR_BINOP(Uint, GreaterThanOrEqual);
- UINTPTR_BINOP(Uint, GreaterThan);
+ UINTPTR_BINOP(Uint, LessThan)
+ UINTPTR_BINOP(Uint, LessThanOrEqual)
+ UINTPTR_BINOP(Uint, GreaterThanOrEqual)
+ UINTPTR_BINOP(Uint, GreaterThan)
#undef UINTPTR_BINOP
@@ -941,7 +941,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void DebugAbort(Node* message);
void DebugBreak();
void Unreachable();
- void Comment(std::string msg);
+ void Comment(const std::string& msg);
#if DEBUG
void Bind(RawMachineLabel* label, AssemblerDebugInfo info);
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index dfb0ff3f7b..70b0d14e33 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -467,9 +467,13 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
} else if (IsWord(output_rep)) {
if (output_type.Is(Type::Signed31())) {
op = simplified()->ChangeInt31ToTaggedSigned();
- } else if (output_type.Is(Type::Signed32())) {
+ } else if (output_type.Is(Type::Signed32()) ||
+ (output_type.Is(Type::Signed32OrMinusZero()) &&
+ truncation.IdentifiesZeroAndMinusZero())) {
op = simplified()->ChangeInt32ToTagged();
} else if (output_type.Is(Type::Unsigned32()) ||
+ (output_type.Is(Type::Unsigned32OrMinusZero()) &&
+ truncation.IdentifiesZeroAndMinusZero()) ||
truncation.IsUsedAsWord32()) {
// Either the output is uint32 or the uses only care about the
// low 32 bits (so we can pick uint32 safely).
@@ -520,7 +524,9 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
Type::Unsigned32())) { // float64 -> uint32 -> tagged
node = InsertChangeFloat64ToUint32(node);
op = simplified()->ChangeUint32ToTagged();
- } else if (output_type.Is(Type::Number())) {
+ } else if (output_type.Is(Type::Number()) ||
+ (output_type.Is(Type::NumberOrOddball()) &&
+ truncation.IsUsedAsFloat64())) {
op = simplified()->ChangeFloat64ToTagged(
output_type.Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
@@ -637,7 +643,17 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
op = machine()->ChangeUint32ToFloat64();
}
} else if (output_rep == MachineRepresentation::kBit) {
- op = machine()->ChangeUint32ToFloat64();
+ CHECK(output_type.Is(Type::Boolean()));
+ if (use_info.truncation().IsUsedAsFloat64()) {
+ op = machine()->ChangeUint32ToFloat64();
+ } else {
+ CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
+ Node* unreachable =
+ InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotAHeapNumber);
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kFloat64),
+ unreachable);
+ }
} else if (output_rep == MachineRepresentation::kTagged ||
output_rep == MachineRepresentation::kTaggedSigned ||
output_rep == MachineRepresentation::kTaggedPointer) {
@@ -978,7 +994,13 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kWord64), node);
} else if (output_rep == MachineRepresentation::kBit) {
- return node; // Sloppy comparison -> word64
+ CHECK(output_type.Is(Type::Boolean()));
+ CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
+ Node* unreachable =
+ InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi);
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
+ unreachable);
} else if (IsWord(output_rep)) {
if (output_type.Is(Type::Unsigned32())) {
op = machine()->ChangeUint32ToUint64();
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index b6e0d279de..6e9f3f5978 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -188,9 +188,10 @@ class UseInfo {
}
// Possibly deoptimizing conversions.
- static UseInfo CheckedHeapObjectAsTaggedPointer() {
+ static UseInfo CheckedHeapObjectAsTaggedPointer(
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
- TypeCheckKind::kHeapObject);
+ TypeCheckKind::kHeapObject, feedback);
}
static UseInfo CheckedSignedSmallAsTaggedSigned(
const VectorSlotPair& feedback,
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 0d761f82b3..6c2eaed7bc 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -4,38 +4,49 @@
#include "src/compiler/serializer-for-background-compilation.h"
+#include <sstream>
+
#include "src/compiler/js-heap-broker.h"
#include "src/handles-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects/code.h"
#include "src/objects/shared-function-info-inl.h"
+#include "src/vector-slot-pair.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
+using BytecodeArrayIterator = interpreter::BytecodeArrayIterator;
+
+CompilationSubject::CompilationSubject(Handle<JSFunction> closure,
+ Isolate* isolate)
+ : blueprint_{handle(closure->shared(), isolate),
+ handle(closure->feedback_vector(), isolate)},
+ closure_(closure) {
+ CHECK(closure->has_feedback_vector());
+}
+
Hints::Hints(Zone* zone)
: constants_(zone), maps_(zone), function_blueprints_(zone) {}
-const ZoneVector<Handle<Object>>& Hints::constants() const {
- return constants_;
-}
+const ConstantsSet& Hints::constants() const { return constants_; }
-const ZoneVector<Handle<Map>>& Hints::maps() const { return maps_; }
+const MapsSet& Hints::maps() const { return maps_; }
-const ZoneVector<FunctionBlueprint>& Hints::function_blueprints() const {
+const BlueprintsSet& Hints::function_blueprints() const {
return function_blueprints_;
}
void Hints::AddConstant(Handle<Object> constant) {
- constants_.push_back(constant);
+ constants_.insert(constant);
}
-void Hints::AddMap(Handle<Map> map) { maps_.push_back(map); }
+void Hints::AddMap(Handle<Map> map) { maps_.insert(map); }
void Hints::AddFunctionBlueprint(FunctionBlueprint function_blueprint) {
- function_blueprints_.push_back(function_blueprint);
+ function_blueprints_.insert(function_blueprint);
}
void Hints::Add(const Hints& other) {
@@ -44,23 +55,53 @@ void Hints::Add(const Hints& other) {
for (auto x : other.function_blueprints()) AddFunctionBlueprint(x);
}
+bool Hints::IsEmpty() const {
+ return constants().empty() && maps().empty() && function_blueprints().empty();
+}
+
+std::ostream& operator<<(std::ostream& out,
+ const FunctionBlueprint& blueprint) {
+ out << Brief(*blueprint.shared) << std::endl;
+ out << Brief(*blueprint.feedback_vector) << std::endl;
+ return out;
+}
+
+std::ostream& operator<<(std::ostream& out, const Hints& hints) {
+ !hints.constants().empty() &&
+ out << "\t\tConstants (" << hints.constants().size() << "):" << std::endl;
+ for (auto x : hints.constants()) out << Brief(*x) << std::endl;
+ !hints.maps().empty() && out << "\t\tMaps (" << hints.maps().size()
+ << "):" << std::endl;
+ for (auto x : hints.maps()) out << Brief(*x) << std::endl;
+ !hints.function_blueprints().empty() &&
+ out << "\t\tBlueprints (" << hints.function_blueprints().size()
+ << "):" << std::endl;
+ for (auto x : hints.function_blueprints()) out << x;
+ return out;
+}
+
void Hints::Clear() {
constants_.clear();
maps_.clear();
function_blueprints_.clear();
+ DCHECK(IsEmpty());
}
class SerializerForBackgroundCompilation::Environment : public ZoneObject {
public:
- explicit Environment(Zone* zone, Isolate* isolate, int register_count,
- int parameter_count);
+ Environment(Zone* zone, CompilationSubject function);
+ Environment(Zone* zone, Isolate* isolate, CompilationSubject function,
+ base::Optional<Hints> new_target, const HintsVector& arguments);
- Environment(SerializerForBackgroundCompilation* serializer, Isolate* isolate,
- int register_count, int parameter_count,
- const HintsVector& arguments);
+ // When control flow bytecodes are encountered, e.g. a conditional jump,
+ // the current environment needs to be stashed together with the target jump
+ // address. Later, when this target bytecode is handled, the stashed
+ // environment will be merged into the current one.
+ void Merge(Environment* other);
- int parameter_count() const { return parameter_count_; }
- int register_count() const { return register_count_; }
+ friend std::ostream& operator<<(std::ostream& out, const Environment& env);
+
+ FunctionBlueprint function() const { return function_; }
Hints& accumulator_hints() { return environment_hints_[accumulator_index()]; }
Hints& register_hints(interpreter::Register reg) {
@@ -70,30 +111,38 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
}
Hints& return_value_hints() { return return_value_hints_; }
- void ClearAccumulatorAndRegisterHints() {
- for (auto& hints : environment_hints_) hints.Clear();
+ // Clears all hints except those for the return value and the closure.
+ void ClearEphemeralHints() {
+ DCHECK_EQ(environment_hints_.size(), function_closure_index() + 1);
+ for (int i = 0; i < function_closure_index(); ++i) {
+ environment_hints_[i].Clear();
+ }
}
- private:
- explicit Environment(Zone* zone)
- : register_count_(0),
- parameter_count_(0),
- environment_hints_(zone),
- return_value_hints_(zone) {}
- Zone* zone() const { return zone_; }
+ // Appends the hints for the given register range to {dst} (in order).
+ void ExportRegisterHints(interpreter::Register first, size_t count,
+ HintsVector& dst);
+ private:
int RegisterToLocalIndex(interpreter::Register reg) const;
- Zone* zone_;
+ Zone* zone() const { return zone_; }
+ int parameter_count() const { return parameter_count_; }
+ int register_count() const { return register_count_; }
+
+ Zone* const zone_;
+ // Instead of storing the blueprint here, we could extract it from the
+ // (closure) hints but that would be cumbersome.
+ FunctionBlueprint const function_;
+ int const parameter_count_;
+ int const register_count_;
// environment_hints_ contains hints for the contents of the registers,
// the accumulator and the parameters. The layout is as follows:
- // [ receiver | parameters | registers | accumulator | context | closure ]
- const int register_count_;
- const int parameter_count_;
+ // [ parameters | registers | accumulator | context | closure ]
+ // The first parameter is the receiver.
HintsVector environment_hints_;
- int register_base() const { return parameter_count_; }
- int accumulator_index() const { return register_base() + register_count_; }
+ int accumulator_index() const { return parameter_count() + register_count(); }
int current_context_index() const { return accumulator_index() + 1; }
int function_closure_index() const { return current_context_index() + 1; }
int environment_hints_size() const { return function_closure_index() + 1; }
@@ -102,91 +151,145 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
};
SerializerForBackgroundCompilation::Environment::Environment(
- Zone* zone, Isolate* isolate, int register_count, int parameter_count)
+ Zone* zone, CompilationSubject function)
: zone_(zone),
- register_count_(register_count),
- parameter_count_(parameter_count),
+ function_(function.blueprint()),
+ parameter_count_(function_.shared->GetBytecodeArray()->parameter_count()),
+ register_count_(function_.shared->GetBytecodeArray()->register_count()),
environment_hints_(environment_hints_size(), Hints(zone), zone),
- return_value_hints_(zone) {}
+ return_value_hints_(zone) {
+ Handle<JSFunction> closure;
+ if (function.closure().ToHandle(&closure)) {
+ environment_hints_[function_closure_index()].AddConstant(closure);
+ } else {
+ environment_hints_[function_closure_index()].AddFunctionBlueprint(
+ function.blueprint());
+ }
+}
SerializerForBackgroundCompilation::Environment::Environment(
- SerializerForBackgroundCompilation* serializer, Isolate* isolate,
- int register_count, int parameter_count, const HintsVector& arguments)
- : Environment(serializer->zone(), isolate, register_count,
- parameter_count) {
- size_t param_count = static_cast<size_t>(parameter_count);
-
+ Zone* zone, Isolate* isolate, CompilationSubject function,
+ base::Optional<Hints> new_target, const HintsVector& arguments)
+ : Environment(zone, function) {
// Copy the hints for the actually passed arguments, at most up to
// the parameter_count.
+ size_t param_count = static_cast<size_t>(parameter_count());
for (size_t i = 0; i < std::min(arguments.size(), param_count); ++i) {
environment_hints_[i] = arguments[i];
}
- Hints undefined_hint(serializer->zone());
- undefined_hint.AddConstant(
- serializer->broker()->isolate()->factory()->undefined_value());
// Pad the rest with "undefined".
+ Hints undefined_hint(zone);
+ undefined_hint.AddConstant(isolate->factory()->undefined_value());
for (size_t i = arguments.size(); i < param_count; ++i) {
environment_hints_[i] = undefined_hint;
}
+
+ interpreter::Register new_target_reg =
+ function_.shared->GetBytecodeArray()
+ ->incoming_new_target_or_generator_register();
+ if (new_target_reg.is_valid()) {
+ DCHECK(register_hints(new_target_reg).IsEmpty());
+ if (new_target.has_value()) {
+ register_hints(new_target_reg).Add(*new_target);
+ }
+ }
+}
+
+void SerializerForBackgroundCompilation::Environment::Merge(
+ Environment* other) {
+ // The source and the target are expected to have the same layout,
+ // which is enforced here.
+ CHECK_EQ(parameter_count(), other->parameter_count());
+ CHECK_EQ(register_count(), other->register_count());
+ CHECK_EQ(environment_hints_size(), other->environment_hints_size());
+
+ for (size_t i = 0; i < environment_hints_.size(); ++i) {
+ environment_hints_[i].Add(other->environment_hints_[i]);
+ }
+ return_value_hints_.Add(other->return_value_hints_);
+}
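
Merge is the join operation behind the new control-flow handling: when a stashed environment for a jump target meets the current one, hints are unioned slot by slot, which is also why Hints moved from ZoneVector to set-based storage (repeated merges must not duplicate entries). A set-union toy model of the same join:

#include <cstdlib>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Toy model: each environment slot holds a set of "hints".
using Hints = std::set<std::string>;
using Environment = std::vector<Hints>;

// Mirrors Environment::Merge: layouts must agree at a control-flow join
// (CHECK-enforced in V8), then hints are unioned element-wise.
void Merge(Environment& into, const Environment& other) {
  if (into.size() != other.size()) std::abort();
  for (size_t i = 0; i < into.size(); ++i) {
    into[i].insert(other[i].begin(), other[i].end());
  }
}

int main() {
  Environment then_branch = {{"true"}, {"f"}};
  Environment else_branch = {{"false"}, {"f"}};  // same closure hint twice
  Merge(then_branch, else_branch);
  std::cout << then_branch[0].size() << " " << then_branch[1].size() << "\n";
  // Prints "2 1": the duplicate hint collapses because Hints is a set.
}
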
+
+std::ostream& operator<<(
+ std::ostream& out,
+ const SerializerForBackgroundCompilation::Environment& env) {
+ std::ostringstream output_stream;
+ output_stream << "Function ";
+ env.function_.shared->Name()->Print(output_stream);
+ output_stream << "Parameter count: " << env.parameter_count() << std::endl;
+ output_stream << "Register count: " << env.register_count() << std::endl;
+
+ output_stream << "Hints (" << env.environment_hints_.size() << "):\n";
+ for (size_t i = 0; i < env.environment_hints_.size(); ++i) {
+ if (env.environment_hints_[i].IsEmpty()) continue;
+
+ output_stream << "\tSlot " << i << std::endl;
+ output_stream << env.environment_hints_[i];
+ }
+ output_stream << "Return value:\n";
+ output_stream << env.return_value_hints_
+ << "===========================================\n";
+
+ out << output_stream.str();
+ return out;
}
int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex(
interpreter::Register reg) const {
- // TODO(mslekova): We also want to gather hints for the context and
- // we already have data about the closure that we should record.
+ // TODO(mslekova): We also want to gather hints for the context.
if (reg.is_current_context()) return current_context_index();
if (reg.is_function_closure()) return function_closure_index();
if (reg.is_parameter()) {
return reg.ToParameterIndex(parameter_count());
} else {
- return register_base() + reg.index();
+ return parameter_count() + reg.index();
}
}
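
With the receiver counted as parameter 0, the flat hints array is indexed as [ parameters | registers | accumulator | context | closure ]. A tiny sketch of that index arithmetic, using the diff's accessor names with illustrative counts:

#include <cassert>

// environment_hints_ index layout (the receiver is parameter 0):
// [ parameters | registers | accumulator | context | closure ]
struct Layout {
  int parameter_count;
  int register_count;
  int register_index(int r) const { return parameter_count + r; }
  int accumulator_index() const { return parameter_count + register_count; }
  int current_context_index() const { return accumulator_index() + 1; }
  int function_closure_index() const { return current_context_index() + 1; }
  int size() const { return function_closure_index() + 1; }
};

int main() {
  Layout env{/*parameter_count=*/3, /*register_count=*/4};
  assert(env.register_index(2) == 5);   // parameters occupy slots 0..2
  assert(env.accumulator_index() == 7);
  assert(env.current_context_index() == 8);
  assert(env.function_closure_index() == 9);
  assert(env.size() == 10);             // matches environment_hints_size()
}
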
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
- JSHeapBroker* broker, Zone* zone, Handle<JSFunction> function)
+ JSHeapBroker* broker, Zone* zone, Handle<JSFunction> closure)
: broker_(broker),
zone_(zone),
- shared_(function->shared(), broker->isolate()),
- feedback_(function->feedback_vector(), broker->isolate()),
- environment_(new (zone) Environment(
- zone, broker_->isolate(),
- shared_->GetBytecodeArray()->register_count(),
- shared_->GetBytecodeArray()->parameter_count())) {
- JSFunctionRef(broker, function).Serialize();
+ environment_(new (zone) Environment(zone, {closure, broker_->isolate()})),
+ stashed_environments_(zone) {
+ JSFunctionRef(broker, closure).Serialize();
}
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
- JSHeapBroker* broker, Zone* zone, FunctionBlueprint function,
- const HintsVector& arguments)
+ JSHeapBroker* broker, Zone* zone, CompilationSubject function,
+ base::Optional<Hints> new_target, const HintsVector& arguments)
: broker_(broker),
zone_(zone),
- shared_(function.shared),
- feedback_(function.feedback),
- environment_(new (zone) Environment(
- this, broker->isolate(),
- shared_->GetBytecodeArray()->register_count(),
- shared_->GetBytecodeArray()->parameter_count(), arguments)) {}
+ environment_(new (zone) Environment(zone, broker_->isolate(), function,
+ new_target, arguments)),
+ stashed_environments_(zone) {
+ Handle<JSFunction> closure;
+ if (function.closure().ToHandle(&closure)) {
+ JSFunctionRef(broker, closure).Serialize();
+ }
+}
Hints SerializerForBackgroundCompilation::Run() {
- SharedFunctionInfoRef shared(broker(), shared_);
- FeedbackVectorRef feedback(broker(), feedback_);
- if (shared.IsSerializedForCompilation(feedback)) {
+ SharedFunctionInfoRef shared(broker(), environment()->function().shared);
+ FeedbackVectorRef feedback_vector(broker(),
+ environment()->function().feedback_vector);
+ if (shared.IsSerializedForCompilation(feedback_vector)) {
return Hints(zone());
}
- shared.SetSerializedForCompilation(feedback);
- feedback.SerializeSlots();
+ shared.SetSerializedForCompilation(feedback_vector);
+ feedback_vector.SerializeSlots();
TraverseBytecode();
return environment()->return_value_hints();
}
void SerializerForBackgroundCompilation::TraverseBytecode() {
BytecodeArrayRef bytecode_array(
- broker(), handle(shared_->GetBytecodeArray(), broker()->isolate()));
- interpreter::BytecodeArrayIterator iterator(bytecode_array.object());
+ broker(), handle(environment()->function().shared->GetBytecodeArray(),
+ broker()->isolate()));
+ BytecodeArrayIterator iterator(bytecode_array.object());
for (; !iterator.done(); iterator.Advance()) {
+ MergeAfterJump(&iterator);
switch (iterator.current_bytecode()) {
#define DEFINE_BYTECODE_CASE(name) \
case interpreter::Bytecode::k##name: \
@@ -195,7 +298,7 @@ void SerializerForBackgroundCompilation::TraverseBytecode() {
SUPPORTED_BYTECODE_LIST(DEFINE_BYTECODE_CASE)
#undef DEFINE_BYTECODE_CASE
default: {
- environment()->ClearAccumulatorAndRegisterHints();
+ environment()->ClearEphemeralHints();
break;
}
}
@@ -203,71 +306,74 @@ void SerializerForBackgroundCompilation::TraverseBytecode() {
}
void SerializerForBackgroundCompilation::VisitIllegal(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
UNREACHABLE();
}
void SerializerForBackgroundCompilation::VisitWide(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
UNREACHABLE();
}
void SerializerForBackgroundCompilation::VisitExtraWide(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
UNREACHABLE();
}
+void SerializerForBackgroundCompilation::VisitStackCheck(
+ BytecodeArrayIterator* iterator) {}
+
void SerializerForBackgroundCompilation::VisitLdaUndefined(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
broker()->isolate()->factory()->undefined_value());
}
void SerializerForBackgroundCompilation::VisitLdaNull(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
broker()->isolate()->factory()->null_value());
}
void SerializerForBackgroundCompilation::VisitLdaZero(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
handle(Smi::FromInt(0), broker()->isolate()));
}
void SerializerForBackgroundCompilation::VisitLdaSmi(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(handle(
Smi::FromInt(iterator->GetImmediateOperand(0)), broker()->isolate()));
}
void SerializerForBackgroundCompilation::VisitLdaConstant(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
handle(iterator->GetConstantForIndexOperand(0), broker()->isolate()));
}
void SerializerForBackgroundCompilation::VisitLdar(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().Add(
environment()->register_hints(iterator->GetRegisterOperand(0)));
}
void SerializerForBackgroundCompilation::VisitStar(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
interpreter::Register reg = iterator->GetRegisterOperand(0);
environment()->register_hints(reg).Clear();
environment()->register_hints(reg).Add(environment()->accumulator_hints());
}
void SerializerForBackgroundCompilation::VisitMov(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
interpreter::Register src = iterator->GetRegisterOperand(0);
interpreter::Register dst = iterator->GetRegisterOperand(1);
environment()->register_hints(dst).Clear();
@@ -275,12 +381,13 @@ void SerializerForBackgroundCompilation::VisitMov(
}
void SerializerForBackgroundCompilation::VisitCreateClosure(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
Handle<SharedFunctionInfo> shared(
SharedFunctionInfo::cast(iterator->GetConstantForIndexOperand(0)),
broker()->isolate());
- FeedbackNexus nexus(feedback_, iterator->GetSlotOperand(1));
+ FeedbackNexus nexus(environment()->function().feedback_vector,
+ iterator->GetSlotOperand(1));
Handle<Object> cell_value(nexus.GetFeedbackCell()->value(),
broker()->isolate());
@@ -292,106 +399,98 @@ void SerializerForBackgroundCompilation::VisitCreateClosure(
}
void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
ProcessCallVarArgs(iterator, ConvertReceiverMode::kNullOrUndefined);
}
void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0(
- interpreter::BytecodeArrayIterator* iterator) {
- Hints receiver(zone());
- receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
-
+ BytecodeArrayIterator* iterator) {
const Hints& callee =
environment()->register_hints(iterator->GetRegisterOperand(0));
+ FeedbackSlot slot = iterator->GetSlotOperand(1);
- HintsVector parameters(zone());
- parameters.push_back(receiver);
- ProcessCallOrConstruct(callee, parameters);
-}
-
-void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1(
- interpreter::BytecodeArrayIterator* iterator) {
Hints receiver(zone());
receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
+ HintsVector parameters({receiver}, zone());
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+}
+
+void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1(
+ BytecodeArrayIterator* iterator) {
const Hints& callee =
environment()->register_hints(iterator->GetRegisterOperand(0));
const Hints& arg0 =
environment()->register_hints(iterator->GetRegisterOperand(1));
+ FeedbackSlot slot = iterator->GetSlotOperand(2);
- HintsVector parameters(zone());
- parameters.push_back(receiver);
- parameters.push_back(arg0);
+ Hints receiver(zone());
+ receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
- ProcessCallOrConstruct(callee, parameters);
+ HintsVector parameters({receiver, arg0}, zone());
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
}
void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2(
- interpreter::BytecodeArrayIterator* iterator) {
- Hints receiver(zone());
- receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
-
+ BytecodeArrayIterator* iterator) {
const Hints& callee =
environment()->register_hints(iterator->GetRegisterOperand(0));
const Hints& arg0 =
environment()->register_hints(iterator->GetRegisterOperand(1));
const Hints& arg1 =
environment()->register_hints(iterator->GetRegisterOperand(2));
+ FeedbackSlot slot = iterator->GetSlotOperand(3);
- HintsVector parameters(zone());
- parameters.push_back(receiver);
- parameters.push_back(arg0);
- parameters.push_back(arg1);
+ Hints receiver(zone());
+ receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
- ProcessCallOrConstruct(callee, parameters);
+ HintsVector parameters({receiver, arg0, arg1}, zone());
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
}
void SerializerForBackgroundCompilation::VisitCallAnyReceiver(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny);
}
void SerializerForBackgroundCompilation::VisitCallNoFeedback(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
ProcessCallVarArgs(iterator, ConvertReceiverMode::kNullOrUndefined);
}
void SerializerForBackgroundCompilation::VisitCallProperty(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
ProcessCallVarArgs(iterator, ConvertReceiverMode::kNotNullOrUndefined);
}
void SerializerForBackgroundCompilation::VisitCallProperty0(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
const Hints& callee =
environment()->register_hints(iterator->GetRegisterOperand(0));
const Hints& receiver =
environment()->register_hints(iterator->GetRegisterOperand(1));
+ FeedbackSlot slot = iterator->GetSlotOperand(2);
- HintsVector parameters(zone());
- parameters.push_back(receiver);
-
- ProcessCallOrConstruct(callee, parameters);
+ HintsVector parameters({receiver}, zone());
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
}
void SerializerForBackgroundCompilation::VisitCallProperty1(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
const Hints& callee =
environment()->register_hints(iterator->GetRegisterOperand(0));
const Hints& receiver =
environment()->register_hints(iterator->GetRegisterOperand(1));
const Hints& arg0 =
environment()->register_hints(iterator->GetRegisterOperand(2));
+ FeedbackSlot slot = iterator->GetSlotOperand(3);
- HintsVector parameters(zone());
- parameters.push_back(receiver);
- parameters.push_back(arg0);
-
- ProcessCallOrConstruct(callee, parameters);
+ HintsVector parameters({receiver, arg0}, zone());
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
}
void SerializerForBackgroundCompilation::VisitCallProperty2(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
const Hints& callee =
environment()->register_hints(iterator->GetRegisterOperand(0));
const Hints& receiver =
@@ -400,122 +499,282 @@ void SerializerForBackgroundCompilation::VisitCallProperty2(
environment()->register_hints(iterator->GetRegisterOperand(2));
const Hints& arg1 =
environment()->register_hints(iterator->GetRegisterOperand(3));
+ FeedbackSlot slot = iterator->GetSlotOperand(4);
+
+ HintsVector parameters({receiver, arg0, arg1}, zone());
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+}
+
+void SerializerForBackgroundCompilation::VisitCallWithSpread(
+ BytecodeArrayIterator* iterator) {
+ ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny, true);
+}
+
+Hints SerializerForBackgroundCompilation::RunChildSerializer(
+ CompilationSubject function, base::Optional<Hints> new_target,
+ const HintsVector& arguments, bool with_spread) {
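+ // Run a nested serializer on the callee as if it were being compiled now;
+ // the hints it returns describe the callee's possible return values.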
+ if (with_spread) {
+ DCHECK_LT(0, arguments.size());
+ // Pad the missing arguments in case we were called with the spread operator.
+ // Drop the last actually passed argument, which contains the spread.
+ // We don't know what the spread element produces. Therefore we pretend
+ // that the function is called with the maximal number of parameters and
+ // that we have no information about the parameters that were not
+ // explicitly provided.
+ HintsVector padded = arguments;
+ padded.pop_back(); // Remove the spread element.
+ // Fill the rest with empty hints.
+ padded.resize(
+ function.blueprint().shared->GetBytecodeArray()->parameter_count(),
+ Hints(zone()));
+ return RunChildSerializer(function, new_target, padded, false);
+ }
- HintsVector parameters(zone());
- parameters.push_back(receiver);
- parameters.push_back(arg0);
- parameters.push_back(arg1);
+ if (FLAG_trace_heap_broker) {
+ std::ostream& out = broker()->Trace();
+ out << "\nWill run child serializer with environment:\n"
+ << "===========================================\n"
+ << *environment();
+ }
+
+ SerializerForBackgroundCompilation child_serializer(
+ broker(), zone(), function, new_target, arguments);
+ return child_serializer.Run();
+}
- ProcessCallOrConstruct(callee, parameters);
+namespace {
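+// Returns the heap object recorded in the given feedback slot, if the slot
+// is valid and initialized.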
+base::Optional<HeapObjectRef> GetHeapObjectFeedback(
+ JSHeapBroker* broker, Handle<FeedbackVector> feedback_vector,
+ FeedbackSlot slot) {
+ if (slot.IsInvalid()) return base::nullopt;
+ FeedbackNexus nexus(feedback_vector, slot);
+ VectorSlotPair feedback(feedback_vector, slot, nexus.ic_state());
+ DCHECK(feedback.IsValid());
+ if (nexus.IsUninitialized()) return base::nullopt;
+ HeapObject object;
+ if (!nexus.GetFeedback()->GetHeapObject(&object)) return base::nullopt;
+ return HeapObjectRef(broker, handle(object, broker->isolate()));
}
+} // namespace
void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
- const Hints& callee, const HintsVector& arguments) {
+ Hints callee, base::Optional<Hints> new_target,
+ const HintsVector& arguments, FeedbackSlot slot, bool with_spread) {
+ // Incorporate feedback into hints.
+ base::Optional<HeapObjectRef> feedback = GetHeapObjectFeedback(
+ broker(), environment()->function().feedback_vector, slot);
+ if (feedback.has_value() && feedback->map().is_callable()) {
+ if (new_target.has_value()) {
+ // Construct; feedback is new_target, which often is also the callee.
+ new_target->AddConstant(feedback->object());
+ callee.AddConstant(feedback->object());
+ } else {
+ // Call; feedback is callee.
+ callee.AddConstant(feedback->object());
+ }
+ }
+
environment()->accumulator_hints().Clear();
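+ // For each feasible callee (a constant JSFunction or a closure blueprint),
+ // run a child serializer and add its return-value hints to the accumulator.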
for (auto hint : callee.constants()) {
if (!hint->IsJSFunction()) continue;
Handle<JSFunction> function = Handle<JSFunction>::cast(hint);
- if (!function->shared()->IsInlineable()) continue;
-
- JSFunctionRef(broker(), function).Serialize();
+ if (!function->shared()->IsInlineable() || !function->has_feedback_vector())
+ continue;
- Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate());
- Handle<FeedbackVector> feedback(function->feedback_vector(),
- broker()->isolate());
- SerializerForBackgroundCompilation child_serializer(
- broker(), zone(), {shared, feedback}, arguments);
- environment()->accumulator_hints().Add(child_serializer.Run());
+ environment()->accumulator_hints().Add(RunChildSerializer(
+ {function, broker()->isolate()}, new_target, arguments, with_spread));
}
for (auto hint : callee.function_blueprints()) {
if (!hint.shared->IsInlineable()) continue;
- SerializerForBackgroundCompilation child_serializer(broker(), zone(), hint,
- arguments);
- environment()->accumulator_hints().Add(child_serializer.Run());
+ environment()->accumulator_hints().Add(RunChildSerializer(
+ CompilationSubject(hint), new_target, arguments, with_spread));
}
}
void SerializerForBackgroundCompilation::ProcessCallVarArgs(
- interpreter::BytecodeArrayIterator* iterator,
- ConvertReceiverMode receiver_mode) {
+ BytecodeArrayIterator* iterator, ConvertReceiverMode receiver_mode,
+ bool with_spread) {
const Hints& callee =
environment()->register_hints(iterator->GetRegisterOperand(0));
interpreter::Register first_reg = iterator->GetRegisterOperand(1);
int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
-
- bool first_reg_is_receiver =
- receiver_mode != ConvertReceiverMode::kNullOrUndefined;
-
- Hints receiver(zone());
- if (first_reg_is_receiver) {
- // The receiver is the first register, followed by the arguments in the
- // consecutive registers.
- receiver.Add(environment()->register_hints(first_reg));
- } else {
- // The receiver is implicit (and undefined), the arguments are in
- // consecutive registers.
- receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
+ FeedbackSlot slot;
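+ // CallNoFeedback has no feedback slot operand; {slot} stays invalid then.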
+ if (iterator->current_bytecode() != interpreter::Bytecode::kCallNoFeedback) {
+ slot = iterator->GetSlotOperand(3);
}
HintsVector arguments(zone());
- arguments.push_back(receiver);
- int arg_base = BoolToInt(first_reg_is_receiver);
- for (int i = arg_base; i < reg_count; ++i) {
- arguments.push_back(environment()->register_hints(
- interpreter::Register(first_reg.index() + i)));
+ // The receiver is either given in the first register or it is implicitly
+ // the {undefined} value.
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ Hints receiver(zone());
+ receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
+ arguments.push_back(receiver);
}
+ environment()->ExportRegisterHints(first_reg, reg_count, arguments);
- ProcessCallOrConstruct(callee, arguments);
+ ProcessCallOrConstruct(callee, base::nullopt, arguments, slot, with_spread);
}
-void SerializerForBackgroundCompilation::VisitReturn(
+void SerializerForBackgroundCompilation::ProcessJump(
+ interpreter::BytecodeArrayIterator* iterator) {
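+ // For a forward jump, stash a copy of the current environment so it can be
+ // merged at the jump target; backward jumps (e.g. JumpLoop) are ignored.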
+ int jump_target = iterator->GetJumpTargetOffset();
+ int current_offset = iterator->current_offset();
+ if (current_offset >= jump_target) return;
+
+ stashed_environments_[jump_target] = new (zone()) Environment(*environment());
+}
+
+void SerializerForBackgroundCompilation::MergeAfterJump(
interpreter::BytecodeArrayIterator* iterator) {
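+ // If some earlier jump targets the current offset, merge the stashed
+ // environment into the current one and drop the stash.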
+ int current_offset = iterator->current_offset();
+ auto stash = stashed_environments_.find(current_offset);
+ if (stash != stashed_environments_.end()) {
+ environment()->Merge(stash->second);
+ stashed_environments_.erase(stash);
+ }
+}
+
+void SerializerForBackgroundCompilation::VisitReturn(
+ BytecodeArrayIterator* iterator) {
environment()->return_value_hints().Add(environment()->accumulator_hints());
- environment()->ClearAccumulatorAndRegisterHints();
+ environment()->ClearEphemeralHints();
+}
+
+void SerializerForBackgroundCompilation::Environment::ExportRegisterHints(
+ interpreter::Register first, size_t count, HintsVector& dst) {
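+ // Append the hints of {count} consecutive registers, starting at {first},
+ // to {dst}.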
+ dst.reserve(dst.size() + count);
+ int reg_base = first.index();
+ for (int i = 0; i < static_cast<int>(count); ++i) {
+ dst.push_back(register_hints(interpreter::Register(reg_base + i)));
+ }
}
void SerializerForBackgroundCompilation::VisitConstruct(
- interpreter::BytecodeArrayIterator* iterator) {
+ BytecodeArrayIterator* iterator) {
const Hints& callee =
environment()->register_hints(iterator->GetRegisterOperand(0));
+ interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+ size_t reg_count = iterator->GetRegisterCountOperand(2);
+ FeedbackSlot slot = iterator->GetSlotOperand(3);
+ const Hints& new_target = environment()->accumulator_hints();
+
+ HintsVector arguments(zone());
+ environment()->ExportRegisterHints(first_reg, reg_count, arguments);
+ ProcessCallOrConstruct(callee, new_target, arguments, slot);
+}
+
+void SerializerForBackgroundCompilation::VisitConstructWithSpread(
+ BytecodeArrayIterator* iterator) {
+ const Hints& callee =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
interpreter::Register first_reg = iterator->GetRegisterOperand(1);
size_t reg_count = iterator->GetRegisterCountOperand(2);
+ FeedbackSlot slot = iterator->GetSlotOperand(3);
+ const Hints& new_target = environment()->accumulator_hints();
HintsVector arguments(zone());
- // Push the target (callee) of the construct.
- arguments.push_back(callee);
-
- // The function arguments are in consecutive registers.
- int arg_base = first_reg.index();
- for (int i = 0; i < static_cast<int>(reg_count); ++i) {
- arguments.push_back(
- environment()->register_hints(interpreter::Register(arg_base + i)));
+ environment()->ExportRegisterHints(first_reg, reg_count, arguments);
+
+ ProcessCallOrConstruct(callee, new_target, arguments, slot, true);
+}
+
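+// Extracts receiver maps from keyed-access feedback and records them on the
+// broker for use on the background thread. Named (non-element) accesses are
+// not yet supported here (see the TODO below).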
+void SerializerForBackgroundCompilation::ProcessFeedbackForKeyedPropertyAccess(
+ BytecodeArrayIterator* iterator) {
+ interpreter::Bytecode bytecode = iterator->current_bytecode();
+ DCHECK(bytecode == interpreter::Bytecode::kLdaKeyedProperty ||
+ bytecode == interpreter::Bytecode::kStaKeyedProperty ||
+ bytecode == interpreter::Bytecode::kStaInArrayLiteral);
+
+ if (environment()->function().feedback_vector.is_null()) return;
+
+ FeedbackSlot slot = iterator->GetSlotOperand(
+ bytecode == interpreter::Bytecode::kLdaKeyedProperty ? 1 : 2);
+ if (slot.IsInvalid()) return;
+
+ FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+ if (broker()->HasFeedback(nexus)) return;
+
+ Handle<Name> name(nexus.GetName(), broker()->isolate());
+ CHECK_IMPLIES(nexus.GetKeyType() == ELEMENT, name->is_null());
+ if (!name->is_null() || nexus.GetKeyType() == PROPERTY) {
+ CHECK_NE(bytecode, interpreter::Bytecode::kStaInArrayLiteral);
+ return; // TODO(neis): Support named access.
}
- // Push the new_target of the construct.
- arguments.push_back(environment()->accumulator_hints());
+ if (nexus.ic_state() == MEGAMORPHIC) {
+ return;
+ }
+
+ ProcessedFeedback& processed = broker()->GetOrCreateFeedback(nexus);
+ MapHandles maps;
+ nexus.ExtractMaps(&maps);
+ ProcessFeedbackMapsForElementAccess(broker()->isolate(), maps, &processed);
- ProcessCallOrConstruct(callee, arguments);
+ // TODO(neis): Have something like MapRef::SerializeForElementStore() and call
+ // it for every receiver map in case of an element store.
}
-#define DEFINE_SKIPPED_JUMP(name, ...) \
+void SerializerForBackgroundCompilation::VisitLdaKeyedProperty(
+ BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+ ProcessFeedbackForKeyedPropertyAccess(iterator);
+}
+
+void SerializerForBackgroundCompilation::VisitStaKeyedProperty(
+ BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+ ProcessFeedbackForKeyedPropertyAccess(iterator);
+}
+
+void SerializerForBackgroundCompilation::VisitStaInArrayLiteral(
+ BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+ ProcessFeedbackForKeyedPropertyAccess(iterator);
+}
+
+#define DEFINE_CLEAR_ENVIRONMENT(name, ...) \
void SerializerForBackgroundCompilation::Visit##name( \
- interpreter::BytecodeArrayIterator* iterator) { \
- environment()->ClearAccumulatorAndRegisterHints(); \
+ BytecodeArrayIterator* iterator) { \
+ environment()->ClearEphemeralHints(); \
}
-CLEAR_ENVIRONMENT_LIST(DEFINE_SKIPPED_JUMP)
-#undef DEFINE_SKIPPED_JUMP
+CLEAR_ENVIRONMENT_LIST(DEFINE_CLEAR_ENVIRONMENT)
+#undef DEFINE_CLEAR_ENVIRONMENT
#define DEFINE_CLEAR_ACCUMULATOR(name, ...) \
void SerializerForBackgroundCompilation::Visit##name( \
- interpreter::BytecodeArrayIterator* iterator) { \
+ BytecodeArrayIterator* iterator) { \
environment()->accumulator_hints().Clear(); \
}
CLEAR_ACCUMULATOR_LIST(DEFINE_CLEAR_ACCUMULATOR)
#undef DEFINE_CLEAR_ACCUMULATOR
+#define DEFINE_CONDITIONAL_JUMP(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ BytecodeArrayIterator* iterator) { \
+ ProcessJump(iterator); \
+ }
+CONDITIONAL_JUMPS_LIST(DEFINE_CONDITIONAL_JUMP)
+#undef DEFINE_CONDITIONAL_JUMP
+
+#define DEFINE_UNCONDITIONAL_JUMP(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ BytecodeArrayIterator* iterator) { \
+ ProcessJump(iterator); \
+ environment()->ClearEphemeralHints(); \
+ }
+UNCONDITIONAL_JUMPS_LIST(DEFINE_UNCONDITIONAL_JUMP)
+#undef DEFINE_UNCONDITIONAL_JUMP
+
+#define DEFINE_IGNORE(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ BytecodeArrayIterator* iterator) {}
+IGNORED_BYTECODE_LIST(DEFINE_IGNORE)
+#undef DEFINE_IGNORE
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
index 76bc675e66..b6c9bfa125 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.h
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.h
@@ -5,7 +5,10 @@
#ifndef V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
#define V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
+#include "src/base/optional.h"
#include "src/handles.h"
+#include "src/maybe-handles.h"
+#include "src/utils.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -33,37 +36,18 @@ namespace compiler {
V(CreateBlockContext) \
V(CreateFunctionContext) \
V(CreateEvalContext) \
- V(Jump) \
- V(JumpConstant) \
- V(JumpIfFalse) \
- V(JumpIfFalseConstant) \
- V(JumpIfJSReceiver) \
- V(JumpIfJSReceiverConstant) \
- V(JumpIfNotNull) \
- V(JumpIfNotNullConstant) \
- V(JumpIfNotUndefined) \
- V(JumpIfNotUndefinedConstant) \
- V(JumpIfNull) \
- V(JumpIfNullConstant) \
- V(JumpIfToBooleanTrueConstant) \
- V(JumpIfToBooleanFalseConstant) \
- V(JumpIfToBooleanTrue) \
- V(JumpIfToBooleanFalse) \
- V(JumpIfTrue) \
- V(JumpIfTrueConstant) \
- V(JumpIfUndefined) \
- V(JumpIfUndefinedConstant) \
- V(JumpLoop) \
+ V(Debugger) \
V(PushContext) \
V(PopContext) \
+ V(ResumeGenerator) \
V(ReThrow) \
V(StaContextSlot) \
V(StaCurrentContextSlot) \
+ V(SuspendGenerator) \
+ V(SwitchOnGeneratorState) \
V(Throw)
#define CLEAR_ACCUMULATOR_LIST(V) \
- V(CallWithSpread) \
- V(ConstructWithSpread) \
V(CreateEmptyObjectLiteral) \
V(CreateMappedArguments) \
V(CreateRestParameter) \
@@ -74,10 +58,52 @@ namespace compiler {
V(LdaGlobalInsideTypeof) \
V(LdaImmutableContextSlot) \
V(LdaImmutableCurrentContextSlot) \
- V(LdaKeyedProperty) \
V(LdaNamedProperty) \
V(LdaNamedPropertyNoFeedback)
+#define UNCONDITIONAL_JUMPS_LIST(V) \
+ V(Jump) \
+ V(JumpConstant) \
+ V(JumpLoop)
+
+#define CONDITIONAL_JUMPS_LIST(V) \
+ V(JumpIfFalse) \
+ V(JumpIfFalseConstant) \
+ V(JumpIfJSReceiver) \
+ V(JumpIfJSReceiverConstant) \
+ V(JumpIfNotNull) \
+ V(JumpIfNotNullConstant) \
+ V(JumpIfNotUndefined) \
+ V(JumpIfNotUndefinedConstant) \
+ V(JumpIfNull) \
+ V(JumpIfNullConstant) \
+ V(JumpIfToBooleanTrueConstant) \
+ V(JumpIfToBooleanFalseConstant) \
+ V(JumpIfToBooleanTrue) \
+ V(JumpIfToBooleanFalse) \
+ V(JumpIfTrue) \
+ V(JumpIfTrueConstant) \
+ V(JumpIfUndefined) \
+ V(JumpIfUndefinedConstant)
+
+#define IGNORED_BYTECODE_LIST(V) \
+ V(TestEqual) \
+ V(TestEqualStrict) \
+ V(TestLessThan) \
+ V(TestGreaterThan) \
+ V(TestLessThanOrEqual) \
+ V(TestGreaterThanOrEqual) \
+ V(TestReferenceEqual) \
+ V(TestInstanceOf) \
+ V(TestIn) \
+ V(TestUndetectable) \
+ V(TestNull) \
+ V(TestUndefined) \
+ V(TestTypeOf) \
+ V(ThrowReferenceErrorIfHole) \
+ V(ThrowSuperNotCalledIfHole) \
+ V(ThrowSuperAlreadyCalledIfNotHole)
+
#define SUPPORTED_BYTECODE_LIST(V) \
V(CallAnyReceiver) \
V(CallNoFeedback) \
@@ -89,11 +115,14 @@ namespace compiler {
V(CallUndefinedReceiver0) \
V(CallUndefinedReceiver1) \
V(CallUndefinedReceiver2) \
+ V(CallWithSpread) \
V(Construct) \
+ V(ConstructWithSpread) \
V(CreateClosure) \
V(ExtraWide) \
V(Illegal) \
V(LdaConstant) \
+ V(LdaKeyedProperty) \
V(LdaNull) \
V(Ldar) \
V(LdaSmi) \
@@ -101,25 +130,65 @@ namespace compiler {
V(LdaZero) \
V(Mov) \
V(Return) \
+ V(StackCheck) \
+ V(StaInArrayLiteral) \
+ V(StaKeyedProperty) \
V(Star) \
V(Wide) \
CLEAR_ENVIRONMENT_LIST(V) \
- CLEAR_ACCUMULATOR_LIST(V)
+ CLEAR_ACCUMULATOR_LIST(V) \
+ CONDITIONAL_JUMPS_LIST(V) \
+ UNCONDITIONAL_JUMPS_LIST(V) \
+ IGNORED_BYTECODE_LIST(V)
class JSHeapBroker;
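+// Compares handles by the address of their backing slot, giving an arbitrary
+// but consistent strict ordering for the zone sets below.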
+template <typename T>
+struct HandleComparator {
+ bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
+ return lhs.address() < rhs.address();
+ }
+};
+
struct FunctionBlueprint {
Handle<SharedFunctionInfo> shared;
- Handle<FeedbackVector> feedback;
+ Handle<FeedbackVector> feedback_vector;
+
+ bool operator<(const FunctionBlueprint& other) const {
+ // A feedback vector is never used for more than one SFI, so it can
+ // be used for strict ordering of blueprints.
+ DCHECK_IMPLIES(feedback_vector.equals(other.feedback_vector),
+ shared.equals(other.shared));
+ return HandleComparator<FeedbackVector>()(feedback_vector,
+ other.feedback_vector);
+ }
+};
+
+class CompilationSubject {
+ public:
+ explicit CompilationSubject(FunctionBlueprint blueprint)
+ : blueprint_(blueprint) {}
+ CompilationSubject(Handle<JSFunction> closure, Isolate* isolate);
+
+ FunctionBlueprint blueprint() const { return blueprint_; }
+ MaybeHandle<JSFunction> closure() const { return closure_; }
+
+ private:
+ FunctionBlueprint blueprint_;
+ MaybeHandle<JSFunction> closure_;
};
+typedef ZoneSet<Handle<Object>, HandleComparator<Object>> ConstantsSet;
+typedef ZoneSet<Handle<Map>, HandleComparator<Map>> MapsSet;
+typedef ZoneSet<FunctionBlueprint> BlueprintsSet;
+
class Hints {
public:
explicit Hints(Zone* zone);
- const ZoneVector<Handle<Object>>& constants() const;
- const ZoneVector<Handle<Map>>& maps() const;
- const ZoneVector<FunctionBlueprint>& function_blueprints() const;
+ const ConstantsSet& constants() const;
+ const MapsSet& maps() const;
+ const BlueprintsSet& function_blueprints() const;
void AddConstant(Handle<Object> constant);
void AddMap(Handle<Map> map);
@@ -128,11 +197,12 @@ class Hints {
void Add(const Hints& other);
void Clear();
+ bool IsEmpty() const;
private:
- ZoneVector<Handle<Object>> constants_;
- ZoneVector<Handle<Map>> maps_;
- ZoneVector<FunctionBlueprint> function_blueprints_;
+ ConstantsSet constants_;
+ MapsSet maps_;
+ BlueprintsSet function_blueprints_;
};
typedef ZoneVector<Hints> HintsVector;
@@ -143,12 +213,15 @@ typedef ZoneVector<Hints> HintsVector;
class SerializerForBackgroundCompilation {
public:
SerializerForBackgroundCompilation(JSHeapBroker* broker, Zone* zone,
- Handle<JSFunction> function);
+ Handle<JSFunction> closure);
Hints Run(); // NOTE: Returns empty for an already-serialized function.
+ class Environment;
+
private:
SerializerForBackgroundCompilation(JSHeapBroker* broker, Zone* zone,
- FunctionBlueprint function,
+ CompilationSubject function,
+ base::Optional<Hints> new_target,
const HintsVector& arguments);
void TraverseBytecode();
@@ -158,22 +231,30 @@ class SerializerForBackgroundCompilation {
SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
- class Environment;
+ void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
+ const HintsVector& arguments, FeedbackSlot slot,
+ bool with_spread = false);
+ void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator,
+ ConvertReceiverMode receiver_mode,
+ bool with_spread = false);
+ void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
+ void MergeAfterJump(interpreter::BytecodeArrayIterator* iterator);
+
+ Hints RunChildSerializer(CompilationSubject function,
+ base::Optional<Hints> new_target,
+ const HintsVector& arguments, bool with_spread);
+
+ void ProcessFeedbackForKeyedPropertyAccess(
+ interpreter::BytecodeArrayIterator* iterator);
- Zone* zone() const { return zone_; }
JSHeapBroker* broker() const { return broker_; }
+ Zone* zone() const { return zone_; }
Environment* environment() const { return environment_; }
- void ProcessCallOrConstruct(const Hints& callee,
- const HintsVector& arguments);
- void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator,
- ConvertReceiverMode receiver_mode);
-
- JSHeapBroker* broker_;
- Zone* zone_;
- Handle<SharedFunctionInfo> shared_;
- Handle<FeedbackVector> feedback_;
- Environment* environment_;
+ JSHeapBroker* const broker_;
+ Zone* const zone_;
+ Environment* const environment_;
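+ // Environments stashed by forward jumps, keyed by the jump target offset.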
+ ZoneUnorderedMap<int, Environment*> stashed_environments_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 4a8935b855..7b612ee70b 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -1041,7 +1041,8 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+ VisitUnop(node,
+ UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()),
MachineRepresentation::kTaggedPointer);
}
}
@@ -1527,9 +1528,10 @@ class RepresentationSelector {
if (node->op()->ValueOutputCount() > 0 &&
node->op()->EffectOutputCount() > 0 &&
node->opcode() != IrOpcode::kUnreachable && TypeOf(node).IsNone()) {
- Node* control = node->op()->ControlOutputCount() > 0
- ? node
- : NodeProperties::GetControlInput(node, 0);
+ Node* control =
+ (node->op()->ControlOutputCount() == 0)
+ ? NodeProperties::GetControlInput(node, 0)
+ : NodeProperties::FindSuccessfulControlProjection(node);
Node* unreachable =
graph()->NewNode(common()->Unreachable(), node, control);
@@ -1537,9 +1539,18 @@ class RepresentationSelector {
// Insert unreachable node and replace all the effect uses of the {node}
// with the new unreachable node.
for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge) && edge.from() != unreachable) {
- edge.UpdateTo(unreachable);
+ if (!NodeProperties::IsEffectEdge(edge)) continue;
+ // Make sure to not overwrite the unreachable node's input. That would
+ // create a cycle.
+ if (edge.from() == unreachable) continue;
+ // Avoid messing up the exceptional path.
+ if (edge.from()->opcode() == IrOpcode::kIfException) {
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
+ DCHECK_EQ(NodeProperties::GetControlInput(edge.from()), node);
+ continue;
}
+
+ edge.UpdateTo(unreachable);
}
}
}
@@ -1556,6 +1567,8 @@ class RepresentationSelector {
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) {
+ CheckBoundsParameters::Mode mode =
+ CheckBoundsParameters::kDeoptOnOutOfBounds;
if (lowering->poisoning_level_ ==
PoisoningMitigationLevel::kDontPoison &&
(index_type.IsNone() || length_type.IsNone() ||
@@ -1563,11 +1576,10 @@ class RepresentationSelector {
index_type.Max() < length_type.Min()))) {
// The bounds check is redundant if we already know that
// the index is within the bounds of [0.0, length[.
- DeferReplacement(node, node->InputAt(0));
- } else {
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(p.feedback()));
+ mode = CheckBoundsParameters::kAbortOnOutOfBounds;
}
+ NodeProperties::ChangeOp(
+ node, simplified()->CheckedUint32Bounds(p.feedback(), mode));
}
} else {
VisitBinop(
@@ -1576,7 +1588,9 @@ class RepresentationSelector {
UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
if (lower()) {
NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(p.feedback()));
+ node,
+ simplified()->CheckedUint32Bounds(
+ p.feedback(), CheckBoundsParameters::kDeoptOnOutOfBounds));
}
}
} else {
@@ -2653,7 +2667,8 @@ class RepresentationSelector {
VisitUnop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
} else {
- VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+ VisitUnop(node,
+ UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()),
MachineRepresentation::kTaggedPointer);
}
if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -2703,7 +2718,17 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckString: {
- VisitCheck(node, Type::String(), lowering);
+ const CheckParameters& params = CheckParametersOf(node->op());
+ if (InputIs(node, Type::String())) {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(
+ node,
+ UseInfo::CheckedHeapObjectAsTaggedPointer(params.feedback()),
+ MachineRepresentation::kTaggedPointer);
+ }
return;
}
case IrOpcode::kCheckSymbol: {
@@ -2717,6 +2742,18 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kLoadMessage: {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ VisitUnop(node, UseInfo::Word(), MachineRepresentation::kTagged);
+ return;
+ }
+ case IrOpcode::kStoreMessage: {
+ ProcessInput(node, 0, UseInfo::Word());
+ ProcessInput(node, 1, UseInfo::AnyTagged());
+ ProcessRemainingInputs(node, 2);
+ SetOutput(node, MachineRepresentation::kNone);
+ return;
+ }
case IrOpcode::kLoadFieldByIndex: {
if (truncation.IsUnused()) return VisitUnused(node);
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
@@ -2768,6 +2805,11 @@ class RepresentationSelector {
access.machine_type.representation());
return;
}
+ case IrOpcode::kLoadStackArgument: {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ VisitBinop(node, UseInfo::Word(), MachineRepresentation::kTagged);
+ return;
+ }
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
Node* value_node = node->InputAt(2);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index c2831bf293..3591c1914c 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -223,40 +223,6 @@ std::ostream& operator<<(std::ostream& os, CheckMapsFlags flags) {
return os;
}
-MapsParameterInfo::MapsParameterInfo(ZoneHandleSet<Map> const& maps)
- : maps_(maps), instance_type_(Nothing<InstanceType>()) {
- DCHECK_LT(0, maps.size());
- instance_type_ = Just(maps.at(0)->instance_type());
- for (size_t i = 1; i < maps.size(); ++i) {
- if (instance_type_.FromJust() != maps.at(i)->instance_type()) {
- instance_type_ = Nothing<InstanceType>();
- break;
- }
- }
-}
-
-std::ostream& operator<<(std::ostream& os, MapsParameterInfo const& p) {
- ZoneHandleSet<Map> const& maps = p.maps();
- InstanceType instance_type;
- if (p.instance_type().To(&instance_type)) {
- os << ", " << instance_type;
- }
- for (size_t i = 0; i < maps.size(); ++i) {
- os << ", " << Brief(*maps[i]);
- }
- return os;
-}
-
-bool operator==(MapsParameterInfo const& lhs, MapsParameterInfo const& rhs) {
- return lhs.maps() == rhs.maps();
-}
-
-bool operator!=(MapsParameterInfo const& lhs, MapsParameterInfo const& rhs) {
- return !(lhs == rhs);
-}
-
-size_t hash_value(MapsParameterInfo const& p) { return hash_value(p.maps()); }
-
bool operator==(CheckMapsParameters const& lhs,
CheckMapsParameters const& rhs) {
return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps() &&
@@ -268,7 +234,7 @@ size_t hash_value(CheckMapsParameters const& p) {
}
std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) {
- os << p.flags() << p.maps_info();
+ os << p.flags() << p.maps();
if (p.feedback().IsValid()) {
os << "; " << p.feedback();
}
@@ -280,14 +246,14 @@ CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
return OpParameter<CheckMapsParameters>(op);
}
-MapsParameterInfo const& CompareMapsParametersOf(Operator const* op) {
+ZoneHandleSet<Map> const& CompareMapsParametersOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kCompareMaps, op->opcode());
- return OpParameter<MapsParameterInfo>(op);
+ return OpParameter<ZoneHandleSet<Map>>(op);
}
-MapsParameterInfo const& MapGuardMapsOf(Operator const* op) {
+ZoneHandleSet<Map> const& MapGuardMapsOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kMapGuard, op->opcode());
- return OpParameter<MapsParameterInfo>(op);
+ return OpParameter<ZoneHandleSet<Map>>(op);
}
size_t hash_value(CheckTaggedInputMode mode) {
@@ -812,13 +778,14 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(CheckedTaggedSignedToInt32, 1, 1) \
V(CheckedTaggedToTaggedPointer, 1, 1) \
V(CheckedTaggedToTaggedSigned, 1, 1) \
- V(CheckedUint32Bounds, 2, 1) \
V(CheckedUint32ToInt32, 1, 1) \
V(CheckedUint32ToTaggedSigned, 1, 1) \
V(CheckedUint64Bounds, 2, 1) \
V(CheckedUint64ToInt32, 1, 1) \
V(CheckedUint64ToTaggedSigned, 1, 1)
+#define CHECKED_BOUNDS_OP_LIST(V) V(CheckedUint32Bounds)
+
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, value_input_count, control_input_count) \
struct Name##Operator final : public Operator { \
@@ -867,6 +834,21 @@ struct SimplifiedOperatorGlobalCache final {
CHECKED_WITH_FEEDBACK_OP_LIST(CHECKED_WITH_FEEDBACK)
#undef CHECKED_WITH_FEEDBACK
+#define CHECKED_BOUNDS(Name) \
+ struct Name##Operator final : public Operator1<CheckBoundsParameters> { \
+ Name##Operator(VectorSlotPair feedback, CheckBoundsParameters::Mode mode) \
+ : Operator1<CheckBoundsParameters>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, \
+ #Name, 2, 1, 1, 1, 1, 0, \
+ CheckBoundsParameters(feedback, mode)) {} \
+ }; \
+ Name##Operator k##Name##Deopting = { \
+ VectorSlotPair(), CheckBoundsParameters::kDeoptOnOutOfBounds}; \
+ Name##Operator k##Name##Aborting = { \
+ VectorSlotPair(), CheckBoundsParameters::kAbortOnOutOfBounds};
+ CHECKED_BOUNDS_OP_LIST(CHECKED_BOUNDS)
+#undef CHECKED_BOUNDS
+
template <DeoptimizeReason kDeoptimizeReason>
struct CheckIfOperator final : public Operator1<CheckIfParameters> {
CheckIfOperator()
@@ -1112,6 +1094,39 @@ struct SimplifiedOperatorGlobalCache final {
};
LoadFieldByIndexOperator kLoadFieldByIndex;
+ struct LoadStackArgumentOperator final : public Operator {
+ LoadStackArgumentOperator()
+ : Operator( // --
+ IrOpcode::kLoadStackArgument, // opcode
+ Operator::kNoDeopt | Operator::kNoThrow |
+ Operator::kNoWrite, // flags
+ "LoadStackArgument", // name
+ 2, 1, 1, 1, 1, 0) {} // counts
+ };
+ LoadStackArgumentOperator kLoadStackArgument;
+
+ struct LoadMessageOperator final : public Operator {
+ LoadMessageOperator()
+ : Operator( // --
+ IrOpcode::kLoadMessage, // opcode
+ Operator::kNoDeopt | Operator::kNoThrow |
+ Operator::kNoWrite, // flags
+ "LoadMessage", // name
+ 1, 1, 1, 1, 1, 0) {} // counts
+ };
+ LoadMessageOperator kLoadMessage;
+
+ struct StoreMessageOperator final : public Operator {
+ StoreMessageOperator()
+ : Operator( // --
+ IrOpcode::kStoreMessage, // opcode
+ Operator::kNoDeopt | Operator::kNoThrow |
+ Operator::kNoRead, // flags
+ "StoreMessage", // name
+ 2, 1, 1, 0, 1, 0) {} // counts
+ };
+ StoreMessageOperator kStoreMessage;
+
#define SPECULATIVE_NUMBER_BINOP(Name) \
template <NumberOperationHint kHint> \
struct Name##Operator final : public Operator1<NumberOperationHint> { \
@@ -1153,7 +1168,7 @@ struct SimplifiedOperatorGlobalCache final {
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(SimplifiedOperatorGlobalCache,
- GetSimplifiedOperatorGlobalCache);
+ GetSimplifiedOperatorGlobalCache)
}  // namespace
SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
@@ -1185,6 +1200,23 @@ GET_FROM_CACHE(LoadFieldByIndex)
CHECKED_WITH_FEEDBACK_OP_LIST(GET_FROM_CACHE_WITH_FEEDBACK)
#undef GET_FROM_CACHE_WITH_FEEDBACK
+#define GET_FROM_CACHE_WITH_FEEDBACK(Name) \
+ const Operator* SimplifiedOperatorBuilder::Name( \
+ const VectorSlotPair& feedback, CheckBoundsParameters::Mode mode) { \
+ if (!feedback.IsValid()) { \
+ switch (mode) { \
+ case CheckBoundsParameters::kDeoptOnOutOfBounds: \
+ return &cache_.k##Name##Deopting; \
+ case CheckBoundsParameters::kAbortOnOutOfBounds: \
+ return &cache_.k##Name##Aborting; \
+ } \
+ } \
+ return new (zone()) \
+ SimplifiedOperatorGlobalCache::Name##Operator(feedback, mode); \
+ }
+CHECKED_BOUNDS_OP_LIST(GET_FROM_CACHE_WITH_FEEDBACK)
+#undef GET_FROM_CACHE_WITH_FEEDBACK
+
bool IsCheckedWithFeedback(const Operator* op) {
#define CASE(Name, ...) case IrOpcode::k##Name:
switch (op->opcode()) {
@@ -1351,21 +1383,23 @@ const Operator* SimplifiedOperatorBuilder::CheckMaps(
}
const Operator* SimplifiedOperatorBuilder::MapGuard(ZoneHandleSet<Map> maps) {
- return new (zone()) Operator1<MapsParameterInfo>( // --
- IrOpcode::kMapGuard, Operator::kEliminatable, // opcode
- "MapGuard", // name
- 1, 1, 1, 0, 1, 0, // counts
- MapsParameterInfo(maps)); // parameter
+ DCHECK_LT(0, maps.size());
+ return new (zone()) Operator1<ZoneHandleSet<Map>>( // --
+ IrOpcode::kMapGuard, Operator::kEliminatable, // opcode
+ "MapGuard", // name
+ 1, 1, 1, 0, 1, 0, // counts
+ maps); // parameter
}
const Operator* SimplifiedOperatorBuilder::CompareMaps(
ZoneHandleSet<Map> maps) {
- return new (zone()) Operator1<MapsParameterInfo>( // --
- IrOpcode::kCompareMaps, // opcode
- Operator::kEliminatable, // flags
- "CompareMaps", // name
- 1, 1, 1, 1, 1, 0, // counts
- MapsParameterInfo(maps)); // parameter
+ DCHECK_LT(0, maps.size());
+ return new (zone()) Operator1<ZoneHandleSet<Map>>( // --
+ IrOpcode::kCompareMaps, // opcode
+ Operator::kEliminatable, // flags
+ "CompareMaps", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ maps); // parameter
}
const Operator* SimplifiedOperatorBuilder::ConvertReceiver(
@@ -1509,12 +1543,43 @@ std::ostream& operator<<(std::ostream& os, CheckParameters const& p) {
}
CheckParameters const& CheckParametersOf(Operator const* op) {
+ if (op->opcode() == IrOpcode::kCheckedUint32Bounds) {
+ return OpParameter<CheckBoundsParameters>(op).check_parameters();
+ }
#define MAKE_OR(name, arg2, arg3) op->opcode() == IrOpcode::k##name ||
CHECK((CHECKED_WITH_FEEDBACK_OP_LIST(MAKE_OR) false));
#undef MAKE_OR
return OpParameter<CheckParameters>(op);
}
+bool operator==(CheckBoundsParameters const& lhs,
+ CheckBoundsParameters const& rhs) {
+ return lhs.check_parameters() == rhs.check_parameters() &&
+ lhs.mode() == rhs.mode();
+}
+
+size_t hash_value(CheckBoundsParameters const& p) {
+ return base::hash_combine(hash_value(p.check_parameters()), p.mode());
+}
+
+std::ostream& operator<<(std::ostream& os, CheckBoundsParameters const& p) {
+ os << p.check_parameters() << ",";
+ switch (p.mode()) {
+ case CheckBoundsParameters::kDeoptOnOutOfBounds:
+ os << "deopt";
+ break;
+ case CheckBoundsParameters::kAbortOnOutOfBounds:
+ os << "abort";
+ break;
+ }
+ return os;
+}
+
+CheckBoundsParameters const& CheckBoundsParametersOf(Operator const* op) {
+ CHECK_EQ(op->opcode(), IrOpcode::kCheckedUint32Bounds);
+ return OpParameter<CheckBoundsParameters>(op);
+}
+
bool operator==(CheckIfParameters const& lhs, CheckIfParameters const& rhs) {
return lhs.reason() == rhs.reason() && lhs.feedback() == rhs.feedback();
}
@@ -1647,6 +1712,18 @@ SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
ACCESS_OP_LIST(ACCESS)
#undef ACCESS
+const Operator* SimplifiedOperatorBuilder::LoadMessage() {
+ return &cache_.kLoadMessage;
+}
+
+const Operator* SimplifiedOperatorBuilder::StoreMessage() {
+ return &cache_.kStoreMessage;
+}
+
+const Operator* SimplifiedOperatorBuilder::LoadStackArgument() {
+ return &cache_.kLoadStackArgument;
+}
+
const Operator* SimplifiedOperatorBuilder::TransitionAndStoreElement(
Handle<Map> double_map, Handle<Map> fast_map) {
TransitionAndStoreElementParameters parameters(double_map, fast_map);
@@ -1684,6 +1761,7 @@ const Operator* SimplifiedOperatorBuilder::TransitionAndStoreNonNumberElement(
#undef EFFECT_DEPENDENT_OP_LIST
#undef SPECULATIVE_NUMBER_BINOP_LIST
#undef CHECKED_WITH_FEEDBACK_OP_LIST
+#undef CHECKED_BOUNDS_OP_LIST
#undef CHECKED_OP_LIST
#undef ACCESS_OP_LIST
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 9dddab4861..f4691bff93 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -163,6 +163,30 @@ std::ostream& operator<<(std::ostream&, CheckParameters const&);
CheckParameters const& CheckParametersOf(Operator const*) V8_WARN_UNUSED_RESULT;
+class CheckBoundsParameters final {
+ public:
+ enum Mode { kAbortOnOutOfBounds, kDeoptOnOutOfBounds };
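+ // kDeoptOnOutOfBounds deoptimizes on a failing check. kAbortOnOutOfBounds
+ // is used when lowering has already proven the index to be in bounds, so
+ // a failure can only abort (see simplified-lowering.cc).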
+
+ CheckBoundsParameters(const VectorSlotPair& feedback, Mode mode)
+ : check_parameters_(feedback), mode_(mode) {}
+
+ Mode mode() const { return mode_; }
+ const CheckParameters& check_parameters() const { return check_parameters_; }
+
+ private:
+ CheckParameters check_parameters_;
+ Mode mode_;
+};
+
+bool operator==(CheckBoundsParameters const&, CheckBoundsParameters const&);
+
+size_t hash_value(CheckBoundsParameters const&);
+
+std::ostream& operator<<(std::ostream&, CheckBoundsParameters const&);
+
+CheckBoundsParameters const& CheckBoundsParametersOf(Operator const*)
+ V8_WARN_UNUSED_RESULT;
+
class CheckIfParameters final {
public:
explicit CheckIfParameters(DeoptimizeReason reason,
@@ -303,25 +327,6 @@ DEFINE_OPERATORS_FOR_FLAGS(CheckMapsFlags)
std::ostream& operator<<(std::ostream&, CheckMapsFlags);
-class MapsParameterInfo {
- public:
- explicit MapsParameterInfo(ZoneHandleSet<Map> const& maps);
-
- Maybe<InstanceType> instance_type() const { return instance_type_; }
- ZoneHandleSet<Map> const& maps() const { return maps_; }
-
- private:
- ZoneHandleSet<Map> const maps_;
- Maybe<InstanceType> instance_type_;
-};
-
-std::ostream& operator<<(std::ostream&, MapsParameterInfo const&);
-
-bool operator==(MapsParameterInfo const&, MapsParameterInfo const&);
-bool operator!=(MapsParameterInfo const&, MapsParameterInfo const&);
-
-size_t hash_value(MapsParameterInfo const&);
-
// A descriptor for map checks. The {feedback} parameter is optional.
// If {feedback} references a valid CallIC slot and this MapCheck fails,
// then speculation on that CallIC slot will be disabled.
@@ -329,16 +334,15 @@ class CheckMapsParameters final {
public:
CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps,
const VectorSlotPair& feedback)
- : flags_(flags), maps_info_(maps), feedback_(feedback) {}
+ : flags_(flags), maps_(maps), feedback_(feedback) {}
CheckMapsFlags flags() const { return flags_; }
- ZoneHandleSet<Map> const& maps() const { return maps_info_.maps(); }
- MapsParameterInfo const& maps_info() const { return maps_info_; }
+ ZoneHandleSet<Map> const& maps() const { return maps_; }
VectorSlotPair const& feedback() const { return feedback_; }
private:
CheckMapsFlags const flags_;
- MapsParameterInfo const maps_info_;
+ ZoneHandleSet<Map> const maps_;
VectorSlotPair const feedback_;
};
@@ -351,10 +355,10 @@ std::ostream& operator<<(std::ostream&, CheckMapsParameters const&);
CheckMapsParameters const& CheckMapsParametersOf(Operator const*)
V8_WARN_UNUSED_RESULT;
-MapsParameterInfo const& MapGuardMapsOf(Operator const*) V8_WARN_UNUSED_RESULT;
+ZoneHandleSet<Map> const& MapGuardMapsOf(Operator const*) V8_WARN_UNUSED_RESULT;
// Parameters for CompareMaps operator.
-MapsParameterInfo const& CompareMapsParametersOf(Operator const*)
+ZoneHandleSet<Map> const& CompareMapsParametersOf(Operator const*)
V8_WARN_UNUSED_RESULT;
// A descriptor for growing elements backing stores.
@@ -709,7 +713,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const VectorSlotPair& feedback);
const Operator* CheckedUint32Div();
const Operator* CheckedUint32Mod();
- const Operator* CheckedUint32Bounds(const VectorSlotPair& feedback);
+ const Operator* CheckedUint32Bounds(const VectorSlotPair& feedback,
+ CheckBoundsParameters::Mode mode);
const Operator* CheckedUint32ToInt32(const VectorSlotPair& feedback);
const Operator* CheckedUint32ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* CheckedUint64Bounds(const VectorSlotPair& feedback);
@@ -775,9 +780,15 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* LoadField(FieldAccess const&);
const Operator* StoreField(FieldAccess const&);
+ const Operator* LoadMessage();
+ const Operator* StoreMessage();
+
// load-element [base + index]
const Operator* LoadElement(ElementAccess const&);
+ // load-stack-argument [base + index]
+ const Operator* LoadStackArgument();
+
// store-element [base + index], value
const Operator* StoreElement(ElementAccess const&);
diff --git a/deps/v8/src/compiler/type-cache.cc b/deps/v8/src/compiler/type-cache.cc
index ecb6c1c6a8..1c007df507 100644
--- a/deps/v8/src/compiler/type-cache.cc
+++ b/deps/v8/src/compiler/type-cache.cc
@@ -10,7 +10,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(const TypeCache, TypeCache::Get);
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(const TypeCache, TypeCache::Get)
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.cc b/deps/v8/src/compiler/type-narrowing-reducer.cc
index 79687fe2a7..723293dfb8 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.cc
+++ b/deps/v8/src/compiler/type-narrowing-reducer.cc
@@ -5,7 +5,6 @@
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/js-graph.h"
-#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index 3145e1bbff..9b0df88da3 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -747,8 +747,9 @@ Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
}
if (input_type.IsHeapConstant()) {
HeapObjectRef input_value = input_type.AsHeapConstant()->Ref();
- if (input_value.map().oddball_type() != OddballType::kNone) {
- return Replace(jsgraph()->Constant(input_value.OddballToNumber()));
+ double value;
+ if (input_value.OddballToNumber().To(&value)) {
+ return Replace(jsgraph()->Constant(value));
}
}
if (input_type.Is(Type::Number())) {
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 248de5a0d5..449a25b755 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -2131,10 +2131,16 @@ Type Typer::Visitor::TypeLoadField(Node* node) {
return FieldAccessOf(node->op()).type;
}
+Type Typer::Visitor::TypeLoadMessage(Node* node) { return Type::Any(); }
+
Type Typer::Visitor::TypeLoadElement(Node* node) {
return ElementAccessOf(node->op()).type;
}
+Type Typer::Visitor::TypeLoadStackArgument(Node* node) {
+ return Type::NonInternal();
+}
+
Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
switch (ExternalArrayTypeOf(node->op())) {
#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype) \
@@ -2158,6 +2164,7 @@ Type Typer::Visitor::TypeLoadDataViewElement(Node* node) {
}
Type Typer::Visitor::TypeStoreField(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeStoreMessage(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStoreElement(Node* node) { UNREACHABLE(); }
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index ff361727cd..7d8c39c8eb 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -56,7 +56,7 @@ class V8_EXPORT_PRIVATE Typer {
DISALLOW_COPY_AND_ASSIGN(Typer);
};
-DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags);
+DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags)
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index e2ff1e6c72..f5106fb2d5 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -145,19 +145,15 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case SLICED_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
case EXTERNAL_ONE_BYTE_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
case UNCACHED_EXTERNAL_STRING_TYPE:
case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
- case UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
case STRING_TYPE:
case ONE_BYTE_STRING_TYPE:
return kString;
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE:
case UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case INTERNALIZED_STRING_TYPE:
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
return kInternalizedString;
@@ -241,9 +237,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- case JS_WEAK_CELL_TYPE:
- case JS_WEAK_FACTORY_TYPE:
- case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
+ case JS_FINALIZATION_GROUP_TYPE:
+ case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
@@ -254,6 +249,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
case WASM_TABLE_TYPE:
+ case WEAK_CELL_TYPE:
DCHECK(!map.is_callable());
DCHECK(!map.is_undetectable());
return kOtherObject;
@@ -342,8 +338,10 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case ALIASED_ARGUMENTS_ENTRY_TYPE:
case PROMISE_CAPABILITY_TYPE:
case PROMISE_REACTION_TYPE:
+ case CLASS_POSITIONS_TYPE:
case DEBUG_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
+ case STACK_TRACE_FRAME_TYPE:
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
@@ -363,7 +361,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_FULFILL_REACTION_JOB_TASK_TYPE:
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
- case WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE:
+ case FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE:
UNREACHABLE();
}
UNREACHABLE();
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 38ffbe63fc..75ca566330 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -166,6 +166,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
if (!node->op()->HasProperty(Operator::kNoThrow)) {
Node* discovered_if_exception = nullptr;
Node* discovered_if_success = nullptr;
+ Node* discovered_direct_use = nullptr;
int total_number_of_control_uses = 0;
for (Edge edge : node->use_edges()) {
if (!NodeProperties::IsControlEdge(edge)) {
@@ -176,10 +177,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
if (control_use->opcode() == IrOpcode::kIfSuccess) {
CHECK_NULL(discovered_if_success); // Only one allowed.
discovered_if_success = control_use;
- }
- if (control_use->opcode() == IrOpcode::kIfException) {
+ } else if (control_use->opcode() == IrOpcode::kIfException) {
CHECK_NULL(discovered_if_exception); // Only one allowed.
discovered_if_exception = control_use;
+ } else {
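+ // Any other control use is a direct use; remember one for the error
+ // message below.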
+ discovered_direct_use = control_use;
}
}
if (discovered_if_success && !discovered_if_exception) {
@@ -196,8 +198,13 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
node->id(), node->op()->mnemonic(), discovered_if_exception->id(),
discovered_if_exception->op()->mnemonic());
}
- if (discovered_if_success || discovered_if_exception) {
- CHECK_EQ(2, total_number_of_control_uses);
+ if ((discovered_if_success || discovered_if_exception) &&
+ total_number_of_control_uses != 2) {
+ FATAL(
+ "#%d:%s if followed by IfSuccess/IfException, there should be "
+ "no direct control uses, but direct use #%d:%s was found",
+ node->id(), node->op()->mnemonic(), discovered_direct_use->id(),
+ discovered_direct_use->op()->mnemonic());
}
}
}
@@ -1534,12 +1541,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::NonInternal());
break;
case IrOpcode::kLoadField:
+ case IrOpcode::kLoadMessage:
// Object -> fieldtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
// CheckTypeIs(node, FieldAccessOf(node->op()).type));
break;
case IrOpcode::kLoadElement:
+ case IrOpcode::kLoadStackArgument:
// Object -> elementtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
@@ -1550,6 +1559,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadDataViewElement:
break;
case IrOpcode::kStoreField:
+ case IrOpcode::kStoreMessage:
// (Object, fieldtype) -> _|_
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index b52f6f0640..30f9e94f1f 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -40,6 +40,7 @@
#include "src/optimized-compilation-info.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/vector.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/graph-builder-interface.h"
@@ -1063,7 +1064,9 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
}
buf[0] = mcgraph()->Int32Constant(0);
- memcpy(buf + 1, vals, sizeof(void*) * count);
+ if (count > 0) {
+ memcpy(buf + 1, vals, sizeof(void*) * count);
+ }
buf[count + 1] = Effect();
buf[count + 2] = Control();
Node* ret =
@@ -2068,59 +2071,11 @@ Node* WasmGraphBuilder::MemoryGrow(Node* input) {
call_target, input, Effect(), Control())));
}
-#ifdef DEBUG
-
-namespace {
-
-constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2;
-
-size_t ComputeEncodedElementSize(wasm::ValueType type) {
- size_t byte_size =
- static_cast<size_t>(wasm::ValueTypes::ElementSizeInBytes(type));
- DCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0);
- DCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement);
- return byte_size / kBytesPerExceptionValuesArrayElement;
-}
-
-} // namespace
-
-#endif // DEBUG
-
-uint32_t WasmGraphBuilder::GetExceptionEncodedSize(
- const wasm::WasmException* exception) const {
- const wasm::WasmExceptionSig* sig = exception->sig;
- uint32_t encoded_size = 0;
- for (size_t i = 0; i < sig->parameter_count(); ++i) {
- switch (sig->GetParam(i)) {
- case wasm::kWasmI32:
- case wasm::kWasmF32:
- DCHECK_EQ(2, ComputeEncodedElementSize(sig->GetParam(i)));
- encoded_size += 2;
- break;
- case wasm::kWasmI64:
- case wasm::kWasmF64:
- DCHECK_EQ(4, ComputeEncodedElementSize(sig->GetParam(i)));
- encoded_size += 4;
- break;
- case wasm::kWasmS128:
- DCHECK_EQ(8, ComputeEncodedElementSize(sig->GetParam(i)));
- encoded_size += 8;
- break;
- case wasm::kWasmAnyRef:
- encoded_size += 1;
- break;
- default:
- UNREACHABLE();
- }
- }
- return encoded_size;
-}
-
Node* WasmGraphBuilder::Throw(uint32_t exception_index,
const wasm::WasmException* exception,
const Vector<Node*> values) {
needs_stack_check_ = true;
- uint32_t encoded_size = GetExceptionEncodedSize(exception);
+ uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
Node* create_parameters[] = {
LoadExceptionTagFromTable(exception_index),
BuildChangeUint31ToSmi(Uint32Constant(encoded_size))};
@@ -2308,7 +2263,7 @@ Node** WasmGraphBuilder::GetExceptionValues(
}
values[i] = value;
}
- DCHECK_EQ(index, GetExceptionEncodedSize(exception));
+ DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception));
return values;
}
@@ -2737,8 +2692,16 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
// Load the target from the imported_targets array at the offset of
// {func_index}.
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
- Node* func_index_times_pointersize = func_index_times_tagged_size;
+ Node* func_index_times_pointersize;
+ if (kSystemPointerSize == kTaggedSize) {
+ func_index_times_pointersize = func_index_times_tagged_size;
+
+ } else {
+ DCHECK_EQ(kSystemPointerSize, kTaggedSize + kTaggedSize);
+ func_index_times_pointersize = graph()->NewNode(
+ mcgraph()->machine()->Int32Add(), func_index_times_tagged_size,
+ func_index_times_tagged_size);
+ }
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
Node* target_node = SetEffect(graph()->NewNode(
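
The replaced STATIC_ASSERT assumed tagged values and system pointers have the same width; with pointer compression, `kTaggedSize` can be half of `kSystemPointerSize`, so an index already scaled by the tagged size is rescaled by doubling it. A rough stand-alone sketch of that arithmetic, with local constants standing in for V8's (values assume a 64-bit build with compression):

#include <cassert>
#include <cstdint>

// Stand-ins for V8's constants on a 64-bit build with pointer compression.
constexpr int32_t kTaggedSize = 4;         // compressed tagged value
constexpr int32_t kSystemPointerSize = 8;  // full machine pointer

// Rescale an offset expressed in tagged-size units to pointer-size units.
// When the sizes differ, the pointer size is exactly twice the tagged size,
// so adding the value to itself doubles it without needing a shift.
int32_t TaggedScaledToPointerScaled(int32_t tagged_scaled) {
  if (kSystemPointerSize == kTaggedSize) return tagged_scaled;
  assert(kSystemPointerSize == kTaggedSize + kTaggedSize);
  return tagged_scaled + tagged_scaled;
}
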
@@ -2820,15 +2783,14 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Node* ift_instances = LOAD_INSTANCE_FIELD(IndirectFunctionTableRefs,
MachineType::TaggedPointer());
- Node* intptr_scaled_key = graph()->NewNode(
- machine->Word32Shl(), key, Int32Constant(kSystemPointerSizeLog2));
-
- Node* target = SetEffect(
- graph()->NewNode(machine->Load(MachineType::Pointer()), ift_targets,
- intptr_scaled_key, Effect(), Control()));
-
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
- Node* tagged_scaled_key = intptr_scaled_key;
+ Node* tagged_scaled_key;
+ if (kTaggedSize == kInt32Size) {
+ tagged_scaled_key = int32_scaled_key;
+ } else {
+ DCHECK_EQ(kTaggedSize, kInt32Size * 2);
+ tagged_scaled_key = graph()->NewNode(machine->Int32Add(), int32_scaled_key,
+ int32_scaled_key);
+ }
Node* target_instance = SetEffect(graph()->NewNode(
machine->Load(MachineType::TaggedPointer()),
@@ -2836,8 +2798,20 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Int32Constant(wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)),
Effect(), Control()));
- args[0] = target;
+ Node* intptr_scaled_key;
+ if (kSystemPointerSize == kTaggedSize) {
+ intptr_scaled_key = tagged_scaled_key;
+ } else {
+ DCHECK_EQ(kSystemPointerSize, kTaggedSize + kTaggedSize);
+ intptr_scaled_key = graph()->NewNode(machine->Int32Add(), tagged_scaled_key,
+ tagged_scaled_key);
+ }
+ Node* target = SetEffect(
+ graph()->NewNode(machine->Load(MachineType::Pointer()), ift_targets,
+ intptr_scaled_key, Effect(), Control()));
+
+ args[0] = target;
return BuildWasmCall(sig, args, rets, position, target_instance,
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
}
@@ -3261,6 +3235,69 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
graph()->NewNode(op, base, offset, val, Effect(), Control()));
}
+void WasmGraphBuilder::GetTableBaseAndOffset(uint32_t table_index, Node* index,
+ wasm::WasmCodePosition position,
+ Node** base_node,
+ Node** offset_node) {
+ Node* tables = LOAD_INSTANCE_FIELD(Tables, MachineType::TaggedPointer());
+ Node* table = LOAD_FIXED_ARRAY_SLOT_ANY(tables, table_index);
+
+ int storage_field_size = WasmTableObject::kElementsOffsetEnd -
+ WasmTableObject::kElementsOffset + 1;
+ Node* storage = LOAD_RAW(
+ table, wasm::ObjectAccess::ToTagged(WasmTableObject::kElementsOffset),
+ assert_size(storage_field_size, MachineType::TaggedPointer()));
+
+ int length_field_size =
+ FixedArray::kLengthOffsetEnd - FixedArray::kLengthOffset + 1;
+ Node* storage_size =
+ LOAD_RAW(storage, wasm::ObjectAccess::ToTagged(FixedArray::kLengthOffset),
+ assert_size(length_field_size, MachineType::TaggedSigned()));
+
+ storage_size = BuildChangeSmiToInt32(storage_size);
+ // Bounds check against the table size.
+ Node* in_bounds = graph()->NewNode(mcgraph()->machine()->Uint32LessThan(),
+ index, storage_size);
+ TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position);
+
+  // From the index, calculate the actual offset in the FixedArray. This
+ // is kHeaderSize + (index * kTaggedSize). kHeaderSize can be acquired with
+ // wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0).
+ Node* index_times_tagged_size =
+ graph()->NewNode(mcgraph()->machine()->IntMul(), Uint32ToUintptr(index),
+ mcgraph()->Int32Constant(kTaggedSize));
+
+ *offset_node = graph()->NewNode(
+ mcgraph()->machine()->IntAdd(), index_times_tagged_size,
+ mcgraph()->IntPtrConstant(
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
+
+ *base_node = storage;
+}
+
+Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
+ wasm::WasmCodePosition position) {
+ Node* base = nullptr;
+ Node* offset = nullptr;
+ GetTableBaseAndOffset(table_index, index, position, &base, &offset);
+ return SetEffect(
+ graph()->NewNode(mcgraph()->machine()->Load(MachineType::AnyTagged()),
+ base, offset, Effect(), Control()));
+}
+
+Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val,
+ wasm::WasmCodePosition position) {
+ Node* base = nullptr;
+ Node* offset = nullptr;
+ GetTableBaseAndOffset(table_index, index, position, &base, &offset);
+
+ const Operator* op = mcgraph()->machine()->Store(
+ StoreRepresentation(MachineRepresentation::kTagged, kFullWriteBarrier));
+
+ Node* store = graph()->NewNode(op, base, offset, val, Effect(), Control());
+ return SetEffect(store);
+}
+
Node* WasmGraphBuilder::CheckBoundsAndAlignment(
uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition position) {
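
GetTableBaseAndOffset, added above, reduces a table access to a (base, offset) pair into the backing FixedArray: bounds-check the index against the array length, then compute the header size plus index times kTaggedSize. The same address computation in plain C++ (both constants are illustrative placeholders, not the real object layout):

#include <cstdint>
#include <stdexcept>

constexpr uint64_t kTaggedSize = 8;             // illustrative
constexpr uint64_t kFixedArrayHeaderSize = 16;  // illustrative header size

// Byte offset of element `index` in a FixedArray of `length` elements. The
// compiled code emits a trap on a failed bounds check; this sketch throws.
uint64_t ElementOffset(uint32_t index, uint32_t length) {
  if (index >= length) throw std::out_of_range("table index out of bounds");
  return kFixedArrayHeaderSize + uint64_t{index} * kTaggedSize;
}
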
@@ -4278,7 +4315,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
case wasm::kExprI64AtomicWait: {
Node* index = CheckBoundsAndAlignment(
- wasm::ValueTypes::MemSize(MachineType::Uint32()), inputs[0], offset,
+ wasm::ValueTypes::MemSize(MachineType::Uint64()), inputs[0], offset,
position);
// Now that we've bounds-checked, compute the effective address.
Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
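
The kExprI64AtomicWait fix bounds-checks with the eight-byte access size of a 64-bit wait instead of four bytes; with the smaller size, an index within four bytes of the end of memory would pass the check while the actual access straddled the boundary. The check amounts to:

#include <cstdint>

// An access of `access_size` bytes at `index` stays inside a memory of
// `mem_size` bytes. Checking with access_size == 4 for a 64-bit wait would
// wrongly admit index == mem_size - 4.
bool AccessInBounds(uint64_t index, uint64_t access_size, uint64_t mem_size) {
  return access_size <= mem_size && index <= mem_size - access_size;
}
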
@@ -4364,10 +4401,10 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
// Load segment's base pointer from WasmInstanceObject::data_segment_starts.
Node* seg_start_array =
LOAD_INSTANCE_FIELD(DataSegmentStarts, MachineType::Pointer());
- STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >>
- kPointerSizeLog2);
+ STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <=
+ kMaxUInt32 / kSystemPointerSize);
Node* scaled_index = Uint32ToUintptr(graph()->NewNode(
- m->Word32Shl(), seg_index, Int32Constant(kPointerSizeLog2)));
+ m->Word32Shl(), seg_index, Int32Constant(kSystemPointerSizeLog2)));
Node* seg_start = SetEffect(
graph()->NewNode(m->Load(MachineType::Pointer()), seg_start_array,
scaled_index, Effect(), Control()));
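
Rewriting the bound as a division keeps the assertion meaningful without the log2 twin of the constant: for a power-of-two element size, `kMaxUInt32 >> log2(size)` and `kMaxUInt32 / size` are the same value. A compile-time check of that equivalence, using local stand-ins for the constants:

#include <cstdint>

constexpr uint32_t kMaxUInt32 = 0xFFFFFFFFu;
constexpr uint32_t kSystemPointerSize = 8;  // 64-bit target
constexpr uint32_t kSystemPointerSizeLog2 = 3;

// Both forms bound the number of elements whose scaled index fits in 32 bits.
static_assert(kMaxUInt32 >> kSystemPointerSizeLog2 ==
                  kMaxUInt32 / kSystemPointerSize,
              "shift and division express the same bound");
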
@@ -4384,8 +4421,8 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
return BuildCCall(&sig, function, dst, src, size);
}
-Node* WasmGraphBuilder::MemoryDrop(uint32_t data_segment_index,
- wasm::WasmCodePosition position) {
+Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
+ wasm::WasmCodePosition position) {
Node* dropped_data_segments =
CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position);
const Operator* store_op = mcgraph()->machine()->Store(
@@ -4438,20 +4475,21 @@ Node* WasmGraphBuilder::TableInit(uint32_t table_index,
uint32_t elem_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
+ CheckElemSegmentIsPassiveAndNotDropped(elem_segment_index, position);
Node* args[] = {
graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
graph()->NewNode(mcgraph()->common()->NumberConstant(elem_segment_index)),
- BuildConvertUint32ToSmiWithSaturation(dst, wasm::kV8MaxWasmTableSize),
- BuildConvertUint32ToSmiWithSaturation(src, wasm::kV8MaxWasmTableSize),
- BuildConvertUint32ToSmiWithSaturation(size, wasm::kV8MaxWasmTableSize)};
+ BuildConvertUint32ToSmiWithSaturation(dst, FLAG_wasm_max_table_size),
+ BuildConvertUint32ToSmiWithSaturation(src, FLAG_wasm_max_table_size),
+ BuildConvertUint32ToSmiWithSaturation(size, FLAG_wasm_max_table_size)};
Node* result =
BuildCallToRuntime(Runtime::kWasmTableInit, args, arraysize(args));
return result;
}
-Node* WasmGraphBuilder::TableDrop(uint32_t elem_segment_index,
- wasm::WasmCodePosition position) {
+Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
+ wasm::WasmCodePosition position) {
Node* dropped_elem_segments =
CheckElemSegmentIsPassiveAndNotDropped(elem_segment_index, position);
const Operator* store_op = mcgraph()->machine()->Store(
@@ -4462,13 +4500,16 @@ Node* WasmGraphBuilder::TableDrop(uint32_t elem_segment_index,
mcgraph()->Int32Constant(1), Effect(), Control()));
}
-Node* WasmGraphBuilder::TableCopy(uint32_t table_index, Node* dst, Node* src,
- Node* size, wasm::WasmCodePosition position) {
+Node* WasmGraphBuilder::TableCopy(uint32_t table_src_index,
+ uint32_t table_dst_index, Node* dst,
+ Node* src, Node* size,
+ wasm::WasmCodePosition position) {
Node* args[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
- BuildConvertUint32ToSmiWithSaturation(dst, wasm::kV8MaxWasmTableSize),
- BuildConvertUint32ToSmiWithSaturation(src, wasm::kV8MaxWasmTableSize),
- BuildConvertUint32ToSmiWithSaturation(size, wasm::kV8MaxWasmTableSize)};
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)),
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_dst_index)),
+ BuildConvertUint32ToSmiWithSaturation(dst, FLAG_wasm_max_table_size),
+ BuildConvertUint32ToSmiWithSaturation(src, FLAG_wasm_max_table_size),
+ BuildConvertUint32ToSmiWithSaturation(size, FLAG_wasm_max_table_size)};
Node* result =
BuildCallToRuntime(Runtime::kWasmTableCopy, args, arraysize(args));
@@ -5500,14 +5541,17 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
bool has_bigint_feature) {
if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
auto imported_function = WasmExportedFunction::cast(*target);
- wasm::FunctionSig* imported_sig =
- imported_function->instance()
- ->module()
- ->functions[imported_function->function_index()]
- .sig;
+ auto func_index = imported_function->function_index();
+ auto module = imported_function->instance()->module();
+ wasm::FunctionSig* imported_sig = module->functions[func_index].sig;
if (*imported_sig != *expected_sig) {
return WasmImportCallKind::kLinkError;
}
+ if (static_cast<uint32_t>(func_index) < module->num_imported_functions) {
+ // TODO(wasm): this redirects all imported-reexported functions
+ // through the call builtin. Fall through to JS function cases below?
+ return WasmImportCallKind::kUseCallBuiltin;
+ }
return WasmImportCallKind::kWasmToWasm;
}
// Assuming we are calling to JS, check whether this would be a runtime error.
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 5cb24afa81..efd6113f84 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -272,12 +272,16 @@ class WasmGraphBuilder {
Node* Invert(Node* node);
+ Node* GetGlobal(uint32_t index);
+ Node* SetGlobal(uint32_t index, Node* val);
+ Node* GetTable(uint32_t table_index, Node* index,
+ wasm::WasmCodePosition position);
+ Node* SetTable(uint32_t table_index, Node* index, Node* val,
+ wasm::WasmCodePosition position);
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
//-----------------------------------------------------------------------
Node* CurrentMemoryPages();
- Node* GetGlobal(uint32_t index);
- Node* SetGlobal(uint32_t index, Node* val);
Node* TraceMemoryOperation(bool is_store, MachineRepresentation, Node* index,
uint32_t offset, wasm::WasmCodePosition);
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
@@ -321,6 +325,10 @@ class WasmGraphBuilder {
void GetBaseAndOffsetForImportedMutableAnyRefGlobal(
const wasm::WasmGlobal& global, Node** base, Node** offset);
+ void GetTableBaseAndOffset(uint32_t table_index, Node* index,
+ wasm::WasmCodePosition position, Node** base_node,
+ Node** offset_node);
+
// Utilities to manipulate sets of instance cache nodes.
void InitInstanceCache(WasmInstanceCacheNodes* instance_cache);
void PrepareInstanceCacheForLoop(WasmInstanceCacheNodes* instance_cache,
@@ -370,16 +378,15 @@ class WasmGraphBuilder {
Node* size, wasm::WasmCodePosition position);
Node* MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position);
- Node* MemoryDrop(uint32_t data_segment_index,
- wasm::WasmCodePosition position);
+ Node* DataDrop(uint32_t data_segment_index, wasm::WasmCodePosition position);
Node* MemoryFill(Node* dst, Node* fill, Node* size,
wasm::WasmCodePosition position);
Node* TableInit(uint32_t table_index, uint32_t elem_segment_index, Node* dst,
Node* src, Node* size, wasm::WasmCodePosition position);
- Node* TableDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
- Node* TableCopy(uint32_t table_index, Node* dst, Node* src, Node* size,
- wasm::WasmCodePosition position);
+ Node* ElemDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
+ Node* TableCopy(uint32_t table_src_index, uint32_t table_dst_index, Node* dst,
+ Node* src, Node* size, wasm::WasmCodePosition position);
bool has_simd() const { return has_simd_; }
@@ -538,7 +545,6 @@ class WasmGraphBuilder {
Node* BuildAsmjsLoadMem(MachineType type, Node* index);
Node* BuildAsmjsStoreMem(MachineType type, Node* index, Node* val);
- uint32_t GetExceptionEncodedSize(const wasm::WasmException* exception) const;
void BuildEncodeException32BitValue(Node* values_array, uint32_t* index,
Node* value);
Node* BuildDecodeException32BitValue(Node* values_array, uint32_t* index);
diff --git a/deps/v8/src/constant-pool.h b/deps/v8/src/constant-pool.h
index 15faeeaaa2..5b87d9a4a5 100644
--- a/deps/v8/src/constant-pool.h
+++ b/deps/v8/src/constant-pool.h
@@ -88,6 +88,14 @@ class ConstantPoolBuilder {
public:
ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);
+#ifdef DEBUG
+ ~ConstantPoolBuilder() {
+    // Mark labels as unused to prevent DCHECK failures.
+ emitted_label_.Unuse();
+ emitted_label_.UnuseNear();
+ }
+#endif
+
// Add pointer-sized constant to the embedded constant pool
ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
bool sharing_ok) {
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index b132a3e793..faca53c13e 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -11,6 +11,7 @@
#include "src/objects-inl.h"
#include "src/objects/dictionary-inl.h"
#include "src/objects/fixed-array-inl.h"
+#include "src/objects/js-objects-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
@@ -35,9 +36,12 @@ void ScriptContextTable::set_used(int used) {
Handle<Context> ScriptContextTable::GetContext(Isolate* isolate,
Handle<ScriptContextTable> table,
int i) {
- DCHECK(i < table->used());
- return Handle<Context>::cast(
- FixedArray::get(*table, i + kFirstContextSlotIndex, isolate));
+ return handle(table->get_context(i), isolate);
+}
+
+Context ScriptContextTable::get_context(int i) const {
+ DCHECK_LT(i, used());
+ return Context::cast(this->get(i + kFirstContextSlotIndex));
}
OBJECT_CONSTRUCTORS_IMPL(Context, HeapObject)
@@ -73,6 +77,8 @@ void Context::set_scope_info(ScopeInfo scope_info) {
set(SCOPE_INFO_INDEX, scope_info);
}
+Object Context::unchecked_previous() { return get(PREVIOUS_INDEX); }
+
Context Context::previous() {
Object result = get(PREVIOUS_INDEX);
DCHECK(IsBootstrappingOrValidParentContext(result, *this));
@@ -162,8 +168,7 @@ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
CHECK_FOLLOWS2(v3, v4)
int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind,
- bool has_prototype_slot, bool has_shared_name,
- bool needs_home_object) {
+ bool has_shared_name, bool needs_home_object) {
if (IsClassConstructor(kind)) {
// Like the strict function map, but with no 'name' accessor. 'name'
// needs to be the last property and it is added during instantiation,
@@ -193,8 +198,7 @@ int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind,
base = ASYNC_FUNCTION_MAP_INDEX;
- } else if (IsArrowFunction(kind) || IsConciseMethod(kind) ||
- IsAccessorFunction(kind)) {
+ } else if (IsStrictFunctionWithoutPrototype(kind)) {
DCHECK_IMPLIES(IsArrowFunction(kind), !needs_home_object);
CHECK_FOLLOWS4(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
METHOD_WITH_NAME_MAP_INDEX,
@@ -234,11 +238,11 @@ Map Context::GetInitialJSArrayMap(ElementsKind kind) const {
MicrotaskQueue* NativeContext::microtask_queue() const {
return reinterpret_cast<MicrotaskQueue*>(
- READ_INTPTR_FIELD(this, kMicrotaskQueueOffset));
+ READ_INTPTR_FIELD(*this, kMicrotaskQueueOffset));
}
void NativeContext::set_microtask_queue(MicrotaskQueue* microtask_queue) {
- WRITE_INTPTR_FIELD(this, kMicrotaskQueueOffset,
+ WRITE_INTPTR_FIELD(*this, kMicrotaskQueueOffset,
reinterpret_cast<intptr_t>(microtask_queue));
}
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 24bd9bc8b4..22c869cf60 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -37,15 +37,14 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
return result;
}
-bool ScriptContextTable::Lookup(Isolate* isolate,
- Handle<ScriptContextTable> table,
- Handle<String> name, LookupResult* result) {
+bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table,
+ String name, LookupResult* result) {
+ DisallowHeapAllocation no_gc;
for (int i = 0; i < table->used(); i++) {
- Handle<Context> context = GetContext(isolate, table, i);
+ Context context = table->get_context(i);
DCHECK(context->IsScriptContext());
- Handle<ScopeInfo> scope_info(context->scope_info(), context->GetIsolate());
int slot_index = ScopeInfo::ContextSlotIndex(
- scope_info, name, &result->mode, &result->init_flag,
+ context->scope_info(), name, &result->mode, &result->init_flag,
&result->maybe_assigned_flag);
if (slot_index >= 0) {
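
The rewritten Lookup works on raw on-heap objects instead of Handles, which is only safe because the DisallowHeapAllocation scope guarantees that no allocation, and therefore no moving GC, can happen while the raw references are live. A simplified sketch of that RAII idea (not V8's actual implementation):

#include <cassert>
#include <cstddef>
#include <cstdlib>

// Simplified stand-in for v8::internal::DisallowHeapAllocation: a counter
// that allocation entry points assert against while a scope is active.
struct NoAllocScope {
  NoAllocScope() { ++depth; }
  ~NoAllocScope() { --depth; }
  static thread_local int depth;
};
thread_local int NoAllocScope::depth = 0;

// A guarded allocation path: allocating inside a NoAllocScope is a bug,
// because raw object pointers held by the caller could be invalidated.
void* GuardedAllocate(std::size_t bytes) {
  assert(NoAllocScope::depth == 0 && "allocation inside no-allocation scope");
  return std::malloc(bytes);
}
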
@@ -57,7 +56,6 @@ bool ScriptContextTable::Lookup(Isolate* isolate,
return false;
}
-
bool Context::is_declaration_context() {
if (IsFunctionContext() || IsNativeContext() || IsScriptContext() ||
IsModuleContext()) {
@@ -211,27 +209,25 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
Handle<JSReceiver> object(context->extension_receiver(), isolate);
if (context->IsNativeContext()) {
+ DisallowHeapAllocation no_gc;
if (FLAG_trace_contexts) {
PrintF(" - trying other script contexts\n");
}
// Try other script contexts.
- Handle<ScriptContextTable> script_contexts(
- context->global_object()->native_context()->script_context_table(),
- isolate);
+ ScriptContextTable script_contexts =
+ context->global_object()->native_context()->script_context_table();
ScriptContextTable::LookupResult r;
- if (ScriptContextTable::Lookup(isolate, script_contexts, name, &r)) {
+ if (ScriptContextTable::Lookup(isolate, script_contexts, *name, &r)) {
+ Context context = script_contexts->get_context(r.context_index);
if (FLAG_trace_contexts) {
- Handle<Context> c = ScriptContextTable::GetContext(
- isolate, script_contexts, r.context_index);
PrintF("=> found property in script context %d: %p\n",
- r.context_index, reinterpret_cast<void*>(c->ptr()));
+ r.context_index, reinterpret_cast<void*>(context->ptr()));
}
*index = r.slot_index;
*variable_mode = r.mode;
*init_flag = r.init_flag;
*attributes = GetAttributesForMode(r.mode);
- return ScriptContextTable::GetContext(isolate, script_contexts,
- r.context_index);
+ return handle(context, isolate);
}
}
@@ -285,13 +281,14 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
if (context->IsFunctionContext() || context->IsBlockContext() ||
context->IsScriptContext() || context->IsEvalContext() ||
context->IsModuleContext() || context->IsCatchContext()) {
+ DisallowHeapAllocation no_gc;
// Use serialized scope information of functions and blocks to search
// for the context index.
- Handle<ScopeInfo> scope_info(context->scope_info(), isolate);
+ ScopeInfo scope_info = context->scope_info();
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
- int slot_index = ScopeInfo::ContextSlotIndex(scope_info, name, &mode,
+ int slot_index = ScopeInfo::ContextSlotIndex(scope_info, *name, &mode,
&flag, &maybe_assigned_flag);
DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) {
@@ -334,7 +331,7 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
int cell_index =
- scope_info->ModuleIndex(name, &mode, &flag, &maybe_assigned_flag);
+ scope_info->ModuleIndex(*name, &mode, &flag, &maybe_assigned_flag);
if (cell_index != 0) {
if (FLAG_trace_contexts) {
PrintF("=> found in module imports or exports\n");
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index dec66691d5..af78d2eae2 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -5,8 +5,8 @@
#ifndef V8_CONTEXTS_H_
#define V8_CONTEXTS_H_
+#include "src/function-kind.h"
#include "src/objects/fixed-array.h"
-
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -54,7 +54,6 @@ enum ContextLookupFlags {
V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
V(OBJECT_KEYS, JSFunction, object_keys) \
- V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
@@ -189,9 +188,9 @@ enum ContextLookupFlags {
V(JS_MODULE_NAMESPACE_MAP, Map, js_module_namespace_map) \
V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
V(JS_SET_MAP_INDEX, Map, js_set_map) \
- V(JS_WEAK_CELL_MAP_INDEX, Map, js_weak_cell_map) \
- V(JS_WEAK_FACTORY_CLEANUP_ITERATOR_MAP_INDEX, Map, \
- js_weak_factory_cleanup_iterator_map) \
+ V(WEAK_CELL_MAP_INDEX, Map, weak_cell_map) \
+ V(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_MAP_INDEX, Map, \
+ js_finalization_group_cleanup_iterator_map) \
V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
V(JS_WEAK_REF_MAP_INDEX, Map, js_weak_ref_map) \
V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
@@ -234,8 +233,6 @@ enum ContextLookupFlags {
V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info) \
- V(REGEXP_INTERNAL_MATCH_INFO_INDEX, RegExpMatchInfo, \
- regexp_internal_match_info) \
V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \
V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
initial_regexp_string_iterator_prototype_map) \
@@ -305,6 +302,7 @@ enum ContextLookupFlags {
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \
+ V(TEMPLATE_WEAKMAP_INDEX, HeapObject, template_weakmap) \
V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \
V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \
V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
@@ -370,14 +368,15 @@ class ScriptContextTable : public FixedArray {
static inline Handle<Context> GetContext(Isolate* isolate,
Handle<ScriptContextTable> table,
int i);
+ inline Context get_context(int i) const;
// Lookup a variable `name` in a ScriptContextTable.
// If it returns true, the variable is found and `result` contains
// valid information about its location.
// If it returns false, `result` is untouched.
V8_WARN_UNUSED_RESULT
- static bool Lookup(Isolate* isolate, Handle<ScriptContextTable> table,
- Handle<String> name, LookupResult* result);
+ static bool Lookup(Isolate* isolate, ScriptContextTable table, String name,
+ LookupResult* result);
V8_WARN_UNUSED_RESULT
static Handle<ScriptContextTable> Extend(Handle<ScriptContextTable> table,
@@ -540,6 +539,8 @@ class Context : public HeapObject {
// Direct slot access.
inline void set_scope_info(ScopeInfo scope_info);
+
+ inline Object unchecked_previous();
inline Context previous();
inline void set_previous(Context context);
@@ -641,8 +642,7 @@ class Context : public HeapObject {
bool* is_sloppy_function_name = nullptr);
static inline int FunctionMapIndex(LanguageMode language_mode,
- FunctionKind kind, bool has_prototype_slot,
- bool has_shared_name,
+ FunctionKind kind, bool has_shared_name,
bool needs_home_object);
static int ArrayMapIndex(ElementsKind elements_kind) {
@@ -668,7 +668,7 @@ class Context : public HeapObject {
static bool IsBootstrappingOrValidParentContext(Object object, Context kid);
#endif
- OBJECT_CONSTRUCTORS(Context, HeapObject)
+ OBJECT_CONSTRUCTORS(Context, HeapObject);
};
class NativeContext : public Context {
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 3bdb2efcb4..8aaeae9e7a 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -59,10 +59,10 @@ inline unsigned int FastD2UI(double x) {
inline float DoubleToFloat32(double x) {
- // TODO(yangguo): This static_cast is implementation-defined behaviour in C++,
- // so we may need to do the conversion manually instead to match the spec.
- volatile float f = static_cast<float>(x);
- return f;
+ typedef std::numeric_limits<float> limits;
+ if (x > limits::max()) return limits::infinity();
+ if (x < limits::lowest()) return -limits::infinity();
+ return static_cast<float>(x);
}
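
This resolves the old TODO: casting a finite double whose value lies outside float's range is undefined in C++, so the new code clamps to ±infinity before the narrowing cast (NaN falls through both comparisons and converts fine). The replacement logic, reproduced stand-alone with a few spot checks:

#include <cassert>
#include <cmath>
#include <limits>

float DoubleToFloat32(double x) {
  typedef std::numeric_limits<float> limits;
  if (x > limits::max()) return limits::infinity();
  if (x < limits::lowest()) return -limits::infinity();
  return static_cast<float>(x);  // now in range (or NaN), so well defined
}

int main() {
  assert(std::isinf(DoubleToFloat32(1e40)));  // above FLT_MAX -> +infinity
  assert(DoubleToFloat32(-1e40) < 0.0f);      // below lowest -> -infinity
  assert(DoubleToFloat32(1.5) == 1.5f);       // in-range values unchanged
  assert(std::isnan(DoubleToFloat32(NAN)));   // NaN passes through
  return 0;
}
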
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index dd25b74aed..9bf4b08731 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -5,10 +5,9 @@
#ifndef V8_CONVERSIONS_H_
#define V8_CONVERSIONS_H_
-#include <limits>
-
#include "src/base/logging.h"
-#include "src/utils.h"
+#include "src/globals.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 4b05a6693e..0eb2996087 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -723,10 +723,10 @@ class RuntimeCallTimer final {
V(Context_New) \
V(Context_NewRemoteContext) \
V(DataView_New) \
- V(Date_DateTimeConfigurationChangeNotification) \
V(Date_New) \
V(Date_NumberValue) \
V(Debug_Call) \
+ V(debug_GetPrivateFields) \
V(Error_New) \
V(External_New) \
V(Float32Array_New) \
@@ -742,6 +742,8 @@ class RuntimeCallTimer final {
V(Int16Array_New) \
V(Int32Array_New) \
V(Int8Array_New) \
+ V(Isolate_DateTimeConfigurationChangeNotification) \
+ V(Isolate_LocaleConfigurationChangeNotification) \
V(JSON_Parse) \
V(JSON_Stringify) \
V(Map_AsArray) \
@@ -839,6 +841,7 @@ class RuntimeCallTimer final {
V(SymbolObject_New) \
V(SymbolObject_SymbolValue) \
V(SyntaxError_New) \
+ V(TracedGlobal_New) \
V(TryCatch_StackTrace) \
V(TypeError_New) \
V(Uint16Array_New) \
@@ -879,6 +882,7 @@ class RuntimeCallTimer final {
V(CompileBackgroundRewriteReturnResult) \
V(CompileBackgroundScopeAnalysis) \
V(CompileBackgroundScript) \
+ V(CompileCollectSourcePositions) \
V(CompileDeserialize) \
V(CompileEnqueueOnDispatcher) \
V(CompileEval) \
@@ -998,6 +1002,7 @@ class RuntimeCallTimer final {
V(LoadIC_StringWrapperLength) \
V(StoreGlobalIC_SlowStub) \
V(StoreGlobalIC_StoreScriptContextField) \
+ V(StoreGlobalIC_Premonomorphic) \
V(StoreIC_HandlerCacheHit_Accessor) \
V(StoreIC_NonReceiver) \
V(StoreIC_Premonomorphic) \
@@ -1192,10 +1197,10 @@ class RuntimeCallTimerScope {
HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22) \
HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
/* Asm/Wasm. */ \
- HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 100000, \
+ HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 1000000, \
51) \
HR(wasm_functions_per_wasm_module, V8.WasmFunctionsPerModule.wasm, 1, \
- 100000, 51) \
+ 1000000, 51) \
HR(array_buffer_big_allocations, V8.ArrayBufferLargeAllocations, 0, 4096, \
13) \
HR(array_buffer_new_size_failures, V8.ArrayBufferNewSizeFailures, 0, 4096, \
@@ -1244,6 +1249,8 @@ class RuntimeCallTimerScope {
HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000, \
MILLISECOND) \
/* Compilation times. */ \
+ HT(collect_source_positions, V8.CollectSourcePositions, 1000000, \
+ MICROSECOND) \
HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND) \
HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND) \
/* Serialization as part of compilation (code caching) */ \
@@ -1323,9 +1330,7 @@ class RuntimeCallTimerScope {
HT(compile_script_on_background, \
V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
HT(compile_function_on_background, \
- V8.CompileFunctionMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
- HT(gc_parallel_task_latency, V8.GC.ParallelTaskLatencyMicroSeconds, 1000000, \
- MICROSECOND)
+ V8.CompileFunctionMicroSeconds.BackgroundThread, 1000000, MICROSECOND)
#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
AHT(compile_lazy, V8.CompileLazyMicroSeconds)
@@ -1424,7 +1429,6 @@ class RuntimeCallTimerScope {
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
SC(string_add_runtime, V8.StringAddRuntime) \
SC(string_add_native, V8.StringAddNative) \
- SC(string_add_runtime_ext_to_one_byte, V8.StringAddRuntimeExtToOneByte) \
SC(sub_string_runtime, V8.SubStringRuntime) \
SC(sub_string_native, V8.SubStringNative) \
SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
diff --git a/deps/v8/src/cpu-features.h b/deps/v8/src/cpu-features.h
index 310fafe272..bff6ef6f0b 100644
--- a/deps/v8/src/cpu-features.h
+++ b/deps/v8/src/cpu-features.h
@@ -104,8 +104,8 @@ class CpuFeatures : public AllStatic {
static void PrintFeatures();
private:
+ friend void V8_EXPORT_PRIVATE FlushInstructionCache(void*, size_t);
friend class ExternalReference;
- friend class AssemblerBase;
// Flush instruction cache.
static void FlushICache(void* start, size_t size);
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index cd54285d3a..0c069ba713 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -36,10 +36,14 @@
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/ostreams.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parsing.h"
+#include "src/parsing/scanner-character-streams.h"
#include "src/snapshot/natives.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
#include "src/v8.h"
+#include "src/vm-state-inl.h"
#include "src/wasm/wasm-engine.h"
#if !defined(_WIN32) && !defined(_WIN64)
@@ -464,6 +468,27 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Value> name, PrintResult print_result,
ReportExceptions report_exceptions,
ProcessMessageQueue process_message_queue) {
+ if (i::FLAG_parse_only) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::VMState<PARSER> state(i_isolate);
+ i::Handle<i::String> str = Utils::OpenHandle(*(source));
+
+ // Set up ParseInfo.
+ i::ParseInfo parse_info(i_isolate);
+ parse_info.set_toplevel();
+ parse_info.set_allow_lazy_parsing();
+ parse_info.set_language_mode(
+ i::construct_language_mode(i::FLAG_use_strict));
+ parse_info.set_script(
+ parse_info.CreateScript(i_isolate, str, options.compile_options));
+
+ if (!i::parsing::ParseProgram(&parse_info, i_isolate)) {
+ fprintf(stderr, "Failed parsing\n");
+ return false;
+ }
+ return true;
+ }
+
HandleScope handle_scope(isolate);
TryCatch try_catch(isolate);
try_catch.SetVerbose(true);
@@ -2285,6 +2310,7 @@ class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
void Send(const v8_inspector::StringView& string) {
v8::Isolate::AllowJavascriptExecutionScope allow_script(isolate_);
+ v8::HandleScope handle_scope(isolate_);
int length = static_cast<int>(string.length());
DCHECK_LT(length, v8::String::kMaxLength);
Local<String> message =
@@ -2382,7 +2408,10 @@ class InspectorClient : public v8_inspector::V8InspectorClient {
std::unique_ptr<uint16_t[]> buffer(new uint16_t[length]);
message->Write(isolate, buffer.get(), 0, length);
v8_inspector::StringView message_view(buffer.get(), length);
- session->dispatchProtocolMessage(message_view);
+ {
+ v8::SealHandleScope seal_handle_scope(isolate);
+ session->dispatchProtocolMessage(message_view);
+ }
args.GetReturnValue().Set(True(isolate));
}
@@ -2409,8 +2438,8 @@ bool ends_with(const char* input, const char* suffix) {
return false;
}
-void SourceGroup::Execute(Isolate* isolate) {
- bool exception_was_thrown = false;
+bool SourceGroup::Execute(Isolate* isolate) {
+ bool success = true;
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
@@ -2426,7 +2455,7 @@ void SourceGroup::Execute(Isolate* isolate) {
if (!Shell::ExecuteString(isolate, source, file_name,
Shell::kNoPrintResult, Shell::kReportExceptions,
Shell::kNoProcessMessageQueue)) {
- exception_was_thrown = true;
+ success = false;
break;
}
++i;
@@ -2434,7 +2463,7 @@ void SourceGroup::Execute(Isolate* isolate) {
} else if (ends_with(arg, ".mjs")) {
Shell::set_script_executed();
if (!Shell::ExecuteModule(isolate, arg)) {
- exception_was_thrown = true;
+ success = false;
break;
}
continue;
@@ -2443,7 +2472,7 @@ void SourceGroup::Execute(Isolate* isolate) {
arg = argv_[++i];
Shell::set_script_executed();
if (!Shell::ExecuteModule(isolate, arg)) {
- exception_was_thrown = true;
+ success = false;
break;
}
continue;
@@ -2466,13 +2495,11 @@ void SourceGroup::Execute(Isolate* isolate) {
if (!Shell::ExecuteString(isolate, source, file_name, Shell::kNoPrintResult,
Shell::kReportExceptions,
Shell::kProcessMessageQueue)) {
- exception_was_thrown = true;
+ success = false;
break;
}
}
- if (exception_was_thrown != Shell::options.expected_to_throw) {
- base::OS::ExitProcess(1);
- }
+ return success;
}
Local<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
@@ -2929,10 +2956,11 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].StartExecuteInThread();
}
+ bool success = true;
{
SetWaitUntilDone(isolate, false);
if (options.lcov_file) {
- debug::Coverage::SelectMode(isolate, debug::Coverage::kBlockCount);
+ debug::Coverage::SelectMode(isolate, debug::CoverageMode::kBlockCount);
}
HandleScope scope(isolate);
Local<Context> context = CreateEvaluationContext(isolate);
@@ -2945,8 +2973,8 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
Context::Scope cscope(context);
InspectorClient inspector_client(context, options.enable_inspector);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- options.isolate_sources[0].Execute(isolate);
- CompleteMessageLoop(isolate);
+ if (!options.isolate_sources[0].Execute(isolate)) success = false;
+ if (!CompleteMessageLoop(isolate)) success = false;
}
if (!use_existing_context) {
DisposeModuleEmbedderData(context);
@@ -2962,7 +2990,8 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
}
}
CleanupWorkers();
- return 0;
+ // In order to finish successfully, success must be != expected_to_throw.
+ return success == Shell::options.expected_to_throw ? 1 : 0;
}
@@ -2996,8 +3025,7 @@ bool ProcessMessages(
const std::function<platform::MessageLoopBehavior()>& behavior) {
while (true) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::SaveContext saved_context(i_isolate);
- i_isolate->set_context(i::Context());
+ i::SaveAndSwitchContext saved_context(i_isolate, i::Context());
SealHandleScope shs(isolate);
while (v8::platform::PumpMessageLoop(g_default_platform, isolate,
behavior())) {
@@ -3025,7 +3053,7 @@ bool ProcessMessages(
}
} // anonymous namespace
-void Shell::CompleteMessageLoop(Isolate* isolate) {
+bool Shell::CompleteMessageLoop(Isolate* isolate) {
auto get_waiting_behaviour = [isolate]() {
base::MutexGuard guard(isolate_status_lock_.Pointer());
DCHECK_GT(isolate_status_.count(isolate), 0);
@@ -3037,7 +3065,7 @@ void Shell::CompleteMessageLoop(Isolate* isolate) {
return should_wait ? platform::MessageLoopBehavior::kWaitForWork
: platform::MessageLoopBehavior::kDoNotWait;
};
- ProcessMessages(isolate, get_waiting_behaviour);
+ return ProcessMessages(isolate, get_waiting_behaviour);
}
bool Shell::EmptyMessageQueues(Isolate* isolate) {
@@ -3244,10 +3272,9 @@ class Deserializer : public ValueDeserializer::Delegate {
Isolate* isolate, uint32_t clone_id) override {
DCHECK_NOT_NULL(data_);
if (clone_id < data_->shared_array_buffer_contents().size()) {
- SharedArrayBuffer::Contents contents =
+ const SharedArrayBuffer::Contents contents =
data_->shared_array_buffer_contents().at(clone_id);
- return SharedArrayBuffer::New(isolate_, contents.Data(),
- contents.ByteLength());
+ return SharedArrayBuffer::New(isolate_, contents);
}
return MaybeLocal<SharedArrayBuffer>();
}
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 568a54188f..8a04f89153 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -94,7 +94,8 @@ class SourceGroup {
void End(int offset) { end_offset_ = offset; }
- void Execute(Isolate* isolate);
+ // Returns true on success, false if an uncaught exception was thrown.
+ bool Execute(Isolate* isolate);
void StartExecuteInThread();
void WaitForThread();
@@ -417,7 +418,7 @@ class Shell : public i::AllStatic {
static void OnExit(Isolate* isolate);
static void CollectGarbage(Isolate* isolate);
static bool EmptyMessageQueues(Isolate* isolate);
- static void CompleteMessageLoop(Isolate* isolate);
+ static bool CompleteMessageLoop(Isolate* isolate);
static std::unique_ptr<SerializationData> SerializeValue(
Isolate* isolate, Local<Value> value, Local<Value> transfer);
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 8562eb279a..7b6c9e3394 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -34,10 +34,11 @@ DateCache::DateCache()
base::OS::CreateTimezoneCache()
#endif
) {
- ResetDateCache();
+ ResetDateCache(base::TimezoneCache::TimeZoneDetection::kSkip);
}
-void DateCache::ResetDateCache() {
+void DateCache::ResetDateCache(
+ base::TimezoneCache::TimeZoneDetection time_zone_detection) {
if (stamp_->value() >= Smi::kMaxValue) {
stamp_ = Smi::zero();
} else {
@@ -58,7 +59,7 @@ void DateCache::ResetDateCache() {
#ifdef V8_INTL_SUPPORT
}
#endif
- tz_cache_->Clear();
+ tz_cache_->Clear(time_zone_detection);
tz_name_ = nullptr;
dst_tz_name_ = nullptr;
}
@@ -284,14 +285,13 @@ int DateCache::GetLocalOffsetFromOS(int64_t time_ms, bool is_utc) {
void DateCache::ExtendTheAfterSegment(int time_sec, int offset_ms) {
if (after_->offset_ms == offset_ms &&
- after_->start_sec <=
- base::AddWithWraparound(time_sec, kDefaultDSTDeltaInSec) &&
+ after_->start_sec - kDefaultDSTDeltaInSec <= time_sec &&
time_sec <= after_->end_sec) {
// Extend the after_ segment.
after_->start_sec = time_sec;
} else {
// The after_ segment is either invalid or starts too late.
- if (after_->start_sec <= after_->end_sec) {
+ if (!InvalidSegment(after_)) {
// If the after_ segment is valid, replace it with a new segment.
after_ = LeastRecentlyUsedDST(before_);
}
@@ -346,7 +346,7 @@ int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
return before_->offset_ms;
}
- if (time_sec > before_->end_sec + kDefaultDSTDeltaInSec) {
+ if (time_sec - kDefaultDSTDeltaInSec > before_->end_sec) {
// If the before_ segment ends too early, then just
// query for the offset of the time_sec
int offset_ms = GetDaylightSavingsOffsetFromOS(time_sec);
@@ -365,8 +365,11 @@ int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
// Check if after_ segment is invalid or starts too late.
// Note that start_sec of invalid segments is kMaxEpochTimeInSec.
- if (before_->end_sec + kDefaultDSTDeltaInSec <= after_->start_sec) {
- int new_after_start_sec = before_->end_sec + kDefaultDSTDeltaInSec;
+ int new_after_start_sec =
+ before_->end_sec < kMaxEpochTimeInSec - kDefaultDSTDeltaInSec
+ ? before_->end_sec + kDefaultDSTDeltaInSec
+ : kMaxEpochTimeInSec;
+ if (new_after_start_sec <= after_->start_sec) {
int new_offset_ms = GetDaylightSavingsOffsetFromOS(new_after_start_sec);
ExtendTheAfterSegment(new_after_start_sec, new_offset_ms);
} else {
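
Both date.cc rewrites remove additions that could overflow near the epoch limits: the comparison moves the delta onto the side already known to be in range (`time_sec - kDefaultDSTDeltaInSec > before_->end_sec`), and the new segment start clamps at kMaxEpochTimeInSec instead of computing `end_sec + delta` unconditionally. Condensed, with placeholder constants:

#include <cstdint>

constexpr int64_t kDelta = 3600;              // placeholder DST delta, seconds
constexpr int64_t kMaxEpoch = INT64_MAX / 4;  // placeholder epoch limit

// Before: `t > end + kDelta` could wrap when `end` is near the maximum.
// After: move the delta to `t`, which is already range-checked upstream.
bool EndsTooEarly(int64_t t, int64_t end) { return t - kDelta > end; }

// Clamped successor: never computes an out-of-range start time.
int64_t NextSegmentStart(int64_t end) {
  return end < kMaxEpoch - kDelta ? end + kDelta : kMaxEpoch;
}
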
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index 066eb8edaa..991e5b6dd7 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -45,10 +45,9 @@ class DateCache {
tz_cache_ = nullptr;
}
-
// Clears cached timezone information and increments the cache stamp.
- void ResetDateCache();
-
+ void ResetDateCache(
+ base::TimezoneCache::TimeZoneDetection time_zone_detection);
// Computes floor(time_ms / kMsPerDay).
static int DaysFromTime(int64_t time_ms) {
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index c130524f37..e617964e51 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -40,7 +40,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ Mov(sp, fp);
__ Pop(fp, lr); // Frame, Return address.
- __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldrh(x0,
FieldMemOperand(x0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(x2, x0);
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 33223652cf..b693779f5b 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -375,20 +375,20 @@ void ResetAllBlockCounts(SharedFunctionInfo shared) {
}
}
-bool IsBlockMode(debug::Coverage::Mode mode) {
+bool IsBlockMode(debug::CoverageMode mode) {
switch (mode) {
- case debug::Coverage::kBlockBinary:
- case debug::Coverage::kBlockCount:
+ case debug::CoverageMode::kBlockBinary:
+ case debug::CoverageMode::kBlockCount:
return true;
default:
return false;
}
}
-bool IsBinaryMode(debug::Coverage::Mode mode) {
+bool IsBinaryMode(debug::CoverageMode mode) {
switch (mode) {
- case debug::Coverage::kBlockBinary:
- case debug::Coverage::kPreciseBinary:
+ case debug::CoverageMode::kBlockBinary:
+ case debug::CoverageMode::kPreciseBinary:
return true;
default:
return false;
@@ -396,14 +396,14 @@ bool IsBinaryMode(debug::Coverage::Mode mode) {
}
void CollectBlockCoverage(CoverageFunction* function, SharedFunctionInfo info,
- debug::Coverage::Mode mode) {
+ debug::CoverageMode mode) {
DCHECK(IsBlockMode(mode));
function->has_block_coverage = true;
function->blocks = GetSortedBlockData(info);
// If in binary mode, only report counts of 0/1.
- if (mode == debug::Coverage::kBlockBinary) ClampToBinary(function);
+ if (mode == debug::CoverageMode::kBlockBinary) ClampToBinary(function);
// Remove singleton ranges with the same start position as a full range and
// throw away their counts.
@@ -456,20 +456,21 @@ std::unique_ptr<Coverage> Coverage::CollectPrecise(Isolate* isolate) {
}
std::unique_ptr<Coverage> Coverage::CollectBestEffort(Isolate* isolate) {
- return Collect(isolate, v8::debug::Coverage::kBestEffort);
+ return Collect(isolate, v8::debug::CoverageMode::kBestEffort);
}
std::unique_ptr<Coverage> Coverage::Collect(
- Isolate* isolate, v8::debug::Coverage::Mode collectionMode) {
+ Isolate* isolate, v8::debug::CoverageMode collectionMode) {
SharedToCounterMap counter_map;
- const bool reset_count = collectionMode != v8::debug::Coverage::kBestEffort;
+ const bool reset_count =
+ collectionMode != v8::debug::CoverageMode::kBestEffort;
switch (isolate->code_coverage_mode()) {
- case v8::debug::Coverage::kBlockBinary:
- case v8::debug::Coverage::kBlockCount:
- case v8::debug::Coverage::kPreciseBinary:
- case v8::debug::Coverage::kPreciseCount: {
+ case v8::debug::CoverageMode::kBlockBinary:
+ case v8::debug::CoverageMode::kBlockCount:
+ case v8::debug::CoverageMode::kPreciseBinary:
+ case v8::debug::CoverageMode::kPreciseCount: {
// Feedback vectors are already listed to prevent losing them to GC.
DCHECK(isolate->factory()
->feedback_vectors_for_profiling_tools()
@@ -486,11 +487,11 @@ std::unique_ptr<Coverage> Coverage::Collect(
}
break;
}
- case v8::debug::Coverage::kBestEffort: {
+ case v8::debug::CoverageMode::kBestEffort: {
DCHECK(!isolate->factory()
->feedback_vectors_for_profiling_tools()
->IsArrayList());
- DCHECK_EQ(v8::debug::Coverage::kBestEffort, collectionMode);
+ DCHECK_EQ(v8::debug::CoverageMode::kBestEffort, collectionMode);
HeapIterator heap_iterator(isolate->heap());
for (HeapObject current_obj = heap_iterator.next();
!current_obj.is_null(); current_obj = heap_iterator.next()) {
@@ -544,15 +545,15 @@ std::unique_ptr<Coverage> Coverage::Collect(
}
if (count != 0) {
switch (collectionMode) {
- case v8::debug::Coverage::kBlockCount:
- case v8::debug::Coverage::kPreciseCount:
+ case v8::debug::CoverageMode::kBlockCount:
+ case v8::debug::CoverageMode::kPreciseCount:
break;
- case v8::debug::Coverage::kBlockBinary:
- case v8::debug::Coverage::kPreciseBinary:
+ case v8::debug::CoverageMode::kBlockBinary:
+ case v8::debug::CoverageMode::kPreciseBinary:
count = info->has_reported_binary_coverage() ? 0 : 1;
info->set_has_reported_binary_coverage(true);
break;
- case v8::debug::Coverage::kBestEffort:
+ case v8::debug::CoverageMode::kBestEffort:
count = 1;
break;
}
@@ -583,9 +584,9 @@ std::unique_ptr<Coverage> Coverage::Collect(
return result;
}
-void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
+void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
switch (mode) {
- case debug::Coverage::kBestEffort:
+ case debug::CoverageMode::kBestEffort:
// Note that DevTools switches back to best-effort coverage once the
// recording is stopped. Since we delete coverage infos at that point, any
// following coverage recording (without reloads) will be at function
@@ -596,10 +597,10 @@ void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
ReadOnlyRoots(isolate).undefined_value());
}
break;
- case debug::Coverage::kBlockBinary:
- case debug::Coverage::kBlockCount:
- case debug::Coverage::kPreciseBinary:
- case debug::Coverage::kPreciseCount: {
+ case debug::CoverageMode::kBlockBinary:
+ case debug::CoverageMode::kBlockCount:
+ case debug::CoverageMode::kPreciseBinary:
+ case debug::CoverageMode::kPreciseCount: {
HandleScope scope(isolate);
      // Remove all optimized functions. Optimized and inlined functions do not
diff --git a/deps/v8/src/debug/debug-coverage.h b/deps/v8/src/debug/debug-coverage.h
index fc46ebc66e..e319f01a32 100644
--- a/deps/v8/src/debug/debug-coverage.h
+++ b/deps/v8/src/debug/debug-coverage.h
@@ -58,11 +58,11 @@ class Coverage : public std::vector<CoverageScript> {
static std::unique_ptr<Coverage> CollectBestEffort(Isolate* isolate);
// Select code coverage mode.
- static void SelectMode(Isolate* isolate, debug::Coverage::Mode mode);
+ static void SelectMode(Isolate* isolate, debug::CoverageMode mode);
private:
static std::unique_ptr<Coverage> Collect(
- Isolate* isolate, v8::debug::Coverage::Mode collectionMode);
+ Isolate* isolate, v8::debug::CoverageMode collectionMode);
Coverage() = default;
};
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index a0427647d8..40f2dc4f3a 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -283,7 +283,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ThrowReferenceError) \
V(ThrowSymbolIteratorInvalid) \
/* Strings */ \
- V(RegExpInternalReplace) \
V(StringIncludes) \
V(StringIndexOf) \
V(StringReplaceOneCharWithString) \
@@ -925,6 +924,7 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
case Builtins::kArraySomeLoopContinuation:
case Builtins::kArrayTimSort:
case Builtins::kCall_ReceiverIsAny:
+ case Builtins::kCall_ReceiverIsNullOrUndefined:
case Builtins::kCallWithArrayLike:
case Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
case Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit:
@@ -949,7 +949,6 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
case Builtins::kFlattenIntoArray:
case Builtins::kGetProperty:
case Builtins::kHasProperty:
- case Builtins::kMathPowInternal:
case Builtins::kNonNumberToNumber:
case Builtins::kNonPrimitiveToPrimitive_Number:
case Builtins::kNumberToString:
@@ -981,6 +980,14 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
default:
return false;
}
+ case Builtins::kFastCreateDataProperty:
+ switch (caller) {
+ case Builtins::kArrayPrototypeSlice:
+ case Builtins::kArrayFilter:
+ return true;
+ default:
+ return false;
+ }
case Builtins::kSetProperty:
switch (caller) {
case Builtins::kArrayPrototypeSlice:
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index a67ca5bc6b..a3a5449d47 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -89,7 +89,7 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &init_flag,
+ return ScopeInfo::ContextSlotIndex(*info, *parameter_name, &mode, &init_flag,
&maybe_assigned_flag) != -1;
}
} // namespace internal
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 402130bb63..c30b96387c 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -49,6 +49,13 @@ void ClearBreakOnNextFunctionCall(Isolate* isolate);
*/
MaybeLocal<Array> GetInternalProperties(Isolate* isolate, Local<Value> value);
+/**
+ * Returns an array of private fields specific to the value type. The result
+ * has the following format: [<name>, <value>, ..., <name>, <value>]. The
+ * result array will be allocated in the current context.
+ */
+MaybeLocal<Array> GetPrivateFields(Local<Context> context, Local<Object> value);
+
enum ExceptionBreakState {
NoBreakOnException = 0,
BreakOnUncaughtException = 1,
@@ -225,25 +232,6 @@ class V8_EXPORT_PRIVATE Coverage {
public:
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Coverage);
- enum Mode {
- // Make use of existing information in feedback vectors on the heap.
- // Only return a yes/no result. Optimization and GC are not affected.
- // Collecting best effort coverage does not reset counters.
- kBestEffort,
- // Disable optimization and prevent feedback vectors from being garbage
- // collected in order to preserve precise invocation counts. Collecting
- // precise count coverage resets counters to get incremental updates.
- kPreciseCount,
- // We are only interested in a yes/no result for the function. Optimization
- // and GC can be allowed once a function has been invoked. Collecting
- // precise binary coverage resets counters for incremental updates.
- kPreciseBinary,
- // Similar to the precise coverage modes but provides coverage at a
- // lower granularity. Design doc: goo.gl/lA2swZ.
- kBlockCount,
- kBlockBinary,
- };
-
// Forward declarations.
class ScriptData;
class FunctionData;
@@ -310,7 +298,7 @@ class V8_EXPORT_PRIVATE Coverage {
static Coverage CollectPrecise(Isolate* isolate);
static Coverage CollectBestEffort(Isolate* isolate);
- static void SelectMode(Isolate* isolate, Mode mode);
+ static void SelectMode(Isolate* isolate, CoverageMode mode);
size_t ScriptCount() const;
ScriptData GetScriptData(size_t i) const;
@@ -329,10 +317,6 @@ class V8_EXPORT_PRIVATE TypeProfile {
public:
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(TypeProfile);
- enum Mode {
- kNone,
- kCollect,
- };
class ScriptData; // Forward declaration.
class V8_EXPORT_PRIVATE Entry {
@@ -372,7 +356,7 @@ class V8_EXPORT_PRIVATE TypeProfile {
static TypeProfile Collect(Isolate* isolate);
- static void SelectMode(Isolate* isolate, Mode mode);
+ static void SelectMode(Isolate* isolate, TypeProfileMode mode);
size_t ScriptCount() const;
ScriptData GetScriptData(size_t i) const;
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 65794e85fb..816ed90b62 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -155,6 +155,11 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
DCHECK(non_locals_.is_null());
non_locals_ = info_->literal()->scope()->CollectNonLocals(
isolate_, info_, StringSet::New(isolate_));
+ if (!closure_scope_->has_this_declaration() &&
+ closure_scope_->HasThisReference()) {
+ non_locals_ = StringSet::Add(isolate_, non_locals_,
+ isolate_->factory()->this_string());
+ }
}
CHECK(DeclarationScope::Analyze(info_));
@@ -304,7 +309,8 @@ ScopeIterator::ScopeType ScopeIterator::Type() const {
switch (current_scope_->scope_type()) {
case FUNCTION_SCOPE:
DCHECK_IMPLIES(current_scope_->NeedsContext(),
- context_->IsFunctionContext());
+ context_->IsFunctionContext() ||
+ context_->IsDebugEvaluateContext());
return ScopeTypeLocal;
case MODULE_SCOPE:
DCHECK_IMPLIES(current_scope_->NeedsContext(),
@@ -316,9 +322,8 @@ ScopeIterator::ScopeType ScopeIterator::Type() const {
context_->IsScriptContext() || context_->IsNativeContext());
return ScopeTypeScript;
case WITH_SCOPE:
- DCHECK_IMPLIES(
- current_scope_->NeedsContext(),
- context_->IsWithContext() || context_->IsDebugEvaluateContext());
+ DCHECK_IMPLIES(current_scope_->NeedsContext(),
+ context_->IsWithContext());
return ScopeTypeWith;
case CATCH_SCOPE:
DCHECK(context_->IsCatchContext());
@@ -340,7 +345,8 @@ ScopeIterator::ScopeType ScopeIterator::Type() const {
// fake it.
return seen_script_scope_ ? ScopeTypeGlobal : ScopeTypeScript;
}
- if (context_->IsFunctionContext() || context_->IsEvalContext()) {
+ if (context_->IsFunctionContext() || context_->IsEvalContext() ||
+ context_->IsDebugEvaluateContext()) {
return ScopeTypeClosure;
}
if (context_->IsCatchContext()) {
@@ -355,7 +361,7 @@ ScopeIterator::ScopeType ScopeIterator::Type() const {
if (context_->IsScriptContext()) {
return ScopeTypeScript;
}
- DCHECK(context_->IsWithContext() || context_->IsDebugEvaluateContext());
+ DCHECK(context_->IsWithContext());
return ScopeTypeWith;
}
@@ -513,6 +519,8 @@ int ScopeIterator::GetSourcePosition() {
return frame_inspector_->GetSourcePosition();
} else {
DCHECK(!generator_.is_null());
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(
+ isolate_, handle(generator_->function()->shared(), isolate_));
return generator_->source_position();
}
}
@@ -577,7 +585,7 @@ void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
{
String raw_name;
scope_info->ModuleVariable(i, &raw_name, &index);
- CHECK(!ScopeInfo::VariableIsSynthetic(raw_name));
+ if (ScopeInfo::VariableIsSynthetic(raw_name)) continue;
name = handle(raw_name, isolate_);
}
Handle<Object> value = Module::LoadVariable(isolate_, module, index);
@@ -605,15 +613,20 @@ bool ScopeIterator::VisitContextLocals(const Visitor& visitor,
}
bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
- for (Variable* var : *current_scope_->locals()) {
- if (var->is_this()) {
- // Only collect "this" for DebugEvaluate. The debugger will manually add
- // "this" in a different way, and if we'd add it here as well, it shows up
- // twice.
- if (mode == Mode::ALL) continue;
- } else if (ScopeInfo::VariableIsSynthetic(*var->name())) {
- continue;
+ if (mode == Mode::STACK && current_scope_->is_declaration_scope() &&
+ current_scope_->AsDeclarationScope()->has_this_declaration()) {
+ Handle<Object> receiver = frame_inspector_ == nullptr
+ ? handle(generator_->receiver(), isolate_)
+ : frame_inspector_->GetReceiver();
+ if (receiver->IsOptimizedOut(isolate_) || receiver->IsTheHole(isolate_)) {
+ receiver = isolate_->factory()->undefined_value();
}
+ if (visitor(isolate_->factory()->this_string(), receiver)) return true;
+ }
+
+ for (Variable* var : *current_scope_->locals()) {
+ DCHECK(!var->is_this());
+ if (ScopeInfo::VariableIsSynthetic(*var->name())) continue;
int index = var->index();
Handle<Object> value;
@@ -623,31 +636,21 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
break;
case VariableLocation::UNALLOCATED:
- if (!var->is_this()) continue;
- // No idea why this diverges...
- value = frame_inspector_->GetReceiver();
- break;
+ continue;
case VariableLocation::PARAMETER: {
if (frame_inspector_ == nullptr) {
// Get the variable from the suspended generator.
DCHECK(!generator_.is_null());
- if (var->is_this()) {
- value = handle(generator_->receiver(), isolate_);
- } else {
- FixedArray parameters_and_registers =
- generator_->parameters_and_registers();
- DCHECK_LT(index, parameters_and_registers->length());
- value = handle(parameters_and_registers->get(index), isolate_);
- }
+ FixedArray parameters_and_registers =
+ generator_->parameters_and_registers();
+ DCHECK_LT(index, parameters_and_registers->length());
+ value = handle(parameters_and_registers->get(index), isolate_);
} else {
- value = var->is_this() ? frame_inspector_->GetReceiver()
- : frame_inspector_->GetParameter(index);
+ value = frame_inspector_->GetParameter(index);
if (value->IsOptimizedOut(isolate_)) {
value = isolate_->factory()->undefined_value();
- } else if (var->is_this() && value->IsTheHole(isolate_)) {
- value = isolate_->factory()->undefined_value();
}
}
break;
@@ -727,7 +730,7 @@ void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode) const {
// but don't force |this| to be context-allocated. Otherwise we'd find the
// wrong |this| value.
if (!closure_scope_->has_this_declaration() &&
- !non_locals_->Has(isolate_, isolate_->factory()->this_string())) {
+ !closure_scope_->HasThisReference()) {
if (visitor(isolate_->factory()->this_string(),
isolate_->factory()->undefined_value()))
return;
@@ -863,13 +866,13 @@ bool ScopeIterator::SetContextExtensionValue(Handle<String> variable_name,
bool ScopeIterator::SetContextVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
- Handle<ScopeInfo> scope_info(context_->scope_info(), isolate_);
-
+ DisallowHeapAllocation no_gc;
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
- int slot_index = ScopeInfo::ContextSlotIndex(scope_info, variable_name, &mode,
- &flag, &maybe_assigned_flag);
+ int slot_index =
+ ScopeInfo::ContextSlotIndex(context_->scope_info(), *variable_name, &mode,
+ &flag, &maybe_assigned_flag);
if (slot_index < 0) return false;
context_->set(slot_index, *new_value);
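
Note: the rewritten setter above drops its Handle wrapper because a DisallowHeapAllocation scope is now live for the whole lookup; with no allocation there is no GC, so raw on-heap references cannot be moved out from under ScopeInfo::ContextSlotIndex. A standalone sketch of why such a scope makes raw references safe (toy arena, not V8's heap):

    #include <cassert>
    #include <vector>

    // Toy model: a moving allocator invalidates raw pointers on allocation,
    // so raw pointers are only safe inside a region that allocates nothing.
    struct Arena {
      std::vector<int> storage;
      bool allow_alloc = true;
      int* Alloc(int v) {
        assert(allow_alloc);   // analogue of the DisallowHeapAllocation DCHECK
        storage.push_back(v);  // may reallocate: older pointers dangle
        return &storage.back();
      }
    };

    struct DisallowAlloc {     // analogue of DisallowHeapAllocation
      Arena& arena;
      explicit DisallowAlloc(Arena& a) : arena(a) { arena.allow_alloc = false; }
      ~DisallowAlloc() { arena.allow_alloc = true; }
    };
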
@@ -878,12 +881,13 @@ bool ScopeIterator::SetContextVariableValue(Handle<String> variable_name,
bool ScopeIterator::SetModuleVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
+ DisallowHeapAllocation no_gc;
int cell_index;
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
cell_index = context_->scope_info()->ModuleIndex(
- variable_name, &mode, &init_flag, &maybe_assigned_flag);
+ *variable_name, &mode, &init_flag, &maybe_assigned_flag);
// Setting imports is currently not supported.
if (ModuleDescriptor::GetCellIndexKind(cell_index) !=
@@ -902,7 +906,7 @@ bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
context_->global_object()->native_context()->script_context_table(),
isolate_);
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate_, script_contexts, variable_name,
+ if (ScriptContextTable::Lookup(isolate_, *script_contexts, *variable_name,
&lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate_, script_contexts, lookup_result.context_index);
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 99d9be380d..214ef0d48b 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -95,14 +95,13 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
if (!scope_iterator.GetNonLocals()->Has(isolate_,
isolate_->factory()->this_string()))
return v8::MaybeLocal<v8::Value>();
-
- Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
+ DisallowHeapAllocation no_gc;
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
int slot_index = ScopeInfo::ContextSlotIndex(
- scope_info, isolate_->factory()->this_string(), &mode, &flag,
- &maybe_assigned_flag);
+ context->scope_info(), ReadOnlyRoots(isolate_->heap()).this_string(),
+ &mode, &flag, &maybe_assigned_flag);
if (slot_index < 0) return v8::MaybeLocal<v8::Value>();
Handle<Object> value = handle(context->get(slot_index), isolate_);
if (value->IsTheHole(isolate_)) return v8::MaybeLocal<v8::Value>();
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
index 1f2eb7b44e..c1fe308508 100644
--- a/deps/v8/src/debug/debug-type-profile.cc
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -71,10 +71,10 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
return result;
}
-void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode) {
+void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfileMode mode) {
HandleScope handle_scope(isolate);
- if (mode == debug::TypeProfile::Mode::kNone) {
+ if (mode == debug::TypeProfileMode::kNone) {
if (!isolate->factory()
->feedback_vectors_for_profiling_tools()
->IsUndefined(isolate)) {
@@ -106,7 +106,7 @@ void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode) {
}
}
} else {
- DCHECK_EQ(debug::TypeProfile::Mode::kCollect, mode);
+ DCHECK_EQ(debug::TypeProfileMode::kCollect, mode);
isolate->MaybeInitializeVectorListFromHeap();
}
isolate->set_type_profile_mode(mode);
diff --git a/deps/v8/src/debug/debug-type-profile.h b/deps/v8/src/debug/debug-type-profile.h
index 3bdcfc62ba..37f2b659d8 100644
--- a/deps/v8/src/debug/debug-type-profile.h
+++ b/deps/v8/src/debug/debug-type-profile.h
@@ -34,7 +34,7 @@ struct TypeProfileScript {
class TypeProfile : public std::vector<TypeProfileScript> {
public:
static std::unique_ptr<TypeProfile> Collect(Isolate* isolate);
- static void SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode);
+ static void SelectMode(Isolate* isolate, debug::TypeProfileMode mode);
private:
TypeProfile() = default;
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 2789513514..9f83b58fbe 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -23,6 +23,7 @@
#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/globals.h"
+#include "src/heap/heap-inl.h" // For NextDebuggingId.
#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/interpreter.h"
@@ -35,6 +36,7 @@
#include "src/objects/slots.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
+#include "src/v8threads.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
@@ -1335,10 +1337,10 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
bool was_compiled = false;
for (const auto& candidate : candidates) {
- // Code that cannot be compiled lazily are internal and not debuggable.
- DCHECK(candidate->allows_lazy_compilation());
IsCompiledScope is_compiled_scope(candidate->is_compiled_scope());
if (!is_compiled_scope.is_compiled()) {
+ // Code that cannot be compiled lazily is internal and not debuggable.
+ DCHECK(candidate->allows_lazy_compilation());
if (!Compiler::Compile(candidate, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return false;
@@ -1364,6 +1366,22 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
UNREACHABLE();
}
+MaybeHandle<JSArray> Debug::GetPrivateFields(Handle<JSReceiver> receiver) {
+ Factory* factory = isolate_->factory();
+
+ Handle<FixedArray> internal_fields;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate_, internal_fields,
+ JSReceiver::GetPrivateEntries(isolate_, receiver),
+ JSArray);
+
+ int nof_internal_fields = internal_fields->length();
+ if (nof_internal_fields == 0) {
+ return factory->NewJSArray(0);
+ }
+
+ return factory->NewJSArrayWithElements(internal_fields);
+}
+
class SharedFunctionInfoFinder {
public:
explicit SharedFunctionInfoFinder(int target_position)
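
Note: in the new GetPrivateFields above, ASSIGN_RETURN_ON_EXCEPTION is V8's early-return plumbing for MaybeHandle: if GetPrivateEntries threw, GetPrivateFields immediately returns an empty MaybeHandle<JSArray>. The control flow it hides is essentially the std::optional pattern below (a hedged analogy with toy types, not V8's real ones):

    #include <optional>
    #include <vector>

    using Fields = std::vector<int>;

    // Stand-in for JSReceiver::GetPrivateEntries: empty optional == "threw".
    std::optional<Fields> GetEntries(bool ok) {
      if (!ok) return std::nullopt;
      return Fields{1, 2, 3};
    }

    std::optional<Fields> GetPrivateFieldsSketch(bool ok) {
      // ASSIGN_RETURN_ON_EXCEPTION: assign on success, early-return on failure.
      std::optional<Fields> maybe = GetEntries(ok);
      if (!maybe) return std::nullopt;
      if (maybe->empty()) return Fields{};  // NewJSArray(0) analogue
      return maybe;                         // NewJSArrayWithElements analogue
    }
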
@@ -1501,6 +1519,8 @@ void Debug::CreateBreakInfo(Handle<SharedFunctionInfo> shared) {
if (CanBreakAtEntry(shared)) flags |= DebugInfo::kCanBreakAtEntry;
debug_info->set_flags(flags);
debug_info->set_break_points(*break_points);
+
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
}
Handle<DebugInfo> Debug::GetOrCreateDebugInfo(
@@ -1723,7 +1743,8 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise,
Handle<JSObject> jspromise = Handle<JSObject>::cast(promise);
// Mark the promise as already having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
- Object::SetProperty(isolate_, jspromise, key, key, LanguageMode::kStrict)
+ Object::SetProperty(isolate_, jspromise, key, key, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.Assert();
// Check whether the promise reject is considered an uncaught exception.
uncaught = !isolate_->PromiseHasUserDefinedRejectHandler(jspromise);
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 215ec1d8ac..fa50f6ecdd 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -7,27 +7,20 @@
#include <vector>
-#include "src/allocation.h"
-#include "src/base/atomicops.h"
-#include "src/base/hashmap.h"
-#include "src/base/platform/platform.h"
#include "src/debug/debug-interface.h"
#include "src/debug/interface-types.h"
-#include "src/execution.h"
-#include "src/flags.h"
#include "src/frames.h"
#include "src/globals.h"
-#include "src/heap/factory.h"
+#include "src/handles.h"
+#include "src/isolate.h"
#include "src/objects/debug-objects.h"
-#include "src/runtime/runtime.h"
#include "src/source-position-table.h"
-#include "src/string-stream.h"
-#include "src/v8threads.h"
namespace v8 {
namespace internal {
// Forward declarations.
+class AbstractCode;
class DebugScope;
class JSGeneratorObject;
@@ -269,6 +262,8 @@ class Debug {
int end_position, bool restrict_to_function,
std::vector<BreakLocation>* locations);
+ MaybeHandle<JSArray> GetPrivateFields(Handle<JSReceiver> receiver);
+
bool IsBlackboxed(Handle<SharedFunctionInfo> shared);
bool CanBreakAtEntry(Handle<SharedFunctionInfo> shared);
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 2d38120da5..4cc742b367 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -86,6 +86,30 @@ enum BreakLocationType {
kCommonBreakLocation
};
+enum class CoverageMode {
+ // Make use of existing information in feedback vectors on the heap.
+ // Only return a yes/no result. Optimization and GC are not affected.
+ // Collecting best effort coverage does not reset counters.
+ kBestEffort,
+ // Disable optimization and prevent feedback vectors from being garbage
+ // collected in order to preserve precise invocation counts. Collecting
+ // precise count coverage resets counters to get incremental updates.
+ kPreciseCount,
+ // We are only interested in a yes/no result for the function. Optimization
+ // and GC can be allowed once a function has been invoked. Collecting
+ // precise binary coverage resets counters for incremental updates.
+ kPreciseBinary,
+ // Similar to the precise coverage modes but provides coverage at a
+ // lower granularity. Design doc: goo.gl/lA2swZ.
+ kBlockCount,
+ kBlockBinary,
+};
+
+enum class TypeProfileMode {
+ kNone,
+ kCollect,
+};
+
class V8_EXPORT_PRIVATE BreakLocation : public Location {
public:
BreakLocation(int line_number, int column_number, BreakLocationType type)
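
Note: CoverageMode and TypeProfileMode land here at namespace scope, replacing the class-nested debug::TypeProfile::Mode spelling updated elsewhere in this patch. One likely benefit: a scoped enum at namespace scope can be forward-declared by headers that only pass the value through, which a class-nested enum cannot. A small compilable illustration:

    #include <cstdio>

    namespace debug {
    enum class TypeProfileMode { kNone, kCollect };  // namespace scope, as above
    }

    // Other headers may now write just "namespace debug { enum class
    // TypeProfileMode; }" instead of including the full owning class.
    void SelectModeSketch(debug::TypeProfileMode mode) {
      std::printf("%s\n",
                  mode == debug::TypeProfileMode::kCollect ? "collect" : "none");
    }

    int main() { SelectModeSketch(debug::TypeProfileMode::kCollect); }
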
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index e76202e96b..5183121de3 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -22,6 +22,7 @@
#include "src/parsing/parsing.h"
#include "src/source-position-table.h"
#include "src/v8.h"
+#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -683,7 +684,7 @@ void MapLiterals(const FunctionLiteralChanges& changes,
DCHECK(literal->start_position() != kNoSourcePosition);
DCHECK(literal->end_position() != kNoSourcePosition);
std::pair<int, int> key =
- literal->function_literal_id() == FunctionLiteral::kIdTypeTopLevel
+ literal->function_literal_id() == kFunctionLiteralIdTopLevel
? kTopLevelMarker
: std::make_pair(literal->start_position(),
literal->end_position());
@@ -697,7 +698,7 @@ void MapLiterals(const FunctionLiteralChanges& changes,
FunctionLiteral* literal = change_pair.first;
const FunctionLiteralChange& change = change_pair.second;
std::pair<int, int> key =
- literal->function_literal_id() == FunctionLiteral::kIdTypeTopLevel
+ literal->function_literal_id() == kFunctionLiteralIdTopLevel
? kTopLevelMarker
: std::make_pair(change.new_start_position,
change.new_end_position);
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 0000445e90..6cdfba151f 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -35,16 +35,12 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Leave the frame.
// - Restart the frame by calling the function.
- Register decompr_scratch_for_debug =
- COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
-
- __ movp(rbp, rbx);
- __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rbp, rbx);
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ leave();
__ LoadTaggedPointerField(
- rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
- decompr_scratch_for_debug);
+ rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(
rbx, FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 2f34733b61..cf8e59c763 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -14,6 +14,7 @@
#include "src/disasm.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
+#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/log.h"
#include "src/macro-assembler.h"
@@ -23,6 +24,7 @@
#include "src/register-configuration.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
+#include "src/v8threads.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -275,7 +277,7 @@ class ActivationsFinder : public ThreadVisitor {
void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
DisallowHeapAllocation no_allocation;
- Isolate* isolate = context->GetHeap()->isolate();
+ Isolate* isolate = context->GetIsolate();
Code topmost_optimized_code;
bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
@@ -2642,7 +2644,11 @@ int TranslatedValue::GetChildrenCount() const {
}
uint64_t TranslatedState::GetUInt64Slot(Address fp, int slot_offset) {
+#if V8_TARGET_ARCH_32_BIT
+ return ReadUnalignedValue<uint64_t>(fp + slot_offset);
+#else
return Memory<uint64_t>(fp + slot_offset);
+#endif
}
uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) {
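
Note: on 32-bit targets a 64-bit stack slot is only guaranteed 4-byte alignment, so the direct Memory<uint64_t> load above could trap (or be slow) on strict-alignment hardware; ReadUnalignedValue performs a byte-wise read instead. The portable idiom, as a standalone sketch (ReadUnaligned is a hypothetical stand-in for V8's helper):

    #include <cstdint>
    #include <cstring>

    // Hypothetical stand-in for v8::internal::ReadUnalignedValue<T>:
    // memcpy is defined for any source alignment.
    template <typename T>
    T ReadUnaligned(const void* p) {
      T value;
      std::memcpy(&value, p, sizeof(T));
      return value;
    }

    uint64_t GetUInt64SlotSketch(const char* fp, int slot_offset) {
    #if UINTPTR_MAX == 0xFFFFFFFFu  // 32-bit: slot may be only 4-byte aligned
      return ReadUnaligned<uint64_t>(fp + slot_offset);
    #else                           // 64-bit: natural alignment holds
      return *reinterpret_cast<const uint64_t*>(fp + slot_offset);
    #endif
    }
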
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 31268c7d4a..a1f7bc5649 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -841,6 +841,15 @@ class DeoptimizerData {
explicit DeoptimizerData(Heap* heap);
~DeoptimizerData();
+#ifdef DEBUG
+ bool IsDeoptEntryCode(Code code) const {
+ for (int i = 0; i < kLastDeoptimizeKind + 1; i++) {
+ if (code == deopt_entry_code_[i]) return true;
+ }
+ return false;
+ }
+#endif // DEBUG
+
private:
Heap* heap_;
static const int kLastDeoptimizeKind =
diff --git a/deps/v8/src/disasm.h b/deps/v8/src/disasm.h
index adba1897de..b5be53b9f4 100644
--- a/deps/v8/src/disasm.h
+++ b/deps/v8/src/disasm.h
@@ -5,7 +5,7 @@
#ifndef V8_DISASM_H_
#define V8_DISASM_H_
-#include "src/utils.h"
+#include "src/vector.h"
namespace disasm {
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 71e2b58530..78a10dab48 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -21,6 +21,7 @@
#include "src/snapshot/embedded-data.h"
#include "src/snapshot/serializer-common.h"
#include "src/string-stream.h"
+#include "src/vector.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
@@ -242,9 +243,10 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
}
} else if (RelocInfo::IsWasmStubCall(rmode) && host.is_wasm_code()) {
// Host is isolate-independent, try wasm native module instead.
- wasm::WasmCode* code = host.as_wasm_code()->native_module()->Lookup(
- relocinfo->wasm_stub_call_address());
- out->AddFormatted(" ;; wasm stub: %s", code->GetRuntimeStubName());
+ const char* runtime_stub_name =
+ host.as_wasm_code()->native_module()->GetRuntimeStubName(
+ relocinfo->wasm_stub_call_address());
+ out->AddFormatted(" ;; wasm stub: %s", runtime_stub_name);
} else if (RelocInfo::IsRuntimeEntry(rmode) && isolate &&
isolate->deoptimizer_data() != nullptr) {
// A runtime entry relocinfo might be a deoptimization bailout.
diff --git a/deps/v8/src/eh-frame.cc b/deps/v8/src/eh-frame.cc
index 9e98a15550..37a176557d 100644
--- a/deps/v8/src/eh-frame.cc
+++ b/deps/v8/src/eh-frame.cc
@@ -7,6 +7,8 @@
#include <iomanip>
#include <ostream>
+#include "src/code-desc.h"
+
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM) && \
!defined(V8_TARGET_ARCH_ARM64)
diff --git a/deps/v8/src/eh-frame.h b/deps/v8/src/eh-frame.h
index 1b4e647058..1f1fb8ea3d 100644
--- a/deps/v8/src/eh-frame.h
+++ b/deps/v8/src/eh-frame.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+class CodeDesc;
+
class V8_EXPORT_PRIVATE EhFrameConstants final
: public NON_EXPORTED_BASE(AllStatic) {
public:
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 5f97a2f24d..d6569fab1e 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -8,6 +8,7 @@
#include "src/conversions.h"
#include "src/frames.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h" // For MaxNumberToStringCacheSize.
#include "src/heap/heap-write-barrier-inl.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
@@ -290,9 +291,18 @@ static void CopyDoubleToDoubleElements(FixedArrayBase from_base,
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
from_address += kDoubleSize * from_start;
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): we use CopyTagged() in order to avoid unaligned
+ // access to double values in the arrays. This will no longer be necessary
+ // once the allocation alignment issue is fixed.
+ int words_per_double = (kDoubleSize / kTaggedSize);
+ CopyTagged(to_address, from_address,
+ static_cast<size_t>(words_per_double * copy_size));
+#else
int words_per_double = (kDoubleSize / kSystemPointerSize);
CopyWords(to_address, from_address,
static_cast<size_t>(words_per_double * copy_size));
+#endif
}
static void CopySmiToDoubleElements(FixedArrayBase from_base,
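
Note: with pointer compression, double arrays are temporarily copied in kTaggedSize (4-byte) words so that no 8-byte-aligned access is issued while the allocation alignment issue in the TODO above is open. Roughly (CopyWords32 is a hypothetical stand-in for V8's CopyTagged):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for CopyTagged(): copies 4-byte words only,
    // so no access assumes 8-byte alignment of the double payload.
    void CopyWords32(uint32_t* dst, const uint32_t* src, size_t word_count) {
      for (size_t i = 0; i < word_count; ++i) dst[i] = src[i];
    }

    void CopyDoublesAsWords(uint32_t* dst, const uint32_t* src,
                            size_t n_doubles) {
      CopyWords32(dst, src, n_doubles * (sizeof(double) / sizeof(uint32_t)));
    }
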
@@ -458,10 +468,13 @@ static void SortIndices(
AtomicSlot start(indices->GetFirstElementAddress());
std::sort(start, start + sort_size,
[isolate](Tagged_t elementA, Tagged_t elementB) {
- // TODO(ishell): revisit the code below
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+#ifdef V8_COMPRESS_POINTERS
+ Object a(DecompressTaggedAny(isolate->isolate_root(), elementA));
+ Object b(DecompressTaggedAny(isolate->isolate_root(), elementB));
+#else
Object a(elementA);
Object b(elementB);
+#endif
if (a->IsSmi() || !a->IsUndefined(isolate)) {
if (!b->IsSmi() && b->IsUndefined(isolate)) {
return true;
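
Note: under V8_COMPRESS_POINTERS a heap slot holds a 32-bit Tagged_t offset rather than a full pointer, so the comparator above must decompress against the isolate root before it can treat the values as Objects. A toy model of the scheme (names hypothetical; real V8 also deals with Smis and weak bits):

    #include <cstdint>

    using Address  = uintptr_t;
    using Tagged_t = uint32_t;  // compressed on-heap representation (toy)

    // Hypothetical decompression: full pointer = isolate root + 32-bit offset.
    Address DecompressTagged(Address isolate_root, Tagged_t compressed) {
      return isolate_root + static_cast<Address>(compressed);
    }

    // Hypothetical compression: keep only the low 32 bits of the pointer.
    Tagged_t CompressTagged(Address full_pointer) {
      return static_cast<Tagged_t>(full_pointer);
    }
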
@@ -1166,7 +1179,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
isolate->factory()->Uint32ToString(i, use_cache);
list->set(insertion_index, *index_string);
} else {
- list->set(insertion_index, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ list->set(insertion_index, Smi::FromInt(i));
}
insertion_index++;
}
@@ -1373,7 +1386,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
Handle<JSObject> object,
uint32_t length) final {
return Subclass::CreateListFromArrayLikeImpl(isolate, object, length);
- };
+ }
static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate,
Handle<JSObject> object,
@@ -2006,7 +2019,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// has too few used values, normalize it.
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() < kMinLengthForSparsenessCheck) return;
- if (Heap::InNewSpace(*backing_store)) return;
+ // TODO(ulan): Check if it works with young large objects.
+ if (ObjectInYoungGeneration(*backing_store)) return;
uint32_t length = 0;
if (obj->IsJSArray()) {
JSArray::cast(*obj)->length()->ToArrayLength(&length);
@@ -3752,7 +3766,7 @@ class SloppyArgumentsElementsAccessor
Handle<String> index_string = isolate->factory()->Uint32ToString(i);
list->set(insertion_index, *index_string);
} else {
- list->set(insertion_index, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ list->set(insertion_index, Smi::FromInt(i));
}
insertion_index++;
}
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index c244121d7b..69f9e1e2d7 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -12,6 +12,15 @@
#include "src/runtime-profiler.h"
#include "src/vm-state-inl.h"
+#define TRACE_INTERRUPT(...) \
+ do { \
+ if (FLAG_trace_interrupts) { \
+ if (any_interrupt_handled) PrintF(", "); \
+ PrintF(__VA_ARGS__); \
+ any_interrupt_handled = true; \
+ } \
+ } while (false)
+
namespace v8 {
namespace internal {
@@ -195,8 +204,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
if ((!params.is_construct || function->IsConstructor()) &&
function->shared()->IsApiFunction() &&
!function->shared()->BreakAtEntry()) {
- SaveContext save(isolate);
- isolate->set_context(function->context());
+ SaveAndSwitchContext save(isolate, function->context());
DCHECK(function->context()->global_object()->IsJSGlobalObject());
Handle<Object> receiver = params.is_construct
@@ -628,47 +636,28 @@ Object StackGuard::HandleInterrupts() {
}
if (CheckAndClearInterrupt(GC_REQUEST)) {
- if (FLAG_trace_interrupts) {
- PrintF("GC_REQUEST");
- any_interrupt_handled = true;
- }
+ TRACE_INTERRUPT("GC_REQUEST");
isolate_->heap()->HandleGCRequest();
}
if (CheckAndClearInterrupt(TERMINATE_EXECUTION)) {
- if (FLAG_trace_interrupts) {
- if (any_interrupt_handled) PrintF(", ");
- PrintF("TERMINATE_EXECUTION");
- any_interrupt_handled = true;
- }
+ TRACE_INTERRUPT("TERMINATE_EXECUTION");
return isolate_->TerminateExecution();
}
if (CheckAndClearInterrupt(DEOPT_MARKED_ALLOCATION_SITES)) {
- if (FLAG_trace_interrupts) {
- if (any_interrupt_handled) PrintF(", ");
- PrintF("DEOPT_MARKED_ALLOCATION_SITES");
- any_interrupt_handled = true;
- }
+ TRACE_INTERRUPT("DEOPT_MARKED_ALLOCATION_SITES");
isolate_->heap()->DeoptMarkedAllocationSites();
}
if (CheckAndClearInterrupt(INSTALL_CODE)) {
- if (FLAG_trace_interrupts) {
- if (any_interrupt_handled) PrintF(", ");
- PrintF("INSTALL_CODE");
- any_interrupt_handled = true;
- }
+ TRACE_INTERRUPT("INSTALL_CODE");
DCHECK(isolate_->concurrent_recompilation_enabled());
isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
}
if (CheckAndClearInterrupt(API_INTERRUPT)) {
- if (FLAG_trace_interrupts) {
- if (any_interrupt_handled) PrintF(", ");
- PrintF("API_INTERRUPT");
- any_interrupt_handled = true;
- }
+ TRACE_INTERRUPT("API_INTERRUPT");
// Callbacks must be invoked outside of the ExecutionAccess lock.
isolate_->InvokeApiInterruptCallbacks();
}
@@ -689,3 +678,5 @@ Object StackGuard::HandleInterrupts() {
} // namespace internal
} // namespace v8
+
+#undef TRACE_INTERRUPT
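
Note: wrapping TRACE_INTERRUPT's body in do { ... } while (false) makes each expansion a single statement, so the macro composes safely with unbraced if/else at every call site above. A minimal demonstration of why the idiom matters (LOG_GOOD and LOG_BAD are hypothetical):

    #include <cstdio>

    #define LOG_BAD(x) std::printf("a:%d ", (x)); std::printf("b:%d\n", (x))
    #define LOG_GOOD(x)             \
      do {                          \
        std::printf("a:%d ", (x));  \
        std::printf("b:%d\n", (x)); \
      } while (false)

    int main() {
      bool cond = false;
      if (cond) LOG_GOOD(1);  // one statement: prints nothing when cond is false
      // if (cond) LOG_BAD(1);  // would still print "b:1" -- the classic bug
      return 0;
    }
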
diff --git a/deps/v8/src/external-reference.cc b/deps/v8/src/external-reference.cc
index 78d4127758..d1295c3c62 100644
--- a/deps/v8/src/external-reference.cc
+++ b/deps/v8/src/external-reference.cc
@@ -13,7 +13,10 @@
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
+#include "src/hash-seed-inl.h"
#include "src/heap/heap.h"
+// For IncrementalMarking::RecordWriteFromCode. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
@@ -27,7 +30,6 @@
#include "src/wasm/wasm-external-refs.h"
// Include native regexp-macro-assembler.
-#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
#include "src/regexp/ia32/regexp-macro-assembler-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
@@ -47,7 +49,6 @@
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
-#endif // V8_INTERPRETED_REGEXP
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
@@ -232,7 +233,7 @@ struct IsValidExternalReferenceType<Result (Class::*)(Args...)> {
}
FUNCTION_REFERENCE(incremental_marking_record_write_function,
- IncrementalMarking::RecordWriteFromCode);
+ IncrementalMarking::RecordWriteFromCode)
ExternalReference ExternalReference::store_buffer_overflow_function() {
return ExternalReference(
@@ -419,6 +420,11 @@ ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<Address>(&double_min_int_constant));
}
+ExternalReference
+ExternalReference::address_of_mock_arraybuffer_allocator_flag() {
+ return ExternalReference(&FLAG_mock_arraybuffer_allocator);
+}
+
ExternalReference ExternalReference::address_of_runtime_stats_flag() {
return ExternalReference(&FLAG_runtime_stats);
}
@@ -473,8 +479,6 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
return ExternalReference::Create(&thunk_fun, thunk_type);
}
-#ifndef V8_INTERPRETED_REGEXP
-
#if V8_TARGET_ARCH_X64
#define re_stack_check_func RegExpMacroAssemblerX64::CheckStackGuardState
#elif V8_TARGET_ARCH_IA32
@@ -531,8 +535,6 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
return ExternalReference(isolate->regexp_stack()->memory_size_address());
}
-#endif // V8_INTERPRETED_REGEXP
-
FUNCTION_REFERENCE_WITH_TYPE(ieee754_acos_function, base::ieee754::acos,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_acosh_function, base::ieee754::acosh,
@@ -573,6 +575,8 @@ FUNCTION_REFERENCE_WITH_TYPE(ieee754_tan_function, base::ieee754::tan,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_tanh_function, base::ieee754::tanh,
BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_pow_function, base::ieee754::pow,
+ BUILTIN_FP_FP_CALL)
void* libc_memchr(void* string, int character, size_t search_length) {
return memchr(string, character, search_length);
@@ -649,7 +653,7 @@ FUNCTION_REFERENCE(jsreceiver_create_identity_hash,
static uint32_t ComputeSeededIntegerHash(Isolate* isolate, uint32_t key) {
DisallowHeapAllocation no_gc;
- return ComputeSeededHash(key, isolate->heap()->HashSeed());
+ return ComputeSeededHash(key, HashSeed(isolate));
}
FUNCTION_REFERENCE(compute_integer_hash, ComputeSeededIntegerHash)
@@ -698,11 +702,6 @@ ExternalReference::search_string_raw<const uc16, const uint8_t>();
template ExternalReference
ExternalReference::search_string_raw<const uc16, const uc16>();
-ExternalReference ExternalReference::page_flags(Page* page) {
- return ExternalReference(reinterpret_cast<Address>(page) +
- MemoryChunk::kFlagsOffset);
-}
-
ExternalReference ExternalReference::FromRawAddress(Address address) {
return ExternalReference(address);
}
@@ -764,19 +763,8 @@ static Address InvalidatePrototypeChainsWrapper(Address raw_map) {
FUNCTION_REFERENCE(invalidate_prototype_chains_function,
InvalidatePrototypeChainsWrapper)
-double power_double_double(double x, double y) {
- // The checks for special cases can be dropped in ia32 because it has already
- // been done in generated code before bailing out here.
- if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
- return std::numeric_limits<double>::quiet_NaN();
- }
- return Pow(x, y);
-}
-
double modulo_double_double(double x, double y) { return Modulo(x, y); }
-FUNCTION_REFERENCE_WITH_TYPE(power_double_double_function, power_double_double,
- BUILTIN_FP_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(mod_two_doubles_operation, modulo_double_double,
BUILTIN_FP_FP_CALL)
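
Note: the deleted power_double_double wrapper existed only to impose the ECMAScript Math.pow edge cases on top of a C-library pow; switching the external reference to base::ieee754::pow assumes that implementation already returns NaN in those cases. The difference the old checks papered over, shown against std::pow (which follows C99, not ECMAScript):

    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Restatement of the removed special-casing: ECMAScript wants NaN when the
    // exponent is NaN, or when |base| == 1 and the exponent is infinite.
    double JsPowSketch(double x, double y) {
      if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y)))
        return std::numeric_limits<double>::quiet_NaN();
      return std::pow(x, y);
    }

    int main() {
      double inf = std::numeric_limits<double>::infinity();
      std::printf("%f vs %f\n", std::pow(1.0, inf),      // 1.0 under C99
                                JsPowSketch(1.0, inf));  // nan, as in JS
    }
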
@@ -802,11 +790,6 @@ ExternalReference ExternalReference::fast_c_call_caller_pc_address(
isolate->isolate_data()->fast_c_call_caller_pc_address());
}
-ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
- return ExternalReference(reinterpret_cast<void*>(
- FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
-}
-
FUNCTION_REFERENCE(call_enqueue_microtask_function,
MicrotaskQueue::CallEnqueueMicrotask)
@@ -923,7 +906,7 @@ static int EnterMicrotaskContextWrapper(HandleScopeImplementer* hsi,
return 0;
}
-FUNCTION_REFERENCE(call_enter_context_function, EnterMicrotaskContextWrapper);
+FUNCTION_REFERENCE(call_enter_context_function, EnterMicrotaskContextWrapper)
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
diff --git a/deps/v8/src/external-reference.h b/deps/v8/src/external-reference.h
index 5f8d045cf3..a3b2655457 100644
--- a/deps/v8/src/external-reference.h
+++ b/deps/v8/src/external-reference.h
@@ -72,7 +72,16 @@ class StatsCounter;
"IsolateData::fast_c_call_caller_fp_address") \
V(fast_c_call_caller_pc_address, \
"IsolateData::fast_c_call_caller_pc_address") \
- EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP(V)
+ V(address_of_regexp_stack_limit, "RegExpStack::limit_address()") \
+ V(address_of_regexp_stack_memory_address, "RegExpStack::memory_address()") \
+ V(address_of_regexp_stack_memory_size, "RegExpStack::memory_size()") \
+ V(address_of_static_offsets_vector, "OffsetsVector::static_offsets_vector") \
+ V(re_case_insensitive_compare_uc16, \
+ "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()") \
+ V(re_check_stack_guard_state, \
+ "RegExpMacroAssembler*::CheckStackGuardState()") \
+ V(re_grow_stack, "NativeRegExpMacroAssembler::GrowStack()") \
+ V(re_word_character_map, "NativeRegExpMacroAssembler::word_character_map")
#define EXTERNAL_REFERENCE_LIST(V) \
V(abort_with_reason, "abort_with_reason") \
@@ -83,6 +92,8 @@ class StatsCounter;
V(address_of_harmony_await_optimization_flag, \
"FLAG_harmony_await_optimization") \
V(address_of_min_int, "LDoubleConstant::min_int") \
+ V(address_of_mock_arraybuffer_allocator_flag, \
+ "FLAG_mock_arraybuffer_allocator") \
V(address_of_one_half, "LDoubleConstant::one_half") \
V(address_of_runtime_stats_flag, "FLAG_runtime_stats") \
V(address_of_the_hole_nan, "the_hole_nan") \
@@ -101,7 +112,6 @@ class StatsCounter;
V(f64_acos_wrapper_function, "f64_acos_wrapper") \
V(f64_asin_wrapper_function, "f64_asin_wrapper") \
V(f64_mod_wrapper_function, "f64_mod_wrapper") \
- V(fixed_typed_array_base_data_offset, "fixed_typed_array_base_data_offset") \
V(get_date_field_function, "JSDate::GetField") \
V(get_or_create_hash_raw, "get_or_create_hash_raw") \
V(ieee754_acos_function, "base::ieee754::acos") \
@@ -120,6 +130,7 @@ class StatsCounter;
V(ieee754_log10_function, "base::ieee754::log10") \
V(ieee754_log1p_function, "base::ieee754::log1p") \
V(ieee754_log2_function, "base::ieee754::log2") \
+ V(ieee754_pow_function, "base::ieee754::pow") \
V(ieee754_sin_function, "base::ieee754::sin") \
V(ieee754_sinh_function, "base::ieee754::sinh") \
V(ieee754_tan_function, "base::ieee754::tan") \
@@ -142,7 +153,6 @@ class StatsCounter;
V(mod_two_doubles_operation, "mod_two_doubles") \
V(new_deoptimizer_function, "Deoptimizer::New()") \
V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \
- V(power_double_double_function, "power_double_double_function") \
V(printf_function, "printf") \
V(refill_math_random, "MathRandom::RefillCache") \
V(search_string_raw_one_one, "search_string_raw_one_one") \
@@ -197,22 +207,6 @@ class StatsCounter;
"atomic_pair_compare_exchange_function") \
EXTERNAL_REFERENCE_LIST_INTL(V)
-#ifndef V8_INTERPRETED_REGEXP
-#define EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP(V) \
- V(address_of_regexp_stack_limit, "RegExpStack::limit_address()") \
- V(address_of_regexp_stack_memory_address, "RegExpStack::memory_address()") \
- V(address_of_regexp_stack_memory_size, "RegExpStack::memory_size()") \
- V(address_of_static_offsets_vector, "OffsetsVector::static_offsets_vector") \
- V(re_case_insensitive_compare_uc16, \
- "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()") \
- V(re_check_stack_guard_state, \
- "RegExpMacroAssembler*::CheckStackGuardState()") \
- V(re_grow_stack, "NativeRegExpMacroAssembler::GrowStack()") \
- V(re_word_character_map, "NativeRegExpMacroAssembler::word_character_map")
-#else
-#define EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP(V)
-#endif // V8_INTERPRETED_REGEXP
-
#ifdef V8_INTL_SUPPORT
#define EXTERNAL_REFERENCE_LIST_INTL(V) \
V(intl_convert_one_byte_to_lower, "intl_convert_one_byte_to_lower") \
@@ -292,8 +286,6 @@ class ExternalReference {
template <typename SubjectChar, typename PatternChar>
static ExternalReference search_string_raw();
- static ExternalReference page_flags(Page* page);
-
static ExternalReference FromRawAddress(Address address);
#define DECL_EXTERNAL_REFERENCE(name, desc) static ExternalReference name();
@@ -332,9 +324,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
void abort_with_reason(int reason);
-// Computes pow(x, y) with the special cases in the spec for Math.pow.
-double power_double_double(double x, double y);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index bdfc8ee013..afdebc07a2 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -6,10 +6,11 @@
#define V8_FEEDBACK_VECTOR_INL_H_
#include "src/feedback-vector.h"
+
#include "src/globals.h"
#include "src/heap/factory-inl.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap-write-barrier.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/code-inl.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
@@ -32,19 +33,19 @@ INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)
int32_t FeedbackMetadata::synchronized_slot_count() const {
return base::Acquire_Load(reinterpret_cast<const base::Atomic32*>(
- FIELD_ADDR(this, kSlotCountOffset)));
+ FIELD_ADDR(*this, kSlotCountOffset)));
}
int32_t FeedbackMetadata::get(int index) const {
DCHECK(index >= 0 && index < length());
int offset = kHeaderSize + index * kInt32Size;
- return READ_INT32_FIELD(this, offset);
+ return READ_INT32_FIELD(*this, offset);
}
void FeedbackMetadata::set(int index, int32_t value) {
DCHECK(index >= 0 && index < length());
int offset = kHeaderSize + index * kInt32Size;
- WRITE_INT32_FIELD(this, offset, value);
+ WRITE_INT32_FIELD(*this, offset, value);
}
bool FeedbackMetadata::is_empty() const { return slot_count() == 0; }
@@ -70,6 +71,7 @@ int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed:
case FeedbackSlotKind::kStoreNamedSloppy:
case FeedbackSlotKind::kStoreNamedStrict:
case FeedbackSlotKind::kStoreOwnNamed:
@@ -266,6 +268,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed:
case FeedbackSlotKind::kStoreNamedSloppy:
case FeedbackSlotKind::kStoreNamedStrict:
case FeedbackSlotKind::kStoreOwnNamed:
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index 2ac34a5e4d..c26bedc600 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -4,6 +4,7 @@
#include "src/feedback-vector.h"
#include "src/feedback-vector-inl.h"
+#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-inl.h"
#include "src/objects.h"
#include "src/objects/data-handler-inl.h"
@@ -142,6 +143,8 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
return "LoadGlobalNotInsideTypeof";
case FeedbackSlotKind::kLoadKeyed:
return "LoadKeyed";
+ case FeedbackSlotKind::kHasKeyed:
+ return "HasKeyed";
case FeedbackSlotKind::kStoreNamedSloppy:
return "StoreNamedSloppy";
case FeedbackSlotKind::kStoreNamedStrict:
@@ -263,6 +266,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
case FeedbackSlotKind::kCloneObject:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed:
case FeedbackSlotKind::kStoreNamedSloppy:
case FeedbackSlotKind::kStoreNamedStrict:
case FeedbackSlotKind::kStoreOwnNamed:
@@ -443,6 +447,7 @@ void FeedbackNexus::ConfigureUninitialized() {
case FeedbackSlotKind::kStoreOwnNamed:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed:
case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
@@ -483,6 +488,7 @@ bool FeedbackNexus::Clear() {
case FeedbackSlotKind::kStoreOwnNamed:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed:
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
@@ -544,14 +550,14 @@ bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
return changed;
}
-Map FeedbackNexus::FindFirstMap() const {
+Map FeedbackNexus::GetFirstMap() const {
MapHandles maps;
ExtractMaps(&maps);
if (maps.size() > 0) return *maps.at(0);
return Map();
}
-InlineCacheState FeedbackNexus::StateFromFeedback() const {
+InlineCacheState FeedbackNexus::ic_state() const {
Isolate* isolate = GetIsolate();
MaybeObject feedback = GetFeedback();
@@ -569,6 +575,13 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
if (feedback->IsSmi()) return MONOMORPHIC;
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::PremonomorphicSentinel(isolate))) {
+ DCHECK(kind() == FeedbackSlotKind::kStoreGlobalSloppy ||
+ kind() == FeedbackSlotKind::kStoreGlobalStrict);
+ return PREMONOMORPHIC;
+ }
+
DCHECK(feedback->IsWeakOrCleared());
MaybeObject extra = GetFeedbackExtra();
if (!feedback->IsCleared() ||
@@ -586,7 +599,8 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kStoreOwnNamed:
case FeedbackSlotKind::kLoadProperty:
- case FeedbackSlotKind::kLoadKeyed: {
+ case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed: {
if (feedback == MaybeObject::FromObject(
*FeedbackVector::UninitializedSentinel(isolate))) {
return UNINITIALIZED;
@@ -611,7 +625,8 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
return POLYMORPHIC;
}
if (heap_object->IsName()) {
- DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()));
+ DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsKeyedHasICKind(kind()));
Object extra = GetFeedbackExtra()->GetHeapObjectAssumeStrong();
WeakFixedArray extra_array = WeakFixedArray::cast(extra);
return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
@@ -727,18 +742,21 @@ void FeedbackNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
}
bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index,
- int context_slot_index) {
+ int context_slot_index,
+ bool immutable) {
DCHECK(IsGlobalICKind(kind()));
DCHECK_LE(0, script_context_index);
DCHECK_LE(0, context_slot_index);
if (!ContextIndexBits::is_valid(script_context_index) ||
- !SlotIndexBits::is_valid(context_slot_index)) {
+ !SlotIndexBits::is_valid(context_slot_index) ||
+ !ImmutabilityBit::is_valid(immutable)) {
return false;
}
int config = ContextIndexBits::encode(script_context_index) |
- SlotIndexBits::encode(context_slot_index);
+ SlotIndexBits::encode(context_slot_index) |
+ ImmutabilityBit::encode(immutable);
- SetFeedback(Smi::FromInt(config));
+ SetFeedback(Smi::From31BitPattern(config));
Isolate* isolate = GetIsolate();
SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
@@ -784,8 +802,8 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
}
break;
case POLYMORPHIC: {
- static constexpr int kMaxElements =
- IC::kMaxPolymorphicMapCount * kCloneObjectPolymorphicEntrySize;
+ const int kMaxElements =
+ FLAG_max_polymorphic_map_count * kCloneObjectPolymorphicEntrySize;
Handle<WeakFixedArray> array = Handle<WeakFixedArray>::cast(feedback);
int i = 0;
for (; i < array->length(); i += kCloneObjectPolymorphicEntrySize) {
@@ -915,7 +933,7 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
- IsStoreInArrayLiteralICKind(kind()));
+ IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
Isolate* isolate = GetIsolate();
MaybeObject feedback = GetFeedback();
@@ -963,7 +981,8 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
- IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
+ IsKeyedHasICKind(kind()));
MaybeObject feedback = GetFeedback();
Isolate* isolate = GetIsolate();
@@ -1009,7 +1028,7 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
- IsStoreInArrayLiteralICKind(kind()));
+ IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
MaybeObject feedback = GetFeedback();
Isolate* isolate = GetIsolate();
@@ -1050,8 +1069,9 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
return count == length;
}
-Name FeedbackNexus::FindFirstName() const {
- if (IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind())) {
+Name FeedbackNexus::GetName() const {
+ if (IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
+ IsKeyedHasICKind(kind())) {
MaybeObject feedback = GetFeedback();
if (IsPropertyNameFeedback(feedback)) {
return Name::cast(feedback->GetHeapObjectAssumeStrong());
@@ -1185,7 +1205,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
IcCheckType FeedbackNexus::GetKeyType() const {
DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
- IsStoreInArrayLiteralICKind(kind()));
+ IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
MaybeObject feedback = GetFeedback();
if (feedback == MaybeObject::FromObject(
*FeedbackVector::MegamorphicSentinel(GetIsolate()))) {
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index 69fd86436f..f19aea9906 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -40,6 +40,7 @@ enum class FeedbackSlotKind {
kLoadGlobalNotInsideTypeof,
kLoadGlobalInsideTypeof,
kLoadKeyed,
+ kHasKeyed,
kStoreGlobalStrict,
kStoreNamedStrict,
kStoreOwnNamed,
@@ -75,6 +76,10 @@ inline bool IsKeyedLoadICKind(FeedbackSlotKind kind) {
return kind == FeedbackSlotKind::kLoadKeyed;
}
+inline bool IsKeyedHasICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kHasKeyed;
+}
+
inline bool IsStoreGlobalICKind(FeedbackSlotKind kind) {
return kind == FeedbackSlotKind::kStoreGlobalSloppy ||
kind == FeedbackSlotKind::kStoreGlobalStrict;
@@ -348,6 +353,10 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
return AddSlot(FeedbackSlotKind::kLoadKeyed);
}
+ FeedbackSlot AddKeyedHasICSlot() {
+ return AddSlot(FeedbackSlotKind::kHasKeyed);
+ }
+
FeedbackSlotKind GetStoreICSlot(LanguageMode language_mode) {
STATIC_ASSERT(LanguageModeSize == 2);
return is_strict(language_mode) ? FeedbackSlotKind::kStoreNamedStrict
@@ -596,23 +605,22 @@ class FeedbackNexus final {
return vector()->GetLanguageMode(slot());
}
- InlineCacheState ic_state() const { return StateFromFeedback(); }
- bool IsUninitialized() const { return StateFromFeedback() == UNINITIALIZED; }
- bool IsMegamorphic() const { return StateFromFeedback() == MEGAMORPHIC; }
- bool IsGeneric() const { return StateFromFeedback() == GENERIC; }
+ InlineCacheState ic_state() const;
+ bool IsUninitialized() const { return ic_state() == UNINITIALIZED; }
+ bool IsMegamorphic() const { return ic_state() == MEGAMORPHIC; }
+ bool IsGeneric() const { return ic_state() == GENERIC; }
void Print(std::ostream& os); // NOLINT
// For map-based ICs (load, keyed-load, store, keyed-store).
- Map FindFirstMap() const;
+ Map GetFirstMap() const;
- InlineCacheState StateFromFeedback() const;
int ExtractMaps(MapHandles* maps) const;
MaybeObjectHandle FindHandlerForMap(Handle<Map> map) const;
bool FindHandlers(MaybeObjectHandles* code_list, int length = -1) const;
bool IsCleared() const {
- InlineCacheState state = StateFromFeedback();
+ InlineCacheState state = ic_state();
return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
}
@@ -648,7 +656,7 @@ class FeedbackNexus final {
// For KeyedLoad and KeyedStore ICs.
IcCheckType GetKeyType() const;
- Name FindFirstName() const;
+ Name GetName() const;
// For Call ICs.
int GetCallCount();
@@ -671,8 +679,8 @@ class FeedbackNexus final {
// For Global Load and Store ICs.
void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
// Returns false if given combination of indices is not allowed.
- bool ConfigureLexicalVarMode(int script_context_index,
- int context_slot_index);
+ bool ConfigureLexicalVarMode(int script_context_index, int context_slot_index,
+ bool immutable);
void ConfigureHandlerMode(const MaybeObjectHandle& handler);
// For CloneObject ICs
@@ -682,7 +690,8 @@ class FeedbackNexus final {
// Bit positions in a smi that encodes lexical environment variable access.
#define LEXICAL_MODE_BIT_FIELDS(V, _) \
V(ContextIndexBits, unsigned, 12, _) \
- V(SlotIndexBits, unsigned, 19, _)
+ V(SlotIndexBits, unsigned, 18, _) \
+ V(ImmutabilityBit, bool, 1, _)
DEFINE_BIT_FIELDS(LEXICAL_MODE_BIT_FIELDS)
#undef LEXICAL_MODE_BIT_FIELDS
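
Note: the lexical-access encoding gives up one SlotIndexBits bit to gain ImmutabilityBit, keeping the total at 12 + 18 + 1 = 31 bits so the whole configuration still fits a Smi payload (hence Smi::From31BitPattern in the feedback-vector.cc hunk above). The packing, restated with plain shifts in place of the DEFINE_BIT_FIELDS machinery:

    #include <cassert>
    #include <cstdint>

    // bits [0..11]   ContextIndexBits (12)
    // bits [12..29]  SlotIndexBits    (18)
    // bit  [30]      ImmutabilityBit  (1)   -> 31 bits, Smi-sized payload
    uint32_t EncodeLexicalMode(uint32_t ctx_index, uint32_t slot_index,
                               bool immutable) {
      assert(ctx_index < (1u << 12) && slot_index < (1u << 18));
      return ctx_index | (slot_index << 12) |
             (static_cast<uint32_t>(immutable) << 30);
    }

    bool DecodeImmutable(uint32_t config) { return (config >> 30) & 1u; }
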
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index d54e43121e..42828db923 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -7,21 +7,20 @@
#include "src/field-index.h"
#include "src/objects-inl.h"
-#include "src/objects/descriptor-array.h"
+#include "src/objects/descriptor-array-inl.h"
namespace v8 {
namespace internal {
-inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Encoding encoding) {
+FieldIndex FieldIndex::ForInObjectOffset(int offset, Encoding encoding) {
DCHECK_IMPLIES(encoding == kWord32, IsAligned(offset, kInt32Size));
DCHECK_IMPLIES(encoding == kTagged, IsAligned(offset, kTaggedSize));
DCHECK_IMPLIES(encoding == kDouble, IsAligned(offset, kDoubleSize));
return FieldIndex(true, offset, encoding, 0, 0);
}
-inline FieldIndex FieldIndex::ForPropertyIndex(const Map map,
- int property_index,
- Representation representation) {
+FieldIndex FieldIndex::ForPropertyIndex(const Map map, int property_index,
+ Representation representation) {
DCHECK(map->instance_type() >= FIRST_NONSTRING_TYPE);
int inobject_properties = map->GetInObjectProperties();
bool is_inobject = property_index < inobject_properties;
@@ -43,7 +42,7 @@ inline FieldIndex FieldIndex::ForPropertyIndex(const Map map,
// Returns the index format accepted by the HLoadFieldByIndex instruction.
// (In-object: zero-based from (object start + JSObject::kHeaderSize),
// out-of-object: zero-based from FixedArray::kHeaderSize.)
-inline int FieldIndex::GetLoadByFieldIndex() const {
+int FieldIndex::GetLoadByFieldIndex() const {
// For efficiency, the LoadByFieldIndex instruction takes an index that is
// optimized for quick access. If the property is inline, the index is
// positive. If it's out-of-line, the encoded index is -raw_index - 1 to
@@ -57,12 +56,11 @@ inline int FieldIndex::GetLoadByFieldIndex() const {
result -= FixedArray::kHeaderSize / kTaggedSize;
result = -result - 1;
}
- result <<= 1;
+ result = static_cast<uint32_t>(result) << 1;
return is_double() ? (result | 1) : result;
}
-inline FieldIndex FieldIndex::ForDescriptor(const Map map,
- int descriptor_index) {
+FieldIndex FieldIndex::ForDescriptor(const Map map, int descriptor_index) {
PropertyDetails details =
map->instance_descriptors()->GetDetails(descriptor_index);
int field_index = details.field_index();
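
Note: the GetLoadByFieldIndex change above replaces result <<= 1, which is undefined behavior for the negative (out-of-object, -raw_index - 1) encodings in pre-C++20 dialects, with a shift performed on uint32_t, which is always defined and bit-identical on two's-complement targets. Minimal restatement:

    #include <cstdint>

    int EncodeLoadByFieldIndex(int result, bool is_double) {
      // Defined even when `result` is negative (the -raw_index - 1 encoding);
      // the former `result <<= 1` was UB for those values.
      int shifted = static_cast<int>(static_cast<uint32_t>(result) << 1);
      return is_double ? (shifted | 1) : shifted;
    }
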
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index f2a117e94b..2b5f82203e 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -23,13 +23,13 @@ class FieldIndex final {
FieldIndex() : bit_field_(0) {}
- static FieldIndex ForPropertyIndex(
+ static inline FieldIndex ForPropertyIndex(
const Map map, int index,
Representation representation = Representation::Tagged());
- static FieldIndex ForInObjectOffset(int offset, Encoding encoding);
- static FieldIndex ForDescriptor(const Map map, int descriptor_index);
+ static inline FieldIndex ForInObjectOffset(int offset, Encoding encoding);
+ static inline FieldIndex ForDescriptor(const Map map, int descriptor_index);
- int GetLoadByFieldIndex() const;
+ inline int GetLoadByFieldIndex() const;
bool is_inobject() const {
return IsInObjectBits::decode(bit_field_);
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index d262fb7012..c7dc7520eb 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -204,22 +204,21 @@ DEFINE_IMPLICATION(harmony_private_methods, harmony_private_fields)
V(harmony_weak_refs, "harmony weak references") \
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) \
- HARMONY_INPROGRESS_BASE(V) \
- V(harmony_locale, "Intl.Locale")
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_intl_bigint, "BigInt.prototype.toLocaleString") \
+ V(harmony_intl_datetime_style, "dateStyle timeStyle for DateTimeFormat")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#endif
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V) \
- V(harmony_private_fields, "harmony private fields in class literals") \
- V(harmony_numeric_separator, "harmony numeric separator between digits") \
- V(harmony_hashbang, "harmony hashbang syntax")
+#define HARMONY_STAGED_BASE(V) \
+ V(harmony_numeric_separator, "harmony numeric separator between digits")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_STAGED(V) \
- HARMONY_STAGED_BASE(V) \
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
V(harmony_intl_segmenter, "Intl.Segmenter")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
@@ -232,21 +231,20 @@ DEFINE_IMPLICATION(harmony_private_methods, harmony_private_fields)
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_import_meta, "harmony import.meta property") \
V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_array_flat, "harmony Array.prototype.{flat,flatMap}") \
- V(harmony_symbol_description, "harmony Symbol.prototype.description") \
V(harmony_global, "harmony global") \
V(harmony_json_stringify, "well-formed JSON.stringify") \
V(harmony_public_fields, "harmony public instance fields in class literals") \
V(harmony_static_fields, "harmony static fields in class literals") \
V(harmony_string_matchall, "harmony String.prototype.matchAll") \
V(harmony_object_from_entries, "harmony Object.fromEntries()") \
- V(harmony_await_optimization, "harmony await taking 1 tick")
+ V(harmony_await_optimization, "harmony await taking 1 tick") \
+ V(harmony_private_fields, "harmony private fields in class literals") \
+ V(harmony_hashbang, "harmony hashbang syntax")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(harmony_intl_list_format, "Intl.ListFormat") \
- V(harmony_intl_relative_time_format, "Intl.RelativeTimeFormat")
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_locale, "Intl.Locale")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -294,7 +292,6 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"not-too-far future")
DEFINE_IMPLICATION(future, write_protect_code_memory)
-DEFINE_IMPLICATION(future, flush_bytecode)
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
@@ -324,6 +321,10 @@ DEFINE_BOOL(feedback_normalization, false,
DEFINE_BOOL_READONLY(internalize_on_the_fly, true,
"internalize string keys for generic keyed ICs on the fly")
+// Flag to enable faster calls with argument mismatches (https://crbug.com/v8/8895)
+DEFINE_BOOL(fast_calls_with_arguments_mismatches, true,
+ "skip arguments adaptor frames when it's provably safe")
+
// Flag for one-shot optimizations.
DEFINE_BOOL(enable_one_shot_optimization, true,
"Enable size optimizations for the code that will "
@@ -357,6 +358,9 @@ DEFINE_BOOL(ignition_share_named_property_feedback, true,
"the same object")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
+DEFINE_BOOL(enable_lazy_source_positions, V8_LITE_BOOL,
+ "skip generating source positions during initial compile but "
+ "regenerate when actually required")
DEFINE_STRING(print_bytecode_filter, "*",
"filter for selecting which functions to print bytecode")
#ifdef V8_TRACE_IGNITION
@@ -410,6 +414,11 @@ DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_preprocess_ranges, true,
"run pre-register allocation heuristics")
+DEFINE_BOOL(turbo_control_flow_aware_allocation, false,
+ "consider control flow while allocating registers")
+DEFINE_NEG_IMPLICATION(turbo_control_flow_aware_allocation,
+ turbo_preprocess_ranges)
+
DEFINE_STRING(turbo_filter, "*", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
DEFINE_STRING(trace_turbo_path, nullptr,
@@ -447,7 +456,7 @@ DEFINE_BOOL(trace_verify_csa, false, "trace code stubs verification")
DEFINE_STRING(csa_trap_on_node, nullptr,
"trigger break point when a node with given id is created in "
"given stub. The format is: StubName,NodeId")
-DEFINE_BOOL_READONLY(fixed_array_bounds_checks, DEBUG_BOOL,
+DEFINE_BOOL_READONLY(fixed_array_bounds_checks, true,
"enable FixedArray bounds checks")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_stats_nvp, false,
@@ -766,6 +775,8 @@ DEFINE_BOOL(verify_heap_skip_remembered_set, false,
#endif
DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
DEFINE_BOOL(memory_reducer, true, "use memory reducer")
+DEFINE_BOOL(memory_reducer_for_small_heaps, true,
+ "use memory reducer for small heaps")
DEFINE_INT(heap_growing_percent, 0,
"specifies heap growing factor as (1 + heap_growing_percent/100)")
DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
@@ -773,7 +784,7 @@ DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
DEFINE_BOOL(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
-DEFINE_BOOL(flush_bytecode, V8_LITE_BOOL,
+DEFINE_BOOL(flush_bytecode, true,
"flush of bytecode when it has not been executed recently")
DEFINE_BOOL(stress_flush_bytecode, false, "stress bytecode flushing")
DEFINE_IMPLICATION(stress_flush_bytecode, flush_bytecode)
@@ -814,7 +825,7 @@ DEFINE_BOOL(fast_promotion_new_space, false,
DEFINE_BOOL(clear_free_memory, false, "initialize free memory with 0")
-DEFINE_BOOL(young_generation_large_objects, false,
+DEFINE_BOOL(young_generation_large_objects, true,
"allocates large objects by default in the young generation large "
"object space")
@@ -848,6 +859,10 @@ DEFINE_STRING(mcpu, "auto", "enable optimization for specific cpu")
DEFINE_BOOL(partial_constant_pool, true,
"enable use of partial constant pools (X64 only)")
+// Controlling source positions for Torque/CSA code.
+DEFINE_BOOL(enable_source_at_csa_bind, false,
+ "Include source information in the binary at CSA bind locations.")
+
// Deprecated ARM flags (replaced by arm_arch).
DEFINE_MAYBE_BOOL(enable_armv7, "deprecated (use --arm_arch instead)")
DEFINE_MAYBE_BOOL(enable_vfp3, "deprecated (use --arm_arch instead)")
@@ -898,6 +913,8 @@ DEFINE_BOOL(trace, false, "trace function calls")
// codegen.cc
DEFINE_BOOL(lazy, true, "use lazy compilation")
+DEFINE_BOOL(max_lazy, false, "ignore eager compilation hints")
+DEFINE_IMPLICATION(max_lazy, lazy)
DEFINE_BOOL(trace_opt, false, "trace lazy optimization")
DEFINE_BOOL(trace_opt_verbose, false, "extra verbose compilation tracing")
DEFINE_IMPLICATION(trace_opt_verbose, trace_opt)
@@ -989,11 +1006,13 @@ DEFINE_BOOL(trace_ic, false,
DEFINE_IMPLICATION(trace_ic, log_code)
DEFINE_INT(ic_stats, 0, "inline cache state transitions statistics")
DEFINE_VALUE_IMPLICATION(trace_ic, ic_stats, 1)
-DEFINE_BOOL_READONLY(track_constant_fields, false,
+DEFINE_BOOL_READONLY(track_constant_fields, true,
"enable constant field tracking")
-DEFINE_BOOL_READONLY(modify_map_inplace, false, "enable in-place map updates")
+DEFINE_BOOL_READONLY(modify_map_inplace, true, "enable in-place map updates")
DEFINE_BOOL_READONLY(fast_map_update, false,
"enable fast map update by caching the migration target")
+DEFINE_INT(max_polymorphic_map_count, 4,
+ "maximum number of maps to track in POLYMORPHIC state")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -1011,6 +1030,7 @@ DEFINE_IMPLICATION(trace_maps, log_code)
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
+DEFINE_BOOL(parse_only, false, "only parse the sources")
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -1045,6 +1065,7 @@ DEFINE_BOOL(stack_trace_on_illegal, false,
"print stack trace when an illegal exception is thrown")
DEFINE_BOOL(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
+// TODO(jgruber,machenbach): Rename to --correctness-fuzzer-suppressions.
DEFINE_BOOL(abort_on_stack_or_string_length_overflow, false,
"Abort program when the stack overflows or a string exceeds "
"maximum length (as opposed to throwing RangeError). This is "
@@ -1067,6 +1088,9 @@ DEFINE_INT(fuzzer_random_seed, 0,
DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
DEFINE_BOOL(print_all_exceptions, false,
"print exception object and stack trace on each thrown exception")
+DEFINE_BOOL(
+ detailed_error_stack_trace, false,
+ "includes arguments for each function call in the error stack frames array")
// runtime.cc
DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
@@ -1092,6 +1116,7 @@ DEFINE_UINT(serialization_chunk_size, 4096,
// Regexp
DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.")
+DEFINE_BOOL(regexp_interpret_all, false, "interpret all regexp code")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_BOOL(testing_bool_flag, true, "testing_bool_flag")
@@ -1158,6 +1183,11 @@ DEFINE_BOOL(jitless, V8_LITE_BOOL,
// Optimizations (i.e. jitting) are disabled.
DEFINE_NEG_IMPLICATION(jitless, opt)
#endif
+// Field representation tracking is only used by TurboFan.
+DEFINE_NEG_IMPLICATION(jitless, track_field_types)
+DEFINE_NEG_IMPLICATION(jitless, track_heap_object_fields)
+// Regexps are interpreted.
+DEFINE_IMPLICATION(jitless, regexp_interpret_all)
// asm.js validation is disabled since it triggers wasm code generation.
DEFINE_NEG_IMPLICATION(jitless, validate_asm)
// Wasm is put into interpreter-only mode. We repeat flag implications down
@@ -1165,6 +1195,7 @@ DEFINE_NEG_IMPLICATION(jitless, validate_asm)
DEFINE_IMPLICATION(jitless, wasm_interpret_all)
DEFINE_NEG_IMPLICATION(jitless, asm_wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(jitless, wasm_lazy_compilation)
+// --jitless also implies --no-expose-wasm, see InitializeOncePerProcessImpl.
// Enable recompilation of functions with optimized code.
DEFINE_BOOL(opt, !V8_LITE_BOOL, "use adaptive optimizations")
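
The --jitless block above is assembled purely out of implications: DEFINE_IMPLICATION forces a dependent flag on, DEFINE_NEG_IMPLICATION forces one off, and chains of implications are resolved when the flags are finalized. A minimal standalone sketch of that resolution step, with illustrative flag variables rather than V8's generated flag machinery:

#include <cstdio>

// Hypothetical flag storage; V8 generates the real variables from
// flag-definitions.h.
static bool flag_jitless = true;
static bool flag_opt = true;
static bool flag_regexp_interpret_all = false;

// Re-apply implication rules until a fixed point is reached, since one
// implication may enable another.
static void ComputeFlagImplications() {
  bool changed = true;
  while (changed) {
    changed = false;
    // DEFINE_IMPLICATION(jitless, regexp_interpret_all)
    if (flag_jitless && !flag_regexp_interpret_all) {
      flag_regexp_interpret_all = true;
      changed = true;
    }
    // DEFINE_NEG_IMPLICATION(jitless, opt)
    if (flag_jitless && flag_opt) {
      flag_opt = false;
      changed = true;
    }
  }
}

int main() {
  ComputeFlagImplications();
  std::printf("opt=%d regexp_interpret_all=%d\n", flag_opt,
              flag_regexp_interpret_all);
  return 0;
}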
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 33938a6347..5e6b35da42 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -610,6 +610,12 @@ void ComputeFlagListHash() {
}
for (size_t i = 0; i < num_flags; ++i) {
Flag* current = &flags[i];
+ if (current->type() == Flag::TYPE_BOOL &&
+ current->bool_variable() == &FLAG_profile_deserialization) {
+ // We want to be able to flip --profile-deserialization without
+ // causing the code cache to get invalidated by this hash.
+ continue;
+ }
if (!current->IsDefault()) {
modified_args_as_string << i;
modified_args_as_string << *current;
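
The early continue above exempts --profile-deserialization from the flag-list hash, so flipping that one flag cannot invalidate the code cache while every other modified flag still contributes. A standalone sketch of the same idea, hashing only non-default, non-exempt flags (simplified Flag type and FNV-1a; not V8's actual Flag class or hash):

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct Flag {
  std::string name;
  bool value;
  bool default_value;
  bool IsDefault() const { return value == default_value; }
};

uint32_t ComputeFlagListHash(const std::vector<Flag>& flags,
                             const std::string& exempt) {
  std::string modified;
  for (const Flag& f : flags) {
    if (f.name == exempt) continue;  // e.g. "profile_deserialization"
    if (!f.IsDefault()) modified += f.name + (f.value ? "=1" : "=0");
  }
  // FNV-1a over the textual representation of the modified flags.
  uint32_t hash = 2166136261u;
  for (char c : modified) {
    hash ^= static_cast<uint8_t>(c);
    hash *= 16777619u;
  }
  return hash;
}

int main() {
  std::vector<Flag> flags = {{"opt", false, true},
                             {"profile_deserialization", true, false}};
  // Hash is unchanged whether or not the exempt flag was flipped.
  std::printf("%u\n", ComputeFlagListHash(flags, "profile_deserialization"));
  return 0;
}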
diff --git a/deps/v8/src/flush-instruction-cache.cc b/deps/v8/src/flush-instruction-cache.cc
new file mode 100644
index 0000000000..54f3f6c6ff
--- /dev/null
+++ b/deps/v8/src/flush-instruction-cache.cc
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/flush-instruction-cache.h"
+
+#include "src/base/platform/mutex.h"
+#include "src/cpu-features.h"
+#include "src/simulator.h"
+
+namespace v8 {
+namespace internal {
+
+void FlushInstructionCache(void* start, size_t size) {
+ if (size == 0) return;
+ if (FLAG_jitless) return;
+
+#if defined(USE_SIMULATOR)
+ base::MutexGuard lock_guard(Simulator::i_cache_mutex());
+ Simulator::FlushICache(Simulator::i_cache(), start, size);
+#else
+ CpuFeatures::FlushICache(start, size);
+#endif // USE_SIMULATOR
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/flush-instruction-cache.h b/deps/v8/src/flush-instruction-cache.h
new file mode 100644
index 0000000000..48adc5a95a
--- /dev/null
+++ b/deps/v8/src/flush-instruction-cache.h
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FLUSH_INSTRUCTION_CACHE_H_
+#define V8_FLUSH_INSTRUCTION_CACHE_H_
+
+#include "include/v8-internal.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+V8_EXPORT_PRIVATE void FlushInstructionCache(void* start, size_t size);
+V8_EXPORT_PRIVATE V8_INLINE void FlushInstructionCache(Address start,
+ size_t size) {
+ return FlushInstructionCache(reinterpret_cast<void*>(start), size);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_FLUSH_INSTRUCTION_CACHE_H_
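
The helper extracted into its own translation unit here has one typical call-site shape: write machine code through a data pointer, then flush the affected range before it can be executed (close to a no-op on x86, mandatory on ARM, and routed to the simulator's i-cache under USE_SIMULATOR). A sketch of that pattern with a stubbed flush so it compiles standalone; PatchCode is a hypothetical caller, not a V8 function:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Stand-in for v8::internal::FlushInstructionCache; the real implementation
// dispatches to Simulator::FlushICache or CpuFeatures::FlushICache.
void FlushInstructionCache(void* start, size_t size) {
  if (size == 0) return;
  (void)start;  // platform-specific cache maintenance would go here
}

// Hypothetical patcher: overwrite an instruction sequence, then flush the
// range so stale instructions cannot be fetched afterwards.
void PatchCode(uint8_t* code, const uint8_t* new_bytes, size_t size) {
  std::memcpy(code, new_bytes, size);
  FlushInstructionCache(code, size);
}

int main() {
  uint8_t buffer[4] = {0, 0, 0, 0};
  const uint8_t nop4[4] = {0x1F, 0x20, 0x03, 0xD5};  // AArch64 NOP encoding
  PatchCode(buffer, nop4, sizeof(nop4));
  return 0;
}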
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 7af1ef1e98..aa8df4f2b9 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -199,7 +199,7 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
// an InterpretedFrame, so we do these fast checks first
if (StackFrame::IsTypeMarker(marker) || maybe_function->IsSmi()) {
return false;
- } else if (!isolate->heap()->code_space()->ContainsSlow(pc)) {
+ } else if (!isolate->heap()->InSpaceSlow(pc, CODE_SPACE)) {
return false;
}
interpreter_entry_trampoline =
@@ -852,6 +852,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
uint32_t stack_slots;
Code code;
bool has_tagged_params = false;
+ uint32_t tagged_parameter_slots = 0;
if (wasm_code != nullptr) {
SafepointTable table(wasm_code->instruction_start(),
wasm_code->safepoint_table_offset(),
@@ -859,6 +860,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
safepoint_entry = table.FindEntry(inner_pointer);
stack_slots = wasm_code->stack_slots();
has_tagged_params = wasm_code->kind() != wasm::WasmCode::kFunction;
+ tagged_parameter_slots = wasm_code->tagged_parameter_slots();
} else {
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
@@ -970,6 +972,19 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
}
}
+ // Visit tagged parameters that have been passed to the function of this
+  // frame. Conceptually these parameters belong to the parent frame. However,
+ // the exact count is only known by this frame (in the presence of tail calls,
+ // this information cannot be derived from the call site).
+ if (tagged_parameter_slots > 0) {
+ FullObjectSlot tagged_parameter_base(&Memory<Address>(caller_sp()));
+ FullObjectSlot tagged_parameter_limit =
+ tagged_parameter_base + tagged_parameter_slots;
+
+ v->VisitRootPointers(Root::kTop, nullptr, tagged_parameter_base,
+ tagged_parameter_limit);
+ }
+
// For the off-heap code cases, we can skip this.
if (!code.is_null()) {
// Visit the return address in the callee and incoming arguments.
@@ -994,11 +1009,6 @@ Address StubFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
-
-int StubFrame::GetNumberOfIncomingArguments() const {
- return 0;
-}
-
int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) {
Code code = LookupCode();
DCHECK(code->is_turbofanned());
@@ -1033,19 +1043,13 @@ bool JavaScriptFrame::HasInlinedFrames() const {
Code JavaScriptFrame::unchecked_code() const { return function()->code(); }
-int JavaScriptFrame::GetNumberOfIncomingArguments() const {
- DCHECK(can_access_heap_objects() &&
- isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
- return function()->shared()->internal_formal_parameter_count();
-}
-
-int OptimizedFrame::GetNumberOfIncomingArguments() const {
+int OptimizedFrame::ComputeParametersCount() const {
Code code = LookupCode();
if (code->kind() == Code::BUILTIN) {
return static_cast<int>(
Memory<intptr_t>(fp() + OptimizedBuiltinFrameConstants::kArgCOffset));
} else {
- return JavaScriptFrame::GetNumberOfIncomingArguments();
+ return JavaScriptFrame::ComputeParametersCount();
}
}
@@ -1075,9 +1079,10 @@ void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions) const {
Code code = LookupCode();
int offset = static_cast<int>(pc() - code->InstructionStart());
AbstractCode abstract_code = AbstractCode::cast(code);
- FrameSummary::JavaScriptFrameSummary summary(isolate(), receiver(),
- function(), abstract_code,
- offset, IsConstructor());
+ Handle<FixedArray> params = GetParameters();
+ FrameSummary::JavaScriptFrameSummary summary(
+ isolate(), receiver(), function(), abstract_code, offset, IsConstructor(),
+ *params);
functions->push_back(summary);
}
@@ -1109,7 +1114,7 @@ Script JavaScriptFrame::script() const {
int JavaScriptFrame::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
- DCHECK_EQ(0, LookupCode()->handler_table_offset());
+ DCHECK(!LookupCode()->has_handler_table());
DCHECK(!LookupCode()->is_optimized_code());
return -1;
}
@@ -1201,38 +1206,28 @@ void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction function,
}
}
-void JavaScriptFrame::CollectTopFrameForICStats(Isolate* isolate) {
- // constructor calls
- DisallowHeapAllocation no_allocation;
- JavaScriptFrameIterator it(isolate);
- ICInfo& ic_info = ICStats::instance()->Current();
- while (!it.done()) {
- if (it.frame()->is_java_script()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->IsConstructor()) ic_info.is_constructor = true;
- JSFunction function = frame->function();
- int code_offset = 0;
- if (frame->is_interpreted()) {
- InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
- code_offset = iframe->GetBytecodeOffset();
- } else {
- Code code = frame->unchecked_code();
- code_offset = static_cast<int>(frame->pc() - code->InstructionStart());
- }
- CollectFunctionAndOffsetForICStats(function, function->abstract_code(),
- code_offset);
- return;
- }
- it.Advance();
- }
-}
-
Object JavaScriptFrame::GetParameter(int index) const {
return Object(Memory<Address>(GetParameterSlot(index)));
}
int JavaScriptFrame::ComputeParametersCount() const {
- return GetNumberOfIncomingArguments();
+ DCHECK(can_access_heap_objects() &&
+ isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
+ return function()->shared()->internal_formal_parameter_count();
+}
+
+Handle<FixedArray> JavaScriptFrame::GetParameters() const {
+ if (V8_LIKELY(!FLAG_detailed_error_stack_trace)) {
+ return isolate()->factory()->empty_fixed_array();
+ }
+ int param_count = ComputeParametersCount();
+ Handle<FixedArray> parameters =
+ isolate()->factory()->NewFixedArray(param_count);
+ for (int i = 0; i < param_count; i++) {
+ parameters->set(i, GetParameter(i));
+ }
+
+ return parameters;
}
int JavaScriptBuiltinContinuationFrame::ComputeParametersCount() const {
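
GetParameters above is deliberately guarded: unless --detailed-error-stack-trace is set it returns the canonical empty fixed array, so the common path allocates nothing and only the diagnostic path copies the parameter slots. A standalone sketch of the same guard-plus-singleton pattern, using illustrative types instead of V8's Handle/factory API:

#include <cstdio>
#include <memory>
#include <vector>

static bool FLAG_detailed_error_stack_trace = false;

struct Frame {
  std::vector<int> slots;  // stand-in for the on-stack parameter slots

  int ComputeParametersCount() const {
    return static_cast<int>(slots.size());
  }
  int GetParameter(int i) const { return slots[i]; }

  std::shared_ptr<const std::vector<int>> GetParameters() const {
    // Shared singleton mirrors factory()->empty_fixed_array(): the fast
    // path hands out the same empty array instead of allocating.
    static const auto empty = std::make_shared<const std::vector<int>>();
    if (!FLAG_detailed_error_stack_trace) return empty;
    auto params = std::make_shared<std::vector<int>>();
    for (int i = 0; i < ComputeParametersCount(); i++) {
      params->push_back(GetParameter(i));
    }
    return params;
  }
};

int main() {
  Frame f{{1, 2, 3}};
  FLAG_detailed_error_stack_trace = true;
  std::printf("captured %zu parameters\n", f.GetParameters()->size());
  return 0;
}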
@@ -1271,15 +1266,22 @@ void JavaScriptBuiltinContinuationWithCatchFrame::SetException(
FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
Isolate* isolate, Object receiver, JSFunction function,
- AbstractCode abstract_code, int code_offset, bool is_constructor)
+ AbstractCode abstract_code, int code_offset, bool is_constructor,
+ FixedArray parameters)
: FrameSummaryBase(isolate, FrameSummary::JAVA_SCRIPT),
receiver_(receiver, isolate),
function_(function, isolate),
abstract_code_(abstract_code, isolate),
code_offset_(code_offset),
- is_constructor_(is_constructor) {
+ is_constructor_(is_constructor),
+ parameters_(parameters, isolate) {
DCHECK(abstract_code->IsBytecodeArray() ||
Code::cast(abstract_code)->kind() != Code::OPTIMIZED_FUNCTION);
+ // TODO(v8:8510): Move this to the SourcePosition getter.
+ if (FLAG_enable_lazy_source_positions && abstract_code->IsBytecodeArray()) {
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(
+ isolate, handle(function->shared(), isolate));
+ }
}
bool FrameSummary::JavaScriptFrameSummary::is_subject_to_debugging() const {
@@ -1523,9 +1525,10 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
}
// Append full summary of the encountered JS frame.
- FrameSummary::JavaScriptFrameSummary summary(isolate(), *receiver,
- *function, *abstract_code,
- code_offset, is_constructor);
+ Handle<FixedArray> params = GetParameters();
+ FrameSummary::JavaScriptFrameSummary summary(
+ isolate(), *receiver, *function, *abstract_code, code_offset,
+ is_constructor, *params);
frames->push_back(summary);
is_constructor = false;
} else if (it->kind() == TranslatedFrame::kConstructStub) {
@@ -1736,13 +1739,14 @@ void InterpretedFrame::WriteInterpreterRegister(int register_index,
void InterpretedFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
AbstractCode abstract_code = AbstractCode::cast(GetBytecodeArray());
+ Handle<FixedArray> params = GetParameters();
FrameSummary::JavaScriptFrameSummary summary(
isolate(), receiver(), function(), abstract_code, GetBytecodeOffset(),
- IsConstructor());
+ IsConstructor(), *params);
functions->push_back(summary);
}
-int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
+int ArgumentsAdaptorFrame::ComputeParametersCount() const {
return Smi::ToInt(GetExpression(0));
}
@@ -1751,7 +1755,7 @@ Code ArgumentsAdaptorFrame::unchecked_code() const {
Builtins::kArgumentsAdaptorTrampoline);
}
-int BuiltinFrame::GetNumberOfIncomingArguments() const {
+int BuiltinFrame::ComputeParametersCount() const {
return Smi::ToInt(GetExpression(0));
}
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 6672d7b3bc..35f610b7c0 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -446,6 +446,7 @@ class BuiltinExitFrame : public ExitFrame {
inline Object new_target_slot_object() const;
friend class StackFrameIteratorBase;
+ friend class FrameArrayBuilder;
};
class StandardFrame;
@@ -480,13 +481,15 @@ class FrameSummary {
public:
JavaScriptFrameSummary(Isolate* isolate, Object receiver,
JSFunction function, AbstractCode abstract_code,
- int code_offset, bool is_constructor);
+ int code_offset, bool is_constructor,
+ FixedArray parameters);
Handle<Object> receiver() const { return receiver_; }
Handle<JSFunction> function() const { return function_; }
Handle<AbstractCode> abstract_code() const { return abstract_code_; }
int code_offset() const { return code_offset_; }
bool is_constructor() const { return is_constructor_; }
+ Handle<FixedArray> parameters() const { return parameters_; }
bool is_subject_to_debugging() const;
int SourcePosition() const;
int SourceStatementPosition() const;
@@ -500,6 +503,7 @@ class FrameSummary {
Handle<AbstractCode> abstract_code_;
int code_offset_;
bool is_constructor_;
+ Handle<FixedArray> parameters_;
};
class WasmFrameSummary : public FrameSummaryBase {
@@ -555,7 +559,6 @@ class FrameSummary {
int byte_offset_;
};
-#undef FRAME_SUMMARY_FIELD
#define FRAME_SUMMARY_CONS(kind, type, field, desc) \
FrameSummary(type summ) : field(summ) {} // NOLINT
FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_CONS)
@@ -600,6 +603,7 @@ class FrameSummary {
FrameSummaryBase base_;
FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_FIELD)
};
+#undef FRAME_SUMMARY_FIELD
};
class StandardFrame : public StackFrame {
@@ -694,6 +698,7 @@ class JavaScriptFrame : public StandardFrame {
inline Address GetParameterSlot(int index) const;
Object GetParameter(int index) const override;
int ComputeParametersCount() const override;
+ Handle<FixedArray> GetParameters() const;
// Debugger access.
void SetParameterValue(int index, Object value) const;
@@ -752,15 +757,12 @@ class JavaScriptFrame : public StandardFrame {
static void CollectFunctionAndOffsetForICStats(JSFunction function,
AbstractCode code,
int code_offset);
- static void CollectTopFrameForICStats(Isolate* isolate);
protected:
inline explicit JavaScriptFrame(StackFrameIteratorBase* iterator);
Address GetCallerStackPointer() const override;
- virtual int GetNumberOfIncomingArguments() const;
-
virtual void PrintFrameKind(StringStream* accumulator) const {}
private:
@@ -791,8 +793,6 @@ class StubFrame : public StandardFrame {
Address GetCallerStackPointer() const override;
- virtual int GetNumberOfIncomingArguments() const;
-
friend class StackFrameIteratorBase;
};
@@ -818,13 +818,13 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationData GetDeoptimizationData(int* deopt_index) const;
Object receiver() const override;
+ int ComputeParametersCount() const override;
static int StackSlotOffsetRelativeToFp(int slot_index);
protected:
inline explicit OptimizedFrame(StackFrameIteratorBase* iterator);
- int GetNumberOfIncomingArguments() const override;
private:
friend class StackFrameIteratorBase;
@@ -897,6 +897,8 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
return static_cast<ArgumentsAdaptorFrame*>(frame);
}
+ int ComputeParametersCount() const override;
+
// Printing support.
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
@@ -904,7 +906,6 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
protected:
inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
- int GetNumberOfIncomingArguments() const override;
private:
friend class StackFrameIteratorBase;
@@ -920,11 +921,11 @@ class BuiltinFrame final : public JavaScriptFrame {
DCHECK(frame->is_builtin());
return static_cast<BuiltinFrame*>(frame);
}
+ int ComputeParametersCount() const final;
protected:
inline explicit BuiltinFrame(StackFrameIteratorBase* iterator);
- int GetNumberOfIncomingArguments() const final;
void PrintFrameKind(StringStream* accumulator) const override;
private:
diff --git a/deps/v8/src/function-kind.h b/deps/v8/src/function-kind.h
new file mode 100644
index 0000000000..c7a083784b
--- /dev/null
+++ b/deps/v8/src/function-kind.h
@@ -0,0 +1,190 @@
+
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FUNCTION_KIND_H_
+#define V8_FUNCTION_KIND_H_
+
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+enum FunctionKind : uint8_t {
+ // BEGIN constructable functions
+ kNormalFunction,
+ kModule,
+ // BEGIN class constructors
+ // BEGIN base constructors
+ kBaseConstructor,
+ // BEGIN default constructors
+ kDefaultBaseConstructor,
+ // END base constructors
+  // BEGIN derived constructors
+ kDefaultDerivedConstructor,
+ // END default constructors
+ kDerivedConstructor,
+  // END derived constructors
+  // END class constructors
+ // END constructable functions.
+ // BEGIN accessors
+ kGetterFunction,
+ kSetterFunction,
+ // END accessors
+ // BEGIN arrow functions
+ kArrowFunction,
+ // BEGIN async functions
+ kAsyncArrowFunction,
+ // END arrow functions
+ kAsyncFunction,
+ // BEGIN concise methods 1
+ kAsyncConciseMethod,
+ // BEGIN generators
+ kAsyncConciseGeneratorMethod,
+ // END concise methods 1
+ kAsyncGeneratorFunction,
+ // END async functions
+ kGeneratorFunction,
+ // BEGIN concise methods 2
+ kConciseGeneratorMethod,
+ // END generators
+ kConciseMethod,
+ kClassMembersInitializerFunction,
+ // END concise methods 2
+
+ kLastFunctionKind = kClassMembersInitializerFunction,
+};
+
+inline bool IsArrowFunction(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kArrowFunction,
+ FunctionKind::kAsyncArrowFunction);
+}
+
+inline bool IsModule(FunctionKind kind) {
+ return kind == FunctionKind::kModule;
+}
+
+inline bool IsAsyncGeneratorFunction(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kAsyncConciseGeneratorMethod,
+ FunctionKind::kAsyncGeneratorFunction);
+}
+
+inline bool IsGeneratorFunction(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kAsyncConciseGeneratorMethod,
+ FunctionKind::kConciseGeneratorMethod);
+}
+
+inline bool IsAsyncFunction(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kAsyncArrowFunction,
+ FunctionKind::kAsyncGeneratorFunction);
+}
+
+inline bool IsResumableFunction(FunctionKind kind) {
+ return IsGeneratorFunction(kind) || IsAsyncFunction(kind) || IsModule(kind);
+}
+
+inline bool IsConciseMethod(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kAsyncConciseMethod,
+ FunctionKind::kAsyncConciseGeneratorMethod) ||
+ IsInRange(kind, FunctionKind::kConciseGeneratorMethod,
+ FunctionKind::kClassMembersInitializerFunction);
+}
+
+inline bool IsStrictFunctionWithoutPrototype(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kGetterFunction,
+ FunctionKind::kAsyncArrowFunction) ||
+ IsInRange(kind, FunctionKind::kAsyncConciseMethod,
+ FunctionKind::kAsyncConciseGeneratorMethod) ||
+ IsInRange(kind, FunctionKind::kConciseGeneratorMethod,
+ FunctionKind::kClassMembersInitializerFunction);
+}
+
+inline bool IsGetterFunction(FunctionKind kind) {
+ return kind == FunctionKind::kGetterFunction;
+}
+
+inline bool IsSetterFunction(FunctionKind kind) {
+ return kind == FunctionKind::kSetterFunction;
+}
+
+inline bool IsAccessorFunction(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kGetterFunction,
+ FunctionKind::kSetterFunction);
+}
+
+inline bool IsDefaultConstructor(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kDefaultBaseConstructor,
+ FunctionKind::kDefaultDerivedConstructor);
+}
+
+inline bool IsBaseConstructor(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kBaseConstructor,
+ FunctionKind::kDefaultBaseConstructor);
+}
+
+inline bool IsDerivedConstructor(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kDefaultDerivedConstructor,
+ FunctionKind::kDerivedConstructor);
+}
+
+inline bool IsClassConstructor(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kBaseConstructor,
+ FunctionKind::kDerivedConstructor);
+}
+
+inline bool IsClassMembersInitializerFunction(FunctionKind kind) {
+ return kind == FunctionKind::kClassMembersInitializerFunction;
+}
+
+inline bool IsConstructable(FunctionKind kind) {
+ return IsInRange(kind, FunctionKind::kNormalFunction,
+ FunctionKind::kDerivedConstructor);
+}
+
+inline std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kNormalFunction:
+ return os << "NormalFunction";
+ case FunctionKind::kArrowFunction:
+ return os << "ArrowFunction";
+ case FunctionKind::kGeneratorFunction:
+ return os << "GeneratorFunction";
+ case FunctionKind::kConciseMethod:
+ return os << "ConciseMethod";
+ case FunctionKind::kDerivedConstructor:
+ return os << "DerivedConstructor";
+ case FunctionKind::kBaseConstructor:
+ return os << "BaseConstructor";
+ case FunctionKind::kGetterFunction:
+ return os << "GetterFunction";
+ case FunctionKind::kSetterFunction:
+ return os << "SetterFunction";
+ case FunctionKind::kAsyncFunction:
+ return os << "AsyncFunction";
+ case FunctionKind::kModule:
+ return os << "Module";
+ case FunctionKind::kClassMembersInitializerFunction:
+ return os << "ClassMembersInitializerFunction";
+ case FunctionKind::kDefaultBaseConstructor:
+ return os << "DefaultBaseConstructor";
+ case FunctionKind::kDefaultDerivedConstructor:
+ return os << "DefaultDerivedConstructor";
+ case FunctionKind::kAsyncArrowFunction:
+ return os << "AsyncArrowFunction";
+ case FunctionKind::kAsyncConciseMethod:
+ return os << "AsyncConciseMethod";
+ case FunctionKind::kConciseGeneratorMethod:
+ return os << "ConciseGeneratorMethod";
+ case FunctionKind::kAsyncConciseGeneratorMethod:
+ return os << "AsyncConciseGeneratorMethod";
+ case FunctionKind::kAsyncGeneratorFunction:
+ return os << "AsyncGeneratorFunction";
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_FUNCTION_KIND_H_
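
The BEGIN/END markers above are load-bearing: the kinds are ordered so that every predicate compiles to a single unsigned range check via IsInRange (from src/utils.h). A self-contained sketch of the trick with a reduced kind set; the enum and predicates here are illustrative:

#include <cassert>
#include <cstdint>

// One comparison instead of a chain of equality tests; relies on unsigned
// wraparound to reject values below |lower|.
template <typename T, typename U>
constexpr bool IsInRange(T value, U lower, U higher) {
  return static_cast<unsigned>(value) - static_cast<unsigned>(lower) <=
         static_cast<unsigned>(higher) - static_cast<unsigned>(lower);
}

enum Kind : uint8_t {
  kNormal,
  // BEGIN accessors
  kGetter,
  kSetter,
  // END accessors
  // BEGIN arrow functions
  kArrow,
  kAsyncArrow,
  // END arrow functions
  kAsync,
};

constexpr bool IsAccessor(Kind k) { return IsInRange(k, kGetter, kSetter); }
constexpr bool IsArrow(Kind k) { return IsInRange(k, kArrow, kAsyncArrow); }

int main() {
  static_assert(IsAccessor(kSetter), "in range");
  static_assert(!IsArrow(kAsync), "past the END marker");
  assert(!IsAccessor(kNormal));
  return 0;
}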
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 5ed9347e96..db47bb3022 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -18,6 +18,7 @@
#include "src/ostreams.h"
#include "src/snapshot/natives.h"
#include "src/splay-tree-inl.h"
+#include "src/vector.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -644,8 +645,7 @@ class ELF {
void WriteHeader(Writer* w) {
DCHECK_EQ(w->position(), 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
- (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
+#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM)
const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0};
#elif(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT) || \
@@ -781,7 +781,6 @@ class ELFSymbol {
return static_cast<Binding>(info >> 4);
}
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
- (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT) || \
(V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT))
struct SerializedLayout {
SerializedLayout(uint32_t name,
@@ -1126,7 +1125,7 @@ class DebugInfoSection : public DebugSection {
uint32_t ty_offset = static_cast<uint32_t>(w->position() - cu_start);
w->WriteULEB128(3);
- w->Write<uint8_t>(kPointerSize);
+ w->Write<uint8_t>(kSystemPointerSize);
w->WriteString("v8value");
if (desc_->has_scope_info()) {
@@ -1174,9 +1173,8 @@ class DebugInfoSection : public DebugSection {
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
w->Write<uint8_t>(DW_OP_fbreg);
- w->WriteSLEB128(
- JavaScriptFrameConstants::kLastParameterOffset +
- kPointerSize * (params - param - 1));
+ w->WriteSLEB128(JavaScriptFrameConstants::kLastParameterOffset +
+ kSystemPointerSize * (params - param - 1));
block_size.set(static_cast<uint32_t>(w->position() - block_start));
}
@@ -1635,15 +1633,15 @@ class UnwindInfoSection : public DebugSection {
void UnwindInfoSection::WriteLength(Writer* w,
Writer::Slot<uint32_t>* length_slot,
int initial_position) {
- uint32_t align = (w->position() - initial_position) % kPointerSize;
+ uint32_t align = (w->position() - initial_position) % kSystemPointerSize;
if (align != 0) {
- for (uint32_t i = 0; i < (kPointerSize - align); i++) {
+ for (uint32_t i = 0; i < (kSystemPointerSize - align); i++) {
w->Write<uint8_t>(DW_CFA_NOP);
}
}
- DCHECK_EQ((w->position() - initial_position) % kPointerSize, 0);
+ DCHECK_EQ((w->position() - initial_position) % kSystemPointerSize, 0);
length_slot->set(static_cast<uint32_t>(w->position() - initial_position));
}
@@ -1703,7 +1701,7 @@ void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
// for the previous function. The previous RBP has not been pushed yet.
w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
w->WriteULEB128(AMD64_RSP);
- w->WriteSLEB128(-kPointerSize);
+ w->WriteSLEB128(-kSystemPointerSize);
// The RA is stored at location CFA + kCallerPCOffset. This is an invariant,
// and hence omitted from the next states.
@@ -1765,7 +1763,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
// The CFA is now calculated in the same way as in the first state.
w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
w->WriteULEB128(AMD64_RSP);
- w->WriteSLEB128(-kPointerSize);
+ w->WriteSLEB128(-kSystemPointerSize);
// The RBP
w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 8e1da8f4ca..5eb3d93f14 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -7,6 +7,8 @@
#include "src/api-inl.h"
#include "src/base/compiler-specific.h"
#include "src/cancelable-task.h"
+#include "src/heap/embedder-tracing.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects-inl.h"
#include "src/objects/slots.h"
#include "src/task-utils.h"
@@ -233,86 +235,168 @@ void GlobalHandles::NodeSpace<NodeType>::Free(NodeType* node) {
global_handles_->handles_count_--;
}
-class GlobalHandles::Node final {
+template <class Child>
+class NodeBase {
public:
- // State transition diagram:
- // FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE }
- enum State {
- FREE = 0,
- NORMAL, // Normal global handle.
- WEAK, // Flagged as weak but not yet finalized.
- PENDING, // Has been recognized as only reachable by weak handles.
- NEAR_DEATH, // Callback has informed the handle is near death.
- NUMBER_OF_NODE_STATES
- };
-
- // Maps handle location (slot) to the containing node.
- static Node* FromLocation(Address* location) {
- DCHECK_EQ(offsetof(Node, object_), 0);
- return reinterpret_cast<Node*>(location);
+ static Child* FromLocation(Address* location) {
+ return reinterpret_cast<Child*>(location);
}
- Node() {
- DCHECK_EQ(offsetof(Node, class_id_), Internals::kNodeClassIdOffset);
- DCHECK_EQ(offsetof(Node, flags_), Internals::kNodeFlagsOffset);
- STATIC_ASSERT(static_cast<int>(NodeState::kMask) ==
- Internals::kNodeStateMask);
- STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue);
- STATIC_ASSERT(PENDING == Internals::kNodeStateIsPendingValue);
- STATIC_ASSERT(NEAR_DEATH == Internals::kNodeStateIsNearDeathValue);
- STATIC_ASSERT(static_cast<int>(IsIndependent::kShift) ==
- Internals::kNodeIsIndependentShift);
- STATIC_ASSERT(static_cast<int>(IsActive::kShift) ==
- Internals::kNodeIsActiveShift);
- set_in_new_space_list(false);
+ NodeBase() {
+ DCHECK_EQ(offsetof(NodeBase, object_), 0);
+ DCHECK_EQ(offsetof(NodeBase, class_id_), Internals::kNodeClassIdOffset);
+ DCHECK_EQ(offsetof(NodeBase, flags_), Internals::kNodeFlagsOffset);
}
#ifdef ENABLE_HANDLE_ZAPPING
- ~Node() {
+ ~NodeBase() {
ClearFields();
data_.next_free = nullptr;
index_ = 0;
}
#endif
- void Free(Node* free_list) {
+ void Free(Child* free_list) {
ClearFields();
- set_state(FREE);
+ AsChild()->MarkAsFree();
data_.next_free = free_list;
}
void Acquire(Object object) {
- DCHECK(!IsInUse());
+ DCHECK(!AsChild()->IsInUse());
CheckFieldsAreCleared();
object_ = object.ptr();
- set_state(NORMAL);
+ AsChild()->MarkAsUsed();
data_.parameter = nullptr;
- DCHECK(IsInUse());
+ DCHECK(AsChild()->IsInUse());
}
- void Release(Node* free_list) {
- DCHECK(IsInUse());
+ void Release(Child* free_list) {
+ DCHECK(AsChild()->IsInUse());
Free(free_list);
- DCHECK(!IsInUse());
- }
-
- void Zap() {
- DCHECK(IsInUse());
- // Zap the values for eager trapping.
- object_ = kGlobalHandleZapValue;
+ DCHECK(!AsChild()->IsInUse());
}
- // Object slot accessors.
Object object() const { return Object(object_); }
FullObjectSlot location() { return FullObjectSlot(&object_); }
- const char* label() { return state() == NORMAL ? data_.label : nullptr; }
Handle<Object> handle() { return Handle<Object>(&object_); }
- // Wrapper class ID accessors.
+ uint8_t index() const { return index_; }
+ void set_index(uint8_t value) { index_ = value; }
+
+ uint16_t wrapper_class_id() const { return class_id_; }
bool has_wrapper_class_id() const {
return class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId;
}
- uint16_t wrapper_class_id() const { return class_id_; }
+
+ // Accessors for next free node in the free list.
+ Child* next_free() {
+ DCHECK(!AsChild()->IsInUse());
+ return data_.next_free;
+ }
+
+ void set_parameter(void* parameter) {
+ DCHECK(AsChild()->IsInUse());
+ data_.parameter = parameter;
+ }
+ void* parameter() const {
+ DCHECK(AsChild()->IsInUse());
+ return data_.parameter;
+ }
+
+ protected:
+ Child* AsChild() { return reinterpret_cast<Child*>(this); }
+ const Child* AsChild() const { return reinterpret_cast<const Child*>(this); }
+
+ void ClearFields() {
+ // Zap the values for eager trapping.
+ object_ = kGlobalHandleZapValue;
+ class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+ AsChild()->ClearImplFields();
+ }
+
+ void CheckFieldsAreCleared() {
+ DCHECK_EQ(kGlobalHandleZapValue, object_);
+ DCHECK_EQ(v8::HeapProfiler::kPersistentHandleNoClassId, class_id_);
+ AsChild()->CheckImplFieldsAreCleared();
+ }
+
+ // Storage for object pointer.
+ //
+ // Placed first to avoid offset computation. The stored data is equivalent to
+ // an Object. It is stored as a plain Address for convenience (smallest number
+ // of casts), and because it is a private implementation detail: the public
+ // interface provides type safety.
+ Address object_;
+
+ // Class id set by the embedder.
+ uint16_t class_id_;
+
+ // Index in the containing handle block.
+ uint8_t index_;
+
+ uint8_t flags_;
+
+ // The meaning of this field depends on node state:
+ // - Node in free list: Stores next free node pointer.
+ // - Otherwise, specific to the node implementation.
+ union {
+ Child* next_free;
+ void* parameter;
+ } data_;
+};
+
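
Node (re-declared below) and the new TracedNode both derive from this NodeBase via CRTP: the base owns the object slot, flag byte, and free-list union, and statically dispatches state changes to the concrete class through AsChild(), so handle blocks need no vtable. A minimal standalone sketch of the pattern under illustrative names:

#include <cstdint>
#include <cstdio>

template <class Child>
class NodeBase {
 public:
  void Acquire(int object) {
    object_ = object;
    AsChild()->MarkAsUsed();  // statically dispatched; no vtable
  }
  void Release(Child* free_list) {
    AsChild()->MarkAsFree();
    next_free_ = free_list;
  }
  Child* next_free() { return next_free_; }

 protected:
  Child* AsChild() { return static_cast<Child*>(this); }

  int object_ = 0;
  Child* next_free_ = nullptr;
};

class Node final : public NodeBase<Node> {
 public:
  void MarkAsUsed() { state_ = 1; }
  void MarkAsFree() { state_ = 0; }
  bool IsInUse() const { return state_ != 0; }

 private:
  uint8_t state_ = 0;
};

int main() {
  Node node;
  node.Acquire(42);
  std::printf("in use: %d\n", node.IsInUse());
  node.Release(nullptr);
  return 0;
}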
+namespace {
+
+void ExtractInternalFields(JSObject jsobject, void** embedder_fields, int len) {
+ int field_count = jsobject->GetEmbedderFieldCount();
+ for (int i = 0; i < len; ++i) {
+ if (field_count == i) break;
+ void* pointer;
+ if (EmbedderDataSlot(jsobject, i).ToAlignedPointer(&pointer)) {
+ embedder_fields[i] = pointer;
+ }
+ }
+}
+
+} // namespace
+
+class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
+ public:
+ // State transition diagram:
+ // FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE }
+ enum State {
+ FREE = 0,
+ NORMAL, // Normal global handle.
+ WEAK, // Flagged as weak but not yet finalized.
+ PENDING, // Has been recognized as only reachable by weak handles.
+ NEAR_DEATH, // Callback has informed the handle is near death.
+ NUMBER_OF_NODE_STATES
+ };
+
+ Node() {
+ STATIC_ASSERT(static_cast<int>(NodeState::kMask) ==
+ Internals::kNodeStateMask);
+ STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue);
+ STATIC_ASSERT(PENDING == Internals::kNodeStateIsPendingValue);
+ STATIC_ASSERT(NEAR_DEATH == Internals::kNodeStateIsNearDeathValue);
+ STATIC_ASSERT(static_cast<int>(IsIndependent::kShift) ==
+ Internals::kNodeIsIndependentShift);
+ STATIC_ASSERT(static_cast<int>(IsActive::kShift) ==
+ Internals::kNodeIsActiveShift);
+ set_in_young_list(false);
+ }
+
+ void Zap() {
+ DCHECK(IsInUse());
+ // Zap the values for eager trapping.
+ object_ = kGlobalHandleZapValue;
+ }
+
+ const char* label() const {
+ return state() == NORMAL ? reinterpret_cast<char*>(data_.parameter)
+ : nullptr;
+ }
// State and flag accessors.
@@ -333,12 +417,8 @@ class GlobalHandles::Node final {
flags_ = IsActive::update(flags_, v);
}
- bool is_in_new_space_list() {
- return IsInNewSpaceList::decode(flags_);
- }
- void set_in_new_space_list(bool v) {
- flags_ = IsInNewSpaceList::update(flags_, v);
- }
+ bool is_in_young_list() const { return IsInYoungList::decode(flags_); }
+ void set_in_young_list(bool v) { flags_ = IsInYoungList::update(flags_, v); }
WeaknessType weakness_type() const {
return NodeWeaknessType::decode(flags_);
@@ -365,6 +445,8 @@ class GlobalHandles::Node final {
return weakness_type() == PHANTOM_WEAK_RESET_HANDLE;
}
+ bool IsFinalizerHandle() const { return weakness_type() == FINALIZER_WEAK; }
+
bool IsPendingPhantomCallback() const {
return state() == PENDING && IsPhantomCallback();
}
@@ -396,16 +478,6 @@ class GlobalHandles::Node final {
set_state(PENDING);
}
- // Callback parameter accessors.
- void set_parameter(void* parameter) {
- DCHECK(IsInUse());
- data_.parameter = parameter;
- }
- void* parameter() const {
- DCHECK(IsInUse());
- return data_.parameter;
- }
-
bool has_callback() const { return weak_callback_ != nullptr; }
// Accessors for next free node in the free list.
@@ -455,11 +527,12 @@ class GlobalHandles::Node final {
void AnnotateStrongRetainer(const char* label) {
DCHECK_EQ(state(), NORMAL);
- data_.label = label;
+ data_.parameter = const_cast<char*>(label);
}
void CollectPhantomCallbackData(
- std::vector<PendingPhantomCallback>* pending_phantom_callbacks) {
+ std::vector<std::pair<Node*, PendingPhantomCallback>>*
+ pending_phantom_callbacks) {
DCHECK(weakness_type() == PHANTOM_WEAK ||
weakness_type() == PHANTOM_WEAK_2_EMBEDDER_FIELDS);
DCHECK(state() == PENDING);
@@ -468,29 +541,23 @@ class GlobalHandles::Node final {
void* embedder_fields[v8::kEmbedderFieldsInWeakCallback] = {nullptr,
nullptr};
if (weakness_type() != PHANTOM_WEAK && object()->IsJSObject()) {
- JSObject jsobject = JSObject::cast(object());
- int field_count = jsobject->GetEmbedderFieldCount();
- for (int i = 0; i < v8::kEmbedderFieldsInWeakCallback; ++i) {
- if (field_count == i) break;
- void* pointer;
- if (EmbedderDataSlot(jsobject, i).ToAlignedPointer(&pointer)) {
- embedder_fields[i] = pointer;
- }
- }
+ ExtractInternalFields(JSObject::cast(object()), embedder_fields,
+ v8::kEmbedderFieldsInWeakCallback);
}
// Zap with something dangerous.
- location().store(Object(0x6057CA11));
+ location().store(Object(0xCA11));
- pending_phantom_callbacks->push_back(PendingPhantomCallback(
- this, weak_callback_, parameter(), embedder_fields));
+ pending_phantom_callbacks->push_back(std::make_pair(
+ this,
+ PendingPhantomCallback(weak_callback_, parameter(), embedder_fields)));
DCHECK(IsInUse());
set_state(NEAR_DEATH);
}
void ResetPhantomHandle() {
- DCHECK(weakness_type() == PHANTOM_WEAK_RESET_HANDLE);
- DCHECK(state() == PENDING);
+ DCHECK_EQ(PHANTOM_WEAK_RESET_HANDLE, weakness_type());
+ DCHECK_EQ(PENDING, state());
DCHECK_NULL(weak_callback_);
Address** handle = reinterpret_cast<Address**>(parameter());
*handle = nullptr;
@@ -522,89 +589,135 @@ class GlobalHandles::Node final {
CHECK_NE(NEAR_DEATH, state());
}
- inline GlobalHandles* GetGlobalHandles();
+ void MarkAsFree() { set_state(FREE); }
+ void MarkAsUsed() { set_state(NORMAL); }
- uint8_t index() const { return index_; }
- void set_index(uint8_t value) { index_ = value; }
+ GlobalHandles* global_handles() {
+ return NodeBlock<Node>::From(this)->global_handles();
+ }
private:
// Fields that are not used for managing node memory.
- void ClearFields() {
- // Zap the values for eager trapping.
- object_ = kGlobalHandleZapValue;
- class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+ void ClearImplFields() {
set_independent(false);
set_active(false);
weak_callback_ = nullptr;
}
- void CheckFieldsAreCleared() {
- DCHECK_EQ(kGlobalHandleZapValue, object_);
- DCHECK_EQ(v8::HeapProfiler::kPersistentHandleNoClassId, class_id_);
+ void CheckImplFieldsAreCleared() {
DCHECK(!is_independent());
DCHECK(!is_active());
DCHECK_EQ(nullptr, weak_callback_);
}
- // Storage for object pointer.
- //
- // Placed first to avoid offset computation. The stored data is equivalent to
- // an Object. It is stored as a plain Address for convenience (smallest number
- // of casts), and because it is a private implementation detail: the public
- // interface provides type safety.
- Address object_;
-
- // Next word stores class_id, index, state, and independent.
- // Note: the most aligned fields should go first.
-
- // Wrapper class ID.
- uint16_t class_id_;
-
- // Index in the containing handle block.
- uint8_t index_;
-
// This stores three flags (independent, partially_dependent and
- // in_new_space_list) and a State.
- class NodeState : public BitField<State, 0, 3> {};
- class IsIndependent : public BitField<bool, 3, 1> {};
+ // in_young_list) and a State.
+ class NodeState : public BitField8<State, 0, 3> {};
+ class IsIndependent : public BitField8<bool, NodeState::kNext, 1> {};
// The following two fields are mutually exclusive
- class IsActive : public BitField<bool, 4, 1> {};
- class IsInNewSpaceList : public BitField<bool, 5, 1> {};
- class NodeWeaknessType : public BitField<WeaknessType, 6, 2> {};
-
- uint8_t flags_;
+ class IsActive : public BitField8<bool, IsIndependent::kNext, 1> {};
+ class IsInYoungList : public BitField8<bool, IsActive::kNext, 1> {};
+ class NodeWeaknessType
+ : public BitField8<WeaknessType, IsInYoungList::kNext, 2> {};
// Handle specific callback - might be a weak reference in disguise.
WeakCallbackInfo<void>::Callback weak_callback_;
- // The meaning of this field depends on node state:
- // state == FREE: it stores the next free node pointer.
- // state == NORMAL: it stores the strong retainer label.
- // otherwise: it stores the parameter for the weak callback.
- union {
- Node* next_free;
- const char* label;
- void* parameter;
- } data_;
+ friend class NodeBase<Node>;
DISALLOW_COPY_AND_ASSIGN(Node);
};
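
The Node class above also repacks its flags byte: hand-numbered BitField offsets (0, 3, 4, 5, 6 in the removed lines) become BitField8 declarations chained through kNext, so inserting a field renumbers everything after it automatically. A standalone sketch of such a helper, assuming V8-like decode/update semantics:

#include <cstdint>

template <class T, int shift, int size>
struct BitField8 {
  static constexpr uint8_t kMask =
      static_cast<uint8_t>(((1u << size) - 1) << shift);
  static constexpr int kNext = shift + size;  // lets the next field chain on
  static constexpr uint8_t update(uint8_t previous, T value) {
    return static_cast<uint8_t>((previous & ~kMask) |
                                (static_cast<unsigned>(value) << shift));
  }
  static constexpr T decode(uint8_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

enum State : uint8_t { FREE, NORMAL, WEAK };
using NodeState = BitField8<State, 0, 3>;
using IsActive = BitField8<bool, NodeState::kNext, 1>;

static_assert(IsActive::kMask == 0x08, "IsActive lands on bit 3");

int main() {
  uint8_t flags = 0;
  flags = NodeState::update(flags, WEAK);
  flags = IsActive::update(flags, true);
  return NodeState::decode(flags) == WEAK ? 0 : 1;
}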
-GlobalHandles* GlobalHandles::Node::GetGlobalHandles() {
- return NodeBlock<Node>::From(this)->global_handles();
-}
+class GlobalHandles::TracedNode final
+ : public NodeBase<GlobalHandles::TracedNode> {
+ public:
+ TracedNode() { set_in_young_list(false); }
+
+ enum State { FREE = 0, NORMAL, NEAR_DEATH };
+
+ State state() const { return NodeState::decode(flags_); }
+ void set_state(State state) { flags_ = NodeState::update(flags_, state); }
+
+ void MarkAsFree() { set_state(FREE); }
+ void MarkAsUsed() { set_state(NORMAL); }
+ bool IsInUse() const { return state() != FREE; }
+ bool IsRetainer() const { return state() == NORMAL; }
+ bool IsPhantomResetHandle() const { return callback_ == nullptr; }
+
+ bool is_in_young_list() const { return IsInYoungList::decode(flags_); }
+ void set_in_young_list(bool v) { flags_ = IsInYoungList::update(flags_, v); }
+
+ bool is_root() const { return IsRoot::decode(flags_); }
+ void set_root(bool v) { flags_ = IsRoot::update(flags_, v); }
+
+ void SetFinalizationCallback(void* parameter,
+ WeakCallbackInfo<void>::Callback callback) {
+ set_parameter(parameter);
+ callback_ = callback;
+ }
+ bool HasFinalizationCallback() const { return callback_ != nullptr; }
+
+ void CollectPhantomCallbackData(
+ std::vector<std::pair<TracedNode*, PendingPhantomCallback>>*
+ pending_phantom_callbacks) {
+ DCHECK(IsInUse());
+ DCHECK_NOT_NULL(callback_);
+
+ void* embedder_fields[v8::kEmbedderFieldsInWeakCallback] = {nullptr,
+ nullptr};
+ ExtractInternalFields(JSObject::cast(object()), embedder_fields,
+ v8::kEmbedderFieldsInWeakCallback);
+
+ // Zap with something dangerous.
+ location().store(Object(0xCA11));
+
+ pending_phantom_callbacks->push_back(std::make_pair(
+ this, PendingPhantomCallback(callback_, parameter(), embedder_fields)));
+ set_state(NEAR_DEATH);
+ }
+
+ void ResetPhantomHandle() {
+ DCHECK(IsInUse());
+ Address** handle = reinterpret_cast<Address**>(data_.parameter);
+ *handle = nullptr;
+ NodeSpace<TracedNode>::Release(this);
+ DCHECK(!IsInUse());
+ }
+
+ protected:
+ class NodeState : public BitField8<State, 0, 2> {};
+ class IsInYoungList : public BitField8<bool, NodeState::kNext, 1> {};
+ class IsRoot : public BitField8<bool, IsInYoungList::kNext, 1> {};
+
+ void ClearImplFields() {
+ set_root(true);
+ callback_ = nullptr;
+ }
+
+ void CheckImplFieldsAreCleared() const {
+ DCHECK(is_root());
+ DCHECK_NULL(callback_);
+ }
+
+ WeakCallbackInfo<void>::Callback callback_;
+
+ friend class NodeBase<GlobalHandles::TracedNode>;
+
+ DISALLOW_COPY_AND_ASSIGN(TracedNode);
+};
GlobalHandles::GlobalHandles(Isolate* isolate)
: isolate_(isolate),
- regular_nodes_(new NodeSpace<GlobalHandles::Node>(this)) {}
+ regular_nodes_(new NodeSpace<GlobalHandles::Node>(this)),
+ traced_nodes_(new NodeSpace<GlobalHandles::TracedNode>(this)) {}
GlobalHandles::~GlobalHandles() { regular_nodes_.reset(nullptr); }
Handle<Object> GlobalHandles::Create(Object value) {
GlobalHandles::Node* result = regular_nodes_->Acquire(value);
- if (Heap::InNewSpace(value) && !result->is_in_new_space_list()) {
- new_space_nodes_.push_back(result);
- result->set_in_new_space_list(true);
+ if (ObjectInYoungGeneration(value) && !result->is_in_young_list()) {
+ young_nodes_.push_back(result);
+ result->set_in_young_list(true);
}
return result->handle();
}
@@ -613,10 +726,24 @@ Handle<Object> GlobalHandles::Create(Address value) {
return Create(Object(value));
}
+Handle<Object> GlobalHandles::CreateTraced(Object value, Address* slot) {
+ GlobalHandles::TracedNode* result = traced_nodes_->Acquire(value);
+ if (ObjectInYoungGeneration(value) && !result->is_in_young_list()) {
+ traced_young_nodes_.push_back(result);
+ result->set_in_young_list(true);
+ }
+ result->set_parameter(slot);
+ return result->handle();
+}
+
+Handle<Object> GlobalHandles::CreateTraced(Address value, Address* slot) {
+ return CreateTraced(Object(value), slot);
+}
+
Handle<Object> GlobalHandles::CopyGlobal(Address* location) {
DCHECK_NOT_NULL(location);
GlobalHandles* global_handles =
- Node::FromLocation(location)->GetGlobalHandles();
+ Node::FromLocation(location)->global_handles();
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
Object(*location)->ObjectVerify(global_handles->isolate());
@@ -625,14 +752,53 @@ Handle<Object> GlobalHandles::CopyGlobal(Address* location) {
return global_handles->Create(*location);
}
+void GlobalHandles::MoveGlobal(Address** from, Address** to) {
+ DCHECK_NOT_NULL(*from);
+ DCHECK_NOT_NULL(*to);
+ DCHECK_EQ(*from, *to);
+ Node* node = Node::FromLocation(*from);
+ if (node->IsWeak() && node->IsPhantomResetHandle()) {
+ node->set_parameter(to);
+ }
+
+ // - Strong handles do not require fixups.
+ // - Weak handles with finalizers and callbacks are too general to fix up. For
+ // those the callers need to ensure consistency.
+}
+
+void GlobalHandles::MoveTracedGlobal(Address** from, Address** to) {
+ DCHECK_NOT_NULL(*from);
+ DCHECK_NOT_NULL(*to);
+ DCHECK_EQ(*from, *to);
+ TracedNode* node = TracedNode::FromLocation(*from);
+ // Only set the backpointer for clearing a phantom handle when there is no
+ // finalization callback attached. As soon as a callback is attached to a node
+ // the embedder is on its own when resetting a handle.
+ if (!node->HasFinalizationCallback()) {
+ node->set_parameter(to);
+ }
+}
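
Both MoveGlobal and MoveTracedGlobal lean on the same convention as ResetPhantomHandle: for a phantom-reset node, the parameter slot stores a backpointer to the embedder's handle cell, so the GC can null the cell when the object dies, and a move merely re-aims the backpointer. A standalone sketch of that contract with illustrative types:

#include <cassert>
#include <cstdint>

using Address = uintptr_t;

struct Node {
  Address object = 0;
  Address** slot = nullptr;  // backpointer to the embedder's handle cell

  // GC side: when the object is found dead, clear the embedder's cell.
  void ResetPhantomHandle() {
    *slot = nullptr;
    object = 0;
  }
};

int main() {
  Address storage = 0x1234;
  Address* cell = &storage;  // the handle cell the embedder holds on to
  Node node;
  node.object = storage;
  node.slot = &cell;

  // Embedder moved its handle (the cell relocated): re-aim the backpointer,
  // as MoveGlobal does for phantom-reset nodes.
  Address* moved_cell = cell;
  node.slot = &moved_cell;

  node.ResetPhantomHandle();
  assert(moved_cell == nullptr);  // the embedder observes the reset
  return 0;
}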
+
void GlobalHandles::Destroy(Address* location) {
if (location != nullptr) {
NodeSpace<Node>::Release(Node::FromLocation(location));
}
}
-typedef v8::WeakCallbackInfo<void>::Callback GenericCallback;
+void GlobalHandles::DestroyTraced(Address* location) {
+ if (location != nullptr) {
+ NodeSpace<TracedNode>::Release(TracedNode::FromLocation(location));
+ }
+}
+
+void GlobalHandles::SetFinalizationCallbackForTraced(
+ Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback callback) {
+ TracedNode::FromLocation(location)->SetFinalizationCallback(parameter,
+ callback);
+}
+typedef v8::WeakCallbackInfo<void>::Callback GenericCallback;
void GlobalHandles::MakeWeak(Address* location, void* parameter,
GenericCallback phantom_callback,
@@ -686,26 +852,59 @@ void GlobalHandles::IterateWeakRootsForPhantomHandles(
++number_of_phantom_handle_resets_;
} else if (node->IsPhantomCallback()) {
node->MarkPending();
- node->CollectPhantomCallbackData(&pending_phantom_callbacks_);
+ node->CollectPhantomCallbackData(&regular_pending_phantom_callbacks_);
+ }
+ }
+ }
+ for (TracedNode* node : *traced_nodes_) {
+ if (node->IsInUse() &&
+ should_reset_handle(isolate()->heap(), node->location())) {
+ if (node->IsPhantomResetHandle()) {
+ node->ResetPhantomHandle();
+ ++number_of_phantom_handle_resets_;
+ } else {
+ node->CollectPhantomCallbackData(&traced_pending_phantom_callbacks_);
}
}
}
}
-void GlobalHandles::IdentifyWeakHandles(
+void GlobalHandles::IterateWeakRootsIdentifyFinalizers(
WeakSlotCallbackWithHeap should_reset_handle) {
for (Node* node : *regular_nodes_) {
if (node->IsWeak() &&
should_reset_handle(isolate()->heap(), node->location())) {
- if (!node->IsPhantomCallback() && !node->IsPhantomResetHandle()) {
+ if (node->IsFinalizerHandle()) {
node->MarkPending();
}
}
}
}
-void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(RootVisitor* v) {
- for (Node* node : new_space_nodes_) {
+void GlobalHandles::IdentifyWeakUnmodifiedObjects(
+ WeakSlotCallback is_unmodified) {
+ for (Node* node : young_nodes_) {
+ if (node->IsWeak() && !is_unmodified(node->location())) {
+ node->set_active(true);
+ }
+ }
+
+ LocalEmbedderHeapTracer* const tracer =
+ isolate()->heap()->local_embedder_heap_tracer();
+ for (TracedNode* node : traced_young_nodes_) {
+ if (node->IsInUse()) {
+ DCHECK(node->is_root());
+ if (is_unmodified(node->location())) {
+ v8::Value* value = ToApi<v8::Value>(node->handle());
+ node->set_root(tracer->IsRootForNonTracingGC(
+ *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value)));
+ }
+ }
+ }
+}
+
+void GlobalHandles::IterateYoungStrongAndDependentRoots(RootVisitor* v) {
+ for (Node* node : young_nodes_) {
if (node->IsStrongRetainer() ||
(node->IsWeakRetainer() && !node->is_independent() &&
node->is_active())) {
@@ -713,21 +912,17 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(RootVisitor* v) {
node->location());
}
}
-}
-
-void GlobalHandles::IdentifyWeakUnmodifiedObjects(
- WeakSlotCallback is_unmodified) {
- for (Node* node : new_space_nodes_) {
- if (node->IsWeak() && !is_unmodified(node->location())) {
- node->set_active(true);
+ for (TracedNode* node : traced_young_nodes_) {
+ if (node->IsInUse() && node->is_root()) {
+ v->VisitRootPointer(Root::kGlobalHandles, nullptr, node->location());
}
}
}
-void GlobalHandles::MarkNewSpaceWeakUnmodifiedObjectsPending(
+void GlobalHandles::MarkYoungWeakUnmodifiedObjectsPending(
WeakSlotCallbackWithHeap is_dead) {
- for (Node* node : new_space_nodes_) {
- DCHECK(node->is_in_new_space_list());
+ for (Node* node : young_nodes_) {
+ DCHECK(node->is_in_young_list());
if ((node->is_independent() || !node->is_active()) && node->IsWeak() &&
is_dead(isolate_->heap(), node->location())) {
if (!node->IsPhantomCallback() && !node->IsPhantomResetHandle()) {
@@ -737,10 +932,10 @@ void GlobalHandles::MarkNewSpaceWeakUnmodifiedObjectsPending(
}
}
-void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
+void GlobalHandles::IterateYoungWeakUnmodifiedRootsForFinalizers(
RootVisitor* v) {
- for (Node* node : new_space_nodes_) {
- DCHECK(node->is_in_new_space_list());
+ for (Node* node : young_nodes_) {
+ DCHECK(node->is_in_young_list());
if ((node->is_independent() || !node->is_active()) &&
node->IsWeakRetainer() && (node->state() == Node::PENDING)) {
DCHECK(!node->IsPhantomCallback());
@@ -752,10 +947,10 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
}
}
-void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+void GlobalHandles::IterateYoungWeakUnmodifiedRootsForPhantomHandles(
RootVisitor* v, WeakSlotCallbackWithHeap should_reset_handle) {
- for (Node* node : new_space_nodes_) {
- DCHECK(node->is_in_new_space_list());
+ for (Node* node : young_nodes_) {
+ DCHECK(node->is_in_young_list());
if ((node->is_independent() || !node->is_active()) &&
node->IsWeakRetainer() && (node->state() != Node::PENDING)) {
if (should_reset_handle(isolate_->heap(), node->location())) {
@@ -764,10 +959,9 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
node->MarkPending();
node->ResetPhantomHandle();
++number_of_phantom_handle_resets_;
-
} else if (node->IsPhantomCallback()) {
node->MarkPending();
- node->CollectPhantomCallbackData(&pending_phantom_callbacks_);
+ node->CollectPhantomCallbackData(&regular_pending_phantom_callbacks_);
} else {
UNREACHABLE();
}
@@ -778,6 +972,25 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
}
}
}
+ for (TracedNode* node : traced_young_nodes_) {
+ if (!node->IsInUse()) continue;
+
+ DCHECK_IMPLIES(node->is_root(),
+ !should_reset_handle(isolate_->heap(), node->location()));
+ if (should_reset_handle(isolate_->heap(), node->location())) {
+ if (node->IsPhantomResetHandle()) {
+ node->ResetPhantomHandle();
+ ++number_of_phantom_handle_resets_;
+ } else {
+ node->CollectPhantomCallbackData(&traced_pending_phantom_callbacks_);
+ }
+ } else {
+ if (!node->is_root()) {
+ node->set_root(true);
+ v->VisitRootPointer(Root::kGlobalHandles, nullptr, node->location());
+ }
+ }
+ }
}
void GlobalHandles::InvokeSecondPassPhantomCallbacksFromTask() {
@@ -795,15 +1008,13 @@ void GlobalHandles::InvokeSecondPassPhantomCallbacks() {
while (!second_pass_callbacks_.empty()) {
auto callback = second_pass_callbacks_.back();
second_pass_callbacks_.pop_back();
- DCHECK_NULL(callback.node());
- // Fire second pass callback
- callback.Invoke(isolate());
+ callback.Invoke(isolate(), PendingPhantomCallback::kSecondPass);
}
}
size_t GlobalHandles::PostScavengeProcessing(unsigned post_processing_count) {
size_t freed_nodes = 0;
- for (Node* node : new_space_nodes_) {
+ for (Node* node : young_nodes_) {
// Filter free nodes.
if (!node->IsRetainer()) continue;
@@ -843,45 +1054,67 @@ size_t GlobalHandles::PostMarkSweepProcessing(unsigned post_processing_count) {
return freed_nodes;
}
-void GlobalHandles::UpdateListOfNewSpaceNodes() {
+template <typename T>
+void GlobalHandles::UpdateAndCompactListOfYoungNode(
+ std::vector<T*>* node_list) {
size_t last = 0;
- for (Node* node : new_space_nodes_) {
- DCHECK(node->is_in_new_space_list());
- if (node->IsRetainer()) {
- if (Heap::InNewSpace(node->object())) {
- new_space_nodes_[last++] = node;
+ for (T* node : *node_list) {
+ DCHECK(node->is_in_young_list());
+ if (node->IsInUse()) {
+ if (ObjectInYoungGeneration(node->object())) {
+ (*node_list)[last++] = node;
isolate_->heap()->IncrementNodesCopiedInNewSpace();
} else {
- node->set_in_new_space_list(false);
+ node->set_in_young_list(false);
isolate_->heap()->IncrementNodesPromoted();
}
} else {
- node->set_in_new_space_list(false);
+ node->set_in_young_list(false);
isolate_->heap()->IncrementNodesDiedInNewSpace();
}
}
- DCHECK_LE(last, new_space_nodes_.size());
- new_space_nodes_.resize(last);
- new_space_nodes_.shrink_to_fit();
+ DCHECK_LE(last, node_list->size());
+ node_list->resize(last);
+ node_list->shrink_to_fit();
}
-size_t GlobalHandles::InvokeFirstPassWeakCallbacks() {
+void GlobalHandles::UpdateListOfYoungNodes() {
+ UpdateAndCompactListOfYoungNode(&young_nodes_);
+ UpdateAndCompactListOfYoungNode(&traced_young_nodes_);
+}
+
+template <typename T>
+size_t GlobalHandles::InvokeFirstPassWeakCallbacks(
+ std::vector<std::pair<T*, PendingPhantomCallback>>* pending) {
size_t freed_nodes = 0;
- std::vector<PendingPhantomCallback> pending_phantom_callbacks;
- pending_phantom_callbacks.swap(pending_phantom_callbacks_);
+ std::vector<std::pair<T*, PendingPhantomCallback>> pending_phantom_callbacks;
+ pending_phantom_callbacks.swap(*pending);
{
// The initial pass callbacks must simply clear the nodes.
- for (auto callback : pending_phantom_callbacks) {
- // Skip callbacks that have already been processed once.
- if (callback.node() == nullptr) continue;
- callback.Invoke(isolate());
- if (callback.callback()) second_pass_callbacks_.push_back(callback);
+ for (auto& pair : pending_phantom_callbacks) {
+ T* node = pair.first;
+ DCHECK_EQ(T::NEAR_DEATH, node->state());
+ pair.second.Invoke(isolate(), PendingPhantomCallback::kFirstPass);
+
+ // Transition to second pass. It is required that the first pass callback
+ // resets the handle using |v8::PersistentBase::Reset|. Also see comments
+ // on |v8::WeakCallbackInfo|.
+ CHECK_WITH_MSG(T::FREE == node->state(),
+ "Handle not reset in first callback. See comments on "
+ "|v8::WeakCallbackInfo|.");
+
+ if (pair.second.callback()) second_pass_callbacks_.push_back(pair.second);
freed_nodes++;
}
}
return freed_nodes;
}
+size_t GlobalHandles::InvokeFirstPassWeakCallbacks() {
+ return InvokeFirstPassWeakCallbacks(&regular_pending_phantom_callbacks_) +
+ InvokeFirstPassWeakCallbacks(&traced_pending_phantom_callbacks_);
+}
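
Carrying the node next to its PendingPhantomCallback lets the loop above enforce the first-pass contract directly: the callback must reset the handle (the node must reach FREE) before any second-pass work may run. A standalone sketch of the two-pass protocol with simplified types; in V8 a second pass is only scheduled if the first-pass callback installs one via the WeakCallbackInfo:

#include <cassert>
#include <functional>
#include <utility>
#include <vector>

enum State { FREE, NEAR_DEATH };
struct Node { State state = NEAR_DEATH; };

using Callback = std::function<void(Node*)>;

static std::vector<Callback> second_pass_callbacks;

size_t InvokeFirstPassWeakCallbacks(
    std::vector<std::pair<Node*, Callback>>* pending) {
  size_t freed_nodes = 0;
  for (auto& pair : *pending) {
    Node* node = pair.first;
    assert(node->state == NEAR_DEATH);
    pair.second(node);  // first pass: must reset the handle
    // Contract from v8::WeakCallbackInfo: the handle is freed here.
    assert(node->state == FREE);
    second_pass_callbacks.push_back(pair.second);  // embedder cleanup later
    freed_nodes++;
  }
  pending->clear();
  return freed_nodes;
}

int main() {
  Node node;
  std::vector<std::pair<Node*, Callback>> pending;
  pending.emplace_back(&node, [](Node* n) { n->state = FREE; });
  assert(InvokeFirstPassWeakCallbacks(&pending) == 1);
  return 0;
}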
+
void GlobalHandles::InvokeOrScheduleSecondPassPhantomCallbacks(
bool synchronous_second_pass) {
if (!second_pass_callbacks_.empty()) {
@@ -901,11 +1134,10 @@ void GlobalHandles::InvokeOrScheduleSecondPassPhantomCallbacks(
}
}
-void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate) {
+void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate,
+ InvocationType type) {
Data::Callback* callback_addr = nullptr;
- if (node_ != nullptr) {
- // Initialize for first pass callback.
- DCHECK(node_->state() == Node::NEAR_DEATH);
+ if (type == kFirstPass) {
callback_addr = &callback_;
}
Data data(reinterpret_cast<v8::Isolate*>(isolate), parameter_,
@@ -913,15 +1145,6 @@ void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate) {
Data::Callback callback = callback_;
callback_ = nullptr;
callback(data);
- if (node_ != nullptr) {
- // Transition to second pass. It is required that the first pass callback
- // resets the handle using |v8::PersistentBase::Reset|. Also see comments on
- // |v8::WeakCallbackInfo|.
- CHECK_WITH_MSG(Node::FREE == node_->state(),
- "Handle not reset in first callback. See comments on "
- "|v8::WeakCallbackInfo|.");
- node_ = nullptr;
- }
}
bool GlobalHandles::InRecursiveGC(unsigned gc_processing_counter) {
@@ -949,7 +1172,7 @@ size_t GlobalHandles::PostGarbageCollectionProcessing(
: PostMarkSweepProcessing(post_processing_count);
if (InRecursiveGC(post_processing_count)) return freed_nodes;
- UpdateListOfNewSpaceNodes();
+ UpdateListOfYoungNodes();
return freed_nodes;
}
@@ -969,6 +1192,11 @@ void GlobalHandles::IterateWeakRoots(RootVisitor* v) {
node->location());
}
}
+ for (TracedNode* node : *traced_nodes_) {
+ if (node->IsInUse()) {
+ v->VisitRootPointer(Root::kGlobalHandles, nullptr, node->location());
+ }
+ }
}
DISABLE_CFI_PERF
@@ -979,16 +1207,26 @@ void GlobalHandles::IterateAllRoots(RootVisitor* v) {
node->location());
}
}
+ for (TracedNode* node : *traced_nodes_) {
+ if (node->IsRetainer()) {
+ v->VisitRootPointer(Root::kGlobalHandles, nullptr, node->location());
+ }
+ }
}
DISABLE_CFI_PERF
-void GlobalHandles::IterateAllNewSpaceRoots(RootVisitor* v) {
- for (Node* node : new_space_nodes_) {
+void GlobalHandles::IterateAllYoungRoots(RootVisitor* v) {
+ for (Node* node : young_nodes_) {
if (node->IsRetainer()) {
v->VisitRootPointer(Root::kGlobalHandles, node->label(),
node->location());
}
}
+ for (TracedNode* node : traced_young_nodes_) {
+ if (node->IsRetainer()) {
+ v->VisitRootPointer(Root::kGlobalHandles, nullptr, node->location());
+ }
+ }
}
DISABLE_CFI_PERF
@@ -1010,22 +1248,32 @@ void GlobalHandles::IterateAllRootsWithClassIds(
}
}
+DISABLE_CFI_PERF
+void GlobalHandles::IterateTracedNodes(
+ v8::EmbedderHeapTracer::TracedGlobalHandleVisitor* visitor) {
+ for (TracedNode* node : *traced_nodes_) {
+ if (node->IsInUse()) {
+ v8::Value* value = ToApi<v8::Value>(node->handle());
+ visitor->VisitTracedGlobalHandle(
+ *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value));
+ }
+ }
+}
DISABLE_CFI_PERF
-void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(
+void GlobalHandles::IterateAllYoungRootsWithClassIds(
v8::PersistentHandleVisitor* visitor) {
- for (Node* node : new_space_nodes_) {
+ for (Node* node : young_nodes_) {
if (node->IsRetainer() && node->has_wrapper_class_id()) {
ApplyPersistentHandleVisitor(visitor, node);
}
}
}
-
DISABLE_CFI_PERF
-void GlobalHandles::IterateWeakRootsInNewSpaceWithClassIds(
+void GlobalHandles::IterateYoungWeakRootsWithClassIds(
v8::PersistentHandleVisitor* visitor) {
- for (Node* node : new_space_nodes_) {
+ for (Node* node : young_nodes_) {
if (node->has_wrapper_class_id() && node->IsWeak()) {
ApplyPersistentHandleVisitor(visitor, node);
}
@@ -1105,8 +1353,8 @@ void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
}
}
-void EternalHandles::IterateNewSpaceRoots(RootVisitor* visitor) {
- for (int index : new_space_indices_) {
+void EternalHandles::IterateYoungRoots(RootVisitor* visitor) {
+ for (int index : young_node_indices_) {
visitor->VisitRootPointer(Root::kEternalHandles, nullptr,
FullObjectSlot(GetLocation(index)));
}
@@ -1114,13 +1362,13 @@ void EternalHandles::IterateNewSpaceRoots(RootVisitor* visitor) {
void EternalHandles::PostGarbageCollectionProcessing() {
size_t last = 0;
- for (int index : new_space_indices_) {
- if (Heap::InNewSpace(Object(*GetLocation(index)))) {
- new_space_indices_[last++] = index;
+ for (int index : young_node_indices_) {
+ if (ObjectInYoungGeneration(Object(*GetLocation(index)))) {
+ young_node_indices_[last++] = index;
}
}
- DCHECK_LE(last, new_space_indices_.size());
- new_space_indices_.resize(last);
+ DCHECK_LE(last, young_node_indices_.size());
+ young_node_indices_.resize(last);
}
void EternalHandles::Create(Isolate* isolate, Object object, int* index) {
@@ -1138,8 +1386,8 @@ void EternalHandles::Create(Isolate* isolate, Object object, int* index) {
}
DCHECK_EQ(the_hole->ptr(), blocks_[block][offset]);
blocks_[block][offset] = object->ptr();
- if (Heap::InNewSpace(object)) {
- new_space_indices_.push_back(size_);
+ if (ObjectInYoungGeneration(object)) {
+ young_node_indices_.push_back(size_);
}
*index = size_++;
}
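Both UpdateAndCompactListOfYoungNode above and EternalHandles::PostGarbageCollectionProcessing compact their vectors with the same write-survivors-forward idiom. A generic sketch of the pattern, assuming nothing beyond the standard library:

    #include <vector>

    // Keeps the elements satisfying |predicate| (order-preserving) and drops
    // the rest, mirroring the resize/shrink_to_fit pattern in the patch.
    template <typename T, typename Pred>
    void CompactInPlace(std::vector<T>* list, Pred predicate) {
      size_t last = 0;
      for (T& element : *list) {
        if (predicate(element)) (*list)[last++] = element;
      }
      list->resize(last);
      list->shrink_to_fit();
    }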
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index d12e0c10fd..3604af1d28 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -6,6 +6,7 @@
#define V8_GLOBAL_HANDLES_H_
#include <type_traits>
+#include <utility>
#include <vector>
#include "include/v8.h"
@@ -40,10 +41,17 @@ enum WeaknessType {
// callbacks and finalizers attached to them.
class GlobalHandles final {
public:
- // Copy a global handle
+ template <class NodeType>
+ class NodeBlock;
+
+ //
+ // API for regular handles.
+ //
+
+ static void MoveGlobal(Address** from, Address** to);
+
static Handle<Object> CopyGlobal(Address* location);
- // Destroy a global handle.
static void Destroy(Address* location);
// Make the global handle weak and set the callback parameter for the
@@ -58,7 +66,6 @@ class GlobalHandles final {
static void MakeWeak(Address* location, void* parameter,
WeakCallbackInfo<void>::Callback weak_callback,
v8::WeakCallbackType type);
-
static void MakeWeak(Address** location_addr);
static void AnnotateStrongRetainer(Address* location, const char* label);
@@ -72,6 +79,16 @@ class GlobalHandles final {
// Tells whether global handle is weak.
static bool IsWeak(Address* location);
+ //
+ // API for traced handles.
+ //
+
+ static void MoveTracedGlobal(Address** from, Address** to);
+ static void DestroyTraced(Address* location);
+ static void SetFinalizationCallbackForTraced(
+ Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback callback);
+
explicit GlobalHandles(Isolate* isolate);
~GlobalHandles();
@@ -87,6 +104,9 @@ class GlobalHandles final {
return Handle<T>::cast(Create(Object(value)));
}
+ Handle<Object> CreateTraced(Object value, Address* slot);
+ Handle<Object> CreateTraced(Address value, Address* slot);
+
void RecordStats(HeapStats* stats);
size_t InvokeFirstPassWeakCallbacks();
@@ -100,45 +120,50 @@ class GlobalHandles final {
void IterateStrongRoots(RootVisitor* v);
void IterateWeakRoots(RootVisitor* v);
void IterateAllRoots(RootVisitor* v);
-
- void IterateAllNewSpaceRoots(RootVisitor* v);
+ void IterateAllYoungRoots(RootVisitor* v);
// Iterates over all handles that have embedder-assigned class ID.
void IterateAllRootsWithClassIds(v8::PersistentHandleVisitor* v);
// Iterates over all handles in the new space that have embedder-assigned
// class ID.
- void IterateAllRootsInNewSpaceWithClassIds(v8::PersistentHandleVisitor* v);
+ void IterateAllYoungRootsWithClassIds(v8::PersistentHandleVisitor* v);
// Iterate over all handles in the new space that are weak, unmodified
// and have class IDs
- void IterateWeakRootsInNewSpaceWithClassIds(v8::PersistentHandleVisitor* v);
+ void IterateYoungWeakRootsWithClassIds(v8::PersistentHandleVisitor* v);
+
+ // Iterates over all traced handles represented by TracedGlobal.
+ void IterateTracedNodes(
+ v8::EmbedderHeapTracer::TracedGlobalHandleVisitor* visitor);
- // Iterates over weak roots on the heap.
+ // Marks handles with finalizers as pending, based on the predicate
+ // |should_reset_handle|.
+ void IterateWeakRootsIdentifyFinalizers(
+ WeakSlotCallbackWithHeap should_reset_handle);
+ // Uses the provided visitor |v| to mark handles with finalizers that are
+ // pending.
void IterateWeakRootsForFinalizers(RootVisitor* v);
+ // Marks handles that are phantom or have callbacks based on the predicate
+ // |should_reset_handle| as pending.
void IterateWeakRootsForPhantomHandles(
WeakSlotCallbackWithHeap should_reset_handle);
- // Marks all handles that should be finalized based on the predicate
- // |should_reset_handle| as pending.
- void IdentifyWeakHandles(WeakSlotCallbackWithHeap should_reset_handle);
-
- // Note: The following *NewSpace* methods are used for the Scavenger to
- // identify and process handles in new space. The set of new space handles is
- // complete but the methods may encounter handles that are already in old
- // space.
+ // Note: The following *Young* methods are used for the Scavenger to
+ // identify and process handles in the young generation. The set of young
+ // handles is complete but the methods may encounter handles that are
+ // already in old space.
// Iterates over strong and dependent handles. See the note above.
- void IterateNewSpaceStrongAndDependentRoots(RootVisitor* v);
+ void IterateYoungStrongAndDependentRoots(RootVisitor* v);
// Marks weak unmodified handles satisfying |is_dead| as pending.
- void MarkNewSpaceWeakUnmodifiedObjectsPending(
- WeakSlotCallbackWithHeap is_dead);
+ void MarkYoungWeakUnmodifiedObjectsPending(WeakSlotCallbackWithHeap is_dead);
// Iterates over weak independent or unmodified handles.
// See the note above.
- void IterateNewSpaceWeakUnmodifiedRootsForFinalizers(RootVisitor* v);
- void IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ void IterateYoungWeakUnmodifiedRootsForFinalizers(RootVisitor* v);
+ void IterateYoungWeakUnmodifiedRootsForPhantomHandles(
RootVisitor* v, WeakSlotCallbackWithHeap should_reset_handle);
// Identify unmodified objects that are in weak state and marks them
@@ -164,13 +189,12 @@ class GlobalHandles final {
private:
// Internal node structures.
class Node;
- template <class NodeType>
- class NodeBlock;
template <class BlockType>
class NodeIterator;
template <class NodeType>
class NodeSpace;
class PendingPhantomCallback;
+ class TracedNode;
bool InRecursiveGC(unsigned gc_processing_counter);
@@ -179,7 +203,13 @@ class GlobalHandles final {
size_t PostScavengeProcessing(unsigned post_processing_count);
size_t PostMarkSweepProcessing(unsigned post_processing_count);
- void UpdateListOfNewSpaceNodes();
+ template <typename T>
+ size_t InvokeFirstPassWeakCallbacks(
+ std::vector<std::pair<T*, PendingPhantomCallback>>* pending);
+
+ template <typename T>
+ void UpdateAndCompactListOfYoungNode(std::vector<T*>* node_list);
+ void UpdateListOfYoungNodes();
void ApplyPersistentHandleVisitor(v8::PersistentHandleVisitor* visitor,
Node* node);
@@ -187,15 +217,21 @@ class GlobalHandles final {
Isolate* const isolate_;
std::unique_ptr<NodeSpace<Node>> regular_nodes_;
- // Contains all nodes holding new space objects. Note: when the list
+ // Contains all nodes holding young objects. Note: when the list
// is accessed, some of the objects may have been promoted already.
- std::vector<Node*> new_space_nodes_;
+ std::vector<Node*> young_nodes_;
+
+ std::unique_ptr<NodeSpace<TracedNode>> traced_nodes_;
+ std::vector<TracedNode*> traced_young_nodes_;
// Field always containing the number of handles to global objects.
size_t handles_count_ = 0;
size_t number_of_phantom_handle_resets_ = 0;
- std::vector<PendingPhantomCallback> pending_phantom_callbacks_;
+ std::vector<std::pair<Node*, PendingPhantomCallback>>
+ regular_pending_phantom_callbacks_;
+ std::vector<std::pair<TracedNode*, PendingPhantomCallback>>
+ traced_pending_phantom_callbacks_;
std::vector<PendingPhantomCallback> second_pass_callbacks_;
bool second_pass_callbacks_task_posted_ = false;
@@ -208,22 +244,23 @@ class GlobalHandles final {
class GlobalHandles::PendingPhantomCallback final {
public:
typedef v8::WeakCallbackInfo<void> Data;
+
+ enum InvocationType { kFirstPass, kSecondPass };
+
PendingPhantomCallback(
- Node* node, Data::Callback callback, void* parameter,
+ Data::Callback callback, void* parameter,
void* embedder_fields[v8::kEmbedderFieldsInWeakCallback])
- : node_(node), callback_(callback), parameter_(parameter) {
+ : callback_(callback), parameter_(parameter) {
for (int i = 0; i < v8::kEmbedderFieldsInWeakCallback; ++i) {
embedder_fields_[i] = embedder_fields[i];
}
}
- void Invoke(Isolate* isolate);
+ void Invoke(Isolate* isolate, InvocationType type);
- Node* node() const { return node_; }
Data::Callback callback() const { return callback_; }
private:
- Node* node_;
Data::Callback callback_;
void* parameter_;
void* embedder_fields_[v8::kEmbedderFieldsInWeakCallback];
@@ -244,8 +281,8 @@ class EternalHandles final {
// Iterates over all handles.
void IterateAllRoots(RootVisitor* visitor);
- // Iterates over all handles which might be in new space.
- void IterateNewSpaceRoots(RootVisitor* visitor);
+ // Iterates over all handles which might be in the young generation.
+ void IterateYoungRoots(RootVisitor* visitor);
// Rebuilds new space list.
void PostGarbageCollectionProcessing();
@@ -266,7 +303,7 @@ class EternalHandles final {
int size_ = 0;
std::vector<Address*> blocks_;
- std::vector<int> new_space_indices_;
+ std::vector<int> young_node_indices_;
DISALLOW_COPY_AND_ASSIGN(EternalHandles);
};
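The traced-handle entry points declared above (CreateTraced, DestroyTraced, MoveTracedGlobal, SetFinalizationCallbackForTraced) back the v8::TracedGlobal type added to include/v8.h in this update. A hedged embedder-side sketch of how they are reached:

    // Illustrative only; assumes the v8::TracedGlobal API from this V8
    // version (include/v8.h).
    #include "v8.h"

    class Wrappable {
     public:
      void Attach(v8::Isolate* isolate, v8::Local<v8::Object> object) {
        traced_.Reset(isolate, object);  // -> GlobalHandles::CreateTraced.
        traced_.SetFinalizationCallback(this, OnFinalized);
      }

     private:
      static void OnFinalized(const v8::WeakCallbackInfo<void>& info) {
        // Dispatched through traced_pending_phantom_callbacks_ above.
        static_cast<Wrappable*>(info.GetParameter())->traced_.Reset();
      }

      v8::TracedGlobal<v8::Object> traced_;
    };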
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index d83de13005..da6b889b48 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -76,7 +76,7 @@ namespace internal {
constexpr int kStackSpaceRequiredForCompilation = 40;
// Determine whether double field unboxing feature is enabled.
-#if V8_TARGET_ARCH_64_BIT
+#if V8_TARGET_ARCH_64_BIT && !defined(V8_COMPRESS_POINTERS)
#define V8_DOUBLE_FIELDS_UNBOXING true
#else
#define V8_DOUBLE_FIELDS_UNBOXING false
@@ -134,13 +134,8 @@ constexpr int kIntptrSize = sizeof(intptr_t);
constexpr int kUIntptrSize = sizeof(uintptr_t);
constexpr int kSystemPointerSize = sizeof(void*);
constexpr int kSystemPointerHexDigits = kSystemPointerSize == 4 ? 8 : 12;
-#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
-constexpr int kRegisterSize = kSystemPointerSize + kSystemPointerSize;
-#else
-constexpr int kRegisterSize = kSystemPointerSize;
-#endif
-constexpr int kPCOnStackSize = kRegisterSize;
-constexpr int kFPOnStackSize = kRegisterSize;
+constexpr int kPCOnStackSize = kSystemPointerSize;
+constexpr int kFPOnStackSize = kSystemPointerSize;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
constexpr int kElidedFrameSlots = kPCOnStackSize / kSystemPointerSize;
@@ -184,13 +179,7 @@ constexpr size_t kReservedCodeRangePages = 0;
constexpr int kSystemPointerSizeLog2 = 2;
constexpr intptr_t kIntptrSignBit = 0x80000000;
constexpr uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
-#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
-// x32 port also requires code range.
-constexpr bool kRequiresCodeRange = true;
-constexpr size_t kMaximalCodeRangeSize = 256 * MB;
-constexpr size_t kMinimumCodeRangeSize = 3 * MB;
-constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
-#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
+#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
constexpr bool kRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
@@ -247,13 +236,8 @@ constexpr int kExternalAllocationSoftLimit =
// migrated from new space to large object space. Takes double alignment into
// account.
//
-// Current value: Page::kAllocatableMemory (on 32-bit arch) - 512 (slack).
-#ifdef V8_HOST_ARCH_PPC
-// Reduced kMaxRegularHeapObjectSize due to larger page size(64k) on ppc64le
-constexpr int kMaxRegularHeapObjectSize = 327680;
-#else
-constexpr int kMaxRegularHeapObjectSize = 507136;
-#endif
+// Current value: half of the page size.
+constexpr int kMaxRegularHeapObjectSize = (1 << (kPageSizeBits - 1));
constexpr int kBitsPerByte = 8;
constexpr int kBitsPerByteLog2 = 3;
@@ -576,6 +560,7 @@ class MessageLocation;
class ModuleScope;
class Name;
class NameDictionary;
+class NativeContext;
class NewSpace;
class NewLargeObjectSpace;
class NumberDictionary;
@@ -661,7 +646,6 @@ typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, FullObjectSlot pointer);
// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
enum AllocationSpace {
- // TODO(v8:7464): Actually map this space's memory as read-only.
RO_SPACE, // Immortal, immovable and immutable objects,
NEW_SPACE, // Young generation semispaces for regular objects collected with
// Scavenger.
@@ -674,12 +658,22 @@ enum AllocationSpace {
FIRST_SPACE = RO_SPACE,
LAST_SPACE = NEW_LO_SPACE,
+ FIRST_MUTABLE_SPACE = NEW_SPACE,
+ LAST_MUTABLE_SPACE = NEW_LO_SPACE,
FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE,
LAST_GROWABLE_PAGED_SPACE = MAP_SPACE
};
constexpr int kSpaceTagSize = 4;
STATIC_ASSERT(FIRST_SPACE == 0);
+enum class AllocationType {
+ kYoung, // Regular object allocated in NEW_SPACE or NEW_LO_SPACE
+ kOld, // Regular object allocated in OLD_SPACE or LO_SPACE
+ kCode, // Code object allocated in CODE_SPACE or CODE_LO_SPACE
+ kMap, // Map object allocated in MAP_SPACE
+ kReadOnly // Object allocated in RO_SPACE
+};
+
// TODO(ishell): review and rename kWordAligned to kTaggedAligned.
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
@@ -765,44 +759,13 @@ enum ParseRestriction {
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
-// A CodeDesc describes a buffer holding instructions and relocation
-// information. The instructions start at the beginning of the buffer
-// and grow forward, the relocation information starts at the end of
-// the buffer and grows backward. A constant pool and a code comments
-// section may exist, in this order, at the end of the instructions.
-//
-// ā”‚<--------------- buffer_size ----------------------------------->ā”‚
-// ā”‚<---------------- instr_size ------------->ā”‚ ā”‚<-reloc_size->ā”‚
-// ā”‚ ā”‚<-const pool->ā”‚ ā”‚ ā”‚ ā”‚
-// ā”‚ ā”‚<- comments->ā”‚ ā”‚ ā”‚
-// ā”œā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”¼ā”€ā”€ā”€ā”€ā”€ā”€ā”¼ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”¤
-// ā”‚ instructions ā”‚ data ā”‚ free ā”‚ reloc info ā”‚
-// ā”œā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”“ā”€ā”€ā”€ā”€ā”€ā”€ā”“ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”˜
-// buffer
-
-struct CodeDesc {
- byte* buffer = nullptr;
- int buffer_size = 0;
- int instr_size = 0;
- int reloc_size = 0;
- int constant_pool_size = 0;
- int code_comments_size = 0;
- byte* unwinding_info = 0;
- int unwinding_info_size = 0;
- Assembler* origin = nullptr;
- int constant_pool_offset() const {
- return code_comments_offset() - constant_pool_size;
- }
- int code_comments_offset() const { return instr_size - code_comments_size; }
-};
-
// State for inline cache call sites. Aliased as IC::State.
enum InlineCacheState {
// No feedback will be collected.
NO_FEEDBACK,
// Has never been executed.
UNINITIALIZED,
- // Has been executed but monomorhic state has been delayed.
+ // Has been executed but monomorphic state has been delayed.
PREMONOMORPHIC,
// Has been executed and only one receiver type has been seen.
MONOMORPHIC,
@@ -843,7 +806,10 @@ enum WhereToStart { kStartAtReceiver, kStartAtPrototype };
enum ResultSentinel { kNotFound = -1, kUnsupported = -2 };
-enum ShouldThrow { kThrowOnError, kDontThrow };
+enum ShouldThrow {
+ kThrowOnError = Internals::kThrowOnError,
+ kDontThrow = Internals::kDontThrow
+};
// The Store Buffer (GC).
typedef enum {
@@ -1099,6 +1065,7 @@ enum VariableKind : uint8_t {
NORMAL_VARIABLE,
PARAMETER_VARIABLE,
THIS_VARIABLE,
+ SLOPPY_BLOCK_FUNCTION_VARIABLE,
SLOPPY_FUNCTION_NAME_VARIABLE
};
@@ -1178,156 +1145,6 @@ enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
-enum FunctionKind : uint8_t {
- kNormalFunction,
- kArrowFunction,
- kGeneratorFunction,
- kConciseMethod,
- kDerivedConstructor,
- kBaseConstructor,
- kGetterFunction,
- kSetterFunction,
- kAsyncFunction,
- kModule,
- kClassMembersInitializerFunction,
-
- kDefaultBaseConstructor,
- kDefaultDerivedConstructor,
- kAsyncArrowFunction,
- kAsyncConciseMethod,
-
- kConciseGeneratorMethod,
- kAsyncConciseGeneratorMethod,
- kAsyncGeneratorFunction,
- kLastFunctionKind = kAsyncGeneratorFunction,
-};
-
-inline bool IsArrowFunction(FunctionKind kind) {
- return kind == FunctionKind::kArrowFunction ||
- kind == FunctionKind::kAsyncArrowFunction;
-}
-
-inline bool IsModule(FunctionKind kind) {
- return kind == FunctionKind::kModule;
-}
-
-inline bool IsAsyncGeneratorFunction(FunctionKind kind) {
- return kind == FunctionKind::kAsyncGeneratorFunction ||
- kind == FunctionKind::kAsyncConciseGeneratorMethod;
-}
-
-inline bool IsGeneratorFunction(FunctionKind kind) {
- return kind == FunctionKind::kGeneratorFunction ||
- kind == FunctionKind::kConciseGeneratorMethod ||
- IsAsyncGeneratorFunction(kind);
-}
-
-inline bool IsAsyncFunction(FunctionKind kind) {
- return kind == FunctionKind::kAsyncFunction ||
- kind == FunctionKind::kAsyncArrowFunction ||
- kind == FunctionKind::kAsyncConciseMethod ||
- IsAsyncGeneratorFunction(kind);
-}
-
-inline bool IsResumableFunction(FunctionKind kind) {
- return IsGeneratorFunction(kind) || IsAsyncFunction(kind) || IsModule(kind);
-}
-
-inline bool IsConciseMethod(FunctionKind kind) {
- return kind == FunctionKind::kConciseMethod ||
- kind == FunctionKind::kConciseGeneratorMethod ||
- kind == FunctionKind::kAsyncConciseMethod ||
- kind == FunctionKind::kAsyncConciseGeneratorMethod ||
- kind == FunctionKind::kClassMembersInitializerFunction;
-}
-
-inline bool IsGetterFunction(FunctionKind kind) {
- return kind == FunctionKind::kGetterFunction;
-}
-
-inline bool IsSetterFunction(FunctionKind kind) {
- return kind == FunctionKind::kSetterFunction;
-}
-
-inline bool IsAccessorFunction(FunctionKind kind) {
- return kind == FunctionKind::kGetterFunction ||
- kind == FunctionKind::kSetterFunction;
-}
-
-inline bool IsDefaultConstructor(FunctionKind kind) {
- return kind == FunctionKind::kDefaultBaseConstructor ||
- kind == FunctionKind::kDefaultDerivedConstructor;
-}
-
-inline bool IsBaseConstructor(FunctionKind kind) {
- return kind == FunctionKind::kBaseConstructor ||
- kind == FunctionKind::kDefaultBaseConstructor;
-}
-
-inline bool IsDerivedConstructor(FunctionKind kind) {
- return kind == FunctionKind::kDerivedConstructor ||
- kind == FunctionKind::kDefaultDerivedConstructor;
-}
-
-
-inline bool IsClassConstructor(FunctionKind kind) {
- return IsBaseConstructor(kind) || IsDerivedConstructor(kind);
-}
-
-inline bool IsClassMembersInitializerFunction(FunctionKind kind) {
- return kind == FunctionKind::kClassMembersInitializerFunction;
-}
-
-inline bool IsConstructable(FunctionKind kind) {
- if (IsAccessorFunction(kind)) return false;
- if (IsConciseMethod(kind)) return false;
- if (IsArrowFunction(kind)) return false;
- if (IsGeneratorFunction(kind)) return false;
- if (IsAsyncFunction(kind)) return false;
- return true;
-}
-
-inline std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
- switch (kind) {
- case FunctionKind::kNormalFunction:
- return os << "NormalFunction";
- case FunctionKind::kArrowFunction:
- return os << "ArrowFunction";
- case FunctionKind::kGeneratorFunction:
- return os << "GeneratorFunction";
- case FunctionKind::kConciseMethod:
- return os << "ConciseMethod";
- case FunctionKind::kDerivedConstructor:
- return os << "DerivedConstructor";
- case FunctionKind::kBaseConstructor:
- return os << "BaseConstructor";
- case FunctionKind::kGetterFunction:
- return os << "GetterFunction";
- case FunctionKind::kSetterFunction:
- return os << "SetterFunction";
- case FunctionKind::kAsyncFunction:
- return os << "AsyncFunction";
- case FunctionKind::kModule:
- return os << "Module";
- case FunctionKind::kClassMembersInitializerFunction:
- return os << "ClassMembersInitializerFunction";
- case FunctionKind::kDefaultBaseConstructor:
- return os << "DefaultBaseConstructor";
- case FunctionKind::kDefaultDerivedConstructor:
- return os << "DefaultDerivedConstructor";
- case FunctionKind::kAsyncArrowFunction:
- return os << "AsyncArrowFunction";
- case FunctionKind::kAsyncConciseMethod:
- return os << "AsyncConciseMethod";
- case FunctionKind::kConciseGeneratorMethod:
- return os << "ConciseGeneratorMethod";
- case FunctionKind::kAsyncConciseGeneratorMethod:
- return os << "AsyncConciseGeneratorMethod";
- case FunctionKind::kAsyncGeneratorFunction:
- return os << "AsyncGeneratorFunction";
- }
- UNREACHABLE();
-}
enum class InterpreterPushArgsMode : unsigned {
kArrayFunction,
@@ -1728,6 +1545,9 @@ enum class StubCallMode {
kCallBuiltinPointer,
};
+constexpr int kFunctionLiteralIdInvalid = -1;
+constexpr int kFunctionLiteralIdTopLevel = 0;
+
} // namespace internal
} // namespace v8
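Two of the globals.h changes are easy to sanity-check: kMaxRegularHeapObjectSize is now derived from the page size instead of being hard-coded per architecture, and AllocationType expresses allocation requests independently of the concrete space. A quick arithmetic check, assuming V8's usual kPageSizeBits of 19 (512 KB pages; the actual value is configured per build):

    #include <cstdio>

    int main() {
      // Hypothetical value; V8 configures kPageSizeBits per build.
      constexpr int kPageSizeBits = 19;              // 512 KB pages
      constexpr int kPageSize = 1 << kPageSizeBits;  // 524288
      constexpr int kMaxRegularHeapObjectSize = 1 << (kPageSizeBits - 1);
      static_assert(kMaxRegularHeapObjectSize == kPageSize / 2,
                    "half of the page size, per the new comment");
      std::printf("max regular object: %d bytes\n", kMaxRegularHeapObjectSize);
    }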
diff --git a/deps/v8/src/handler-table.cc b/deps/v8/src/handler-table.cc
index cdf95ce57b..56c5cefecb 100644
--- a/deps/v8/src/handler-table.cc
+++ b/deps/v8/src/handler-table.cc
@@ -14,7 +14,9 @@ namespace v8 {
namespace internal {
HandlerTable::HandlerTable(Code code)
- : HandlerTable(code->InstructionStart(), code->handler_table_offset()) {}
+ : HandlerTable(code->InstructionStart(), code->has_handler_table()
+ ? code->handler_table_offset()
+ : 0) {}
HandlerTable::HandlerTable(BytecodeArray bytecode_array)
: HandlerTable(bytecode_array->handler_table()) {}
@@ -29,6 +31,11 @@ HandlerTable::HandlerTable(ByteArray byte_array)
reinterpret_cast<Address>(byte_array->GetDataStartAddress())) {
}
+// TODO(jgruber,v8:8758): This constructor should eventually take the handler
+// table size in addition to the offset. That way the {HandlerTable} class
+// remains independent of how the offset/size is encoded in the various code
+// objects. This could even allow us to change the encoding to no longer expect
+// the "number of entries" in the beginning.
HandlerTable::HandlerTable(Address instruction_start,
size_t handler_table_offset)
: number_of_entries_(0),
diff --git a/deps/v8/src/handler-table.h b/deps/v8/src/handler-table.h
index 97f91dd6b0..b4ea8b6ed7 100644
--- a/deps/v8/src/handler-table.h
+++ b/deps/v8/src/handler-table.h
@@ -110,7 +110,7 @@ class V8_EXPORT_PRIVATE HandlerTable {
// the GC heap (either {ByteArray} or {Code}) and hence would become stale
// during a collection. Hence we disallow any allocation.
Address raw_encoded_data_;
- DISALLOW_HEAP_ALLOCATION(no_gc_);
+ DISALLOW_HEAP_ALLOCATION(no_gc_)
// Layout description for handler table based on ranges.
static const int kRangeStartIndex = 0;
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 7a6c06f571..21d21be9fd 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -12,6 +12,11 @@
#include "src/objects-inl.h"
#include "src/roots-inl.h"
+#ifdef DEBUG
+// For GetIsolateFromWritableHeapObject.
+#include "src/heap/heap-write-barrier-inl.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -29,7 +34,7 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
if (object->IsSmi()) return true;
HeapObject heap_object = HeapObject::cast(object);
Isolate* isolate;
- if (!Isolate::FromWritableHeapObject(heap_object, &isolate)) return true;
+ if (!GetIsolateFromWritableObject(heap_object, &isolate)) return true;
RootIndex root_index;
if (isolate->roots_table().IsRootHandleLocation(location_, &root_index) &&
RootsTable::IsImmortalImmovable(root_index)) {
diff --git a/deps/v8/src/hash-seed-inl.h b/deps/v8/src/hash-seed-inl.h
new file mode 100644
index 0000000000..575da0c9fd
--- /dev/null
+++ b/deps/v8/src/hash-seed-inl.h
@@ -0,0 +1,50 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HASH_SEED_INL_H_
+#define V8_HASH_SEED_INL_H_
+
+#include <stdint.h>
+
+// The #includes below currently lead to cyclic transitive includes, so
+// HashSeed() ends up being required before it is defined; we therefore have
+// to declare it here. This is a workaround: if we needed it permanently, the
+// declaration should move into a "hash-seed.h" header, but we won't need it
+// for long.
+// TODO(jkummerow): Get rid of this by breaking circular include dependencies.
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class ReadOnlyRoots;
+
+// TODO(v8:7464): Remove the Isolate version of this.
+inline uint64_t HashSeed(Isolate* isolate);
+inline uint64_t HashSeed(ReadOnlyRoots roots);
+
+} // namespace internal
+} // namespace v8
+
+// See comment above for why this isn't at the top of the file.
+#include "src/objects/fixed-array-inl.h"
+#include "src/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+inline uint64_t HashSeed(Isolate* isolate) {
+ return HashSeed(ReadOnlyRoots(isolate));
+}
+
+inline uint64_t HashSeed(ReadOnlyRoots roots) {
+ uint64_t seed;
+ roots.hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed), kInt64Size);
+ DCHECK(FLAG_randomize_hashes || seed == 0);
+ return seed;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HASH_SEED_INL_H_
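Apart from the declare-then-include workaround documented in the header itself, the interesting detail is the copy_out into a local: the seed is read bytewise from the hash_seed ByteArray rather than through a typed load. A standalone sketch of that read (not V8 code; std::memcpy plays the role of copy_out):

    #include <cstdint>
    #include <cstring>

    // Mirrors HashSeed()'s copy_out(0, ..., kInt64Size): reads a 64-bit seed
    // out of a byte buffer without relying on alignment.
    uint64_t ReadSeed(const uint8_t* bytes) {
      uint64_t seed;
      std::memcpy(&seed, bytes, sizeof(seed));
      return seed;
    }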
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index f22f8cbb61..c08b4aafdd 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -11,11 +11,13 @@
V(_, calendar_string, "calendar") \
V(_, cardinal_string, "cardinal") \
V(_, caseFirst_string, "caseFirst") \
+ V(_, dateStyle_string, "dateStyle") \
V(_, day_string, "day") \
V(_, dayPeriod_string, "dayPeriod") \
V(_, decimal_string, "decimal") \
V(_, era_string, "era") \
V(_, fraction_string, "fraction") \
+ V(_, full_string, "full") \
V(_, granularity_string, "granularity") \
V(_, grapheme_string, "grapheme") \
V(_, group_string, "group") \
@@ -65,6 +67,7 @@
V(_, strict_string, "strict") \
V(_, style_string, "style") \
V(_, term_string, "term") \
+ V(_, timeStyle_string, "timeStyle") \
V(_, timeZone_string, "timeZone") \
V(_, timeZoneName_string, "timeZoneName") \
V(_, type_string, "type") \
@@ -111,6 +114,7 @@
V(_, buffer_string, "buffer") \
V(_, byte_length_string, "byteLength") \
V(_, byte_offset_string, "byteOffset") \
+ V(_, CompileError_string, "CompileError") \
V(_, call_string, "call") \
V(_, callee_string, "callee") \
V(_, caller_string, "caller") \
@@ -118,11 +122,11 @@
V(_, closure_string, "(closure)") \
V(_, code_string, "code") \
V(_, column_string, "column") \
- V(_, CompileError_string, "CompileError") \
+ V(_, computed_string, "<computed>") \
V(_, configurable_string, "configurable") \
+ V(_, conjunction_string, "conjunction") \
V(_, construct_string, "construct") \
V(_, constructor_string, "constructor") \
- V(_, conjunction_string, "conjunction") \
V(_, create_string, "create") \
V(_, Date_string, "Date") \
V(_, date_to_string, "[object Date]") \
@@ -133,6 +137,7 @@
V(_, display_name_string, "displayName") \
V(_, done_string, "done") \
V(_, dot_catch_string, ".catch") \
+ V(_, dot_default_string, ".default") \
V(_, dot_for_string, ".for") \
V(_, dot_generator_object_string, ".generator_object") \
V(_, dot_iterator_string, ".iterator") \
@@ -189,6 +194,7 @@
V(_, long_string, "long") \
V(_, Map_string, "Map") \
V(_, MapIterator_string, "Map Iterator") \
+ V(_, medium_string, "medium") \
V(_, message_string, "message") \
V(_, meta_string, "meta") \
V(_, minus_Infinity_string, "-Infinity") \
@@ -253,7 +259,6 @@
V(_, sourceText_string, "sourceText") \
V(_, stack_string, "stack") \
V(_, stackTraceLimit_string, "stackTraceLimit") \
- V(_, star_default_star_string, "*default*") \
V(_, sticky_string, "sticky") \
V(_, String_string, "String") \
V(_, string_string, "string") \
@@ -283,8 +288,6 @@
V(_, value_string, "value") \
V(_, valueOf_string, "valueOf") \
V(_, values_string, "values") \
- V(_, WeakCell_string, "WeakCell") \
- V(_, WeakFactory_string, "WeakFactory") \
V(_, WeakMap_string, "WeakMap") \
V(_, WeakRef_string, "WeakRef") \
V(_, WeakSet_string, "WeakSet") \
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 08d4cc7d9e..04c14b15fd 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -7,8 +7,8 @@
#include "src/conversions-inl.h"
#include "src/heap/array-buffer-tracker.h"
-#include "src/heap/heap.h"
-#include "src/heap/spaces.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/spaces-inl.h"
#include "src/objects.h"
#include "src/objects/js-array-buffer-inl.h"
@@ -18,6 +18,9 @@ namespace internal {
void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) {
if (buffer->backing_store() == nullptr) return;
+ // ArrayBuffer tracking works only for small objects.
+ DCHECK(!heap->IsLargeObject(buffer));
+
const size_t length = buffer->byte_length();
Page* page = Page::FromHeapObject(buffer);
{
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index d8e1001106..6c20e699fc 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -5,6 +5,7 @@
#include "src/heap/code-stats.h"
#include "src/code-comments.h"
+#include "src/heap/spaces-inl.h" // For HeapObjectIterator.
#include "src/objects-inl.h"
#include "src/reloc-info.h"
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index f98c4c400d..c993eadea0 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -19,8 +19,11 @@
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
#include "src/isolate.h"
+#include "src/objects/data-handler-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
+#include "src/transitions-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -34,11 +37,11 @@ class ConcurrentMarkingState final
explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
: memory_chunk_data_(memory_chunk_data) {}
- Bitmap* bitmap(const MemoryChunk* chunk) {
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
- return chunk->marking_bitmap_;
+ return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -237,24 +240,24 @@ class ConcurrentMarkingVisitor final
return size;
}
- int VisitJSWeakCell(Map map, JSWeakCell weak_cell) {
- int size = VisitJSObjectSubclass(map, weak_cell);
- if (size == 0) {
- return 0;
- }
+ int VisitWeakCell(Map map, WeakCell weak_cell) {
+ if (!ShouldVisit(weak_cell)) return 0;
+ int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
+ VisitMapPointer(weak_cell, weak_cell->map_slot());
+ WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
if (weak_cell->target()->IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_cell->target());
if (marking_state_.IsBlackOrGrey(target)) {
- // Record the slot inside the JSWeakCell, since the
- // VisitJSObjectSubclass above didn't visit it.
+ // Record the slot inside the WeakCell, since the IterateBody above
+ // didn't visit it.
ObjectSlot slot =
- HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
+ HeapObject::RawField(weak_cell, WeakCell::kTargetOffset);
MarkCompactCollector::RecordSlot(weak_cell, slot, target);
} else {
- // JSWeakCell points to a potentially dead object. We have to process
+ // WeakCell points to a potentially dead object. We have to process
// them when we know the liveness of the whole transitive closure.
- weak_objects_->js_weak_cells.Push(task_id_, weak_cell);
+ weak_objects_->weak_cells.Push(task_id_, weak_cell);
}
}
return size;
@@ -324,14 +327,26 @@ class ConcurrentMarkingVisitor final
DCHECK(marking_state_.IsBlackOrGrey(object));
marking_state_.GreyToBlack(object);
int size = FixedArray::BodyDescriptor::SizeOf(map, object);
- int start =
- Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
+ size_t current_progress_bar = chunk->ProgressBar();
+ if (current_progress_bar == 0) {
+ // Try to move the progress bar forward to start offset. This solves the
+ // problem of not being able to observe a progress bar reset when
+ // processing the first kProgressBarScanningChunk.
+ if (!chunk->TrySetProgressBar(0,
+ FixedArray::BodyDescriptor::kStartOffset))
+ return 0;
+ current_progress_bar = FixedArray::BodyDescriptor::kStartOffset;
+ }
+ int start = static_cast<int>(current_progress_bar);
int end = Min(size, start + kProgressBarScanningChunk);
if (start < end) {
VisitPointers(object, HeapObject::RawField(object, start),
HeapObject::RawField(object, end));
- chunk->set_progress_bar(end);
- if (end < size) {
+ // Setting the progress bar can fail if the object that is currently
+ // scanned is also revisited. In this case, there may be two tasks racing
+ // on the progress counter. The loser can bail out because the progress
+ // bar is reset before the tasks race on the object.
+ if (chunk->TrySetProgressBar(current_progress_bar, end) && (end < size)) {
// The object can be pushed back onto the marking worklist only after
// progress bar was updated.
shared_.Push(object);
@@ -578,7 +593,7 @@ class ConcurrentMarkingVisitor final
void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
- DCHECK(host->IsJSWeakCell() || host->IsJSWeakRef());
+ DCHECK(host->IsWeakCell() || host->IsJSWeakRef());
}
private:
@@ -794,8 +809,10 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
// The order of the two loads is important.
Address new_space_top = heap_->new_space()->original_top_acquire();
Address new_space_limit = heap_->new_space()->original_limit_relaxed();
+ Address new_large_object = heap_->new_lo_space()->pending_object();
Address addr = object->address();
- if (new_space_top <= addr && addr < new_space_limit) {
+ if ((new_space_top <= addr && addr < new_space_limit) ||
+ addr == new_large_object) {
on_hold_->Push(task_id, object);
} else {
Map map = object->synchronized_map();
@@ -833,7 +850,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
weak_objects_->weak_references.FlushToGlobal(task_id);
weak_objects_->js_weak_refs.FlushToGlobal(task_id);
- weak_objects_->js_weak_cells.FlushToGlobal(task_id);
+ weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
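The progress-bar change in ConcurrentMarkingVisitor is the core of the race fix: advancing the bar is now a compare-and-swap, so when two tasks scan the same large FixedArray only one advances it and the loser bails out. A self-contained sketch of the contract with std::atomic (hypothetical standalone type, not V8's MemoryChunk):

    #include <atomic>
    #include <cstddef>

    // Sketch of the TrySetProgressBar contract: advance only if nobody else
    // moved the counter since we read it.
    class ProgressBar {
     public:
      size_t Value() const { return bar_.load(std::memory_order_acquire); }
      bool TrySet(size_t expected, size_t next) {
        return bar_.compare_exchange_strong(expected, next,
                                            std::memory_order_acq_rel);
      }

     private:
      std::atomic<size_t> bar_{0};
    };

    // A marking task scans [current, end) and treats a failed CAS as "another
    // task (or a reset) won the race", so it must not re-queue the object.
    bool ScanChunk(ProgressBar* bar, size_t end) {
      size_t current = bar->Value();
      // ... visit slots in [current, end) ...
      return bar->TrySet(current, end);
    }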
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 09242042dd..1102c8f2af 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -54,6 +54,10 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
bool Trace(double deadline);
bool IsRemoteTracingDone();
+ bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
+ return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
+ }
+
void NotifyV8MarkingWorklistWasEmpty() {
num_v8_marking_worklist_was_empty_++;
}
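IsRootForNonTracingGC above defaults to treating every traced handle as a root when no remote tracer is attached and otherwise defers to the embedder. The guard, distilled into a standalone sketch with placeholder types:

    // Sketch of the InUse()-guarded delegation; types are placeholders.
    struct RemoteTracer {
      virtual ~RemoteTracer() = default;
      virtual bool IsRootForNonTracingGC(const void* handle) = 0;
    };

    class LocalTracerSketch {
     public:
      void SetRemoteTracer(RemoteTracer* remote) { remote_ = remote; }
      bool InUse() const { return remote_ != nullptr; }

      bool IsRootForNonTracingGC(const void* handle) {
        // Conservative default: without an embedder tracer, every traced
        // handle keeps its target alive across a non-tracing (scavenge) GC.
        return !InUse() || remote_->IsRootForNonTracingGC(handle);
      }

     private:
      RemoteTracer* remote_ = nullptr;
    };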
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index f707cd242d..25cbd06a7c 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -10,9 +10,12 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "src/handles-inl.h"
+#include "src/isolate-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/feedback-cell.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/oddball.h"
+#include "src/objects/string-inl.h"
#include "src/string-hasher.h"
namespace v8 {
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index f82d8937c3..6dea2cae31 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -14,10 +14,15 @@
#include "src/compiler.h"
#include "src/conversions.h"
#include "src/counters.h"
+#include "src/hash-seed-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
+#include "src/ic/handler-configuration-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/log.h"
+#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
@@ -41,8 +46,10 @@
#include "src/objects/scope-info.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/objects/template-objects-inl.h"
+#include "src/transitions-inl.h"
#include "src/unicode-cache.h"
-#include "src/unicode-decoder.h"
+#include "src/unicode-inl.h"
namespace v8 {
namespace internal {
@@ -71,28 +78,27 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
Handle<DeoptimizationData> deopt_data,
Handle<ByteArray> reloc_info,
Handle<CodeDataContainer> data_container,
- bool is_turbofanned, int stack_slots,
- int safepoint_table_offset, int handler_table_offset) {
+ bool is_turbofanned, int stack_slots) {
DCHECK(IsAligned(code->address(), kCodeAlignment));
DCHECK_IMPLIES(
!heap->memory_allocator()->code_range().is_empty(),
heap->memory_allocator()->code_range().contains(code->address()));
- bool has_unwinding_info = desc.unwinding_info != nullptr;
+ constexpr bool kIsNotOffHeapTrampoline = false;
+ const bool has_unwinding_info = desc.unwinding_info != nullptr;
code->set_raw_instruction_size(desc.instr_size);
code->set_relocation_info(*reloc_info);
- const bool is_off_heap_trampoline = false;
code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots,
- is_off_heap_trampoline);
- code->set_safepoint_table_offset(safepoint_table_offset);
- code->set_handler_table_offset(handler_table_offset);
+ kIsNotOffHeapTrampoline);
+ code->set_builtin_index(builtin_index);
code->set_code_data_container(*data_container);
code->set_deoptimization_data(*deopt_data);
code->set_source_position_table(*source_position_table);
- code->set_constant_pool_offset(desc.constant_pool_offset());
- code->set_code_comments_offset(desc.code_comments_offset());
- code->set_builtin_index(builtin_index);
+ code->set_safepoint_table_offset(desc.safepoint_table_offset);
+ code->set_handler_table_offset(desc.handler_table_offset);
+ code->set_constant_pool_offset(desc.constant_pool_offset);
+ code->set_code_comments_offset(desc.code_comments_offset);
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
@@ -126,8 +132,9 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
HeapObject Factory::AllocateRawWithImmortalMap(int size,
PretenureFlag pretenure, Map map,
AllocationAlignment alignment) {
- HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
- size, Heap::SelectSpace(pretenure), alignment);
+ AllocationType type = Heap::SelectType(Heap::SelectSpace(pretenure));
+ HeapObject result =
+ isolate()->heap()->AllocateRawWithRetryOrFail(size, type, alignment);
result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
return result;
}
@@ -136,11 +143,11 @@ HeapObject Factory::AllocateRawWithAllocationSite(
Handle<Map> map, PretenureFlag pretenure,
Handle<AllocationSite> allocation_site) {
DCHECK(map->instance_type() != MAP_TYPE);
+ AllocationType type = Heap::SelectType(Heap::SelectSpace(pretenure));
int size = map->instance_size();
if (!allocation_site.is_null()) size += AllocationMemento::kSize;
AllocationSpace space = Heap::SelectSpace(pretenure);
- HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, space);
+ HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(size, type);
WriteBarrierMode write_barrier_mode =
space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
result->set_map_after_allocation(*map, write_barrier_mode);
@@ -163,9 +170,8 @@ void Factory::InitializeAllocationMemento(AllocationMemento memento,
}
HeapObject Factory::AllocateRawArray(int size, PretenureFlag pretenure) {
- AllocationSpace space = Heap::SelectSpace(pretenure);
- HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, space);
+ AllocationType type = Heap::SelectType(Heap::SelectSpace(pretenure));
+ HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(size, type);
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
@@ -191,12 +197,12 @@ HeapObject Factory::AllocateRawWeakArrayList(int capacity,
HeapObject Factory::New(Handle<Map> map, PretenureFlag pretenure) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
- AllocationSpace space = Heap::SelectSpace(pretenure);
- HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, space);
+ AllocationType type = Heap::SelectType(Heap::SelectSpace(pretenure));
+ HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(size, type);
// New space objects are allocated white.
- WriteBarrierMode write_barrier_mode =
- space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ WriteBarrierMode write_barrier_mode = type == AllocationType::kYoung
+ ? SKIP_WRITE_BARRIER
+ : UPDATE_WRITE_BARRIER;
result->set_map_after_allocation(*map, write_barrier_mode);
return result;
}
@@ -205,7 +211,8 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
AllocationSpace space) {
AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
Heap* heap = isolate()->heap();
- HeapObject result = heap->AllocateRawWithRetryOrFail(size, space, alignment);
+ HeapObject result = heap->AllocateRawWithRetryOrFail(
+ size, Heap::SelectType(space), alignment);
#ifdef DEBUG
MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
DCHECK(chunk->owner()->identity() == space);
@@ -375,8 +382,9 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
int size = FixedArray::SizeFor(length);
AllocationSpace space = Heap::SelectSpace(pretenure);
+ AllocationType type = Heap::SelectType(space);
Heap* heap = isolate()->heap();
- AllocationResult allocation = heap->AllocateRaw(size, space);
+ AllocationResult allocation = heap->AllocateRaw(size, type);
HeapObject result;
if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
@@ -605,12 +613,12 @@ Handle<AccessorPair> Factory::NewAccessorPair() {
// Internalized strings are created in the old generation (data space).
Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
- Utf8StringKey key(string, isolate()->heap()->HashSeed());
+ Utf8StringKey key(string, HashSeed(isolate()));
return InternalizeStringWithKey(&key);
}
Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
- OneByteStringKey key(string, isolate()->heap()->HashSeed());
+ OneByteStringKey key(string, HashSeed(isolate()));
return InternalizeStringWithKey(&key);
}
@@ -621,7 +629,7 @@ Handle<String> Factory::InternalizeOneByteString(
}
Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) {
- TwoByteStringKey key(string, isolate()->heap()->HashSeed());
+ TwoByteStringKey key(string, HashSeed(isolate()));
return InternalizeStringWithKey(&key);
}
@@ -661,13 +669,38 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
return NewStringFromOneByte(Vector<const uint8_t>::cast(string), pretenure);
}
- // Non-ASCII and we need to decode.
- auto non_ascii = string.SubVector(non_ascii_start, length);
- Access<UnicodeCache::Utf8Decoder> decoder(
- isolate()->unicode_cache()->utf8_decoder());
- decoder->Reset(non_ascii);
+ std::unique_ptr<uint16_t[]> buffer(new uint16_t[length - non_ascii_start]);
+
+ const uint8_t* cursor =
+ reinterpret_cast<const uint8_t*>(&string[non_ascii_start]);
+ const uint8_t* end = reinterpret_cast<const uint8_t*>(string.end());
+
+ uint16_t* output_cursor = buffer.get();
+
+ uint32_t incomplete_char = 0;
+ unibrow::Utf8::State state = unibrow::Utf8::State::kAccept;
- int utf16_length = static_cast<int>(decoder->Utf16Length());
+ while (cursor < end) {
+ unibrow::uchar t =
+ unibrow::Utf8::ValueOfIncremental(&cursor, &state, &incomplete_char);
+
+ if (V8_LIKELY(t <= unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+ *(output_cursor++) = static_cast<uc16>(t); // The most frequent case.
+ } else if (t == unibrow::Utf8::kIncomplete) {
+ continue;
+ } else {
+ *(output_cursor++) = unibrow::Utf16::LeadSurrogate(t);
+ *(output_cursor++) = unibrow::Utf16::TrailSurrogate(t);
+ }
+ }
+
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncrementalFinish(&state);
+ if (t != unibrow::Utf8::kBufferEmpty) {
+ *(output_cursor++) = static_cast<uc16>(t);
+ }
+
+ DCHECK_LE(output_cursor, buffer.get() + length - non_ascii_start);
+ int utf16_length = static_cast<int>(output_cursor - buffer.get());
DCHECK_GT(utf16_length, 0);
// Allocate string.
@@ -676,15 +709,13 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
isolate(), result,
NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
- // Copy ASCII portion.
+ DCHECK_LE(non_ascii_start + utf16_length, length);
+
DisallowHeapAllocation no_gc;
uint16_t* data = result->GetChars(no_gc);
- for (int i = 0; i < non_ascii_start; i++) {
- *data++ = *ascii_data++;
- }
+ CopyChars(data, ascii_data, non_ascii_start);
+ CopyChars(data + non_ascii_start, buffer.get(), utf16_length);
- // Now write the remainder.
- decoder->WriteUtf16(data, utf16_length, non_ascii);
return result;
}
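The rewritten NewStringFromUtf8 drops the cached Utf8Decoder and decodes in a single incremental pass, writing UTF-16 code units directly and splitting supplementary-plane characters into surrogate pairs. The same transformation as a self-contained sketch (well-formed input assumed; V8's unibrow additionally repairs malformed sequences, which is omitted here):

    #include <cstdint>
    #include <vector>

    // Decodes well-formed UTF-8 into UTF-16 code units, emitting a surrogate
    // pair for code points above U+FFFF.
    std::vector<uint16_t> Utf8ToUtf16(const uint8_t* cursor,
                                      const uint8_t* end) {
      std::vector<uint16_t> out;
      while (cursor < end) {
        uint32_t cp;
        if (cursor[0] < 0x80) {                     // 1-byte (ASCII)
          cp = *cursor++;
        } else if ((cursor[0] & 0xE0) == 0xC0) {    // 2-byte sequence
          cp = (cursor[0] & 0x1F) << 6 | (cursor[1] & 0x3F);
          cursor += 2;
        } else if ((cursor[0] & 0xF0) == 0xE0) {    // 3-byte sequence
          cp = (cursor[0] & 0x0F) << 12 | (cursor[1] & 0x3F) << 6 |
               (cursor[2] & 0x3F);
          cursor += 3;
        } else {                                    // 4-byte sequence
          cp = (cursor[0] & 0x07) << 18 | (cursor[1] & 0x3F) << 12 |
               (cursor[2] & 0x3F) << 6 | (cursor[3] & 0x3F);
          cursor += 4;
        }
        if (cp <= 0xFFFF) {
          out.push_back(static_cast<uint16_t>(cp));
        } else {
          cp -= 0x10000;
          out.push_back(static_cast<uint16_t>(0xD800 + (cp >> 10)));    // lead
          out.push_back(static_cast<uint16_t>(0xDC00 + (cp & 0x3FF)));  // trail
        }
      }
      return out;
    }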
@@ -961,14 +992,10 @@ MaybeHandle<Map> GetInternalizedStringMap(Factory* f, Handle<String> string) {
return f->external_internalized_string_map();
case EXTERNAL_ONE_BYTE_STRING_TYPE:
return f->external_one_byte_internalized_string_map();
- case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return f->external_internalized_string_with_one_byte_data_map();
case UNCACHED_EXTERNAL_STRING_TYPE:
return f->uncached_external_internalized_string_map();
case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
return f->uncached_external_one_byte_internalized_string_map();
- case UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return f->uncached_external_internalized_string_with_one_byte_data_map();
default:
return MaybeHandle<Map>(); // No match found.
}
@@ -978,8 +1005,9 @@ MaybeHandle<Map> GetInternalizedStringMap(Factory* f, Handle<String> string) {
MaybeHandle<Map> Factory::InternalizedStringMapForString(
Handle<String> string) {
- // If the string is in new space it cannot be used as internalized.
- if (Heap::InNewSpace(*string)) return MaybeHandle<Map>();
+ // If the string is in the young generation, it cannot be used as
+ // internalized.
+ if (Heap::InYoungGeneration(*string)) return MaybeHandle<Map>();
return GetInternalizedStringMap(this, string);
}
@@ -1145,17 +1173,6 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
bool left_is_one_byte = left->IsOneByteRepresentation();
bool right_is_one_byte = right->IsOneByteRepresentation();
bool is_one_byte = left_is_one_byte && right_is_one_byte;
- bool is_one_byte_data_in_two_byte_string = false;
- if (!is_one_byte) {
- // At least one of the strings uses two-byte representation so we
- // can't use the fast case code for uncached one-byte strings below, but
- // we can try to save memory if all chars actually fit in one-byte.
- is_one_byte_data_in_two_byte_string =
- left->HasOnlyOneByteChars() && right->HasOnlyOneByteChars();
- if (is_one_byte_data_in_two_byte_string) {
- isolate()->counters()->string_add_runtime_ext_to_one_byte()->Increment();
- }
- }
// If the resulting string is small make a flat string.
if (length < ConsString::kMinLength) {
@@ -1184,16 +1201,11 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
return result;
}
- return (is_one_byte_data_in_two_byte_string)
- ? ConcatStringContent<uint8_t>(
- NewRawOneByteString(length).ToHandleChecked(), left, right)
- : ConcatStringContent<uc16>(
- NewRawTwoByteString(length).ToHandleChecked(), left,
- right);
+ return ConcatStringContent<uc16>(
+ NewRawTwoByteString(length).ToHandleChecked(), left, right);
}
- bool one_byte = (is_one_byte || is_one_byte_data_in_two_byte_string);
- return NewConsString(left, right, length, one_byte);
+ return NewConsString(left, right, length, is_one_byte);
}
Handle<String> Factory::NewConsString(Handle<String> left, Handle<String> right,
@@ -1308,12 +1320,9 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
}
if (length == 0) return empty_string();
- Handle<Map> map;
- if (!resource->IsCacheable()) {
- map = uncached_external_one_byte_string_map();
- } else {
- map = external_one_byte_string_map();
- }
+ Handle<Map> map = resource->IsCacheable()
+ ? external_one_byte_string_map()
+ : uncached_external_one_byte_string_map();
Handle<ExternalOneByteString> external_string(
ExternalOneByteString::cast(New(map, TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
@@ -1332,20 +1341,8 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
}
if (length == 0) return empty_string();
- // For small strings we check whether the resource contains only
- // one byte characters. If yes, we use a different string map.
- static const size_t kOneByteCheckLengthLimit = 32;
- bool is_one_byte =
- length <= kOneByteCheckLengthLimit &&
- String::IsOneByte(resource->data(), static_cast<int>(length));
- Handle<Map> map;
- if (!resource->IsCacheable()) {
- map = is_one_byte ? uncached_external_string_with_one_byte_data_map()
- : uncached_external_string_map();
- } else {
- map = is_one_byte ? external_string_with_one_byte_data_map()
- : external_string_map();
- }
+ Handle<Map> map = resource->IsCacheable() ? external_string_map()
+ : uncached_external_string_map();
Handle<ExternalTwoByteString> external_string(
ExternalTwoByteString::cast(New(map, TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
@@ -1735,12 +1732,13 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
return microtask;
}
-Handle<WeakFactoryCleanupJobTask> Factory::NewWeakFactoryCleanupJobTask(
- Handle<JSWeakFactory> weak_factory) {
- Handle<WeakFactoryCleanupJobTask> microtask =
- Handle<WeakFactoryCleanupJobTask>::cast(
- NewStruct(WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE));
- microtask->set_factory(*weak_factory);
+Handle<FinalizationGroupCleanupJobTask>
+Factory::NewFinalizationGroupCleanupJobTask(
+ Handle<JSFinalizationGroup> finalization_group) {
+ Handle<FinalizationGroupCleanupJobTask> microtask =
+ Handle<FinalizationGroupCleanupJobTask>::cast(
+ NewStruct(FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE));
+ microtask->set_finalization_group(*finalization_group);
return microtask;
}
@@ -1777,7 +1775,7 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
// Bytecode array is pretenured, so constant pool array should be too.
- DCHECK(!Heap::InNewSpace(*constant_pool));
+ DCHECK(!Heap::InYoungGeneration(*constant_pool));
int size = BytecodeArray::SizeFor(length);
HeapObject result =
@@ -1793,7 +1791,7 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(*constant_pool);
instance->set_handler_table(*empty_byte_array());
- instance->set_source_position_table(*empty_byte_array());
+ instance->set_source_position_table(*undefined_value());
CopyBytes(reinterpret_cast<byte*>(instance->GetFirstBytecodeAddress()),
raw_bytecodes, length);
instance->clear_padding();
@@ -1813,7 +1811,7 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(result),
isolate());
elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
- elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
+ elements->set_external_pointer(external_pointer);
elements->set_length(length);
return elements;
}
@@ -1836,9 +1834,7 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
isolate());
elements->set_base_pointer(*elements, SKIP_WRITE_BARRIER);
elements->set_external_pointer(
- reinterpret_cast<void*>(
- ExternalReference::fixed_typed_array_base_data_offset().address()),
- SKIP_WRITE_BARRIER);
+ FixedTypedArrayBase::ExternalPointerPtrForOnHeapArray());
elements->set_length(static_cast<int>(length));
if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
return elements;
@@ -1909,14 +1905,13 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
int slack,
- PretenureFlag pretenure) {
+ AllocationType type) {
+ DCHECK(Heap::IsRegularObjectAllocation(type));
int number_of_all_descriptors = number_of_descriptors + slack;
// Zero-length case must be handled outside.
DCHECK_LT(0, number_of_all_descriptors);
int size = DescriptorArray::SizeFor(number_of_all_descriptors);
- DCHECK_LT(size, kMaxRegularHeapObjectSize);
- AllocationSpace space = Heap::SelectSpace(pretenure);
- HeapObject obj = isolate()->heap()->AllocateRawWithRetryOrFail(size, space);
+ HeapObject obj = isolate()->heap()->AllocateRawWithRetryOrFail(size, type);
obj->set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER);
DescriptorArray array = DescriptorArray::cast(obj);
array->Initialize(*empty_enum_cache(), *undefined_value(),
@@ -1966,8 +1961,8 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
!Map::CanHaveFastTransitionableElementsKind(type),
IsDictionaryElementsKind(elements_kind) ||
IsTerminalElementsKind(elements_kind));
- HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(Map::kSize, MAP_SPACE);
+ HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
+ Map::kSize, AllocationType::kMap);
result->set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
return handle(InitializeMap(Map::cast(result), type, instance_size,
elements_kind, inobject_properties),
@@ -2010,6 +2005,7 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
Map::OwnsDescriptorsBit::encode(true) |
Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
+ map->clear_padding();
map->set_elements_kind(elements_kind);
map->set_new_target_is_base(true);
isolate()->counters()->maps_created()->Increment();
@@ -2044,9 +2040,9 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
int adjusted_object_size =
site.is_null() ? object_size : object_size + AllocationMemento::kSize;
HeapObject raw_clone = isolate()->heap()->AllocateRawWithRetryOrFail(
- adjusted_object_size, NEW_SPACE);
+ adjusted_object_size, AllocationType::kYoung);
- SLOW_DCHECK(Heap::InNewSpace(raw_clone));
+ DCHECK(Heap::InYoungGeneration(raw_clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
Heap::CopyBlock(raw_clone->address(), source->address(), object_size);
@@ -2234,7 +2230,7 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
Handle<FixedArray> Factory::CopyAndTenureFixedCOWArray(
Handle<FixedArray> array) {
- DCHECK(Heap::InNewSpace(*array));
+ DCHECK(Heap::InYoungGeneration(*array));
Handle<FixedArray> result =
CopyFixedArrayUpTo(array, array->length(), TENURED);
@@ -2562,6 +2558,16 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
return prototype;
}
+Handle<WeakCell> Factory::NewWeakCell() {
+  // Allocate the WeakCell object in the old space, because 1) WeakCell
+  // weakness handling is only implemented in the old space, and 2) WeakCells
+  // are supposedly long-lived. TODO(marja, gsathya): Support WeakCells in the
+  // Scavenger.
+ Handle<WeakCell> result(WeakCell::cast(AllocateRawWithImmortalMap(
+ WeakCell::kSize, TENURED, *weak_cell_map())),
+ isolate());
+ return result;
+}
+
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
PretenureFlag pretenure) {
@@ -2705,8 +2711,7 @@ MaybeHandle<Code> Factory::TryNewCode(
const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
int32_t builtin_index, MaybeHandle<ByteArray> maybe_source_position_table,
MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
- bool is_turbofanned, int stack_slots, int safepoint_table_offset,
- int handler_table_offset) {
+ bool is_turbofanned, int stack_slots) {
// Allocate objects needed for code initialization.
Handle<ByteArray> reloc_info = NewByteArray(
desc.reloc_size,
@@ -2726,7 +2731,7 @@ MaybeHandle<Code> Factory::TryNewCode(
Heap* heap = isolate()->heap();
CodePageCollectionMemoryModificationScope code_allocation(heap);
HeapObject result =
- heap->AllocateRawWithLightRetry(object_size, CODE_SPACE);
+ heap->AllocateRawWithLightRetry(object_size, AllocationType::kCode);
// Return an empty handle if we cannot allocate the code object.
if (result.is_null()) return MaybeHandle<Code>();
@@ -2744,8 +2749,7 @@ MaybeHandle<Code> Factory::TryNewCode(
InitializeCode(heap, code, object_size, desc, kind, self_ref, builtin_index,
source_position_table, deopt_data, reloc_info,
- data_container, is_turbofanned, stack_slots,
- safepoint_table_offset, handler_table_offset);
+ data_container, is_turbofanned, stack_slots);
// Flush the instruction cache before changing the permissions.
// Note: we do this before setting permissions to ReadExecute because on
@@ -2762,8 +2766,7 @@ Handle<Code> Factory::NewCode(
const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
int32_t builtin_index, MaybeHandle<ByteArray> maybe_source_position_table,
MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
- bool is_turbofanned, int stack_slots, int safepoint_table_offset,
- int handler_table_offset) {
+ bool is_turbofanned, int stack_slots) {
// Allocate objects needed for code initialization.
Handle<ByteArray> reloc_info = NewByteArray(
desc.reloc_size,
@@ -2784,7 +2787,7 @@ Handle<Code> Factory::NewCode(
Heap* heap = isolate()->heap();
CodePageCollectionMemoryModificationScope code_allocation(heap);
HeapObject result =
- heap->AllocateRawWithRetryOrFail(object_size, CODE_SPACE);
+ heap->AllocateRawWithRetryOrFail(object_size, AllocationType::kCode);
if (movability == kImmovable) {
result = heap->EnsureImmovableCode(result, object_size);
}
@@ -2798,8 +2801,7 @@ Handle<Code> Factory::NewCode(
InitializeCode(heap, code, object_size, desc, kind, self_ref, builtin_index,
source_position_table, deopt_data, reloc_info,
- data_container, is_turbofanned, stack_slots,
- safepoint_table_offset, handler_table_offset);
+ data_container, is_turbofanned, stack_slots);
// Flush the instruction cache before changing the permissions.
// Note: we do this before setting permissions to ReadExecute because on
@@ -2831,17 +2833,15 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
const bool set_is_off_heap_trampoline = true;
const int stack_slots =
code->has_safepoint_info() ? code->stack_slots() : 0;
+ result->code_data_container()->set_kind_specific_flags(
+ code->code_data_container()->kind_specific_flags());
result->initialize_flags(code->kind(), code->has_unwinding_info(),
code->is_turbofanned(), stack_slots,
set_is_off_heap_trampoline);
result->set_builtin_index(code->builtin_index());
+ result->set_safepoint_table_offset(code->safepoint_table_offset());
result->set_handler_table_offset(code->handler_table_offset());
- result->code_data_container()->set_kind_specific_flags(
- code->code_data_container()->kind_specific_flags());
result->set_constant_pool_offset(code->constant_pool_offset());
- if (code->has_safepoint_info()) {
- result->set_safepoint_table_offset(code->safepoint_table_offset());
- }
result->set_code_comments_offset(code->code_comments_offset());
// Replace the newly generated trampoline's RelocInfo ByteArray with the
@@ -2872,7 +2872,8 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
{
int obj_size = code->Size();
CodePageCollectionMemoryModificationScope code_allocation(heap);
- HeapObject result = heap->AllocateRawWithRetryOrFail(obj_size, CODE_SPACE);
+ HeapObject result =
+ heap->AllocateRawWithRetryOrFail(obj_size, AllocationType::kCode);
// Copy code object.
Address old_addr = code->address();
@@ -3468,8 +3469,7 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
isolate(), prototype,
JSReceiver::GetPrototype(isolate(), target_function), JSBoundFunction);
- SaveContext save(isolate());
- isolate()->set_context(*target_function->GetCreationContext());
+ SaveAndSwitchContext save(isolate(), *target_function->GetCreationContext());
// Create the [[BoundArguments]] for the result.
Handle<FixedArray> bound_arguments;
@@ -3658,7 +3658,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
}
share->set_script_or_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
#if V8_SFI_HAS_UNIQUE_ID
- share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
+ Handle<SharedFunctionInfoWithID>::cast(share)->set_unique_id(
+ isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
// Set integer fields (smi or int, depending on the architecture).
@@ -3779,6 +3780,14 @@ Handle<String> Factory::NumberToString(Smi number, bool check_cache) {
check_cache);
}
+Handle<ClassPositions> Factory::NewClassPositions(int start, int end) {
+ Handle<ClassPositions> class_positions =
+ Handle<ClassPositions>::cast(NewStruct(CLASS_POSITIONS_TYPE, TENURED));
+ class_positions->set_start(start);
+ class_positions->set_end(end);
+ return class_positions;
+}
+
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
DCHECK(!shared->HasDebugInfo());
Heap* heap = isolate()->heap();
@@ -3834,6 +3843,20 @@ Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
return new_break_point;
}
+Handle<StackTraceFrame> Factory::NewStackTraceFrame(
+ Handle<FrameArray> frame_array, int index) {
+ Handle<StackTraceFrame> frame = Handle<StackTraceFrame>::cast(
+ NewStruct(STACK_TRACE_FRAME_TYPE, NOT_TENURED));
+ frame->set_frame_array(*frame_array);
+ frame->set_frame_index(index);
+ frame->set_frame_info(*undefined_value());
+
+ int id = isolate()->last_stack_frame_info_id() + 1;
+ isolate()->set_last_stack_frame_info_id(id);
+ frame->set_id(id);
+ return frame;
+}
+
Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
Handle<StackFrameInfo> stack_frame_info = Handle<StackFrameInfo>::cast(
NewStruct(STACK_FRAME_INFO_TYPE, NOT_TENURED));
@@ -3847,6 +3870,51 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
return stack_frame_info;
}
+Handle<StackFrameInfo> Factory::NewStackFrameInfo(
+ Handle<FrameArray> frame_array, int index) {
+ FrameArrayIterator it(isolate(), frame_array, index);
+ DCHECK(it.HasFrame());
+
+ Handle<StackFrameInfo> info = Handle<StackFrameInfo>::cast(
+ NewStruct(STACK_FRAME_INFO_TYPE, NOT_TENURED));
+ info->set_flag(0);
+
+ const bool is_wasm = frame_array->IsAnyWasmFrame(index);
+ info->set_is_wasm(is_wasm);
+
+  // Line numbers are 1-based; for Wasm frames we need to adjust.
+ int line = it.Frame()->GetLineNumber();
+ if (is_wasm && line >= 0) line++;
+ info->set_line_number(line);
+
+ // Column numbers are 1-based. For Wasm we use the position
+ // as the iterator does not currently provide a column number.
+ const int column =
+ is_wasm ? it.Frame()->GetPosition() + 1 : it.Frame()->GetColumnNumber();
+ info->set_column_number(column);
+
+ info->set_script_id(it.Frame()->GetScriptId());
+ info->set_script_name(*it.Frame()->GetFileName());
+ info->set_script_name_or_source_url(*it.Frame()->GetScriptNameOrSourceUrl());
+
+  // TODO(szuend): Adjust this once it is decided what name to use in both
+  // "simple" and "detailed" stack traces. This code is for backwards
+  // compatibility to fulfill test expectations.
+ auto function_name = it.Frame()->GetFunctionName();
+ if (!is_wasm) {
+ Handle<Object> function = it.Frame()->GetFunction();
+ if (function->IsJSFunction()) {
+ function_name =
+ JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
+ }
+ }
+ info->set_function_name(*function_name);
+ info->set_is_eval(it.Frame()->IsEval());
+ info->set_is_constructor(it.Frame()->IsConstructor());
+
+ return info;
+}
+
Handle<SourcePositionTableWithFrameCache>
Factory::NewSourcePositionTableWithFrameCache(
Handle<ByteArray> source_position_table,
@@ -3874,11 +3942,13 @@ Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
Handle<JSObject> result = NewJSObjectFromMap(map);
Handle<Smi> value(Smi::FromInt(length), isolate());
Object::SetProperty(isolate(), result, length_string(), value,
- LanguageMode::kStrict)
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.Assert();
if (!strict_mode_callee) {
Object::SetProperty(isolate(), result, callee_string(), callee,
- LanguageMode::kStrict)
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.Assert();
}
return result;
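
The new Factory::NewStackFrameInfo above normalizes frame positions to the
1-based convention: Wasm frames report 0-based line numbers and raw byte
positions, so both are bumped by one. A minimal self-contained sketch of that
adjustment (helper names are hypothetical, not part of this patch):

    // Hypothetical helpers mirroring the adjustment in NewStackFrameInfo.
    int ToOneBasedLine(bool is_wasm, int raw_line) {
      // Negative lines mean "unknown" and pass through unchanged.
      return (is_wasm && raw_line >= 0) ? raw_line + 1 : raw_line;
    }
    int ToOneBasedColumn(bool is_wasm, int position, int raw_column) {
      // Wasm frames have no real column; the byte position stands in for it.
      return is_wasm ? position + 1 : raw_column;
    }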
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index abdca3807a..3ac69cb44d 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -8,6 +8,7 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "src/builtins/builtins.h"
+#include "src/function-kind.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/heap/heap.h"
@@ -15,7 +16,6 @@
#include "src/messages.h"
#include "src/objects/code.h"
#include "src/objects/dictionary.h"
-#include "src/objects/hash-table.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
#include "src/objects/ordered-hash-table.h"
@@ -38,6 +38,7 @@ class ArrayBoilerplateDescription;
class CoverageInfo;
class DebugInfo;
class EnumCache;
+class FinalizationGroupCleanupJobTask;
class FreshlyAllocatedBigInt;
class Isolate;
class JSDataView;
@@ -60,12 +61,13 @@ class PromiseResolveThenableJobTask;
class RegExpMatchInfo;
class ScriptContextTable;
class StackFrameInfo;
+class StackTraceFrame;
class StoreHandler;
class TemplateObjectDescription;
class UncompiledDataWithoutPreparseData;
class UncompiledDataWithPreparseData;
class WasmExportedFunctionData;
-class WeakFactoryCleanupJobTask;
+class WeakCell;
struct SourceRange;
template <typename T>
class ZoneVector;
@@ -437,7 +439,11 @@ class V8_EXPORT_PRIVATE Factory {
Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
Handle<BreakPoint> NewBreakPoint(int id, Handle<String> condition);
+ Handle<StackTraceFrame> NewStackTraceFrame(Handle<FrameArray> frame_array,
+ int index);
Handle<StackFrameInfo> NewStackFrameInfo();
+ Handle<StackFrameInfo> NewStackFrameInfo(Handle<FrameArray> frame_array,
+ int index);
Handle<SourcePositionTableWithFrameCache>
NewSourcePositionTableWithFrameCache(
Handle<ByteArray> source_position_table,
@@ -451,8 +457,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<PromiseResolveThenableJobTask> NewPromiseResolveThenableJobTask(
Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
Handle<JSReceiver> thenable, Handle<Context> context);
- Handle<WeakFactoryCleanupJobTask> NewWeakFactoryCleanupJobTask(
- Handle<JSWeakFactory> weak_factory);
+ Handle<FinalizationGroupCleanupJobTask> NewFinalizationGroupCleanupJobTask(
+ Handle<JSFinalizationGroup> finalization_group);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
@@ -485,7 +491,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<DescriptorArray> NewDescriptorArray(
int number_of_entries, int slack = 0,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType type = AllocationType::kYoung);
Handle<TransitionArray> NewTransitionArray(int number_of_transitions,
int slack = 0);
@@ -508,6 +514,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
+ Handle<WeakCell> NewWeakCell();
+
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
Handle<JSObject> CopyJSObject(Handle<JSObject> object);
@@ -781,9 +789,7 @@ class V8_EXPORT_PRIVATE Factory {
MaybeHandle<DeoptimizationData> maybe_deopt_data =
MaybeHandle<DeoptimizationData>(),
Movability movability = kMovable,
- bool is_turbofanned = false, int stack_slots = 0,
- int safepoint_table_offset = 0,
- int handler_table_offset = 0);
+ bool is_turbofanned = false, int stack_slots = 0);
// Like NewCode, this function allocates a new code object (fully
// initialized). It may return an empty handle if the allocation does not
@@ -796,8 +802,7 @@ class V8_EXPORT_PRIVATE Factory {
MaybeHandle<DeoptimizationData> maybe_deopt_data =
MaybeHandle<DeoptimizationData>(),
Movability movability = kMovable, bool is_turbofanned = false,
- int stack_slots = 0, int safepoint_table_offset = 0,
- int handler_table_offset = 0);
+ int stack_slots = 0);
// Allocates a new code object and initializes it as the trampoline to the
// given off-heap entry point.
@@ -888,6 +893,7 @@ class V8_EXPORT_PRIVATE Factory {
MessageTemplate message, Handle<Object> argument, int start_position,
int end_position, Handle<Script> script, Handle<Object> stack_frames);
+ Handle<ClassPositions> NewClassPositions(int start, int end);
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
Handle<CoverageInfo> NewCoverageInfo(const ZoneVector<SourceRange>& slots);
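
NewDescriptorArray in this header swaps its PretenureFlag parameter for the
new AllocationType enum. A hedged usage sketch of the changed signature (the
call sites are illustrative, not from this patch):

    // Before this patch: factory->NewDescriptorArray(nof, slack, TENURED);
    // After: the target generation is named explicitly.
    Handle<DescriptorArray> old_gen =
        factory->NewDescriptorArray(nof, slack, AllocationType::kOld);
    // The default remains a young-generation allocation.
    Handle<DescriptorArray> young = factory->NewDescriptorArray(nof);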
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 1e10e81ddb..a3bf1aa167 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -17,27 +17,6 @@ const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
const size_t GCIdleTimeHandler::kMinTimeForOverApproximatingWeakClosureInMs = 1;
-void GCIdleTimeAction::Print() {
- switch (type) {
- case DONE:
- PrintF("done");
- break;
- case DO_NOTHING:
- PrintF("no action");
- break;
- case DO_INCREMENTAL_STEP:
- PrintF("incremental step");
- if (additional_work) {
- PrintF("; finalized marking");
- }
- break;
- case DO_FULL_GC:
- PrintF("full GC");
- break;
- }
-}
-
-
void GCIdleTimeHeapState::Print() {
PrintF("contexts_disposed=%d ", contexts_disposed);
PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
@@ -96,19 +75,6 @@ bool GCIdleTimeHandler::ShouldDoOverApproximateWeakClosure(
}
-GCIdleTimeAction GCIdleTimeHandler::NothingOrDone(double idle_time_in_ms) {
- if (idle_time_in_ms >= kMinBackgroundIdleTime) {
- return GCIdleTimeAction::Nothing();
- }
- if (idle_times_which_made_no_progress_ >= kMaxNoProgressIdleTimes) {
- return GCIdleTimeAction::Done();
- } else {
- idle_times_which_made_no_progress_++;
- return GCIdleTimeAction::Nothing();
- }
-}
-
-
// The following logic is implemented by the controller:
// (1) If we don't have any idle time, do nothing, unless a context was
// disposed, incremental marking is stopped, and the heap is small. Then do
@@ -128,25 +94,17 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
if (ShouldDoContextDisposalMarkCompact(heap_state.contexts_disposed,
heap_state.contexts_disposal_rate,
heap_state.size_of_objects)) {
- return GCIdleTimeAction::FullGC();
+ return GCIdleTimeAction::kFullGC;
}
}
- return GCIdleTimeAction::Nothing();
- }
-
- // We are in a context disposal GC scenario. Don't do anything if we do not
- // get the right idle signal.
- if (ShouldDoContextDisposalMarkCompact(heap_state.contexts_disposed,
- heap_state.contexts_disposal_rate,
- heap_state.size_of_objects)) {
- return NothingOrDone(idle_time_in_ms);
+ return GCIdleTimeAction::kDone;
}
- if (!FLAG_incremental_marking || heap_state.incremental_marking_stopped) {
- return GCIdleTimeAction::Done();
+ if (FLAG_incremental_marking && !heap_state.incremental_marking_stopped) {
+ return GCIdleTimeAction::kIncrementalStep;
}
- return GCIdleTimeAction::IncrementalStep();
+ return GCIdleTimeAction::kDone;
}
bool GCIdleTimeHandler::Enabled() { return FLAG_incremental_marking; }
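
With GCIdleTimeAction reduced to a plain enum class (see the header diff just
below), Compute's callers now branch over three values instead of inspecting a
struct. An illustrative caller, assuming a handler and heap_state are in scope
(not from this patch):

    GCIdleTimeAction action = handler.Compute(idle_time_in_ms, heap_state);
    switch (action) {
      case GCIdleTimeAction::kDone:
        break;  // Stop posting idle tasks; no further progress is expected.
      case GCIdleTimeAction::kIncrementalStep:
        break;  // Advance incremental marking within the idle deadline.
      case GCIdleTimeAction::kFullGC:
        break;  // Context-disposal scenario: perform a full mark-compact.
    }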
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index 722710e11a..5781f44d87 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -10,51 +10,12 @@
namespace v8 {
namespace internal {
-enum GCIdleTimeActionType {
- DONE,
- DO_NOTHING,
- DO_INCREMENTAL_STEP,
- DO_FULL_GC,
+enum class GCIdleTimeAction : uint8_t {
+ kDone,
+ kIncrementalStep,
+ kFullGC,
};
-
-class GCIdleTimeAction {
- public:
- static GCIdleTimeAction Done() {
- GCIdleTimeAction result;
- result.type = DONE;
- result.additional_work = false;
- return result;
- }
-
- static GCIdleTimeAction Nothing() {
- GCIdleTimeAction result;
- result.type = DO_NOTHING;
- result.additional_work = false;
- return result;
- }
-
- static GCIdleTimeAction IncrementalStep() {
- GCIdleTimeAction result;
- result.type = DO_INCREMENTAL_STEP;
- result.additional_work = false;
- return result;
- }
-
- static GCIdleTimeAction FullGC() {
- GCIdleTimeAction result;
- result.type = DO_FULL_GC;
- result.additional_work = false;
- return result;
- }
-
- void Print();
-
- GCIdleTimeActionType type;
- bool additional_work;
-};
-
-
class GCIdleTimeHeapState {
public:
void Print();
@@ -117,20 +78,13 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
static const size_t kMinTimeForOverApproximatingWeakClosureInMs;
- // Number of times we will return a Nothing action in the current mode
- // despite having idle time available before we returning a Done action to
- // ensure we don't keep scheduling idle tasks and making no progress.
- static const int kMaxNoProgressIdleTimes = 10;
-
- GCIdleTimeHandler() : idle_times_which_made_no_progress_(0) {}
+ GCIdleTimeHandler() = default;
GCIdleTimeAction Compute(double idle_time_in_ms,
GCIdleTimeHeapState heap_state);
bool Enabled();
- void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }
-
static size_t EstimateMarkingStepSize(double idle_time_in_ms,
double marking_speed_in_bytes_per_ms);
@@ -148,11 +102,6 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
static bool ShouldDoOverApproximateWeakClosure(double idle_time_in_ms);
private:
- GCIdleTimeAction NothingOrDone(double idle_time_in_ms);
-
- // Idle notifications with no progress.
- int idle_times_which_made_no_progress_;
-
DISALLOW_COPY_AND_ASSIGN(GCIdleTimeHandler);
};
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index c3f7ff3029..423dc66a2e 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -115,8 +115,8 @@ GCTracer::Event::Event(Type type, GarbageCollectionReason gc_reason,
end_memory_size(0),
start_holes_size(0),
end_holes_size(0),
- new_space_object_size(0),
- survived_new_space_object_size(0),
+ young_object_size(0),
+ survived_young_object_size(0),
incremental_marking_bytes(0),
incremental_marking_duration(0.0) {
for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
@@ -246,7 +246,8 @@ void GCTracer::Start(GarbageCollector collector,
current_.start_object_size = heap_->SizeOfObjects();
current_.start_memory_size = heap_->memory_allocator()->Size();
current_.start_holes_size = CountTotalHolesSize(heap_);
- current_.new_space_object_size = heap_->new_space()->Size();
+ current_.young_object_size =
+ heap_->new_space()->Size() + heap_->new_lo_space()->SizeOfObjects();
current_.incremental_marking_bytes = 0;
current_.incremental_marking_duration = 0;
@@ -299,7 +300,7 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.end_object_size = heap_->SizeOfObjects();
current_.end_memory_size = heap_->memory_allocator()->Size();
current_.end_holes_size = CountTotalHolesSize(heap_);
- current_.survived_new_space_object_size = heap_->SurvivedNewSpaceObjectSize();
+ current_.survived_young_object_size = heap_->SurvivedYoungObjectSize();
AddAllocation(current_.end_time);
@@ -309,9 +310,9 @@ void GCTracer::Stop(GarbageCollector collector) {
case Event::SCAVENGER:
case Event::MINOR_MARK_COMPACTOR:
recorded_minor_gcs_total_.Push(
- MakeBytesAndDuration(current_.new_space_object_size, duration));
- recorded_minor_gcs_survived_.Push(MakeBytesAndDuration(
- current_.survived_new_space_object_size, duration));
+ MakeBytesAndDuration(current_.young_object_size, duration));
+ recorded_minor_gcs_survived_.Push(
+ MakeBytesAndDuration(current_.survived_young_object_size, duration));
FetchBackgroundMinorGCCounters();
break;
case Event::INCREMENTAL_MARK_COMPACTOR:
@@ -327,7 +328,7 @@ void GCTracer::Stop(GarbageCollector collector) {
RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
current_.incremental_marking_duration);
recorded_incremental_mark_compacts_.Push(
- MakeBytesAndDuration(current_.start_object_size, duration));
+ MakeBytesAndDuration(current_.end_object_size, duration));
RecordGCSumCounters(duration);
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
@@ -339,7 +340,7 @@ void GCTracer::Stop(GarbageCollector collector) {
RecordMutatorUtilization(
current_.end_time, duration + current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
- MakeBytesAndDuration(current_.start_object_size, duration));
+ MakeBytesAndDuration(current_.end_object_size, duration));
RecordGCSumCounters(duration);
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
@@ -958,9 +959,15 @@ double GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const {
}
double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
+ const double kMinimumMarkingSpeed = 0.5;
+ if (combined_mark_compact_speed_cache_ > 0)
+ return combined_mark_compact_speed_cache_;
+  // MarkCompact speed is more stable than incremental marking speed because
+  // concurrent marking can leave only a few incremental marking steps to
+  // measure.
+ combined_mark_compact_speed_cache_ = MarkCompactSpeedInBytesPerMillisecond();
if (combined_mark_compact_speed_cache_ > 0)
return combined_mark_compact_speed_cache_;
- const double kMinimumMarkingSpeed = 0.5;
double speed1 = IncrementalMarkingSpeedInBytesPerMillisecond();
double speed2 = FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
if (speed1 < kMinimumMarkingSpeed || speed2 < kMinimumMarkingSpeed) {
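
The reordered CombinedMarkCompactSpeedInBytesPerMillisecond above now prefers
the recorded mark-compact speed and only combines the incremental speeds as a
fallback. A reduced, self-contained sketch of the new caching order (stubbed
speeds, not the literal implementation):

    class SpeedCache {
     public:
      double Combined() {
        if (cache_ > 0) return cache_;       // Reuse a previously computed value.
        cache_ = MarkCompactSpeed();         // Preferred: the more stable signal.
        if (cache_ > 0) return cache_;
        return CombinedIncrementalSpeeds();  // Fallback when nothing is recorded.
      }

     private:
      double MarkCompactSpeed() { return 0.0; }           // Stub.
      double CombinedIncrementalSpeeds() { return 0.0; }  // Stub.
      double cache_ = 0.0;
    };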
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 4b4736048c..0ad1f59b41 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -179,11 +179,11 @@ class V8_EXPORT_PRIVATE GCTracer {
// after the current GC.
size_t end_holes_size;
- // Size of new space objects in constructor.
- size_t new_space_object_size;
+ // Size of young objects in constructor.
+ size_t young_object_size;
- // Size of survived new space objects in destructor.
- size_t survived_new_space_object_size;
+ // Size of survived young objects in destructor.
+ size_t survived_young_object_size;
// Bytes marked incrementally for INCREMENTAL_MARK_COMPACTOR
size_t incremental_marking_bytes;
diff --git a/deps/v8/src/heap/heap-controller.cc b/deps/v8/src/heap/heap-controller.cc
index d515199518..41ffa7b1a3 100644
--- a/deps/v8/src/heap/heap-controller.cc
+++ b/deps/v8/src/heap/heap-controller.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "src/heap/heap-controller.h"
+
+#include "src/heap/spaces.h"
#include "src/isolate-inl.h"
namespace v8 {
@@ -73,7 +75,7 @@ size_t MemoryController::CalculateAllocationLimit(
double factor = GrowingFactor(gc_speed, mutator_speed, max_factor);
if (FLAG_trace_gc_verbose) {
- heap_->isolate()->PrintWithTimestamp(
+ Isolate::FromHeap(heap_)->PrintWithTimestamp(
"%s factor %.1f based on mu=%.3f, speed_ratio=%.f "
"(gc=%.f, mutator=%.f)\n",
ControllerName(), factor, target_mutator_utilization_,
@@ -104,7 +106,7 @@ size_t MemoryController::CalculateAllocationLimit(
size_t result = static_cast<size_t>(Min(limit, halfway_to_the_max));
if (FLAG_trace_gc_verbose) {
- heap_->isolate()->PrintWithTimestamp(
+ Isolate::FromHeap(heap_)->PrintWithTimestamp(
"%s Limit: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
ControllerName(), curr_size / KB, result / KB, factor);
}
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index d617d9f9ac..b143a33af5 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -34,17 +34,12 @@
#include "src/objects/property-cell.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
+#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/string-hasher.h"
#include "src/zone/zone-list-inl.h"
-// The following header includes the write barrier essentials that can also be
-// used stand-alone without including heap-inl.h.
-// TODO(mlippautz): Remove once users of object-macros.h include this file on
-// their own.
-#include "src/heap/heap-write-barrier-inl.h"
-
namespace v8 {
namespace internal {
@@ -124,6 +119,11 @@ void Heap::SetMessageListeners(TemplateList value) {
roots_table()[RootIndex::kMessageListeners] = value->ptr();
}
+void Heap::SetPendingOptimizeForTestBytecode(Object bytecode) {
+ DCHECK(bytecode->IsBytecodeArray() || bytecode->IsUndefined(isolate()));
+ roots_table()[RootIndex::kPendingOptimizeForTestBytecode] = bytecode->ptr();
+}
+
PagedSpace* Heap::paged_space(int idx) {
DCHECK_NE(idx, LO_SPACE);
DCHECK_NE(idx, NEW_SPACE);
@@ -158,7 +158,7 @@ size_t Heap::NewSpaceAllocationCounter() {
return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}
-AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
+AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
@@ -166,7 +166,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
- return AllocationResult::Retry(space);
+ return AllocationResult::Retry();
}
}
#endif
@@ -178,44 +178,36 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
HeapObject object;
AllocationResult allocation;
- if (NEW_SPACE == space) {
+
+ if (AllocationType::kYoung == type) {
if (large_object) {
- // TODO(hpayer): Implement a LO tenuring strategy.
- space = FLAG_young_generation_large_objects ? NEW_LO_SPACE : LO_SPACE;
+ if (FLAG_young_generation_large_objects) {
+ allocation = new_lo_space_->AllocateRaw(size_in_bytes);
+ } else {
+        // If young generation large objects are disabled, we have to tenure
+        // the allocation and violate the given allocation type. This could be
+        // dangerous. We may want to remove
+        // FLAG_young_generation_large_objects and avoid patching.
+ allocation = lo_space_->AllocateRaw(size_in_bytes);
+ }
} else {
allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
- if (allocation.To(&object)) {
- OnAllocationEvent(object, size_in_bytes);
- }
- return allocation;
}
- }
-
- // Here we only allocate in the old generation.
- if (OLD_SPACE == space) {
+ } else if (AllocationType::kOld == type) {
if (large_object) {
allocation = lo_space_->AllocateRaw(size_in_bytes);
} else {
allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
}
- } else if (CODE_SPACE == space) {
+ } else if (AllocationType::kCode == type) {
if (size_in_bytes <= code_space()->AreaSize() && !large_object) {
allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
} else {
allocation = code_lo_space_->AllocateRaw(size_in_bytes);
}
- } else if (LO_SPACE == space) {
- DCHECK(large_object);
- allocation = lo_space_->AllocateRaw(size_in_bytes);
- } else if (NEW_LO_SPACE == space) {
- DCHECK(FLAG_young_generation_large_objects);
- allocation = new_lo_space_->AllocateRaw(size_in_bytes);
- } else if (CODE_LO_SPACE == space) {
- DCHECK(large_object);
- allocation = code_lo_space_->AllocateRaw(size_in_bytes);
- } else if (MAP_SPACE == space) {
+ } else if (AllocationType::kMap == type) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
- } else if (RO_SPACE == space) {
+ } else if (AllocationType::kReadOnly == type) {
#ifdef V8_USE_SNAPSHOT
DCHECK(isolate_->serializer_enabled());
#endif
@@ -223,12 +215,11 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
DCHECK(CanAllocateInReadOnlySpace());
allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
} else {
- // NEW_SPACE is not allowed here.
UNREACHABLE();
}
if (allocation.To(&object)) {
- if (space == CODE_SPACE) {
+ if (AllocationType::kCode == type) {
// Unprotect the memory chunk of the object if it was not unprotected
// already.
UnprotectAndRegisterMemoryChunk(object);
@@ -300,19 +291,6 @@ void Heap::RegisterExternalString(String string) {
external_string_table_.AddString(string);
}
-void Heap::UpdateExternalString(String string, size_t old_payload,
- size_t new_payload) {
- DCHECK(string->IsExternalString());
- Page* page = Page::FromHeapObject(string);
-
- if (old_payload > new_payload)
- page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kExternalString, old_payload - new_payload);
- else
- page->IncrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kExternalString, new_payload - old_payload);
-}
-
void Heap::FinalizeExternalString(String string) {
DCHECK(string->IsExternalString());
Page* page = Page::FromHeapObject(string);
@@ -335,67 +313,65 @@ void Heap::FinalizeExternalString(String string) {
Address Heap::NewSpaceTop() { return new_space_->top(); }
-// static
-bool Heap::InNewSpace(Object object) {
+bool Heap::InYoungGeneration(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
- return object->IsHeapObject() && InNewSpace(HeapObject::cast(object));
+ return object->IsHeapObject() && InYoungGeneration(HeapObject::cast(object));
}
// static
-bool Heap::InNewSpace(MaybeObject object) {
+bool Heap::InYoungGeneration(MaybeObject object) {
HeapObject heap_object;
- return object->GetHeapObject(&heap_object) && InNewSpace(heap_object);
+ return object->GetHeapObject(&heap_object) && InYoungGeneration(heap_object);
}
// static
-bool Heap::InNewSpace(HeapObject heap_object) {
- // Inlined check from NewSpace::Contains.
- bool result = MemoryChunk::FromHeapObject(heap_object)->InNewSpace();
+bool Heap::InYoungGeneration(HeapObject heap_object) {
+ bool result = MemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
#ifdef DEBUG
- // If in NEW_SPACE, then check we're either not in the middle of GC or the
- // object is in to-space.
+ // If in the young generation, then check we're either not in the middle of
+ // GC or the object is in to-space.
if (result) {
- // If the object is in NEW_SPACE, then it's not in RO_SPACE so this is safe.
+ // If the object is in the young generation, then it's not in RO_SPACE so
+ // this is safe.
Heap* heap = Heap::FromWritableHeapObject(heap_object);
- DCHECK(heap->gc_state_ != NOT_IN_GC || InToSpace(heap_object));
+ DCHECK_IMPLIES(heap->gc_state_ == NOT_IN_GC, InToPage(heap_object));
}
#endif
return result;
}
// static
-bool Heap::InFromSpace(Object object) {
+bool Heap::InFromPage(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
- return object->IsHeapObject() && InFromSpace(HeapObject::cast(object));
+ return object->IsHeapObject() && InFromPage(HeapObject::cast(object));
}
// static
-bool Heap::InFromSpace(MaybeObject object) {
+bool Heap::InFromPage(MaybeObject object) {
HeapObject heap_object;
- return object->GetHeapObject(&heap_object) && InFromSpace(heap_object);
+ return object->GetHeapObject(&heap_object) && InFromPage(heap_object);
}
// static
-bool Heap::InFromSpace(HeapObject heap_object) {
- return MemoryChunk::FromHeapObject(heap_object)
- ->IsFlagSet(Page::IN_FROM_SPACE);
+bool Heap::InFromPage(HeapObject heap_object) {
+ return MemoryChunk::FromHeapObject(heap_object)->IsFromPage();
}
// static
-bool Heap::InToSpace(Object object) {
+bool Heap::InToPage(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
- return object->IsHeapObject() && InToSpace(HeapObject::cast(object));
+ return object->IsHeapObject() && InToPage(HeapObject::cast(object));
}
// static
-bool Heap::InToSpace(MaybeObject object) {
+bool Heap::InToPage(MaybeObject object) {
HeapObject heap_object;
- return object->GetHeapObject(&heap_object) && InToSpace(heap_object);
+ return object->GetHeapObject(&heap_object) && InToPage(heap_object);
}
// static
-bool Heap::InToSpace(HeapObject heap_object) {
- return MemoryChunk::FromHeapObject(heap_object)->IsFlagSet(Page::IN_TO_SPACE);
+bool Heap::InToPage(HeapObject heap_object) {
+ return MemoryChunk::FromHeapObject(heap_object)->IsToPage();
}
bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
@@ -426,8 +402,7 @@ bool Heap::ShouldBePromoted(Address old_address) {
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
DCHECK(IsAligned(byte_size, kTaggedSize));
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
- CopyWords(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
+ CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
}
template <Heap::FindMementoMode mode>
@@ -495,11 +470,13 @@ AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
void Heap::UpdateAllocationSite(Map map, HeapObject object,
PretenuringFeedbackMap* pretenuring_feedback) {
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
- DCHECK(InFromSpace(object) ||
- (InToSpace(object) && Page::FromHeapObject(object)->IsFlagSet(
- Page::PAGE_NEW_NEW_PROMOTION)) ||
- (!InNewSpace(object) && Page::FromHeapObject(object)->IsFlagSet(
- Page::PAGE_NEW_OLD_PROMOTION)));
+#ifdef DEBUG
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ DCHECK_IMPLIES(chunk->IsToPage(),
+ chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
+ DCHECK_IMPLIES(!chunk->InYoungGeneration(),
+ chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
+#endif
if (!FLAG_allocation_site_pretenuring ||
!AllocationSite::CanTrack(map->instance_type())) {
return;
@@ -519,10 +496,10 @@ void Heap::ExternalStringTable::AddString(String string) {
DCHECK(string->IsExternalString());
DCHECK(!Contains(string));
- if (InNewSpace(string)) {
- new_space_strings_.push_back(string);
+ if (InYoungGeneration(string)) {
+ young_strings_.push_back(string);
} else {
- old_space_strings_.push_back(string);
+ old_strings_.push_back(string);
}
}
@@ -531,14 +508,6 @@ Oddball Heap::ToBoolean(bool condition) {
return condition ? roots.true_value() : roots.false_value();
}
-uint64_t Heap::HashSeed() {
- uint64_t seed;
- ReadOnlyRoots(this).hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed),
- kInt64Size);
- DCHECK(FLAG_randomize_hashes || seed == 0);
- return seed;
-}
-
int Heap::NextScriptId() {
int last_id = last_script_id()->value();
if (last_id == Smi::kMaxValue) last_id = v8::UnboundScript::kNoScriptId;
@@ -588,11 +557,13 @@ void Heap::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
base::CheckedDecrement(&backing_store_bytes_, amount);
}
-AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
- : heap_(isolate->heap()) {
+AlwaysAllocateScope::AlwaysAllocateScope(Heap* heap) : heap_(heap) {
heap_->always_allocate_scope_count_++;
}
+AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
+ : AlwaysAllocateScope(isolate->heap()) {}
+
AlwaysAllocateScope::~AlwaysAllocateScope() {
heap_->always_allocate_scope_count_--;
}
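
The rewritten Heap::AllocateRaw above dispatches on AllocationType rather than
on a caller-chosen AllocationSpace, folding the large-object fallbacks into the
heap itself. A self-contained sketch of that dispatch shape (simplified to
strings; the enum members match the patch):

    enum class AllocationType : uint8_t { kYoung, kOld, kCode, kMap, kReadOnly };

    // Which space a request lands in, mirroring the branch structure above.
    const char* SpaceFor(AllocationType type, bool large_object) {
      switch (type) {
        case AllocationType::kYoung:
          return large_object ? "new_lo_space (or lo_space if the flag is off)"
                              : "new_space";
        case AllocationType::kOld:
          return large_object ? "lo_space" : "old_space";
        case AllocationType::kCode:
          return large_object ? "code_lo_space" : "code_space";
        case AllocationType::kMap:
          return "map_space";
        case AllocationType::kReadOnly:
          return "read_only_space";
      }
      return nullptr;  // Unreachable with a valid enum value.
    }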
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index a8137ddee4..63d16ca82d 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -11,8 +11,11 @@
#include "src/heap/heap-write-barrier.h"
#include "src/globals.h"
-#include "src/heap/heap.h"
+// TODO(jkummerow): Get rid of this by moving GetIsolateFromWritableObject
+// elsewhere.
+#include "src/isolate.h"
#include "src/objects/code.h"
+#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/slots.h"
@@ -20,17 +23,51 @@
namespace v8 {
namespace internal {
+// Defined in heap.cc.
+V8_EXPORT_PRIVATE bool Heap_PageFlagsAreConsistent(HeapObject object);
+V8_EXPORT_PRIVATE void Heap_GenerationalBarrierSlow(HeapObject object,
+ Address slot,
+ HeapObject value);
+V8_EXPORT_PRIVATE void Heap_MarkingBarrierSlow(HeapObject object, Address slot,
+ HeapObject value);
+V8_EXPORT_PRIVATE void Heap_WriteBarrierForCodeSlow(Code host);
+V8_EXPORT_PRIVATE void Heap_GenerationalBarrierForCodeSlow(Code host,
+ RelocInfo* rinfo,
+ HeapObject object);
+V8_EXPORT_PRIVATE void Heap_MarkingBarrierForCodeSlow(Code host,
+ RelocInfo* rinfo,
+ HeapObject object);
+V8_EXPORT_PRIVATE void Heap_GenerationalBarrierForElementsSlow(Heap* heap,
+ FixedArray array,
+ int offset,
+ int length);
+V8_EXPORT_PRIVATE void Heap_MarkingBarrierForElementsSlow(Heap* heap,
+ HeapObject object);
+V8_EXPORT_PRIVATE void Heap_MarkingBarrierForDescriptorArraySlow(
+ Heap* heap, HeapObject host, HeapObject descriptor_array,
+ int number_of_own_descriptors);
+
// Do not use these internal details anywhere outside of this file. These
// internals are only intended to shortcut write barrier checks.
namespace heap_internals {
+struct Space {
+ static constexpr uintptr_t kIdOffset = 9 * kSystemPointerSize;
+ V8_INLINE AllocationSpace identity() {
+ return *reinterpret_cast<AllocationSpace*>(reinterpret_cast<Address>(this) +
+ kIdOffset);
+ }
+};
+
struct MemoryChunk {
static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
static constexpr uintptr_t kHeapOffset =
kFlagsOffset + kUIntptrSize + 4 * kSystemPointerSize;
+ static constexpr uintptr_t kOwnerOffset =
+ kHeapOffset + 2 * kSystemPointerSize;
static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
- static constexpr uintptr_t kFromSpaceBit = uintptr_t{1} << 3;
- static constexpr uintptr_t kToSpaceBit = uintptr_t{1} << 4;
+ static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
+ static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
HeapObject object) {
@@ -39,9 +76,9 @@ struct MemoryChunk {
V8_INLINE bool IsMarking() const { return GetFlags() & kMarkingBit; }
- V8_INLINE bool InNewSpace() const {
- constexpr uintptr_t kNewSpaceMask = kFromSpaceBit | kToSpaceBit;
- return GetFlags() & kNewSpaceMask;
+ V8_INLINE bool InYoungGeneration() const {
+ constexpr uintptr_t kYoungGenerationMask = kFromPageBit | kToPageBit;
+ return GetFlags() & kYoungGenerationMask;
}
V8_INLINE uintptr_t GetFlags() const {
@@ -55,30 +92,37 @@ struct MemoryChunk {
SLOW_DCHECK(heap != nullptr);
return heap;
}
+
+ V8_INLINE Space* GetOwner() {
+ return *reinterpret_cast<Space**>(reinterpret_cast<Address>(this) +
+ kOwnerOffset);
+ }
};
inline void GenerationalBarrierInternal(HeapObject object, Address slot,
HeapObject value) {
- DCHECK(Heap::PageFlagsAreConsistent(object));
+ DCHECK(Heap_PageFlagsAreConsistent(object));
heap_internals::MemoryChunk* value_chunk =
heap_internals::MemoryChunk::FromHeapObject(value);
heap_internals::MemoryChunk* object_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
- if (!value_chunk->InNewSpace() || object_chunk->InNewSpace()) return;
+ if (!value_chunk->InYoungGeneration() || object_chunk->InYoungGeneration()) {
+ return;
+ }
- Heap::GenerationalBarrierSlow(object, slot, value);
+ Heap_GenerationalBarrierSlow(object, slot, value);
}
inline void MarkingBarrierInternal(HeapObject object, Address slot,
HeapObject value) {
- DCHECK(Heap::PageFlagsAreConsistent(object));
+ DCHECK(Heap_PageFlagsAreConsistent(object));
heap_internals::MemoryChunk* value_chunk =
heap_internals::MemoryChunk::FromHeapObject(value);
if (!value_chunk->IsMarking()) return;
- Heap::MarkingBarrierSlow(object, slot, value);
+ Heap_MarkingBarrierSlow(object, slot, value);
}
} // namespace heap_internals
@@ -92,7 +136,7 @@ inline void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object value) {
}
inline void WriteBarrierForCode(Code host) {
- Heap::WriteBarrierForCodeSlow(host);
+ Heap_WriteBarrierForCodeSlow(host);
}
inline void GenerationalBarrier(HeapObject object, ObjectSlot slot,
@@ -116,17 +160,17 @@ inline void GenerationalBarrierForElements(Heap* heap, FixedArray array,
int offset, int length) {
heap_internals::MemoryChunk* array_chunk =
heap_internals::MemoryChunk::FromHeapObject(array);
- if (array_chunk->InNewSpace()) return;
+ if (array_chunk->InYoungGeneration()) return;
- Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
+ Heap_GenerationalBarrierForElementsSlow(heap, array, offset, length);
}
inline void GenerationalBarrierForCode(Code host, RelocInfo* rinfo,
HeapObject object) {
heap_internals::MemoryChunk* object_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
- if (!object_chunk->InNewSpace()) return;
- Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
+ if (!object_chunk->InYoungGeneration()) return;
+ Heap_GenerationalBarrierForCodeSlow(host, rinfo, object);
}
inline void MarkingBarrier(HeapObject object, ObjectSlot slot, Object value) {
@@ -150,7 +194,7 @@ inline void MarkingBarrierForElements(Heap* heap, HeapObject object) {
heap_internals::MemoryChunk::FromHeapObject(object);
if (!object_chunk->IsMarking()) return;
- Heap::MarkingBarrierForElementsSlow(heap, object);
+ Heap_MarkingBarrierForElementsSlow(heap, object);
}
inline void MarkingBarrierForCode(Code host, RelocInfo* rinfo,
@@ -159,7 +203,7 @@ inline void MarkingBarrierForCode(Code host, RelocInfo* rinfo,
heap_internals::MemoryChunk* object_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
if (!object_chunk->IsMarking()) return;
- Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
+ Heap_MarkingBarrierForCodeSlow(host, rinfo, object);
}
inline void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
@@ -169,8 +213,24 @@ inline void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
heap_internals::MemoryChunk::FromHeapObject(descriptor_array);
if (!chunk->IsMarking()) return;
- Heap::MarkingBarrierForDescriptorArraySlow(heap, host, descriptor_array,
- number_of_own_descriptors);
+ Heap_MarkingBarrierForDescriptorArraySlow(heap, host, descriptor_array,
+ number_of_own_descriptors);
+}
+
+inline WriteBarrierMode GetWriteBarrierModeForObject(
+ HeapObject object, const DisallowHeapAllocation* promise) {
+ DCHECK(Heap_PageFlagsAreConsistent(object));
+ heap_internals::MemoryChunk* chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ if (chunk->IsMarking()) return UPDATE_WRITE_BARRIER;
+ if (chunk->InYoungGeneration()) return SKIP_WRITE_BARRIER;
+ return UPDATE_WRITE_BARRIER;
+}
+
+inline bool ObjectInYoungGeneration(const Object object) {
+ if (object.IsSmi()) return false;
+ return heap_internals::MemoryChunk::FromHeapObject(HeapObject::cast(object))
+ ->InYoungGeneration();
}
inline Heap* GetHeapFromWritableObject(const HeapObject object) {
@@ -179,6 +239,17 @@ inline Heap* GetHeapFromWritableObject(const HeapObject object) {
return chunk->GetHeap();
}
+inline bool GetIsolateFromWritableObject(HeapObject obj, Isolate** isolate) {
+ heap_internals::MemoryChunk* chunk =
+ heap_internals::MemoryChunk::FromHeapObject(obj);
+ if (chunk->GetOwner()->identity() == RO_SPACE) {
+ *isolate = nullptr;
+ return false;
+ }
+ *isolate = Isolate::FromHeap(chunk->GetHeap());
+ return true;
+}
+
} // namespace internal
} // namespace v8
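
heap_internals above mirrors just enough of the MemoryChunk and Space layout,
via hard-coded byte offsets, to answer barrier questions without including the
heavy heap headers; the Heap_* free functions (defined in heap.cc, next)
decouple the slow paths the same way. A self-contained illustration of the
offset trick with a hypothetical struct (the offset is only valid while it
matches the real layout, which is why such mirrors stay tiny):

    #include <cstdint>

    struct OpaqueOwner {
      static constexpr std::uintptr_t kIdOffset = 9 * sizeof(void*);
      // Read a field of an opaque object through a fixed byte offset
      // instead of including its real class definition.
      int identity() const {
        return *reinterpret_cast<const int*>(
            reinterpret_cast<std::uintptr_t>(this) + kIdOffset);
      }
    };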
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index d399d070b8..e72269d40a 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -37,6 +37,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
@@ -59,12 +60,14 @@
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
+#include "src/string-stream.h"
#include "src/tracing/trace-event.h"
#include "src/unicode-decoder.h"
#include "src/unicode-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"
+#include "src/v8threads.h"
#include "src/vm-state-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -73,6 +76,52 @@
namespace v8 {
namespace internal {
+// These are outside the Heap class so they can be forward-declared
+// in heap-write-barrier-inl.h.
+bool Heap_PageFlagsAreConsistent(HeapObject object) {
+ return Heap::PageFlagsAreConsistent(object);
+}
+
+void Heap_GenerationalBarrierSlow(HeapObject object, Address slot,
+ HeapObject value) {
+ Heap::GenerationalBarrierSlow(object, slot, value);
+}
+
+void Heap_MarkingBarrierSlow(HeapObject object, Address slot,
+ HeapObject value) {
+ Heap::MarkingBarrierSlow(object, slot, value);
+}
+
+void Heap_WriteBarrierForCodeSlow(Code host) {
+ Heap::WriteBarrierForCodeSlow(host);
+}
+
+void Heap_GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
+ HeapObject object) {
+ Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
+}
+
+void Heap_MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
+ HeapObject object) {
+ Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
+}
+
+void Heap_GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
+ int offset, int length) {
+ Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
+}
+
+void Heap_MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
+ Heap::MarkingBarrierForElementsSlow(heap, object);
+}
+
+void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
+ HeapObject descriptor_array,
+ int number_of_own_descriptors) {
+ Heap::MarkingBarrierForDescriptorArraySlow(heap, host, descriptor_array,
+ number_of_own_descriptors);
+}
+
void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
@@ -152,8 +201,12 @@ Heap::Heap()
RememberUnmappedPage(kNullAddress, false);
}
+Heap::~Heap() = default;
+
size_t Heap::MaxReserved() {
+ const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
return static_cast<size_t>(2 * max_semi_space_size_ +
+ kMaxNewLargeObjectSpaceSize +
max_old_generation_size_);
}
@@ -204,7 +257,8 @@ size_t Heap::CommittedMemoryOfUnmapper() {
size_t Heap::CommittedMemory() {
if (!HasBeenSetUp()) return 0;
- return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
+ return new_space_->CommittedMemory() + new_lo_space_->Size() +
+ CommittedOldGenerationMemory();
}
@@ -266,7 +320,7 @@ bool Heap::HasBeenSetUp() {
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
const char** reason) {
// Is global GC requested?
- if (space != NEW_SPACE) {
+ if (space != NEW_SPACE && space != NEW_LO_SPACE) {
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
*reason = "GC in old space requested";
return MARK_COMPACTOR;
@@ -284,7 +338,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
}
// Over-estimate the new space size using capacity to allow some slack.
- if (!CanExpandOldGeneration(new_space_->TotalCapacity())) {
+ if (!CanExpandOldGeneration(new_space_->TotalCapacity() +
+ new_lo_space()->Size())) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
@@ -585,6 +640,7 @@ void Heap::GarbageCollectionPrologue() {
ephemeron_retainer_.clear();
retaining_root_.clear();
}
+ memory_allocator()->unmapper()->PrepareForGC();
}
size_t Heap::SizeOfObjects() {
@@ -942,32 +998,36 @@ void Heap::GarbageCollectionEpilogue() {
if (FLAG_harmony_weak_refs) {
// TODO(marja): (spec): The exact condition on when to schedule the cleanup
- // task is unclear. This version schedules the cleanup task for a factory
- // whenever the GC has discovered new dirty WeakCells for it (at that point
- // it might have leftover dirty WeakCells since an earlier invocation of the
- // cleanup function didn't iterate through them). See
- // https://github.com/tc39/proposal-weakrefs/issues/34
+ // task is unclear. This version schedules the cleanup task for a
+ // JSFinalizationGroup whenever the GC has discovered new dirty WeakCells
+ // for it (at that point it might have leftover dirty WeakCells since an
+ // earlier invocation of the cleanup function didn't iterate through
+ // them). See https://github.com/tc39/proposal-weakrefs/issues/34
HandleScope handle_scope(isolate());
- while (
- !isolate()->heap()->dirty_js_weak_factories()->IsUndefined(isolate())) {
- // Enqueue one microtask per JSWeakFactory.
- Handle<JSWeakFactory> weak_factory(
- JSWeakFactory::cast(isolate()->heap()->dirty_js_weak_factories()),
+ while (!isolate()->heap()->dirty_js_finalization_groups()->IsUndefined(
+ isolate())) {
+ // Enqueue one microtask per JSFinalizationGroup.
+ Handle<JSFinalizationGroup> finalization_group(
+ JSFinalizationGroup::cast(
+ isolate()->heap()->dirty_js_finalization_groups()),
isolate());
- isolate()->heap()->set_dirty_js_weak_factories(weak_factory->next());
- weak_factory->set_next(ReadOnlyRoots(isolate()).undefined_value());
- Handle<NativeContext> context(weak_factory->native_context(), isolate());
+ isolate()->heap()->set_dirty_js_finalization_groups(
+ finalization_group->next());
+ finalization_group->set_next(ReadOnlyRoots(isolate()).undefined_value());
+ Handle<NativeContext> context(finalization_group->native_context(),
+ isolate());
// GC has no native context, but we use the creation context of the
- // JSWeakFactory for the EnqueueTask operation. This is consitent with the
- // Promise implementation, assuming the JSFactory creation context is the
- // "caller's context" in promise functions. An alternative would be to use
- // the native context of the cleanup function. This difference shouldn't
- // be observable from JavaScript, since we enter the native context of the
- // cleanup function before calling it. TODO(marja): Revisit when the spec
- // clarifies this. See also
+      // JSFinalizationGroup for the EnqueueTask operation. This is consistent
+ // with the Promise implementation, assuming the JSFinalizationGroup's
+ // creation context is the "caller's context" in promise functions. An
+ // alternative would be to use the native context of the cleanup
+ // function. This difference shouldn't be observable from JavaScript,
+ // since we enter the native context of the cleanup function before
+ // calling it. TODO(marja): Revisit when the spec clarifies this. See also
// https://github.com/tc39/proposal-weakrefs/issues/38 .
- Handle<WeakFactoryCleanupJobTask> task =
- isolate()->factory()->NewWeakFactoryCleanupJobTask(weak_factory);
+ Handle<FinalizationGroupCleanupJobTask> task =
+ isolate()->factory()->NewFinalizationGroupCleanupJobTask(
+ finalization_group);
context->microtask_queue()->EnqueueMicrotask(*task);
}
}
@@ -1169,6 +1229,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
+ new_lo_space_->SetCapacity(new_space_->Capacity());
UncommitFromSpace();
EagerlyFreeExternalMemory();
@@ -1243,7 +1304,7 @@ void Heap::ReportExternalMemoryPressure() {
// Extend the gc callback flags with external memory flags.
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
- incremental_marking()->AdvanceIncrementalMarking(
+ incremental_marking()->AdvanceWithDeadline(
deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
}
@@ -1268,7 +1329,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced;
- if (!CanExpandOldGeneration(new_space()->Capacity())) {
+ if (!CanExpandOldGeneration(new_space()->Capacity() +
+ new_lo_space()->Size())) {
InvokeNearHeapLimitCallback();
}
@@ -1429,7 +1491,6 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
void Heap::StartIdleIncrementalMarking(
GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags) {
- gc_idle_time_handler_->ResetNoProgressCounter();
StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
gc_callback_flags);
}
@@ -1644,6 +1705,8 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
bool Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
+ DisallowJavascriptExecution no_js(isolate());
+
size_t freed_global_handles = 0;
if (!IsYoungGenerationCollector(collector)) {
@@ -1668,6 +1731,7 @@ bool Heap::PerformGarbageCollection(
EmbedderHeapTracer::EmbedderStackState::kUnknown);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
+ AllowJavascriptExecution allow_js(isolate());
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
@@ -1677,10 +1741,11 @@ bool Heap::PerformGarbageCollection(
EnsureFromSpaceIsCommitted();
- size_t start_new_space_size = Heap::new_space()->Size();
+ size_t start_young_generation_size =
+ Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
{
- Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
+ Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get());
switch (collector) {
case MARK_COMPACTOR:
@@ -1700,7 +1765,8 @@ bool Heap::PerformGarbageCollection(
break;
case SCAVENGER:
if ((fast_promotion_mode_ &&
- CanExpandOldGeneration(new_space()->Size()))) {
+ CanExpandOldGeneration(new_space()->Size() +
+ new_lo_space()->Size()))) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
EvacuateYoungGeneration();
@@ -1716,14 +1782,14 @@ bool Heap::PerformGarbageCollection(
ProcessPretenuringFeedback();
}
- UpdateSurvivalStatistics(static_cast<int>(start_new_space_size));
+ UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
ConfigureInitialOldGenerationSize();
if (collector != MARK_COMPACTOR) {
// Objects that died in the new space might have been accounted
// as bytes marked ahead of schedule by the incremental marker.
incremental_marking()->UpdateMarkedBytesAfterScavenge(
- start_new_space_size - SurvivedNewSpaceObjectSize());
+ start_young_generation_size - SurvivedYoungObjectSize());
}
if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
@@ -1755,6 +1821,7 @@ bool Heap::PerformGarbageCollection(
gc_post_processing_depth_++;
{
AllowHeapAllocation allow_allocation;
+ AllowJavascriptExecution allow_js(isolate());
freed_global_handles +=
isolate_->global_handles()->PostGarbageCollectionProcessing(
collector, gc_callback_flags);
@@ -1804,6 +1871,7 @@ bool Heap::PerformGarbageCollection(
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
+ AllowJavascriptExecution allow_js(isolate());
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
@@ -1935,6 +2003,7 @@ void Heap::CheckNewSpaceExpansionCriteria() {
new_space_->Grow();
survived_since_last_expansion_ = 0;
}
+ new_lo_space()->SetCapacity(new_space()->Capacity());
}
void Heap::EvacuateYoungGeneration() {
@@ -1943,7 +2012,8 @@ void Heap::EvacuateYoungGeneration() {
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
- DCHECK(CanExpandOldGeneration(new_space()->Size()));
+ DCHECK(
+ CanExpandOldGeneration(new_space()->Size() + new_lo_space()->Size()));
}
mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
@@ -1968,12 +2038,21 @@ void Heap::EvacuateYoungGeneration() {
new_space()->ResetLinearAllocationArea();
new_space()->set_age_mark(new_space()->top());
+ for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
+ LargePage* page = *it;
+ // Increment has to happen after we save the page, because it is going to
+ // be removed below.
+ it++;
+ lo_space()->PromoteNewLargeObject(page);
+ }
+
// Fix up special trackers.
- external_string_table_.PromoteAllNewSpaceStrings();
+ external_string_table_.PromoteYoung();
// GlobalHandles are updated in PostGarbageCollectionProcessing.
- IncrementYoungSurvivorsCounter(new_space()->Size());
- IncrementPromotedObjectsSize(new_space()->Size());
+ size_t promoted = new_space()->Size() + new_lo_space()->Size();
+ IncrementYoungSurvivorsCounter(promoted);
+ IncrementPromotedObjectsSize(promoted);
IncrementSemiSpaceCopiedObjectSize(0);
LOG(isolate_, ResourceEvent("scavenge", "end"));
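// The promotion loop above must advance the iterator before
// PromoteNewLargeObject unlinks the page from the young list. A minimal
// sketch of the same idiom with std::list (hypothetical element type):
#include <list>

void PromoteAll(std::list<int>& young, std::list<int>& old_gen) {
  for (auto it = young.begin(); it != young.end();) {
    auto current = it++;  // advance first; unlinking invalidates current
    // Move the node in O(1); mirrors promoting a page between spaces.
    old_gen.splice(old_gen.end(), young, current);
  }
}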
@@ -2008,6 +2087,7 @@ void Heap::Scavenge() {
// We also flip the young generation large object space. All large objects
// will be in the from space.
new_lo_space()->Flip();
+ new_lo_space()->ResetPendingObject();
// Implements Cheney's copying algorithm
LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -2061,33 +2141,54 @@ void Heap::ProtectUnprotectedMemoryChunks() {
}
bool Heap::ExternalStringTable::Contains(String string) {
- for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- if (new_space_strings_[i] == string) return true;
+ for (size_t i = 0; i < young_strings_.size(); ++i) {
+ if (young_strings_[i] == string) return true;
}
- for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- if (old_space_strings_[i] == string) return true;
+ for (size_t i = 0; i < old_strings_.size(); ++i) {
+ if (old_strings_[i] == string) return true;
}
return false;
}
-String Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(
- Heap* heap, FullObjectSlot p) {
- MapWord first_word = HeapObject::cast(*p)->map_word();
+void Heap::UpdateExternalString(String string, size_t old_payload,
+ size_t new_payload) {
+ DCHECK(string->IsExternalString());
+ Page* page = Page::FromHeapObject(string);
- if (!first_word.IsForwardingAddress()) {
- // Unreachable external string can be finalized.
- String string = String::cast(*p);
- if (!string->IsExternalString()) {
- // Original external string has been internalized.
- DCHECK(string->IsThinString());
+ if (old_payload > new_payload) {
+ page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString, old_payload - new_payload);
+ } else {
+ page->IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString, new_payload - old_payload);
+ }
+}
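// UpdateExternalString branches on which payload is larger so the delta
// handed to the counter is always a positive unsigned value and never
// underflows. A standalone sketch of that pattern (hypothetical counter):
#include <cstddef>

void AdjustExternalBytes(size_t* counter, size_t old_payload,
                         size_t new_payload) {
  if (old_payload > new_payload) {
    *counter -= old_payload - new_payload;  // shrink by the positive delta
  } else {
    *counter += new_payload - old_payload;  // grow by the positive delta
  }
}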
+
+String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
+ FullObjectSlot p) {
+ HeapObject obj = HeapObject::cast(*p);
+ MapWord first_word = obj->map_word();
+
+ String new_string;
+
+ if (InFromPage(obj)) {
+ if (!first_word.IsForwardingAddress()) {
+ // Unreachable external string can be finalized.
+ String string = String::cast(obj);
+ if (!string->IsExternalString()) {
+ // Original external string has been internalized.
+ DCHECK(string->IsThinString());
+ return String();
+ }
+ heap->FinalizeExternalString(string);
return String();
}
- heap->FinalizeExternalString(string);
- return String();
+ new_string = String::cast(first_word.ToForwardingAddress());
+ } else {
+ new_string = String::cast(obj);
}
// String is still reachable.
- String new_string = String::cast(first_word.ToForwardingAddress());
if (new_string->IsThinString()) {
// Filtering Thin strings out of the external string table.
return String();
@@ -2103,16 +2204,16 @@ String Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(
return new_string->IsExternalString() ? new_string : String();
}
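// The updater above distinguishes three cases for a young external
// string: dead (no forwarding address), moved (follow the forwarding
// address), or not on a from-page at all. A simplified model with
// hypothetical types:
struct StringSketch {
  StringSketch* forwarding = nullptr;  // set when the scavenger moved it
  bool in_from_page = false;
};

// Returns the post-GC location, or nullptr if the string died.
StringSketch* UpdateYoungReference(StringSketch* obj) {
  if (!obj->in_from_page) return obj;  // e.g. promoted large object
  if (obj->forwarding == nullptr) return nullptr;  // unreachable: finalize
  return obj->forwarding;              // survivor: new location
}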
-void Heap::ExternalStringTable::VerifyNewSpace() {
+void Heap::ExternalStringTable::VerifyYoung() {
#ifdef DEBUG
std::set<String> visited_map;
std::map<MemoryChunk*, size_t> size_map;
ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
- for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- String obj = String::cast(new_space_strings_[i]);
+ for (size_t i = 0; i < young_strings_.size(); ++i) {
+ String obj = String::cast(young_strings_[i]);
MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
- DCHECK(mc->InNewSpace());
- DCHECK(heap_->InNewSpace(obj));
+ DCHECK(mc->InYoungGeneration());
+ DCHECK(heap_->InYoungGeneration(obj));
DCHECK(!obj->IsTheHole(heap_->isolate()));
DCHECK(obj->IsExternalString());
// Note: we can have repeated elements in the table.
@@ -2131,12 +2232,12 @@ void Heap::ExternalStringTable::Verify() {
std::set<String> visited_map;
std::map<MemoryChunk*, size_t> size_map;
ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
- VerifyNewSpace();
- for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- String obj = String::cast(old_space_strings_[i]);
+ VerifyYoung();
+ for (size_t i = 0; i < old_strings_.size(); ++i) {
+ String obj = String::cast(old_strings_[i]);
MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
- DCHECK(!mc->InNewSpace());
- DCHECK(!heap_->InNewSpace(obj));
+ DCHECK(!mc->InYoungGeneration());
+ DCHECK(!heap_->InYoungGeneration(obj));
DCHECK(!obj->IsTheHole(heap_->isolate()));
DCHECK(obj->IsExternalString());
// Note: we can have repeated elements in the table.
@@ -2150,12 +2251,12 @@ void Heap::ExternalStringTable::Verify() {
#endif
}
-void Heap::ExternalStringTable::UpdateNewSpaceReferences(
+void Heap::ExternalStringTable::UpdateYoungReferences(
Heap::ExternalStringTableUpdaterCallback updater_func) {
- if (new_space_strings_.empty()) return;
+ if (young_strings_.empty()) return;
- FullObjectSlot start(&new_space_strings_[0]);
- FullObjectSlot end(&new_space_strings_[new_space_strings_.size()]);
+ FullObjectSlot start(&young_strings_[0]);
+ FullObjectSlot end(&young_strings_[young_strings_.size()]);
FullObjectSlot last = start;
for (FullObjectSlot p = start; p < end; ++p) {
@@ -2165,67 +2266,66 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
DCHECK(target->IsExternalString());
- if (InNewSpace(target)) {
+ if (InYoungGeneration(target)) {
// String is still in the young generation. Update the table entry.
last.store(target);
++last;
} else {
// String got promoted. Move it to the old string list.
- old_space_strings_.push_back(target);
+ old_strings_.push_back(target);
}
}
DCHECK(last <= end);
- new_space_strings_.resize(last - start);
+ young_strings_.resize(last - start);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifyNewSpace();
+ VerifyYoung();
}
#endif
}
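// UpdateYoungReferences compacts young_strings_ in place with a read
// slot (p) and a write slot (last), demoting promoted strings to the old
// list as it goes. A generic sketch of that two-pointer filter:
#include <cstddef>
#include <vector>

template <typename T, typename Keep, typename Demote>
void CompactInPlace(std::vector<T>& v, Keep keep, Demote demote) {
  size_t last = 0;
  for (size_t i = 0; i < v.size(); ++i) {
    if (keep(v[i])) {
      v[last++] = v[i];  // still young: slide left into the kept prefix
    } else {
      demote(v[i]);      // promoted: hand off to the old list
    }
  }
  v.resize(last);        // drop the now-unused tail
}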
-void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
- old_space_strings_.reserve(old_space_strings_.size() +
- new_space_strings_.size());
- std::move(std::begin(new_space_strings_), std::end(new_space_strings_),
- std::back_inserter(old_space_strings_));
- new_space_strings_.clear();
+void Heap::ExternalStringTable::PromoteYoung() {
+ old_strings_.reserve(old_strings_.size() + young_strings_.size());
+ std::move(std::begin(young_strings_), std::end(young_strings_),
+ std::back_inserter(old_strings_));
+ young_strings_.clear();
}
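// PromoteYoung moves every young entry onto the old list in one pass,
// reserving capacity first so at most one reallocation happens.
// Equivalent generic sketch:
#include <algorithm>
#include <iterator>
#include <vector>

template <typename T>
void MoveAppend(std::vector<T>& from, std::vector<T>& to) {
  to.reserve(to.size() + from.size());
  std::move(from.begin(), from.end(), std::back_inserter(to));
  from.clear();  // elements were moved from; discard the husks
}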
-void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
- if (!new_space_strings_.empty()) {
+void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
+ if (!young_strings_.empty()) {
v->VisitRootPointers(
Root::kExternalStringsTable, nullptr,
- FullObjectSlot(&new_space_strings_[0]),
- FullObjectSlot(&new_space_strings_[new_space_strings_.size()]));
+ FullObjectSlot(&young_strings_[0]),
+ FullObjectSlot(&young_strings_[young_strings_.size()]));
}
}
void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
- IterateNewSpaceStrings(v);
- if (!old_space_strings_.empty()) {
+ IterateYoung(v);
+ if (!old_strings_.empty()) {
v->VisitRootPointers(
Root::kExternalStringsTable, nullptr,
- FullObjectSlot(old_space_strings_.data()),
- FullObjectSlot(old_space_strings_.data() + old_space_strings_.size()));
+ FullObjectSlot(old_strings_.data()),
+ FullObjectSlot(old_strings_.data() + old_strings_.size()));
}
}
-void Heap::UpdateNewSpaceReferencesInExternalStringTable(
+void Heap::UpdateYoungReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
- external_string_table_.UpdateNewSpaceReferences(updater_func);
+ external_string_table_.UpdateYoungReferences(updater_func);
}
void Heap::ExternalStringTable::UpdateReferences(
Heap::ExternalStringTableUpdaterCallback updater_func) {
- if (old_space_strings_.size() > 0) {
- FullObjectSlot start(old_space_strings_.data());
- FullObjectSlot end(old_space_strings_.data() + old_space_strings_.size());
+ if (old_strings_.size() > 0) {
+ FullObjectSlot start(old_strings_.data());
+ FullObjectSlot end(old_strings_.data() + old_strings_.size());
for (FullObjectSlot p = start; p < end; ++p)
p.store(updater_func(heap_, p));
}
- UpdateNewSpaceReferences(updater_func);
+ UpdateYoungReferences(updater_func);
}
void Heap::UpdateReferencesInExternalStringTable(
@@ -2347,13 +2447,20 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
external_string_table_.IterateAll(&external_string_table_visitor);
}
-STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
- 0); // NOLINT
-STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
- 0); // NOLINT
+STATIC_ASSERT(IsAligned(FixedDoubleArray::kHeaderSize, kDoubleAlignment));
+
+#ifdef V8_COMPRESS_POINTERS
+// TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
+// is only kTaggedSize aligned but we can keep using unaligned access since
+// both x64 and arm64 architectures (where pointer compression is supported)
+// allow unaligned access to doubles.
+STATIC_ASSERT(IsAligned(FixedTypedArrayBase::kDataOffset, kTaggedSize));
+#else
+STATIC_ASSERT(IsAligned(FixedTypedArrayBase::kDataOffset, kDoubleAlignment));
+#endif
+
#ifdef V8_HOST_ARCH_32_BIT
-STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
- 0); // NOLINT
+STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
#endif
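// The rewritten asserts use an IsAligned predicate instead of masking by
// hand. For a power-of-two alignment it reduces to a single AND (sketch,
// not the V8 utility itself):
#include <cstdint>

constexpr bool IsAlignedTo(uintptr_t value, uintptr_t alignment) {
  return (value & (alignment - 1)) == 0;  // alignment must be 2^k
}

static_assert(IsAlignedTo(16, 8), "16 is 8-byte aligned");
static_assert(!IsAlignedTo(12, 8), "12 is not 8-byte aligned");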
@@ -2492,20 +2599,7 @@ bool Heap::IsImmovable(HeapObject object) {
}
bool Heap::IsLargeObject(HeapObject object) {
- return IsLargeMemoryChunk(MemoryChunk::FromHeapObject(object));
-}
-
-bool Heap::IsLargeMemoryChunk(MemoryChunk* chunk) {
- return chunk->owner()->identity() == NEW_LO_SPACE ||
- chunk->owner()->identity() == LO_SPACE ||
- chunk->owner()->identity() == CODE_LO_SPACE;
-}
-
-bool Heap::IsInYoungGeneration(HeapObject object) {
- if (MemoryChunk::FromHeapObject(object)->IsInNewLargeObjectSpace()) {
- return !object->map_word().IsForwardingAddress();
- }
- return Heap::InNewSpace(object);
+ return MemoryChunk::FromHeapObject(object)->IsLargePage();
}
#ifdef ENABLE_SLOW_DCHECKS
@@ -2534,7 +2628,7 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
namespace {
bool MayContainRecordedSlots(HeapObject object) {
// New space objects do not have recorded slots.
- if (MemoryChunk::FromHeapObject(object)->InNewSpace()) return false;
+ if (MemoryChunk::FromHeapObject(object)->InYoungGeneration()) return false;
// Whitelist objects that definitely do not have pointers.
if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
// Conservatively return true for other objects.
@@ -2910,6 +3004,7 @@ void Heap::ReduceNewSpaceSize() {
((allocation_throughput != 0) &&
(allocation_throughput < kLowAllocationThroughput))) {
new_space_->Shrink();
+ new_lo_space_->SetCapacity(new_space_->Capacity());
UncommitFromSpace();
}
}
@@ -3107,31 +3202,26 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
GCIdleTimeHeapState heap_state,
double deadline_in_ms) {
bool result = false;
- switch (action.type) {
- case DONE:
+ switch (action) {
+ case GCIdleTimeAction::kDone:
result = true;
break;
- case DO_INCREMENTAL_STEP: {
- const double remaining_idle_time_in_ms =
- incremental_marking()->AdvanceIncrementalMarking(
- deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kTask);
- if (remaining_idle_time_in_ms > 0.0) {
- FinalizeIncrementalMarkingIfComplete(
- GarbageCollectionReason::kFinalizeMarkingViaTask);
- }
+ case GCIdleTimeAction::kIncrementalStep: {
+ incremental_marking()->AdvanceWithDeadline(
+ deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kTask);
+ FinalizeIncrementalMarkingIfComplete(
+ GarbageCollectionReason::kFinalizeMarkingViaTask);
result = incremental_marking()->IsStopped();
break;
}
- case DO_FULL_GC: {
+ case GCIdleTimeAction::kFullGC: {
DCHECK_LT(0, contexts_disposed_);
HistogramTimerScope scope(isolate_->counters()->gc_context());
TRACE_EVENT0("v8", "V8.GCContext");
CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
break;
}
- case DO_NOTHING:
- break;
}
return result;
@@ -3147,14 +3237,23 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
contexts_disposed_ = 0;
- if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
- FLAG_trace_idle_notification_verbose) {
+ if (FLAG_trace_idle_notification) {
isolate_->PrintWithTimestamp(
"Idle notification: requested idle time %.2f ms, used idle time %.2f "
"ms, deadline usage %.2f ms [",
idle_time_in_ms, idle_time_in_ms - deadline_difference,
deadline_difference);
- action.Print();
+ switch (action) {
+ case GCIdleTimeAction::kDone:
+ PrintF("done");
+ break;
+ case GCIdleTimeAction::kIncrementalStep:
+ PrintF("incremental step");
+ break;
+ case GCIdleTimeAction::kFullGC:
+ PrintF("full GC");
+ break;
+ }
PrintF("]");
if (FLAG_trace_idle_notification_verbose) {
PrintF("[");
@@ -3565,6 +3664,12 @@ void Heap::Verify() {
VerifyPointersVisitor visitor(this);
IterateRoots(&visitor, VISIT_ONLY_STRONG);
+ if (!isolate()->context().is_null() &&
+ !isolate()->normalized_map_cache()->IsUndefined(isolate())) {
+ NormalizedMapCache::cast(*isolate()->normalized_map_cache())
+ ->NormalizedMapCacheVerify(isolate());
+ }
+
VerifySmisVisitor smis_visitor;
IterateSmiRoots(&smis_visitor);
@@ -3645,10 +3750,10 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
: SlotVerifyingVisitor(untyped, typed) {}
bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
- DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InNewSpace(target),
- Heap::InToSpace(target));
- return target->IsStrongOrWeak() && Heap::InNewSpace(target) &&
- !Heap::InNewSpace(host);
+ DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InYoungGeneration(target),
+ Heap::InToPage(target));
+ return target->IsStrongOrWeak() && Heap::InYoungGeneration(target) &&
+ !Heap::InYoungGeneration(host);
}
};
@@ -3684,7 +3789,7 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
Address end = start + object->Size();
std::set<Address> old_to_new;
std::set<std::pair<SlotType, Address> > typed_old_to_new;
- if (!InNewSpace(object)) {
+ if (!InYoungGeneration(object)) {
store_buffer()->MoveAllEntriesToRememberedSet();
CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new);
@@ -3888,10 +3993,10 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
break;
case VISIT_ALL_IN_SCAVENGE:
case VISIT_ALL_IN_MINOR_MC_MARK:
- isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
+ isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
break;
case VISIT_ALL_IN_MINOR_MC_UPDATE:
- isolate_->global_handles()->IterateAllNewSpaceRoots(v);
+ isolate_->global_handles()->IterateAllYoungRoots(v);
break;
case VISIT_ALL_IN_SWEEP_NEWSPACE:
case VISIT_ALL:
@@ -3904,7 +4009,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// serializer. Values referenced by eternal handles need to be added manually.
if (mode != VISIT_FOR_SERIALIZATION) {
if (isMinorGC) {
- isolate_->eternal_handles()->IterateNewSpaceRoots(v);
+ isolate_->eternal_handles()->IterateYoungRoots(v);
} else {
isolate_->eternal_handles()->IterateAllRoots(v);
}
@@ -4320,10 +4425,10 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
return heap_object;
}
-HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
+HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType type,
AllocationAlignment alignment) {
HeapObject result;
- AllocationResult alloc = AllocateRaw(size, space, alignment);
+ AllocationResult alloc = AllocateRaw(size, type, alignment);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -4332,7 +4437,7 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
for (int i = 0; i < 2; i++) {
CollectGarbage(alloc.RetrySpace(),
GarbageCollectionReason::kAllocationFailure);
- alloc = AllocateRaw(size, space, alignment);
+ alloc = AllocateRaw(size, type, alignment);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -4341,17 +4446,17 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
return HeapObject();
}
-HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
+HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType type,
AllocationAlignment alignment) {
AllocationResult alloc;
- HeapObject result = AllocateRawWithLightRetry(size, space, alignment);
+ HeapObject result = AllocateRawWithLightRetry(size, type, alignment);
if (!result.is_null()) return result;
isolate()->counters()->gc_last_resort_from_handles()->Increment();
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
{
AlwaysAllocateScope scope(isolate());
- alloc = AllocateRaw(size, space, alignment);
+ alloc = AllocateRaw(size, type, alignment);
}
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
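// The two functions in this region implement a tiered fallback: try the
// allocation, retry after a bounded number of regular GCs, then (in the
// OrFail variant) attempt one last-resort "collect everything" pass. A
// schematic model with assumed stand-in functions:
#include <cstddef>

void* TryAllocate(size_t size);     // assumed: returns nullptr on failure
void CollectGarbage();              // assumed: one regular full GC
void CollectAllAvailableGarbage();  // assumed: last-resort GC

void* AllocateWithLightRetry(size_t size) {
  if (void* p = TryAllocate(size)) return p;
  for (int i = 0; i < 2; i++) {  // bounded retries, as above
    CollectGarbage();
    if (void* p = TryAllocate(size)) return p;
  }
  return nullptr;  // caller handles failure
}

void* AllocateWithRetryOrFail(size_t size) {
  if (void* p = AllocateWithLightRetry(size)) return p;
  CollectAllAvailableGarbage();  // hammer GC before giving up
  return TryAllocate(size);      // real code treats failure here as fatal
}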
@@ -4396,7 +4501,7 @@ HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
return HeapObject();
}
-void Heap::SetUp() {
+void Heap::SetUp(ReadOnlyHeap* ro_heap) {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();
#endif
@@ -4409,42 +4514,46 @@ void Heap::SetUp() {
// and old_generation_size_ otherwise.
if (!configured_) ConfigureHeapDefault();
+ DCHECK_NOT_NULL(ro_heap);
+ read_only_heap_ = ro_heap;
+
mmap_region_base_ =
reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
~kMmapRegionMask;
// Set up memory allocator.
- memory_allocator_ =
- new MemoryAllocator(isolate_, MaxReserved(), code_range_size_);
+ memory_allocator_.reset(
+ new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
- store_buffer_ = new StoreBuffer(this);
+ store_buffer_.reset(new StoreBuffer(this));
- heap_controller_ = new HeapController(this);
+ heap_controller_.reset(new HeapController(this));
- mark_compact_collector_ = new MarkCompactCollector(this);
+ mark_compact_collector_.reset(new MarkCompactCollector(this));
- scavenger_collector_ = new ScavengerCollector(this);
+ scavenger_collector_.reset(new ScavengerCollector(this));
- incremental_marking_ =
+ incremental_marking_.reset(
new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
- mark_compact_collector_->weak_objects());
+ mark_compact_collector_->weak_objects()));
if (FLAG_concurrent_marking || FLAG_parallel_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
mark_compact_collector_->marking_worklist();
- concurrent_marking_ = new ConcurrentMarking(
+ concurrent_marking_.reset(new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->on_hold(),
- mark_compact_collector_->weak_objects(), marking_worklist->embedder());
+ mark_compact_collector_->weak_objects(), marking_worklist->embedder()));
} else {
- concurrent_marking_ =
- new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
+ concurrent_marking_.reset(
+ new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr));
}
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
space_[i] = nullptr;
}
- space_[RO_SPACE] = read_only_space_ = new ReadOnlySpace(this);
+ space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
+ DCHECK_NOT_NULL(read_only_space_);
space_[NEW_SPACE] = new_space_ =
new NewSpace(this, memory_allocator_->data_page_allocator(),
initial_semispace_size_, max_semi_space_size_);
@@ -4452,7 +4561,8 @@ void Heap::SetUp() {
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
- space_[NEW_LO_SPACE] = new_lo_space_ = new NewLargeObjectSpace(this);
+ space_[NEW_LO_SPACE] = new_lo_space_ =
+ new NewLargeObjectSpace(this, new_space_->Capacity());
space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
@@ -4460,20 +4570,20 @@ void Heap::SetUp() {
deferred_counters_[i] = 0;
}
- tracer_ = new GCTracer(this);
+ tracer_.reset(new GCTracer(this));
#ifdef ENABLE_MINOR_MC
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
#else
minor_mark_compact_collector_ = nullptr;
#endif // ENABLE_MINOR_MC
- array_buffer_collector_ = new ArrayBufferCollector(this);
- gc_idle_time_handler_ = new GCIdleTimeHandler();
- memory_reducer_ = new MemoryReducer(this);
+ array_buffer_collector_.reset(new ArrayBufferCollector(this));
+ gc_idle_time_handler_.reset(new GCIdleTimeHandler());
+ memory_reducer_.reset(new MemoryReducer(this));
if (V8_UNLIKELY(FLAG_gc_stats)) {
- live_object_stats_ = new ObjectStats(this);
- dead_object_stats_ = new ObjectStats(this);
+ live_object_stats_.reset(new ObjectStats(this));
+ dead_object_stats_.reset(new ObjectStats(this));
}
- local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer(isolate());
+ local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -4488,10 +4598,10 @@ void Heap::SetUp() {
#endif // ENABLE_MINOR_MC
if (FLAG_idle_time_scavenge) {
- scavenge_job_ = new ScavengeJob();
- idle_scavenge_observer_ = new IdleScavengeObserver(
- *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
- new_space()->AddAllocationObserver(idle_scavenge_observer_);
+ scavenge_job_.reset(new ScavengeJob());
+ idle_scavenge_observer_.reset(new IdleScavengeObserver(
+ *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
+ new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
}
SetGetExternallyAllocatedMemoryInBytesCallback(
@@ -4603,7 +4713,8 @@ void Heap::NotifyOldGenerationExpansion() {
const size_t kMemoryReducerActivationThreshold = 1 * MB;
if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
- kMemoryReducerActivationThreshold) {
+ kMemoryReducerActivationThreshold &&
+ FLAG_memory_reducer_for_small_heaps) {
MemoryReducer::Event event;
event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -4661,11 +4772,9 @@ void Heap::TearDown() {
}
if (FLAG_idle_time_scavenge) {
- new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
- delete idle_scavenge_observer_;
- idle_scavenge_observer_ = nullptr;
- delete scavenge_job_;
- scavenge_job_ = nullptr;
+ new_space()->RemoveAllocationObserver(idle_scavenge_observer_.get());
+ idle_scavenge_observer_.reset();
+ scavenge_job_.reset();
}
if (FLAG_stress_marking > 0) {
@@ -4680,15 +4789,11 @@ void Heap::TearDown() {
stress_scavenge_observer_ = nullptr;
}
- if (heap_controller_ != nullptr) {
- delete heap_controller_;
- heap_controller_ = nullptr;
- }
+ heap_controller_.reset();
- if (mark_compact_collector_ != nullptr) {
+ if (mark_compact_collector_) {
mark_compact_collector_->TearDown();
- delete mark_compact_collector_;
- mark_compact_collector_ = nullptr;
+ mark_compact_collector_.reset();
}
#ifdef ENABLE_MINOR_MC
@@ -4699,43 +4804,22 @@ void Heap::TearDown() {
}
#endif // ENABLE_MINOR_MC
- if (scavenger_collector_ != nullptr) {
- delete scavenger_collector_;
- scavenger_collector_ = nullptr;
- }
-
- if (array_buffer_collector_ != nullptr) {
- delete array_buffer_collector_;
- array_buffer_collector_ = nullptr;
- }
-
- delete incremental_marking_;
- incremental_marking_ = nullptr;
-
- delete concurrent_marking_;
- concurrent_marking_ = nullptr;
+ scavenger_collector_.reset();
+ array_buffer_collector_.reset();
+ incremental_marking_.reset();
+ concurrent_marking_.reset();
- delete gc_idle_time_handler_;
- gc_idle_time_handler_ = nullptr;
+ gc_idle_time_handler_.reset();
if (memory_reducer_ != nullptr) {
memory_reducer_->TearDown();
- delete memory_reducer_;
- memory_reducer_ = nullptr;
+ memory_reducer_.reset();
}
- if (live_object_stats_ != nullptr) {
- delete live_object_stats_;
- live_object_stats_ = nullptr;
- }
-
- if (dead_object_stats_ != nullptr) {
- delete dead_object_stats_;
- dead_object_stats_ = nullptr;
- }
+ live_object_stats_.reset();
+ dead_object_stats_.reset();
- delete local_embedder_heap_tracer_;
- local_embedder_heap_tracer_ = nullptr;
+ local_embedder_heap_tracer_.reset();
external_string_table_.TearDown();
@@ -4744,10 +4828,11 @@ void Heap::TearDown() {
// store.
ArrayBufferTracker::TearDown(this);
- delete tracer_;
- tracer_ = nullptr;
+ tracer_.reset();
- for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
+ read_only_heap_->OnHeapTearDown();
+ space_[RO_SPACE] = read_only_space_ = nullptr;
+ for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
space_[i] = nullptr;
}
@@ -4763,11 +4848,8 @@ void Heap::TearDown() {
}
strong_roots_list_ = nullptr;
- delete store_buffer_;
- store_buffer_ = nullptr;
-
- delete memory_allocator_;
- memory_allocator_ = nullptr;
+ store_buffer_.reset();
+ memory_allocator_.reset();
}
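// Most of this TearDown shrinks because the owning fields became
// std::unique_ptr: "delete x; x = nullptr;" collapses to x.reset(), and
// anything not reset explicitly is still freed by ~Heap. Minimal sketch:
#include <memory>

struct TracerSketch {};

struct OwnerSketch {
  std::unique_ptr<TracerSketch> tracer_;
  void SetUp() { tracer_.reset(new TracerSketch()); }
  void TearDown() { tracer_.reset(); }  // deletes and nulls in one step
  // No manual cleanup in the destructor: unique_ptr handles it.
};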
void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
@@ -4998,8 +5080,9 @@ Address Heap::store_buffer_overflow_function_address() {
}
void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
+ DCHECK(!IsLargeObject(object));
Page* page = Page::FromAddress(slot.address());
- if (!page->InNewSpace()) {
+ if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
store_buffer()->DeleteEntry(slot.address());
}
@@ -5007,7 +5090,8 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
#ifdef DEBUG
void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
- if (InNewSpace(object)) return;
+ DCHECK(!IsLargeObject(object));
+ if (InYoungGeneration(object)) return;
Page* page = Page::FromAddress(slot.address());
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
@@ -5020,7 +5104,8 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
- if (!page->InNewSpace()) {
+ DCHECK(!page->IsLargePage());
+ if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
store_buffer()->DeleteEntry(start, end);
}
@@ -5247,11 +5332,11 @@ void Heap::UpdateTotalGCTime(double duration) {
}
}
-void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
+void Heap::ExternalStringTable::CleanUpYoung() {
int last = 0;
Isolate* isolate = heap_->isolate();
- for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- Object o = new_space_strings_[i];
+ for (size_t i = 0; i < young_strings_.size(); ++i) {
+ Object o = young_strings_[i];
if (o->IsTheHole(isolate)) {
continue;
}
@@ -5259,21 +5344,21 @@ void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
// will be processed. Re-processing it will add a duplicate to the vector.
if (o->IsThinString()) continue;
DCHECK(o->IsExternalString());
- if (InNewSpace(o)) {
- new_space_strings_[last++] = o;
+ if (InYoungGeneration(o)) {
+ young_strings_[last++] = o;
} else {
- old_space_strings_.push_back(o);
+ old_strings_.push_back(o);
}
}
- new_space_strings_.resize(last);
+ young_strings_.resize(last);
}
void Heap::ExternalStringTable::CleanUpAll() {
- CleanUpNewSpaceStrings();
+ CleanUpYoung();
int last = 0;
Isolate* isolate = heap_->isolate();
- for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- Object o = old_space_strings_[i];
+ for (size_t i = 0; i < old_strings_.size(); ++i) {
+ Object o = old_strings_[i];
if (o->IsTheHole(isolate)) {
continue;
}
@@ -5281,10 +5366,10 @@ void Heap::ExternalStringTable::CleanUpAll() {
// will be processed. Re-processing it will add a duplicate to the vector.
if (o->IsThinString()) continue;
DCHECK(o->IsExternalString());
- DCHECK(!InNewSpace(o));
- old_space_strings_[last++] = o;
+ DCHECK(!InYoungGeneration(o));
+ old_strings_[last++] = o;
}
- old_space_strings_.resize(last);
+ old_strings_.resize(last);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -5293,20 +5378,20 @@ void Heap::ExternalStringTable::CleanUpAll() {
}
void Heap::ExternalStringTable::TearDown() {
- for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- Object o = new_space_strings_[i];
+ for (size_t i = 0; i < young_strings_.size(); ++i) {
+ Object o = young_strings_[i];
// Don't finalize thin strings.
if (o->IsThinString()) continue;
heap_->FinalizeExternalString(ExternalString::cast(o));
}
- new_space_strings_.clear();
- for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- Object o = old_space_strings_[i];
+ young_strings_.clear();
+ for (size_t i = 0; i < old_strings_.size(); ++i) {
+ Object o = old_strings_[i];
// Don't finalize thin strings.
if (o->IsThinString()) continue;
heap_->FinalizeExternalString(ExternalString::cast(o));
}
- old_space_strings_.clear();
+ old_strings_.clear();
}
@@ -5358,22 +5443,23 @@ void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
set_interpreter_entry_trampoline_for_profiling(code);
}
-void Heap::AddDirtyJSWeakFactory(
- JSWeakFactory weak_factory,
+void Heap::AddDirtyJSFinalizationGroup(
+ JSFinalizationGroup finalization_group,
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot) {
- DCHECK(dirty_js_weak_factories()->IsUndefined(isolate()) ||
- dirty_js_weak_factories()->IsJSWeakFactory());
- DCHECK(weak_factory->next()->IsUndefined(isolate()));
- DCHECK(!weak_factory->scheduled_for_cleanup());
- weak_factory->set_scheduled_for_cleanup(true);
- weak_factory->set_next(dirty_js_weak_factories());
- gc_notify_updated_slot(weak_factory,
- weak_factory.RawField(JSWeakFactory::kNextOffset),
- dirty_js_weak_factories());
- set_dirty_js_weak_factories(weak_factory);
+ DCHECK(dirty_js_finalization_groups()->IsUndefined(isolate()) ||
+ dirty_js_finalization_groups()->IsJSFinalizationGroup());
+ DCHECK(finalization_group->next()->IsUndefined(isolate()));
+ DCHECK(!finalization_group->scheduled_for_cleanup());
+ finalization_group->set_scheduled_for_cleanup(true);
+ finalization_group->set_next(dirty_js_finalization_groups());
+ gc_notify_updated_slot(
+ finalization_group,
+ finalization_group.RawField(JSFinalizationGroup::kNextOffset),
+ dirty_js_finalization_groups());
+ set_dirty_js_finalization_groups(finalization_group);
// Roots are rescanned after objects are moved, so no need to record a slot
- // for the root pointing to the first JSWeakFactory.
+ // for the root pointing to the first JSFinalizationGroup.
}
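// AddDirtyJSFinalizationGroup pushes onto an intrusive singly-linked
// list whose head is a GC root; only the next-slot write needs the
// gc_notify_updated_slot hook, since roots are rescanned after objects
// move. Simplified sketch with hypothetical types (the real code also
// asserts the group is not yet scheduled):
#include <functional>

struct GroupSketch {
  GroupSketch* next = nullptr;
  bool scheduled_for_cleanup = false;
};

GroupSketch* dirty_head = nullptr;  // stands in for the heap root

void AddDirtyGroup(GroupSketch* g,
                   const std::function<void(GroupSketch**)>& notify_slot) {
  g->scheduled_for_cleanup = true;
  g->next = dirty_head;   // push-front
  notify_slot(&g->next);  // let the caller record this slot write
  dirty_head = g;         // root write: no recorded slot required
}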
void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
@@ -5515,6 +5601,11 @@ void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
void VerifyPointersVisitor::VerifyPointers(HeapObject host,
MaybeObjectSlot start,
MaybeObjectSlot end) {
+ // If this DCHECK fires, then you probably added a pointer field
+ // to one of the objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
+ // this by moving that object to POINTER_VISITOR_ID_LIST.
+ DCHECK_EQ(ObjectFields::kMaybePointers,
+ Map::ObjectFieldsFrom(host->map()->visitor_id()));
VerifyPointersImpl(start, end);
}
@@ -5572,10 +5663,10 @@ bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
void Heap::CreateObjectStats() {
if (V8_LIKELY(FLAG_gc_stats == 0)) return;
if (!live_object_stats_) {
- live_object_stats_ = new ObjectStats(this);
+ live_object_stats_.reset(new ObjectStats(this));
}
if (!dead_object_stats_) {
- dead_object_stats_ = new ObjectStats(this);
+ dead_object_stats_.reset(new ObjectStats(this));
}
}
@@ -5676,7 +5767,7 @@ void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
int offset, int length) {
for (int i = 0; i < length; i++) {
- if (!InNewSpace(array->get(offset + i))) continue;
+ if (!InYoungGeneration(array->get(offset + i))) continue;
heap->store_buffer()->InsertEntry(
array->RawFieldOfElementAt(offset + i).address());
}
@@ -5684,7 +5775,7 @@ void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
HeapObject object) {
- DCHECK(InNewSpace(object));
+ DCHECK(InYoungGeneration(object));
Page* source_page = Page::FromHeapObject(host);
RelocInfo::Mode rmode = rinfo->rmode();
Address addr = rinfo->pc();
@@ -5751,7 +5842,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
const bool generation_consistency =
chunk->owner()->identity() != NEW_SPACE ||
- (chunk->InNewSpace() && slim_chunk->InNewSpace());
+ (chunk->InYoungGeneration() && slim_chunk->InYoungGeneration());
const bool marking_consistency =
!heap->incremental_marking()->IsMarking() ||
(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
@@ -5763,18 +5854,21 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
heap_internals::MemoryChunk::kMarkingBit,
"Incremental marking flag inconsistent");
-static_assert(MemoryChunk::Flag::IN_FROM_SPACE ==
- heap_internals::MemoryChunk::kFromSpaceBit,
- "From space flag inconsistent");
-static_assert(MemoryChunk::Flag::IN_TO_SPACE ==
- heap_internals::MemoryChunk::kToSpaceBit,
- "To space flag inconsistent");
+static_assert(MemoryChunk::Flag::FROM_PAGE ==
+ heap_internals::MemoryChunk::kFromPageBit,
+ "From page flag inconsistent");
+static_assert(MemoryChunk::Flag::TO_PAGE ==
+ heap_internals::MemoryChunk::kToPageBit,
+ "To page flag inconsistent");
static_assert(MemoryChunk::kFlagsOffset ==
heap_internals::MemoryChunk::kFlagsOffset,
"Flag offset inconsistent");
static_assert(MemoryChunk::kHeapOffset ==
heap_internals::MemoryChunk::kHeapOffset,
"Heap offset inconsistent");
+static_assert(MemoryChunk::kOwnerOffset ==
+ heap_internals::MemoryChunk::kOwnerOffset,
+ "Owner offset inconsistent");
void Heap::SetEmbedderStackStateForNextFinalizaton(
EmbedderHeapTracer::EmbedderStackState stack_state) {
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index d75d450c23..7f687e8fdf 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -58,7 +58,6 @@ class ArrayBufferCollector;
class ArrayBufferTracker;
class CodeLargeObjectSpace;
class ConcurrentMarking;
-class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
@@ -68,7 +67,7 @@ class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
-class JSWeakFactory;
+class JSFinalizationGroup;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
@@ -77,6 +76,7 @@ class ObjectIterator;
class ObjectStats;
class Page;
class PagedSpace;
+class ReadOnlyHeap;
class RootVisitor;
class ScavengeJob;
class Scavenger;
@@ -145,6 +145,8 @@ enum class YoungGenerationHandling {
// Also update src/tools/metrics/histograms/histograms.xml in chromium.
};
+enum class GCIdleTimeAction : uint8_t;
+
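// Note the shape of this forward declaration: an enum can only be
// forward-declared when its underlying type is fixed, so the header
// trades the old "class GCIdleTimeAction;" for an opaque enum
// declaration. Standalone demonstration:
#include <cstdint>

enum class ActionSketch : uint8_t;   // opaque declaration: size is known

ActionSketch Decide();               // already usable in signatures

enum class ActionSketch : uint8_t {  // the full definition, elsewhere
  kDone,
  kIncrementalStep,
  kFullGC,
};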
class AllocationResult {
public:
static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
@@ -236,10 +238,11 @@ class Heap {
#endif
// Semi-space size needs to be a multiple of page size.
- static const size_t kMinSemiSpaceSizeInKB =
- 1 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
- static const size_t kMaxSemiSpaceSizeInKB =
- 16 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
+ static const size_t kMinSemiSpaceSizeInKB = 512 * kPointerMultiplier;
+ static const size_t kMaxSemiSpaceSizeInKB = 8192 * kPointerMultiplier;
+
+ STATIC_ASSERT(kMinSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
+ STATIC_ASSERT(kMaxSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
@@ -379,10 +382,7 @@ class Heap {
bool IsImmovable(HeapObject object);
- bool IsLargeObject(HeapObject object);
- bool IsLargeMemoryChunk(MemoryChunk* chunk);
-
- bool IsInYoungGeneration(HeapObject object);
+ static bool IsLargeObject(HeapObject object);
// Trim the given array from the left. Note that this relocates the object
// start and hence is only valid if there is only a single reference to it.
@@ -512,8 +512,6 @@ class Heap {
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
- inline uint64_t HashSeed();
-
inline int NextScriptId();
inline int NextDebuggingId();
inline int GetNextTemplateSerialNumber();
@@ -583,7 +581,7 @@ class Heap {
// Prepares the heap, setting up memory areas that are needed in the isolate
// without actually creating any objects.
- void SetUp();
+ void SetUp(ReadOnlyHeap* ro_heap);
// (Re-)Initialize hash seed from flag or RNG.
void InitializeHashSeed();
@@ -629,14 +627,16 @@ class Heap {
// Getters to other components. ==============================================
// ===========================================================================
- GCTracer* tracer() { return tracer_; }
+ ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
- MemoryAllocator* memory_allocator() { return memory_allocator_; }
+ GCTracer* tracer() { return tracer_.get(); }
+
+ MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
inline Isolate* isolate();
MarkCompactCollector* mark_compact_collector() {
- return mark_compact_collector_;
+ return mark_compact_collector_.get();
}
MinorMarkCompactCollector* minor_mark_compact_collector() {
@@ -644,7 +644,7 @@ class Heap {
}
ArrayBufferCollector* array_buffer_collector() {
- return array_buffer_collector_;
+ return array_buffer_collector_.get();
}
// ===========================================================================
@@ -664,6 +664,7 @@ class Heap {
V8_INLINE void SetRootStringTable(StringTable value);
V8_INLINE void SetRootNoScriptSharedFunctionInfos(Object value);
V8_INLINE void SetMessageListeners(TemplateList value);
+ V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode);
// Set the stack limit in the roots table. Some architectures generate
// code that looks here, because it is faster than loading from the static
@@ -687,9 +688,9 @@ class Heap {
// See also: FLAG_interpreted_frames_native_stack.
void SetInterpreterEntryTrampolineForProfiling(Code code);
- // Add weak_factory into the dirty_js_weak_factories list.
- void AddDirtyJSWeakFactory(
- JSWeakFactory weak_factory,
+ // Add finalization_group into the dirty_js_finalization_groups list.
+ void AddDirtyJSFinalizationGroup(
+ JSFinalizationGroup finalization_group,
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot);
@@ -834,13 +835,15 @@ class Heap {
Reservation* reservations, const std::vector<HeapObject>& large_objects,
const std::vector<Address>& maps);
- IncrementalMarking* incremental_marking() { return incremental_marking_; }
+ IncrementalMarking* incremental_marking() {
+ return incremental_marking_.get();
+ }
// ===========================================================================
// Concurrent marking API. ===================================================
// ===========================================================================
- ConcurrentMarking* concurrent_marking() { return concurrent_marking_; }
+ ConcurrentMarking* concurrent_marking() { return concurrent_marking_.get(); }
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
@@ -878,7 +881,7 @@ class Heap {
// ===========================================================================
LocalEmbedderHeapTracer* local_embedder_heap_tracer() const {
- return local_embedder_heap_tracer_;
+ return local_embedder_heap_tracer_.get();
}
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
@@ -897,14 +900,14 @@ class Heap {
// Called when a string's resource is changed. The size of the payload is sent
// as argument of the method.
- inline void UpdateExternalString(String string, size_t old_payload,
- size_t new_payload);
+ void UpdateExternalString(String string, size_t old_payload,
+ size_t new_payload);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
inline void FinalizeExternalString(String string);
- static String UpdateNewSpaceReferenceInExternalStringTableEntry(
+ static String UpdateYoungReferenceInExternalStringTableEntry(
Heap* heap, FullObjectSlot pointer);
// ===========================================================================
@@ -912,15 +915,15 @@ class Heap {
// ===========================================================================
// Returns whether the object resides in the young generation.
- static inline bool InNewSpace(Object object);
- static inline bool InNewSpace(MaybeObject object);
- static inline bool InNewSpace(HeapObject heap_object);
- static inline bool InFromSpace(Object object);
- static inline bool InFromSpace(MaybeObject object);
- static inline bool InFromSpace(HeapObject heap_object);
- static inline bool InToSpace(Object object);
- static inline bool InToSpace(MaybeObject object);
- static inline bool InToSpace(HeapObject heap_object);
+ static inline bool InYoungGeneration(Object object);
+ static inline bool InYoungGeneration(MaybeObject object);
+ static inline bool InYoungGeneration(HeapObject heap_object);
+ static inline bool InFromPage(Object object);
+ static inline bool InFromPage(MaybeObject object);
+ static inline bool InFromPage(HeapObject heap_object);
+ static inline bool InToPage(Object object);
+ static inline bool InToPage(MaybeObject object);
+ static inline bool InToPage(HeapObject heap_object);
// Returns whether the object resides in old space.
inline bool InOldSpace(Object object);
@@ -1053,7 +1056,7 @@ class Heap {
return semi_space_copied_object_size_;
}
- inline size_t SurvivedNewSpaceObjectSize() {
+ inline size_t SurvivedYoungObjectSize() {
return promoted_objects_size_ + semi_space_copied_object_size_;
}
@@ -1295,32 +1298,32 @@ class Heap {
bool Contains(String string);
void IterateAll(RootVisitor* v);
- void IterateNewSpaceStrings(RootVisitor* v);
- void PromoteAllNewSpaceStrings();
+ void IterateYoung(RootVisitor* v);
+ void PromoteYoung();
// Restores internal invariant and gets rid of collected strings. Must be
// called after each Iterate*() that modified the strings.
void CleanUpAll();
- void CleanUpNewSpaceStrings();
+ void CleanUpYoung();
// Finalize all registered external strings and clear tables.
void TearDown();
- void UpdateNewSpaceReferences(
+ void UpdateYoungReferences(
Heap::ExternalStringTableUpdaterCallback updater_func);
void UpdateReferences(
Heap::ExternalStringTableUpdaterCallback updater_func);
private:
void Verify();
- void VerifyNewSpace();
+ void VerifyYoung();
Heap* const heap_;
- // To speed up scavenge collections new space string are kept
- // separate from old space strings.
- std::vector<Object> new_space_strings_;
- std::vector<Object> old_space_strings_;
+ // To speed up scavenge collections, young strings are kept separate from old
+ // strings.
+ std::vector<Object> young_strings_;
+ std::vector<Object> old_strings_;
DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};
@@ -1377,6 +1380,7 @@ class Heap {
static const int kInitialFeedbackCapacity = 256;
Heap();
+ ~Heap();
// Selects the proper allocation space based on the pretenuring decision.
static AllocationSpace SelectSpace(PretenureFlag pretenure) {
@@ -1392,6 +1396,36 @@ class Heap {
}
}
+ // TODO(hpayer): Remove this translation function as soon as all code is
+ // converted to use AllocationType. Also remove PretenureFlag and use
+ // AllocationType instead.
+ static AllocationType SelectType(AllocationSpace space) {
+ switch (space) {
+ case NEW_SPACE:
+ return AllocationType::kYoung;
+ case NEW_LO_SPACE:
+ return AllocationType::kYoung;
+ case OLD_SPACE:
+ return AllocationType::kOld;
+ case LO_SPACE:
+ return AllocationType::kOld;
+ case CODE_SPACE:
+ return AllocationType::kCode;
+ case CODE_LO_SPACE:
+ return AllocationType::kCode;
+ case MAP_SPACE:
+ return AllocationType::kMap;
+ case RO_SPACE:
+ return AllocationType::kReadOnly;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ static bool IsRegularObjectAllocation(AllocationType type) {
+ return AllocationType::kYoung == type || AllocationType::kOld == type;
+ }
+
static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
return 0;
}
@@ -1400,7 +1434,7 @@ class Heap {
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
- StoreBuffer* store_buffer() { return store_buffer_; }
+ StoreBuffer* store_buffer() { return store_buffer_.get(); }
void set_current_gc_flags(int flags) {
current_gc_flags_ = flags;
@@ -1557,7 +1591,7 @@ class Heap {
void Scavenge();
void EvacuateYoungGeneration();
- void UpdateNewSpaceReferencesInExternalStringTable(
+ void UpdateYoungReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
void UpdateReferencesInExternalStringTable(
@@ -1621,8 +1655,8 @@ class Heap {
// Growing strategy. =========================================================
// ===========================================================================
- HeapController* heap_controller() { return heap_controller_; }
- MemoryReducer* memory_reducer() { return memory_reducer_; }
+ HeapController* heap_controller() { return heap_controller_.get(); }
+ MemoryReducer* memory_reducer() { return memory_reducer_.get(); }
// For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
// This constant limits the effect of load RAIL mode on GC.
@@ -1681,26 +1715,26 @@ class Heap {
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes, AllocationSpace space,
+ int size_in_bytes, AllocationType type,
AllocationAlignment aligment = kWordAligned);
- // This method will try to perform an allocation of a given size in a given
- // space. If the allocation fails, a regular full garbage collection is
- // triggered and the allocation is retried. This is performed multiple times.
- // If after that retry procedure the allocation still fails nullptr is
+ // This method will try to perform an allocation of a given size of a given
+ // AllocationType. If the allocation fails, a regular full garbage collection
+ // is triggered and the allocation is retried. This is performed multiple
+ // times. If after that retry procedure the allocation still fails nullptr is
// returned.
HeapObject AllocateRawWithLightRetry(
- int size, AllocationSpace space,
+ int size, AllocationType type,
AllocationAlignment alignment = kWordAligned);
- // This method will try to perform an allocation of a given size in a given
- // space. If the allocation fails, a regular full garbage collection is
- // triggered and the allocation is retried. This is performed multiple times.
- // If after that retry procedure the allocation still fails a "hammer"
+ // This method will try to perform an allocation of a given size of a given
+ // AllocationType. If the allocation fails, a regular full garbage collection
+ // is triggered and the allocation is retried. This is performed multiple
+ // times. If after that retry procedure the allocation still fails a "hammer"
// garbage collection is triggered which tries to significantly reduce memory.
// If the allocation still fails after that a fatal error is thrown.
HeapObject AllocateRawWithRetryOrFail(
- int size, AllocationSpace space,
+ int size, AllocationType type,
AllocationAlignment alignment = kWordAligned);
HeapObject AllocateRawCodeInLargeObjectSpace(int size);
@@ -1788,6 +1822,8 @@ class Heap {
// and after context disposal.
int number_of_disposed_maps_ = 0;
+ ReadOnlyHeap* read_only_heap_ = nullptr;
+
NewSpace* new_space_ = nullptr;
OldSpace* old_space_ = nullptr;
CodeSpace* code_space_ = nullptr;
@@ -1900,23 +1936,23 @@ class Heap {
// Last time a garbage collection happened.
double last_gc_time_ = 0.0;
- GCTracer* tracer_ = nullptr;
- MarkCompactCollector* mark_compact_collector_ = nullptr;
+ std::unique_ptr<GCTracer> tracer_;
+ std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
- ScavengerCollector* scavenger_collector_ = nullptr;
- ArrayBufferCollector* array_buffer_collector_ = nullptr;
- MemoryAllocator* memory_allocator_ = nullptr;
- StoreBuffer* store_buffer_ = nullptr;
- HeapController* heap_controller_ = nullptr;
- IncrementalMarking* incremental_marking_ = nullptr;
- ConcurrentMarking* concurrent_marking_ = nullptr;
- GCIdleTimeHandler* gc_idle_time_handler_ = nullptr;
- MemoryReducer* memory_reducer_ = nullptr;
- ObjectStats* live_object_stats_ = nullptr;
- ObjectStats* dead_object_stats_ = nullptr;
- ScavengeJob* scavenge_job_ = nullptr;
- AllocationObserver* idle_scavenge_observer_ = nullptr;
- LocalEmbedderHeapTracer* local_embedder_heap_tracer_ = nullptr;
+ std::unique_ptr<ScavengerCollector> scavenger_collector_;
+ std::unique_ptr<ArrayBufferCollector> array_buffer_collector_;
+ std::unique_ptr<MemoryAllocator> memory_allocator_;
+ std::unique_ptr<StoreBuffer> store_buffer_;
+ std::unique_ptr<HeapController> heap_controller_;
+ std::unique_ptr<IncrementalMarking> incremental_marking_;
+ std::unique_ptr<ConcurrentMarking> concurrent_marking_;
+ std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
+ std::unique_ptr<MemoryReducer> memory_reducer_;
+ std::unique_ptr<ObjectStats> live_object_stats_;
+ std::unique_ptr<ObjectStats> dead_object_stats_;
+ std::unique_ptr<ScavengeJob> scavenge_job_;
+ std::unique_ptr<AllocationObserver> idle_scavenge_observer_;
+ std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
StrongRootsList* strong_roots_list_ = nullptr;
// This counter is increased before each GC and never reset.
@@ -2008,7 +2044,6 @@ class Heap {
friend class AlwaysAllocateScope;
friend class ArrayBufferCollector;
friend class ConcurrentMarking;
- friend class EphemeronHashTableMarkingTask;
friend class GCCallbacksScope;
friend class GCTracer;
friend class MemoryController;
@@ -2023,6 +2058,7 @@ class Heap {
friend class MarkCompactCollector;
friend class MarkCompactCollectorBase;
friend class MinorMarkCompactCollector;
+ friend class NewLargeObjectSpace;
friend class NewSpace;
friend class ObjectStatsCollector;
friend class Page;
@@ -2089,6 +2125,7 @@ class HeapStats {
class AlwaysAllocateScope {
public:
+ explicit inline AlwaysAllocateScope(Heap* heap);
explicit inline AlwaysAllocateScope(Isolate* isolate);
inline ~AlwaysAllocateScope();
@@ -2132,7 +2169,7 @@ class CodePageMemoryModificationScope {
// Disallow any GCs inside this scope, as a relocation of the underlying
// object would change the {MemoryChunk} that this scope targets.
- DISALLOW_HEAP_ALLOCATION(no_heap_allocation_);
+ DISALLOW_HEAP_ALLOCATION(no_heap_allocation_)
};
// Visitor class to verify interior pointers in spaces that do not contain
@@ -2232,7 +2269,7 @@ class HeapIterator {
private:
HeapObject NextObject();
- DISALLOW_HEAP_ALLOCATION(no_heap_allocation_);
+ DISALLOW_HEAP_ALLOCATION(no_heap_allocation_)
Heap* heap_;
HeapObjectsFiltering filtering_;
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 836b491d8f..fe90dafcfa 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -18,15 +18,16 @@ namespace internal {
class IncrementalMarkingJob::Task : public CancelableTask {
public:
- static void Step(Heap* heap,
- EmbedderHeapTracer::EmbedderStackState stack_state);
+ static StepResult Step(Heap* heap,
+ EmbedderHeapTracer::EmbedderStackState stack_state);
Task(Isolate* isolate, IncrementalMarkingJob* job,
- EmbedderHeapTracer::EmbedderStackState stack_state)
+ EmbedderHeapTracer::EmbedderStackState stack_state, TaskType task_type)
: CancelableTask(isolate),
isolate_(isolate),
job_(job),
- stack_state_(stack_state) {}
+ stack_state_(stack_state),
+ task_type_(task_type) {}
// CancelableTask overrides.
void RunInternal() override;
@@ -37,6 +38,7 @@ class IncrementalMarkingJob::Task : public CancelableTask {
Isolate* const isolate_;
IncrementalMarkingJob* const job_;
const EmbedderHeapTracer::EmbedderStackState stack_state_;
+ const TaskType task_type_;
};
void IncrementalMarkingJob::Start(Heap* heap) {
@@ -44,30 +46,46 @@ void IncrementalMarkingJob::Start(Heap* heap) {
ScheduleTask(heap);
}
-void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
- if (!task_pending_ && !heap->IsTearingDown()) {
+void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
+ if (!IsTaskPending(task_type) && !heap->IsTearingDown()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
- task_pending_ = true;
+ SetTaskPending(task_type, true);
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
- if (taskrunner->NonNestableTasksEnabled()) {
- taskrunner->PostNonNestableTask(base::make_unique<Task>(
- heap->isolate(), this,
- EmbedderHeapTracer::EmbedderStackState::kEmpty));
+ if (task_type == TaskType::kNormal) {
+ if (taskrunner->NonNestableTasksEnabled()) {
+ taskrunner->PostNonNestableTask(base::make_unique<Task>(
+ heap->isolate(), this,
+ EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type));
+ } else {
+ taskrunner->PostTask(base::make_unique<Task>(
+ heap->isolate(), this,
+ EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type));
+ }
} else {
- taskrunner->PostTask(base::make_unique<Task>(
- heap->isolate(), this,
- EmbedderHeapTracer::EmbedderStackState::kUnknown));
+ if (taskrunner->NonNestableDelayedTasksEnabled()) {
+ taskrunner->PostNonNestableDelayedTask(
+ base::make_unique<Task>(
+ heap->isolate(), this,
+ EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type),
+ kDelayInSeconds);
+ } else {
+ taskrunner->PostDelayedTask(
+ base::make_unique<Task>(
+ heap->isolate(), this,
+ EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type),
+ kDelayInSeconds);
+ }
}
}
}
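// The scheduling above now has two flavors: a step that reports no
// immediate work is re-posted with a small delay (kDelayInSeconds) so an
// idle marker stops busy-looping, while productive steps re-post
// immediately. Decision sketch (the kMoreWork name is illustrative):
enum class StepResultSketch { kNoImmediateWork, kMoreWork };
enum class TaskTypeSketch { kNormal, kDelayed };

TaskTypeSketch NextTaskType(StepResultSketch r) {
  return r == StepResultSketch::kNoImmediateWork ? TaskTypeSketch::kDelayed
                                                 : TaskTypeSketch::kNormal;
}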
-void IncrementalMarkingJob::Task::Step(
+StepResult IncrementalMarkingJob::Task::Step(
Heap* heap, EmbedderHeapTracer::EmbedderStackState stack_state) {
const int kIncrementalMarkingDelayMs = 1;
double deadline =
heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
- heap->incremental_marking()->AdvanceIncrementalMarking(
+ StepResult result = heap->incremental_marking()->AdvanceWithDeadline(
deadline, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::StepOrigin::kTask);
{
@@ -76,6 +94,7 @@ void IncrementalMarkingJob::Task::Step(
heap->FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
}
+ return result;
}
void IncrementalMarkingJob::Task::RunInternal() {
@@ -95,12 +114,14 @@ void IncrementalMarkingJob::Task::RunInternal() {
// Clear this flag after the StartIncrementalMarking call to avoid
// scheduling a new task when starting incremental marking.
- job_->task_pending_ = false;
+ job_->SetTaskPending(task_type_, false);
if (!incremental_marking->IsStopped()) {
- Step(heap, stack_state_);
+ StepResult step_result = Step(heap, stack_state_);
if (!incremental_marking->IsStopped()) {
- job_->ScheduleTask(heap);
+ job_->ScheduleTask(heap, step_result == StepResult::kNoImmediateWork
+ ? TaskType::kDelayed
+ : TaskType::kNormal);
}
}
}
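
The rescheduling branch above drives the new delayed-task mode: a step that reports no immediate work is followed by a delayed task, anything else by a normal one. A minimal standalone sketch of that decision, using the enum names from this patch (the NextTaskType helper is illustrative, not part of V8):

    #include <cassert>

    enum class StepResult {
      kNoImmediateWork,
      kMoreWorkRemaining,
      kWaitingForFinalization
    };
    enum class TaskType { kNormal, kDelayed };

    // Mirrors the branch in Task::RunInternal(): no immediate work means the
    // next task can wait for kDelayInSeconds; otherwise keep marking promptly.
    TaskType NextTaskType(StepResult step_result) {
      return step_result == StepResult::kNoImmediateWork ? TaskType::kDelayed
                                                         : TaskType::kNormal;
    }

    int main() {
      assert(NextTaskType(StepResult::kNoImmediateWork) == TaskType::kDelayed);
      assert(NextTaskType(StepResult::kMoreWorkRemaining) == TaskType::kNormal);
      assert(NextTaskType(StepResult::kWaitingForFinalization) ==
             TaskType::kNormal);
    }
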
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
index a2202c7504..277a3955c0 100644
--- a/deps/v8/src/heap/incremental-marking-job.h
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -18,18 +18,32 @@ class Isolate;
// step and posts another task until the marking is completed.
class IncrementalMarkingJob {
public:
- IncrementalMarkingJob() = default;
+ enum class TaskType { kNormal, kDelayed };
- bool TaskPending() const { return task_pending_; }
+ IncrementalMarkingJob() V8_NOEXCEPT = default;
void Start(Heap* heap);
- void ScheduleTask(Heap* heap);
+ void ScheduleTask(Heap* heap, TaskType task_type = TaskType::kNormal);
private:
class Task;
-
- bool task_pending_ = false;
+ static constexpr double kDelayInSeconds = 10.0 / 1000.0;
+
+ bool IsTaskPending(TaskType task_type) {
+ return task_type == TaskType::kNormal ? normal_task_pending_
+ : delayed_task_pending_;
+ }
+ void SetTaskPending(TaskType task_type, bool value) {
+ if (task_type == TaskType::kNormal) {
+ normal_task_pending_ = value;
+ } else {
+ delayed_task_pending_ = value;
+ }
+ }
+
+ bool normal_task_pending_ = false;
+ bool delayed_task_pending_ = false;
};
} // namespace internal
} // namespace v8
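
The header now tracks one pending flag per task type, so at most one normal and one delayed task are in flight at a time. A toy sketch of that bookkeeping, assuming a ScheduleTask that returns whether it posted anything (V8's returns void and posts via the platform task runner):

    #include <cassert>

    enum class TaskType { kNormal, kDelayed };

    class Job {
     public:
      bool ScheduleTask(TaskType t) {        // true if a task was posted
        if (IsTaskPending(t)) return false;  // already one in flight
        SetTaskPending(t, true);             // ...PostTask(...) would go here
        return true;
      }
      void OnTaskRun(TaskType t) { SetTaskPending(t, false); }

     private:
      bool IsTaskPending(TaskType t) const {
        return t == TaskType::kNormal ? normal_task_pending_
                                      : delayed_task_pending_;
      }
      void SetTaskPending(TaskType t, bool v) {
        (t == TaskType::kNormal ? normal_task_pending_
                                : delayed_task_pending_) = v;
      }
      bool normal_task_pending_ = false;
      bool delayed_task_pending_ = false;
    };

    int main() {
      Job job;
      assert(job.ScheduleTask(TaskType::kNormal));
      assert(!job.ScheduleTask(TaskType::kNormal));  // duplicate suppressed
      assert(job.ScheduleTask(TaskType::kDelayed));  // independent slot
      job.OnTaskRun(TaskType::kNormal);
      assert(job.ScheduleTask(TaskType::kNormal));
    }
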
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index ea86e4f7c9..2a665394d3 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -17,9 +17,12 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/sweeper.h"
+#include "src/objects/data-handler-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
#include "src/tracing/trace-event.h"
+#include "src/transitions-inl.h"
#include "src/v8.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
@@ -39,7 +42,7 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
RuntimeCallTimerScope runtime_timer(
heap->isolate(),
RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
- incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
+ incremental_marking_.AdvanceOnAllocation();
// AdvanceOnAllocation can start incremental marking.
incremental_marking_.EnsureBlackAllocated(addr, size);
}
@@ -51,15 +54,15 @@ IncrementalMarking::IncrementalMarking(
marking_worklist_(marking_worklist),
weak_objects_(weak_objects),
initial_old_generation_size_(0),
- bytes_marked_ahead_of_schedule_(0),
+ bytes_marked_(0),
+ scheduled_bytes_to_mark_(0),
+ schedule_update_time_ms_(0),
bytes_marked_concurrently_(0),
- unscanned_bytes_of_large_object_(0),
is_compacting_(false),
should_hurry_(false),
was_activated_(false),
black_allocation_(false),
finalize_marking_completed_(false),
- trace_wrappers_toggle_(false),
request_type_(NONE),
new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
@@ -209,6 +212,11 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
+ for (LargePage* p : *heap_->new_lo_space()) {
+ p->SetYoungGenerationPageFlags(false);
+ DCHECK(p->IsLargePage());
+ }
+
for (LargePage* p : *heap_->lo_space()) {
p->SetOldGenerationPageFlags(false);
}
@@ -239,6 +247,11 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
ActivateIncrementalWriteBarrier(heap_->code_space());
ActivateIncrementalWriteBarrier(heap_->new_space());
+ for (LargePage* p : *heap_->new_lo_space()) {
+ p->SetYoungGenerationPageFlags(true);
+ DCHECK(p->IsLargePage());
+ }
+
for (LargePage* p : *heap_->lo_space()) {
p->SetOldGenerationPageFlags(true);
}
@@ -297,8 +310,9 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
- bytes_allocated_ = 0;
- bytes_marked_ahead_of_schedule_ = 0;
+ bytes_marked_ = 0;
+ scheduled_bytes_to_mark_ = 0;
+ schedule_update_time_ms_ = start_time_ms_;
bytes_marked_concurrently_ = 0;
should_hurry_ = false;
was_activated_ = true;
@@ -340,12 +354,6 @@ void IncrementalMarking::StartMarking() {
SetState(MARKING);
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
- heap_->local_embedder_heap_tracer()->TracePrologue();
- }
-
ActivateIncrementalWriteBarrier();
// Marking bits are cleared by the sweeper.
@@ -371,6 +379,14 @@ void IncrementalMarking::StartMarking() {
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
}
+
+ {
+ // TracePrologue may call back into V8 in corner cases, requiring that
+ // marking (including write barriers) is fully set up.
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
+ heap_->local_embedder_heap_tracer()->TracePrologue();
+ }
}
void IncrementalMarking::StartBlackAllocation() {
@@ -411,9 +427,7 @@ void IncrementalMarking::FinishBlackAllocation() {
void IncrementalMarking::EnsureBlackAllocated(Address allocated, size_t size) {
if (black_allocation() && allocated != kNullAddress) {
HeapObject object = HeapObject::FromAddress(allocated);
- if (marking_state()->IsWhite(object) &&
- !(Heap::InNewSpace(object) ||
- heap_->new_lo_space()->Contains(object))) {
+ if (marking_state()->IsWhite(object) && !Heap::InYoungGeneration(object)) {
if (heap_->IsLargeObject(object)) {
marking_state()->WhiteToBlack(object);
} else {
@@ -542,7 +556,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
HeapObject obj, HeapObject* out) -> bool {
DCHECK(obj->IsHeapObject());
// Only pointers to from space have to be updated.
- if (Heap::InFromSpace(obj)) {
+ if (Heap::InFromPage(obj)) {
MapWord map_word = obj->map_word();
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist anymore,
@@ -556,27 +570,30 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
*out = dest;
return true;
- } else if (Heap::InToSpace(obj)) {
- // The object may be on a page that was moved in new space.
- DCHECK(Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
+ } else if (Heap::InToPage(obj)) {
+ // The object may be on a large page or on a page that was moved in new
+ // space.
+ DCHECK(Heap::IsLargeObject(obj) ||
+ Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
#ifdef ENABLE_MINOR_MC
- if (minor_marking_state->IsGrey(obj)) {
- *out = obj;
- return true;
+ if (minor_marking_state->IsWhite(obj)) {
+ return false;
}
#endif // ENABLE_MINOR_MC
- return false;
+ // Either a large object or an object marked by the minor mark-compactor.
+ *out = obj;
+ return true;
} else {
// The object may be on a page that was moved from new to old space. Only
// applicable during minor MC garbage collections.
if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
#ifdef ENABLE_MINOR_MC
- if (minor_marking_state->IsGrey(obj)) {
- *out = obj;
- return true;
+ if (minor_marking_state->IsWhite(obj)) {
+ return false;
}
#endif // ENABLE_MINOR_MC
- return false;
+ *out = obj;
+ return true;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
// Skip one word filler objects that appear on the
@@ -599,9 +616,10 @@ T ForwardingAddress(T heap_obj) {
if (map_word.IsForwardingAddress()) {
return T::cast(map_word.ToForwardingAddress());
- } else if (Heap::InNewSpace(heap_obj)) {
+ } else if (Heap::InFromPage(heap_obj)) {
return T();
} else {
+ // TODO(ulan): Support minor mark-compactor here.
return heap_obj;
}
}
@@ -668,10 +686,22 @@ void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
weak_objects_->current_ephemerons.Update(ephemeron_updater);
weak_objects_->next_ephemerons.Update(ephemeron_updater);
weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
+
+ weak_objects_->flushed_js_functions.Update(
+ [](JSFunction slot_in, JSFunction* slot_out) -> bool {
+ JSFunction forwarded = ForwardingAddress(slot_in);
+
+ if (!forwarded.is_null()) {
+ *slot_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
#ifdef DEBUG
weak_objects_->bytecode_flushing_candidates.Iterate(
[](SharedFunctionInfo candidate) {
- DCHECK(!Heap::InNewSpace(candidate));
+ DCHECK(!Heap::InYoungGeneration(candidate));
});
#endif
}
@@ -679,8 +709,7 @@ void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
size_t dead_bytes_in_new_space) {
if (!IsMarking()) return;
- bytes_marked_ahead_of_schedule_ -=
- Min(bytes_marked_ahead_of_schedule_, dead_bytes_in_new_space);
+ bytes_marked_ -= Min(bytes_marked_, dead_bytes_in_new_space);
}
bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject obj) {
@@ -722,7 +751,8 @@ void IncrementalMarking::RevisitObject(HeapObject obj) {
DCHECK(IsMarking());
DCHECK(marking_state()->IsBlack(obj));
Page* page = Page::FromHeapObject(obj);
- if (page->owner()->identity() == LO_SPACE) {
+ if (page->owner()->identity() == LO_SPACE ||
+ page->owner()->identity() == NEW_LO_SPACE) {
page->ResetProgressBar();
}
Map map = obj->map();
@@ -756,14 +786,14 @@ intptr_t IncrementalMarking::ProcessMarkingWorklist(
DCHECK(!marking_state()->IsImpossible(obj));
continue;
}
- unscanned_bytes_of_large_object_ = 0;
- int size = VisitObject(obj->map(), obj);
- bytes_processed += size - unscanned_bytes_of_large_object_;
+ bytes_processed += VisitObject(obj->map(), obj);
}
return bytes_processed;
}
-void IncrementalMarking::EmbedderStep(double duration_ms) {
+StepResult IncrementalMarking::EmbedderStep(double duration_ms) {
+ if (!ShouldDoEmbedderStep()) return StepResult::kNoImmediateWork;
+
constexpr size_t kObjectsToProcessBeforeInterrupt = 500;
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
@@ -789,6 +819,8 @@ void IncrementalMarking::EmbedderStep(double duration_ms) {
} while (!empty_worklist &&
(heap_->MonotonicallyIncreasingTimeInMs() < deadline));
heap_->local_embedder_heap_tracer()->SetEmbedderWorklistEmpty(empty_worklist);
+ return empty_worklist ? StepResult::kNoImmediateWork
+ : StepResult::kMoreWorkRemaining;
}
void IncrementalMarking::Hurry() {
@@ -903,7 +935,58 @@ bool IncrementalMarking::ShouldDoEmbedderStep() {
heap_->local_embedder_heap_tracer()->InUse();
}
-double IncrementalMarking::AdvanceIncrementalMarking(
+void IncrementalMarking::FastForwardSchedule() {
+ if (scheduled_bytes_to_mark_ < bytes_marked_) {
+ scheduled_bytes_to_mark_ = bytes_marked_;
+ if (FLAG_trace_incremental_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Fast-forwarded schedule\n");
+ }
+ }
+}
+
+void IncrementalMarking::FastForwardScheduleIfCloseToFinalization() {
+ // Consider marking close to finalization if 75% of the initial old
+ // generation was marked.
+ if (bytes_marked_ > 3 * (initial_old_generation_size_ / 4)) {
+ FastForwardSchedule();
+ }
+}
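
FastForwardSchedule simply raises the schedule to whatever has already been marked, and the 75% heuristic above triggers it near finalization. The same logic in isolation, with arbitrary unit sizes:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    size_t bytes_marked = 0;
    size_t scheduled_bytes_to_mark = 0;

    // Raises the schedule so the marker is no longer ahead of it.
    void FastForwardSchedule() {
      scheduled_bytes_to_mark = std::max(scheduled_bytes_to_mark, bytes_marked);
    }

    int main() {
      const size_t initial_old_generation_size = 100;  // arbitrary units
      bytes_marked = 80;             // 80% of the initial old generation
      scheduled_bytes_to_mark = 50;  // marker is well ahead of schedule
      // 80 > 3 * (100 / 4): considered close to finalization.
      if (bytes_marked > 3 * (initial_old_generation_size / 4))
        FastForwardSchedule();
      assert(scheduled_bytes_to_mark == 80);
    }
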
+
+void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
+ // Time interval that should be sufficient to complete incremental marking.
+ constexpr double kTargetMarkingWallTimeInMs = 500;
+ constexpr double kMinTimeBetweenScheduleInMs = 10;
+ if (schedule_update_time_ms_ + kMinTimeBetweenScheduleInMs > time_ms) return;
+ double delta_ms =
+ Min(time_ms - schedule_update_time_ms_, kTargetMarkingWallTimeInMs);
+ schedule_update_time_ms_ = time_ms;
+
+ size_t bytes_to_mark =
+ (delta_ms / kTargetMarkingWallTimeInMs) * initial_old_generation_size_;
+ AddScheduledBytesToMark(bytes_to_mark);
+
+ if (FLAG_trace_incremental_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Scheduled %" PRIuS
+ "KB to mark based on time delta %.1fms\n",
+ bytes_to_mark / KB, delta_ms);
+ }
+}
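
The scheduler above aims to mark the whole initial old generation within kTargetMarkingWallTimeInMs of wall time, so each elapsed interval schedules a proportional slice. A worked example of that arithmetic, assuming a 64 MB initial old generation:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr double kTargetMarkingWallTimeInMs = 500;  // from the patch
      const size_t initial_old_generation_size = 64u * 1024 * 1024;  // assumed
      // 25ms elapsed since the last schedule update (clamped to the target).
      double delta_ms = std::min(25.0, kTargetMarkingWallTimeInMs);
      size_t bytes_to_mark = static_cast<size_t>(
          (delta_ms / kTargetMarkingWallTimeInMs) * initial_old_generation_size);
      // 25 / 500 = 5% of 64 MB, i.e. about 3276 KB scheduled this interval.
      std::printf("%zu KB\n", bytes_to_mark / 1024);
    }
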
+
+namespace {
+StepResult CombineStepResults(StepResult a, StepResult b) {
+ if (a == StepResult::kMoreWorkRemaining ||
+ b == StepResult::kMoreWorkRemaining)
+ return StepResult::kMoreWorkRemaining;
+ if (a == StepResult::kWaitingForFinalization ||
+ b == StepResult::kWaitingForFinalization)
+ return StepResult::kWaitingForFinalization;
+ return StepResult::kNoImmediateWork;
+}
+} // anonymous namespace
+
+StepResult IncrementalMarking::AdvanceWithDeadline(
double deadline_in_ms, CompletionAction completion_action,
StepOrigin step_origin) {
HistogramTimerScope incremental_marking_scope(
@@ -912,26 +995,26 @@ double IncrementalMarking::AdvanceIncrementalMarking(
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
DCHECK(!IsStopped());
+ ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
+ FastForwardScheduleIfCloseToFinalization();
+
double remaining_time_in_ms = 0.0;
+ StepResult result;
do {
- if (ShouldDoEmbedderStep() && trace_wrappers_toggle_) {
- EmbedderStep(kStepSizeInMs);
- } else {
- const intptr_t step_size_in_bytes =
- GCIdleTimeHandler::EstimateMarkingStepSize(
- kStepSizeInMs,
- heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
- Step(step_size_in_bytes, completion_action, step_origin);
- }
- trace_wrappers_toggle_ = !trace_wrappers_toggle_;
+ StepResult v8_result =
+ V8Step(kStepSizeInMs / 2, completion_action, step_origin);
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
- } while (remaining_time_in_ms > kStepSizeInMs && !IsComplete() &&
- !marking_worklist()->IsEmpty());
- return remaining_time_in_ms;
+ StepResult embedder_result =
+ EmbedderStep(Min(kStepSizeInMs, remaining_time_in_ms));
+ result = CombineStepResults(v8_result, embedder_result);
+ remaining_time_in_ms =
+ deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
+ } while (remaining_time_in_ms >= kStepSizeInMs &&
+ result == StepResult::kMoreWorkRemaining);
+ return result;
}
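
AdvanceWithDeadline now alternates V8 and embedder steps and stops as soon as either the time budget runs out or the combined step result says there is nothing pressing left. A runnable sketch of just that control flow; the fake step function and workload below stand in for V8Step/EmbedderStep:

    #include <algorithm>
    #include <chrono>
    #include <cstdio>

    enum class StepResult {
      kNoImmediateWork,
      kMoreWorkRemaining,
      kWaitingForFinalization
    };

    // Same precedence as CombineStepResults in the patch.
    StepResult Combine(StepResult a, StepResult b) {
      if (a == StepResult::kMoreWorkRemaining ||
          b == StepResult::kMoreWorkRemaining)
        return StepResult::kMoreWorkRemaining;
      if (a == StepResult::kWaitingForFinalization ||
          b == StepResult::kWaitingForFinalization)
        return StepResult::kWaitingForFinalization;
      return StepResult::kNoImmediateWork;
    }

    double NowMs() {
      using namespace std::chrono;
      return duration<double, std::milli>(
                 steady_clock::now().time_since_epoch()).count();
    }

    int work_units = 10;  // pretend marking workload drained by the fake step

    StepResult FakeStep(double /*max_ms*/) {
      if (work_units > 0) --work_units;
      return work_units > 0 ? StepResult::kMoreWorkRemaining
                            : StepResult::kNoImmediateWork;
    }

    StepResult AdvanceWithDeadline(double deadline_in_ms) {
      constexpr double kStepSizeInMs = 1;
      StepResult result;
      double remaining_ms = 0.0;
      do {
        // Each iteration splits its budget between a V8 and an embedder step.
        StepResult v8_result = FakeStep(kStepSizeInMs / 2);
        remaining_ms = deadline_in_ms - NowMs();
        StepResult embedder_result =
            FakeStep(std::min(kStepSizeInMs, remaining_ms));
        result = Combine(v8_result, embedder_result);
        remaining_ms = deadline_in_ms - NowMs();
      } while (remaining_ms >= kStepSizeInMs &&
               result == StepResult::kMoreWorkRemaining);
      return result;
    }

    int main() {
      AdvanceWithDeadline(NowMs() + 5.0);
      std::printf("remaining work units: %d\n", work_units);
    }
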
-
void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
@@ -950,9 +1033,9 @@ void IncrementalMarking::FinalizeSweeping() {
size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
// Update bytes_allocated_ based on the allocation counter.
size_t current_counter = heap_->OldGenerationAllocationCounter();
- bytes_allocated_ += current_counter - old_generation_allocation_counter_;
+ size_t result = current_counter - old_generation_allocation_counter_;
old_generation_allocation_counter_ = current_counter;
- return bytes_allocated_;
+ return result;
}
size_t IncrementalMarking::StepSizeToMakeProgress() {
@@ -970,72 +1053,89 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
kMaxStepSizeInByte);
}
-void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
- // Code using an AlwaysAllocateScope assumes that the GC state does not
- // change; that implies that no marking steps must be performed.
- if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
- (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
- return;
+void IncrementalMarking::AddScheduledBytesToMark(size_t bytes_to_mark) {
+ if (scheduled_bytes_to_mark_ + bytes_to_mark < scheduled_bytes_to_mark_) {
+ // The overflow case.
+ scheduled_bytes_to_mark_ = std::numeric_limits<std::size_t>::max();
+ } else {
+ scheduled_bytes_to_mark_ += bytes_to_mark;
}
+}
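
AddScheduledBytesToMark guards against size_t wraparound by clamping to the maximum value. The same saturating add in isolation:

    #include <cassert>
    #include <cstddef>
    #include <limits>

    // On unsigned overflow the sum wraps below the first operand, so the
    // schedule clamps to SIZE_MAX instead of wrapping around.
    size_t SaturatingAdd(size_t a, size_t b) {
      if (a + b < a) return std::numeric_limits<size_t>::max();
      return a + b;
    }

    int main() {
      const size_t max = std::numeric_limits<size_t>::max();
      assert(SaturatingAdd(max - 1, 5) == max);
      assert(SaturatingAdd(1000, 24) == 1024);
    }
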
- HistogramTimerScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking());
- TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
- TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
+void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
+ size_t progress_bytes = StepSizeToMakeProgress();
+ size_t allocation_bytes = StepSizeToKeepUpWithAllocations();
+ size_t bytes_to_mark = progress_bytes + allocation_bytes;
+ AddScheduledBytesToMark(bytes_to_mark);
- double embedder_step_time_ms = 0.0;
- if (ShouldDoEmbedderStep() && trace_wrappers_toggle_) {
- double start = heap_->MonotonicallyIncreasingTimeInMs();
- EmbedderStep(kMaxStepSizeInMs);
- embedder_step_time_ms = heap_->MonotonicallyIncreasingTimeInMs() - start;
- }
- trace_wrappers_toggle_ = !trace_wrappers_toggle_;
-
- size_t bytes_to_process =
- StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
- if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes &&
- embedder_step_time_ms < kMaxStepSizeInMs) {
- StepOnAllocation(bytes_to_process,
- kMaxStepSizeInMs - embedder_step_time_ms);
+ if (FLAG_trace_incremental_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Scheduled %" PRIuS
+ "KB to mark based on allocation (progress="
+ "%" PRIuS "KB, allocation=%" PRIuS "KB)\n",
+ bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
}
}
-void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
- double max_step_size) {
- // The first step after Scavenge will see many allocated bytes.
- // Cap the step size to distribute the marking work more uniformly.
- size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
- max_step_size,
- heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
- bytes_to_process = Min(bytes_to_process, step_size);
- size_t bytes_processed = 0;
+void IncrementalMarking::FetchBytesMarkedConcurrently() {
if (FLAG_concurrent_marking) {
size_t current_bytes_marked_concurrently =
heap()->concurrent_marking()->TotalMarkedBytes();
// The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
// short period of time when a concurrent marking task is finishing.
if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
- bytes_marked_ahead_of_schedule_ +=
+ bytes_marked_ +=
current_bytes_marked_concurrently - bytes_marked_concurrently_;
bytes_marked_concurrently_ = current_bytes_marked_concurrently;
}
+ if (FLAG_trace_incremental_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Marked %" PRIuS "KB on background threads\n",
+ heap_->concurrent_marking()->TotalMarkedBytes() / KB);
+ }
}
- if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
- // Steps performed in tasks and concurrently have put us ahead of
- // schedule. We skip processing of marking dequeue here and thus shift
- // marking time from inside V8 to standalone tasks.
- bytes_marked_ahead_of_schedule_ -= bytes_to_process;
- bytes_processed += bytes_to_process;
- bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
+}
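
Because TotalMarkedBytes() can dip briefly while a concurrent task finishes, only forward movement is credited to bytes_marked_. A small model of that delta tracking:

    #include <cassert>
    #include <cstddef>

    size_t bytes_marked = 0;
    size_t last_sample = 0;

    // Credits only forward movement of the concurrent counter; a transient
    // dip is ignored, and the next higher sample accounts for the difference.
    void Fetch(size_t current_total) {
      if (current_total > last_sample) {
        bytes_marked += current_total - last_sample;
        last_sample = current_total;
      }
    }

    int main() {
      Fetch(100);  // +100
      Fetch(90);   // transient dip: ignored
      Fetch(150);  // +50 relative to the last credited sample
      assert(bytes_marked == 150);
    }
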
+
+size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
+ FetchBytesMarkedConcurrently();
+ if (FLAG_trace_incremental_marking) {
+ if (scheduled_bytes_to_mark_ > bytes_marked_) {
+ heap_->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Marker is %" PRIuS "KB behind schedule\n",
+ (scheduled_bytes_to_mark_ - bytes_marked_) / KB);
+ } else {
+ heap_->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Marker is %" PRIuS "KB ahead of schedule\n",
+ (bytes_marked_ - scheduled_bytes_to_mark_) / KB);
+ }
}
- bytes_processed +=
- Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8);
- bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
+ // Allow steps on allocation to get behind the schedule by a small amount.
+ // This gives higher priority to steps in tasks.
+ size_t kScheduleMarginInBytes = step_origin == StepOrigin::kV8 ? 1 * MB : 0;
+ if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
+ return 0;
+ return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
}
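
ComputeStepSizeInBytes turns the schedule into a concrete step size: the marker only steps when it is behind schedule, and allocation-origin steps may lag by an extra 1 MB margin so that task-origin steps do most of the work. A worked example of that math (the standalone function shape is illustrative):

    #include <cstddef>
    #include <cstdio>

    // Bytes to mark now, given progress, schedule, and the step's origin.
    size_t ComputeStepSize(size_t bytes_marked, size_t scheduled,
                           bool v8_origin) {
      const size_t kMB = 1024 * 1024;
      size_t margin = v8_origin ? 1 * kMB : 0;  // only kV8 steps get slack
      if (bytes_marked + margin > scheduled) return 0;  // within the margin
      return scheduled - bytes_marked - margin;
    }

    int main() {
      const size_t kMB = 1024 * 1024;
      // 2 MB behind schedule: a task steps the full 2 MB, an allocation
      // step only 1 MB because of the margin.
      std::printf("task: %zu KB\n",
                  ComputeStepSize(8 * kMB, 10 * kMB, false) / 1024);
      std::printf("allocation: %zu KB\n",
                  ComputeStepSize(8 * kMB, 10 * kMB, true) / 1024);
    }
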
-size_t IncrementalMarking::Step(size_t bytes_to_process,
- CompletionAction action,
- StepOrigin step_origin) {
+void IncrementalMarking::AdvanceOnAllocation() {
+ // Code using an AlwaysAllocateScope assumes that the GC state does not
+ // change; that implies that no marking steps must be performed.
+ if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
+ (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
+ return;
+ }
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
+ ScheduleBytesToMarkBasedOnAllocation();
+ V8Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
+}
+
+StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
+ CompletionAction action,
+ StepOrigin step_origin) {
+ StepResult result = StepResult::kMoreWorkRemaining;
double start = heap_->MonotonicallyIncreasingTimeInMs();
if (state_ == SWEEPING) {
@@ -1043,10 +1143,11 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
FinalizeSweeping();
}
- size_t bytes_processed = 0;
+ size_t bytes_processed = 0, bytes_to_process = 0;
if (state_ == MARKING) {
if (FLAG_concurrent_marking) {
heap_->new_space()->ResetOriginalTop();
+ heap_->new_lo_space()->ResetPendingObject();
// It is safe to merge back all objects that were on hold to the shared
// work list at Step because we are at a safepoint where all objects
// are properly initialized.
@@ -1061,20 +1162,38 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
marking_worklist()->Print();
}
#endif
+ if (FLAG_trace_incremental_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Marking speed %.fKB/ms\n",
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ }
+ // The first step after Scavenge will see many allocated bytes.
+ // Cap the step size to distribute the marking work more uniformly.
+ size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ max_step_size_in_ms,
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ bytes_to_process = Min(ComputeStepSizeInBytes(step_origin), max_step_size);
+ if (bytes_to_process == 0) {
+ result = StepResult::kNoImmediateWork;
+ }
- bytes_processed = ProcessMarkingWorklist(bytes_to_process);
+ bytes_processed =
+ ProcessMarkingWorklist(Max(bytes_to_process, kMinStepSizeInBytes));
- if (step_origin == StepOrigin::kTask) {
- bytes_marked_ahead_of_schedule_ += bytes_processed;
- }
+ bytes_marked_ += bytes_processed;
if (marking_worklist()->IsEmpty()) {
+ result = StepResult::kNoImmediateWork;
if (heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
if (!finalize_marking_completed_) {
FinalizeMarking(action);
+ FastForwardSchedule();
+ result = StepResult::kWaitingForFinalization;
+ incremental_marking_job()->Start(heap_);
} else {
MarkingComplete(action);
+ result = StepResult::kWaitingForFinalization;
}
} else {
heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
@@ -1082,6 +1201,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
}
}
if (FLAG_concurrent_marking) {
+ marking_worklist()->ShareWorkIfGlobalPoolIsEmpty();
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
@@ -1097,12 +1217,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
step_origin == StepOrigin::kV8 ? "in v8" : "in task",
bytes_processed / KB, bytes_to_process / KB, duration);
}
- if (FLAG_trace_concurrent_marking) {
- heap_->isolate()->PrintWithTimestamp(
- "Concurrently marked %" PRIuS "KB\n",
- heap_->concurrent_marking()->TotalMarkedBytes() / KB);
- }
- return bytes_processed;
+ return result;
}
} // namespace internal
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index bbf12f6bc0..f3f0703bd1 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -20,6 +20,11 @@ class Object;
class PagedSpace;
enum class StepOrigin { kV8, kTask };
+enum class StepResult {
+ kNoImmediateWork,
+ kMoreWorkRemaining,
+ kWaitingForFinalization
+};
class V8_EXPORT_PRIVATE IncrementalMarking {
public:
@@ -70,8 +75,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static const size_t kOldGenerationAllocatedThreshold = 256 * KB;
static const size_t kMinStepSizeInBytes = 64 * KB;
- static const int kStepSizeInMs = 1;
- static const int kMaxStepSizeInMs = 5;
+ static constexpr double kStepSizeInMs = 1;
+ static constexpr double kMaxStepSizeInMs = 5;
#ifndef DEBUG
static const intptr_t kActivationThreshold = 8 * MB;
@@ -164,21 +169,20 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void Epilogue();
- // Performs incremental marking steps until deadline_in_ms is reached. It
- // returns the remaining time that cannot be used for incremental marking
- // anymore because a single step would exceed the deadline.
- double AdvanceIncrementalMarking(double deadline_in_ms,
- CompletionAction completion_action,
- StepOrigin step_origin);
+ // Performs incremental marking steps and returns before the deadline_in_ms is
+ // reached. It may return earlier if the marker is already ahead of the
+ // marking schedule, which is indicated with StepResult::kNoImmediateWork.
+ StepResult AdvanceWithDeadline(double deadline_in_ms,
+ CompletionAction completion_action,
+ StepOrigin step_origin);
void FinalizeSweeping();
- size_t Step(size_t bytes_to_process, CompletionAction action,
- StepOrigin step_origin);
- void StepOnAllocation(size_t bytes_to_process, double max_step_size);
+ StepResult V8Step(double max_step_size_in_ms, CompletionAction action,
+ StepOrigin step_origin);
bool ShouldDoEmbedderStep();
- void EmbedderStep(double duration);
+ StepResult EmbedderStep(double duration);
inline void RestartIfNotMarking();
@@ -218,10 +222,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool IsCompacting() { return IsMarking() && is_compacting_; }
- void NotifyIncompleteScanOfObject(int unscanned_bytes) {
- unscanned_bytes_of_large_object_ = unscanned_bytes;
- }
-
void ProcessBlackAllocatedObject(HeapObject obj);
Heap* heap() const { return heap_; }
@@ -290,12 +290,30 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// Visits the object and returns its size.
V8_INLINE int VisitObject(Map map, HeapObject obj);
- void IncrementIdleMarkingDelayCounter();
-
- void AdvanceIncrementalMarkingOnAllocation();
-
+ // Updates scheduled_bytes_to_mark_ to ensure marking progress based on
+ // time.
+ void ScheduleBytesToMarkBasedOnTime(double time_ms);
+ // Updates scheduled_bytes_to_mark_ to ensure marking progress based on
+ // allocations.
+ void ScheduleBytesToMarkBasedOnAllocation();
+ // Helper functions for ScheduleBytesToMarkBasedOnAllocation.
size_t StepSizeToKeepUpWithAllocations();
size_t StepSizeToMakeProgress();
+ void AddScheduledBytesToMark(size_t bytes_to_mark);
+
+ // Schedules more bytes to mark so that the marker is no longer ahead
+ // of schedule.
+ void FastForwardSchedule();
+ void FastForwardScheduleIfCloseToFinalization();
+
+ // Fetches marked byte counters from the concurrent marker.
+ void FetchBytesMarkedConcurrently();
+
+ // Returns the bytes to mark in the current step based on the scheduled
+ // bytes and already marked bytes.
+ size_t ComputeStepSizeInBytes(StepOrigin step_origin);
+
+ void AdvanceOnAllocation();
void SetState(State s) {
state_ = s;
@@ -309,13 +327,13 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
double start_time_ms_;
size_t initial_old_generation_size_;
size_t old_generation_allocation_counter_;
- size_t bytes_allocated_;
- size_t bytes_marked_ahead_of_schedule_;
+ size_t bytes_marked_;
+ size_t scheduled_bytes_to_mark_;
+ double schedule_update_time_ms_;
// A sample of concurrent_marking()->TotalMarkedBytes() at the last
// incremental marking step. It is used for updating
// bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
size_t bytes_marked_concurrently_;
- size_t unscanned_bytes_of_large_object_;
// Must use SetState() above to update state_
State state_;
@@ -325,7 +343,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool was_activated_;
bool black_allocation_;
bool finalize_marking_completed_;
- bool trace_wrappers_toggle_;
IncrementalMarkingJob incremental_marking_job_;
GCRequestType request_type_;
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
index 85dd55c593..17a9f04e79 100644
--- a/deps/v8/src/heap/item-parallel-job.cc
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -5,6 +5,7 @@
#include "src/heap/item-parallel-job.h"
#include "src/base/platform/semaphore.h"
+#include "src/counters.h"
#include "src/v8.h"
namespace v8 {
@@ -12,16 +13,9 @@ namespace internal {
ItemParallelJob::Task::Task(Isolate* isolate) : CancelableTask(isolate) {}
-ItemParallelJob::Task::~Task() {
- // The histogram is reset in RunInternal(). If it's still around it means
- // this task was cancelled before being scheduled.
- if (gc_parallel_task_latency_histogram_)
- gc_parallel_task_latency_histogram_->RecordAbandon();
-}
-
-void ItemParallelJob::Task::SetupInternal(
- base::Semaphore* on_finish, std::vector<Item*>* items, size_t start_index,
- base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram) {
+void ItemParallelJob::Task::SetupInternal(base::Semaphore* on_finish,
+ std::vector<Item*>* items,
+ size_t start_index) {
on_finish_ = on_finish;
items_ = items;
@@ -30,17 +24,9 @@ void ItemParallelJob::Task::SetupInternal(
} else {
items_considered_ = items_->size();
}
-
- gc_parallel_task_latency_histogram_ =
- std::move(gc_parallel_task_latency_histogram);
}
void ItemParallelJob::Task::RunInternal() {
- if (gc_parallel_task_latency_histogram_) {
- gc_parallel_task_latency_histogram_->RecordDone();
- gc_parallel_task_latency_histogram_.reset();
- }
-
RunInParallel();
on_finish_->Signal();
}
@@ -58,7 +44,7 @@ ItemParallelJob::~ItemParallelJob() {
}
}
-void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
+void ItemParallelJob::Run() {
DCHECK_GT(tasks_.size(), 0);
const size_t num_items = items_.size();
const size_t num_tasks = tasks_.size();
@@ -68,9 +54,6 @@ void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
"num_tasks", static_cast<int>(num_tasks), "num_items",
static_cast<int>(num_items));
- AsyncTimedHistogram gc_parallel_task_latency_histogram(
- async_counters->gc_parallel_task_latency(), async_counters);
-
// Some jobs have more tasks than items (when the items are mere coarse
// grain tasks that generate work dynamically for a second phase which all
// tasks participate in). Some jobs even have 0 items to preprocess but
@@ -101,9 +84,7 @@ void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
// assigning work items.
DCHECK_IMPLIES(start_index >= num_items, i >= num_tasks_processing_items);
- task->SetupInternal(pending_tasks_, &items_, start_index,
- i > 0 ? gc_parallel_task_latency_histogram
- : base::Optional<AsyncTimedHistogram>());
+ task->SetupInternal(pending_tasks_, &items_, start_index);
task_ids[i] = task->id();
if (i > 0) {
V8::GetCurrentPlatform()->CallBlockingTaskOnWorkerThread(std::move(task));
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 15351d5d84..6639ea1ef5 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -11,9 +11,7 @@
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/base/optional.h"
#include "src/cancelable-task.h"
-#include "src/counters.h"
#include "src/globals.h"
namespace v8 {
@@ -36,9 +34,6 @@ class Isolate;
//
// Items need to be marked as finished after processing them. Task and Item
// ownership is transferred to the job.
-//
-// Each parallel (non-main thread) task will report the time between the job
-// being created and it being scheduled to |gc_parallel_task_latency_histogram|.
class V8_EXPORT_PRIVATE ItemParallelJob {
public:
class Task;
@@ -71,7 +66,7 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
class V8_EXPORT_PRIVATE Task : public CancelableTask {
public:
explicit Task(Isolate* isolate);
- ~Task() override;
+ ~Task() override = default;
virtual void RunInParallel() = 0;
@@ -101,13 +96,9 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
// Sets up state required before invoking Run(). If
// |start_index| is >= |items_.size()|, this task will not process work items
// (some jobs have more tasks than work items in order to parallelize post-
- // processing, e.g. scavenging). If |gc_parallel_task_latency_histogram| is
- // provided, it will be used to report histograms on the latency between
- // posting the task and it being scheduled.
- void SetupInternal(
- base::Semaphore* on_finish, std::vector<Item*>* items,
- size_t start_index,
- base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram);
+ // processing, e.g. scavenging).
+ void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
+ size_t start_index);
// We don't allow overriding this method any further.
void RunInternal() final;
@@ -116,7 +107,6 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
size_t cur_index_ = 0;
size_t items_considered_ = 0;
base::Semaphore* on_finish_ = nullptr;
- base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
@@ -135,15 +125,15 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
int NumberOfItems() const { return static_cast<int>(items_.size()); }
int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
- // Runs this job. Reporting metrics in a thread-safe manner to
- // |async_counters|.
- void Run(const std::shared_ptr<Counters>& async_counters);
+ // Runs this job.
+ void Run();
private:
std::vector<Item*> items_;
std::vector<std::unique_ptr<Task>> tasks_;
CancelableTaskManager* cancelable_task_manager_;
base::Semaphore* pending_tasks_;
+
DISALLOW_COPY_AND_ASSIGN(ItemParallelJob);
};
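
For context on the surrounding API: an ItemParallelJob distributes a vector of items across tasks, each item being processed and marked finished exactly once. A toy analogue of that pattern using std::thread and an atomic claim flag; this illustrates only the idea, not V8's implementation (which hands out items via per-task start indices and a semaphore):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct Item {
      std::atomic<bool> taken{false};
      int value = 0;
    };

    // Each task scans the shared item vector and processes items it wins.
    void RunTask(std::vector<Item>* items, std::atomic<int>* sum) {
      for (Item& item : *items) {
        if (!item.taken.exchange(true)) {  // claim an unprocessed item
          sum->fetch_add(item.value);
        }
      }
    }

    int main() {
      std::vector<Item> items(100);
      for (int i = 0; i < 100; i++) items[i].value = i + 1;
      std::atomic<int> sum{0};
      std::vector<std::thread> tasks;
      for (int i = 0; i < 4; i++) tasks.emplace_back(RunTask, &items, &sum);
      for (auto& t : tasks) t.join();
      std::printf("sum = %d\n", sum.load());  // 5050: each item counted once
    }
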
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index cb73fedca7..925e98c170 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -5,9 +5,12 @@
#ifndef V8_HEAP_MARK_COMPACT_INL_H_
#define V8_HEAP_MARK_COMPACT_INL_H_
+#include "src/heap/mark-compact.h"
+
#include "src/assembler-inl.h"
#include "src/base/bits.h"
-#include "src/heap/mark-compact.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects/js-collection-inl.h"
@@ -247,7 +250,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
// Record the slot inside the JSWeakRef, since the IterateBody below
// won't visit it.
ObjectSlot slot =
- HeapObject::RawField(weak_ref, JSWeakCell::kTargetOffset);
+ HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
collector_->RecordSlot(weak_ref, slot, target);
} else {
// JSWeakRef points to a potentially dead object. We have to process
@@ -263,24 +266,23 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSWeakCell(Map map,
- JSWeakCell weak_cell) {
+ MarkingState>::VisitWeakCell(Map map, WeakCell weak_cell) {
if (weak_cell->target()->IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_cell->target());
if (marking_state()->IsBlackOrGrey(target)) {
- // Record the slot inside the JSWeakCell, since the IterateBody below
+ // Record the slot inside the WeakCell, since the IterateBody below
// won't visit it.
ObjectSlot slot =
- HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
+ HeapObject::RawField(weak_cell, WeakCell::kTargetOffset);
collector_->RecordSlot(weak_cell, slot, target);
} else {
- // JSWeakCell points to a potentially dead object. We have to process
+ // WeakCell points to a potentially dead object. We have to process
// them when we know the liveness of the whole transitive closure.
collector_->AddWeakCell(weak_cell);
}
}
- int size = JSWeakCell::BodyDescriptor::SizeOf(map, weak_cell);
- JSWeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
+ int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
+ WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
return size;
}
@@ -398,25 +400,37 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
DCHECK(FLAG_use_marking_progress_bar);
DCHECK(heap_->IsLargeObject(object));
- int start =
- Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
+ size_t current_progress_bar = chunk->ProgressBar();
+ if (current_progress_bar == 0) {
+ // Try to move the progress bar forward to start offset. This solves the
+ // problem of not being able to observe a progress bar reset when
+ // processing the first kProgressBarScanningChunk.
+ if (!chunk->TrySetProgressBar(0,
+ FixedArray::BodyDescriptor::kStartOffset))
+ return 0;
+ current_progress_bar = FixedArray::BodyDescriptor::kStartOffset;
+ }
+ int start = static_cast<int>(current_progress_bar);
int end = Min(size, start + kProgressBarScanningChunk);
if (start < end) {
VisitPointers(object, HeapObject::RawField(object, start),
HeapObject::RawField(object, end));
- chunk->set_progress_bar(end);
- if (end < size) {
+ // Setting the progress bar can fail if the object that is currently
+ // scanned is also revisited. In this case, there may be two tasks racing
+ // on the progress counter. The looser can bail out because the progress
+ // on the progress counter. The loser can bail out because the progress
+ if (chunk->TrySetProgressBar(current_progress_bar, end) && (end < size)) {
DCHECK(marking_state()->IsBlack(object));
// The object can be pushed back onto the marking worklist only after
// progress bar was updated.
marking_worklist()->Push(object);
- heap_->incremental_marking()->NotifyIncompleteScanOfObject(
- size - (end - start));
}
}
- } else {
- FixedArray::BodyDescriptor::IterateBody(map, object, size, this);
+ return end - start;
}
+
+ // Non-batched processing.
+ FixedArray::BodyDescriptor::IterateBody(map, object, size, this);
return size;
}
@@ -459,7 +473,8 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
#ifdef ENABLE_MINOR_MC
void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
- if (Heap::InNewSpace(obj) && non_atomic_marking_state_.WhiteToGrey(obj)) {
+ if (Heap::InYoungGeneration(obj) &&
+ non_atomic_marking_state_.WhiteToGrey(obj)) {
worklist_->Push(kMainThread, obj);
}
}
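
The progress-bar change above replaces a plain store with a compare-and-swap so that two markers revisiting the same large object cannot both advance the counter. A minimal sketch of that idiom; the standalone TrySetProgressBar here is hypothetical, not the MemoryChunk method:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    std::atomic<size_t> progress_bar{0};

    // Advances the bar only if it still holds old_value; a racing task that
    // loses the CAS bails out, as in the fixed-array visiting logic above.
    bool TrySetProgressBar(size_t old_value, size_t new_value) {
      return progress_bar.compare_exchange_strong(old_value, new_value);
    }

    int main() {
      bool first = TrySetProgressBar(0, 128);   // wins the race
      bool second = TrySetProgressBar(0, 256);  // loser bails out
      std::printf("first=%d second=%d bar=%zu\n", first, second,
                  progress_bar.load());
    }
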
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 0a6b5ed000..77534b921d 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -27,6 +27,7 @@
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
+#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-objects-inl.h"
@@ -63,7 +64,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
protected:
explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
- virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
+ virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+ const MemoryChunk* chunk) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
@@ -171,6 +173,7 @@ class FullMarkingVerifier : public MarkingVerifier {
void Run() override {
VerifyRoots(VISIT_ONLY_STRONG);
VerifyMarking(heap_->new_space());
+ VerifyMarking(heap_->new_lo_space());
VerifyMarking(heap_->old_space());
VerifyMarking(heap_->code_space());
VerifyMarking(heap_->map_space());
@@ -179,7 +182,8 @@ class FullMarkingVerifier : public MarkingVerifier {
}
protected:
- Bitmap* bitmap(const MemoryChunk* chunk) override {
+ ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+ const MemoryChunk* chunk) override {
return marking_state_->bitmap(chunk);
}
@@ -322,7 +326,8 @@ class FullEvacuationVerifier : public EvacuationVerifier {
protected:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
- CHECK_IMPLIES(Heap::InNewSpace(heap_object), Heap::InToSpace(heap_object));
+ CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
+ Heap::InToPage(heap_object));
CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
}
@@ -791,8 +796,6 @@ void MarkCompactCollector::Prepare() {
heap()->incremental_marking()->Stop();
}
- heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();
-
if (!was_marked_incrementally_) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
heap_->local_embedder_heap_tracer()->TracePrologue();
@@ -983,7 +986,7 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
p.store(the_hole);
} else {
// StringTable contains only old space strings.
- DCHECK(!Heap::InNewSpace(o));
+ DCHECK(!Heap::InYoungGeneration(o));
MarkCompactCollector::RecordSlot(table_, p, heap_object);
}
}
@@ -1117,7 +1120,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
// The target is always in old space, we don't have to record the slot in
// the old-to-new remembered set.
- DCHECK(!Heap::InNewSpace(target));
+ DCHECK(!Heap::InYoungGeneration(target));
collector_->RecordRelocSlot(host, rinfo, target);
}
@@ -1139,10 +1142,11 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) {
if (value->IsStrongOrWeak()) {
- Page* p = Page::FromAddress(value.ptr());
- if (p->InNewSpace()) {
- DCHECK_IMPLIES(p->InToSpace(),
- p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
+ if (p->InYoungGeneration()) {
+ DCHECK_IMPLIES(
+ p->IsToPage(),
+ p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
} else if (p->IsEvacuationCandidate()) {
@@ -1402,7 +1406,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
case NEW_TO_OLD: {
page->heap()->new_space()->from_space().RemovePage(page);
Page* new_page = Page::ConvertNewToOld(page);
- DCHECK(!new_page->InNewSpace());
+ DCHECK(!new_page->InYoungGeneration());
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
break;
}
@@ -1738,8 +1742,8 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
void MarkCompactCollector::RecordObjectStats() {
if (V8_UNLIKELY(FLAG_gc_stats)) {
heap()->CreateObjectStats();
- ObjectStatsCollector collector(heap(), heap()->live_object_stats_,
- heap()->dead_object_stats_);
+ ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
+ heap()->dead_object_stats_.get());
collector.Collect();
if (V8_UNLIKELY(FLAG_gc_stats &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
@@ -1847,7 +1851,7 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
- heap()->isolate()->global_handles()->IdentifyWeakHandles(
+ heap()->isolate()->global_handles()->IterateWeakRootsIdentifyFinalizers(
&IsUnmarkedHeapObject);
ProcessMarkingWorklist();
}
@@ -1928,7 +1932,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
ClearWeakReferences();
ClearWeakCollections();
- ClearJSWeakCells();
+ ClearJSWeakRefs();
}
MarkDependentCodeForDeoptimization();
@@ -1937,7 +1941,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
DCHECK(weak_objects_.js_weak_refs.IsEmpty());
- DCHECK(weak_objects_.js_weak_cells.IsEmpty());
+ DCHECK(weak_objects_.weak_cells.IsEmpty());
DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
}
@@ -1997,7 +2001,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
String inferred_name = shared_info->inferred_name();
int start_position = shared_info->StartPosition();
int end_position = shared_info->EndPosition();
- int function_literal_id = shared_info->FunctionLiteralId(isolate());
shared_info->DiscardCompiledMetadata(
isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
@@ -2041,7 +2044,7 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
UncompiledData::Initialize(
uncompiled_data, inferred_name, start_position, end_position,
- function_literal_id,
+ kFunctionLiteralIdInvalid,
[](HeapObject object, ObjectSlot slot, HeapObject target) {
RecordSlot(object, slot, target);
});
@@ -2278,15 +2281,13 @@ void MarkCompactCollector::ClearWeakReferences() {
}
}
-void MarkCompactCollector::ClearJSWeakCells() {
+void MarkCompactCollector::ClearJSWeakRefs() {
if (!FLAG_harmony_weak_refs) {
return;
}
JSWeakRef weak_ref;
while (weak_objects_.js_weak_refs.Pop(kMainThread, &weak_ref)) {
- // We do not insert cleared weak cells into the list, so the value
- // cannot be undefined here.
- JSReceiver target = JSReceiver::cast(weak_ref->target());
+ HeapObject target = HeapObject::cast(weak_ref->target());
if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
weak_ref->set_target(ReadOnlyRoots(isolate()).undefined_value());
} else {
@@ -2296,38 +2297,38 @@ void MarkCompactCollector::ClearJSWeakCells() {
RecordSlot(weak_ref, slot, target);
}
}
- JSWeakCell weak_cell;
- while (weak_objects_.js_weak_cells.Pop(kMainThread, &weak_cell)) {
- // We do not insert cleared weak cells into the list, so the value
- // cannot be a Smi here.
+ WeakCell weak_cell;
+ while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
HeapObject target = HeapObject::cast(weak_cell->target());
if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
- // The value of the JSWeakCell is dead.
- JSWeakFactory weak_factory = JSWeakFactory::cast(weak_cell->factory());
- if (!weak_factory->scheduled_for_cleanup()) {
- heap()->AddDirtyJSWeakFactory(
- weak_factory,
+ DCHECK(!target->IsUndefined());
+ // The value of the WeakCell is dead.
+ JSFinalizationGroup finalization_group =
+ JSFinalizationGroup::cast(weak_cell->finalization_group());
+ if (!finalization_group->scheduled_for_cleanup()) {
+ heap()->AddDirtyJSFinalizationGroup(
+ finalization_group,
[](HeapObject object, ObjectSlot slot, Object target) {
if (target->IsHeapObject()) {
RecordSlot(object, slot, HeapObject::cast(target));
}
});
}
- // We're modifying the pointers in JSWeakCell and JSWeakFactory during GC;
- // thus we need to record the slots it writes. The normal write barrier is
- // not enough, since it's disabled before GC.
+ // We're modifying the pointers in WeakCell and JSFinalizationGroup during
+ // GC; thus we need to record the slots it writes. The normal write
+ // barrier is not enough, since it's disabled before GC.
weak_cell->Nullify(isolate(),
[](HeapObject object, ObjectSlot slot, Object target) {
if (target->IsHeapObject()) {
RecordSlot(object, slot, HeapObject::cast(target));
}
});
- DCHECK(weak_factory->NeedsCleanup());
- DCHECK(weak_factory->scheduled_for_cleanup());
+ DCHECK(finalization_group->NeedsCleanup());
+ DCHECK(finalization_group->scheduled_for_cleanup());
} else {
- // The value of the JSWeakCell is alive.
+ // The value of the WeakCell is alive.
ObjectSlot slot =
- HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
+ HeapObject::RawField(weak_cell, WeakCell::kTargetOffset);
RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
}
}
@@ -2342,7 +2343,7 @@ void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.weak_references.Clear();
weak_objects_.weak_objects_in_code.Clear();
weak_objects_.js_weak_refs.Clear();
- weak_objects_.js_weak_cells.Clear();
+ weak_objects_.weak_cells.Clear();
weak_objects_.bytecode_flushing_candidates.Clear();
weak_objects_.flushed_js_functions.Clear();
}
@@ -2448,10 +2449,10 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
"Only [Full]ObjectSlot and [Full]MaybeObjectSlot are expected here");
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
- DCHECK(Heap::InFromSpace(heap_obj) ||
- MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
- Page::FromHeapObject(heap_obj)->IsFlagSet(
- Page::COMPACTION_WAS_ABORTED));
+ DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
+ MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
+ Page::FromHeapObject(heap_obj)->IsFlagSet(
+ Page::COMPACTION_WAS_ABORTED));
typename TSlot::TObject target =
MakeSlotValue<TSlot, reference_type>(map_word.ToForwardingAddress());
if (access_mode == AccessMode::NON_ATOMIC) {
@@ -2459,7 +2460,7 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
} else {
slot.Release_CompareAndSwap(old, target);
}
- DCHECK(!Heap::InFromSpace(target));
+ DCHECK(!Heap::InFromPage(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else {
DCHECK(heap_obj->map()->IsMap());
@@ -2595,6 +2596,7 @@ void MarkCompactCollector::EvacuatePrologue() {
new_space->ResetLinearAllocationArea();
heap()->new_lo_space()->Flip();
+ heap()->new_lo_space()->ResetPendingObject();
// Old space.
DCHECK(old_space_evacuation_pages_.empty());
@@ -2640,7 +2642,7 @@ class Evacuator : public Malloced {
return kPageNewToOld;
if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
return kPageNewToNew;
- if (chunk->InNewSpace()) return kObjectsNewToOld;
+ if (chunk->InYoungGeneration()) return kObjectsNewToOld;
return kObjectsOldToOld;
}
@@ -2847,7 +2849,7 @@ class PageEvacuationTask : public ItemParallelJob::Task {
evacuator_->EvacuatePage(item->chunk());
item->MarkFinished();
}
- };
+ }
private:
Evacuator* evacuator_;
@@ -2878,7 +2880,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
evacuators[i]->AddObserver(migration_observer);
job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
}
- job->Run(isolate()->async_counters());
+ job->Run();
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i]->Finalize();
delete evacuators[i];
@@ -2936,11 +2938,13 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
// Promote young generation large objects.
- LargePage* current = heap()->new_lo_space()->first_page();
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
- while (current) {
- LargePage* next_current = current->next_page();
+
+ for (auto it = heap()->new_lo_space()->begin();
+ it != heap()->new_lo_space()->end();) {
+ LargePage* current = *it;
+ it++;
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
if (marking_state->IsBlack(object)) {
@@ -2948,7 +2952,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
evacuation_job.AddItem(new EvacuationItem(current));
}
- current = next_current;
}
if (evacuation_job.NumberOfItems() == 0) return;
@@ -3014,13 +3017,13 @@ void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
IterationMode iteration_mode) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"LiveObjectVisitor::VisitBlackObjectsNoFail");
- DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
- if (chunk->owner()->identity() == LO_SPACE) {
+ if (chunk->IsLargePage()) {
HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
- DCHECK(marking_state->IsBlack(object));
- const bool success = visitor->Visit(object, object->Size());
- USE(success);
- DCHECK(success);
+ if (marking_state->IsBlack(object)) {
+ const bool success = visitor->Visit(object, object->Size());
+ USE(success);
+ DCHECK(success);
+ }
} else {
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
@@ -3043,13 +3046,22 @@ void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
IterationMode iteration_mode) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"LiveObjectVisitor::VisitGreyObjectsNoFail");
- for (auto object_and_size :
- LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
- HeapObject const object = object_and_size.first;
- DCHECK(marking_state->IsGrey(object));
- const bool success = visitor->Visit(object, object_and_size.second);
- USE(success);
- DCHECK(success);
+ if (chunk->IsLargePage()) {
+ HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
+ if (marking_state->IsGrey(object)) {
+ const bool success = visitor->Visit(object, object->Size());
+ USE(success);
+ DCHECK(success);
+ }
+ } else {
+ for (auto object_and_size :
+ LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
+ HeapObject const object = object_and_size.first;
+ DCHECK(marking_state->IsGrey(object));
+ const bool success = visitor->Visit(object, object_and_size.second);
+ USE(success);
+ DCHECK(success);
+ }
}
if (iteration_mode == kClearMarkbits) {
marking_state->ClearLiveness(chunk);
@@ -3159,7 +3171,7 @@ class PointersUpdatingTask : public ItemParallelJob::Task {
item->Process();
item->MarkFinished();
}
- };
+ }
private:
GCTracer* tracer_;
@@ -3252,7 +3264,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (!(*slot).GetHeapObject(&heap_object)) {
return REMOVE_SLOT;
}
- if (Heap::InFromSpace(heap_object)) {
+ if (Heap::InFromPage(heap_object)) {
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
HeapObjectReference::Update(THeapObjectSlot(slot),
@@ -3265,10 +3277,10 @@ class RememberedSetUpdatingItem : public UpdatingItem {
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
- if (Heap::InToSpace(heap_object)) {
+ if (Heap::InToPage(heap_object)) {
return KEEP_SLOT;
}
- } else if (Heap::InToSpace(heap_object)) {
+ } else if (Heap::InToPage(heap_object)) {
// Slots can point to "to" space if the page has been moved, or if the
// slot has been recorded multiple times in the remembered set, or
// if the slot was already updated during old->old updating.
@@ -3287,7 +3299,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
return KEEP_SLOT;
} else {
- DCHECK(!Heap::InNewSpace(heap_object));
+ DCHECK(!Heap::InYoungGeneration(heap_object));
}
return REMOVE_SLOT;
}
@@ -3528,7 +3540,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
- updating_job.Run(isolate()->async_counters());
+ updating_job.Run();
}
{
@@ -3560,7 +3572,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
- updating_job.Run(isolate()->async_counters());
+ updating_job.Run();
heap()->array_buffer_collector()->FreeAllocations();
}
}
@@ -3762,7 +3774,8 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
marking_state_(
heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
- Bitmap* bitmap(const MemoryChunk* chunk) override {
+ ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+ const MemoryChunk* chunk) override {
return marking_state_->bitmap(chunk);
}
@@ -3801,7 +3814,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
private:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
- CHECK_IMPLIES(Heap::InNewSpace(heap_object), IsMarked(heap_object));
+ CHECK_IMPLIES(Heap::InYoungGeneration(heap_object), IsMarked(heap_object));
}
template <typename TSlot>
@@ -3834,7 +3847,8 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
protected:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
- CHECK_IMPLIES(Heap::InNewSpace(heap_object), Heap::InToSpace(heap_object));
+ CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
+ Heap::InToPage(heap_object));
}
template <typename TSlot>
@@ -3869,10 +3883,10 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
#endif // VERIFY_HEAP
bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
- DCHECK_IMPLIES(Heap::InNewSpace(*p), Heap::InToSpace(*p));
- return Heap::InNewSpace(*p) && !heap->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->IsGrey(HeapObject::cast(*p));
+ DCHECK_IMPLIES(Heap::InYoungGeneration(*p), Heap::InToPage(*p));
+ return Heap::InYoungGeneration(*p) && !heap->minor_mark_compact_collector()
+ ->non_atomic_marking_state()
+ ->IsGrey(HeapObject::cast(*p));
}
} // namespace
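The InFromSpace/InToSpace to InFromPage/InToPage renames in these hunks reflect that young-generation membership is now a per-page property, so the predicates also cover young-generation large pages. A minimal sketch of the assumed semantics; the flag name and accessors here are illustrative, not taken from this patch:

    // Hedged sketch: page-flag-based young-generation predicates.
    bool Heap::InYoungGeneration(HeapObject object) {
      return MemoryChunk::FromHeapObject(object)->InYoungGeneration();
    }
    bool Heap::InToPage(HeapObject object) {
      // TO_PAGE is an assumed flag name, used only for illustration.
      return MemoryChunk::FromHeapObject(object)->IsFlagSet(MemoryChunk::TO_PAGE);
    }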
@@ -3924,7 +3938,7 @@ class YoungGenerationMarkingVisitor final
template <typename TSlot>
V8_INLINE void VisitPointerImpl(HeapObject host, TSlot slot) {
typename TSlot::TObject target = *slot;
- if (Heap::InNewSpace(target)) {
+ if (Heap::InYoungGeneration(target)) {
// Treat weak references as strong.
// TODO(marja): Proper weakness handling for minor-mcs.
HeapObject target_object = target.GetHeapObject();
@@ -4030,15 +4044,16 @@ class YoungGenerationRecordMigratedSlotVisitor final
inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) final {
if (value->IsStrongOrWeak()) {
- Page* p = Page::FromAddress(value.ptr());
- if (p->InNewSpace()) {
- DCHECK_IMPLIES(p->InToSpace(),
- p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
+ if (p->InYoungGeneration()) {
+ DCHECK_IMPLIES(
+ p->IsToPage(),
+ p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
- Page::FromAddress(slot), slot);
+ MemoryChunk::FromHeapObject(host), slot);
} else if (p->IsEvacuationCandidate() && IsLive(host)) {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
- Page::FromAddress(slot), slot);
+ MemoryChunk::FromHeapObject(host), slot);
}
}
}
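The RecordMigratedSlot hunk above swaps Page::FromAddress for MemoryChunk::FromAddress and MemoryChunk::FromHeapObject. The distinction matters once large objects can be young: a large chunk spans several page-aligned regions, so masking an arbitrary interior slot address can land in the middle of the chunk rather than at its header. A condensed sketch of the safe pattern, assembled from the hunk above:

    // Deriving the chunk from the host object is valid even when the host
    // sits on a LargePage spanning multiple aligned regions; deriving it
    // from the raw slot address is not.
    MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
    RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);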
@@ -4090,7 +4105,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
- updating_job.Run(isolate()->async_counters());
+ updating_job.Run();
heap()->array_buffer_collector()->FreeAllocations();
}
@@ -4102,7 +4117,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
heap()->ProcessWeakListRoots(&evacuation_object_retainer);
// Update pointers from external string table.
- heap()->UpdateNewSpaceReferencesInExternalStringTable(
+ heap()->UpdateYoungReferencesInExternalStringTable(
&UpdateReferenceInExternalStringTableEntry);
}
}
@@ -4173,6 +4188,10 @@ void MinorMarkCompactCollector::CollectGarbage() {
heap()->concurrent_marking()->ClearMemoryChunkData(p);
}
}
+  // Since we promote all surviving large objects immediately, all remaining
+ // large objects must be dead.
+ // TODO(ulan): Don't free all as soon as we have an intermediate generation.
+ heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
}
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
@@ -4190,6 +4209,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
void MinorMarkCompactCollector::MakeIterable(
Page* p, MarkingTreatmentMode marking_mode,
FreeSpaceTreatmentMode free_space_mode) {
+ CHECK(!p->IsLargePage());
// We have to clear the full collectors markbits for the areas that we
// remove here.
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
@@ -4284,7 +4304,7 @@ class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
Object RetainAs(Object object) override {
HeapObject heap_object = HeapObject::cast(object);
- if (!Heap::InNewSpace(heap_object)) return object;
+ if (!Heap::InYoungGeneration(heap_object)) return object;
// Young generation marking only marks to grey instead of black.
DCHECK(!marking_state_->IsBlack(heap_object));
@@ -4308,8 +4328,8 @@ void MinorMarkCompactCollector::ClearNonLiveReferences() {
// Internalized strings are always stored in old space, so there is no need
// to clean them here.
YoungGenerationExternalStringTableCleaner external_visitor(this);
- heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
- heap()->external_string_table_.CleanUpNewSpaceStrings();
+ heap()->external_string_table_.IterateYoung(&external_visitor);
+ heap()->external_string_table_.CleanUpYoung();
}
{
@@ -4329,6 +4349,9 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
}
new_space->Flip();
new_space->ResetLinearAllocationArea();
+
+ heap()->new_lo_space()->Flip();
+ heap()->new_lo_space()->ResetPendingObject();
}
void MinorMarkCompactCollector::EvacuateEpilogue() {
@@ -4394,10 +4417,10 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
static_cast<void*>(this), marking_time);
}
- };
+ }
void MarkObject(Object object) {
- if (!Heap::InNewSpace(object)) return;
+ if (!Heap::InYoungGeneration(object)) return;
HeapObject heap_object = HeapObject::cast(object);
if (marking_state_->WhiteToGrey(heap_object)) {
const int size = visitor_.Visit(heap_object);
@@ -4482,10 +4505,10 @@ class PageMarkingItem : public MarkingItem {
std::is_same<TSlot, MaybeObjectSlot>::value,
"Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
MaybeObject object = *slot;
- if (Heap::InNewSpace(object)) {
+ if (Heap::InYoungGeneration(object)) {
// Marking happens before flipping the young generation, so the object
- // has to be in ToSpace.
- DCHECK(Heap::InToSpace(object));
+ // has to be in a to page.
+ DCHECK(Heap::InToPage(object));
HeapObject heap_object;
bool success = object.GetHeapObject(&heap_object);
USE(success);
@@ -4532,7 +4555,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
job.AddTask(
new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
}
- job.Run(isolate()->async_counters());
+ job.Run();
DCHECK(worklist()->IsEmpty());
}
}
@@ -4556,14 +4579,13 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
- isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ isolate()->global_handles()->MarkYoungWeakUnmodifiedObjectsPending(
&IsUnmarkedObjectForYoungGeneration);
+ isolate()->global_handles()->IterateYoungWeakUnmodifiedRootsForFinalizers(
+ &root_visitor);
isolate()
->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(&root_visitor);
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ ->IterateYoungWeakUnmodifiedRootsForPhantomHandles(
&root_visitor, &IsUnmarkedObjectForYoungGeneration);
ProcessMarkingWorklist();
}
@@ -4663,7 +4685,7 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
LiveObjectVisitor::kKeepMarking);
new_to_old_page_visitor_.account_moved_bytes(
marking_state->live_bytes(chunk));
- if (chunk->owner()->identity() != NEW_LO_SPACE) {
+ if (!chunk->IsLargePage()) {
// TODO(mlippautz): If cleaning array buffers is too slow here we can
// delay it until the next GC.
ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
@@ -4686,7 +4708,7 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
LiveObjectVisitor::kKeepMarking);
new_to_new_page_visitor_.account_moved_bytes(
marking_state->live_bytes(chunk));
- DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
+ DCHECK(!chunk->IsLargePage());
// TODO(mlippautz): If cleaning array buffers is too slow here we can
// delay it until the next GC.
ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
@@ -4727,6 +4749,20 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
}
evacuation_job.AddItem(new EvacuationItem(page));
}
+
+ // Promote young generation large objects.
+ for (auto it = heap()->new_lo_space()->begin();
+ it != heap()->new_lo_space()->end();) {
+ LargePage* current = *it;
+ it++;
+ HeapObject object = current->GetObject();
+ DCHECK(!non_atomic_marking_state_.IsBlack(object));
+ if (non_atomic_marking_state_.IsGrey(object)) {
+ heap_->lo_space()->PromoteNewLargeObject(current);
+ current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ evacuation_job.AddItem(new EvacuationItem(current));
+ }
+ }
if (evacuation_job.NumberOfItems() == 0) return;
YoungGenerationMigrationObserver observer(heap(),
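A change repeated throughout this file is updating_job.Run(isolate()->async_counters()) becoming updating_job.Run(): ItemParallelJob::Run() evidently no longer takes the counters. A hedged sketch of the call pattern as it appears in these hunks; the constructor arguments and the helper name are assumptions for illustration:

    ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);  // assumed ctor args
    const int num_tasks = NumberOfParallelPointerUpdateTasks();   // hypothetical helper
    for (int i = 0; i < num_tasks; i++) {
      updating_job.AddTask(new PointersUpdatingTask(
          isolate(),
          GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
    }
    updating_job.Run();  // previously: updating_job.Run(isolate()->async_counters())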
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 0d2f4c0434..8d93830f35 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_
+#include <atomic>
#include <vector>
#include "src/heap/concurrent-marking.h"
@@ -14,7 +15,7 @@
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/objects/heap-object.h" // For Worklist<HeapObject, ...>
-#include "src/objects/js-weak-refs.h" // For Worklist<JSWeakCell, ...>
+#include "src/objects/js-weak-refs.h" // For Worklist<WeakCell, ...>
namespace v8 {
namespace internal {
@@ -184,7 +185,9 @@ class LiveObjectRange {
: chunk_(chunk),
bitmap_(bitmap),
start_(chunk_->area_start()),
- end_(chunk->area_end()) {}
+ end_(chunk->area_end()) {
+ DCHECK(!chunk->IsLargePage());
+ }
inline iterator begin();
inline iterator end();
@@ -297,8 +300,8 @@ class MarkCompactCollectorBase {
class MinorMarkingState final
: public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
public:
- Bitmap* bitmap(const MemoryChunk* chunk) const {
- return chunk->young_generation_bitmap_;
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
+ return chunk->young_generation_bitmap<AccessMode::ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -318,20 +321,24 @@ class MinorNonAtomicMarkingState final
: public MarkingStateBase<MinorNonAtomicMarkingState,
AccessMode::NON_ATOMIC> {
public:
- Bitmap* bitmap(const MemoryChunk* chunk) const {
- return chunk->young_generation_bitmap_;
+ ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+ const MemoryChunk* chunk) const {
+ return chunk->young_generation_bitmap<AccessMode::NON_ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- chunk->young_generation_live_byte_count_ += by;
+ chunk->young_generation_live_byte_count_.fetch_add(
+ by, std::memory_order_relaxed);
}
intptr_t live_bytes(MemoryChunk* chunk) const {
- return chunk->young_generation_live_byte_count_;
+ return chunk->young_generation_live_byte_count_.load(
+ std::memory_order_relaxed);
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- chunk->young_generation_live_byte_count_ = value;
+ chunk->young_generation_live_byte_count_.store(value,
+ std::memory_order_relaxed);
}
};
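Here young_generation_live_byte_count_ becomes a std::atomic<intptr_t> that even the "non-atomic" marking state reads and writes with memory_order_relaxed: the counter must be free of data races, but no thread synchronizes with another through it. A self-contained sketch of the same pattern (standalone example, not V8 code):

    #include <atomic>
    #include <cstdint>

    struct Chunk {
      std::atomic<intptr_t> live_bytes{0};
    };

    // Several marking threads may bump the counter concurrently; relaxed
    // ordering suffices because nothing is published through this value.
    void IncrementLiveBytes(Chunk* chunk, intptr_t by) {
      chunk->live_bytes.fetch_add(by, std::memory_order_relaxed);
    }

    intptr_t LiveBytes(const Chunk* chunk) {
      return chunk->live_bytes.load(std::memory_order_relaxed);
    }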
@@ -339,14 +346,15 @@ class MinorNonAtomicMarkingState final
class IncrementalMarkingState final
: public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
public:
- Bitmap* bitmap(const MemoryChunk* chunk) const {
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
- return chunk->marking_bitmap_;
+ return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
- // Concurrent marking uses local live bytes.
+  // Concurrent marking uses local live bytes, so we may do these accesses
+ // non-atomically.
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
chunk->live_byte_count_ += by;
}
@@ -363,23 +371,16 @@ class IncrementalMarkingState final
class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
public:
- Bitmap* bitmap(const MemoryChunk* chunk) const {
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
- return chunk->marking_bitmap_;
+ return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- chunk->live_byte_count_ += by;
- }
-
- intptr_t live_bytes(MemoryChunk* chunk) const {
- return chunk->live_byte_count_;
- }
-
- void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- chunk->live_byte_count_ = value;
+ std::atomic_fetch_add(
+ reinterpret_cast<std::atomic<intptr_t>*>(&chunk->live_byte_count_), by);
}
};
@@ -387,11 +388,12 @@ class MajorNonAtomicMarkingState final
: public MarkingStateBase<MajorNonAtomicMarkingState,
AccessMode::NON_ATOMIC> {
public:
- Bitmap* bitmap(const MemoryChunk* chunk) const {
+ ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+ const MemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
- return chunk->marking_bitmap_;
+ return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -446,7 +448,7 @@ struct WeakObjects {
Worklist<std::pair<HeapObject, Code>, 64> weak_objects_in_code;
Worklist<JSWeakRef, 64> js_weak_refs;
- Worklist<JSWeakCell, 64> js_weak_cells;
+ Worklist<WeakCell, 64> weak_cells;
Worklist<SharedFunctionInfo, 64> bytecode_flushing_candidates;
Worklist<JSFunction, 64> flushed_js_functions;
@@ -528,6 +530,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
embedder_.Update(callback);
}
+ void ShareWorkIfGlobalPoolIsEmpty() {
+ if (!shared_.IsLocalEmpty(kMainThread) && shared_.IsGlobalPoolEmpty()) {
+ shared_.FlushToGlobal(kMainThread);
+ }
+ }
+
ConcurrentMarkingWorklist* shared() { return &shared_; }
ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
EmbedderTracingWorklist* embedder() { return &embedder_; }
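The new ShareWorkIfGlobalPoolIsEmpty() hook lets the main thread publish its private worklist segment once the concurrent markers have drained the global pool. A hedged sketch of how a main-thread marking loop might call it; the loop, the accessor name, and the Pop() and Visit() signatures are assumptions, not part of this patch:

    HeapObject object;
    while (marking_worklists()->shared()->Pop(kMainThread, &object)) {
      bytes_processed += visitor.Visit(object);
      // Without this, entries sitting in the main thread's local segment
      // could starve the concurrent marking tasks.
      marking_worklists()->ShareWorkIfGlobalPoolIsEmpty();
    }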
@@ -658,8 +666,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
weak_objects_.js_weak_refs.Push(kMainThread, weak_ref);
}
- void AddWeakCell(JSWeakCell weak_cell) {
- weak_objects_.js_weak_cells.Push(kMainThread, weak_cell);
+ void AddWeakCell(WeakCell weak_cell) {
+ weak_objects_.weak_cells.Push(kMainThread, weak_cell);
}
inline void AddBytecodeFlushingCandidate(SharedFunctionInfo flush_candidate);
@@ -701,10 +709,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
unsigned epoch() const { return epoch_; }
- private:
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
+ // Used by wrapper tracing.
+ V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
+
+ private:
void ComputeEvacuationHeuristics(size_t area_size,
int* target_fragmentation_percent,
size_t* max_evacuated_bytes);
@@ -724,9 +735,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// This is for non-incremental marking only.
V8_INLINE void MarkRootObject(Root root, HeapObject obj);
- // Used by wrapper tracing.
- V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
-
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootVisitor* root_visitor,
ObjectVisitor* custom_root_body_visitor);
@@ -817,9 +825,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// transition.
void ClearWeakReferences();
- // Goes through the list of encountered JSWeakCells and clears those with dead
- // values.
- void ClearJSWeakCells();
+ // Goes through the list of encountered JSWeakRefs and WeakCells and clears
+ // those with dead values.
+ void ClearJSWeakRefs();
void AbortWeakObjects();
@@ -904,9 +912,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// used, so it is okay if this counter overflows and wraps around.
unsigned epoch_ = 0;
- friend class EphemeronHashTableMarkingTask;
friend class FullEvacuator;
- friend class Heap;
friend class RecordMigratedSlotVisitor;
};
@@ -938,7 +944,7 @@ class MarkingVisitor final
V8_INLINE int VisitMap(Map map, Map object);
V8_INLINE int VisitSharedFunctionInfo(Map map, SharedFunctionInfo object);
V8_INLINE int VisitTransitionArray(Map map, TransitionArray object);
- V8_INLINE int VisitJSWeakCell(Map map, JSWeakCell object);
+ V8_INLINE int VisitWeakCell(Map map, WeakCell object);
V8_INLINE int VisitJSWeakRef(Map map, JSWeakRef object);
// ObjectVisitor implementation.
diff --git a/deps/v8/src/heap/marking.cc b/deps/v8/src/heap/marking.cc
index 93b5c06a45..ceda7d68d2 100644
--- a/deps/v8/src/heap/marking.cc
+++ b/deps/v8/src/heap/marking.cc
@@ -7,89 +7,9 @@
namespace v8 {
namespace internal {
-void Bitmap::Clear() {
- base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
- for (int i = 0; i < CellsCount(); i++) {
- base::Relaxed_Store(cell_base + i, 0);
- }
- // This fence prevents re-ordering of publishing stores with the mark-bit
- // clearing stores.
- base::SeqCst_MemoryFence();
-}
-
-void Bitmap::MarkAllBits() {
- base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
- for (int i = 0; i < CellsCount(); i++) {
- base::Relaxed_Store(cell_base + i, 0xffffffff);
- }
- // This fence prevents re-ordering of publishing stores with the mark-bit
- // clearing stores.
- base::SeqCst_MemoryFence();
-}
-
-void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
- if (start_index >= end_index) return;
- end_index--;
-
- unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
- unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
- if (start_cell_index != end_cell_index) {
- // Firstly, fill all bits from the start address to the end of the first
- // cell with 1s.
- SetBitsInCell<AccessMode::ATOMIC>(start_cell_index,
- ~(start_index_mask - 1));
- // Then fill all in between cells with 1s.
- base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
- for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
- base::Relaxed_Store(cell_base + i, ~0u);
- }
- // Finally, fill all bits until the end address in the last cell with 1s.
- SetBitsInCell<AccessMode::ATOMIC>(end_cell_index,
- end_index_mask | (end_index_mask - 1));
- } else {
- SetBitsInCell<AccessMode::ATOMIC>(
- start_cell_index, end_index_mask | (end_index_mask - start_index_mask));
- }
- // This fence prevents re-ordering of publishing stores with the mark-
- // bit setting stores.
- base::SeqCst_MemoryFence();
-}
-
-void Bitmap::ClearRange(uint32_t start_index, uint32_t end_index) {
- if (start_index >= end_index) return;
- end_index--;
-
- unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-
- unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-
- if (start_cell_index != end_cell_index) {
- // Firstly, fill all bits from the start address to the end of the first
- // cell with 0s.
- ClearBitsInCell<AccessMode::ATOMIC>(start_cell_index,
- ~(start_index_mask - 1));
- // Then fill all in between cells with 0s.
- base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
- for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
- base::Relaxed_Store(cell_base + i, 0);
- }
- // Finally, set all bits until the end address in the last cell with 0s.
- ClearBitsInCell<AccessMode::ATOMIC>(end_cell_index,
- end_index_mask | (end_index_mask - 1));
- } else {
- ClearBitsInCell<AccessMode::ATOMIC>(
- start_cell_index, end_index_mask | (end_index_mask - start_index_mask));
- }
- // This fence prevents re-ordering of publishing stores with the mark-
- // bit clearing stores.
- base::SeqCst_MemoryFence();
-}
-
-bool Bitmap::AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
+template <>
+bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsSetInRange(
+ uint32_t start_index, uint32_t end_index) {
if (start_index >= end_index) return false;
end_index--;
@@ -116,7 +36,9 @@ bool Bitmap::AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
}
}
-bool Bitmap::AllBitsClearInRange(uint32_t start_index, uint32_t end_index) {
+template <>
+bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsClearInRange(
+ uint32_t start_index, uint32_t end_index) {
if (start_index >= end_index) return true;
end_index--;
@@ -193,7 +115,8 @@ class CellPrinter {
} // anonymous namespace
-void Bitmap::Print() {
+template <>
+void ConcurrentBitmap<AccessMode::NON_ATOMIC>::Print() {
CellPrinter printer;
for (int i = 0; i < CellsCount(); i++) {
printer.Print(i, cells()[i]);
@@ -202,7 +125,8 @@ void Bitmap::Print() {
PrintF("\n");
}
-bool Bitmap::IsClean() {
+template <>
+bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::IsClean() {
for (int i = 0; i < CellsCount(); i++) {
if (cells()[i] != 0) {
return false;
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index df73f1c5c1..ec5b06cde1 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -135,31 +135,36 @@ class V8_EXPORT_PRIVATE Bitmap {
MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
return MarkBit(cell, mask);
}
+};
+template <AccessMode mode>
+class ConcurrentBitmap : public Bitmap {
+ public:
void Clear();
void MarkAllBits();
// Clears bits in the given cell. The mask specifies bits to clear: if a
// bit is set in the mask then the corresponding bit is cleared in the cell.
- template <AccessMode mode = AccessMode::NON_ATOMIC>
void ClearBitsInCell(uint32_t cell_index, uint32_t mask);
// Sets bits in the given cell. The mask specifies bits to set: if a
// bit is set in the mask then the corresponding bit is set in the cell.
- template <AccessMode mode = AccessMode::NON_ATOMIC>
void SetBitsInCell(uint32_t cell_index, uint32_t mask);
- // Sets all bits in the range [start_index, end_index). The cells at the
- // boundary of the range are updated with atomic compare and swap operation.
- // The inner cells are updated with relaxed write.
+  // Sets all bits in the range [start_index, end_index). If the access is
+  // atomic, the cells at the boundary of the range are updated with an atomic
+  // compare-and-swap operation. The inner cells are updated with relaxed writes.
void SetRange(uint32_t start_index, uint32_t end_index);
- // Clears all bits in the range [start_index, end_index). The cells at the
- // boundary of the range are updated with atomic compare and swap operation.
- // The inner cells are updated with relaxed write.
+  // Clears all bits in the range [start_index, end_index). If the access is
+  // atomic, the cells at the boundary of the range are updated with an atomic
+  // compare-and-swap operation. The inner cells are updated with relaxed writes.
void ClearRange(uint32_t start_index, uint32_t end_index);
+  // The following methods are *not* safe to use in a concurrent context, so they
+ // are not implemented for `AccessMode::ATOMIC`.
+
// Returns true if all bits in the range [start_index, end_index) are set.
bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index);
@@ -169,32 +174,174 @@ class V8_EXPORT_PRIVATE Bitmap {
void Print();
bool IsClean();
+
+ private:
+ // Clear all bits in the cell range [start_cell_index, end_cell_index). If the
+ // access is atomic then *still* use a relaxed memory ordering.
+ void ClearCellRangeRelaxed(uint32_t start_cell_index,
+ uint32_t end_cell_index);
+
+ // Set all bits in the cell range [start_cell_index, end_cell_index). If the
+ // access is atomic then *still* use a relaxed memory ordering.
+ void SetCellRangeRelaxed(uint32_t start_cell_index, uint32_t end_cell_index);
};
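The refactor above moves the AccessMode template parameter from individual methods (SetBitsInCell<mode>) onto the class itself, so a call site picks the mode once and every bitmap operation agrees with it. A minimal standalone sketch of the same pattern; it is illustrative only, though the reinterpret_cast idiom mirrors the one this patch applies to live_byte_count_:

    #include <atomic>

    enum class AccessMode { ATOMIC, NON_ATOMIC };

    template <AccessMode mode>
    class ConcurrentCounter {
     public:
      void Increment();  // specialized per access mode below
     private:
      int value_ = 0;
    };

    template <>
    inline void ConcurrentCounter<AccessMode::NON_ATOMIC>::Increment() {
      value_++;  // plain read-modify-write: single-threaded use only
    }

    template <>
    inline void ConcurrentCounter<AccessMode::ATOMIC>::Increment() {
      std::atomic_fetch_add(
          reinterpret_cast<std::atomic<int>*>(&value_), 1);
    }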
template <>
-inline void Bitmap::SetBitsInCell<AccessMode::NON_ATOMIC>(uint32_t cell_index,
- uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::ClearCellRangeRelaxed(
+ uint32_t start_cell_index, uint32_t end_cell_index) {
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+ base::Relaxed_Store(cell_base + i, 0);
+ }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::ClearCellRangeRelaxed(
+ uint32_t start_cell_index, uint32_t end_cell_index) {
+ for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+ cells()[i] = 0;
+ }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::SetCellRangeRelaxed(
+ uint32_t start_cell_index, uint32_t end_cell_index) {
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+ base::Relaxed_Store(cell_base + i, 0xffffffff);
+ }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::SetCellRangeRelaxed(
+ uint32_t start_cell_index, uint32_t end_cell_index) {
+ for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+ cells()[i] = 0xffffffff;
+ }
+}
+
+template <AccessMode mode>
+inline void ConcurrentBitmap<mode>::Clear() {
+ ClearCellRangeRelaxed(0, CellsCount());
+ if (mode == AccessMode::ATOMIC) {
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+ // setting stores.
+ base::SeqCst_MemoryFence();
+ }
+}
+
+template <AccessMode mode>
+inline void ConcurrentBitmap<mode>::MarkAllBits() {
+ SetCellRangeRelaxed(0, CellsCount());
+ if (mode == AccessMode::ATOMIC) {
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+ // setting stores.
+ base::SeqCst_MemoryFence();
+ }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::SetBitsInCell(
+ uint32_t cell_index, uint32_t mask) {
cells()[cell_index] |= mask;
}
template <>
-inline void Bitmap::SetBitsInCell<AccessMode::ATOMIC>(uint32_t cell_index,
- uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::SetBitsInCell(
+ uint32_t cell_index, uint32_t mask) {
base::AsAtomic32::SetBits(cells() + cell_index, mask, mask);
}
template <>
-inline void Bitmap::ClearBitsInCell<AccessMode::NON_ATOMIC>(uint32_t cell_index,
- uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::ClearBitsInCell(
+ uint32_t cell_index, uint32_t mask) {
cells()[cell_index] &= ~mask;
}
template <>
-inline void Bitmap::ClearBitsInCell<AccessMode::ATOMIC>(uint32_t cell_index,
- uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::ClearBitsInCell(
+ uint32_t cell_index, uint32_t mask) {
base::AsAtomic32::SetBits(cells() + cell_index, 0u, mask);
}
+template <AccessMode mode>
+void ConcurrentBitmap<mode>::SetRange(uint32_t start_index,
+ uint32_t end_index) {
+ if (start_index >= end_index) return;
+ end_index--;
+
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ if (start_cell_index != end_cell_index) {
+ // Firstly, fill all bits from the start address to the end of the first
+ // cell with 1s.
+ SetBitsInCell(start_cell_index, ~(start_index_mask - 1));
+ // Then fill all in between cells with 1s.
+ SetCellRangeRelaxed(start_cell_index + 1, end_cell_index);
+ // Finally, fill all bits until the end address in the last cell with 1s.
+ SetBitsInCell(end_cell_index, end_index_mask | (end_index_mask - 1));
+ } else {
+ SetBitsInCell(start_cell_index,
+ end_index_mask | (end_index_mask - start_index_mask));
+ }
+ if (mode == AccessMode::ATOMIC) {
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+ // setting stores.
+ base::SeqCst_MemoryFence();
+ }
+}
+
+template <AccessMode mode>
+void ConcurrentBitmap<mode>::ClearRange(uint32_t start_index,
+ uint32_t end_index) {
+ if (start_index >= end_index) return;
+ end_index--;
+
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ if (start_cell_index != end_cell_index) {
+ // Firstly, fill all bits from the start address to the end of the first
+ // cell with 0s.
+ ClearBitsInCell(start_cell_index, ~(start_index_mask - 1));
+ // Then fill all in between cells with 0s.
+ ClearCellRangeRelaxed(start_cell_index + 1, end_cell_index);
+ // Finally, set all bits until the end address in the last cell with 0s.
+ ClearBitsInCell(end_cell_index, end_index_mask | (end_index_mask - 1));
+ } else {
+ ClearBitsInCell(start_cell_index,
+ end_index_mask | (end_index_mask - start_index_mask));
+ }
+ if (mode == AccessMode::ATOMIC) {
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+ // clearing stores.
+ base::SeqCst_MemoryFence();
+ }
+}
+
+template <>
+V8_EXPORT_PRIVATE bool
+ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsSetInRange(
+ uint32_t start_index, uint32_t end_index);
+
+template <>
+V8_EXPORT_PRIVATE bool
+ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsClearInRange(
+ uint32_t start_index, uint32_t end_index);
+
+template <>
+void ConcurrentBitmap<AccessMode::NON_ATOMIC>::Print();
+
+template <>
+V8_EXPORT_PRIVATE bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::IsClean();
+
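The index arithmetic shared by SetRange and ClearRange is easiest to verify with concrete numbers. With 32-bit cells (kBitsPerCellLog2 == 5), a worked example that follows directly from the code above:

    // SetRange(start_index = 3, end_index = 70) sets bits [3, 70):
    //   end_index-- => 69, the last bit actually set (the range is half-open)
    //   start_cell_index = 3 >> 5  = 0;  start_index_mask = 1u << 3 = 0b001000
    //   end_cell_index   = 69 >> 5 = 2;  end_index_mask   = 1u << 5 = 0b100000
    // The cell indices differ, so three steps run:
    //   SetBitsInCell(0, ~(0b001000 - 1));            // bits 3..31 of cell 0
    //   SetCellRangeRelaxed(1, 2);                    // all 32 bits of cell 1
    //   SetBitsInCell(2, 0b100000 | (0b100000 - 1));  // bits 0..5 of cell 2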
class Marking : public AllStatic {
public:
// TODO(hpayer): The current mark bit operations use as default NON_ATOMIC
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 475728b769..6d0dfe5012 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -86,7 +86,7 @@ void MemoryReducer::NotifyTimer(const Event& event) {
const int kIncrementalMarkingDelayMs = 500;
double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
kIncrementalMarkingDelayMs;
- heap()->incremental_marking()->AdvanceIncrementalMarking(
+ heap()->incremental_marking()->AdvanceWithDeadline(
deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kTask);
heap()->FinalizeIncrementalMarkingIfComplete(
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 10fd67e907..affd574ba2 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -578,6 +578,7 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kHasKeyed:
if (obj == *isolate->factory()->uninitialized_symbol() ||
obj == *isolate->factory()->premonomorphic_symbol()) {
return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE;
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 6f2d2c58d1..aea5920dff 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -59,6 +59,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
return visitor->VisitFreeSpace(map, FreeSpace::cast(object));
case kVisitWeakArray:
return visitor->VisitWeakArray(map, object);
+ case kDataOnlyVisitorIdCount:
case kVisitorIdCount:
UNREACHABLE();
}
@@ -160,6 +161,19 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
return static_cast<ResultType>(object->size());
}
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitWeakArray(
+ Map map, HeapObject object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = WeakArrayBodyDescriptor::SizeOf(map, object);
+ if (visitor->ShouldVisitMapPointer()) {
+ visitor->VisitMapPointer(object, object->map_slot());
+ }
+ WeakArrayBodyDescriptor::IterateBody(map, object, size, visitor);
+ return size;
+}
+
template <typename ConcreteVisitor>
int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map map,
NativeContext object) {
@@ -184,25 +198,12 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitSharedFunctionInfo(
}
template <typename ConcreteVisitor>
-int NewSpaceVisitor<ConcreteVisitor>::VisitJSWeakCell(Map map,
- JSWeakCell js_weak_cell) {
+int NewSpaceVisitor<ConcreteVisitor>::VisitWeakCell(Map map,
+ WeakCell weak_cell) {
UNREACHABLE();
return 0;
}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitWeakArray(
- Map map, HeapObject object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = WeakArrayBodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object->map_slot());
- }
- WeakArrayBodyDescriptor::IterateBody(map, object, size, visitor);
- return size;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 696b12a31c..52a9e94b49 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -5,35 +5,15 @@
#ifndef V8_HEAP_OBJECTS_VISITING_H_
#define V8_HEAP_OBJECTS_VISITING_H_
-#include "src/allocation.h"
-#include "src/layout-descriptor.h"
-#include "src/objects-body-descriptors.h"
#include "src/objects.h"
-#include "src/objects/hash-table.h"
-#include "src/objects/ordered-hash-table.h"
-#include "src/objects/string.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/map.h"
#include "src/visitors.h"
namespace v8 {
namespace internal {
-class BigInt;
-class BytecodeArray;
-class DataHandler;
-class EmbedderDataArray;
-class JSArrayBuffer;
-class JSDataView;
-class JSRegExp;
-class JSTypedArray;
-class JSWeakCell;
-class JSWeakRef;
-class JSWeakCollection;
-class NativeContext;
-class UncompiledDataWithoutPreparseData;
-class UncompiledDataWithPreparseData;
-class WasmInstanceObject;
-
-#define TYPED_VISITOR_ID_LIST(V) \
+#define TYPED_VISITOR_ID_LIST_CLASSES(V) \
V(AllocationSite, AllocationSite) \
V(BigInt, BigInt) \
V(ByteArray, ByteArray) \
@@ -51,14 +31,13 @@ class WasmInstanceObject;
V(FeedbackVector, FeedbackVector) \
V(FixedArray, FixedArray) \
V(FixedDoubleArray, FixedDoubleArray) \
- V(FixedFloat64Array, FixedFloat64Array) \
V(FixedTypedArrayBase, FixedTypedArrayBase) \
V(JSArrayBuffer, JSArrayBuffer) \
V(JSDataView, JSDataView) \
V(JSFunction, JSFunction) \
V(JSObject, JSObject) \
V(JSTypedArray, JSTypedArray) \
- V(JSWeakCell, JSWeakCell) \
+ V(WeakCell, WeakCell) \
V(JSWeakCollection, JSWeakCollection) \
V(JSWeakRef, JSWeakRef) \
V(Map, Map) \
@@ -82,6 +61,17 @@ class WasmInstanceObject;
V(UncompiledDataWithPreparseData, UncompiledDataWithPreparseData) \
V(WasmInstanceObject, WasmInstanceObject)
+#define FORWARD_DECLARE(TypeName, Type) class Type;
+TYPED_VISITOR_ID_LIST_CLASSES(FORWARD_DECLARE)
+#undef FORWARD_DECLARE
+
+#define TYPED_VISITOR_ID_LIST_TYPEDEFS(V) \
+ V(FixedFloat64Array, FixedFloat64Array)
+
+#define TYPED_VISITOR_ID_LIST(V) \
+ TYPED_VISITOR_ID_LIST_CLASSES(V) \
+ TYPED_VISITOR_ID_LIST_TYPEDEFS(V)
+
// The base class for visitors that need to dispatch on object type. The default
// behavior of all visit functions is to iterate body of the given object using
// the BodyDescriptor of the object.
@@ -144,7 +134,7 @@ class NewSpaceVisitor : public HeapVisitor<int, ConcreteVisitor> {
}
int VisitSharedFunctionInfo(Map map, SharedFunctionInfo object);
- int VisitJSWeakCell(Map map, JSWeakCell js_weak_cell);
+ int VisitWeakCell(Map map, WeakCell weak_cell);
};
class WeakObjectRetainer;
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
new file mode 100644
index 0000000000..a2c086fc0a
--- /dev/null
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/read-only-heap.h"
+
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+ReadOnlyHeap* ReadOnlyHeap::GetOrCreateReadOnlyHeap(Heap* heap) {
+ return new ReadOnlyHeap(new ReadOnlySpace(heap));
+}
+
+void ReadOnlyHeap::MaybeDeserialize(Isolate* isolate,
+ ReadOnlyDeserializer* des) {
+ des->DeserializeInto(isolate);
+}
+
+void ReadOnlyHeap::OnHeapTearDown() {
+ delete read_only_space_;
+ delete this;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/read-only-heap.h b/deps/v8/src/heap/read-only-heap.h
new file mode 100644
index 0000000000..d2b0db012d
--- /dev/null
+++ b/deps/v8/src/heap/read-only-heap.h
@@ -0,0 +1,48 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_READ_ONLY_HEAP_H_
+#define V8_HEAP_READ_ONLY_HEAP_H_
+
+#include "src/base/macros.h"
+#include "src/heap/heap.h"
+#include "src/roots.h"
+#include "src/snapshot/read-only-deserializer.h"
+
+namespace v8 {
+namespace internal {
+
+class ReadOnlySpace;
+
+// This class transparently manages read-only space, roots and cache creation
+// and destruction. Eventually this will allow sharing these artifacts between
+// isolates.
+class ReadOnlyHeap {
+ public:
+ static ReadOnlyHeap* GetOrCreateReadOnlyHeap(Heap* heap);
+ // If necessary, deserialize read-only objects and set up read-only object
+ // cache.
+ void MaybeDeserialize(Isolate* isolate, ReadOnlyDeserializer* des);
+ // Frees ReadOnlySpace and itself when sharing is disabled. No-op otherwise.
+ // Read-only data should not be used within the current isolate after this is
+ // called.
+ void OnHeapTearDown();
+
+ std::vector<Object>* read_only_object_cache() {
+ return &read_only_object_cache_;
+ }
+ ReadOnlySpace* read_only_space() const { return read_only_space_; }
+
+ private:
+ ReadOnlySpace* read_only_space_ = nullptr;
+ std::vector<Object> read_only_object_cache_;
+
+ explicit ReadOnlyHeap(ReadOnlySpace* ro_space) : read_only_space_(ro_space) {}
+ DISALLOW_COPY_AND_ASSIGN(ReadOnlyHeap);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_READ_ONLY_HEAP_H_
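A hedged sketch of how a Heap might drive this lifecycle, based only on the interface above; the call sites and the member name are assumptions:

    // During isolate setup:
    read_only_heap_ = ReadOnlyHeap::GetOrCreateReadOnlyHeap(this);
    if (read_only_deserializer != nullptr) {
      // Populates the read-only space and object cache from the snapshot.
      read_only_heap_->MaybeDeserialize(isolate, read_only_deserializer);
    }

    // During isolate teardown. While sharing is disabled this frees the
    // ReadOnlySpace and the ReadOnlyHeap itself, so the pointer is dead
    // after the call:
    read_only_heap_->OnHeapTearDown();
    read_only_heap_ = nullptr;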
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index b6b442b56e..b890350207 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -266,8 +266,10 @@ class RememberedSet : public AllStatic {
class UpdateTypedSlotHelper {
public:
- // Updates a typed slot using an untyped slot callback.
- // The callback accepts MaybeObjectSlot and returns SlotCallbackResult.
+  // Updates a typed slot using an untyped slot callback, where |addr|,
+  // depending on the slot type, is either the address of the respective
+  // RelocInfo or the address of the uncompressed constant pool entry.
+  // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
template <typename Callback>
static SlotCallbackResult UpdateTypedSlot(Heap* heap, SlotType slot_type,
Address addr, Callback callback) {
@@ -284,8 +286,6 @@ class UpdateTypedSlotHelper {
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
case OBJECT_SLOT: {
- // TODO(ishell): the incoming addr represents MaybeObjectSlot(addr).
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
return callback(FullMaybeObjectSlot(addr));
}
case CLEARED_SLOT:
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index bfd8e11ff8..1ac96b7362 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -91,25 +91,6 @@ bool Scavenger::PromotionList::ShouldEagerlyProcessPromotionList(int task_id) {
return LocalPushSegmentSize(task_id) < kProcessPromotionListThreshold;
}
-// White list for objects that for sure only contain data.
-bool Scavenger::ContainsOnlyData(VisitorId visitor_id) {
- switch (visitor_id) {
- case kVisitSeqOneByteString:
- return true;
- case kVisitSeqTwoByteString:
- return true;
- case kVisitByteArray:
- return true;
- case kVisitFixedDoubleArray:
- return true;
- case kVisitDataObject:
- return true;
- default:
- break;
- }
- return false;
-}
-
void Scavenger::PageMemoryFence(MaybeObject object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
@@ -148,10 +129,9 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
}
template <typename THeapObjectSlot>
-CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map map,
- THeapObjectSlot slot,
- HeapObject object,
- int object_size) {
+CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
+ Map map, THeapObjectSlot slot, HeapObject object, int object_size,
+ ObjectFields object_fields) {
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
@@ -169,14 +149,15 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map map,
allocator_.FreeLast(NEW_SPACE, target, object_size);
MapWord map_word = object->synchronized_map_word();
HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
- DCHECK(!Heap::InFromSpace(*slot));
- return Heap::InToSpace(*slot)
+ DCHECK(!Heap::InFromPage(*slot));
+ return Heap::InToPage(*slot)
? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
: CopyAndForwardResult::SUCCESS_OLD_GENERATION;
}
HeapObjectReference::Update(slot, target);
-
- copied_list_.Push(ObjectAndSize(target, object_size));
+ if (object_fields == ObjectFields::kMaybePointers) {
+ copied_list_.Push(ObjectAndSize(target, object_size));
+ }
copied_size_ += object_size;
return CopyAndForwardResult::SUCCESS_YOUNG_GENERATION;
}
@@ -186,7 +167,8 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map map,
template <typename THeapObjectSlot>
CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
HeapObject object,
- int object_size) {
+ int object_size,
+ ObjectFields object_fields) {
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
@@ -203,13 +185,13 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
allocator_.FreeLast(OLD_SPACE, target, object_size);
MapWord map_word = object->synchronized_map_word();
HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
- DCHECK(!Heap::InFromSpace(*slot));
- return Heap::InToSpace(*slot)
+ DCHECK(!Heap::InFromPage(*slot));
+ return Heap::InToPage(*slot)
? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
: CopyAndForwardResult::SUCCESS_OLD_GENERATION;
}
HeapObjectReference::Update(slot, target);
- if (!ContainsOnlyData(map->visitor_id())) {
+ if (object_fields == ObjectFields::kMaybePointers) {
promotion_list_.PushRegularObject(target, object_size);
}
promoted_size_ += object_size;
@@ -225,19 +207,20 @@ SlotCallbackResult Scavenger::RememberedSetEntryNeeded(
: REMOVE_SLOT;
}
-bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size) {
+bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
+ ObjectFields object_fields) {
// TODO(hpayer): Make this check size based, i.e.
// object_size > kMaxRegularHeapObjectSize
if (V8_UNLIKELY(
FLAG_young_generation_large_objects &&
- MemoryChunk::FromHeapObject(object)->IsInNewLargeObjectSpace())) {
+ MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner()->identity());
if (object->map_slot().Release_CompareAndSwap(
map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
surviving_new_large_objects_.insert({object, map});
-
- if (!ContainsOnlyData(map->visitor_id())) {
+ promoted_size_ += object_size;
+ if (object_fields == ObjectFields::kMaybePointers) {
promotion_list_.PushLargeObject(object, map, object_size);
}
}
@@ -247,18 +230,17 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size) {
}
template <typename THeapObjectSlot>
-SlotCallbackResult Scavenger::EvacuateObjectDefault(Map map,
- THeapObjectSlot slot,
- HeapObject object,
- int object_size) {
+SlotCallbackResult Scavenger::EvacuateObjectDefault(
+ Map map, THeapObjectSlot slot, HeapObject object, int object_size,
+ ObjectFields object_fields) {
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
SLOW_DCHECK(object->SizeFromMap(map) == object_size);
CopyAndForwardResult result;
- if (HandleLargeObject(map, object, object_size)) {
- return REMOVE_SLOT;
+ if (HandleLargeObject(map, object, object_size, object_fields)) {
+ return KEEP_SLOT;
}
SLOW_DCHECK(static_cast<size_t>(object_size) <=
@@ -267,7 +249,7 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(Map map,
if (!heap()->ShouldBePromoted(object->address())) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
- result = SemiSpaceCopyObject(map, slot, object, object_size);
+ result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
if (result != CopyAndForwardResult::FAILURE) {
return RememberedSetEntryNeeded(result);
}
@@ -276,13 +258,13 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(Map map,
// We may want to promote this object if the object was already semi-space
  // copied in a previous young generation GC or if the semi-space copy above
// failed.
- result = PromoteObject(map, slot, object, object_size);
+ result = PromoteObject(map, slot, object, object_size, object_fields);
if (result != CopyAndForwardResult::FAILURE) {
return RememberedSetEntryNeeded(result);
}
// If promotion failed, we try to copy the object to the other semi-space.
- result = SemiSpaceCopyObject(map, slot, object, object_size);
+ result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
if (result != CopyAndForwardResult::FAILURE) {
return RememberedSetEntryNeeded(result);
}
@@ -305,12 +287,15 @@ SlotCallbackResult Scavenger::EvacuateThinString(Map map, THeapObjectSlot slot,
String actual = object->actual();
// ThinStrings always refer to internalized strings, which are always in old
// space.
- DCHECK(!Heap::InNewSpace(actual));
- slot.StoreHeapObject(actual);
+ DCHECK(!Heap::InYoungGeneration(actual));
+ HeapObjectReference::Update(slot, actual);
return REMOVE_SLOT;
}
- return EvacuateObjectDefault(map, slot, object, object_size);
+ DCHECK_EQ(ObjectFields::kMaybePointers,
+ Map::ObjectFieldsFrom(map->visitor_id()));
+ return EvacuateObjectDefault(map, slot, object, object_size,
+ ObjectFields::kMaybePointers);
}
template <typename THeapObjectSlot>
@@ -326,9 +311,9 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
object->unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
HeapObject first = HeapObject::cast(object->unchecked_first());
- slot.StoreHeapObject(first);
+ HeapObjectReference::Update(slot, first);
- if (!Heap::InNewSpace(first)) {
+ if (!Heap::InYoungGeneration(first)) {
object->map_slot().Release_Store(
MapWord::FromForwardingAddress(first).ToMap());
return REMOVE_SLOT;
@@ -338,20 +323,23 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
if (first_word.IsForwardingAddress()) {
HeapObject target = first_word.ToForwardingAddress();
- slot.StoreHeapObject(target);
+ HeapObjectReference::Update(slot, target);
object->map_slot().Release_Store(
MapWord::FromForwardingAddress(target).ToMap());
- return Heap::InToSpace(target) ? KEEP_SLOT : REMOVE_SLOT;
+ return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map map = first_word.ToMap();
SlotCallbackResult result =
- EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map));
+ EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map),
+ Map::ObjectFieldsFrom(map->visitor_id()));
object->map_slot().Release_Store(
MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
return result;
}
-
- return EvacuateObjectDefault(map, slot, object, object_size);
+ DCHECK_EQ(ObjectFields::kMaybePointers,
+ Map::ObjectFieldsFrom(map->visitor_id()));
+ return EvacuateObjectDefault(map, slot, object, object_size,
+ ObjectFields::kMaybePointers);
}
template <typename THeapObjectSlot>
@@ -360,12 +348,13 @@ SlotCallbackResult Scavenger::EvacuateObject(THeapObjectSlot slot, Map map,
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
- SLOW_DCHECK(Heap::InFromSpace(source));
+ SLOW_DCHECK(Heap::InFromPage(source));
SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
int size = source->SizeFromMap(map);
// Cannot use ::cast() below because that would add checks in debug mode
// that require re-reading the map.
- switch (map->visitor_id()) {
+ VisitorId visitor_id = map->visitor_id();
+ switch (visitor_id) {
case kVisitThinString:
// At the moment we don't allow weak pointers to thin strings.
DCHECK(!(*slot)->IsWeak());
@@ -377,7 +366,8 @@ SlotCallbackResult Scavenger::EvacuateObject(THeapObjectSlot slot, Map map,
return EvacuateShortcutCandidate(
map, slot, ConsString::unchecked_cast(source), size);
default:
- return EvacuateObjectDefault(map, slot, source, size);
+ return EvacuateObjectDefault(map, slot, source, size,
+ Map::ObjectFieldsFrom(visitor_id));
}
}
@@ -387,7 +377,7 @@ SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
- DCHECK(Heap::InFromSpace(object));
+ DCHECK(Heap::InFromPage(object));
// Synchronized load that consumes the publishing CAS of MigrateObject.
MapWord first_word = object->synchronized_map_word();
@@ -396,19 +386,11 @@ SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
// copied.
if (first_word.IsForwardingAddress()) {
HeapObject dest = first_word.ToForwardingAddress();
- DCHECK(Heap::InFromSpace(*p));
- if ((*p)->IsWeak()) {
- p.store(HeapObjectReference::Weak(dest));
- } else {
- DCHECK((*p)->IsStrong());
- p.store(HeapObjectReference::Strong(dest));
- }
- DCHECK_IMPLIES(Heap::InNewSpace(dest),
- (Heap::InToSpace(dest) ||
- MemoryChunk::FromHeapObject(dest)->owner()->identity() ==
- NEW_LO_SPACE));
+ HeapObjectReference::Update(p, dest);
+ DCHECK_IMPLIES(Heap::InYoungGeneration(dest),
+ Heap::InToPage(dest) || Heap::IsLargeObject(dest));
- return Heap::InToSpace(dest) ? KEEP_SLOT : REMOVE_SLOT;
+ return Heap::InYoungGeneration(dest) ? KEEP_SLOT : REMOVE_SLOT;
}
Map map = first_word.ToMap();
@@ -426,15 +408,15 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap, TSlot slot) {
"Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
using THeapObjectSlot = typename TSlot::THeapObjectSlot;
MaybeObject object = *slot;
- if (Heap::InFromSpace(object)) {
+ if (Heap::InFromPage(object)) {
HeapObject heap_object = object->GetHeapObject();
SlotCallbackResult result =
ScavengeObject(THeapObjectSlot(slot), heap_object);
DCHECK_IMPLIES(result == REMOVE_SLOT,
- !heap->IsInYoungGeneration((*slot)->GetHeapObject()));
+ !heap->InYoungGeneration((*slot)->GetHeapObject()));
return result;
- } else if (Heap::InToSpace(object)) {
+ } else if (Heap::InToPage(object)) {
// Already updated slot. This can happen when processing of the work list
// is interleaved with processing roots.
return KEEP_SLOT;
@@ -479,8 +461,9 @@ void ScavengeVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
template <typename TSlot>
void ScavengeVisitor::VisitHeapObjectImpl(TSlot slot, HeapObject heap_object) {
- if (Heap::InNewSpace(heap_object)) {
- scavenger_->ScavengeObject(HeapObjectSlot(slot), heap_object);
+ if (Heap::InYoungGeneration(heap_object)) {
+ using THeapObjectSlot = typename TSlot::THeapObjectSlot;
+ scavenger_->ScavengeObject(THeapObjectSlot(slot), heap_object);
}
}
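The ObjectFields parameter threaded through these evacuation paths replaces the removed ContainsOnlyData() switch: the fields kind is derived once from the visitor id, and data-only objects are never pushed onto the copied or promoted work lists, since they hold no pointers that would need rescanning. A condensed sketch of the flow, assembled from the hunks above:

    // In EvacuateObject: one lookup decides whether the copy must later be
    // revisited for pointer updates.
    VisitorId visitor_id = map->visitor_id();
    return EvacuateObjectDefault(map, slot, source, size,
                                 Map::ObjectFieldsFrom(visitor_id));

    // In the copy paths:
    if (object_fields == ObjectFields::kMaybePointers) {
      copied_list_.Push(ObjectAndSize(target, object_size));  // rescan later
    }
    // ObjectFields::kDataOnly objects skip the push entirely.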
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 76939a87e8..df0ed8886e 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -14,6 +14,9 @@
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects-body-descriptors-inl.h"
+#include "src/objects/data-handler-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/transitions-inl.h"
#include "src/utils-inl.h"
namespace v8 {
@@ -62,7 +65,7 @@ class ScavengingTask final : public ItemParallelJob::Task {
static_cast<void*>(this), scavenging_time,
scavenger_->bytes_copied(), scavenger_->bytes_promoted());
}
- };
+ }
private:
Heap* const heap_;
@@ -72,9 +75,9 @@ class ScavengingTask final : public ItemParallelJob::Task {
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
public:
- IterateAndScavengePromotedObjectsVisitor(Heap* heap, Scavenger* scavenger,
+ IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
bool record_slots)
- : heap_(heap), scavenger_(scavenger), record_slots_(record_slots) {}
+ : scavenger_(scavenger), record_slots_(record_slots) {}
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
@@ -119,7 +122,7 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
scavenger_->PageMemoryFence(MaybeObject::FromObject(target));
- if (Heap::InFromSpace(target)) {
+ if (Heap::InFromPage(target)) {
SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
bool success = (*slot)->GetHeapObject(&target);
USE(success);
@@ -134,25 +137,29 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
HeapObject::cast(target)));
} else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
HeapObject::cast(target))) {
- heap_->mark_compact_collector()->RecordSlot(host, ObjectSlot(slot),
- target);
+ // We should never try to record off-heap slots.
+ DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
+ // We cannot call MarkCompactCollector::RecordSlot because that checks
+ // that the host page is not in young generation, which does not hold
+ // for pending large pages.
+ RememberedSet<OLD_TO_OLD>::Insert(MemoryChunk::FromHeapObject(host),
+ slot.address());
}
}
- Heap* const heap_;
Scavenger* const scavenger_;
const bool record_slots_;
};
static bool IsUnscavengedHeapObject(Heap* heap, FullObjectSlot p) {
- return Heap::InFromSpace(*p) &&
+ return Heap::InFromPage(*p) &&
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
public:
Object RetainAs(Object object) override {
- if (!Heap::InFromSpace(object)) {
+ if (!Heap::InFromPage(object)) {
return object;
}
@@ -219,7 +226,7 @@ void ScavengerCollector::CollectGarbage() {
{
// Parallel phase scavenging all copied and promoted objects.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- job.Run(isolate_->async_counters());
+ job.Run();
DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty());
}
@@ -227,17 +234,16 @@ void ScavengerCollector::CollectGarbage() {
// Scavenge weak global handles.
TRACE_GC(heap_->tracer(),
GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
- isolate_->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ isolate_->global_handles()->MarkYoungWeakUnmodifiedObjectsPending(
&IsUnscavengedHeapObject);
- isolate_->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
- &root_scavenge_visitor);
+ isolate_->global_handles()->IterateYoungWeakUnmodifiedRootsForFinalizers(
+ &root_scavenge_visitor);
scavengers[kMainThreadId]->Process();
DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty());
isolate_->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ ->IterateYoungWeakUnmodifiedRootsForPhantomHandles(
&root_scavenge_visitor, &IsUnscavengedHeapObject);
}
@@ -245,6 +251,8 @@ void ScavengerCollector::CollectGarbage() {
// Finalize parallel scavenging.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);
+ DCHECK(surviving_new_large_objects_.empty());
+
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i]->Finalize();
delete scavengers[i];
@@ -257,8 +265,8 @@ void ScavengerCollector::CollectGarbage() {
{
// Update references into new space
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
- heap_->UpdateNewSpaceReferencesInExternalStringTable(
- &Heap::UpdateNewSpaceReferenceInExternalStringTableEntry);
+ heap_->UpdateYoungReferencesInExternalStringTable(
+ &Heap::UpdateYoungReferenceInExternalStringTableEntry);
heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
}
@@ -287,7 +295,7 @@ void ScavengerCollector::CollectGarbage() {
  // Since we promote all surviving large objects immediately, all remaining
// large objects must be dead.
// TODO(hpayer): Don't free all as soon as we have an intermediate generation.
- heap_->new_lo_space()->FreeAllObjects();
+ heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) {
if (chunk->SweepingDone()) {
@@ -298,7 +306,7 @@ void ScavengerCollector::CollectGarbage() {
});
// Update how much has survived scavenge.
- heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedNewSpaceObjectSize());
+ heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedYoungObjectSize());
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
@@ -365,7 +373,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
const bool record_slots =
is_compacting_ &&
heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
- IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
+ IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
target->IterateBodyFast(map, size, &visitor);
}
@@ -456,9 +464,9 @@ void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
Object object = *p;
DCHECK(!HasWeakHeapObjectTag(object));
- if (!Heap::InNewSpace(object)) return;
-
- scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
+ if (Heap::InYoungGeneration(object)) {
+ scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
+ }
}
RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 0dfe44628a..e122ab8cdf 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -159,28 +159,28 @@ class Scavenger {
RememberedSetEntryNeeded(CopyAndForwardResult result);
template <typename THeapObjectSlot>
- V8_INLINE CopyAndForwardResult SemiSpaceCopyObject(Map map,
- THeapObjectSlot slot,
- HeapObject object,
- int object_size);
+ V8_INLINE CopyAndForwardResult
+ SemiSpaceCopyObject(Map map, THeapObjectSlot slot, HeapObject object,
+ int object_size, ObjectFields object_fields);
template <typename THeapObjectSlot>
V8_INLINE CopyAndForwardResult PromoteObject(Map map, THeapObjectSlot slot,
HeapObject object,
- int object_size);
+ int object_size,
+ ObjectFields object_fields);
template <typename THeapObjectSlot>
V8_INLINE SlotCallbackResult EvacuateObject(THeapObjectSlot slot, Map map,
HeapObject source);
- V8_INLINE bool HandleLargeObject(Map map, HeapObject object, int object_size);
+ V8_INLINE bool HandleLargeObject(Map map, HeapObject object, int object_size,
+ ObjectFields object_fields);
// Different cases for object evacuation.
template <typename THeapObjectSlot>
- V8_INLINE SlotCallbackResult EvacuateObjectDefault(Map map,
- THeapObjectSlot slot,
- HeapObject object,
- int object_size);
+ V8_INLINE SlotCallbackResult
+ EvacuateObjectDefault(Map map, THeapObjectSlot slot, HeapObject object,
+ int object_size, ObjectFields object_fields);
template <typename THeapObjectSlot>
inline SlotCallbackResult EvacuateThinString(Map map, THeapObjectSlot slot,
@@ -195,8 +195,6 @@ class Scavenger {
void IterateAndScavengePromotedObject(HeapObject target, Map map, int size);
- static inline bool ContainsOnlyData(VisitorId visitor_id);
-
ScavengerCollector* const collector_;
Heap* const heap_;
PromotionList::View promotion_list_;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 14f2842d83..a3d690ece6 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -9,7 +9,8 @@
#include "src/contexts.h"
#include "src/heap-symbols.h"
#include "src/heap/factory.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
+#include "src/ic/handler-configuration.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/layout-descriptor.h"
@@ -109,7 +110,8 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
// JSObjects have maps with a mutable prototype_validity_cell, so they cannot
// go in RO_SPACE.
AllocationResult allocation =
- AllocateRaw(Map::kSize, is_js_object ? MAP_SPACE : RO_SPACE);
+ AllocateRaw(Map::kSize, is_js_object ? AllocationType::kMap
+ : AllocationType::kReadOnly);
if (!allocation.To(&result)) return allocation;
result->set_map_after_allocation(ReadOnlyRoots(this).meta_map(),
@@ -124,7 +126,8 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object result;
- AllocationResult allocation = AllocateRaw(Map::kSize, RO_SPACE);
+ AllocationResult allocation =
+ AllocateRaw(Map::kSize, AllocationType::kReadOnly);
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
Map map = Map::unchecked_cast(result);
@@ -150,6 +153,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
Map::OwnsDescriptorsBit::encode(true) |
Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
+ map->clear_padding();
map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
return map;
}
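
These hunks migrate raw-allocation call sites from AllocationSpace constants to the new AllocationType enum, with Heap::SelectType translating the legacy space at the remaining call site below. The following is a hypothetical sketch of such a translation helper; the enumerator set and the mapping are assumptions for illustration, not V8's exact definitions.

#include <cassert>

// Assumed, simplified versions of the two enums.
enum AllocationSpace { NEW_SPACE, OLD_SPACE, MAP_SPACE, RO_SPACE };
enum class AllocationType { kYoung, kOld, kMap, kReadOnly };

// Hypothetical equivalent of Heap::SelectType: map a legacy space constant to
// the allocation type that AllocateRaw now expects.
AllocationType SelectType(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE: return AllocationType::kYoung;
    case OLD_SPACE: return AllocationType::kOld;
    case MAP_SPACE: return AllocationType::kMap;
    case RO_SPACE:  return AllocationType::kReadOnly;
  }
  return AllocationType::kOld;  // unreachable for the cases above
}

int main() {
  assert(SelectType(RO_SPACE) == AllocationType::kReadOnly);
  assert(SelectType(MAP_SPACE) == AllocationType::kMap);
}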
@@ -170,7 +174,7 @@ AllocationResult Heap::Allocate(Map map, AllocationSpace space) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
HeapObject result;
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, Heap::SelectType(space));
if (!allocation.To(&result)) return allocation;
// New space objects are allocated white.
WriteBarrierMode write_barrier_mode =
@@ -185,7 +189,7 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
HeapObject object;
AllocationResult allocation = AllocateRaw(
- size, RO_SPACE,
+ size, AllocationType::kReadOnly,
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
if (!allocation.To(&object)) return allocation;
@@ -195,9 +199,7 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
FixedTypedArrayBase elements = FixedTypedArrayBase::cast(object);
elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
elements->set_external_pointer(
- reinterpret_cast<void*>(
- ExternalReference::fixed_typed_array_base_data_offset().address()),
- SKIP_WRITE_BARRIER);
+ FixedTypedArrayBase::ExternalPointerPtrForOnHeapArray());
elements->set_length(0);
return elements;
}
@@ -243,7 +245,8 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty array.
{
- AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), RO_SPACE);
+ AllocationResult alloc =
+ AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
obj->set_map_after_allocation(roots.fixed_array_map(), SKIP_WRITE_BARRIER);
FixedArray::cast(obj)->set_length(0);
@@ -251,7 +254,8 @@ bool Heap::CreateInitialMaps() {
set_empty_fixed_array(FixedArray::cast(obj));
{
- AllocationResult alloc = AllocateRaw(WeakFixedArray::SizeFor(0), RO_SPACE);
+ AllocationResult alloc =
+ AllocateRaw(WeakFixedArray::SizeFor(0), AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
obj->set_map_after_allocation(roots.weak_fixed_array_map(),
SKIP_WRITE_BARRIER);
@@ -260,8 +264,8 @@ bool Heap::CreateInitialMaps() {
set_empty_weak_fixed_array(WeakFixedArray::cast(obj));
{
- AllocationResult allocation =
- AllocateRaw(WeakArrayList::SizeForCapacity(0), RO_SPACE);
+ AllocationResult allocation = AllocateRaw(WeakArrayList::SizeForCapacity(0),
+ AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
obj->set_map_after_allocation(roots.weak_array_list_map(),
SKIP_WRITE_BARRIER);
@@ -283,7 +287,7 @@ bool Heap::CreateInitialMaps() {
}
set_undefined_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kUndefined);
- DCHECK(!InNewSpace(roots.undefined_value()));
+ DCHECK(!InYoungGeneration(roots.undefined_value()));
{
AllocationResult allocation = Allocate(roots.the_hole_map(), RO_SPACE);
if (!allocation.To(&obj)) return false;
@@ -314,7 +318,7 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty descriptor array.
{
int size = DescriptorArray::SizeFor(0);
- if (!AllocateRaw(size, RO_SPACE).To(&obj)) return false;
+ if (!AllocateRaw(size, AllocationType::kReadOnly).To(&obj)) return false;
obj->set_map_after_allocation(roots.descriptor_array_map(),
SKIP_WRITE_BARRIER);
DescriptorArray array = DescriptorArray::cast(obj);
@@ -427,7 +431,7 @@ bool Heap::CreateInitialMaps() {
{
// The invalid_prototype_validity_cell is needed for JSObject maps.
Smi value = Smi::FromInt(Map::kPrototypeChainInvalid);
- AllocationResult alloc = AllocateRaw(Cell::kSize, OLD_SPACE);
+ AllocationResult alloc = AllocateRaw(Cell::kSize, AllocationType::kOld);
if (!alloc.To(&obj)) return false;
obj->set_map_after_allocation(roots.cell_map(), SKIP_WRITE_BARRIER);
Cell::cast(obj)->set_value(value);
@@ -496,15 +500,21 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
UncompiledDataWithPreparseData::kSize,
uncompiled_data_with_preparse_data)
+#if V8_SFI_HAS_UNIQUE_ID
+ ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE,
+ SharedFunctionInfoWithID::kAlignedSize, shared_function_info)
+#else
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
+#endif
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
+ ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
+
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
- ALLOCATE_MAP(JS_OBJECT_TYPE,
- JSObject::kHeaderSizeForEmbedderFields + kEmbedderDataSlotSize,
+ ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kEmbedderDataSlotSize,
external)
external_map()->set_is_extensible(false);
#undef ALLOCATE_PRIMITIVE_MAP
@@ -513,7 +523,8 @@ bool Heap::CreateInitialMaps() {
}
{
- AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), RO_SPACE);
+ AllocationResult alloc =
+ AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
obj->set_map_after_allocation(roots.scope_info_map(), SKIP_WRITE_BARRIER);
FixedArray::cast(obj)->set_length(0);
@@ -522,7 +533,8 @@ bool Heap::CreateInitialMaps() {
{
// Empty boilerplate needs a field for literal_flags
- AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(1), RO_SPACE);
+ AllocationResult alloc =
+ AllocateRaw(FixedArray::SizeFor(1), AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
obj->set_map_after_allocation(roots.object_boilerplate_description_map(),
SKIP_WRITE_BARRIER);
@@ -564,14 +576,16 @@ bool Heap::CreateInitialMaps() {
// Empty arrays.
{
- if (!AllocateRaw(ByteArray::SizeFor(0), RO_SPACE).To(&obj)) return false;
+ if (!AllocateRaw(ByteArray::SizeFor(0), AllocationType::kReadOnly).To(&obj))
+ return false;
obj->set_map_after_allocation(roots.byte_array_map(), SKIP_WRITE_BARRIER);
ByteArray::cast(obj)->set_length(0);
set_empty_byte_array(ByteArray::cast(obj));
}
{
- if (!AllocateRaw(FixedArray::SizeFor(0), RO_SPACE).To(&obj)) {
+ if (!AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly)
+ .To(&obj)) {
return false;
}
obj->set_map_after_allocation(roots.property_array_map(),
@@ -592,7 +606,7 @@ bool Heap::CreateInitialMaps() {
TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
- DCHECK(!InNewSpace(roots.empty_fixed_array()));
+ DCHECK(!InYoungGeneration(roots.empty_fixed_array()));
roots.bigint_map()->SetConstructorFunctionIndex(
Context::BIGINT_FUNCTION_INDEX);
@@ -635,7 +649,7 @@ void Heap::CreateInitialObjects() {
// There's no "current microtask" in the beginning.
set_current_microtask(roots.undefined_value());
- set_dirty_js_weak_factories(roots.undefined_value());
+ set_dirty_js_finalization_groups(roots.undefined_value());
set_weak_refs_keep_during_job(roots.undefined_value());
// Allocate cache for single character one byte strings.
@@ -785,6 +799,7 @@ void Heap::CreateInitialObjects() {
set_retaining_path_targets(roots.empty_weak_array_list());
set_feedback_vectors_for_profiling_tools(roots.undefined_value());
+ set_pending_optimize_for_test_bytecode(roots.undefined_value());
set_script_list(roots.empty_weak_array_list());
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index ed996d0c73..86312d82af 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -5,11 +5,13 @@
#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_
+#include "src/heap/spaces.h"
+
#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/v8-fallthrough.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
-#include "src/heap/spaces.h"
#include "src/msan.h"
#include "src/objects/code-inl.h"
@@ -119,8 +121,10 @@ void Space::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
// SemiSpace
bool SemiSpace::Contains(HeapObject o) {
- return id_ == kToSpace ? MemoryChunk::FromHeapObject(o)->InToSpace()
- : MemoryChunk::FromHeapObject(o)->InFromSpace();
+ MemoryChunk* memory_chunk = MemoryChunk::FromHeapObject(o);
+ if (memory_chunk->IsLargePage()) return false;
+ return id_ == kToSpace ? memory_chunk->IsToPage()
+ : memory_chunk->IsFromPage();
}
bool SemiSpace::Contains(Object o) {
@@ -234,10 +238,6 @@ void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
amount);
}
-bool MemoryChunk::IsInNewLargeObjectSpace() const {
- return owner()->identity() == NEW_LO_SPACE;
-}
-
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner()->identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
@@ -428,10 +428,9 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
- AllocationResult result =
- alignment == kDoubleAligned
- ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
- : AllocateRawUnaligned(size_in_bytes);
+ AllocationResult result = alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment)
+ : AllocateRawUnaligned(size_in_bytes);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
@@ -514,10 +513,16 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
top_on_previous_step_ = top();
}
#ifdef V8_HOST_ARCH_32_BIT
- return alignment == kDoubleAligned
- ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+ return alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment)
: AllocateRawUnaligned(size_in_bytes);
#else
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): Consider using aligned allocations once the
+ // allocation alignment inconsistency is fixed. For now we keep using
+ // unaligned access since both x64 and arm64 architectures (where pointer
+ // compression is supported) allow unaligned access to doubles and full words.
+#endif // V8_COMPRESS_POINTERS
return AllocateRawUnaligned(size_in_bytes);
#endif
}
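
Both allocation hunks replace an equality test against kDoubleAligned with a general "anything but word alignment takes the aligned path" dispatch. Below is a standalone sketch of aligned allocation by filler padding, under the usual assumption that misalignment is fixed by inserting a filler before the object; sizes and names are simplified.

#include <cassert>
#include <cstdint>

constexpr uintptr_t kDoubleAlignment = 8;

// Compute the filler size needed so that `top` becomes double-aligned, as an
// aligned-allocation path would before bumping the allocation pointer.
uintptr_t DoubleAlignFiller(uintptr_t top) {
  return (kDoubleAlignment - (top & (kDoubleAlignment - 1))) &
         (kDoubleAlignment - 1);
}

int main() {
  assert(DoubleAlignFiller(0x1000) == 0);  // already aligned
  assert(DoubleAlignFiller(0x1004) == 4);  // needs a 4-byte filler
  uintptr_t top = 0x1004;
  top += DoubleAlignFiller(top);           // insert filler, then allocate
  assert(top % kDoubleAlignment == 0);
}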
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 606279292a..c2e6ffb54e 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -307,8 +307,7 @@ void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
}
}
-void MemoryAllocator::Unmapper::PrepareForMarkCompact() {
- CancelAndWaitForPendingTasks();
+void MemoryAllocator::Unmapper::PrepareForGC() {
// Free non-regular chunks because they cannot be re-used.
PerformFreeMemoryOnQueuedNonRegularChunks();
}
@@ -670,6 +669,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
}
DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
+ DCHECK_EQ(kHeapOffset, OFFSET_OF(MemoryChunk, heap_));
+ DCHECK_EQ(kOwnerOffset, OFFSET_OF(MemoryChunk, owner_));
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
@@ -708,10 +709,7 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
bool in_to_space = (id() != kFromSpace);
- chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
- : MemoryChunk::IN_FROM_SPACE);
- DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
- : MemoryChunk::IN_TO_SPACE));
+ chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
Page* page = static_cast<Page*>(chunk);
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->AllocateLocalTracker();
@@ -748,6 +746,7 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
}
LargePage* page = static_cast<LargePage*>(chunk);
+ page->SetFlag(MemoryChunk::LARGE_PAGE);
page->list_node().Initialize();
return page;
}
@@ -1312,7 +1311,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
- if (!heap_->IsLargeMemoryChunk(this)) {
+ if (!IsLargePage()) {
Page* page = static_cast<Page*>(this);
page->ReleaseFreeListCategories();
}
@@ -1466,6 +1465,12 @@ void MemoryChunk::ReleaseMarkingBitmap() {
// -----------------------------------------------------------------------------
// PagedSpace implementation
+void Space::CheckOffsetsAreConsistent() const {
+ static_assert(Space::kIdOffset == heap_internals::Space::kIdOffset,
+ "ID offset inconsistent");
+ DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
+}
+
void Space::AddAllocationObserver(AllocationObserver* observer) {
allocation_observers_.push_back(observer);
StartNextInlineAllocationStep();
@@ -2198,7 +2203,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
memory_chunk_list_.Remove(current_page);
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
- current_page->SetFlags(0, Page::kIsInNewSpaceMask);
+ current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
current_page);
current_page = next_current;
@@ -2660,17 +2665,16 @@ void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
page->set_owner(this);
page->SetFlags(flags, mask);
if (id_ == kToSpace) {
- page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
- page->SetFlag(MemoryChunk::IN_TO_SPACE);
+ page->ClearFlag(MemoryChunk::FROM_PAGE);
+ page->SetFlag(MemoryChunk::TO_PAGE);
page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
page, 0);
} else {
- page->SetFlag(MemoryChunk::IN_FROM_SPACE);
- page->ClearFlag(MemoryChunk::IN_TO_SPACE);
+ page->SetFlag(MemoryChunk::FROM_PAGE);
+ page->ClearFlag(MemoryChunk::TO_PAGE);
}
- DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
- page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
+ DCHECK(page->InYoungGeneration());
}
}
@@ -2759,10 +2763,10 @@ void SemiSpace::Verify() {
for (Page* page : *this) {
CHECK_EQ(page->owner(), this);
CHECK(page->InNewSpace());
- CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
- : MemoryChunk::IN_TO_SPACE));
- CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
- : MemoryChunk::IN_FROM_SPACE));
+ CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
+ : MemoryChunk::TO_PAGE));
+ CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
+ : MemoryChunk::FROM_PAGE));
CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
if (!is_from_space) {
// The pointers-from-here-are-interesting flag isn't updated dynamically
@@ -2910,10 +2914,11 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
// We can't use .is_null() here because *map_location returns an
// Object (for which "is null" is not defined, as it would be
// indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
- if (*map_location == Map()) {
+ if (map_location.contains_value(kNullAddress)) {
map_location.store(ReadOnlyRoots(heap).free_space_map());
} else {
- DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
+ DCHECK(map_location.contains_value(
+ ReadOnlyRoots(heap).free_space_map().ptr()));
}
n = n->next();
}
@@ -3121,8 +3126,8 @@ size_t FreeListCategory::SumFreeList() {
while (!cur.is_null()) {
// We can't use "cur->map()" here because both cur's map and the
// root can be null during bootstrapping.
- DCHECK_EQ(*cur->map_slot(),
- page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap));
+ DCHECK(cur->map_slot().contains_value(
+ page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr()));
sum += cur->relaxed_read_size();
cur = cur->next();
}
@@ -3299,9 +3304,6 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
}
void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
- const size_t page_size = MemoryAllocator::GetCommitPageSize();
- const size_t area_start_offset =
- RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage(), page_size);
MemoryAllocator* memory_allocator = heap()->memory_allocator();
for (Page* p : *this) {
ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
@@ -3313,8 +3315,8 @@ void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
// page allocator manually.
v8::PageAllocator* page_allocator =
memory_allocator->page_allocator(page->executable());
- CHECK(SetPermissions(page_allocator, page->address() + area_start_offset,
- page->size() - area_start_offset, access));
+ CHECK(
+ SetPermissions(page_allocator, page->address(), page->size(), access));
}
}
@@ -3420,11 +3422,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, LO_SPACE) {}
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
- : Space(heap, id),
- size_(0),
- page_count_(0),
- objects_size_(0),
- chunk_map_(1024) {}
+ : Space(heap, id), size_(0), page_count_(0), objects_size_(0) {}
void LargeObjectSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
@@ -3465,6 +3463,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
heap()->NotifyOldGenerationExpansion();
+ AllocationStep(object_size, object->address(), object_size);
return object;
}
@@ -3475,13 +3474,12 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
- Register(page, object_size);
+ AddPage(page, object_size);
HeapObject object = page->GetObject();
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
- AllocationStep(object_size, object->address(), object_size);
return page;
}
@@ -3493,29 +3491,17 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
return CommittedMemory();
}
-
-// GC support
-Object LargeObjectSpace::FindObject(Address a) {
- LargePage* page = FindPage(a);
- if (page != nullptr) {
- return page->GetObject();
- }
- return Smi::kZero; // Signaling not found.
-}
-
-LargePage* LargeObjectSpace::FindPage(Address a) {
+LargePage* CodeLargeObjectSpace::FindPage(Address a) {
const Address key = MemoryChunk::FromAddress(a)->address();
auto it = chunk_map_.find(key);
if (it != chunk_map_.end()) {
LargePage* page = it->second;
- if (page->Contains(a)) {
- return page;
- }
+ CHECK(page->Contains(a));
+ return page;
}
return nullptr;
}
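
FindPage moves to CodeLargeObjectSpace, which keeps a hash map from page-aligned addresses to their LargePage, while plain LargeObjectSpace now answers ContainsSlow by a linear page walk. A standalone model of that chunk map, with simplified types and an illustrative page size:

#include <cassert>
#include <cstdint>
#include <unordered_map>

constexpr uintptr_t kPageSize = 1u << 18;  // 256 KB, illustrative only

struct LargePage { uintptr_t start, size; };

// Every kPageSize-aligned address covered by a large page maps to that page,
// so a lookup needs only one alignment and one hash probe.
struct ChunkMap {
  std::unordered_map<uintptr_t, LargePage*> map;
  void Insert(LargePage* p) {
    for (uintptr_t a = p->start; a < p->start + p->size; a += kPageSize)
      map[a] = p;
  }
  LargePage* Find(uintptr_t addr) const {
    auto it = map.find(addr & ~(kPageSize - 1));
    return it == map.end() ? nullptr : it->second;
  }
};

int main() {
  LargePage page{kPageSize * 4, kPageSize * 3};  // a 3-chunk large page
  ChunkMap chunk_map;
  chunk_map.Insert(&page);
  assert(chunk_map.Find(page.start + kPageSize + 42) == &page);
  assert(chunk_map.Find(kPageSize) == nullptr);
}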
-
void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
@@ -3532,10 +3518,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
}
}
-void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
- // There may be concurrent access on the chunk map. We have to take the lock
- // here.
- base::MutexGuard guard(&chunk_map_mutex_);
+void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
for (Address current = reinterpret_cast<Address>(page);
current < reinterpret_cast<Address>(page) + page->size();
current += MemoryChunk::kPageSize) {
@@ -3543,13 +3526,8 @@ void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
}
}
-void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
- RemoveChunkMapEntries(page, page->address());
-}
-
-void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
- Address free_start) {
- for (Address current = ::RoundUp(free_start, MemoryChunk::kPageSize);
+void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
+ for (Address current = page->address();
current < reinterpret_cast<Address>(page) + page->size();
current += MemoryChunk::kPageSize) {
chunk_map_.erase(current);
@@ -3558,35 +3536,31 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
- DCHECK(page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
- DCHECK(!page->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+ DCHECK(page->IsLargePage());
+ DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
+ DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
size_t object_size = static_cast<size_t>(page->GetObject()->Size());
- reinterpret_cast<NewLargeObjectSpace*>(page->owner())
- ->Unregister(page, object_size);
- Register(page, object_size);
- page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+ static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
+ AddPage(page, object_size);
+ page->ClearFlag(MemoryChunk::FROM_PAGE);
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->set_owner(this);
}
-void LargeObjectSpace::Register(LargePage* page, size_t object_size) {
+void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
size_ += static_cast<int>(page->size());
AccountCommitted(page->size());
objects_size_ += object_size;
page_count_++;
memory_chunk_list_.PushBack(page);
-
- InsertChunkMapEntries(page);
}
-void LargeObjectSpace::Unregister(LargePage* page, size_t object_size) {
+void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
size_ -= static_cast<int>(page->size());
AccountUncommitted(page->size());
objects_size_ -= object_size;
page_count_--;
memory_chunk_list_.Remove(page);
-
- RemoveChunkMapEntries(page);
}
void LargeObjectSpace::FreeUnmarkedObjects() {
@@ -3595,20 +3569,19 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
heap()->incremental_marking()->non_atomic_marking_state();
// Right-trimming does not update the objects_size_ counter. We update it
// lazily after every GC.
- objects_size_ = 0;
+ size_t surviving_object_size = 0;
while (current) {
LargePage* next_current = current->next_page();
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
+ size_t size = static_cast<size_t>(object->Size());
if (marking_state->IsBlack(object)) {
Address free_start;
- size_t size = static_cast<size_t>(object->Size());
- objects_size_ += size;
+ surviving_object_size += size;
if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
0) {
DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
current->ClearOutOfLiveRangeSlots(free_start);
- RemoveChunkMapEntries(current, free_start);
const size_t bytes_to_free =
current->size() - (free_start - current->address());
heap()->memory_allocator()->PartialFreeMemory(
@@ -3618,19 +3591,13 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
AccountUncommitted(bytes_to_free);
}
} else {
- memory_chunk_list_.Remove(current);
-
- // Free the chunk.
- size_ -= static_cast<int>(current->size());
- AccountUncommitted(current->size());
- page_count_--;
-
- RemoveChunkMapEntries(current);
+ RemovePage(current, size);
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
current);
}
current = next_current;
}
+ objects_size_ = surviving_object_size;
}
bool LargeObjectSpace::Contains(HeapObject object) {
@@ -3638,11 +3605,18 @@ bool LargeObjectSpace::Contains(HeapObject object) {
bool owned = (chunk->owner() == this);
- SLOW_DCHECK(!owned || FindObject(object->address())->IsHeapObject());
+ SLOW_DCHECK(!owned || ContainsSlow(object->address()));
return owned;
}
+bool LargeObjectSpace::ContainsSlow(Address addr) {
+ for (LargePage* page : *this) {
+ if (page->Contains(addr)) return true;
+ }
+ return false;
+}
+
std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
}
@@ -3682,7 +3656,8 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
object->IsFreeSpace() || object->IsFeedbackMetadata() ||
object->IsContext() ||
object->IsUncompiledDataWithoutPreparseData() ||
- object->IsPreparseData())) {
+ object->IsPreparseData()) &&
+ !FLAG_young_generation_large_objects) {
FATAL("Found invalid Object (instance_type=%i) in large object space.",
object->map()->instance_type());
}
@@ -3765,52 +3740,109 @@ void Page::Print() {
#endif // DEBUG
-NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
- : LargeObjectSpace(heap, NEW_LO_SPACE) {}
+NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
+ : LargeObjectSpace(heap, NEW_LO_SPACE),
+ pending_object_(0),
+ capacity_(capacity) {}
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
- // TODO(hpayer): Add heap growing strategy here.
+ // Do not allocate more objects if promoting the existing object would exceed
+ // the old generation capacity.
+ if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
+ return AllocationResult::Retry(identity());
+ }
+
+ // Allocation for the first object must succeed independently of the capacity.
+ if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
+ return AllocationResult::Retry(identity());
+ }
+
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
if (page == nullptr) return AllocationResult::Retry(identity());
+
+ // The size of the first object may exceed the capacity.
+ capacity_ = Max(capacity_, SizeOfObjects());
+
+ HeapObject result = page->GetObject();
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- page->SetFlag(MemoryChunk::IN_TO_SPACE);
+ page->SetFlag(MemoryChunk::TO_PAGE);
+ pending_object_.store(result->address(), std::memory_order_relaxed);
+#ifdef ENABLE_MINOR_MC
+ if (FLAG_minor_mc) {
+ page->AllocateYoungGenerationBitmap();
+ heap()
+ ->minor_mark_compact_collector()
+ ->non_atomic_marking_state()
+ ->ClearLiveness(page);
+ }
+#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
- return page->GetObject();
+ DCHECK(page->IsLargePage());
+ DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
+ AllocationStep(object_size, result->address(), object_size);
+ return result;
}
-size_t NewLargeObjectSpace::Available() {
- // TODO(hpayer): Update as soon as we have a growing strategy.
- return 0;
-}
+size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
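
NewLargeObjectSpace now has a real growing strategy: a capacity, an Available() derived from it, and two admission checks in AllocateRaw (old-generation headroom for the later promotion, and capacity for every object after the first). A standalone model of those checks follows; the names are simplifications of the code above and the old-generation check is reduced to a flag.

#include <cassert>
#include <cstddef>

struct NewLoSpaceModel {
  size_t capacity;
  size_t size_of_objects = 0;
  size_t Available() const { return capacity - size_of_objects; }

  // Returns true if the allocation is admitted, mirroring the two early
  // bail-outs in NewLargeObjectSpace::AllocateRaw.
  bool TryAllocate(size_t object_size, bool old_gen_can_expand) {
    if (!old_gen_can_expand) return false;
    // The first object must succeed regardless of capacity.
    if (size_of_objects > 0 && object_size > Available()) return false;
    size_of_objects += object_size;
    if (size_of_objects > capacity) capacity = size_of_objects;
    return true;
  }
};

int main() {
  NewLoSpaceModel space{1024};
  assert(space.TryAllocate(4096, true));  // first object may exceed capacity
  assert(space.capacity == 4096);         // capacity grows to cover it
  assert(!space.TryAllocate(1, true));    // no headroom left
  assert(!space.TryAllocate(1, false));   // old generation cannot expand
}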
void NewLargeObjectSpace::Flip() {
for (LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
- chunk->SetFlag(MemoryChunk::IN_FROM_SPACE);
- chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
- }
-}
-
-void NewLargeObjectSpace::FreeAllObjects() {
- LargePage* current = first_page();
- while (current) {
- LargePage* next_current = current->next_page();
- Unregister(current, static_cast<size_t>(current->GetObject()->Size()));
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
- current);
- current = next_current;
+ chunk->SetFlag(MemoryChunk::FROM_PAGE);
+ chunk->ClearFlag(MemoryChunk::TO_PAGE);
+ }
+}
+
+void NewLargeObjectSpace::FreeDeadObjects(
+ const std::function<bool(HeapObject)>& is_dead) {
+ bool is_marking = heap()->incremental_marking()->IsMarking();
+ size_t surviving_object_size = 0;
+ bool freed_pages = false;
+ for (auto it = begin(); it != end();) {
+ LargePage* page = *it;
+ it++;
+ HeapObject object = page->GetObject();
+ size_t size = static_cast<size_t>(object->Size());
+ if (is_dead(object)) {
+ freed_pages = true;
+ RemovePage(page, size);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+ if (FLAG_concurrent_marking && is_marking) {
+ heap()->concurrent_marking()->ClearMemoryChunkData(page);
+ }
+ } else {
+ surviving_object_size += size;
+ }
}
// Right-trimming does not update the objects_size_ counter. We update it
// lazily after every GC.
- objects_size_ = 0;
+ objects_size_ = surviving_object_size;
+ if (freed_pages) {
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+ }
+}
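
FreeAllObjects becomes FreeDeadObjects, parameterized by a liveness predicate; the scavenger call site earlier in this commit passes [](HeapObject) { return true; } to keep the old free-everything behaviour. Below is a standalone sketch of predicate-driven sweeping over a page list; PageModel and FreeDead are stand-ins, not V8 types.

#include <cassert>
#include <cstddef>
#include <functional>
#include <list>

struct PageModel { size_t object_size; };

// Free every page whose single object the predicate declares dead and return
// the surviving byte count, the same shape as FreeDeadObjects above.
size_t FreeDead(std::list<PageModel>& pages,
                const std::function<bool(const PageModel&)>& is_dead) {
  size_t surviving = 0;
  for (auto it = pages.begin(); it != pages.end();) {
    if (is_dead(*it)) {
      it = pages.erase(it);  // "free" the page
    } else {
      surviving += it->object_size;
      ++it;
    }
  }
  return surviving;
}

int main() {
  std::list<PageModel> pages{{100}, {200}, {300}};
  // Scavenger-style call: everything still in the space is dead.
  assert(FreeDead(pages, [](const PageModel&) { return true; }) == 0);
  assert(pages.empty());
}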
+
+void NewLargeObjectSpace::SetCapacity(size_t capacity) {
+ capacity_ = Max(capacity, SizeOfObjects());
}
CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
- : LargeObjectSpace(heap, CODE_LO_SPACE) {}
+ : LargeObjectSpace(heap, CODE_LO_SPACE),
+ chunk_map_(kInitialChunkMapCapacity) {}
AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}
+void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
+ LargeObjectSpace::AddPage(page, object_size);
+ InsertChunkMapEntries(page);
+}
+
+void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
+ RemoveChunkMapEntries(page);
+ LargeObjectSpace::RemovePage(page, object_size);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index e0bd39ea2b..8fe3357430 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -114,11 +114,6 @@ class Space;
// Some assertion macros used in the debugging mode.
-#define DCHECK_PAGE_ALIGNED(address) DCHECK_EQ(0, (address)&kPageAlignmentMask)
-
-#define DCHECK_OBJECT_ALIGNED(address) \
- DCHECK_EQ(0, (address)&kObjectAlignmentMask)
-
#define DCHECK_OBJECT_SIZE(size) \
DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
@@ -269,10 +264,12 @@ class MemoryChunk {
IS_EXECUTABLE = 1u << 0,
POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
- // A page in new space has one of the next two flags set.
- IN_FROM_SPACE = 1u << 3,
- IN_TO_SPACE = 1u << 4,
- NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
+ // A page in the from-space or a young large page that was not scavenged
+ // yet.
+ FROM_PAGE = 1u << 3,
+ // A page in the to-space or a young large page that was scavenged.
+ TO_PAGE = 1u << 4,
+ LARGE_PAGE = 1u << 5,
EVACUATION_CANDIDATE = 1u << 6,
NEVER_EVACUATE = 1u << 7,
@@ -322,7 +319,8 @@ class MemoryChunk {
// |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
// enabled.
- INCREMENTAL_MARKING = 1u << 18
+ INCREMENTAL_MARKING = 1u << 18,
+ NEW_SPACE_BELOW_AGE_MARK = 1u << 19
};
using Flags = uintptr_t;
@@ -335,10 +333,12 @@ class MemoryChunk {
static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
- static const Flags kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
+ static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
+
+ static const Flags kIsLargePageMask = LARGE_PAGE;
static const Flags kSkipEvacuationSlotsRecordingMask =
- kEvacuationCandidateMask | kIsInNewSpaceMask;
+ kEvacuationCandidateMask | kIsInYoungGenerationMask;
// |kSweepingDone|: The page state when sweeping is complete or sweeping must
// not be performed on that page. Sweeper threads that are done with their
@@ -365,6 +365,8 @@ class MemoryChunk {
kReservationOffset + 3 * kSystemPointerSize;
static const intptr_t kHeaderSentinelOffset =
kHeapOffset + kSystemPointerSize;
+ static const intptr_t kOwnerOffset =
+ kHeaderSentinelOffset + kSystemPointerSize;
static const size_t kHeaderSize =
kSizeOffset // NOLINT
@@ -377,8 +379,8 @@ class MemoryChunk {
+ kSystemPointerSize // Address area_start_
+ kSystemPointerSize // Address area_end_
+ kSystemPointerSize // Address owner_
- + kIntptrSize // intptr_t progress_bar_
- + kIntptrSize // std::atomic<intptr_t> live_byte_count_
+ + kSizetSize // size_t progress_bar_
+ + kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
@@ -428,7 +430,7 @@ class MemoryChunk {
if (mark == kNullAddress) return;
// Need to subtract one from the mark because when a chunk is full the
// top points to the next address after the chunk, which effectively belongs
- // to another chunk. See the comment to Page::FromTopOrLimit.
+ // to another chunk. See the comment to Page::FromAllocationAreaAddress.
MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
intptr_t old_mark = 0;
@@ -539,19 +541,20 @@ class MemoryChunk {
Address HighWaterMark() { return address() + high_water_mark_; }
- int progress_bar() {
+ size_t ProgressBar() {
DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
- return static_cast<int>(progress_bar_.load(std::memory_order_relaxed));
+ return progress_bar_.load(std::memory_order_acquire);
}
- void set_progress_bar(int progress_bar) {
+ bool TrySetProgressBar(size_t old_value, size_t new_value) {
DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
- progress_bar_.store(progress_bar, std::memory_order_relaxed);
+ return progress_bar_.compare_exchange_strong(old_value, new_value,
+ std::memory_order_acq_rel);
}
void ResetProgressBar() {
if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- set_progress_bar(0);
+ progress_bar_.store(0, std::memory_order_release);
}
}
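
The progress bar moves from plain relaxed stores to a compare-and-swap, so several markers can race to advance it without losing progress. A standalone sketch of the retry loop a caller of TrySetProgressBar might use; the loop itself is an assumption, only the CAS signature comes from the code above.

#include <atomic>
#include <cassert>
#include <cstddef>

std::atomic<size_t> progress_bar{0};

bool TrySetProgressBar(size_t old_value, size_t new_value) {
  return progress_bar.compare_exchange_strong(old_value, new_value,
                                              std::memory_order_acq_rel);
}

// Advance the scan position monotonically; if another thread got further
// first, keep its (larger) value instead of ours.
void AdvanceTo(size_t scanned) {
  size_t current = progress_bar.load(std::memory_order_acquire);
  while (current < scanned && !TrySetProgressBar(current, scanned)) {
    current = progress_bar.load(std::memory_order_acquire);
  }
}

int main() {
  AdvanceTo(128);
  AdvanceTo(64);  // stale update loses; the bar never moves backwards
  assert(progress_bar.load() == 128);
}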
@@ -568,12 +571,11 @@ class MemoryChunk {
// Some callers rely on the fact that this can operate on both
// tagged and aligned object addresses.
inline uint32_t AddressToMarkbitIndex(Address addr) const {
- return static_cast<uint32_t>(addr - this->address()) >>
- kSystemPointerSizeLog2;
+ return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
}
inline Address MarkbitIndexToAddress(uint32_t index) const {
- return this->address() + (index << kSystemPointerSizeLog2);
+ return this->address() + (index << kTaggedSizeLog2);
}
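
Markbit indexing now shifts by kTaggedSizeLog2 rather than kSystemPointerSizeLog2, so with pointer compression each 4-byte tagged slot gets its own mark bit. A standalone round-trip check of the two conversions, assuming a 4-byte tagged size:

#include <cassert>
#include <cstdint>

constexpr int kTaggedSizeLog2 = 2;  // assumed: 4-byte tagged values

uint32_t AddressToMarkbitIndex(uintptr_t chunk_base, uintptr_t addr) {
  return static_cast<uint32_t>(addr - chunk_base) >> kTaggedSizeLog2;
}

uintptr_t MarkbitIndexToAddress(uintptr_t chunk_base, uint32_t index) {
  return chunk_base + (static_cast<uintptr_t>(index) << kTaggedSizeLog2);
}

int main() {
  const uintptr_t base = 0x40000;
  const uintptr_t object = base + 0x30;  // tagged-aligned address
  uint32_t index = AddressToMarkbitIndex(base, object);
  assert(index == 0xC);                  // 0x30 >> 2
  assert(MarkbitIndexToAddress(base, index) == object);
}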
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
@@ -633,17 +635,20 @@ class MemoryChunk {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
- bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }
-
- bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
-
- bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
+ bool IsFromPage() const { return (flags_ & FROM_PAGE) != 0; }
+ bool IsToPage() const { return (flags_ & TO_PAGE) != 0; }
+ bool IsLargePage() const { return (flags_ & LARGE_PAGE) != 0; }
+ bool InYoungGeneration() const {
+ return (flags_ & kIsInYoungGenerationMask) != 0;
+ }
+ bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
+ bool InNewLargeObjectSpace() const {
+ return InYoungGeneration() && IsLargePage();
+ }
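
The old IN_FROM_SPACE/IN_TO_SPACE flags become FROM_PAGE/TO_PAGE plus a LARGE_PAGE bit, and the predicates above are pure flag algebra. A standalone model with the same bit positions as the flag definitions earlier in this file; everything else is simplified.

#include <cassert>
#include <cstdint>

constexpr uintptr_t FROM_PAGE = 1u << 3;
constexpr uintptr_t TO_PAGE = 1u << 4;
constexpr uintptr_t LARGE_PAGE = 1u << 5;
constexpr uintptr_t kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;

struct ChunkModel {
  uintptr_t flags = 0;
  bool IsFromPage() const { return (flags & FROM_PAGE) != 0; }
  bool IsToPage() const { return (flags & TO_PAGE) != 0; }
  bool IsLargePage() const { return (flags & LARGE_PAGE) != 0; }
  bool InYoungGeneration() const {
    return (flags & kIsInYoungGenerationMask) != 0;
  }
  bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
  bool InNewLargeObjectSpace() const {
    return InYoungGeneration() && IsLargePage();
  }
};

int main() {
  ChunkModel semi{TO_PAGE};                   // regular to-space page
  ChunkModel young_lo{TO_PAGE | LARGE_PAGE};  // young large page
  assert(semi.InNewSpace() && !semi.InNewLargeObjectSpace());
  assert(young_lo.InNewLargeObjectSpace() && !young_lo.InNewSpace());
}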
bool InOldSpace() const;
-
bool InLargeObjectSpace() const;
- inline bool IsInNewLargeObjectSpace() const;
Space* owner() const { return owner_; }
@@ -685,6 +690,16 @@ class MemoryChunk {
VirtualMemory* reserved_memory() { return &reservation_; }
+ template <AccessMode mode>
+ ConcurrentBitmap<mode>* marking_bitmap() const {
+ return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
+ }
+
+ template <AccessMode mode>
+ ConcurrentBitmap<mode>* young_generation_bitmap() const {
+ return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
+ }
+
size_t size_;
uintptr_t flags_;
@@ -701,19 +716,19 @@ class MemoryChunk {
// guaranteed to not contain such a pointer.
Address header_sentinel_;
+ // The space owning this memory chunk.
+ std::atomic<Space*> owner_;
+
// Start and end of allocatable memory on this chunk.
Address area_start_;
Address area_end_;
- // The space owning this memory chunk.
- std::atomic<Space*> owner_;
-
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
- std::atomic<intptr_t> progress_bar_;
+ std::atomic<size_t> progress_bar_;
// Count of bytes marked black on page.
- std::atomic<intptr_t> live_byte_count_;
+ intptr_t live_byte_count_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
@@ -790,7 +805,7 @@ static_assert(sizeof(std::atomic<intptr_t>) == kSystemPointerSize,
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
-// Page* p = Page::FromTopOrLimit(top);
+// Page* p = Page::FromAllocationAreaAddress(address);
class Page : public MemoryChunk {
public:
static const intptr_t kCopyAllFlags = ~0;
@@ -973,8 +988,11 @@ class Space : public Malloced {
external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
0;
+ CheckOffsetsAreConsistent();
}
+ void CheckOffsetsAreConsistent() const;
+
static inline void MoveExternalBackingStoreBytes(
ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
@@ -1084,6 +1102,8 @@ class Space : public Malloced {
std::atomic<size_t>* external_backing_store_bytes_;
private:
+ static const intptr_t kIdOffset = 9 * kSystemPointerSize;
+
bool allocation_observers_paused_;
Heap* heap_;
AllocationSpace id_;
@@ -1204,8 +1224,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
}
void AddMemoryChunkSafe(MemoryChunk* chunk) {
- if (!heap_->IsLargeMemoryChunk(chunk) &&
- chunk->executable() != EXECUTABLE) {
+ if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
AddMemoryChunkSafe<kRegular>(chunk);
} else {
AddMemoryChunkSafe<kNonRegular>(chunk);
@@ -1231,7 +1250,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
V8_EXPORT_PRIVATE void FreeQueuedChunks();
void CancelAndWaitForPendingTasks();
- void PrepareForMarkCompact();
+ void PrepareForGC();
void EnsureUnmappingCompleted();
V8_EXPORT_PRIVATE void TearDown();
size_t NumberOfCommittedChunks();
@@ -1418,14 +1437,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
Unmapper* unmapper() { return &unmapper_; }
- private:
- void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
- size_t requested);
-
// PreFree logically frees the object, i.e., it takes care of the size
// bookkeeping and calls the allocation callback.
void PreFreeMemory(MemoryChunk* chunk);
+ private:
+ void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
+ size_t requested);
+
// FreeMemory can be called concurrently when PreFree was executed before.
void PerformFreeMemory(MemoryChunk* chunk);
@@ -1930,7 +1949,7 @@ class V8_EXPORT_PRIVATE FreeList {
static const size_t kTinyListMax = 0x1f * kTaggedSize;
static const size_t kSmallListMax = 0xff * kTaggedSize;
static const size_t kMediumListMax = 0x7ff * kTaggedSize;
- static const size_t kLargeListMax = 0x3fff * kTaggedSize;
+ static const size_t kLargeListMax = 0x2fff * kTaggedSize;
static const size_t kTinyAllocationMax = kTiniestListMax;
static const size_t kSmallAllocationMax = kTinyListMax;
static const size_t kMediumAllocationMax = kSmallListMax;
@@ -2703,7 +2722,7 @@ class NewSpace : public SpaceWithLinearArea {
}
void MovePageFromSpaceToSpace(Page* page) {
- DCHECK(page->InFromSpace());
+ DCHECK(page->IsFromPage());
from_space_.RemovePage(page);
to_space_.PrependPage(page);
}
@@ -2966,6 +2985,10 @@ class ReadOnlySpace : public PagedSpace {
explicit ReadOnlySpace(Heap* heap);
+ // TODO(v8:7464): Remove this once PagedSpace::TearDown no longer writes to
+ // memory_chunk_list_.
+ ~ReadOnlySpace() override { MarkAsReadWrite(); }
+
bool writable() const { return !is_marked_read_only_; }
void ClearStringPaddingIfNeeded();
@@ -3018,37 +3041,25 @@ class LargeObjectSpace : public Space {
int PageCount() { return page_count_; }
- // Finds an object for a given address, returns a Smi if it is not found.
- // The function iterates through all objects in this space, may be slow.
- Object FindObject(Address a);
-
- // Finds a large object page containing the given address, returns nullptr
- // if such a page doesn't exist.
- LargePage* FindPage(Address a);
-
// Clears the marking state of live objects.
void ClearMarkingStateOfLiveObjects();
// Frees unmarked objects.
void FreeUnmarkedObjects();
- void InsertChunkMapEntries(LargePage* page);
- void RemoveChunkMapEntries(LargePage* page);
- void RemoveChunkMapEntries(LargePage* page, Address free_start);
-
void PromoteNewLargeObject(LargePage* page);
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject obj);
// Checks whether an address is in the object area in this space. Iterates
// all objects in the space. May be slow.
- bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
+ bool ContainsSlow(Address addr);
// Checks whether the space is empty.
bool IsEmpty() { return first_page() == nullptr; }
- void Register(LargePage* page, size_t object_size);
- void Unregister(LargePage* page, size_t object_size);
+ virtual void AddPage(LargePage* page, size_t object_size);
+ virtual void RemovePage(LargePage* page, size_t object_size);
LargePage* first_page() {
return reinterpret_cast<LargePage*>(Space::first_page());
@@ -3062,8 +3073,6 @@ class LargeObjectSpace : public Space {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
- base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }
-
#ifdef VERIFY_HEAP
virtual void Verify(Isolate* isolate);
#endif
@@ -3082,19 +3091,12 @@ class LargeObjectSpace : public Space {
size_t objects_size_; // size of objects
private:
- // The chunk_map_mutex_ has to be used when the chunk map is accessed
- // concurrently.
- base::Mutex chunk_map_mutex_;
-
- // Page-aligned addresses to their corresponding LargePage.
- std::unordered_map<Address, LargePage*> chunk_map_;
-
friend class LargeObjectIterator;
};
class NewLargeObjectSpace : public LargeObjectSpace {
public:
- explicit NewLargeObjectSpace(Heap* heap);
+ NewLargeObjectSpace(Heap* heap, size_t capacity);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
@@ -3103,7 +3105,21 @@ class NewLargeObjectSpace : public LargeObjectSpace {
void Flip();
- void FreeAllObjects();
+ void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
+
+ void SetCapacity(size_t capacity);
+
+ // The last allocated object that is not guaranteed to be initialized when
+ // the concurrent marker visits it.
+ Address pending_object() {
+ return pending_object_.load(std::memory_order_relaxed);
+ }
+
+ void ResetPendingObject() { pending_object_.store(0); }
+
+ private:
+ std::atomic<Address> pending_object_;
+ size_t capacity_;
};
class CodeLargeObjectSpace : public LargeObjectSpace {
@@ -3112,6 +3128,22 @@ class CodeLargeObjectSpace : public LargeObjectSpace {
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRaw(int object_size);
+
+ // Finds a large object page containing the given address, returns nullptr
+ // if such a page doesn't exist.
+ LargePage* FindPage(Address a);
+
+ protected:
+ void AddPage(LargePage* page, size_t object_size) override;
+ void RemovePage(LargePage* page, size_t object_size) override;
+
+ private:
+ static const size_t kInitialChunkMapCapacity = 1024;
+ void InsertChunkMapEntries(LargePage* page);
+ void RemoveChunkMapEntries(LargePage* page);
+
+ // Page-aligned addresses to their corresponding LargePage.
+ std::unordered_map<Address, LargePage*> chunk_map_;
};
class LargeObjectIterator : public ObjectIterator {
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index dabeabb41b..8e7fb59975 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -91,21 +91,21 @@ int RelocInfo::target_address_size() {
HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(Object(Memory<Address>(pc_)));
+ return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>::cast(Memory<Handle<Object>>(pc_));
+ return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory<Address>(pc_) = target->ptr();
+ WriteUnalignedValue(pc_, target->ptr());
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc_, sizeof(Address));
+ FlushInstructionCache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
@@ -114,21 +114,21 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return Memory<Address>(pc_);
+ return ReadUnalignedValue<Address>(pc_);
}
void RelocInfo::set_target_external_reference(
Address target, ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- Memory<Address>(pc_) = target;
+ WriteUnalignedValue(pc_, target);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc_, sizeof(Address));
+ FlushInstructionCache(pc_, sizeof(Address));
}
}
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
- return Memory<Address>(pc_);
+ return ReadUnalignedValue<Address>(pc_);
}
@@ -139,7 +139,7 @@ Address RelocInfo::target_internal_reference_address() {
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
- return static_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
+ return ReadUnalignedValue<Address>(pc_);
}
void RelocInfo::set_target_runtime_entry(Address target,
@@ -159,7 +159,7 @@ Address RelocInfo::target_off_heap_target() {
void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
- Memory<Address>(pc_) = kNullAddress;
+ WriteUnalignedValue(pc_, kNullAddress);
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
IsOffHeapTarget(rmode_)) {
// Effectively write zero into the relocation.
@@ -170,33 +170,14 @@ void RelocInfo::WipeOut() {
}
}
-template <typename ObjectVisitor>
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(host(), this);
- Assembler::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTargetMode(mode)) {
- visitor->VisitCodeTarget(host(), this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(host(), this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(host(), this);
- } else if (IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(host(), this);
- } else if (RelocInfo::IsOffHeapTarget(mode)) {
- visitor->VisitOffHeapTarget(host(), this);
- }
-}
-
void Assembler::emit(uint32_t x) {
- *reinterpret_cast<uint32_t*>(pc_) = x;
+ WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint32_t);
}
void Assembler::emit_q(uint64_t x) {
- *reinterpret_cast<uint64_t*>(pc_) = x;
+ WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint64_t);
}
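
All the pc_-relative patching on ia32 now goes through ReadUnalignedValue/WriteUnalignedValue instead of dereferencing casted pointers, the memcpy idiom for unaligned access without undefined behavior. A standalone sketch of those helpers as commonly implemented; this is an assumption about their shape, not a copy of V8's base library.

#include <cassert>
#include <cstdint>
#include <cstring>

using Address = uintptr_t;

template <typename V>
V ReadUnalignedValue(Address p) {
  V r;
  std::memcpy(&r, reinterpret_cast<const void*>(p), sizeof(V));  // no UB
  return r;
}

template <typename V>
void WriteUnalignedValue(Address p, V value) {
  std::memcpy(reinterpret_cast<void*>(p), &value, sizeof(V));
}

int main() {
  unsigned char buf[16] = {};
  Address misaligned = reinterpret_cast<Address>(buf) + 1;
  WriteUnalignedValue<uint32_t>(misaligned, 0xDEADBEEF);
  assert(ReadUnalignedValue<uint32_t>(misaligned) == 0xDEADBEEF);
}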
@@ -251,21 +232,21 @@ void Assembler::emit_b(Immediate x) {
void Assembler::emit_w(const Immediate& x) {
DCHECK(RelocInfo::IsNone(x.rmode_));
uint16_t value = static_cast<uint16_t>(x.immediate());
- reinterpret_cast<uint16_t*>(pc_)[0] = value;
+ WriteUnalignedValue(reinterpret_cast<Address>(pc_), value);
pc_ += sizeof(uint16_t);
}
Address Assembler::target_address_at(Address pc, Address constant_pool) {
- return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
+ return pc + sizeof(int32_t) + ReadUnalignedValue<int32_t>(pc);
}
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
- *reinterpret_cast<int32_t*>(pc) = target - (pc + sizeof(int32_t));
+ WriteUnalignedValue(pc, target - (pc + sizeof(int32_t)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, sizeof(int32_t));
+ FlushInstructionCache(pc, sizeof(int32_t));
}
}
@@ -315,7 +296,7 @@ void Assembler::emit_near_disp(Label* L) {
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
- Memory<Address>(pc) = target;
+ WriteUnalignedValue(pc, target);
}
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index b0c359034a..4a5d895652 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -207,7 +207,7 @@ bool RelocInfo::IsInConstantPool() {
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
- return Memory<uint32_t>(pc_);
+ return ReadUnalignedValue<uint32_t>(pc_);
}
// -----------------------------------------------------------------------------
@@ -297,7 +297,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
}
}
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
- Memory<Handle<Object>>(pc) = object;
+ WriteUnalignedValue(pc, object);
}
}
@@ -314,8 +314,11 @@ Assembler::Assembler(const AssemblerOptions& options,
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
}
-void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
- int code_comments_size = WriteCodeComments();
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
+ const int code_comments_size = WriteCodeComments();
+
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
@@ -323,16 +326,25 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_start_;
- desc->buffer_size = buffer_->size();
- desc->instr_size = pc_offset();
- desc->reloc_size =
- (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
- desc->origin = this;
- desc->constant_pool_size = 0;
- desc->unwinding_info_size = 0;
- desc->unwinding_info = nullptr;
- desc->code_comments_size = code_comments_size;
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
}
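
GetCode now derives the CodeDesc layout by walking backwards from the end of the instruction area: code comments sit last, then the (empty on ia32) constant pool, then the optional handler and safepoint tables. The standalone recomputation below uses the same arithmetic; absence is signalled here by a 0 sentinel standing in for kNoHandlerTable and a null SafepointTableBuilder, and the struct is a stand-in for CodeDesc.

#include <cassert>

struct LayoutModel {
  int safepoint_table_offset;
  int handler_table_offset;
  int constant_pool_offset;
  int code_comments_offset;
};

// Mirror the offset chain in Assembler::GetCode above. An absent table
// collapses onto the next section, giving it zero size.
LayoutModel ComputeLayout(int instruction_size, int code_comments_size,
                          int handler_table_offset,
                          int safepoint_table_offset) {
  constexpr int kConstantPoolSize = 0;  // ia32 has no constant pool
  LayoutModel l;
  l.code_comments_offset = instruction_size - code_comments_size;
  l.constant_pool_offset = l.code_comments_offset - kConstantPoolSize;
  l.handler_table_offset = handler_table_offset == 0 ? l.constant_pool_offset
                                                     : handler_table_offset;
  l.safepoint_table_offset = safepoint_table_offset == 0
                                 ? l.handler_table_offset
                                 : safepoint_table_offset;
  return l;
}

int main() {
  // 256 bytes of instructions, 16 of which are code comments, no tables.
  LayoutModel l = ComputeLayout(256, 16, 0, 0);
  assert(l.code_comments_offset == 240);
  assert(l.constant_pool_offset == 240);  // empty pool collapses
  assert(l.handler_table_offset == 240);  // absent table collapses
  assert(l.safepoint_table_offset == 240);
}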
void Assembler::FinalizeJumpOptimizationInfo() {
@@ -3199,8 +3211,8 @@ void Assembler::GrowBuffer() {
// Relocate internal references.
for (auto pos : internal_reference_positions_) {
- int32_t* p = reinterpret_cast<int32_t*>(buffer_start_ + pos);
- *p += pc_delta;
+ Address p = reinterpret_cast<Address>(buffer_start_ + pos);
+ WriteUnalignedValue(p, ReadUnalignedValue<int>(p) + pc_delta);
}
// Relocate pc-relative references.
@@ -3277,7 +3289,7 @@ void Assembler::emit_operand(int code, Operand adr) {
pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
RecordRelocInfo(adr.rmode_);
if (adr.rmode_ == RelocInfo::INTERNAL_REFERENCE) { // Fixup for labels
- emit_label(*reinterpret_cast<Label**>(pc_));
+ emit_label(ReadUnalignedValue<Label*>(reinterpret_cast<Address>(pc_)));
} else {
pc_ += sizeof(int32_t);
}
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 96bf2c7eeb..37cc81fe52 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -51,6 +51,8 @@
namespace v8 {
namespace internal {
+class SafepointTableBuilder;
+
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -202,9 +204,11 @@ enum ScaleFactor {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
- times_half_pointer_size = times_2,
- times_pointer_size = times_4,
- times_twice_pointer_size = times_8
+
+ times_half_system_pointer_size = times_2,
+ times_system_pointer_size = times_4,
+
+ times_tagged_size = times_4,
};
class V8_EXPORT_PRIVATE Operand {
@@ -275,8 +279,8 @@ class V8_EXPORT_PRIVATE Operand {
inline void set_disp8(int8_t disp);
inline void set_dispr(int32_t disp, RelocInfo::Mode rmode) {
DCHECK(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
+ Address p = reinterpret_cast<Address>(&buf_[len_]);
+ WriteUnalignedValue(p, disp);
len_ += sizeof(int32_t);
rmode_ = rmode;
}
@@ -296,7 +300,7 @@ class V8_EXPORT_PRIVATE Operand {
friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
-static_assert(sizeof(Operand) <= 2 * kPointerSize,
+static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
"Operand must be small enough to pass it by value");
// -----------------------------------------------------------------------------
@@ -373,13 +377,23 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
std::unique_ptr<AssemblerBuffer> = {});
virtual ~Assembler() {}
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(Isolate* isolate, CodeDesc* desc);
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
void FinalizeJumpOptimizationInfo();
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
+
// Read/Modify the code target in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
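Call sites that emit safepoint or handler tables now hand them to GetCode explicitly; everything else keeps the old two-argument shape through the wrapper. Hypothetical call sites showing both forms:

    // Optimized code that carries tables (names of locals are illustrative):
    void EmitWithTables(Isolate* isolate, Assembler* assm,
                        SafepointTableBuilder* tables, int handler_offset) {
      CodeDesc desc;
      assm->GetCode(isolate, &desc, tables, handler_offset);
    }

    // Stubs and tests without tables:
    void EmitPlain(Isolate* isolate, Assembler* assm) {
      CodeDesc desc;
      assm->GetCode(isolate, &desc);  // kNoSafepointTable, kNoHandlerTable
    }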
@@ -405,11 +419,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
- static constexpr int kSpecialTargetSize = kPointerSize;
+ static constexpr int kSpecialTargetSize = kSystemPointerSize;
// Distance between the address of the code target in the call instruction
// and the return address
- static constexpr int kCallTargetAddressOffset = kPointerSize;
+ static constexpr int kCallTargetAddressOffset = kSystemPointerSize;
// One byte opcode for test al, 0xXX.
static constexpr byte kTestAlByte = 0xA8;
@@ -887,10 +901,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); }
- SSE_CMP_P(cmpeq, 0x0);
- SSE_CMP_P(cmplt, 0x1);
- SSE_CMP_P(cmple, 0x2);
- SSE_CMP_P(cmpneq, 0x4);
+ SSE_CMP_P(cmpeq, 0x0)
+ SSE_CMP_P(cmplt, 0x1)
+ SSE_CMP_P(cmple, 0x2)
+ SSE_CMP_P(cmpneq, 0x4)
#undef SSE_CMP_P
@@ -1505,7 +1519,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vpd(opcode, dst, src1, src2); \
}
- PACKED_OP_LIST(AVX_PACKED_OP_DECLARE);
+ PACKED_OP_LIST(AVX_PACKED_OP_DECLARE)
void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
@@ -1518,10 +1532,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vcmpps(dst, src1, src2, imm8); \
}
- AVX_CMP_P(vcmpeq, 0x0);
- AVX_CMP_P(vcmplt, 0x1);
- AVX_CMP_P(vcmple, 0x2);
- AVX_CMP_P(vcmpneq, 0x4);
+ AVX_CMP_P(vcmpeq, 0x0)
+ AVX_CMP_P(vcmplt, 0x1)
+ AVX_CMP_P(vcmple, 0x2)
+ AVX_CMP_P(vcmpneq, 0x4)
#undef AVX_CMP_P
@@ -1650,14 +1664,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void emit_sse_operand(Register dst, XMMRegister src);
void emit_sse_operand(XMMRegister dst, Register src);
- byte* addr_at(int pos) { return buffer_start_ + pos; }
+ Address addr_at(int pos) {
+ return reinterpret_cast<Address>(buffer_start_ + pos);
+ }
private:
uint32_t long_at(int pos) {
- return *reinterpret_cast<uint32_t*>(addr_at(pos));
+ return ReadUnalignedValue<uint32_t>(addr_at(pos));
}
void long_at_put(int pos, uint32_t x) {
- *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
+ WriteUnalignedValue(addr_at(pos), x);
}
// code emission
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 01da1c61b8..34427c95ed 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -33,7 +33,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movsd(Operand(esp, offset), xmm_reg);
}
- STATIC_ASSERT(kFloatSize == kPointerSize);
+ STATIC_ASSERT(kFloatSize == kSystemPointerSize);
const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
__ sub(esp, Immediate(kFloatRegsSize));
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
@@ -49,15 +49,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
__ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
- const int kSavedRegistersAreaSize =
- kNumberOfRegisters * kPointerSize + kDoubleRegsSize + kFloatRegsSize;
+ const int kSavedRegistersAreaSize = kNumberOfRegisters * kSystemPointerSize +
+ kDoubleRegsSize + kFloatRegsSize;
// The bailout id is passed in ebx by the caller.
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kSystemPointerSize));
__ sub(edx, ebp);
__ neg(edx);
@@ -70,13 +70,13 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ JumpIfSmi(edi, &context_check);
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
- __ mov(Operand(esp, 1 * kPointerSize),
+ __ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
+ __ mov(Operand(esp, 1 * kSystemPointerSize),
Immediate(static_cast<int>(deopt_kind)));
- __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
- __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
- __ mov(Operand(esp, 5 * kPointerSize),
+ __ mov(Operand(esp, 2 * kSystemPointerSize), ebx); // Bailout id.
+ __ mov(Operand(esp, 3 * kSystemPointerSize), ecx); // Code address or 0.
+ __ mov(Operand(esp, 4 * kSystemPointerSize), edx); // Fp-to-sp delta.
+ __ mov(Operand(esp, 5 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(isolate)));
{
AllowExternalCallThatCantCauseGC scope(masm);
@@ -89,7 +89,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Fill in the input registers.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
__ pop(Operand(esi, offset));
}
@@ -116,7 +117,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ fnclex();
// Remove the return address and the double registers.
- __ add(esp, Immediate(kDoubleRegsSize + 1 * kPointerSize));
+ __ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
@@ -140,7 +141,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, esi);
- __ mov(Operand(esp, 0 * kPointerSize), eax);
+ __ mov(Operand(esp, 0 * kSystemPointerSize), eax);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
@@ -156,7 +157,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// past the last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
- __ lea(edx, Operand(eax, edx, times_4, 0));
+ __ lea(edx, Operand(eax, edx, times_system_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: esi = current FrameDescription*, ecx = loop
@@ -170,7 +171,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
- __ add(eax, Immediate(kPointerSize));
+ __ add(eax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
@@ -189,7 +190,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
__ push(Operand(esi, offset));
}
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 75cd9a258a..4bb9b3fa7b 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -11,6 +11,7 @@
#include "src/base/compiler-specific.h"
#include "src/disasm.h"
#include "src/ia32/sse-instr.h"
+#include "src/utils.h"
namespace disasm {
diff --git a/deps/v8/src/ia32/frame-constants-ia32.h b/deps/v8/src/ia32/frame-constants-ia32.h
index 20f9450e7d..7fa30d0cea 100644
--- a/deps/v8/src/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/ia32/frame-constants-ia32.h
@@ -15,21 +15,21 @@ class EntryFrameConstants : public AllStatic {
public:
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
- static constexpr int kCallerFPOffset = -6 * kPointerSize;
+ static constexpr int kCallerFPOffset = -6 * kSystemPointerSize;
// EntryFrame is used by JSEntry, JSConstructEntry and JSRunMicrotasksEntry.
// All of them take |root_register_value| as the first parameter.
- static constexpr int kRootRegisterValueOffset = +2 * kPointerSize;
+ static constexpr int kRootRegisterValueOffset = +2 * kSystemPointerSize;
// Rest of parameters passed to JSEntry and JSConstructEntry.
- static constexpr int kNewTargetArgOffset = +3 * kPointerSize;
- static constexpr int kFunctionArgOffset = +4 * kPointerSize;
- static constexpr int kReceiverArgOffset = +5 * kPointerSize;
- static constexpr int kArgcOffset = +6 * kPointerSize;
- static constexpr int kArgvOffset = +7 * kPointerSize;
+ static constexpr int kNewTargetArgOffset = +3 * kSystemPointerSize;
+ static constexpr int kFunctionArgOffset = +4 * kSystemPointerSize;
+ static constexpr int kReceiverArgOffset = +5 * kSystemPointerSize;
+ static constexpr int kArgcOffset = +6 * kSystemPointerSize;
+ static constexpr int kArgvOffset = +7 * kSystemPointerSize;
// Rest of parameters passed to JSRunMicrotasksEntry.
- static constexpr int kMicrotaskQueueArgOffset = +3 * kPointerSize;
+ static constexpr int kMicrotaskQueueArgOffset = +3 * kSystemPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
@@ -38,12 +38,12 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
- static constexpr int kCallerFPOffset = 0 * kPointerSize;
- static constexpr int kCallerPCOffset = +1 * kPointerSize;
+ static constexpr int kCallerFPOffset = 0 * kSystemPointerSize;
+ static constexpr int kCallerPCOffset = +1 * kSystemPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static constexpr int kCallerSPDisplacement = +2 * kPointerSize;
+ static constexpr int kCallerSPDisplacement = +2 * kSystemPointerSize;
static constexpr int kConstantPoolOffset = 0; // Not used
};
@@ -57,7 +57,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
- kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedGpParamRegs * kSystemPointerSize +
kNumberOfSavedFpParamRegs * kSimd128Size;
};
@@ -66,13 +66,13 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static constexpr int kLocal0Offset =
StandardFrameConstants::kExpressionsOffset;
- static constexpr int kLastParameterOffset = +2 * kPointerSize;
+ static constexpr int kLastParameterOffset = +2 * kSystemPointerSize;
static constexpr int kFunctionOffset =
StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static constexpr int kParam0Offset = -2 * kPointerSize;
- static constexpr int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kSystemPointerSize;
+ static constexpr int kReceiverOffset = -1 * kSystemPointerSize;
};
} // namespace internal
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 48a279b6e4..2e279cc258 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -100,6 +100,14 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // edx : function template info
+ // ecx : number of arguments (on the stack, not including receiver)
+ Register registers[] = {edx, ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments (on the stack, not including receiver)
@@ -206,9 +214,10 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // kTargetContext
- edx, // kApiFunctionAddress
- ecx, // kArgc
+ edx, // kApiFunctionAddress
+ ecx, // kArgc
+ eax, // kCallData
+ edi, // kHolder
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 2bde18e0fd..5367b7a2d6 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -15,6 +15,7 @@
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
#include "src/frames-inl.h"
+#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/ia32/assembler-ia32-inl.h"
#include "src/macro-assembler.h"
#include "src/runtime/runtime.h"
@@ -129,6 +130,19 @@ void MacroAssembler::PushRoot(RootIndex index) {
}
}
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Register scratch,
+ Label* on_in_range,
+ Label::Distance near_jump) {
+ if (lower_limit != 0) {
+ lea(scratch, Operand(value, 0u - lower_limit));
+ cmp(scratch, Immediate(higher_limit - lower_limit));
+ } else {
+ cmp(value, Immediate(higher_limit));
+ }
+ j(below_equal, on_in_range, near_jump);
+}
+
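JumpIfIsInRange folds both bounds checks into one comparison: subtracting lower_limit maps the in-range interval onto [0, higher_limit - lower_limit], and any value below lower_limit wraps around to a large unsigned number, so a single unsigned compare-and-branch decides the whole test. A standalone sketch of the same arithmetic:

    #include <cassert>

    // One unsigned comparison replaces (value >= lower && value <= higher).
    bool IsInRange(unsigned value, unsigned lower, unsigned higher) {
      return value - lower <= higher - lower;  // value < lower wraps high
    }

    int main() {
      assert(IsInRange(5, 3, 9));
      assert(!IsInRange(2, 3, 9));   // 2u - 3u wraps past higher - lower
      assert(!IsInRange(10, 3, 9));
      return 0;
    }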
Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Register scratch) {
// TODO(jgruber): Add support for enable_root_array_delta_access.
@@ -192,8 +206,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
mov(destination,
- FieldOperand(destination,
- FixedArray::kHeaderSize + constant_index * kPointerSize));
+ FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}
void TurboAssembler::LoadRootRegisterOffset(Register destination,
@@ -234,7 +247,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
- bytes += kPointerSize;
+ bytes += kSystemPointerSize;
}
}
@@ -256,7 +269,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register reg = saved_regs[i];
if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
push(reg);
- bytes += kPointerSize;
+ bytes += kSystemPointerSize;
}
}
@@ -292,7 +305,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register reg = saved_regs[i];
if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
pop(reg);
- bytes += kPointerSize;
+ bytes += kSystemPointerSize;
}
}
@@ -325,13 +338,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- DCHECK(IsAligned(offset, kPointerSize));
+ // of the object, so the offset must be a multiple of kTaggedSize.
+ DCHECK(IsAligned(offset, kTaggedSize));
lea(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
- test_b(dst, Immediate(kPointerSize - 1));
+ test_b(dst, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
@@ -791,17 +804,17 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
- DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
- DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
- DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ DCHECK_EQ(+2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ DCHECK_EQ(+1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
+ DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
push(ebp);
mov(ebp, esp);
// Reserve room for entry stack pointer and push the code object.
push(Immediate(StackFrame::TypeToMarker(frame_type)));
- DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
+ DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
push(Immediate(0)); // Saved entry sp, patched before call.
- DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
+ DCHECK_EQ(-3 * kSystemPointerSize, ExitFrameConstants::kCodeOffset);
Move(scratch, CodeObject());
push(scratch); // Accessed from ExitFrame::code_slot.
@@ -826,7 +839,8 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
+ int space =
+ XMMRegister::kNumRegisters * kDoubleSize + argc * kSystemPointerSize;
sub(esp, Immediate(space));
const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
@@ -834,7 +848,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
- sub(esp, Immediate(argc * kPointerSize));
+ sub(esp, Immediate(argc * kSystemPointerSize));
}
// Get the required frame alignment for the OS.
@@ -853,9 +867,9 @@ void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
EnterExitFramePrologue(frame_type, edi);
// Set up argc and argv in callee-saved registers.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
mov(edi, eax);
- lea(esi, Operand(ebp, eax, times_4, offset));
+ lea(esi, Operand(ebp, eax, times_system_pointer_size, offset));
// Reserve space for argc, argv and isolate.
EnterExitFrameEpilogue(argc, save_doubles);
@@ -879,11 +893,11 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
if (pop_arguments) {
// Get the return address from the stack and restore the frame pointer.
- mov(ecx, Operand(ebp, 1 * kPointerSize));
- mov(ebp, Operand(ebp, 0 * kPointerSize));
+ mov(ecx, Operand(ebp, 1 * kSystemPointerSize));
+ mov(ebp, Operand(ebp, 0 * kSystemPointerSize));
// Pop the arguments and the receiver from the caller stack.
- lea(esp, Operand(esi, 1 * kPointerSize));
+ lea(esp, Operand(esi, 1 * kSystemPointerSize));
// Push the return address to get ready to return.
push(ecx);
@@ -922,7 +936,7 @@ void MacroAssembler::LeaveApiExitFrame() {
void MacroAssembler::PushStackHandler(Register scratch) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
push(Immediate(0)); // Padding.
@@ -941,7 +955,7 @@ void MacroAssembler::PopStackHandler(Register scratch) {
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
pop(ExternalReferenceAsOperand(handler_address, scratch));
- add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ add(esp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@@ -1032,15 +1046,17 @@ void TurboAssembler::PrepareForTailCall(
if (callee_args_count.is_reg()) {
sub(caller_args_count_reg, callee_args_count.reg());
lea(new_sp_reg,
- Operand(ebp, caller_args_count_reg, times_pointer_size,
+ Operand(ebp, caller_args_count_reg, times_system_pointer_size,
StandardFrameConstants::kCallerPCOffset -
- number_of_temp_values_after_return_address * kPointerSize));
+ number_of_temp_values_after_return_address *
+ kSystemPointerSize));
} else {
- lea(new_sp_reg, Operand(ebp, caller_args_count_reg, times_pointer_size,
- StandardFrameConstants::kCallerPCOffset -
- (callee_args_count.immediate() +
- number_of_temp_values_after_return_address) *
- kPointerSize));
+ lea(new_sp_reg,
+ Operand(ebp, caller_args_count_reg, times_system_pointer_size,
+ StandardFrameConstants::kCallerPCOffset -
+ (callee_args_count.immediate() +
+ number_of_temp_values_after_return_address) *
+ kSystemPointerSize));
}
if (FLAG_debug_code) {
@@ -1053,7 +1069,8 @@ void TurboAssembler::PrepareForTailCall(
// place.
Register tmp_reg = scratch1;
mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
- mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
+ mov(Operand(esp,
+ number_of_temp_values_after_return_address * kSystemPointerSize),
tmp_reg);
// Restore caller's frame pointer now as it could be overwritten by
@@ -1077,8 +1094,8 @@ void TurboAssembler::PrepareForTailCall(
jmp(&entry, Label::kNear);
bind(&loop);
dec(count_reg);
- mov(tmp_reg, Operand(esp, count_reg, times_pointer_size, 0));
- mov(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
+ mov(tmp_reg, Operand(esp, count_reg, times_system_pointer_size, 0));
+ mov(Operand(new_sp_reg, count_reg, times_system_pointer_size, 0), tmp_reg);
bind(&entry);
cmp(count_reg, Immediate(0));
j(not_equal, &loop, Label::kNear);
@@ -1183,9 +1200,10 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
Push(fun);
Operand receiver_op =
actual.is_reg()
- ? Operand(ebp, actual.reg(), times_pointer_size, kPointerSize * 2)
- : Operand(ebp, actual.immediate() * times_pointer_size +
- kPointerSize * 2);
+ ? Operand(ebp, actual.reg(), times_system_pointer_size,
+ kSystemPointerSize * 2)
+ : Operand(ebp, actual.immediate() * times_system_pointer_size +
+ kSystemPointerSize * 2);
Push(receiver_op);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
@@ -1307,7 +1325,7 @@ void TurboAssembler::Push(Immediate value) {
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- add(esp, Immediate(stack_elements * kPointerSize));
+ add(esp, Immediate(stack_elements * kSystemPointerSize));
}
}
@@ -1719,7 +1737,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) {
void TurboAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
test(esp, Immediate(frame_alignment_mask));
@@ -1774,12 +1792,12 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
- sub(esp, Immediate((num_arguments + 1) * kPointerSize));
+ sub(esp, Immediate((num_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
and_(esp, -frame_alignment);
- mov(Operand(esp, num_arguments * kPointerSize), scratch);
+ mov(Operand(esp, num_arguments * kSystemPointerSize), scratch);
} else {
- sub(esp, Immediate(num_arguments * kPointerSize));
+ sub(esp, Immediate(num_arguments * kSystemPointerSize));
}
}
@@ -1832,9 +1850,9 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
if (base::OS::ActivationFrameAlignment() != 0) {
- mov(esp, Operand(esp, num_arguments * kPointerSize));
+ mov(esp, Operand(esp, num_arguments * kSystemPointerSize));
} else {
- add(esp, Immediate(num_arguments * kPointerSize));
+ add(esp, Immediate(num_arguments * kSystemPointerSize));
}
}
@@ -1865,10 +1883,12 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
STATIC_ASSERT(kSmiTag == 0);
// The builtin_pointer register contains the builtin index as a Smi.
- // Untagging is folded into the indexing operand below (we use times_2 instead
- // of times_4 since smis are already shifted by one).
- mov(builtin_pointer, Operand(kRootRegister, builtin_pointer, times_2,
- IsolateData::builtin_entry_table_offset()));
+ // Untagging is folded into the indexing operand below (we use
+ // times_half_system_pointer_size instead of times_system_pointer_size since
+ // smis are already shifted by one).
+ mov(builtin_pointer,
+ Operand(kRootRegister, builtin_pointer, times_half_system_pointer_size,
+ IsolateData::builtin_entry_table_offset()));
call(builtin_pointer);
}
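A Smi on ia32 carries its payload shifted left by one bit, so the builtin index arrives already multiplied by two; scaling by times_half_system_pointer_size contributes the other factor of two, landing on index * kSystemPointerSize, the byte offset into the builtin entry table, without a separate untag instruction. The arithmetic, spelled out:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t kSystemPointerSize = 4;  // ia32
      for (int32_t index = 0; index < 16; ++index) {
        int32_t smi = index << 1;       // 32-bit Smi encoding: payload << 1
        int32_t byte_offset = smi * 2;  // times_half_system_pointer_size
        assert(byte_offset == index * kSystemPointerSize);
      }
      return 0;
    }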
@@ -1904,8 +1924,9 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// table.
bind(&if_code_is_builtin);
mov(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
- mov(destination, Operand(kRootRegister, destination, times_pointer_size,
- IsolateData::builtin_entry_table_offset()));
+ mov(destination,
+ Operand(kRootRegister, destination, times_system_pointer_size,
+ IsolateData::builtin_entry_table_offset()));
bind(&out);
} else {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index d26152663a..0f74fa4526 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -460,6 +460,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
j(not_equal, if_not_equal, if_not_equal_distance);
}
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Register scratch,
+ Label* on_in_range,
+ Label::Distance near_jump = Label::kFar);
+
// ---------------------------------------------------------------------------
// GC Support
// Notify the garbage collector that we wrote a pointer into an object.
@@ -684,11 +691,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void LeaveExitFrameEpilogue();
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object, Register scratch, Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
@@ -713,18 +715,12 @@ inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
return Operand(object, index, scale, offset - kHeapObjectTag);
}
-inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
-}
-
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
inline Operand ContextOperand(Register context, Register index) {
- return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
+ return Operand(context, index, times_tagged_size, Context::SlotOffset(0));
}
inline Operand NativeContextOperand() {
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 0bbc9391b0..bc636e4164 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -99,84 +99,48 @@ TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase(
void AccessorAssembler::HandlePolymorphicCase(
Node* receiver_map, TNode<WeakFixedArray> feedback, Label* if_handler,
- TVariable<MaybeObject>* var_handler, Label* if_miss,
- int min_feedback_capacity) {
+ TVariable<MaybeObject>* var_handler, Label* if_miss) {
Comment("HandlePolymorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
- // Deferred so the unrolled case can omit frame construction in bytecode
- // handler.
- Label loop(this, Label::kDeferred);
-
// Iterate {feedback} array.
const int kEntrySize = 2;
- // Loading feedback's length is delayed until we need it when looking past
- // the first {min_feedback_capacity} (map, handler) pairs.
- Node* length = nullptr;
- CSA_ASSERT(this, SmiGreaterThanOrEqual(
- LoadWeakFixedArrayLength(feedback),
- SmiConstant(min_feedback_capacity * kEntrySize)));
-
- const int kUnrolledIterations = IC::kMaxPolymorphicMapCount;
- for (int i = 0; i < kUnrolledIterations; i++) {
- int map_index = i * kEntrySize;
- int handler_index = i * kEntrySize + 1;
-
- if (i >= min_feedback_capacity) {
- if (length == nullptr) length = LoadWeakFixedArrayLength(feedback);
- GotoIf(SmiGreaterThanOrEqual(SmiConstant(handler_index), CAST(length)),
- if_miss);
- }
+ // Load the {feedback} array length.
+ TNode<IntPtrT> length = LoadAndUntagWeakFixedArrayLength(feedback);
+ CSA_ASSERT(this, IntPtrLessThanOrEqual(IntPtrConstant(1), length));
- Label next_entry(this);
+ // This is a hand-crafted loop that only compares against the {length}
+ // at the end, since we already know that we will have at least a single
+ // entry in the {feedback} array anyway.
+ TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
+ Label loop(this, &var_index), loop_next(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
TNode<MaybeObject> maybe_cached_map =
- LoadWeakFixedArrayElement(feedback, map_index);
+ LoadWeakFixedArrayElement(feedback, var_index.value());
CSA_ASSERT(this, IsWeakOrCleared(maybe_cached_map));
GotoIf(IsNotWeakReferenceTo(maybe_cached_map, CAST(receiver_map)),
- &next_entry);
+ &loop_next);
// Found, now call handler.
TNode<MaybeObject> handler =
- LoadWeakFixedArrayElement(feedback, handler_index);
+ LoadWeakFixedArrayElement(feedback, var_index.value(), kTaggedSize);
*var_handler = handler;
Goto(if_handler);
- BIND(&next_entry);
+ BIND(&loop_next);
+ var_index =
+ Signed(IntPtrAdd(var_index.value(), IntPtrConstant(kEntrySize)));
+ Branch(IntPtrLessThan(var_index.value(), length), &loop, if_miss);
}
- Goto(&loop);
-
- // Loop from {kUnrolledIterations}*kEntrySize to {length}.
- BIND(&loop);
- Node* start_index = IntPtrConstant(kUnrolledIterations * kEntrySize);
- Node* end_index = LoadAndUntagWeakFixedArrayLength(feedback);
- BuildFastLoop(
- start_index, end_index,
- [this, receiver_map, feedback, if_handler, var_handler](Node* index) {
- Label next_entry(this);
- TNode<MaybeObject> maybe_cached_map =
- LoadWeakFixedArrayElement(feedback, index);
- CSA_ASSERT(this, IsWeakOrCleared(maybe_cached_map));
- GotoIf(IsNotWeakReferenceTo(maybe_cached_map, CAST(receiver_map)),
- &next_entry);
-
- // Found, now call handler.
- TNode<MaybeObject> handler =
- LoadWeakFixedArrayElement(feedback, index, kTaggedSize);
- *var_handler = handler;
- Goto(if_handler);
-
- BIND(&next_entry);
- },
- kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
- // The loop falls through if no handler was found.
- Goto(if_miss);
}
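The rewritten HandlePolymorphicCase drops the partially unrolled search in favor of a plain bottom-tested loop over the flattened (weak map, handler) pairs; the length is only consulted at the end of each iteration because the feedback array is asserted to hold at least one entry. An equivalent scalar sketch with simplified types:

    #include <cstddef>

    struct FeedbackEntry {  // one (map, handler) pair, kEntrySize == 2 slots
      const void* weak_map;
      const void* handler;
    };

    // Returns the handler for receiver_map, or nullptr on miss.
    // Precondition (as asserted above): count >= 1.
    const void* FindHandler(const FeedbackEntry* feedback, size_t count,
                            const void* receiver_map) {
      size_t i = 0;
      do {
        if (feedback[i].weak_map == receiver_map) return feedback[i].handler;
        ++i;
      } while (i < count);
      return nullptr;
    }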
void AccessorAssembler::HandleLoadICHandlerCase(
const LoadICParameters* p, TNode<Object> handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent,
- ElementSupport support_elements) {
+ ElementSupport support_elements, LoadAccessMode access_mode) {
Comment("have_handler");
VARIABLE(var_holder, MachineRepresentation::kTagged, p->holder);
@@ -195,14 +159,15 @@ void AccessorAssembler::HandleLoadICHandlerCase(
{
HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
handler, miss, exit_point, on_nonexistent,
- support_elements);
+ support_elements, access_mode);
}
BIND(&try_proto_handler);
{
GotoIf(IsCodeMap(LoadMap(CAST(handler))), &call_handler);
HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
- &if_smi_handler, miss, exit_point, ic_mode);
+ &if_smi_handler, miss, exit_point, ic_mode,
+ access_mode);
}
BIND(&call_handler);
@@ -276,8 +241,8 @@ void AccessorAssembler::HandleLoadAccessor(
BIND(&load);
Callable callable = CodeFactory::CallApiCallback(isolate());
TNode<IntPtrT> argc = IntPtrConstant(0);
- exit_point->Return(CallStub(callable, nullptr, context, callback, argc,
- data, api_holder.value(), p->receiver));
+ exit_point->Return(CallStub(callable, context, callback, argc, data,
+ api_holder.value(), p->receiver));
}
BIND(&runtime);
@@ -343,7 +308,8 @@ TNode<MaybeObject> AccessorAssembler::LoadDescriptorValueOrFieldType(
void AccessorAssembler::HandleLoadICSmiHandlerCase(
const LoadICParameters* p, Node* holder, SloppyTNode<Smi> smi_handler,
SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point,
- OnNonExistent on_nonexistent, ElementSupport support_elements) {
+ OnNonExistent on_nonexistent, ElementSupport support_elements,
+ LoadAccessMode access_mode) {
VARIABLE(var_double_value, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
@@ -354,8 +320,17 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Label if_element(this), if_indexed_string(this), if_property(this);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kElement)),
&if_element);
- Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kIndexedString)),
- &if_indexed_string, &if_property);
+
+ if (access_mode == LoadAccessMode::kHas) {
+ CSA_ASSERT(this,
+ WordNotEqual(handler_kind,
+ IntPtrConstant(LoadHandler::kIndexedString)));
+ Goto(&if_property);
+ } else {
+ Branch(
+ WordEqual(handler_kind, IntPtrConstant(LoadHandler::kIndexedString)),
+ &if_indexed_string, &if_property);
+ }
BIND(&if_element);
Comment("element_load");
@@ -370,7 +345,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
EmitElementLoad(holder, elements, elements_kind, intptr_index,
is_jsarray_condition, &if_hole, &rebox_double,
&var_double_value, &unimplemented_elements_kind, &if_oob,
- miss, exit_point);
+ miss, exit_point, access_mode);
BIND(&unimplemented_elements_kind);
{
@@ -404,41 +379,63 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
miss);
BIND(&return_undefined);
- exit_point->Return(UndefinedConstant());
+ exit_point->Return(access_mode == LoadAccessMode::kHas
+ ? FalseConstant()
+ : UndefinedConstant());
}
BIND(&if_hole);
{
Comment("convert hole");
+
GotoIfNot(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
GotoIf(IsNoElementsProtectorCellInvalid(), miss);
- exit_point->Return(UndefinedConstant());
+ exit_point->Return(access_mode == LoadAccessMode::kHas
+ ? FalseConstant()
+ : UndefinedConstant());
}
- BIND(&if_indexed_string);
- {
- Label if_oob(this, Label::kDeferred);
-
- Comment("indexed string");
- Node* intptr_index = TryToIntptr(p->name, miss);
- Node* length = LoadStringLengthAsWord(holder);
- GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob);
- TNode<Int32T> code = StringCharCodeAt(holder, intptr_index);
- TNode<String> result = StringFromSingleCharCode(code);
- Return(result);
-
- BIND(&if_oob);
- Node* allow_out_of_bounds =
- IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
- GotoIfNot(allow_out_of_bounds, miss);
- GotoIf(IsNoElementsProtectorCellInvalid(), miss);
- Return(UndefinedConstant());
+ if (access_mode != LoadAccessMode::kHas) {
+ BIND(&if_indexed_string);
+ {
+ Label if_oob(this, Label::kDeferred);
+
+ Comment("indexed string");
+ Node* intptr_index = TryToIntptr(p->name, miss);
+ Node* length = LoadStringLengthAsWord(holder);
+ GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob);
+ TNode<Int32T> code = StringCharCodeAt(holder, intptr_index);
+ TNode<String> result = StringFromSingleCharCode(code);
+ Return(result);
+
+ BIND(&if_oob);
+ Node* allow_out_of_bounds =
+ IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
+ GotoIfNot(allow_out_of_bounds, miss);
+ GotoIf(IsNoElementsProtectorCellInvalid(), miss);
+ Return(UndefinedConstant());
+ }
}
BIND(&if_property);
Comment("property_load");
}
+ if (access_mode == LoadAccessMode::kHas) {
+ HandleLoadICSmiHandlerHasNamedCase(p, holder, handler_kind, miss,
+ exit_point);
+ } else {
+ HandleLoadICSmiHandlerLoadNamedCase(
+ p, holder, handler_kind, handler_word, &rebox_double, &var_double_value,
+ handler, miss, exit_point, on_nonexistent, support_elements);
+ }
+}
+
+void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
+ const LoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
+ TNode<WordT> handler_word, Label* rebox_double, Variable* var_double_value,
+ SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point,
+ OnNonExistent on_nonexistent, ElementSupport support_elements) {
Label constant(this), field(this), normal(this, Label::kDeferred),
interceptor(this, Label::kDeferred), nonexistent(this),
accessor(this, Label::kDeferred), global(this, Label::kDeferred),
@@ -478,7 +475,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
&module_export, &interceptor);
BIND(&field);
- HandleLoadField(holder, handler_word, &var_double_value, &rebox_double,
+ HandleLoadField(holder, handler_word, var_double_value, rebox_double,
exit_point);
BIND(&nonexistent);
@@ -625,8 +622,88 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
}
}
- BIND(&rebox_double);
- exit_point->Return(AllocateHeapNumberWithValue(var_double_value.value()));
+ BIND(rebox_double);
+ exit_point->Return(AllocateHeapNumberWithValue(var_double_value->value()));
+}
+
+void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
+ const LoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
+ Label* miss, ExitPoint* exit_point) {
+ Label return_true(this), return_false(this), return_lookup(this),
+ normal(this), global(this);
+
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)),
+ &return_true);
+
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kConstant)),
+ &return_true);
+
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNonExistent)),
+ &return_false);
+
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNormal)),
+ &normal);
+
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kAccessor)),
+ &return_true);
+
+ GotoIf(
+ WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNativeDataProperty)),
+ &return_true);
+
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
+ &return_true);
+
+ GotoIf(WordEqual(handler_kind,
+ IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)),
+ &return_true);
+
+ Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kGlobal)), &global,
+ &return_lookup);
+
+ BIND(&return_true);
+ exit_point->Return(TrueConstant());
+
+ BIND(&return_false);
+ exit_point->Return(FalseConstant());
+
+ BIND(&return_lookup);
+ {
+ CSA_ASSERT(
+ this,
+ Word32Or(
+ WordEqual(handler_kind, IntPtrConstant(LoadHandler::kInterceptor)),
+ Word32Or(
+ WordEqual(handler_kind, IntPtrConstant(LoadHandler::kProxy)),
+ WordEqual(handler_kind,
+ IntPtrConstant(LoadHandler::kModuleExport)))));
+ exit_point->ReturnCallStub(
+ Builtins::CallableFor(isolate(), Builtins::kHasProperty), p->context,
+ p->receiver, p->name);
+ }
+
+ BIND(&normal);
+ {
+ Comment("has_normal");
+ TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder));
+ TVARIABLE(IntPtrT, var_name_index);
+ Label found(this);
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name), &found,
+ &var_name_index, miss);
+
+ BIND(&found);
+ exit_point->Return(TrueConstant());
+ }
+
+ BIND(&global);
+ {
+ CSA_ASSERT(this, IsPropertyCell(holder));
+ // Ensure the property cell doesn't contain the hole.
+ Node* value = LoadObjectField(holder, PropertyCell::kValueOffset);
+ GotoIf(IsTheHole(value), miss);
+
+ exit_point->Return(TrueConstant());
+ }
}
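For has-style loads the handler kind alone usually answers the question: field, constant, accessor, and API getter handlers prove the property exists, kNonExistent proves it does not, and only dictionary-mode objects, globals, and the proxy/interceptor/module-export kinds need more work. A condensed sketch of that dispatch, with a hypothetical enum standing in for the LoadHandler kinds:

    enum class Kind {
      kField, kConstant, kNonExistent, kNormal, kAccessor, kNativeDataProperty,
      kApiGetter, kApiGetterHolderIsPrototype, kGlobal, kInterceptor, kProxy,
      kModuleExport
    };

    enum class HasOutcome { kTrue, kFalse, kDictionaryLookup, kCellCheck, kGeneric };

    HasOutcome HasNamed(Kind kind) {
      switch (kind) {
        case Kind::kField: case Kind::kConstant: case Kind::kAccessor:
        case Kind::kNativeDataProperty: case Kind::kApiGetter:
        case Kind::kApiGetterHolderIsPrototype:
          return HasOutcome::kTrue;              // kind alone proves existence
        case Kind::kNonExistent:
          return HasOutcome::kFalse;
        case Kind::kNormal:
          return HasOutcome::kDictionaryLookup;  // NameDictionaryLookup
        case Kind::kGlobal:
          return HasOutcome::kCellCheck;         // cell must not hold the hole
        default:
          return HasOutcome::kGeneric;           // falls back to HasProperty
      }
    }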
// Performs actions common to both load and store handlers:
@@ -743,7 +820,7 @@ Node* AccessorAssembler::HandleProtoHandler(
void AccessorAssembler::HandleLoadICProtoHandler(
const LoadICParameters* p, Node* handler, Variable* var_holder,
Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
- ExitPoint* exit_point, ICMode ic_mode) {
+ ExitPoint* exit_point, ICMode ic_mode, LoadAccessMode access_mode) {
DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
@@ -753,14 +830,18 @@ void AccessorAssembler::HandleLoadICProtoHandler(
nullptr,
// on_found_on_receiver
[=](Node* properties, Node* name_index) {
- VARIABLE(var_details, MachineRepresentation::kWord32);
- VARIABLE(var_value, MachineRepresentation::kTagged);
- LoadPropertyFromNameDictionary(properties, name_index, &var_details,
- &var_value);
- Node* value =
- CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, p->receiver, miss);
- exit_point->Return(value);
+ if (access_mode == LoadAccessMode::kHas) {
+ exit_point->Return(TrueConstant());
+ } else {
+ VARIABLE(var_details, MachineRepresentation::kWord32);
+ VARIABLE(var_value, MachineRepresentation::kTagged);
+ LoadPropertyFromNameDictionary(properties, name_index, &var_details,
+ &var_value);
+ Node* value =
+ CallGetterIfAccessor(var_value.value(), var_details.value(),
+ p->context, p->receiver, miss);
+ exit_point->Return(value);
+ }
},
miss, ic_mode);
@@ -831,13 +912,8 @@ void AccessorAssembler::HandleStoreICNativeDataProperty(
Node* accessor_info = LoadDescriptorValue(LoadMap(holder), descriptor);
CSA_CHECK(this, IsAccessorInfo(accessor_info));
- // TODO(8580): Get the language mode lazily when required to avoid the
- // computation of GetLanguageMode here. Also make the computation of
- // language mode not dependent on vector.
- Node* language_mode = GetLanguageMode(p->vector, p->slot);
-
TailCallRuntime(Runtime::kStoreCallbackProperty, p->context, p->receiver,
- holder, accessor_info, p->name, p->value, language_mode);
+ holder, accessor_info, p->name, p->value);
}
void AccessorAssembler::HandleStoreICHandlerCase(
@@ -1087,6 +1163,11 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
BIND(&all_fine);
}
+TNode<BoolT> AccessorAssembler::IsPropertyDetailsConst(Node* details) {
+ return Word32Equal(DecodeWord32<PropertyDetails::ConstnessField>(details),
+ Int32Constant(static_cast<int32_t>(VariableMode::kConst)));
+}
+
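IsPropertyDetailsConst backs the guards threaded through the store paths below: with FLAG_track_constant_fields, an in-place store to a kConst field is tolerated only when the incoming value equals the one already stored, in which case the store is skipped as a no-op; a genuine change bails out to the slow path. The decision, condensed with hypothetical names:

    enum class FastStore { kSkipAsNoOp, kStoreInPlace, kTakeSlowPath };

    // details_const: PropertyDetails says kConst
    // value_equal:   incoming value == current field value
    FastStore GuardConstField(bool details_const, bool value_equal) {
      if (!details_const) return FastStore::kStoreInPlace;  // mutable field
      return value_equal ? FastStore::kSkipAsNoOp           // Goto(&done)
                         : FastStore::kTakeSlowPath;        // Goto(slow)
    }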
void AccessorAssembler::OverwriteExistingFastDataProperty(
Node* object, Node* object_map, Node* descriptors,
Node* descriptor_name_index, Node* details, Node* value, Label* slow,
@@ -1103,15 +1184,6 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
BIND(&if_field);
{
- if (FLAG_track_constant_fields && !do_transitioning_store) {
- // TODO(ishell): Taking the slow path is not necessary if new and old
- // values are identical.
- GotoIf(Word32Equal(
- DecodeWord32<PropertyDetails::ConstnessField>(details),
- Int32Constant(static_cast<int32_t>(VariableMode::kConst))),
- slow);
- }
-
Node* representation =
DecodeWord32<PropertyDetails::RepresentationField>(details);
@@ -1141,6 +1213,13 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
if (FLAG_unbox_double_fields) {
if (do_transitioning_store) {
StoreMap(object, object_map);
+ } else if (FLAG_track_constant_fields) {
+ Label if_mutable(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
+ Node* current_value =
+ LoadObjectField(object, field_offset, MachineType::Float64());
+ Branch(Float64Equal(current_value, double_value), &done, slow);
+ BIND(&if_mutable);
}
StoreObjectFieldNoWriteBarrier(object, field_offset, double_value,
MachineRepresentation::kFloat64);
@@ -1152,6 +1231,13 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
StoreObjectField(object, field_offset, mutable_heap_number);
} else {
Node* mutable_heap_number = LoadObjectField(object, field_offset);
+ if (FLAG_track_constant_fields) {
+ Label if_mutable(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
+ Node* current_value = LoadHeapNumberValue(mutable_heap_number);
+ Branch(Float64Equal(current_value, double_value), &done, slow);
+ BIND(&if_mutable);
+ }
StoreHeapNumberValue(mutable_heap_number, double_value);
}
}
@@ -1162,6 +1248,13 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
{
if (do_transitioning_store) {
StoreMap(object, object_map);
+ } else if (FLAG_track_constant_fields) {
+ Label if_mutable(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
+ Node* current_value =
+ LoadObjectField(object, field_offset, MachineType::AnyTagged());
+ Branch(WordEqual(current_value, value), &done, slow);
+ BIND(&if_mutable);
}
StoreObjectField(object, field_offset, value);
Goto(&done);
@@ -1211,11 +1304,26 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
Node* mutable_heap_number =
LoadPropertyArrayElement(properties, backing_store_index);
Node* double_value = ChangeNumberToFloat64(value);
+ if (FLAG_track_constant_fields) {
+ Label if_mutable(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
+ Node* current_value = LoadHeapNumberValue(mutable_heap_number);
+ Branch(Float64Equal(current_value, double_value), &done, slow);
+ BIND(&if_mutable);
+ }
StoreHeapNumberValue(mutable_heap_number, double_value);
Goto(&done);
}
BIND(&tagged_rep);
{
+ if (FLAG_track_constant_fields) {
+ Label if_mutable(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
+ Node* current_value =
+ LoadPropertyArrayElement(properties, backing_store_index);
+ Branch(WordEqual(current_value, value), &done, slow);
+ BIND(&if_mutable);
+ }
StorePropertyArrayElement(properties, backing_store_index, value);
Goto(&done);
}
@@ -1422,7 +1530,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
BIND(&store);
Callable callable = CodeFactory::CallApiCallback(isolate());
TNode<IntPtrT> argc = IntPtrConstant(1);
- Return(CallStub(callable, nullptr, context, callback, argc, data,
+ Return(CallStub(callable, context, callback, argc, data,
api_holder.value(), p->receiver, p->value));
}
@@ -1434,17 +1542,6 @@ void AccessorAssembler::HandleStoreICProtoHandler(
}
}
-Node* AccessorAssembler::GetLanguageMode(Node* vector, Node* slot) {
- VARIABLE(var_language_mode, MachineRepresentation::kTaggedSigned,
- SmiConstant(LanguageMode::kStrict));
- Label language_mode_determined(this);
- BranchIfStrictMode(vector, slot, &language_mode_determined);
- var_language_mode.Bind(SmiConstant(LanguageMode::kSloppy));
- Goto(&language_mode_determined);
- BIND(&language_mode_determined);
- return var_language_mode.value();
-}
-
void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
Node* proxy, Label* miss,
ElementSupport support_elements) {
@@ -1454,18 +1551,13 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
Label if_index(this), if_unique_name(this),
to_name_failed(this, Label::kDeferred);
- // TODO(8580): Get the language mode lazily when required to avoid the
- // computation of GetLanguageMode here. Also make the computation of
- // language mode not dependent on vector.
- Node* language_mode = GetLanguageMode(p->vector, p->slot);
-
if (support_elements == kSupportElements) {
TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
&to_name_failed);
BIND(&if_unique_name);
CallBuiltin(Builtins::kProxySetProperty, p->context, proxy,
- var_unique.value(), p->value, p->receiver, language_mode);
+ var_unique.value(), p->value, p->receiver);
Return(p->value);
// The index case is handled earlier by the runtime.
@@ -1476,11 +1568,11 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
BIND(&to_name_failed);
TailCallRuntime(Runtime::kSetPropertyWithReceiver, p->context, proxy,
- p->name, p->value, p->receiver, language_mode);
+ p->name, p->value, p->receiver);
} else {
Node* name = CallBuiltin(Builtins::kToName, p->context, p->name);
TailCallBuiltin(Builtins::kProxySetProperty, p->context, proxy, name,
- p->value, p->receiver, language_mode);
+ p->value, p->receiver);
}
}
@@ -1789,7 +1881,7 @@ void AccessorAssembler::EmitElementLoad(
SloppyTNode<IntPtrT> intptr_index, Node* is_jsarray_condition,
Label* if_hole, Label* rebox_double, Variable* var_double_value,
Label* unimplemented_elements_kind, Label* out_of_bounds, Label* miss,
- ExitPoint* exit_point) {
+ ExitPoint* exit_point, LoadAccessMode access_mode) {
Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
if_dictionary(this);
@@ -1821,23 +1913,31 @@ void AccessorAssembler::EmitElementLoad(
BIND(&if_fast_packed);
{
Comment("fast packed elements");
- exit_point->Return(LoadFixedArrayElement(CAST(elements), intptr_index));
+ exit_point->Return(
+ access_mode == LoadAccessMode::kHas
+ ? TrueConstant()
+ : UnsafeLoadFixedArrayElement(CAST(elements), intptr_index));
}
BIND(&if_fast_holey);
{
Comment("fast holey elements");
- Node* element = LoadFixedArrayElement(CAST(elements), intptr_index);
+ Node* element = UnsafeLoadFixedArrayElement(CAST(elements), intptr_index);
GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
- exit_point->Return(element);
+ exit_point->Return(access_mode == LoadAccessMode::kHas ? TrueConstant()
+ : element);
}
BIND(&if_fast_double);
{
Comment("packed double elements");
- var_double_value->Bind(LoadFixedDoubleArrayElement(elements, intptr_index,
- MachineType::Float64()));
- Goto(rebox_double);
+ if (access_mode == LoadAccessMode::kHas) {
+ exit_point->Return(TrueConstant());
+ } else {
+ var_double_value->Bind(LoadFixedDoubleArrayElement(
+ elements, intptr_index, MachineType::Float64()));
+ Goto(rebox_double);
+ }
}
BIND(&if_fast_holey_double);
@@ -1846,8 +1946,12 @@ void AccessorAssembler::EmitElementLoad(
Node* value = LoadFixedDoubleArrayElement(elements, intptr_index,
MachineType::Float64(), 0,
INTPTR_PARAMETERS, if_hole);
- var_double_value->Bind(value);
- Goto(rebox_double);
+ if (access_mode == LoadAccessMode::kHas) {
+ exit_point->Return(TrueConstant());
+ } else {
+ var_double_value->Bind(value);
+ Goto(rebox_double);
+ }
}
BIND(&if_nonfast);
@@ -1869,7 +1973,8 @@ void AccessorAssembler::EmitElementLoad(
TNode<Object> value = BasicLoadNumberDictionaryElement(
CAST(elements), intptr_index, miss, if_hole);
- exit_point->Return(value);
+ exit_point->Return(access_mode == LoadAccessMode::kHas ? TrueConstant()
+ : value);
}
BIND(&if_typed_array);
@@ -1882,97 +1987,101 @@ void AccessorAssembler::EmitElementLoad(
// Bounds check.
Node* length = SmiUntag(LoadJSTypedArrayLength(CAST(object)));
GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
-
- Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
-
- Label uint8_elements(this), int8_elements(this), uint16_elements(this),
- int16_elements(this), uint32_elements(this), int32_elements(this),
- float32_elements(this), float64_elements(this), bigint64_elements(this),
- biguint64_elements(this);
- Label* elements_kind_labels[] = {
- &uint8_elements, &uint8_elements, &int8_elements,
- &uint16_elements, &int16_elements, &uint32_elements,
- &int32_elements, &float32_elements, &float64_elements,
- &bigint64_elements, &biguint64_elements};
- int32_t elements_kinds[] = {
- UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
- UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
- INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS,
- BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS};
- const size_t kTypedElementsKindCount =
- LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
- Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
- kTypedElementsKindCount);
- BIND(&uint8_elements);
- {
- Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
- Node* element = Load(MachineType::Uint8(), backing_store, intptr_index);
- exit_point->Return(SmiFromInt32(element));
- }
- BIND(&int8_elements);
- {
- Comment("INT8_ELEMENTS");
- Node* element = Load(MachineType::Int8(), backing_store, intptr_index);
- exit_point->Return(SmiFromInt32(element));
- }
- BIND(&uint16_elements);
- {
- Comment("UINT16_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(1));
- Node* element = Load(MachineType::Uint16(), backing_store, index);
- exit_point->Return(SmiFromInt32(element));
- }
- BIND(&int16_elements);
- {
- Comment("INT16_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(1));
- Node* element = Load(MachineType::Int16(), backing_store, index);
- exit_point->Return(SmiFromInt32(element));
- }
- BIND(&uint32_elements);
- {
- Comment("UINT32_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Uint32(), backing_store, index);
- exit_point->Return(ChangeUint32ToTagged(element));
- }
- BIND(&int32_elements);
- {
- Comment("INT32_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Int32(), backing_store, index);
- exit_point->Return(ChangeInt32ToTagged(element));
- }
- BIND(&float32_elements);
- {
- Comment("FLOAT32_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Float32(), backing_store, index);
- var_double_value->Bind(ChangeFloat32ToFloat64(element));
- Goto(rebox_double);
- }
- BIND(&float64_elements);
- {
- Comment("FLOAT64_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(3));
- Node* element = Load(MachineType::Float64(), backing_store, index);
- var_double_value->Bind(element);
- Goto(rebox_double);
- }
- BIND(&bigint64_elements);
- {
- Comment("BIGINT64_ELEMENTS");
- exit_point->Return(LoadFixedTypedArrayElementAsTagged(
- backing_store, intptr_index, BIGINT64_ELEMENTS, INTPTR_PARAMETERS));
- }
- BIND(&biguint64_elements);
- {
- Comment("BIGUINT64_ELEMENTS");
- exit_point->Return(LoadFixedTypedArrayElementAsTagged(
- backing_store, intptr_index, BIGUINT64_ELEMENTS, INTPTR_PARAMETERS));
+ if (access_mode == LoadAccessMode::kHas) {
+ exit_point->Return(TrueConstant());
+ } else {
+ Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
+
+ Label uint8_elements(this), int8_elements(this), uint16_elements(this),
+ int16_elements(this), uint32_elements(this), int32_elements(this),
+ float32_elements(this), float64_elements(this),
+ bigint64_elements(this), biguint64_elements(this);
+ Label* elements_kind_labels[] = {
+ &uint8_elements, &uint8_elements, &int8_elements,
+ &uint16_elements, &int16_elements, &uint32_elements,
+ &int32_elements, &float32_elements, &float64_elements,
+ &bigint64_elements, &biguint64_elements};
+ int32_t elements_kinds[] = {
+ UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
+ UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
+ INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS,
+ BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS};
+ const size_t kTypedElementsKindCount =
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+ Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
+ kTypedElementsKindCount);
+ BIND(&uint8_elements);
+ {
+ Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
+ Node* element = Load(MachineType::Uint8(), backing_store, intptr_index);
+ exit_point->Return(SmiFromInt32(element));
+ }
+ BIND(&int8_elements);
+ {
+ Comment("INT8_ELEMENTS");
+ Node* element = Load(MachineType::Int8(), backing_store, intptr_index);
+ exit_point->Return(SmiFromInt32(element));
+ }
+ BIND(&uint16_elements);
+ {
+ Comment("UINT16_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(1));
+ Node* element = Load(MachineType::Uint16(), backing_store, index);
+ exit_point->Return(SmiFromInt32(element));
+ }
+ BIND(&int16_elements);
+ {
+ Comment("INT16_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(1));
+ Node* element = Load(MachineType::Int16(), backing_store, index);
+ exit_point->Return(SmiFromInt32(element));
+ }
+ BIND(&uint32_elements);
+ {
+ Comment("UINT32_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
+ Node* element = Load(MachineType::Uint32(), backing_store, index);
+ exit_point->Return(ChangeUint32ToTagged(element));
+ }
+ BIND(&int32_elements);
+ {
+ Comment("INT32_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
+ Node* element = Load(MachineType::Int32(), backing_store, index);
+ exit_point->Return(ChangeInt32ToTagged(element));
+ }
+ BIND(&float32_elements);
+ {
+ Comment("FLOAT32_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
+ Node* element = Load(MachineType::Float32(), backing_store, index);
+ var_double_value->Bind(ChangeFloat32ToFloat64(element));
+ Goto(rebox_double);
+ }
+ BIND(&float64_elements);
+ {
+ Comment("FLOAT64_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(3));
+ Node* element = Load(MachineType::Float64(), backing_store, index);
+ var_double_value->Bind(element);
+ Goto(rebox_double);
+ }
+ BIND(&bigint64_elements);
+ {
+ Comment("BIGINT64_ELEMENTS");
+ exit_point->Return(LoadFixedTypedArrayElementAsTagged(
+ backing_store, intptr_index, BIGINT64_ELEMENTS, INTPTR_PARAMETERS));
+ }
+ BIND(&biguint64_elements);
+ {
+ Comment("BIGUINT64_ELEMENTS");
+ exit_point->Return(LoadFixedTypedArrayElementAsTagged(
+ backing_store, intptr_index, BIGUINT64_ELEMENTS,
+ INTPTR_PARAMETERS));
+ }
}
}
}
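The typed-array cases above all follow the same pattern: the element index is scaled to a byte offset by shifting left by log2 of the element width (no shift for 8-bit kinds, 1 for 16-bit, 2 for 32-bit, 3 for 64-bit) before the raw machine load. A minimal standalone sketch of that arithmetic, with an illustrative helper name:

#include <cassert>
#include <cstdint>

// Sketch only: byte offset = index << log2(element width), mirroring the
// WordShl(intptr_index, IntPtrConstant(shift)) calls in the assembler code.
constexpr intptr_t ElementByteOffset(intptr_t index, int log2_element_size) {
  return index << log2_element_size;
}

int main() {
  assert(ElementByteOffset(3, 0) == 3);   // Uint8/Int8/Uint8Clamped
  assert(ElementByteOffset(3, 1) == 6);   // Uint16/Int16
  assert(ElementByteOffset(3, 2) == 12);  // Uint32/Int32/Float32
  assert(ElementByteOffset(3, 3) == 24);  // Float64/BigInt64/BigUint64
  return 0;
}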
@@ -1990,43 +2099,6 @@ void AccessorAssembler::NameDictionaryNegativeLookup(Node* object,
BIND(&done);
}
-void AccessorAssembler::BranchIfStrictMode(Node* vector, Node* slot,
- Label* if_strict) {
- Node* sfi =
- LoadObjectField(vector, FeedbackVector::kSharedFunctionInfoOffset);
- TNode<FeedbackMetadata> metadata = CAST(LoadObjectField(
- sfi, SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset));
- Node* slot_int = SmiToInt32(slot);
-
- // See VectorICComputer::index().
- const int kItemsPerWord = FeedbackMetadata::VectorICComputer::kItemsPerWord;
- Node* word_index = Int32Div(slot_int, Int32Constant(kItemsPerWord));
- Node* word_offset = Int32Mod(slot_int, Int32Constant(kItemsPerWord));
-
- int32_t first_item = FeedbackMetadata::kHeaderSize - kHeapObjectTag;
- Node* offset =
- ElementOffsetFromIndex(ChangeInt32ToIntPtr(word_index), UINT32_ELEMENTS,
- INTPTR_PARAMETERS, first_item);
-
- Node* data = Load(MachineType::Int32(), metadata, offset);
-
- // See VectorICComputer::decode().
- const int kBitsPerItem = FeedbackMetadata::kFeedbackSlotKindBits;
- Node* shift = Int32Mul(word_offset, Int32Constant(kBitsPerItem));
- const int kMask = FeedbackMetadata::VectorICComputer::kMask;
- Node* kind = Word32And(Word32Shr(data, shift), Int32Constant(kMask));
-
- STATIC_ASSERT(FeedbackSlotKind::kStoreGlobalSloppy <=
- FeedbackSlotKind::kLastSloppyKind);
- STATIC_ASSERT(FeedbackSlotKind::kStoreKeyedSloppy <=
- FeedbackSlotKind::kLastSloppyKind);
- STATIC_ASSERT(FeedbackSlotKind::kStoreNamedSloppy <=
- FeedbackSlotKind::kLastSloppyKind);
- GotoIfNot(Int32LessThanOrEqual(kind, Int32Constant(static_cast<int>(
- FeedbackSlotKind::kLastSloppyKind))),
- if_strict);
-}
-
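For context on what the removed helper was doing: FeedbackMetadata packs several slot kinds into each 32-bit word, and VectorICComputer::index()/decode() recover one item with a divide, a modulo, a shift, and a mask. A self-contained sketch of that packing scheme, using made-up constants rather than V8's real ones:

#include <cassert>
#include <cstdint>

// Illustrative constants; the real values come from FeedbackMetadata.
constexpr int kBitsPerItem = 5;                   // assumed item width
constexpr int kItemsPerWord = 32 / kBitsPerItem;  // 6 items per word here
constexpr uint32_t kMask = (1u << kBitsPerItem) - 1;

uint32_t DecodeItem(const uint32_t* words, int slot) {
  int word_index = slot / kItemsPerWord;              // which word
  int shift = (slot % kItemsPerWord) * kBitsPerItem;  // bit position in word
  return (words[word_index] >> shift) & kMask;
}

int main() {
  uint32_t words[2] = {0, 0};
  // Encode the value 9 into slot 7 (word 1, item 1).
  words[7 / kItemsPerWord] |= 9u << ((7 % kItemsPerWord) * kBitsPerItem);
  assert(DecodeItem(words, 7) == 9);
  return 0;
}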
void AccessorAssembler::InvalidateValidityCellIfPrototype(Node* map,
Node* bitfield2) {
Label is_prototype(this), cont(this);
@@ -2141,10 +2213,10 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
// for a handler in the stub cache.
TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
- Label if_descriptor_found(this), stub_cache(this);
+ Label if_descriptor_found(this), try_stub_cache(this);
TVARIABLE(IntPtrT, var_name_index);
- Label* notfound =
- use_stub_cache == kUseStubCache ? &stub_cache : &lookup_prototype_chain;
+ Label* notfound = use_stub_cache == kUseStubCache ? &try_stub_cache
+ : &lookup_prototype_chain;
DescriptorLookup(p->name, descriptors, bitfield3, &if_descriptor_found,
&var_name_index, notfound);
@@ -2157,6 +2229,13 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
}
if (use_stub_cache == kUseStubCache) {
+ Label stub_cache(this);
+ BIND(&try_stub_cache);
+ // When there is no feedback vector, don't use the stub cache.
+ GotoIfNot(IsUndefined(p->vector), &stub_cache);
+ // Fall back to the slow path for private symbols.
+ Branch(IsPrivateSymbol(p->name), slow, &lookup_prototype_chain);
+
BIND(&stub_cache);
Comment("stub cache probe for fast property load");
TVARIABLE(MaybeObject, var_handler);
@@ -2288,7 +2367,10 @@ Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
// Using only the low bits in 64-bit mode is unlikely to increase the
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
- Node* map32 = TruncateIntPtrToInt32(BitcastTaggedToWord(map));
+ Node* map_word = BitcastTaggedToWord(map);
+
+ Node* map32 = TruncateIntPtrToInt32(UncheckedCast<IntPtrT>(
+ WordXor(map_word, WordShr(map_word, StubCache::kMapKeyShift))));
// Base the offset on a simple combination of name and map.
Node* hash = Int32Add(hash_field, map32);
uint32_t mask = (StubCache::kPrimaryTableSize - 1)
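The XOR-with-shifted-self folds higher map-address bits into the low ones before they are combined with the name's hash, which spreads maps that share their low bits across different buckets. A plain-C++ sketch of the resulting primary-offset computation; kMapKeyShift, the table size, and the final mask are simplified placeholders here, not the real StubCache constants:

#include <cstdint>
#include <cstdio>

// Placeholder constants; the real ones live in src/ic/stub-cache.h.
constexpr int kMapKeyShift = 10;
constexpr uint32_t kPrimaryTableSize = 2048;

uint32_t PrimaryOffset(uint32_t name_hash_field, uintptr_t map_word) {
  // Fold high map bits down, as in the WordXor/WordShr sequence above.
  uint32_t map32 =
      static_cast<uint32_t>(map_word ^ (map_word >> kMapKeyShift));
  uint32_t hash = name_hash_field + map32;  // combine name and map
  return hash & (kPrimaryTableSize - 1);    // bucket index
}

int main() {
  std::printf("bucket: %u\n", PrimaryOffset(0x1234u, 0xdeadbeefcafeULL));
  return 0;
}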
@@ -2391,17 +2473,18 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
// changes in control flow and logic. We currently have no way of ensuring
// that no frame is constructed, so it's easy to break this optimization by
// accident.
- Label stub_call(this, Label::kDeferred), miss(this, Label::kDeferred);
+ Label stub_call(this, Label::kDeferred), miss(this, Label::kDeferred),
+ no_feedback(this, Label::kDeferred);
- GotoIf(IsUndefined(p->vector), &miss);
+ Node* recv_map = LoadReceiverMap(p->receiver);
+ GotoIf(IsDeprecatedMap(recv_map), &miss);
+
+ GotoIf(IsUndefined(p->vector), &no_feedback);
// Inlined fast path.
{
Comment("LoadIC_BytecodeHandler_fast");
- Node* recv_map = LoadReceiverMap(p->receiver);
- GotoIf(IsDeprecatedMap(recv_map), &miss);
-
TVARIABLE(MaybeObject, var_handler);
Label try_polymorphic(this), if_handler(this, &var_handler);
@@ -2418,7 +2501,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
GetHeapObjectIfStrong(feedback, &miss);
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &stub_call);
HandlePolymorphicCase(recv_map, CAST(strong_feedback), &if_handler,
- &var_handler, &miss, 2);
+ &var_handler, &miss);
}
}
@@ -2434,6 +2517,15 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
p->receiver, p->name, p->slot, p->vector);
}
+ BIND(&no_feedback);
+ {
+ Comment("LoadIC_BytecodeHandler_nofeedback");
+ // Call into the stub that implements the non-inlined parts of LoadIC.
+ exit_point->ReturnCallStub(
+ Builtins::CallableFor(isolate(), Builtins::kLoadIC_Uninitialized),
+ p->context, p->receiver, p->name, p->slot, p->vector);
+ }
+
BIND(&miss);
{
Comment("LoadIC_BytecodeHandler_miss");
@@ -2469,7 +2561,7 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
Comment("LoadIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &non_inlined);
HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
- &var_handler, &miss, 2);
+ &var_handler, &miss);
}
BIND(&non_inlined);
@@ -2518,20 +2610,61 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
}
}
+// TODO(8860): This check is only required so we can make prototypes fast on
+// the first load. It is not very useful when there is no feedback vector and
+// may not matter once feedback vectors are allocated lazily. Once lazy
+// allocation of feedback vectors has landed, try to eliminate this check.
+void AccessorAssembler::BranchIfPrototypeShouldbeFast(Node* receiver_map,
+ Label* prototype_not_fast,
+ Label* prototype_fast) {
+ VARIABLE(var_map, MachineRepresentation::kTagged);
+ var_map.Bind(receiver_map);
+ Label loop_body(this, &var_map);
+ Goto(&loop_body);
+
+ BIND(&loop_body);
+ {
+ Node* map = var_map.value();
+ Node* prototype = LoadMapPrototype(map);
+ GotoIf(IsNull(prototype), prototype_fast);
+ TNode<PrototypeInfo> proto_info =
+ LoadMapPrototypeInfo(receiver_map, prototype_not_fast);
+ GotoIf(IsNull(prototype), prototype_not_fast);
+ TNode<Uint32T> flags =
+ LoadObjectField<Uint32T>(proto_info, PrototypeInfo::kBitFieldOffset);
+ GotoIf(Word32Equal(flags, Uint32Constant(0)), prototype_not_fast);
+
+ Node* prototype_map = LoadMap(prototype);
+ var_map.Bind(prototype_map);
+ Goto(&loop_body);
+ }
+}
+
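A note on the shape of this loop: it advances along the prototype chain and reports "not fast" as soon as a link lacks PrototypeInfo or carries a zero bit field, while reaching the null prototype means every link passed. (As written, the CSA code reloads receiver_map's PrototypeInfo on each iteration; the analogue below follows the apparent intent of inspecting the current map.) A heap-free C++ analogue with invented stand-in types:

#include <cassert>

// Invented stand-ins for Map/PrototypeInfo; only the walk's shape matters.
struct Map {
  const Map* prototype_map;   // nullptr terminates the chain
  bool has_prototype_info;    // stands in for LoadMapPrototypeInfo succeeding
  unsigned proto_info_flags;  // stands in for PrototypeInfo::kBitFieldOffset
};

bool PrototypeShouldBeFast(const Map* receiver_map) {
  for (const Map* map = receiver_map; map->prototype_map != nullptr;
       map = map->prototype_map) {
    if (!map->has_prototype_info) return false;    // prototype_not_fast
    if (map->proto_info_flags == 0) return false;  // prototype_not_fast
  }
  return true;  // reached the null prototype: prototype_fast
}

int main() {
  Map object_proto{nullptr, true, 1u};
  Map receiver{&object_proto, true, 1u};
  assert(PrototypeShouldBeFast(&receiver));
  receiver.proto_info_flags = 0;  // a "slow" link anywhere fails the walk
  assert(!PrototypeShouldBeFast(&receiver));
  return 0;
}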
void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
- Label miss(this, Label::kDeferred);
+ Label miss(this, Label::kDeferred),
+ check_if_fast_prototype(this, Label::kDeferred),
+ check_function_prototype(this);
Node* receiver = p->receiver;
GotoIf(TaggedIsSmi(receiver), &miss);
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
+ GotoIf(IsUndefined(p->vector), &check_if_fast_prototype);
// Optimistically write the state transition to the vector.
StoreFeedbackVectorSlot(p->vector, p->slot,
LoadRoot(RootIndex::kpremonomorphic_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
StoreWeakReferenceInFeedbackVector(p->vector, p->slot, receiver_map,
kTaggedSize, SMI_PARAMETERS);
+ Goto(&check_function_prototype);
+ BIND(&check_if_fast_prototype);
+ {
+ BranchIfPrototypeShouldbeFast(receiver_map, &miss,
+ &check_function_prototype);
+ }
+
+ BIND(&check_function_prototype);
{
// Special case for Function.prototype load, because it's very common
// for ICs that are only executed once (MyFunc.prototype.foo = ...).
@@ -2551,28 +2684,34 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
BIND(&miss);
{
+ Label call_runtime(this, Label::kDeferred);
+ GotoIf(IsUndefined(p->vector), &call_runtime);
// Undo the optimistic state transition.
StoreFeedbackVectorSlot(p->vector, p->slot,
LoadRoot(RootIndex::kuninitialized_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
+ Goto(&call_runtime);
+ BIND(&call_runtime);
TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
p->slot, p->vector);
}
}
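The structure worth noting here is the optimistic write with an undo: when a feedback vector exists, the slot is eagerly marked premonomorphic, and the miss path reverts it to uninitialized before calling into the runtime; with no vector, both writes are skipped. A reduced C++ sketch of that pattern (the state names and the boolean fast path are illustrative):

#include <cassert>
#include <optional>

enum class SlotState { kUninitialized, kPremonomorphic };

// Sketch: optimistic transition plus undo-on-miss, both guarded on the
// vector being present (the IsUndefined(p->vector) checks above).
bool LoadWithOptimisticTransition(std::optional<SlotState>* slot,
                                  bool fast_path_succeeds) {
  if (slot->has_value()) *slot = SlotState::kPremonomorphic;  // optimistic
  if (fast_path_succeeds) return true;
  if (slot->has_value()) *slot = SlotState::kUninitialized;   // undo
  // ...the real code tail-calls Runtime::kLoadIC_Miss here...
  return false;
}

int main() {
  std::optional<SlotState> slot = SlotState::kUninitialized;
  LoadWithOptimisticTransition(&slot, /*fast_path_succeeds=*/false);
  assert(slot == SlotState::kUninitialized);  // transition was undone
  std::optional<SlotState> no_vector;         // vector is undefined
  LoadWithOptimisticTransition(&no_vector, /*fast_path_succeeds=*/false);
  assert(!no_vector.has_value());
  return 0;
}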
-void AccessorAssembler::LoadGlobalIC(TNode<FeedbackVector> vector, Node* slot,
+void AccessorAssembler::LoadGlobalIC(Node* vector, Node* slot,
const LazyNode<Context>& lazy_context,
const LazyNode<Name>& lazy_name,
TypeofMode typeof_mode,
ExitPoint* exit_point,
ParameterMode slot_mode) {
Label try_handler(this, Label::kDeferred), miss(this, Label::kDeferred);
- LoadGlobalIC_TryPropertyCellCase(vector, slot, lazy_context, exit_point,
+ GotoIf(IsUndefined(vector), &miss);
+
+ LoadGlobalIC_TryPropertyCellCase(CAST(vector), slot, lazy_context, exit_point,
&try_handler, &miss, slot_mode);
BIND(&try_handler);
- LoadGlobalIC_TryHandlerCase(vector, slot, lazy_context, lazy_name,
+ LoadGlobalIC_TryHandlerCase(CAST(vector), slot, lazy_context, lazy_name,
typeof_mode, exit_point, &miss, slot_mode);
BIND(&miss);
@@ -2655,27 +2794,30 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
on_nonexistent);
}
-void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
+void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
+ LoadAccessMode access_mode) {
ExitPoint direct_exit(this);
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), try_polymorphic(this, Label::kDeferred),
try_megamorphic(this, Label::kDeferred),
try_polymorphic_name(this, Label::kDeferred),
- miss(this, Label::kDeferred);
+ miss(this, Label::kDeferred), generic(this, Label::kDeferred);
Node* receiver_map = LoadReceiverMap(p->receiver);
GotoIf(IsDeprecatedMap(receiver_map), &miss);
+ GotoIf(IsUndefined(p->vector), &generic);
+
// Check monomorphic case.
TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
{
- HandleLoadICHandlerCase(p, CAST(var_handler.value()), &miss, &direct_exit,
- ICMode::kNonGlobalIC,
- OnNonExistent::kReturnUndefined, kSupportElements);
+ HandleLoadICHandlerCase(
+ p, CAST(var_handler.value()), &miss, &direct_exit, ICMode::kNonGlobalIC,
+ OnNonExistent::kReturnUndefined, kSupportElements, access_mode);
}
BIND(&try_polymorphic);
@@ -2685,20 +2827,26 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
Comment("KeyedLoadIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &try_megamorphic);
HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
- &var_handler, &miss, 2);
+ &var_handler, &miss);
}
BIND(&try_megamorphic);
{
// Check megamorphic case.
Comment("KeyedLoadIC_try_megamorphic");
- GotoIfNot(
- WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
- &try_polymorphic_name);
+ Branch(WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
+ &generic, &try_polymorphic_name);
+ }
+
+ BIND(&generic);
+ {
// TODO(jkummerow): Inline this? Or some of it?
- TailCallBuiltin(Builtins::kKeyedLoadIC_Megamorphic, p->context, p->receiver,
- p->name, p->slot, p->vector);
+ TailCallBuiltin(access_mode == LoadAccessMode::kLoad
+ ? Builtins::kKeyedLoadIC_Megamorphic
+ : Builtins::kKeyedHasIC_Megamorphic,
+ p->context, p->receiver, p->name, p->slot, p->vector);
}
+
BIND(&try_polymorphic_name);
{
// We might have a name in feedback, and a weak fixed array in the next
@@ -2742,16 +2890,20 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
// If the name comparison succeeded, we know we have a weak fixed array
// with at least one map/handler pair.
Node* name = var_name.value();
- TailCallBuiltin(Builtins::kKeyedLoadIC_PolymorphicName, p->context,
- p->receiver, name, p->slot, p->vector);
+ TailCallBuiltin(access_mode == LoadAccessMode::kLoad
+ ? Builtins::kKeyedLoadIC_PolymorphicName
+ : Builtins::kKeyedHasIC_PolymorphicName,
+ p->context, p->receiver, name, p->slot, p->vector);
}
}
BIND(&miss);
{
Comment("KeyedLoadIC_miss");
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
- p->name, p->slot, p->vector);
+ TailCallRuntime(access_mode == LoadAccessMode::kLoad
+ ? Runtime::kKeyedLoadIC_Miss
+ : Runtime::kKeyedHasIC_Miss,
+ p->context, p->receiver, p->name, p->slot, p->vector);
}
}
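Threading LoadAccessMode through means every non-fast exit now selects between a Load target and a Has target. Collected in one place as a small C++ sketch (the struct is illustrative; the strings are the builtin/runtime names chosen above):

#include <cstdio>

enum class LoadAccessMode { kLoad, kHas };

// Illustrative summary of the dispatch performed in KeyedLoadIC above.
struct Targets {
  const char* megamorphic;
  const char* polymorphic_name;
  const char* miss;
};

Targets SelectTargets(LoadAccessMode mode) {
  if (mode == LoadAccessMode::kLoad) {
    return {"KeyedLoadIC_Megamorphic", "KeyedLoadIC_PolymorphicName",
            "KeyedLoadIC_Miss"};
  }
  return {"KeyedHasIC_Megamorphic", "KeyedHasIC_PolymorphicName",
          "KeyedHasIC_Miss"};
}

int main() {
  std::printf("%s\n", SelectTargets(LoadAccessMode::kHas).miss);
  return 0;
}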
@@ -2834,7 +2986,8 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
}
}
-void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p) {
+void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
+ LoadAccessMode access_mode) {
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
@@ -2857,22 +3010,23 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p) {
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(vector, slot, kTaggedSize, SMI_PARAMETERS);
TNode<WeakFixedArray> array = CAST(feedback_element);
- HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
- 1);
+ HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss);
BIND(&if_handler);
{
ExitPoint direct_exit(this);
- HandleLoadICHandlerCase(p, CAST(var_handler.value()), &miss, &direct_exit,
- ICMode::kNonGlobalIC,
- OnNonExistent::kReturnUndefined, kOnlyProperties);
+ HandleLoadICHandlerCase(
+ p, CAST(var_handler.value()), &miss, &direct_exit, ICMode::kNonGlobalIC,
+ OnNonExistent::kReturnUndefined, kOnlyProperties, access_mode);
}
BIND(&miss);
{
Comment("KeyedLoadIC_miss");
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, name, slot,
- vector);
+ TailCallRuntime(access_mode == LoadAccessMode::kLoad
+ ? Runtime::kKeyedLoadIC_Miss
+ : Runtime::kKeyedHasIC_Miss,
+ context, receiver, name, slot, vector);
}
}
@@ -2884,11 +3038,14 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
if_handler_from_stub_cache(this, &var_handler, Label::kDeferred),
try_polymorphic(this, Label::kDeferred),
try_megamorphic(this, Label::kDeferred),
- try_uninitialized(this, Label::kDeferred), miss(this, Label::kDeferred);
+ try_uninitialized(this, Label::kDeferred), miss(this, Label::kDeferred),
+ no_feedback(this, Label::kDeferred);
Node* receiver_map = LoadReceiverMap(p->receiver);
GotoIf(IsDeprecatedMap(receiver_map), &miss);
+ GotoIf(IsUndefined(p->vector), &no_feedback);
+
// Check monomorphic case.
TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
@@ -2907,7 +3064,7 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
Comment("StoreIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &try_megamorphic);
HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
- &var_handler, &miss, 2);
+ &var_handler, &miss);
}
BIND(&try_megamorphic);
@@ -2923,12 +3080,17 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
BIND(&try_uninitialized);
{
// Check uninitialized case.
- GotoIfNot(
+ Branch(
WordEqual(strong_feedback, LoadRoot(RootIndex::kuninitialized_symbol)),
- &miss);
+ &no_feedback, &miss);
+ }
+
+ BIND(&no_feedback);
+ {
TailCallBuiltin(Builtins::kStoreIC_Uninitialized, p->context, p->receiver,
p->name, p->value, p->slot, p->vector);
}
+
BIND(&miss);
{
TailCallRuntime(Runtime::kStoreIC_Miss, p->context, p->value, p->slot,
@@ -2937,14 +3099,18 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
}
void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
- Label if_lexical_var(this), if_property_cell(this);
+ Label if_lexical_var(this), if_heapobject(this);
TNode<MaybeObject> maybe_weak_ref =
LoadFeedbackVectorSlot(pp->vector, pp->slot, 0, SMI_PARAMETERS);
- Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_property_cell);
+ Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_heapobject);
- BIND(&if_property_cell);
+ BIND(&if_heapobject);
{
Label try_handler(this), miss(this, Label::kDeferred);
+ GotoIf(
+ WordEqual(maybe_weak_ref, LoadRoot(RootIndex::kpremonomorphic_symbol)),
+ &miss);
+
CSA_ASSERT(this, IsWeakOrCleared(maybe_weak_ref));
TNode<PropertyCell> property_cell =
CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, &try_handler));
@@ -3065,11 +3231,14 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
Label if_handler(this, &var_handler),
try_polymorphic(this, Label::kDeferred),
try_megamorphic(this, Label::kDeferred),
+ no_feedback(this, Label::kDeferred),
try_polymorphic_name(this, Label::kDeferred);
Node* receiver_map = LoadReceiverMap(p->receiver);
GotoIf(IsDeprecatedMap(receiver_map), &miss);
+ GotoIf(IsUndefined(p->vector), &no_feedback);
+
// Check monomorphic case.
TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
@@ -3089,18 +3258,22 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)),
&try_megamorphic);
HandlePolymorphicCase(receiver_map, CAST(strong_feedback), &if_handler,
- &var_handler, &miss, 2);
+ &var_handler, &miss);
}
BIND(&try_megamorphic);
{
// Check megamorphic case.
Comment("KeyedStoreIC_try_megamorphic");
- GotoIfNot(
+ Branch(
WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
- &try_polymorphic_name);
+ &no_feedback, &try_polymorphic_name);
+ }
+
+ BIND(&no_feedback);
+ {
TailCallBuiltin(Builtins::kKeyedStoreIC_Megamorphic, p->context,
- p->receiver, p->name, p->value, p->slot, p->vector);
+ p->receiver, p->name, p->value, p->slot);
}
BIND(&try_polymorphic_name);
@@ -3114,7 +3287,7 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
p->vector, p->slot, kTaggedSize, SMI_PARAMETERS);
TNode<WeakFixedArray> array = CAST(feedback_element);
HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
- &miss, 1);
+ &miss);
}
}
BIND(&miss);
@@ -3136,6 +3309,9 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
Node* array_map = LoadReceiverMap(p->receiver);
GotoIf(IsDeprecatedMap(array_map), &miss);
+
+ GotoIf(IsUndefined(p->vector), &miss);
+
TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot, p->vector, array_map, &if_handler,
&var_handler, &try_polymorphic);
@@ -3172,7 +3348,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)),
&try_megamorphic);
HandlePolymorphicCase(array_map, CAST(strong_feedback), &if_handler,
- &var_handler, &miss, 2);
+ &var_handler, &miss);
}
BIND(&try_megamorphic);
@@ -3316,11 +3492,12 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
Node* context = Parameter(Descriptor::kContext);
ExitPoint direct_exit(this);
- LoadGlobalIC(CAST(vector), slot,
- // lazy_context
- [=] { return CAST(context); },
- // lazy_name
- [=] { return CAST(name); }, typeof_mode, &direct_exit);
+ LoadGlobalIC(
+ vector, slot,
+ // lazy_context
+ [=] { return CAST(context); },
+ // lazy_name
+ [=] { return CAST(name); }, typeof_mode, &direct_exit);
}
void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
@@ -3346,7 +3523,7 @@ void AccessorAssembler::GenerateKeyedLoadIC() {
Node* context = Parameter(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
- KeyedLoadIC(&p);
+ KeyedLoadIC(&p, LoadAccessMode::kLoad);
}
void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
@@ -3398,7 +3575,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
Node* context = Parameter(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
- KeyedLoadICPolymorphicName(&p);
+ KeyedLoadICPolymorphicName(&p, LoadAccessMode::kLoad);
}
void AccessorAssembler::GenerateStoreGlobalIC() {
@@ -3541,6 +3718,7 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
{
Label cont(this);
GotoIf(IsJSObjectInstanceType(type), &cont);
+ GotoIf(InstanceTypeEqual(type, JS_PROXY_TYPE), &call_runtime);
GotoIfNot(IsStringInstanceType(type), &done);
Branch(SmiEqual(LoadStringLengthAsSmi(CAST(source)), SmiConstant(0)), &done,
&call_runtime);
@@ -3549,12 +3727,12 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
GotoIfNot(IsEmptyFixedArray(LoadElements(CAST(source))), &call_runtime);
- ForEachEnumerableOwnProperty(context, map, CAST(source),
- [=](TNode<Name> key, TNode<Object> value) {
- SetPropertyInLiteral(context, result, key,
- value);
- },
- &call_runtime);
+ ForEachEnumerableOwnProperty(
+ context, map, CAST(source), kPropertyAdditionOrder,
+ [=](TNode<Name> key, TNode<Object> value) {
+ SetPropertyInLiteral(context, result, key, value);
+ },
+ &call_runtime);
Goto(&done);
BIND(&call_runtime);
@@ -3575,10 +3753,13 @@ void AccessorAssembler::GenerateCloneObjectIC() {
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler);
Label miss(this, Label::kDeferred), try_polymorphic(this, Label::kDeferred),
- try_megamorphic(this, Label::kDeferred);
+ try_megamorphic(this, Label::kDeferred), slow(this, Label::kDeferred);
TNode<Map> source_map = LoadMap(UncheckedCast<HeapObject>(source));
GotoIf(IsDeprecatedMap(source_map), &miss);
+
+ GotoIf(IsUndefined(vector), &slow);
+
TNode<MaybeObject> feedback = TryMonomorphicCase(
slot, vector, source_map, &if_handler, &var_handler, &try_polymorphic);
@@ -3681,7 +3862,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
Comment("CloneObjectIC_try_polymorphic");
GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &try_megamorphic);
HandlePolymorphicCase(source_map, CAST(strong_feedback), &if_handler,
- &var_handler, &miss, 2);
+ &var_handler, &miss);
}
BIND(&try_megamorphic);
@@ -3695,6 +3876,11 @@ void AccessorAssembler::GenerateCloneObjectIC() {
GotoIfNot(
WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
&miss);
+ Goto(&slow);
+ }
+
+ BIND(&slow);
+ {
TailCallBuiltin(Builtins::kCloneObjectIC_Slow, context, source, flags, slot,
vector);
}
@@ -3711,5 +3897,42 @@ void AccessorAssembler::GenerateCloneObjectIC() {
}
}
+void AccessorAssembler::GenerateKeyedHasIC() {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ KeyedLoadIC(&p, LoadAccessMode::kHas);
+}
+
+void AccessorAssembler::GenerateKeyedHasIC_Megamorphic() {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* context = Parameter(Descriptor::kContext);
+ // TODO(magardn): implement HasProperty handling in KeyedLoadICGeneric
+ Return(HasProperty(context, receiver, name,
+ HasPropertyLookupMode::kHasProperty));
+}
+
+void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ KeyedLoadICPolymorphicName(&p, LoadAccessMode::kHas);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 1022d0f160..beed6f655f 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -44,6 +44,9 @@ class AccessorAssembler : public CodeStubAssembler {
void GenerateStoreGlobalICTrampoline();
void GenerateCloneObjectIC();
void GenerateCloneObjectIC_Slow();
+ void GenerateKeyedHasIC();
+ void GenerateKeyedHasIC_Megamorphic();
+ void GenerateKeyedHasIC_PolymorphicName();
void GenerateLoadGlobalIC(TypeofMode typeof_mode);
void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
@@ -82,7 +85,7 @@ class AccessorAssembler : public CodeStubAssembler {
Node* holder;
};
- void LoadGlobalIC(TNode<FeedbackVector> vector, Node* slot,
+ void LoadGlobalIC(Node* vector, Node* slot,
const LazyNode<Context>& lazy_context,
const LazyNode<Name>& lazy_name, TypeofMode typeof_mode,
ExitPoint* exit_point,
@@ -105,6 +108,7 @@ class AccessorAssembler : public CodeStubAssembler {
SloppyTNode<Object> value;
};
+ enum class LoadAccessMode { kLoad, kHas };
enum class ICMode { kNonGlobalIC, kGlobalIC };
enum ElementSupport { kOnlyProperties, kSupportElements };
void HandleStoreICHandlerCase(
@@ -123,8 +127,6 @@ class AccessorAssembler : public CodeStubAssembler {
void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
- void BranchIfStrictMode(Node* vector, Node* slot, Label* if_strict);
-
void InvalidateValidityCellIfPrototype(Node* map, Node* bitfield2 = nullptr);
void OverwriteExistingFastDataProperty(Node* object, Node* object_map,
@@ -155,9 +157,10 @@ class AccessorAssembler : public CodeStubAssembler {
void LoadIC_Uninitialized(const LoadICParameters* p);
- void KeyedLoadIC(const LoadICParameters* p);
+ void KeyedLoadIC(const LoadICParameters* p, LoadAccessMode access_mode);
void KeyedLoadICGeneric(const LoadICParameters* p);
- void KeyedLoadICPolymorphicName(const LoadICParameters* p);
+ void KeyedLoadICPolymorphicName(const LoadICParameters* p,
+ LoadAccessMode access_mode);
void StoreIC(const StoreICParameters* p);
void StoreGlobalIC(const StoreICParameters* p);
void StoreGlobalIC_PropertyCellCase(Node* property_cell, Node* value,
@@ -175,26 +178,29 @@ class AccessorAssembler : public CodeStubAssembler {
void HandlePolymorphicCase(Node* receiver_map, TNode<WeakFixedArray> feedback,
Label* if_handler,
TVariable<MaybeObject>* var_handler,
- Label* if_miss, int min_feedback_capacity);
+ Label* if_miss);
// LoadIC implementation.
void HandleLoadICHandlerCase(
const LoadICParameters* p, TNode<Object> handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode = ICMode::kNonGlobalIC,
OnNonExistent on_nonexistent = OnNonExistent::kReturnUndefined,
- ElementSupport support_elements = kOnlyProperties);
+ ElementSupport support_elements = kOnlyProperties,
+ LoadAccessMode access_mode = LoadAccessMode::kLoad);
void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
SloppyTNode<Smi> smi_handler,
SloppyTNode<Object> handler, Label* miss,
ExitPoint* exit_point,
OnNonExistent on_nonexistent,
- ElementSupport support_elements);
+ ElementSupport support_elements,
+ LoadAccessMode access_mode);
void HandleLoadICProtoHandler(const LoadICParameters* p, Node* handler,
Variable* var_holder, Variable* var_smi_handler,
Label* if_smi_handler, Label* miss,
- ExitPoint* exit_point, ICMode ic_mode);
+ ExitPoint* exit_point, ICMode ic_mode,
+ LoadAccessMode access_mode);
void HandleLoadCallbackProperty(const LoadICParameters* p,
TNode<JSObject> holder,
@@ -213,6 +219,18 @@ class AccessorAssembler : public CodeStubAssembler {
void EmitAccessCheck(Node* expected_native_context, Node* context,
Node* receiver, Label* can_access, Label* miss);
+ void HandleLoadICSmiHandlerLoadNamedCase(
+ const LoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
+ TNode<WordT> handler_word, Label* rebox_double,
+ Variable* var_double_value, SloppyTNode<Object> handler, Label* miss,
+ ExitPoint* exit_point, OnNonExistent on_nonexistent,
+ ElementSupport support_elements);
+
+ void HandleLoadICSmiHandlerHasNamedCase(const LoadICParameters* p,
+ Node* holder,
+ TNode<IntPtrT> handler_kind,
+ Label* miss, ExitPoint* exit_point);
+
// LoadGlobalIC implementation.
void LoadGlobalIC_TryPropertyCellCase(
@@ -274,12 +292,14 @@ class AccessorAssembler : public CodeStubAssembler {
const OnFoundOnReceiver& on_found_on_receiver,
Label* miss, ICMode ic_mode);
- Node* GetLanguageMode(Node* vector, Node* slot);
-
Node* PrepareValueForStore(Node* handler_word, Node* holder,
Representation representation, Node* value,
Label* bailout);
+ void BranchIfPrototypeShouldbeFast(Node* receiver_map,
+ Label* prototype_not_fast,
+ Label* prototype_fast);
+
// Extends properties backing store by JSObject::kFieldsAdded elements,
// returns updated properties backing store.
Node* ExtendPropertiesBackingStore(Node* object, Node* index);
@@ -296,9 +316,11 @@ class AccessorAssembler : public CodeStubAssembler {
Label* if_hole, Label* rebox_double,
Variable* var_double_value,
Label* unimplemented_elements_kind, Label* out_of_bounds,
- Label* miss, ExitPoint* exit_point);
+ Label* miss, ExitPoint* exit_point,
+ LoadAccessMode access_mode = LoadAccessMode::kLoad);
void NameDictionaryNegativeLookup(Node* object, SloppyTNode<Name> name,
Label* miss);
+ TNode<BoolT> IsPropertyDetailsConst(Node* details);
// Stub cache access helpers.
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 1d9658e118..19ca5a9c6d 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -180,7 +180,7 @@ class LoadHandler final : public DataHandler {
// Decodes the KeyedAccessLoadMode from a {handler}.
static KeyedAccessLoadMode GetKeyedAccessLoadMode(MaybeObject handler);
- OBJECT_CONSTRUCTORS(LoadHandler, DataHandler)
+ OBJECT_CONSTRUCTORS(LoadHandler, DataHandler);
};
// A set of bit fields representing Smi handlers for stores and a HeapObject
@@ -301,7 +301,7 @@ class StoreHandler final : public DataHandler {
int descriptor, FieldIndex field_index,
Representation representation);
- OBJECT_CONSTRUCTORS(StoreHandler, DataHandler)
+ OBJECT_CONSTRUCTORS(StoreHandler, DataHandler);
};
} // namespace internal
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index c607679a2a..b050513adb 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -88,7 +88,7 @@ const char* GetModifier(KeyedAccessStoreMode mode) {
void IC::TraceIC(const char* type, Handle<Object> name) {
if (FLAG_ic_stats) {
if (AddressIsDeoptimizedCode()) return;
- State new_state = nexus()->StateFromFeedback();
+ State new_state = nexus()->ic_state();
TraceIC(type, name, state(), new_state);
}
}
@@ -200,7 +200,7 @@ IC::IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
}
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
DCHECK_IMPLIES(!vector.is_null(), kind_ == nexus_.kind());
- state_ = (vector.is_null()) ? NO_FEEDBACK : nexus_.StateFromFeedback();
+ state_ = (vector.is_null()) ? NO_FEEDBACK : nexus_.ic_state();
old_state_ = state_;
}
@@ -216,7 +216,7 @@ JSFunction IC::GetHostFunction() const {
return frame->function();
}
-static void LookupForRead(Isolate* isolate, LookupIterator* it) {
+static void LookupForRead(LookupIterator* it, bool is_has_property) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::NOT_FOUND:
@@ -227,7 +227,13 @@ static void LookupForRead(Isolate* isolate, LookupIterator* it) {
case LookupIterator::INTERCEPTOR: {
// If there is a getter, return; otherwise loop to perform the lookup.
Handle<JSObject> holder = it->GetHolder<JSObject>();
- if (!holder->GetNamedInterceptor()->getter()->IsUndefined(isolate)) {
+ if (!holder->GetNamedInterceptor()->getter()->IsUndefined(
+ it->isolate())) {
+ return;
+ }
+ if (is_has_property &&
+ !holder->GetNamedInterceptor()->query()->IsUndefined(
+ it->isolate())) {
return;
}
break;
@@ -277,14 +283,13 @@ bool IC::RecomputeHandlerForName(Handle<Object> name) {
if (is_keyed()) {
// Determine whether the failure is due to a name failure.
if (!name->IsName()) return false;
- Name stub_name = nexus()->FindFirstName();
+ Name stub_name = nexus()->GetName();
if (*name != stub_name) return false;
}
return true;
}
-
void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
if (state() == NO_FEEDBACK) return;
update_receiver_map(receiver);
@@ -306,7 +311,6 @@ MaybeHandle<Object> IC::TypeError(MessageTemplate index, Handle<Object> object,
THROW_NEW_ERROR(isolate(), NewTypeError(index, key, object), Object);
}
-
MaybeHandle<Object> IC::ReferenceError(Handle<Name> name) {
HandleScope scope(isolate());
THROW_NEW_ERROR(
@@ -429,7 +433,8 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
- if (object->IsNullOrUndefined(isolate())) {
+ if (IsAnyHas() ? !object->IsJSReceiver()
+ : object->IsNullOrUndefined(isolate())) {
if (use_ic && state() != PREMONOMORPHIC) {
// Ensure the IC state progresses.
TRACE_HANDLER_STATS(isolate(), LoadIC_NonReceiver);
@@ -441,7 +446,9 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
if (*name == ReadOnlyRoots(isolate()).iterator_symbol()) {
return Runtime::ThrowIteratorError(isolate(), object);
}
- return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
+ return TypeError(IsAnyHas() ? MessageTemplate::kInvalidInOperatorUse
+ : MessageTemplate::kNonObjectPropertyLoad,
+ object, name);
}
if (MigrateDeprecated(object)) use_ic = false;
@@ -450,9 +457,11 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
JSObject::MakePrototypesFast(object, kStartAtReceiver, isolate());
update_receiver_map(object);
}
- // Named lookup in the object.
+
LookupIterator it(isolate(), object, name);
- LookupForRead(isolate(), &it);
+
+ // Named lookup in the object.
+ LookupForRead(&it, IsAnyHas());
if (name->IsPrivate()) {
if (name->IsPrivateName() && !it.IsFound()) {
@@ -473,6 +482,14 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// Update inline cache and stub cache.
if (use_ic) UpdateCaches(&it);
+ if (IsAnyHas()) {
+ // For a has-access IC, answer with a boolean rather than the value.
+ Maybe<bool> maybe = JSReceiver::HasProperty(&it);
+ if (maybe.IsNothing()) return MaybeHandle<Object>();
+ return maybe.FromJust() ? ReadOnlyRoots(isolate()).true_value_handle()
+ : ReadOnlyRoots(isolate()).false_value_handle();
+ }
+
// Get the property.
Handle<Object> result;
@@ -498,7 +515,7 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
global->native_context()->script_context_table(), isolate());
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate(), script_contexts, str_name,
+ if (ScriptContextTable::Lookup(isolate(), *script_contexts, *str_name,
&lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate(), script_contexts, lookup_result.context_index);
@@ -514,8 +531,9 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
bool use_ic = (state() != NO_FEEDBACK) && FLAG_use_ic;
if (use_ic) {
- if (nexus()->ConfigureLexicalVarMode(lookup_result.context_index,
- lookup_result.slot_index)) {
+ if (nexus()->ConfigureLexicalVarMode(
+ lookup_result.context_index, lookup_result.slot_index,
+ lookup_result.mode == VariableMode::kConst)) {
TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_LoadScriptContextField);
} else {
// Given combination of indices can't be encoded, so use slow stub.
@@ -546,7 +564,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name,
const MaybeObjectHandle& handler) {
DCHECK(IsHandler(*handler));
if (is_keyed() && state() != RECOMPUTE_HANDLER) {
- if (nexus()->FindFirstName() != *name) return false;
+ if (nexus()->GetName() != *name) return false;
}
Handle<Map> map = receiver_map();
MapHandles maps;
@@ -586,7 +604,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name,
int number_of_valid_maps =
number_of_maps - deprecated_maps - (handler_to_overwrite != -1);
- if (number_of_valid_maps >= kMaxPolymorphicMapCount) return false;
+ if (number_of_valid_maps >= FLAG_max_polymorphic_map_count) return false;
if (number_of_maps == 0 && state() != MONOMORPHIC && state() != POLYMORPHIC) {
return false;
}
@@ -595,7 +613,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name,
if (number_of_valid_maps == 1) {
ConfigureVectorState(name, receiver_map(), handler);
} else {
- if (is_keyed() && nexus()->FindFirstName() != *name) return false;
+ if (is_keyed() && nexus()->GetName() != *name) return false;
if (handler_to_overwrite >= 0) {
handlers[handler_to_overwrite] = handler;
if (!map.is_identical_to(maps.at(handler_to_overwrite))) {
@@ -618,7 +636,6 @@ void IC::UpdateMonomorphicIC(const MaybeObjectHandle& handler,
ConfigureVectorState(name, receiver_map(), handler);
}
-
void IC::CopyICToMegamorphicCache(Handle<Name> name) {
MapHandles maps;
MaybeObjectHandles handlers;
@@ -653,7 +670,7 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) {
DCHECK(IsHandler(*handler));
// Currently only load and store ICs support non-code handlers.
- DCHECK(IsAnyLoad() || IsAnyStore());
+ DCHECK(IsAnyLoad() || IsAnyStore() || IsAnyHas());
switch (state()) {
case NO_FEEDBACK:
break;
@@ -727,6 +744,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
}
StubCache* IC::stub_cache() {
+ DCHECK(!IsAnyHas());
if (IsAnyLoad()) {
return isolate()->load_stub_cache();
} else {
@@ -737,13 +755,15 @@ StubCache* IC::stub_cache() {
void IC::UpdateMegamorphicCache(Handle<Map> map, Handle<Name> name,
const MaybeObjectHandle& handler) {
- stub_cache()->Set(*name, *map, *handler);
+ if (!IsAnyHas()) {
+ stub_cache()->Set(*name, *map, *handler);
+ }
}
void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
DCHECK_EQ(LookupIterator::ACCESSOR, lookup->state());
if (V8_LIKELY(!FLAG_runtime_stats)) return;
- if (IsAnyLoad()) {
+ if (IsAnyLoad() || IsAnyHas()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Accessor);
} else {
DCHECK(IsAnyStore());
@@ -754,22 +774,29 @@ void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<Object> receiver = lookup->GetReceiver();
ReadOnlyRoots roots(isolate());
- if (receiver->IsString() && *lookup->name() == roots.length_string()) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_StringLength);
- return BUILTIN_CODE(isolate(), LoadIC_StringLength);
- }
- if (receiver->IsStringWrapper() && *lookup->name() == roots.length_string()) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_StringWrapperLength);
- return BUILTIN_CODE(isolate(), LoadIC_StringWrapperLength);
- }
+ // `in` cannot be called on strings, and will always return true for string
+ // wrapper length and function prototypes. The latter two cases are handled
+ // by LoadHandler::LoadNativeDataProperty below.
+ if (!IsAnyHas()) {
+ if (receiver->IsString() && *lookup->name() == roots.length_string()) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_StringLength);
+ return BUILTIN_CODE(isolate(), LoadIC_StringLength);
+ }
+
+ if (receiver->IsStringWrapper() &&
+ *lookup->name() == roots.length_string()) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_StringWrapperLength);
+ return BUILTIN_CODE(isolate(), LoadIC_StringWrapperLength);
+ }
- // Use specialized code for getting prototype of functions.
- if (receiver->IsJSFunction() && *lookup->name() == roots.prototype_string() &&
- !JSFunction::cast(*receiver)->PrototypeRequiresRuntimeLookup()) {
- Handle<Code> stub;
- TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
- return BUILTIN_CODE(isolate(), LoadIC_FunctionPrototype);
+ // Use specialized code for getting prototype of functions.
+ if (receiver->IsJSFunction() &&
+ *lookup->name() == roots.prototype_string() &&
+ !JSFunction::cast(*receiver)->PrototypeRequiresRuntimeLookup()) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
+ return BUILTIN_CODE(isolate(), LoadIC_FunctionPrototype);
+ }
}
Handle<Map> map = receiver_map();
@@ -1081,24 +1108,66 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
}
}
+namespace {
+
+bool AllowConvertHoleElementToUndefined(Isolate* isolate,
+ Handle<Map> receiver_map) {
+ if (receiver_map->IsJSTypedArrayMap()) {
+ // For JSTypedArray we never lookup elements in the prototype chain.
+ return true;
+ }
+
+ // For other {receiver}s we need to check the "no elements" protector.
+ if (isolate->IsNoElementsProtectorIntact()) {
+ if (receiver_map->IsStringMap()) {
+ return true;
+ }
+ if (receiver_map->IsJSObjectMap()) {
+ // For other JSObjects (including JSArrays) we can only continue if
+ // the {receiver}s prototype is either the initial Object.prototype
+ // or the initial Array.prototype, which are both guarded by the
+ // "no elements" protector checked above.
+ Handle<Object> receiver_prototype(receiver_map->prototype(), isolate);
+
+ if (isolate->IsInAnyContext(*receiver_prototype,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
+ isolate->IsInAnyContext(*receiver_prototype,
+ Context::INITIAL_OBJECT_PROTOTYPE_INDEX)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+} // namespace
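AllowConvertHoleElementToUndefined centralizes the decision of when a holey element load may produce undefined directly instead of continuing into the prototype chain. Its decision table, restated as a self-contained C++ sketch with stand-in predicates for the map and protector queries:

#include <cassert>

// Stand-in inputs for the real map and protector queries.
struct ReceiverInfo {
  bool is_typed_array;
  bool is_string;
  bool is_js_object;
  bool prototype_is_initial_object_or_array_prototype;
};

bool AllowConvertHoleToUndefined(const ReceiverInfo& r,
                                 bool no_elements_protector_intact) {
  if (r.is_typed_array) return true;  // never consults the prototype chain
  if (!no_elements_protector_intact) return false;
  if (r.is_string) return true;  // ToObject gives initial String.prototype
  return r.is_js_object &&
         r.prototype_is_initial_object_or_array_prototype;
}

int main() {
  assert(AllowConvertHoleToUndefined({true, false, false, false}, false));
  assert(!AllowConvertHoleToUndefined({false, false, true, true}, false));
  assert(AllowConvertHoleToUndefined({false, false, true, true}, true));
  return 0;
}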
+
Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map,
KeyedAccessLoadMode load_mode) {
+ // The map has a getter interceptor, or this is a has-access IC and the
+ // map has a query interceptor.
if (receiver_map->has_indexed_interceptor() &&
- !receiver_map->GetIndexedInterceptor()->getter()->IsUndefined(
- isolate()) &&
+ (!receiver_map->GetIndexedInterceptor()->getter()->IsUndefined(
+ isolate()) ||
+ (IsAnyHas() &&
+ !receiver_map->GetIndexedInterceptor()->query()->IsUndefined(
+ isolate()))) &&
!receiver_map->GetIndexedInterceptor()->non_masking()) {
// TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadIndexedInterceptorStub);
- return BUILTIN_CODE(isolate(), LoadIndexedInterceptorIC);
+ return IsAnyHas() ? BUILTIN_CODE(isolate(), HasIndexedInterceptorIC)
+ : BUILTIN_CODE(isolate(), LoadIndexedInterceptorIC);
}
+
InstanceType instance_type = receiver_map->instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadIndexedStringDH);
+ if (IsAnyHas()) return BUILTIN_CODE(isolate(), HasIC_Slow);
return LoadHandler::LoadIndexedString(isolate(), load_mode);
}
if (instance_type < FIRST_JS_RECEIVER_TYPE) {
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_SlowStub);
- return BUILTIN_CODE(isolate(), KeyedLoadIC_Slow);
+ return IsAnyHas() ? BUILTIN_CODE(isolate(), HasIC_Slow)
+ : BUILTIN_CODE(isolate(), KeyedLoadIC_Slow);
}
if (instance_type == JS_PROXY_TYPE) {
return LoadHandler::LoadProxy(isolate());
@@ -1108,7 +1177,8 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map,
if (IsSloppyArgumentsElementsKind(elements_kind)) {
// TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_KeyedLoadSloppyArgumentsStub);
- return BUILTIN_CODE(isolate(), KeyedLoadIC_SloppyArguments);
+ return IsAnyHas() ? BUILTIN_CODE(isolate(), KeyedHasIC_SloppyArguments)
+ : BUILTIN_CODE(isolate(), KeyedLoadIC_SloppyArguments);
}
bool is_js_array = instance_type == JS_ARRAY_TYPE;
if (elements_kind == DICTIONARY_ELEMENTS) {
@@ -1118,11 +1188,10 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map,
}
DCHECK(IsFastElementsKind(elements_kind) ||
IsFixedTypedArrayElementsKind(elements_kind));
- // TODO(jkummerow): Use IsHoleyOrDictionaryElementsKind(elements_kind).
bool convert_hole_to_undefined =
- is_js_array && elements_kind == HOLEY_ELEMENTS &&
- *receiver_map ==
- isolate()->raw_native_context()->GetInitialJSArrayMap(elements_kind);
+ (elements_kind == HOLEY_SMI_ELEMENTS ||
+ elements_kind == HOLEY_ELEMENTS) &&
+ AllowConvertHoleElementToUndefined(isolate(), receiver_map);
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadElementDH);
return LoadHandler::LoadElement(isolate(), elements_kind,
convert_hole_to_undefined, is_js_array,
@@ -1198,46 +1267,39 @@ bool IsOutOfBoundsAccess(Handle<Object> receiver, uint32_t index) {
KeyedAccessLoadMode GetLoadMode(Isolate* isolate, Handle<Object> receiver,
uint32_t index) {
if (IsOutOfBoundsAccess(receiver, index)) {
- if (receiver->IsJSTypedArray()) {
- // For JSTypedArray we never lookup elements in the prototype chain.
+ DCHECK(receiver->IsHeapObject());
+ Handle<Map> receiver_map(Handle<HeapObject>::cast(receiver)->map(),
+ isolate);
+ if (AllowConvertHoleElementToUndefined(isolate, receiver_map)) {
return LOAD_IGNORE_OUT_OF_BOUNDS;
}
-
- // For other {receiver}s we need to check the "no elements" protector.
- if (isolate->IsNoElementsProtectorIntact()) {
- if (receiver->IsString()) {
- // ToObject(receiver) will have the initial String.prototype.
- return LOAD_IGNORE_OUT_OF_BOUNDS;
- }
- if (receiver->IsJSObject()) {
- // For other JSObjects (including JSArrays) we can only continue if
- // the {receiver}s prototype is either the initial Object.prototype
- // or the initial Array.prototype, which are both guarded by the
- // "no elements" protector checked above.
- Handle<Object> receiver_prototype(
- JSObject::cast(*receiver)->map()->prototype(), isolate);
- if (isolate->IsInAnyContext(*receiver_prototype,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
- isolate->IsInAnyContext(*receiver_prototype,
- Context::INITIAL_OBJECT_PROTOTYPE_INDEX)) {
- return LOAD_IGNORE_OUT_OF_BOUNDS;
- }
- }
- }
}
return STANDARD_LOAD;
}
} // namespace
-MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
- Handle<Object> key) {
- if (MigrateDeprecated(object)) {
- Handle<Object> result;
+MaybeHandle<Object> KeyedLoadIC::RuntimeLoad(Handle<Object> object,
+ Handle<Object> key) {
+ Handle<Object> result;
+
+ if (IsKeyedLoadIC()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result, Runtime::GetObjectProperty(isolate(), object, key),
Object);
- return result;
+ } else {
+ DCHECK(IsKeyedHasIC());
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ Runtime::HasProperty(isolate(), object, key),
+ Object);
+ }
+ return result;
+}
+
+MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
+ Handle<Object> key) {
+ if (MigrateDeprecated(object)) {
+ return RuntimeLoad(object, key);
}
Handle<Object> load_handle;
@@ -1268,11 +1330,7 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
if (!load_handle.is_null()) return load_handle;
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Runtime::GetObjectProperty(isolate(), object, key),
- Object);
- return result;
+ return RuntimeLoad(object, key);
}
bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
@@ -1357,7 +1415,7 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
global->native_context()->script_context_table(), isolate());
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate(), script_contexts, str_name,
+ if (ScriptContextTable::Lookup(isolate(), *script_contexts, *str_name,
&lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate(), script_contexts, lookup_result.context_index);
@@ -1376,8 +1434,9 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
bool use_ic = (state() != NO_FEEDBACK) && FLAG_use_ic;
if (use_ic) {
- if (nexus()->ConfigureLexicalVarMode(lookup_result.context_index,
- lookup_result.slot_index)) {
+ if (nexus()->ConfigureLexicalVarMode(
+ lookup_result.context_index, lookup_result.slot_index,
+ lookup_result.mode == VariableMode::kConst)) {
TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_StoreScriptContextField);
} else {
// Given combination of indices can't be encoded, so use slow stub.
@@ -1402,8 +1461,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
if (MigrateDeprecated(object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::SetProperty(isolate(), object, name, value, language_mode()),
+ isolate(), result, Object::SetProperty(isolate(), object, name, value),
Object);
return result;
}
@@ -1443,8 +1501,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
}
if (use_ic) UpdateCaches(&it, value, store_origin);
- MAYBE_RETURN_NULL(
- Object::SetProperty(&it, value, language_mode(), store_origin));
+ MAYBE_RETURN_NULL(Object::SetProperty(&it, value, store_origin));
return value;
}
@@ -1473,6 +1530,24 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
}
handler = ComputeHandler(lookup);
} else {
+ if (state() == UNINITIALIZED && IsStoreGlobalIC() &&
+ lookup->state() == LookupIterator::INTERCEPTOR) {
+ InterceptorInfo info =
+ lookup->GetHolder<JSObject>()->GetNamedInterceptor();
+ if (!lookup->HolderIsReceiverOrHiddenPrototype() &&
+ !info->getter()->IsUndefined(isolate())) {
+ // Utilize the premonomorphic state for global store ICs that run into
+ // an interceptor because the property doesn't exist yet: once we have
+ // actually set the property we'll know more, so premonomorphism gives
+ // us a chance to record better feedback the second time around.
+ TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_Premonomorphic);
+ ConfigureVectorState(receiver_map());
+ TraceIC("StoreGlobalIC", lookup->name());
+ return;
+ }
+ }
+
set_slow_stub_reason("LookupForWrite said 'false'");
// TODO(marja): change slow_stub to return MaybeObjectHandle.
handler = MaybeObjectHandle(slow_stub());
@@ -1814,7 +1889,6 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
}
-
Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
Handle<Map> map, KeyedAccessStoreMode store_mode) {
switch (store_mode) {
@@ -1952,7 +2026,6 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
}
}
-
static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
uint32_t index, Handle<Object> value) {
bool oob_access = IsOutOfBoundsAccess(receiver, index);
@@ -1997,7 +2070,6 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
}
}
-
MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> key,
Handle<Object> value) {
@@ -2008,7 +2080,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
Runtime::SetObjectProperty(isolate(), object, key, value,
- language_mode(), StoreOrigin::kMaybeKeyed),
+ StoreOrigin::kMaybeKeyed),
Object);
return result;
}
@@ -2083,7 +2155,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<JSArray>::cast(object)->elements()->IsCowArray();
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), store_handle,
- Runtime::SetObjectProperty(isolate(), object, key, value, language_mode(),
+ Runtime::SetObjectProperty(isolate(), object, key, value,
StoreOrigin::kMaybeKeyed),
Object);
@@ -2129,8 +2201,8 @@ void StoreOwnElement(Isolate* isolate, Handle<JSArray> array,
isolate, array, index, &success, LookupIterator::OWN);
DCHECK(success);
- CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE,
- kThrowOnError)
+ CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, value, NONE, Just(ShouldThrow::kThrowOnError))
.FromJust());
}
} // namespace
@@ -2176,20 +2248,6 @@ void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
// Static IC stub generators.
//
//
-namespace {
-
-// TODO(8580): Compute the language mode lazily to avoid the expensive
-// computation of language mode here.
-LanguageMode GetLanguageMode(Handle<FeedbackVector> vector, Context context) {
- LanguageMode language_mode = vector->shared_function_info()->language_mode();
- if (context->scope_info()->language_mode() > language_mode) {
- return context->scope_info()->language_mode();
- }
- return language_mode;
-}
-
-} // namespace
-
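The helper deleted above eagerly computed the effective language mode as the stricter of the function's own mode and the enclosing context's, which TODO(8580) had already flagged as expensive; the rewritten store paths no longer need it at all. Its logic, reduced to a sketch:

#include <algorithm>
#include <cassert>

// Sloppy orders below strict, so "stricter of the two" is just std::max.
enum class LanguageMode { kSloppy = 0, kStrict = 1 };

LanguageMode EffectiveLanguageMode(LanguageMode function_mode,
                                   LanguageMode context_mode) {
  return std::max(function_mode, context_mode);
}

int main() {
  assert(EffectiveLanguageMode(LanguageMode::kSloppy, LanguageMode::kStrict) ==
         LanguageMode::kStrict);
  return 0;
}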
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -2274,7 +2332,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
native_context->script_context_table(), isolate);
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate, script_contexts, name,
+ if (ScriptContextTable::Lookup(isolate, *script_contexts, *name,
&lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate, script_contexts, lookup_result.context_index);
@@ -2334,51 +2392,27 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
Handle<Smi> slot = args.at<Smi>(1);
- Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
Handle<Object> receiver = args.at(3);
Handle<Name> key = args.at<Name>(4);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- FeedbackSlotKind kind = vector->GetKind(vector_slot);
- LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
- if (IsStoreICKind(kind) || IsStoreOwnICKind(kind)) {
- StoreIC ic(isolate, vector, vector_slot, kind, language_mode);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
- } else if (IsStoreGlobalICKind(kind)) {
- DCHECK_EQ(isolate->native_context()->global_proxy(), *receiver);
- receiver = isolate->global_object();
- StoreGlobalIC ic(isolate, vector, vector_slot, kind, language_mode);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
- } else {
- DCHECK(IsKeyedStoreICKind(kind));
- KeyedStoreIC ic(isolate, vector, vector_slot, kind, language_mode);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
+
+ // When there is no feedback vector it is OK to use StoreNamedStrict as
+ // the feedback slot kind. We only need to know whether the kind is
+ // StoreOwnICKind when installing a handler for storing const properties,
+ // and that can only happen when a feedback vector is available.
+ FeedbackSlotKind kind = FeedbackSlotKind::kStoreNamedStrict;
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>();
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ vector = Handle<FeedbackVector>::cast(maybe_vector);
+ kind = vector->GetKind(vector_slot);
}
-}
-RUNTIME_FUNCTION(Runtime_StoreICNoFeedback_Miss) {
- HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
- Handle<Object> value = args.at(0);
- Handle<Object> receiver = args.at(1);
- Handle<Name> key = args.at<Name>(2);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
- CONVERT_INT32_ARG_CHECKED(is_own_property_value, 4);
- NamedPropertyType property_type =
- static_cast<NamedPropertyType>(is_own_property_value);
-
- FeedbackSlotKind kind = (language_mode == LanguageMode::kStrict)
- ? FeedbackSlotKind::kStoreNamedStrict
- : FeedbackSlotKind::kStoreNamedSloppy;
- if (property_type == NamedPropertyType::kOwn) {
- language_mode = LanguageMode::kStrict;
- kind = FeedbackSlotKind::kStoreOwnNamed;
- }
- StoreIC ic(isolate, Handle<FeedbackVector>(), FeedbackSlot(), kind,
- language_mode);
+ DCHECK(IsStoreICKind(kind) || IsStoreOwnICKind(kind));
+ StoreIC ic(isolate, vector, vector_slot, kind);
+ ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
@@ -2393,8 +2427,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Miss) {
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
- LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
- StoreGlobalIC ic(isolate, vector, vector_slot, kind, language_mode);
+ StoreGlobalIC ic(isolate, vector, vector_slot, kind);
Handle<JSGlobalObject> global = isolate->global_object();
ic.UpdateState(global, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
@@ -2402,31 +2435,30 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Miss) {
RUNTIME_FUNCTION(Runtime_StoreGlobalICNoFeedback_Miss) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(2, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
Handle<Name> key = args.at<Name>(1);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 2);
- FeedbackSlotKind kind = (language_mode == LanguageMode::kStrict)
- ? FeedbackSlotKind::kStoreGlobalStrict
- : FeedbackSlotKind::kStoreGlobalSloppy;
- StoreGlobalIC ic(isolate, Handle<FeedbackVector>(), FeedbackSlot(), kind,
- language_mode);
+ // TODO(mythria): Replace StoreGlobalStrict/Sloppy with StoreNamed.
+ StoreGlobalIC ic(isolate, Handle<FeedbackVector>(), FeedbackSlot(),
+ FeedbackSlotKind::kStoreGlobalStrict);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
}
+// TODO(mythria): Remove the feedback vector and slot, since they are not used
+// apart from the DCHECK.
RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
- Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
CONVERT_ARG_HANDLE_CHECKED(String, name, 4);
#ifdef DEBUG
{
Handle<Smi> slot = args.at<Smi>(1);
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
DCHECK(IsStoreGlobalICKind(slot_kind));
@@ -2441,7 +2473,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
native_context->script_context_table(), isolate);
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate, script_contexts, name,
+ if (ScriptContextTable::Lookup(isolate, *script_contexts, *name,
&lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate, script_contexts, lookup_result.context_index);
@@ -2462,11 +2494,9 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
return *value;
}
- LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
RETURN_RESULT_OR_FAILURE(
- isolate,
- Runtime::SetObjectProperty(isolate, global, name, value, language_mode,
- StoreOrigin::kMaybeKeyed));
+ isolate, Runtime::SetObjectProperty(isolate, global, name, value,
+ StoreOrigin::kMaybeKeyed));
}
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
@@ -2475,18 +2505,29 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
Handle<Smi> slot = args.at<Smi>(1);
- Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
-
- LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- FeedbackSlotKind kind = vector->GetKind(vector_slot);
+
+ // When the feedback vector is not valid, the slot can only be of type
+ // StoreKeyed; storing in array literals falls back to
+ // StoreInArrayLiteralIC_Miss instead. This function is also reached from
+ // store handlers installed in feedback vectors. In that case we need to
+ // get the kind from the feedback vector slot, since the handlers are shared
+ // between the StoreKeyed and StoreInArrayLiteral kinds.
+ FeedbackSlotKind kind = FeedbackSlotKind::kStoreKeyedStrict;
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>();
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ vector = Handle<FeedbackVector>::cast(maybe_vector);
+ kind = vector->GetKind(vector_slot);
+ }
// The elements store stubs miss into this function, but they are shared by
// different ICs.
if (IsKeyedStoreICKind(kind)) {
- KeyedStoreIC ic(isolate, vector, vector_slot, kind, language_mode);
+ KeyedStoreIC ic(isolate, vector, vector_slot, kind);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
} else {
@@ -2500,23 +2541,6 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
}
}
-RUNTIME_FUNCTION(Runtime_KeyedStoreICNoFeedback_Miss) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- // Runtime functions don't follow the IC's calling convention.
- Handle<Object> value = args.at(0);
- Handle<Object> receiver = args.at(1);
- Handle<Object> key = args.at(2);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
-
- FeedbackSlotKind kind = (language_mode == LanguageMode::kStrict)
- ? FeedbackSlotKind::kStoreKeyedStrict
- : FeedbackSlotKind::kStoreKeyedSloppy;
- KeyedStoreIC ic(isolate, Handle<FeedbackVector>(), FeedbackSlot(), kind,
- language_mode);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
-}
-
RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
@@ -2541,17 +2565,14 @@ RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Miss) {
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
+ DCHECK_EQ(3, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
- Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
- Handle<Object> object = args.at(3);
- Handle<Object> key = args.at(4);
- LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
+ Handle<Object> object = args.at(1);
+ Handle<Object> key = args.at(2);
RETURN_RESULT_OR_FAILURE(
- isolate,
- Runtime::SetObjectProperty(isolate, object, key, value, language_mode,
- StoreOrigin::kMaybeKeyed));
+ isolate, Runtime::SetObjectProperty(isolate, object, key, value,
+ StoreOrigin::kMaybeKeyed));
}
RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Slow) {
@@ -2588,11 +2609,9 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
return *value;
} else {
DCHECK(IsKeyedStoreICKind(kind) || IsStoreICKind(kind));
- LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
RETURN_RESULT_OR_FAILURE(
- isolate,
- Runtime::SetObjectProperty(isolate, object, key, value, language_mode,
- StoreOrigin::kMaybeKeyed));
+ isolate, Runtime::SetObjectProperty(isolate, object, key, value,
+ StoreOrigin::kMaybeKeyed));
}
}
@@ -2734,22 +2753,18 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<AccessorInfo> info = args.at<AccessorInfo>(2);
Handle<Name> name = args.at<Name>(3);
Handle<Object> value = args.at(4);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
HandleScope scope(isolate);
if (V8_UNLIKELY(FLAG_runtime_stats)) {
RETURN_RESULT_OR_FAILURE(
- isolate,
- Runtime::SetObjectProperty(isolate, receiver, name, value,
- language_mode, StoreOrigin::kMaybeKeyed));
+ isolate, Runtime::SetObjectProperty(isolate, receiver, name, value,
+ StoreOrigin::kMaybeKeyed));
}
DCHECK(info->IsCompatibleReceiver(*receiver));
- ShouldThrow should_throw =
- is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
PropertyCallbackArguments arguments(isolate, info->data(), *receiver, *holder,
- should_throw);
+ Nothing<ShouldThrow>());
arguments.CallAccessorSetter(info, name, value);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
@@ -2765,7 +2780,7 @@ RUNTIME_FUNCTION(Runtime_LoadCallbackProperty) {
DCHECK(info->IsCompatibleReceiver(*receiver));
PropertyCallbackArguments custom_args(isolate, info->data(), *receiver,
- *holder, kThrowOnError);
+ *holder, Just(kThrowOnError));
Handle<Object> result = custom_args.CallAccessorGetter(info, name);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
if (result.is_null()) return ReadOnlyRoots(isolate).undefined_value();
@@ -2813,7 +2828,7 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor(), isolate);
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
- *holder, kDontThrow);
+ *holder, Just(kDontThrow));
Handle<Object> result = arguments.CallNamedGetter(interceptor, name);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
@@ -2848,7 +2863,6 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
isolate, NewReferenceError(MessageTemplate::kNotDefined, it.name()));
}
-
RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
@@ -2859,7 +2873,6 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
Handle<JSObject> receiver = args.at<JSObject>(3);
Handle<Name> name = args.at<Name>(4);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
// TODO(ishell): Cache interceptor_holder in the store handler like we do
// for LoadHandler::kInterceptor case.
@@ -2876,7 +2889,7 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
DCHECK(!interceptor->non_masking());
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
- *receiver, kDontThrow);
+ *receiver, Just(kDontThrow));
Handle<Object> result = arguments.CallNamedSetter(interceptor, name, value);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
@@ -2892,13 +2905,11 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
DCHECK_EQ(LookupIterator::INTERCEPTOR, it.state());
it.Next();
- MAYBE_RETURN(
- Object::SetProperty(&it, value, language_mode, StoreOrigin::kNamed),
- ReadOnlyRoots(isolate).exception());
+ MAYBE_RETURN(Object::SetProperty(&it, value, StoreOrigin::kNamed),
+ ReadOnlyRoots(isolate).exception());
return *value;
}
-
RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
// TODO(verwaest): This should probably get the holder and receiver as input.
HandleScope scope(isolate);
@@ -2909,7 +2920,7 @@ RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
Handle<InterceptorInfo> interceptor(receiver->GetIndexedInterceptor(),
isolate);
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
- *receiver, kDontThrow);
+ *receiver, Just(kDontThrow));
Handle<Object> result = arguments.CallIndexedGetter(interceptor, index);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
@@ -2924,5 +2935,61 @@ RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
return *result;
}
+
+RUNTIME_FUNCTION(Runtime_KeyedHasIC_Miss) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> receiver = args.at(0);
+ Handle<Object> key = args.at(1);
+ Handle<Smi> slot = args.at<Smi>(2);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
+
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>();
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ vector = Handle<FeedbackVector>::cast(maybe_vector);
+ }
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
+ KeyedLoadIC ic(isolate, vector, vector_slot, FeedbackSlotKind::kHasKeyed);
+ ic.UpdateState(receiver, key);
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
+}
+
+RUNTIME_FUNCTION(Runtime_HasElementWithInterceptor) {
+ HandleScope scope(isolate);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ DCHECK_GE(args.smi_at(1), 0);
+ uint32_t index = args.smi_at(1);
+
+ Handle<InterceptorInfo> interceptor(receiver->GetIndexedInterceptor(),
+ isolate);
+ PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
+ *receiver, Just(kDontThrow));
+
+ if (!interceptor->query()->IsUndefined(isolate)) {
+ Handle<Object> result = arguments.CallIndexedQuery(interceptor, index);
+ if (!result.is_null()) {
+ int32_t value;
+ CHECK(result->ToInt32(&value));
+ return value == ABSENT ? ReadOnlyRoots(isolate).false_value()
+ : ReadOnlyRoots(isolate).true_value();
+ }
+ } else if (!interceptor->getter()->IsUndefined(isolate)) {
+ Handle<Object> result = arguments.CallIndexedGetter(interceptor, index);
+ if (!result.is_null()) {
+ return ReadOnlyRoots(isolate).true_value();
+ }
+ }
+
+ LookupIterator it(isolate, receiver, index, receiver);
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it.state());
+ it.Next();
+ Maybe<bool> maybe = JSReceiver::HasProperty(&it);
+ if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
+ return maybe.FromJust() ? ReadOnlyRoots(isolate).true_value()
+ : ReadOnlyRoots(isolate).false_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 9ed469410f..aa6ccd9c76 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -31,10 +31,6 @@ class IC {
static constexpr int kMaxKeyedPolymorphism = 4;
- // A polymorphic IC can handle at most 4 distinct maps before transitioning
- // to megamorphic state.
- static constexpr int kMaxPolymorphicMapCount = 4;
-
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
@@ -54,6 +50,7 @@ class IC {
state_ = RECOMPUTE_HANDLER;
}
+ bool IsAnyHas() const { return IsKeyedHasIC(); }
bool IsAnyLoad() const {
return IsLoadIC() || IsLoadGlobalIC() || IsKeyedLoadIC();
}
@@ -134,9 +131,10 @@ class IC {
bool IsStoreIC() const { return IsStoreICKind(kind_); }
bool IsStoreOwnIC() const { return IsStoreOwnICKind(kind_); }
bool IsKeyedStoreIC() const { return IsKeyedStoreICKind(kind_); }
+ bool IsKeyedHasIC() const { return IsKeyedHasICKind(kind_); }
bool is_keyed() const {
return IsKeyedLoadIC() || IsKeyedStoreIC() ||
- IsStoreInArrayLiteralICKind(kind_);
+ IsStoreInArrayLiteralICKind(kind_) || IsKeyedHasIC();
}
bool ShouldRecomputeHandler(Handle<String> name);
@@ -204,13 +202,12 @@ class IC {
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
-
class LoadIC : public IC {
public:
LoadIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
FeedbackSlotKind kind)
: IC(isolate, vector, slot, kind) {
- DCHECK(IsAnyLoad());
+ DCHECK(IsAnyLoad() || IsAnyHas());
}
static bool ShouldThrowReferenceError(FeedbackSlotKind kind) {
@@ -226,7 +223,8 @@ class LoadIC : public IC {
protected:
virtual Handle<Code> slow_stub() const {
- return BUILTIN_CODE(isolate(), LoadIC_Slow);
+ return IsAnyHas() ? BUILTIN_CODE(isolate(), HasIC_Slow)
+ : BUILTIN_CODE(isolate(), LoadIC_Slow);
}
// Update the inline cache and the global stub cache based on the
@@ -264,6 +262,9 @@ class KeyedLoadIC : public LoadIC {
Handle<Object> key);
protected:
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> RuntimeLoad(Handle<Object> object,
+ Handle<Object> key);
+
// receiver is HeapObject because it could be a String or a JSObject
void UpdateLoadElement(Handle<HeapObject> receiver,
KeyedAccessLoadMode load_mode);
@@ -284,17 +285,14 @@ class KeyedLoadIC : public LoadIC {
bool CanChangeToAllowOutOfBounds(Handle<Map> receiver_map);
};
-
class StoreIC : public IC {
public:
StoreIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
- FeedbackSlotKind kind, LanguageMode language_mode)
- : IC(isolate, vector, slot, kind), language_mode_(language_mode) {
+ FeedbackSlotKind kind)
+ : IC(isolate, vector, slot, kind) {
DCHECK(IsAnyStore());
}
- LanguageMode language_mode() const { return language_mode_; }
-
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
StoreOrigin store_origin = StoreOrigin::kNamed);
@@ -314,11 +312,6 @@ class StoreIC : public IC {
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
StoreOrigin store_origin);
- // TODO(v8:8580): Instead of storing the language mode, compute it lazily
- // from the closure and context when needed. We only need it when throwing
- // exceptions, so it is OK to be slow.
- LanguageMode language_mode_;
-
private:
MaybeObjectHandle ComputeHandler(LookupIterator* lookup);
@@ -328,9 +321,8 @@ class StoreIC : public IC {
class StoreGlobalIC : public StoreIC {
public:
StoreGlobalIC(Isolate* isolate, Handle<FeedbackVector> vector,
- FeedbackSlot slot, FeedbackSlotKind kind,
- LanguageMode language_mode)
- : StoreIC(isolate, vector, slot, kind, language_mode) {}
+ FeedbackSlot slot, FeedbackSlotKind kind)
+ : StoreIC(isolate, vector, slot, kind) {}
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(Handle<Name> name,
Handle<Object> value);
@@ -343,10 +335,8 @@ class StoreGlobalIC : public StoreIC {
enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
-
enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };
-
class KeyedStoreIC : public StoreIC {
public:
KeyedAccessStoreMode GetKeyedAccessStoreMode() {
@@ -354,9 +344,8 @@ class KeyedStoreIC : public StoreIC {
}
KeyedStoreIC(Isolate* isolate, Handle<FeedbackVector> vector,
- FeedbackSlot slot, FeedbackSlotKind kind,
- LanguageMode language_mode)
- : StoreIC(isolate, vector, slot, kind, language_mode) {}
+ FeedbackSlot slot, FeedbackSlotKind kind)
+ : StoreIC(isolate, vector, slot, kind) {}
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(Handle<Object> object,
Handle<Object> name,
@@ -390,8 +379,7 @@ class StoreInArrayLiteralIC : public KeyedStoreIC {
StoreInArrayLiteralIC(Isolate* isolate, Handle<FeedbackVector> vector,
FeedbackSlot slot)
: KeyedStoreIC(isolate, vector, slot,
- FeedbackSlotKind::kStoreInArrayLiteral,
- LanguageMode::kStrict) {
+ FeedbackSlotKind::kStoreInArrayLiteral) {
DCHECK(IsStoreInArrayLiteralICKind(kind()));
}
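
[Editor's note] The LanguageMode removals throughout this hunk implement the deleted TODO(v8:8580) above: compute the language mode lazily, only on the path that throws. A minimal self-contained sketch of that shape, with assumed helper names (only LanguageMode mirrors V8's enum):

#include <stdexcept>

enum class LanguageMode { kSloppy, kStrict };

// Assumed helper: in V8 this would walk the closure/context chain.
LanguageMode ComputeLanguageModeLazily() { return LanguageMode::kStrict; }

// Shape of the new Runtime::kThrowTypeErrorIfStrict path: store ICs no longer
// carry a LanguageMode member; only the throwing slow path computes it.
void ThrowTypeErrorIfStrict(const char* message) {
  if (ComputeLanguageModeLazily() == LanguageMode::kStrict) {
    throw std::runtime_error(message);  // stand-in for a JS TypeError
  }
  // In sloppy mode the failed store is silently ignored.
}
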
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 2a8bd37130..650007ab1d 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -60,8 +60,7 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
// Helper that is used by the public KeyedStoreGeneric and by SetProperty.
void KeyedStoreGeneric(TNode<Context> context, TNode<Object> receiver,
TNode<Object> key, TNode<Object> value,
- Maybe<LanguageMode> language_mode, TNode<Smi> slot,
- TNode<FeedbackVector> vector);
+ Maybe<LanguageMode> language_mode);
void EmitGenericElementStore(Node* receiver, Node* receiver_map,
Node* instance_type, Node* intptr_index,
@@ -912,29 +911,21 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&not_callable);
{
- bool handle_strict = true;
- Label strict(this);
LanguageMode language_mode;
if (maybe_language_mode.To(&language_mode)) {
if (language_mode == LanguageMode::kStrict) {
- Goto(&strict);
- } else {
- handle_strict = false;
- exit_point->Return(p->value);
- }
- } else {
- BranchIfStrictMode(p->vector, p->slot, &strict);
- exit_point->Return(p->value);
- }
-
- if (handle_strict) {
- BIND(&strict);
- {
exit_point->ReturnCallRuntime(
Runtime::kThrowTypeError, p->context,
SmiConstant(MessageTemplate::kNoSetterInCallback), p->name,
var_accessor_holder.value());
+ } else {
+ exit_point->Return(p->value);
}
+ } else {
+ CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context,
+ SmiConstant(MessageTemplate::kNoSetterInCallback),
+ p->name, var_accessor_holder.value());
+ exit_point->Return(p->value);
}
}
}
@@ -943,28 +934,21 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
if (!ShouldReconfigureExisting()) {
BIND(&readonly);
{
- bool handle_strict = true;
- Label strict(this);
LanguageMode language_mode;
if (maybe_language_mode.To(&language_mode)) {
if (language_mode == LanguageMode::kStrict) {
- Goto(&strict);
+ Node* type = Typeof(p->receiver);
+ ThrowTypeError(p->context, MessageTemplate::kStrictReadOnlyProperty,
+ p->name, type, p->receiver);
} else {
- handle_strict = false;
exit_point->Return(p->value);
}
} else {
- BranchIfStrictMode(p->vector, p->slot, &strict);
+ CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context,
+ SmiConstant(MessageTemplate::kStrictReadOnlyProperty),
+ p->name, Typeof(p->receiver), p->receiver);
exit_point->Return(p->value);
}
- if (handle_strict) {
- BIND(&strict);
- {
- Node* type = Typeof(p->receiver);
- ThrowTypeError(p->context, MessageTemplate::kStrictReadOnlyProperty,
- p->name, type, p->receiver);
- }
- }
}
}
}
@@ -972,8 +956,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
// Helper that is used by the public KeyedStoreGeneric and by SetProperty.
void KeyedStoreGenericAssembler::KeyedStoreGeneric(
TNode<Context> context, TNode<Object> receiver, TNode<Object> key,
- TNode<Object> value, Maybe<LanguageMode> language_mode, TNode<Smi> slot,
- TNode<FeedbackVector> vector) {
+ TNode<Object> value, Maybe<LanguageMode> language_mode) {
TVARIABLE(IntPtrT, var_index);
TVARIABLE(Object, var_unique, key);
Label if_index(this), if_unique_name(this), not_internalized(this),
@@ -999,8 +982,8 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
BIND(&if_unique_name);
{
Comment("key is unique name");
- StoreICParameters p(context, receiver, var_unique.value(), value, slot,
- vector);
+ StoreICParameters p(context, receiver, var_unique.value(), value, nullptr,
+ nullptr);
ExitPoint direct_exit(this);
EmitGenericPropertyStore(CAST(receiver), receiver_map, &p, &direct_exit,
&slow, language_mode);
@@ -1020,19 +1003,8 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
{
if (IsKeyedStore()) {
Comment("KeyedStoreGeneric_slow");
- if (language_mode.IsJust()) {
- TailCallRuntime(Runtime::kSetKeyedProperty, context, receiver, key,
- value, SmiConstant(language_mode.FromJust()));
- } else {
- TVARIABLE(Smi, var_language_mode, SmiConstant(LanguageMode::kStrict));
- Label call_runtime(this);
- BranchIfStrictMode(vector, slot, &call_runtime);
- var_language_mode = SmiConstant(LanguageMode::kSloppy);
- Goto(&call_runtime);
- BIND(&call_runtime);
- TailCallRuntime(Runtime::kSetKeyedProperty, context, receiver, key,
- value, var_language_mode.value());
- }
+ TailCallRuntime(Runtime::kSetKeyedProperty, context, receiver, key,
+ value);
} else {
DCHECK(IsStoreInLiteral());
TailCallRuntime(Runtime::kStoreDataPropertyInLiteral, context, receiver,
@@ -1042,17 +1014,14 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
}
void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
- typedef StoreWithVectorDescriptor Descriptor;
+ typedef StoreDescriptor Descriptor;
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- KeyedStoreGeneric(context, receiver, name, value, Nothing<LanguageMode>(),
- slot, vector);
+ KeyedStoreGeneric(context, receiver, name, value, Nothing<LanguageMode>());
}
void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
@@ -1060,8 +1029,7 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
TNode<Object> key,
TNode<Object> value,
LanguageMode language_mode) {
- KeyedStoreGeneric(context, receiver, key, value, Just(language_mode),
- TNode<Smi>(), TNode<FeedbackVector>());
+ KeyedStoreGeneric(context, receiver, key, value, Just(language_mode));
}
void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
@@ -1074,7 +1042,7 @@ void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
Node* vector = Parameter(Descriptor::kVector);
Node* context = Parameter(Descriptor::kContext);
- Label miss(this);
+ Label miss(this, Label::kDeferred), store_property(this);
GotoIf(TaggedIsSmi(receiver), &miss);
Node* receiver_map = LoadMap(receiver);
@@ -1084,19 +1052,29 @@ void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
GotoIf(IsSpecialReceiverInstanceType(instance_type), &miss);
// Optimistically write the state transition to the vector.
+ GotoIf(IsUndefined(vector), &store_property);
StoreFeedbackVectorSlot(vector, slot,
LoadRoot(RootIndex::kpremonomorphic_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
+ Goto(&store_property);
- StoreICParameters p(context, receiver, name, value, slot, vector);
- EmitGenericPropertyStore(receiver, receiver_map, &p, &miss);
+ BIND(&store_property);
+ {
+ StoreICParameters p(context, receiver, name, value, slot, vector);
+ EmitGenericPropertyStore(receiver, receiver_map, &p, &miss);
+ }
BIND(&miss);
{
+ Label call_runtime(this);
// Undo the optimistic state transition.
+ GotoIf(IsUndefined(vector), &call_runtime);
StoreFeedbackVectorSlot(vector, slot,
LoadRoot(RootIndex::kuninitialized_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
+ Goto(&call_runtime);
+
+ BIND(&call_runtime);
TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector,
receiver, name);
}
@@ -1127,7 +1105,7 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
unique_name, value);
} else {
CallRuntime(Runtime::kSetKeyedProperty, context, receiver, unique_name,
- value, SmiConstant(language_mode));
+ value);
}
Goto(&done);
}
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 8567799f3f..89a34ef80c 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -7,7 +7,7 @@
#include "src/ast/ast.h"
#include "src/base/bits.h"
#include "src/counters.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h" // For InYoungGeneration().
#include "src/ic/ic-inl.h"
namespace v8 {
@@ -36,7 +36,8 @@ int StubCache::PrimaryOffset(Name name, Map map) {
// Using only the low bits in 64-bit mode is unlikely to increase the
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
- uint32_t map_low32bits = static_cast<uint32_t>(map.ptr());
+ uint32_t map_low32bits =
+ static_cast<uint32_t>(map.ptr() ^ (map.ptr() >> kMapKeyShift));
// Base the offset on a simple combination of name and map.
uint32_t key = map_low32bits + field;
return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
@@ -67,8 +68,8 @@ bool CommonStubCacheChecks(StubCache* stub_cache, Name name, Map map,
MaybeObject handler) {
// Validate that the name and handler do not move on scavenge, and that we
// can use identity checks instead of structural equality checks.
- DCHECK(!Heap::InNewSpace(name));
- DCHECK(!Heap::InNewSpace(handler));
+ DCHECK(!Heap::InYoungGeneration(name));
+ DCHECK(!Heap::InYoungGeneration(handler));
DCHECK(name->IsUniqueName());
DCHECK(name->HasHashCode());
if (handler->ptr() != kNullAddress) DCHECK(IC::IsHandler(handler));
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 0b6f9d43d1..7eaa31bd95 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -89,6 +89,10 @@ class StubCache {
static const int kSecondaryTableBits = 9;
static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
+ // We compute the hash code for a map as follows:
+ // <code> = <address> ^ (<address> >> kMapKeyShift)
+ static const int kMapKeyShift = kPrimaryTableBits + kCacheIndexShift;
+
// Some magic number used in the secondary hash computation.
static const int kSecondaryMagic = 0xb16ca6e5;
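
[Editor's note] A self-contained sketch of the primary-offset mixing the new comment documents: the map address is xored with its own higher bits before masking, so maps whose low bits collide still spread across the table. Only the xor-shift line mirrors the patch; the constants are assumptions for the example (just kSecondaryTableBits = 9 is visible in this hunk):

#include <cstdint>

constexpr int kCacheIndexShift = 3;    // assumed entry granularity
constexpr int kPrimaryTableBits = 11;  // assumed log2 of the table size
constexpr int kPrimaryTableSize = 1 << kPrimaryTableBits;
constexpr int kMapKeyShift = kPrimaryTableBits + kCacheIndexShift;

// <code> = <address> ^ (<address> >> kMapKeyShift), per the comment above.
uint32_t PrimaryOffset(uint32_t name_field, uintptr_t map_ptr) {
  uint32_t map_low32bits =
      static_cast<uint32_t>(map_ptr ^ (map_ptr >> kMapKeyShift));
  uint32_t key = map_low32bits + name_field;
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}
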
diff --git a/deps/v8/src/identity-map.h b/deps/v8/src/identity-map.h
index 5c8f37df46..f55f2a9501 100644
--- a/deps/v8/src/identity-map.h
+++ b/deps/v8/src/identity-map.h
@@ -94,7 +94,7 @@ class IdentityMap : public IdentityMapBase {
explicit IdentityMap(Heap* heap,
AllocationPolicy allocator = AllocationPolicy())
: IdentityMapBase(heap), allocator_(allocator) {}
- ~IdentityMap() override { Clear(); };
+ ~IdentityMap() override { Clear(); }
// Searches this map for the given key using the object's address
// as the identity, returning:
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 10d476d9ee..f59f158edd 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -63,22 +63,35 @@ inspector_protocol_generate("protocol_generated_sources") {
config("inspector_config") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- if (is_component_build) {
- defines = [ "BUILDING_V8_SHARED" ]
- }
+
+ configs = [ "../../:internal_config" ]
+ include_dirs = [ "../../include" ]
+}
+
+v8_header_set("inspector_test_headers") {
+ configs = [ ":inspector_config" ]
+
+ public_deps = [
+ "../..:v8_headers",
+ ]
+
+ sources = [
+ "test-interface.h",
+ ]
}
v8_source_set("inspector") {
deps = [
":protocol_generated_sources",
+ "../..:v8_version",
]
- configs = [ ":inspector_config" ]
- include_dirs = [
- "../..",
- "../../include",
- "$target_gen_dir/../..",
- "$target_gen_dir/../../include",
+
+ public_deps = [
+ ":inspector_test_headers",
]
+
+ configs = [ ":inspector_config" ]
+
sources = rebase_path(_protocol_generated, ".", target_gen_dir)
sources += [
"../../include/v8-inspector-protocol.h",
@@ -100,7 +113,6 @@ v8_source_set("inspector") {
"string-util.cc",
"string-util.h",
"test-interface.cc",
- "test-interface.h",
"v8-console-agent-impl.cc",
"v8-console-agent-impl.h",
"v8-console-message.cc",
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 19a30512ce..bce94d5f4f 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -5,10 +5,10 @@ include_rules = [
"+src/base/compiler-specific.h",
"+src/base/macros.h",
"+src/base/logging.h",
+ "+src/base/v8-fallthrough.h",
"+src/base/platform/platform.h",
"+src/base/platform/mutex.h",
"+src/conversions.h",
- "+src/flags.h",
"+src/v8memory.h",
"+src/inspector",
"+src/tracing",
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index 848dee532a..a42adce782 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -9,9 +9,6 @@ yangguo@chromium.org
# Changes to remote debugging protocol require devtools review to
# ensure backwards compatibility and commitment to maintain.
-per-file js_protocol.json=set noparent
-per-file js_protocol.json=dgozman@chromium.org
-per-file js_protocol.json=pfeldman@chromium.org
per-file js_protocol.pdl=set noparent
per-file js_protocol.pdl=dgozman@chromium.org
per-file js_protocol.pdl=pfeldman@chromium.org
diff --git a/deps/v8/src/inspector/custom-preview.cc b/deps/v8/src/inspector/custom-preview.cc
index 63d1d74ab8..f56562341c 100644
--- a/deps/v8/src/inspector/custom-preview.cc
+++ b/deps/v8/src/inspector/custom-preview.cc
@@ -120,7 +120,7 @@ bool substituteObjectTags(int sessionId, const String16& groupName,
return false;
}
v8::Local<v8::Value> jsonWrapper;
- String16 serialized = wrapper->serialize();
+ String16 serialized = wrapper->toJSON();
if (!v8::JSON::Parse(context, toV8String(isolate, serialized))
.ToLocal(&jsonWrapper)) {
reportError(context, tryCatch, "cannot wrap value");
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index f1eb4fecf3..cae56d0082 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -585,7 +585,7 @@ Response InjectedScript::resolveCallArgument(
if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
String16 value;
if (callArgument->hasValue()) {
- value = "(" + callArgument->getValue(nullptr)->serialize() + ")";
+ value = "(" + callArgument->getValue(nullptr)->toJSONString() + ")";
} else {
String16 unserializableValue = callArgument->getUnserializableValue("");
// Protect against potential identifier resolution for NaN and Infinity.
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index 92f64c2cb9..8098aa5cac 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -64,6 +64,7 @@ InspectedContext::InspectedContext(V8InspectorImpl* inspector,
v8::WeakCallbackType::kParameter);
if (!info.hasMemoryOnConsole) return;
v8::Context::Scope contextScope(info.context);
+ v8::HandleScope handleScope(info.context->GetIsolate());
v8::Local<v8::Object> global = info.context->Global();
v8::Local<v8::Value> console;
if (global->Get(info.context, toV8String(m_inspector->isolate(), "console"))
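
[Editor's note] The one-line HandleScope addition matters because the callback goes on to create v8::Local handles (`global`, `console`), which require a live scope. A hypothetical embedder-side sketch of the pattern, not taken from the patch:

#include <v8.h>

struct ContextInfo {};  // hypothetical embedder bookkeeping

// Any code that materializes v8::Local handles must run inside a live
// HandleScope; without one, handle creation aborts.
void OnContextCollected(const v8::WeakCallbackInfo<ContextInfo>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  v8::HandleScope handle_scope(isolate);  // owns the Locals created below
  v8::Local<v8::Context> context = isolate->GetCurrentContext();
  v8::Local<v8::Object> global = context->Global();
  (void)global;
}
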
diff --git a/deps/v8/src/inspector/inspector_protocol_config.json b/deps/v8/src/inspector/inspector_protocol_config.json
index 82927b44e6..c4aa29ce99 100644
--- a/deps/v8/src/inspector/inspector_protocol_config.json
+++ b/deps/v8/src/inspector/inspector_protocol_config.json
@@ -1,6 +1,6 @@
{
"protocol": {
- "path": "js_protocol.json",
+ "path": "js_protocol.pdl",
"package": "src/inspector/protocol",
"output": "protocol",
"namespace": ["v8_inspector", "protocol"],
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
deleted file mode 100644
index ea6c995088..0000000000
--- a/deps/v8/src/inspector/js_protocol.json
+++ /dev/null
@@ -1,3144 +0,0 @@
-{
- "version": {
- "major": "1",
- "minor": "3"
- },
- "domains": [
- {
- "domain": "Console",
- "description": "This domain is deprecated - use Runtime or Log instead.",
- "deprecated": true,
- "dependencies": [
- "Runtime"
- ],
- "types": [
- {
- "id": "ConsoleMessage",
- "description": "Console message.",
- "type": "object",
- "properties": [
- {
- "name": "source",
- "description": "Message source.",
- "type": "string",
- "enum": [
- "xml",
- "javascript",
- "network",
- "console-api",
- "storage",
- "appcache",
- "rendering",
- "security",
- "other",
- "deprecation",
- "worker"
- ]
- },
- {
- "name": "level",
- "description": "Message severity.",
- "type": "string",
- "enum": [
- "log",
- "warning",
- "error",
- "debug",
- "info"
- ]
- },
- {
- "name": "text",
- "description": "Message text.",
- "type": "string"
- },
- {
- "name": "url",
- "description": "URL of the message origin.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "line",
- "description": "Line number in the resource that generated this message (1-based).",
- "optional": true,
- "type": "integer"
- },
- {
- "name": "column",
- "description": "Column number in the resource that generated this message (1-based).",
- "optional": true,
- "type": "integer"
- }
- ]
- }
- ],
- "commands": [
- {
- "name": "clearMessages",
- "description": "Does nothing."
- },
- {
- "name": "disable",
- "description": "Disables console domain, prevents further console messages from being reported to the client."
- },
- {
- "name": "enable",
- "description": "Enables console domain, sends the messages collected so far to the client by means of the\n`messageAdded` notification."
- }
- ],
- "events": [
- {
- "name": "messageAdded",
- "description": "Issued when new console message is added.",
- "parameters": [
- {
- "name": "message",
- "description": "Console message that has been added.",
- "$ref": "ConsoleMessage"
- }
- ]
- }
- ]
- },
- {
- "domain": "Debugger",
- "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing\nbreakpoints, stepping through execution, exploring stack traces, etc.",
- "dependencies": [
- "Runtime"
- ],
- "types": [
- {
- "id": "BreakpointId",
- "description": "Breakpoint identifier.",
- "type": "string"
- },
- {
- "id": "CallFrameId",
- "description": "Call frame identifier.",
- "type": "string"
- },
- {
- "id": "Location",
- "description": "Location in the source code.",
- "type": "object",
- "properties": [
- {
- "name": "scriptId",
- "description": "Script identifier as reported in the `Debugger.scriptParsed`.",
- "$ref": "Runtime.ScriptId"
- },
- {
- "name": "lineNumber",
- "description": "Line number in the script (0-based).",
- "type": "integer"
- },
- {
- "name": "columnNumber",
- "description": "Column number in the script (0-based).",
- "optional": true,
- "type": "integer"
- }
- ]
- },
- {
- "id": "ScriptPosition",
- "description": "Location in the source code.",
- "experimental": true,
- "type": "object",
- "properties": [
- {
- "name": "lineNumber",
- "type": "integer"
- },
- {
- "name": "columnNumber",
- "type": "integer"
- }
- ]
- },
- {
- "id": "CallFrame",
- "description": "JavaScript call frame. Array of call frames form the call stack.",
- "type": "object",
- "properties": [
- {
- "name": "callFrameId",
- "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused.",
- "$ref": "CallFrameId"
- },
- {
- "name": "functionName",
- "description": "Name of the JavaScript function called on this call frame.",
- "type": "string"
- },
- {
- "name": "functionLocation",
- "description": "Location in the source code.",
- "optional": true,
- "$ref": "Location"
- },
- {
- "name": "location",
- "description": "Location in the source code.",
- "$ref": "Location"
- },
- {
- "name": "url",
- "description": "JavaScript script name or url.",
- "type": "string"
- },
- {
- "name": "scopeChain",
- "description": "Scope chain for this call frame.",
- "type": "array",
- "items": {
- "$ref": "Scope"
- }
- },
- {
- "name": "this",
- "description": "`this` object for this call frame.",
- "$ref": "Runtime.RemoteObject"
- },
- {
- "name": "returnValue",
- "description": "The value being returned, if the function is at return point.",
- "optional": true,
- "$ref": "Runtime.RemoteObject"
- }
- ]
- },
- {
- "id": "Scope",
- "description": "Scope description.",
- "type": "object",
- "properties": [
- {
- "name": "type",
- "description": "Scope type.",
- "type": "string",
- "enum": [
- "global",
- "local",
- "with",
- "closure",
- "catch",
- "block",
- "script",
- "eval",
- "module"
- ]
- },
- {
- "name": "object",
- "description": "Object representing the scope. For `global` and `with` scopes it represents the actual\nobject; for the rest of the scopes, it is artificial transient object enumerating scope\nvariables as its properties.",
- "$ref": "Runtime.RemoteObject"
- },
- {
- "name": "name",
- "optional": true,
- "type": "string"
- },
- {
- "name": "startLocation",
- "description": "Location in the source code where scope starts",
- "optional": true,
- "$ref": "Location"
- },
- {
- "name": "endLocation",
- "description": "Location in the source code where scope ends",
- "optional": true,
- "$ref": "Location"
- }
- ]
- },
- {
- "id": "SearchMatch",
- "description": "Search match for resource.",
- "type": "object",
- "properties": [
- {
- "name": "lineNumber",
- "description": "Line number in resource content.",
- "type": "number"
- },
- {
- "name": "lineContent",
- "description": "Line with match content.",
- "type": "string"
- }
- ]
- },
- {
- "id": "BreakLocation",
- "type": "object",
- "properties": [
- {
- "name": "scriptId",
- "description": "Script identifier as reported in the `Debugger.scriptParsed`.",
- "$ref": "Runtime.ScriptId"
- },
- {
- "name": "lineNumber",
- "description": "Line number in the script (0-based).",
- "type": "integer"
- },
- {
- "name": "columnNumber",
- "description": "Column number in the script (0-based).",
- "optional": true,
- "type": "integer"
- },
- {
- "name": "type",
- "optional": true,
- "type": "string",
- "enum": [
- "debuggerStatement",
- "call",
- "return"
- ]
- }
- ]
- }
- ],
- "commands": [
- {
- "name": "continueToLocation",
- "description": "Continues execution until specific location is reached.",
- "parameters": [
- {
- "name": "location",
- "description": "Location to continue to.",
- "$ref": "Location"
- },
- {
- "name": "targetCallFrames",
- "optional": true,
- "type": "string",
- "enum": [
- "any",
- "current"
- ]
- }
- ]
- },
- {
- "name": "disable",
- "description": "Disables debugger for given page."
- },
- {
- "name": "enable",
- "description": "Enables debugger for the given page. Clients should not assume that the debugging has been\nenabled until the result for this command is received.",
- "returns": [
- {
- "name": "debuggerId",
- "description": "Unique identifier of the debugger.",
- "experimental": true,
- "$ref": "Runtime.UniqueDebuggerId"
- }
- ]
- },
- {
- "name": "evaluateOnCallFrame",
- "description": "Evaluates expression on a given call frame.",
- "parameters": [
- {
- "name": "callFrameId",
- "description": "Call frame identifier to evaluate on.",
- "$ref": "CallFrameId"
- },
- {
- "name": "expression",
- "description": "Expression to evaluate.",
- "type": "string"
- },
- {
- "name": "objectGroup",
- "description": "String object group name to put result into (allows rapid releasing resulting object handles\nusing `releaseObjectGroup`).",
- "optional": true,
- "type": "string"
- },
- {
- "name": "includeCommandLineAPI",
- "description": "Specifies whether command line API should be available to the evaluated expression, defaults\nto false.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "silent",
- "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "returnByValue",
- "description": "Whether the result is expected to be a JSON object that should be sent by value.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "generatePreview",
- "description": "Whether preview should be generated for the result.",
- "experimental": true,
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "throwOnSideEffect",
- "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "timeout",
- "description": "Terminate execution after timing out (number of milliseconds).",
- "experimental": true,
- "optional": true,
- "$ref": "Runtime.TimeDelta"
- }
- ],
- "returns": [
- {
- "name": "result",
- "description": "Object wrapper for the evaluation result.",
- "$ref": "Runtime.RemoteObject"
- },
- {
- "name": "exceptionDetails",
- "description": "Exception details.",
- "optional": true,
- "$ref": "Runtime.ExceptionDetails"
- }
- ]
- },
- {
- "name": "getPossibleBreakpoints",
- "description": "Returns possible locations for breakpoint. scriptId in start and end range locations should be\nthe same.",
- "parameters": [
- {
- "name": "start",
- "description": "Start of range to search possible breakpoint locations in.",
- "$ref": "Location"
- },
- {
- "name": "end",
- "description": "End of range to search possible breakpoint locations in (excluding). When not specified, end\nof scripts is used as end of range.",
- "optional": true,
- "$ref": "Location"
- },
- {
- "name": "restrictToFunction",
- "description": "Only consider locations which are in the same (non-nested) function as start.",
- "optional": true,
- "type": "boolean"
- }
- ],
- "returns": [
- {
- "name": "locations",
- "description": "List of the possible breakpoint locations.",
- "type": "array",
- "items": {
- "$ref": "BreakLocation"
- }
- }
- ]
- },
- {
- "name": "getScriptSource",
- "description": "Returns source for the script with given id.",
- "parameters": [
- {
- "name": "scriptId",
- "description": "Id of the script to get source for.",
- "$ref": "Runtime.ScriptId"
- }
- ],
- "returns": [
- {
- "name": "scriptSource",
- "description": "Script source.",
- "type": "string"
- }
- ]
- },
- {
- "name": "getStackTrace",
- "description": "Returns stack trace with given `stackTraceId`.",
- "experimental": true,
- "parameters": [
- {
- "name": "stackTraceId",
- "$ref": "Runtime.StackTraceId"
- }
- ],
- "returns": [
- {
- "name": "stackTrace",
- "$ref": "Runtime.StackTrace"
- }
- ]
- },
- {
- "name": "pause",
- "description": "Stops on the next JavaScript statement."
- },
- {
- "name": "pauseOnAsyncCall",
- "experimental": true,
- "parameters": [
- {
- "name": "parentStackTraceId",
- "description": "Debugger will pause when async call with given stack trace is started.",
- "$ref": "Runtime.StackTraceId"
- }
- ]
- },
- {
- "name": "removeBreakpoint",
- "description": "Removes JavaScript breakpoint.",
- "parameters": [
- {
- "name": "breakpointId",
- "$ref": "BreakpointId"
- }
- ]
- },
- {
- "name": "restartFrame",
- "description": "Restarts particular call frame from the beginning.",
- "parameters": [
- {
- "name": "callFrameId",
- "description": "Call frame identifier to evaluate on.",
- "$ref": "CallFrameId"
- }
- ],
- "returns": [
- {
- "name": "callFrames",
- "description": "New stack trace.",
- "type": "array",
- "items": {
- "$ref": "CallFrame"
- }
- },
- {
- "name": "asyncStackTrace",
- "description": "Async stack trace, if any.",
- "optional": true,
- "$ref": "Runtime.StackTrace"
- },
- {
- "name": "asyncStackTraceId",
- "description": "Async stack trace, if any.",
- "experimental": true,
- "optional": true,
- "$ref": "Runtime.StackTraceId"
- }
- ]
- },
- {
- "name": "resume",
- "description": "Resumes JavaScript execution."
- },
- {
- "name": "searchInContent",
- "description": "Searches for given string in script content.",
- "parameters": [
- {
- "name": "scriptId",
- "description": "Id of the script to search in.",
- "$ref": "Runtime.ScriptId"
- },
- {
- "name": "query",
- "description": "String to search for.",
- "type": "string"
- },
- {
- "name": "caseSensitive",
- "description": "If true, search is case sensitive.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "isRegex",
- "description": "If true, treats string parameter as regex.",
- "optional": true,
- "type": "boolean"
- }
- ],
- "returns": [
- {
- "name": "result",
- "description": "List of search matches.",
- "type": "array",
- "items": {
- "$ref": "SearchMatch"
- }
- }
- ]
- },
- {
- "name": "setAsyncCallStackDepth",
- "description": "Enables or disables async call stacks tracking.",
- "parameters": [
- {
- "name": "maxDepth",
- "description": "Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async\ncall stacks (default).",
- "type": "integer"
- }
- ]
- },
- {
- "name": "setBlackboxPatterns",
- "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in\nscripts with url matching one of the patterns. VM will try to leave blackboxed script by\nperforming 'step in' several times, finally resorting to 'step out' if unsuccessful.",
- "experimental": true,
- "parameters": [
- {
- "name": "patterns",
- "description": "Array of regexps that will be used to check script url for blackbox state.",
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- ]
- },
- {
- "name": "setBlackboxedRanges",
- "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted\nscripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful.\nPositions array contains positions where blackbox state is changed. First interval isn't\nblackboxed. Array should be sorted.",
- "experimental": true,
- "parameters": [
- {
- "name": "scriptId",
- "description": "Id of the script.",
- "$ref": "Runtime.ScriptId"
- },
- {
- "name": "positions",
- "type": "array",
- "items": {
- "$ref": "ScriptPosition"
- }
- }
- ]
- },
- {
- "name": "setBreakpoint",
- "description": "Sets JavaScript breakpoint at a given location.",
- "parameters": [
- {
- "name": "location",
- "description": "Location to set breakpoint in.",
- "$ref": "Location"
- },
- {
- "name": "condition",
- "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the\nbreakpoint if this expression evaluates to true.",
- "optional": true,
- "type": "string"
- }
- ],
- "returns": [
- {
- "name": "breakpointId",
- "description": "Id of the created breakpoint for further reference.",
- "$ref": "BreakpointId"
- },
- {
- "name": "actualLocation",
- "description": "Location this breakpoint resolved into.",
- "$ref": "Location"
- }
- ]
- },
- {
- "name": "setBreakpointByUrl",
- "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this\ncommand is issued, all existing parsed scripts will have breakpoints resolved and returned in\n`locations` property. Further matching script parsing will result in subsequent\n`breakpointResolved` events issued. This logical breakpoint will survive page reloads.",
- "parameters": [
- {
- "name": "lineNumber",
- "description": "Line number to set breakpoint at.",
- "type": "integer"
- },
- {
- "name": "url",
- "description": "URL of the resources to set breakpoint on.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "urlRegex",
- "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either `url` or\n`urlRegex` must be specified.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "scriptHash",
- "description": "Script hash of the resources to set breakpoint on.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "columnNumber",
- "description": "Offset in the line to set breakpoint at.",
- "optional": true,
- "type": "integer"
- },
- {
- "name": "condition",
- "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the\nbreakpoint if this expression evaluates to true.",
- "optional": true,
- "type": "string"
- }
- ],
- "returns": [
- {
- "name": "breakpointId",
- "description": "Id of the created breakpoint for further reference.",
- "$ref": "BreakpointId"
- },
- {
- "name": "locations",
- "description": "List of the locations this breakpoint resolved into upon addition.",
- "type": "array",
- "items": {
- "$ref": "Location"
- }
- }
- ]
- },
- {
- "name": "setBreakpointOnFunctionCall",
- "description": "Sets JavaScript breakpoint before each call to the given function.\nIf another function was created from the same source as a given one,\ncalling it will also trigger the breakpoint.",
- "experimental": true,
- "parameters": [
- {
- "name": "objectId",
- "description": "Function object id.",
- "$ref": "Runtime.RemoteObjectId"
- },
- {
- "name": "condition",
- "description": "Expression to use as a breakpoint condition. When specified, debugger will\nstop on the breakpoint if this expression evaluates to true.",
- "optional": true,
- "type": "string"
- }
- ],
- "returns": [
- {
- "name": "breakpointId",
- "description": "Id of the created breakpoint for further reference.",
- "$ref": "BreakpointId"
- }
- ]
- },
- {
- "name": "setBreakpointsActive",
- "description": "Activates / deactivates all breakpoints on the page.",
- "parameters": [
- {
- "name": "active",
- "description": "New value for breakpoints active state.",
- "type": "boolean"
- }
- ]
- },
- {
- "name": "setPauseOnExceptions",
- "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or\nno exceptions. Initial pause on exceptions state is `none`.",
- "parameters": [
- {
- "name": "state",
- "description": "Pause on exceptions mode.",
- "type": "string",
- "enum": [
- "none",
- "uncaught",
- "all"
- ]
- }
- ]
- },
- {
- "name": "setReturnValue",
- "description": "Changes return value in top frame. Available only at return break position.",
- "experimental": true,
- "parameters": [
- {
- "name": "newValue",
- "description": "New return value.",
- "$ref": "Runtime.CallArgument"
- }
- ]
- },
- {
- "name": "setScriptSource",
- "description": "Edits JavaScript source live.",
- "parameters": [
- {
- "name": "scriptId",
- "description": "Id of the script to edit.",
- "$ref": "Runtime.ScriptId"
- },
- {
- "name": "scriptSource",
- "description": "New content of the script.",
- "type": "string"
- },
- {
- "name": "dryRun",
- "description": "If true the change will not actually be applied. Dry run may be used to get result\ndescription without actually modifying the code.",
- "optional": true,
- "type": "boolean"
- }
- ],
- "returns": [
- {
- "name": "callFrames",
- "description": "New stack trace in case editing has happened while VM was stopped.",
- "optional": true,
- "type": "array",
- "items": {
- "$ref": "CallFrame"
- }
- },
- {
- "name": "stackChanged",
- "description": "Whether current call stack was modified after applying the changes.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "asyncStackTrace",
- "description": "Async stack trace, if any.",
- "optional": true,
- "$ref": "Runtime.StackTrace"
- },
- {
- "name": "asyncStackTraceId",
- "description": "Async stack trace, if any.",
- "experimental": true,
- "optional": true,
- "$ref": "Runtime.StackTraceId"
- },
- {
- "name": "exceptionDetails",
- "description": "Exception details if any.",
- "optional": true,
- "$ref": "Runtime.ExceptionDetails"
- }
- ]
- },
- {
- "name": "setSkipAllPauses",
- "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc).",
- "parameters": [
- {
- "name": "skip",
- "description": "New value for skip pauses state.",
- "type": "boolean"
- }
- ]
- },
- {
- "name": "setVariableValue",
- "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be\nmutated manually.",
- "parameters": [
- {
- "name": "scopeNumber",
- "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch'\nscope types are allowed. Other scopes could be manipulated manually.",
- "type": "integer"
- },
- {
- "name": "variableName",
- "description": "Variable name.",
- "type": "string"
- },
- {
- "name": "newValue",
- "description": "New variable value.",
- "$ref": "Runtime.CallArgument"
- },
- {
- "name": "callFrameId",
- "description": "Id of callframe that holds variable.",
- "$ref": "CallFrameId"
- }
- ]
- },
- {
- "name": "stepInto",
- "description": "Steps into the function call.",
- "parameters": [
- {
- "name": "breakOnAsyncCall",
- "description": "Debugger will issue additional Debugger.paused notification if any async task is scheduled\nbefore next pause.",
- "experimental": true,
- "optional": true,
- "type": "boolean"
- }
- ]
- },
- {
- "name": "stepOut",
- "description": "Steps out of the function call."
- },
- {
- "name": "stepOver",
- "description": "Steps over the statement."
- }
- ],
- "events": [
- {
- "name": "breakpointResolved",
- "description": "Fired when breakpoint is resolved to an actual script and location.",
- "parameters": [
- {
- "name": "breakpointId",
- "description": "Breakpoint unique identifier.",
- "$ref": "BreakpointId"
- },
- {
- "name": "location",
- "description": "Actual breakpoint location.",
- "$ref": "Location"
- }
- ]
- },
- {
- "name": "paused",
- "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria.",
- "parameters": [
- {
- "name": "callFrames",
- "description": "Call stack the virtual machine stopped on.",
- "type": "array",
- "items": {
- "$ref": "CallFrame"
- }
- },
- {
- "name": "reason",
- "description": "Pause reason.",
- "type": "string",
- "enum": [
- "XHR",
- "DOM",
- "EventListener",
- "exception",
- "assert",
- "debugCommand",
- "promiseRejection",
- "OOM",
- "other",
- "ambiguous"
- ]
- },
- {
- "name": "data",
- "description": "Object containing break-specific auxiliary properties.",
- "optional": true,
- "type": "object"
- },
- {
- "name": "hitBreakpoints",
- "description": "Hit breakpoints IDs",
- "optional": true,
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- {
- "name": "asyncStackTrace",
- "description": "Async stack trace, if any.",
- "optional": true,
- "$ref": "Runtime.StackTrace"
- },
- {
- "name": "asyncStackTraceId",
- "description": "Async stack trace, if any.",
- "experimental": true,
- "optional": true,
- "$ref": "Runtime.StackTraceId"
- },
- {
- "name": "asyncCallStackTraceId",
- "description": "Just scheduled async call will have this stack trace as parent stack during async execution.\nThis field is available only after `Debugger.stepInto` call with `breakOnAsynCall` flag.",
- "experimental": true,
- "optional": true,
- "$ref": "Runtime.StackTraceId"
- }
- ]
- },
- {
- "name": "resumed",
- "description": "Fired when the virtual machine resumed execution."
- },
- {
- "name": "scriptFailedToParse",
- "description": "Fired when virtual machine fails to parse the script.",
- "parameters": [
- {
- "name": "scriptId",
- "description": "Identifier of the script parsed.",
- "$ref": "Runtime.ScriptId"
- },
- {
- "name": "url",
- "description": "URL or name of the script parsed (if any).",
- "type": "string"
- },
- {
- "name": "startLine",
- "description": "Line offset of the script within the resource with given URL (for script tags).",
- "type": "integer"
- },
- {
- "name": "startColumn",
- "description": "Column offset of the script within the resource with given URL.",
- "type": "integer"
- },
- {
- "name": "endLine",
- "description": "Last line of the script.",
- "type": "integer"
- },
- {
- "name": "endColumn",
- "description": "Length of the last line of the script.",
- "type": "integer"
- },
- {
- "name": "executionContextId",
- "description": "Specifies script creation context.",
- "$ref": "Runtime.ExecutionContextId"
- },
- {
- "name": "hash",
- "description": "Content hash of the script.",
- "type": "string"
- },
- {
- "name": "executionContextAuxData",
- "description": "Embedder-specific auxiliary data.",
- "optional": true,
- "type": "object"
- },
- {
- "name": "sourceMapURL",
- "description": "URL of source map associated with script (if any).",
- "optional": true,
- "type": "string"
- },
- {
- "name": "hasSourceURL",
- "description": "True, if this script has sourceURL.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "isModule",
- "description": "True, if this script is ES6 module.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "length",
-                            "description": "Length of this script.",
- "optional": true,
- "type": "integer"
- },
- {
- "name": "stackTrace",
-                            "description": "Top JavaScript stack frame from which the script parsed event was triggered, if available.",
- "experimental": true,
- "optional": true,
- "$ref": "Runtime.StackTrace"
- }
- ]
- },
- {
- "name": "scriptParsed",
- "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected\nscripts upon enabling debugger.",
- "parameters": [
- {
- "name": "scriptId",
- "description": "Identifier of the script parsed.",
- "$ref": "Runtime.ScriptId"
- },
- {
- "name": "url",
- "description": "URL or name of the script parsed (if any).",
- "type": "string"
- },
- {
- "name": "startLine",
- "description": "Line offset of the script within the resource with given URL (for script tags).",
- "type": "integer"
- },
- {
- "name": "startColumn",
- "description": "Column offset of the script within the resource with given URL.",
- "type": "integer"
- },
- {
- "name": "endLine",
- "description": "Last line of the script.",
- "type": "integer"
- },
- {
- "name": "endColumn",
- "description": "Length of the last line of the script.",
- "type": "integer"
- },
- {
- "name": "executionContextId",
- "description": "Specifies script creation context.",
- "$ref": "Runtime.ExecutionContextId"
- },
- {
- "name": "hash",
- "description": "Content hash of the script.",
- "type": "string"
- },
- {
- "name": "executionContextAuxData",
- "description": "Embedder-specific auxiliary data.",
- "optional": true,
- "type": "object"
- },
- {
- "name": "isLiveEdit",
- "description": "True, if this script is generated as a result of the live edit operation.",
- "experimental": true,
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "sourceMapURL",
- "description": "URL of source map associated with script (if any).",
- "optional": true,
- "type": "string"
- },
- {
- "name": "hasSourceURL",
- "description": "True, if this script has sourceURL.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "isModule",
- "description": "True, if this script is ES6 module.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "length",
-                            "description": "Length of this script.",
- "optional": true,
- "type": "integer"
- },
- {
- "name": "stackTrace",
-                            "description": "Top JavaScript stack frame from which the script parsed event was triggered, if available.",
- "experimental": true,
- "optional": true,
- "$ref": "Runtime.StackTrace"
- }
- ]
- }
- ]
- },
- {
- "domain": "HeapProfiler",
- "experimental": true,
- "dependencies": [
- "Runtime"
- ],
- "types": [
- {
- "id": "HeapSnapshotObjectId",
- "description": "Heap snapshot object id.",
- "type": "string"
- },
- {
- "id": "SamplingHeapProfileNode",
- "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
- "type": "object",
- "properties": [
- {
- "name": "callFrame",
- "description": "Function location.",
- "$ref": "Runtime.CallFrame"
- },
- {
- "name": "selfSize",
- "description": "Allocations size in bytes for the node excluding children.",
- "type": "number"
- },
- {
- "name": "id",
- "description": "Node id. Ids are unique across all profiles collected between startSampling and stopSampling.",
- "type": "integer"
- },
- {
- "name": "children",
- "description": "Child nodes.",
- "type": "array",
- "items": {
- "$ref": "SamplingHeapProfileNode"
- }
- }
- ]
- },
- {
- "id": "SamplingHeapProfileSample",
- "description": "A single sample from a sampling profile.",
- "type": "object",
- "properties": [
- {
- "name": "size",
- "description": "Allocation size in bytes attributed to the sample.",
- "type": "number"
- },
- {
- "name": "nodeId",
- "description": "Id of the corresponding profile tree node.",
- "type": "integer"
- },
- {
- "name": "ordinal",
- "description": "Time-ordered sample ordinal number. It is unique across all profiles retrieved\nbetween startSampling and stopSampling.",
- "type": "number"
- }
- ]
- },
- {
- "id": "SamplingHeapProfile",
- "description": "Sampling profile.",
- "type": "object",
- "properties": [
- {
- "name": "head",
- "$ref": "SamplingHeapProfileNode"
- },
- {
- "name": "samples",
- "type": "array",
- "items": {
- "$ref": "SamplingHeapProfileSample"
- }
- }
- ]
- }
- ],
- "commands": [
- {
- "name": "addInspectedHeapObject",
-                    "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details\non $x functions).",
- "parameters": [
- {
- "name": "heapObjectId",
- "description": "Heap snapshot object id to be accessible by means of $x command line API.",
- "$ref": "HeapSnapshotObjectId"
- }
- ]
- },
- {
- "name": "collectGarbage"
- },
- {
- "name": "disable"
- },
- {
- "name": "enable"
- },
- {
- "name": "getHeapObjectId",
- "parameters": [
- {
- "name": "objectId",
- "description": "Identifier of the object to get heap object id for.",
- "$ref": "Runtime.RemoteObjectId"
- }
- ],
- "returns": [
- {
- "name": "heapSnapshotObjectId",
- "description": "Id of the heap snapshot object corresponding to the passed remote object id.",
- "$ref": "HeapSnapshotObjectId"
- }
- ]
- },
- {
- "name": "getObjectByHeapObjectId",
- "parameters": [
- {
- "name": "objectId",
- "$ref": "HeapSnapshotObjectId"
- },
- {
- "name": "objectGroup",
- "description": "Symbolic group name that can be used to release multiple objects.",
- "optional": true,
- "type": "string"
- }
- ],
- "returns": [
- {
- "name": "result",
- "description": "Evaluation result.",
- "$ref": "Runtime.RemoteObject"
- }
- ]
- },
- {
- "name": "getSamplingProfile",
- "returns": [
- {
- "name": "profile",
- "description": "Return the sampling profile being collected.",
- "$ref": "SamplingHeapProfile"
- }
- ]
- },
- {
- "name": "startSampling",
- "parameters": [
- {
- "name": "samplingInterval",
- "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The\ndefault value is 32768 bytes.",
- "optional": true,
- "type": "number"
- }
- ]
- },
- {
- "name": "startTrackingHeapObjects",
- "parameters": [
- {
- "name": "trackAllocations",
- "optional": true,
- "type": "boolean"
- }
- ]
- },
- {
- "name": "stopSampling",
- "returns": [
- {
- "name": "profile",
- "description": "Recorded sampling heap profile.",
- "$ref": "SamplingHeapProfile"
- }
- ]
- },
- {
- "name": "stopTrackingHeapObjects",
- "parameters": [
- {
- "name": "reportProgress",
-                            "description": "If true, 'reportHeapSnapshotProgress' events will be generated while the snapshot is being taken\nwhen the tracking is stopped.",
- "optional": true,
- "type": "boolean"
- }
- ]
- },
- {
- "name": "takeHeapSnapshot",
- "parameters": [
- {
- "name": "reportProgress",
-                            "description": "If true, 'reportHeapSnapshotProgress' events will be generated while the snapshot is being taken.",
- "optional": true,
- "type": "boolean"
- }
- ]
- }
- ],
- "events": [
- {
- "name": "addHeapSnapshotChunk",
- "parameters": [
- {
- "name": "chunk",
- "type": "string"
- }
- ]
- },
- {
- "name": "heapStatsUpdate",
-                    "description": "If heap object tracking has been started then the backend may send an update for one or more fragments.",
- "parameters": [
- {
- "name": "statsUpdate",
- "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment\nindex, the second integer is a total count of objects for the fragment, the third integer is\na total size of the objects for the fragment.",
- "type": "array",
- "items": {
- "type": "integer"
- }
- }
- ]
- },
- {
- "name": "lastSeenObjectId",
-                    "description": "If heap object tracking has been started then the backend regularly sends a current value for the last\nseen object id and corresponding timestamp. If there were changes in the heap since the last event\nthen one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
- "parameters": [
- {
- "name": "lastSeenObjectId",
- "type": "integer"
- },
- {
- "name": "timestamp",
- "type": "number"
- }
- ]
- },
- {
- "name": "reportHeapSnapshotProgress",
- "parameters": [
- {
- "name": "done",
- "type": "integer"
- },
- {
- "name": "total",
- "type": "integer"
- },
- {
- "name": "finished",
- "optional": true,
- "type": "boolean"
- }
- ]
- },
- {
- "name": "resetProfiles"
- }
- ]
- },
- {
- "domain": "Profiler",
- "dependencies": [
- "Runtime",
- "Debugger"
- ],
- "types": [
- {
- "id": "ProfileNode",
- "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
- "type": "object",
- "properties": [
- {
- "name": "id",
- "description": "Unique id of the node.",
- "type": "integer"
- },
- {
- "name": "callFrame",
- "description": "Function location.",
- "$ref": "Runtime.CallFrame"
- },
- {
- "name": "hitCount",
- "description": "Number of samples where this node was on top of the call stack.",
- "optional": true,
- "type": "integer"
- },
- {
- "name": "children",
- "description": "Child node ids.",
- "optional": true,
- "type": "array",
- "items": {
- "type": "integer"
- }
- },
- {
- "name": "deoptReason",
-                            "description": "The reason the function is not optimized. The function may be deoptimized or marked as don't\noptimize.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "positionTicks",
- "description": "An array of source position ticks.",
- "optional": true,
- "type": "array",
- "items": {
- "$ref": "PositionTickInfo"
- }
- }
- ]
- },
- {
- "id": "Profile",
- "description": "Profile.",
- "type": "object",
- "properties": [
- {
- "name": "nodes",
- "description": "The list of profile nodes. First item is the root node.",
- "type": "array",
- "items": {
- "$ref": "ProfileNode"
- }
- },
- {
- "name": "startTime",
- "description": "Profiling start timestamp in microseconds.",
- "type": "number"
- },
- {
- "name": "endTime",
- "description": "Profiling end timestamp in microseconds.",
- "type": "number"
- },
- {
- "name": "samples",
-                            "description": "Ids of the top nodes of the samples.",
- "optional": true,
- "type": "array",
- "items": {
- "type": "integer"
- }
- },
- {
- "name": "timeDeltas",
- "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the\nprofile startTime.",
- "optional": true,
- "type": "array",
- "items": {
- "type": "integer"
- }
- }
- ]
- },
- {
- "id": "PositionTickInfo",
- "description": "Specifies a number of samples attributed to a certain source position.",
- "type": "object",
- "properties": [
- {
- "name": "line",
- "description": "Source line number (1-based).",
- "type": "integer"
- },
- {
- "name": "ticks",
- "description": "Number of samples attributed to the source line.",
- "type": "integer"
- }
- ]
- },
- {
- "id": "CoverageRange",
- "description": "Coverage data for a source range.",
- "type": "object",
- "properties": [
- {
- "name": "startOffset",
- "description": "JavaScript script source offset for the range start.",
- "type": "integer"
- },
- {
- "name": "endOffset",
- "description": "JavaScript script source offset for the range end.",
- "type": "integer"
- },
- {
- "name": "count",
- "description": "Collected execution count of the source range.",
- "type": "integer"
- }
- ]
- },
- {
- "id": "FunctionCoverage",
- "description": "Coverage data for a JavaScript function.",
- "type": "object",
- "properties": [
- {
- "name": "functionName",
- "description": "JavaScript function name.",
- "type": "string"
- },
- {
- "name": "ranges",
- "description": "Source ranges inside the function with coverage data.",
- "type": "array",
- "items": {
- "$ref": "CoverageRange"
- }
- },
- {
- "name": "isBlockCoverage",
- "description": "Whether coverage data for this function has block granularity.",
- "type": "boolean"
- }
- ]
- },
- {
- "id": "ScriptCoverage",
- "description": "Coverage data for a JavaScript script.",
- "type": "object",
- "properties": [
- {
- "name": "scriptId",
- "description": "JavaScript script id.",
- "$ref": "Runtime.ScriptId"
- },
- {
- "name": "url",
- "description": "JavaScript script name or url.",
- "type": "string"
- },
- {
- "name": "functions",
-                            "description": "Functions contained in the script that have coverage data.",
- "type": "array",
- "items": {
- "$ref": "FunctionCoverage"
- }
- }
- ]
- },
- {
- "id": "TypeObject",
- "description": "Describes a type collected during runtime.",
- "experimental": true,
- "type": "object",
- "properties": [
- {
- "name": "name",
- "description": "Name of a type collected with type profiling.",
- "type": "string"
- }
- ]
- },
- {
- "id": "TypeProfileEntry",
- "description": "Source offset and types for a parameter or return value.",
- "experimental": true,
- "type": "object",
- "properties": [
- {
- "name": "offset",
- "description": "Source offset of the parameter or end of function for return values.",
- "type": "integer"
- },
- {
- "name": "types",
- "description": "The types for this parameter or return value.",
- "type": "array",
- "items": {
- "$ref": "TypeObject"
- }
- }
- ]
- },
- {
- "id": "ScriptTypeProfile",
- "description": "Type profile data collected during runtime for a JavaScript script.",
- "experimental": true,
- "type": "object",
- "properties": [
- {
- "name": "scriptId",
- "description": "JavaScript script id.",
- "$ref": "Runtime.ScriptId"
- },
- {
- "name": "url",
- "description": "JavaScript script name or url.",
- "type": "string"
- },
- {
- "name": "entries",
- "description": "Type profile entries for parameters and return values of the functions in the script.",
- "type": "array",
- "items": {
- "$ref": "TypeProfileEntry"
- }
- }
- ]
- }
- ],
- "commands": [
- {
- "name": "disable"
- },
- {
- "name": "enable"
- },
- {
- "name": "getBestEffortCoverage",
- "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to\ngarbage collection.",
- "returns": [
- {
- "name": "result",
- "description": "Coverage data for the current isolate.",
- "type": "array",
- "items": {
- "$ref": "ScriptCoverage"
- }
- }
- ]
- },
- {
- "name": "setSamplingInterval",
-                    "description": "Changes CPU profiler sampling interval. Must be called before CPU profile recording is started.",
- "parameters": [
- {
- "name": "interval",
- "description": "New sampling interval in microseconds.",
- "type": "integer"
- }
- ]
- },
- {
- "name": "start"
- },
- {
- "name": "startPreciseCoverage",
- "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code\ncoverage may be incomplete. Enabling prevents running optimized code and resets execution\ncounters.",
- "parameters": [
- {
- "name": "callCount",
- "description": "Collect accurate call counts beyond simple 'covered' or 'not covered'.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "detailed",
- "description": "Collect block-based coverage.",
- "optional": true,
- "type": "boolean"
- }
- ]
- },
- {
- "name": "startTypeProfile",
- "description": "Enable type profile.",
- "experimental": true
- },
- {
- "name": "stop",
- "returns": [
- {
- "name": "profile",
- "description": "Recorded profile.",
- "$ref": "Profile"
- }
- ]
- },
- {
- "name": "stopPreciseCoverage",
- "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows\nexecuting optimized code."
- },
- {
- "name": "stopTypeProfile",
- "description": "Disable type profile. Disabling releases type profile data collected so far.",
- "experimental": true
- },
- {
- "name": "takePreciseCoverage",
-                    "description": "Collect coverage data for the current isolate, and reset execution counters. Precise code\ncoverage needs to have started.",
- "returns": [
- {
- "name": "result",
- "description": "Coverage data for the current isolate.",
- "type": "array",
- "items": {
- "$ref": "ScriptCoverage"
- }
- }
- ]
- },
- {
- "name": "takeTypeProfile",
- "description": "Collect type profile.",
- "experimental": true,
- "returns": [
- {
- "name": "result",
- "description": "Type profile for all scripts since startTypeProfile() was turned on.",
- "type": "array",
- "items": {
- "$ref": "ScriptTypeProfile"
- }
- }
- ]
- }
- ],
- "events": [
- {
- "name": "consoleProfileFinished",
- "parameters": [
- {
- "name": "id",
- "type": "string"
- },
- {
- "name": "location",
- "description": "Location of console.profileEnd().",
- "$ref": "Debugger.Location"
- },
- {
- "name": "profile",
- "$ref": "Profile"
- },
- {
- "name": "title",
- "description": "Profile title passed as an argument to console.profile().",
- "optional": true,
- "type": "string"
- }
- ]
- },
- {
- "name": "consoleProfileStarted",
- "description": "Sent when new profile recording is started using console.profile() call.",
- "parameters": [
- {
- "name": "id",
- "type": "string"
- },
- {
- "name": "location",
- "description": "Location of console.profile().",
- "$ref": "Debugger.Location"
- },
- {
- "name": "title",
- "description": "Profile title passed as an argument to console.profile().",
- "optional": true,
- "type": "string"
- }
- ]
- }
- ]
- },
- {
- "domain": "Runtime",
-            "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects.\nEvaluation results are returned as mirror objects that expose object type, string representation\nand a unique identifier that can be used for further object reference. Original objects are\nmaintained in memory unless they are either explicitly released or are released along with the\nother objects in their object group.",
- "types": [
- {
- "id": "ScriptId",
- "description": "Unique script identifier.",
- "type": "string"
- },
- {
- "id": "RemoteObjectId",
- "description": "Unique object identifier.",
- "type": "string"
- },
- {
- "id": "UnserializableValue",
- "description": "Primitive value which cannot be JSON-stringified. Includes values `-0`, `NaN`, `Infinity`,\n`-Infinity`, and bigint literals.",
- "type": "string"
- },
- {
- "id": "RemoteObject",
- "description": "Mirror object referencing original JavaScript object.",
- "type": "object",
- "properties": [
- {
- "name": "type",
- "description": "Object type.",
- "type": "string",
- "enum": [
- "object",
- "function",
- "undefined",
- "string",
- "number",
- "boolean",
- "symbol",
- "bigint"
- ]
- },
- {
- "name": "subtype",
- "description": "Object subtype hint. Specified for `object` type values only.",
- "optional": true,
- "type": "string",
- "enum": [
- "array",
- "null",
- "node",
- "regexp",
- "date",
- "map",
- "set",
- "weakmap",
- "weakset",
- "iterator",
- "generator",
- "error",
- "proxy",
- "promise",
- "typedarray",
- "arraybuffer",
- "dataview"
- ]
- },
- {
- "name": "className",
- "description": "Object class (constructor) name. Specified for `object` type values only.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "value",
- "description": "Remote object value in case of primitive values or JSON values (if it was requested).",
- "optional": true,
- "type": "any"
- },
- {
- "name": "unserializableValue",
-                            "description": "Primitive value which cannot be JSON-stringified does not have `value`, but gets this\nproperty.",
- "optional": true,
- "$ref": "UnserializableValue"
- },
- {
- "name": "description",
- "description": "String representation of the object.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "objectId",
- "description": "Unique object identifier (for non-primitive values).",
- "optional": true,
- "$ref": "RemoteObjectId"
- },
- {
- "name": "preview",
- "description": "Preview containing abbreviated property values. Specified for `object` type values only.",
- "experimental": true,
- "optional": true,
- "$ref": "ObjectPreview"
- },
- {
- "name": "customPreview",
- "experimental": true,
- "optional": true,
- "$ref": "CustomPreview"
- }
- ]
- },
- {
- "id": "CustomPreview",
- "experimental": true,
- "type": "object",
- "properties": [
- {
- "name": "header",
-                            "description": "The JSON-stringified result of formatter.header(object, config) call.\nIt contains a JsonML array that represents the RemoteObject.",
- "type": "string"
- },
- {
- "name": "bodyGetterId",
-                            "description": "If formatter returns true as a result of formatter.hasBody call then bodyGetterId will\ncontain RemoteObjectId for the function that returns the result of formatter.body(object, config) call.\nThe result value is a JsonML array.",
- "optional": true,
- "$ref": "RemoteObjectId"
- }
- ]
- },
- {
- "id": "ObjectPreview",
- "description": "Object containing abbreviated remote object value.",
- "experimental": true,
- "type": "object",
- "properties": [
- {
- "name": "type",
- "description": "Object type.",
- "type": "string",
- "enum": [
- "object",
- "function",
- "undefined",
- "string",
- "number",
- "boolean",
- "symbol",
- "bigint"
- ]
- },
- {
- "name": "subtype",
- "description": "Object subtype hint. Specified for `object` type values only.",
- "optional": true,
- "type": "string",
- "enum": [
- "array",
- "null",
- "node",
- "regexp",
- "date",
- "map",
- "set",
- "weakmap",
- "weakset",
- "iterator",
- "generator",
- "error"
- ]
- },
- {
- "name": "description",
- "description": "String representation of the object.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "overflow",
- "description": "True iff some of the properties or entries of the original object did not fit.",
- "type": "boolean"
- },
- {
- "name": "properties",
- "description": "List of the properties.",
- "type": "array",
- "items": {
- "$ref": "PropertyPreview"
- }
- },
- {
- "name": "entries",
- "description": "List of the entries. Specified for `map` and `set` subtype values only.",
- "optional": true,
- "type": "array",
- "items": {
- "$ref": "EntryPreview"
- }
- }
- ]
- },
- {
- "id": "PropertyPreview",
- "experimental": true,
- "type": "object",
- "properties": [
- {
- "name": "name",
- "description": "Property name.",
- "type": "string"
- },
- {
- "name": "type",
- "description": "Object type. Accessor means that the property itself is an accessor property.",
- "type": "string",
- "enum": [
- "object",
- "function",
- "undefined",
- "string",
- "number",
- "boolean",
- "symbol",
- "accessor",
- "bigint"
- ]
- },
- {
- "name": "value",
- "description": "User-friendly property value string.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "valuePreview",
- "description": "Nested value preview.",
- "optional": true,
- "$ref": "ObjectPreview"
- },
- {
- "name": "subtype",
- "description": "Object subtype hint. Specified for `object` type values only.",
- "optional": true,
- "type": "string",
- "enum": [
- "array",
- "null",
- "node",
- "regexp",
- "date",
- "map",
- "set",
- "weakmap",
- "weakset",
- "iterator",
- "generator",
- "error"
- ]
- }
- ]
- },
- {
- "id": "EntryPreview",
- "experimental": true,
- "type": "object",
- "properties": [
- {
- "name": "key",
- "description": "Preview of the key. Specified for map-like collection entries.",
- "optional": true,
- "$ref": "ObjectPreview"
- },
- {
- "name": "value",
- "description": "Preview of the value.",
- "$ref": "ObjectPreview"
- }
- ]
- },
- {
- "id": "PropertyDescriptor",
- "description": "Object property descriptor.",
- "type": "object",
- "properties": [
- {
- "name": "name",
- "description": "Property name or symbol description.",
- "type": "string"
- },
- {
- "name": "value",
- "description": "The value associated with the property.",
- "optional": true,
- "$ref": "RemoteObject"
- },
- {
- "name": "writable",
- "description": "True if the value associated with the property may be changed (data descriptors only).",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "get",
- "description": "A function which serves as a getter for the property, or `undefined` if there is no getter\n(accessor descriptors only).",
- "optional": true,
- "$ref": "RemoteObject"
- },
- {
- "name": "set",
- "description": "A function which serves as a setter for the property, or `undefined` if there is no setter\n(accessor descriptors only).",
- "optional": true,
- "$ref": "RemoteObject"
- },
- {
- "name": "configurable",
- "description": "True if the type of this property descriptor may be changed and if the property may be\ndeleted from the corresponding object.",
- "type": "boolean"
- },
- {
- "name": "enumerable",
- "description": "True if this property shows up during enumeration of the properties on the corresponding\nobject.",
- "type": "boolean"
- },
- {
- "name": "wasThrown",
- "description": "True if the result was thrown during the evaluation.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "isOwn",
-                            "description": "True if the property is owned by the object.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "symbol",
- "description": "Property symbol object, if the property is of the `symbol` type.",
- "optional": true,
- "$ref": "RemoteObject"
- }
- ]
- },
- {
- "id": "InternalPropertyDescriptor",
- "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
- "type": "object",
- "properties": [
- {
- "name": "name",
- "description": "Conventional property name.",
- "type": "string"
- },
- {
- "name": "value",
- "description": "The value associated with the property.",
- "optional": true,
- "$ref": "RemoteObject"
- }
- ]
- },
- {
- "id": "CallArgument",
-                    "description": "Represents function call argument. Either remote object id `objectId`, primitive `value`,\nunserializable primitive value, or neither of them (for undefined) should be specified.",
- "type": "object",
- "properties": [
- {
- "name": "value",
- "description": "Primitive value or serializable javascript object.",
- "optional": true,
- "type": "any"
- },
- {
- "name": "unserializableValue",
-                            "description": "Primitive value which cannot be JSON-stringified.",
- "optional": true,
- "$ref": "UnserializableValue"
- },
- {
- "name": "objectId",
- "description": "Remote object handle.",
- "optional": true,
- "$ref": "RemoteObjectId"
- }
- ]
- },
- {
- "id": "ExecutionContextId",
- "description": "Id of an execution context.",
- "type": "integer"
- },
- {
- "id": "ExecutionContextDescription",
- "description": "Description of an isolated world.",
- "type": "object",
- "properties": [
- {
- "name": "id",
- "description": "Unique id of the execution context. It can be used to specify in which execution context\nscript evaluation should be performed.",
- "$ref": "ExecutionContextId"
- },
- {
- "name": "origin",
- "description": "Execution context origin.",
- "type": "string"
- },
- {
- "name": "name",
- "description": "Human readable name describing given context.",
- "type": "string"
- },
- {
- "name": "auxData",
- "description": "Embedder-specific auxiliary data.",
- "optional": true,
- "type": "object"
- }
- ]
- },
- {
- "id": "ExceptionDetails",
- "description": "Detailed information about exception (or error) that was thrown during script compilation or\nexecution.",
- "type": "object",
- "properties": [
- {
- "name": "exceptionId",
- "description": "Exception id.",
- "type": "integer"
- },
- {
- "name": "text",
- "description": "Exception text, which should be used together with exception object when available.",
- "type": "string"
- },
- {
- "name": "lineNumber",
- "description": "Line number of the exception location (0-based).",
- "type": "integer"
- },
- {
- "name": "columnNumber",
- "description": "Column number of the exception location (0-based).",
- "type": "integer"
- },
- {
- "name": "scriptId",
- "description": "Script ID of the exception location.",
- "optional": true,
- "$ref": "ScriptId"
- },
- {
- "name": "url",
- "description": "URL of the exception location, to be used when the script was not reported.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "stackTrace",
- "description": "JavaScript stack trace if available.",
- "optional": true,
- "$ref": "StackTrace"
- },
- {
- "name": "exception",
- "description": "Exception object if available.",
- "optional": true,
- "$ref": "RemoteObject"
- },
- {
- "name": "executionContextId",
- "description": "Identifier of the context where exception happened.",
- "optional": true,
- "$ref": "ExecutionContextId"
- }
- ]
- },
- {
- "id": "Timestamp",
- "description": "Number of milliseconds since epoch.",
- "type": "number"
- },
- {
- "id": "TimeDelta",
- "description": "Number of milliseconds.",
- "type": "number"
- },
- {
- "id": "CallFrame",
- "description": "Stack entry for runtime errors and assertions.",
- "type": "object",
- "properties": [
- {
- "name": "functionName",
- "description": "JavaScript function name.",
- "type": "string"
- },
- {
- "name": "scriptId",
- "description": "JavaScript script id.",
- "$ref": "ScriptId"
- },
- {
- "name": "url",
- "description": "JavaScript script name or url.",
- "type": "string"
- },
- {
- "name": "lineNumber",
- "description": "JavaScript script line number (0-based).",
- "type": "integer"
- },
- {
- "name": "columnNumber",
- "description": "JavaScript script column number (0-based).",
- "type": "integer"
- }
- ]
- },
- {
- "id": "StackTrace",
- "description": "Call frames for assertions or error messages.",
- "type": "object",
- "properties": [
- {
- "name": "description",
- "description": "String label of this stack trace. For async traces this may be a name of the function that\ninitiated the async call.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "callFrames",
-                            "description": "Call frames of the stack trace.",
- "type": "array",
- "items": {
- "$ref": "CallFrame"
- }
- },
- {
- "name": "parent",
- "description": "Asynchronous JavaScript stack trace that preceded this stack, if available.",
- "optional": true,
- "$ref": "StackTrace"
- },
- {
- "name": "parentId",
- "description": "Asynchronous JavaScript stack trace that preceded this stack, if available.",
- "experimental": true,
- "optional": true,
- "$ref": "StackTraceId"
- }
- ]
- },
- {
- "id": "UniqueDebuggerId",
- "description": "Unique identifier of current debugger.",
- "experimental": true,
- "type": "string"
- },
- {
- "id": "StackTraceId",
-                    "description": "If `debuggerId` is set, the stack trace comes from another debugger and can be resolved there. This\nallows tracking cross-debugger calls. See `Runtime.StackTrace` and `Debugger.paused` for usages.",
- "experimental": true,
- "type": "object",
- "properties": [
- {
- "name": "id",
- "type": "string"
- },
- {
- "name": "debuggerId",
- "optional": true,
- "$ref": "UniqueDebuggerId"
- }
- ]
- }
- ],
- "commands": [
- {
- "name": "awaitPromise",
- "description": "Add handler to promise with given promise object id.",
- "parameters": [
- {
- "name": "promiseObjectId",
- "description": "Identifier of the promise.",
- "$ref": "RemoteObjectId"
- },
- {
- "name": "returnByValue",
- "description": "Whether the result is expected to be a JSON object that should be sent by value.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "generatePreview",
- "description": "Whether preview should be generated for the result.",
- "optional": true,
- "type": "boolean"
- }
- ],
- "returns": [
- {
- "name": "result",
- "description": "Promise result. Will contain rejected value if promise was rejected.",
- "$ref": "RemoteObject"
- },
- {
- "name": "exceptionDetails",
-                            "description": "Exception details if stack trace is available.",
- "optional": true,
- "$ref": "ExceptionDetails"
- }
- ]
- },
- {
- "name": "callFunctionOn",
- "description": "Calls function with given declaration on the given object. Object group of the result is\ninherited from the target object.",
- "parameters": [
- {
- "name": "functionDeclaration",
- "description": "Declaration of the function to call.",
- "type": "string"
- },
- {
- "name": "objectId",
- "description": "Identifier of the object to call function on. Either objectId or executionContextId should\nbe specified.",
- "optional": true,
- "$ref": "RemoteObjectId"
- },
- {
- "name": "arguments",
- "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target\nobject.",
- "optional": true,
- "type": "array",
- "items": {
- "$ref": "CallArgument"
- }
- },
- {
- "name": "silent",
- "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "returnByValue",
- "description": "Whether the result is expected to be a JSON object which should be sent by value.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "generatePreview",
- "description": "Whether preview should be generated for the result.",
- "experimental": true,
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "userGesture",
- "description": "Whether execution should be treated as initiated by user in the UI.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "awaitPromise",
- "description": "Whether execution should `await` for resulting value and return once awaited promise is\nresolved.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "executionContextId",
-                            "description": "Specifies the execution context whose global object will be used to call the function on. Either\nexecutionContextId or objectId should be specified.",
- "optional": true,
- "$ref": "ExecutionContextId"
- },
- {
- "name": "objectGroup",
- "description": "Symbolic group name that can be used to release multiple objects. If objectGroup is not\nspecified and objectId is, objectGroup will be inherited from object.",
- "optional": true,
- "type": "string"
- }
- ],
- "returns": [
- {
- "name": "result",
- "description": "Call result.",
- "$ref": "RemoteObject"
- },
- {
- "name": "exceptionDetails",
- "description": "Exception details.",
- "optional": true,
- "$ref": "ExceptionDetails"
- }
- ]
- },
- {
- "name": "compileScript",
- "description": "Compiles expression.",
- "parameters": [
- {
- "name": "expression",
- "description": "Expression to compile.",
- "type": "string"
- },
- {
- "name": "sourceURL",
- "description": "Source url to be set for the script.",
- "type": "string"
- },
- {
- "name": "persistScript",
- "description": "Specifies whether the compiled script should be persisted.",
- "type": "boolean"
- },
- {
- "name": "executionContextId",
- "description": "Specifies in which execution context to perform script run. If the parameter is omitted the\nevaluation will be performed in the context of the inspected page.",
- "optional": true,
- "$ref": "ExecutionContextId"
- }
- ],
- "returns": [
- {
- "name": "scriptId",
- "description": "Id of the script.",
- "optional": true,
- "$ref": "ScriptId"
- },
- {
- "name": "exceptionDetails",
- "description": "Exception details.",
- "optional": true,
- "$ref": "ExceptionDetails"
- }
- ]
- },
- {
- "name": "disable",
-                    "description": "Disables reporting of execution context creation."
- },
- {
- "name": "discardConsoleEntries",
- "description": "Discards collected exceptions and console API calls."
- },
- {
- "name": "enable",
-                    "description": "Enables reporting of execution context creation by means of `executionContextCreated` event.\nWhen reporting is enabled the event will be sent immediately for each existing execution\ncontext."
- },
- {
- "name": "evaluate",
- "description": "Evaluates expression on global object.",
- "parameters": [
- {
- "name": "expression",
- "description": "Expression to evaluate.",
- "type": "string"
- },
- {
- "name": "objectGroup",
- "description": "Symbolic group name that can be used to release multiple objects.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "includeCommandLineAPI",
- "description": "Determines whether Command Line API should be available during the evaluation.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "silent",
- "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "contextId",
- "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the\nevaluation will be performed in the context of the inspected page.",
- "optional": true,
- "$ref": "ExecutionContextId"
- },
- {
- "name": "returnByValue",
- "description": "Whether the result is expected to be a JSON object that should be sent by value.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "generatePreview",
- "description": "Whether preview should be generated for the result.",
- "experimental": true,
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "userGesture",
- "description": "Whether execution should be treated as initiated by user in the UI.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "awaitPromise",
- "description": "Whether execution should `await` for resulting value and return once awaited promise is\nresolved.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "throwOnSideEffect",
- "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation.",
- "experimental": true,
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "timeout",
- "description": "Terminate execution after timing out (number of milliseconds).",
- "experimental": true,
- "optional": true,
- "$ref": "TimeDelta"
- }
- ],
- "returns": [
- {
- "name": "result",
- "description": "Evaluation result.",
- "$ref": "RemoteObject"
- },
- {
- "name": "exceptionDetails",
- "description": "Exception details.",
- "optional": true,
- "$ref": "ExceptionDetails"
- }
- ]
- },
- {
- "name": "getIsolateId",
- "description": "Returns the isolate id.",
- "experimental": true,
- "returns": [
- {
- "name": "id",
- "description": "The isolate id.",
- "type": "string"
- }
- ]
- },
- {
- "name": "getHeapUsage",
- "description": "Returns the JavaScript heap usage.\nIt is the total usage of the corresponding isolate not scoped to a particular Runtime.",
- "experimental": true,
- "returns": [
- {
- "name": "usedSize",
- "description": "Used heap size in bytes.",
- "type": "number"
- },
- {
- "name": "totalSize",
- "description": "Allocated heap size in bytes.",
- "type": "number"
- }
- ]
- },
- {
- "name": "getProperties",
- "description": "Returns properties of a given object. Object group of the result is inherited from the target\nobject.",
- "parameters": [
- {
- "name": "objectId",
- "description": "Identifier of the object to return properties for.",
- "$ref": "RemoteObjectId"
- },
- {
- "name": "ownProperties",
- "description": "If true, returns properties belonging only to the element itself, not to its prototype\nchain.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "accessorPropertiesOnly",
- "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not\nreturned either.",
- "experimental": true,
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "generatePreview",
- "description": "Whether preview should be generated for the results.",
- "experimental": true,
- "optional": true,
- "type": "boolean"
- }
- ],
- "returns": [
- {
- "name": "result",
- "description": "Object properties.",
- "type": "array",
- "items": {
- "$ref": "PropertyDescriptor"
- }
- },
- {
- "name": "internalProperties",
- "description": "Internal object properties (only of the element itself).",
- "optional": true,
- "type": "array",
- "items": {
- "$ref": "InternalPropertyDescriptor"
- }
- },
- {
- "name": "exceptionDetails",
- "description": "Exception details.",
- "optional": true,
- "$ref": "ExceptionDetails"
- }
- ]
- },
- {
- "name": "globalLexicalScopeNames",
- "description": "Returns all let, const and class variables from global scope.",
- "parameters": [
- {
- "name": "executionContextId",
- "description": "Specifies in which execution context to lookup global scope variables.",
- "optional": true,
- "$ref": "ExecutionContextId"
- }
- ],
- "returns": [
- {
- "name": "names",
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- ]
- },
- {
- "name": "queryObjects",
- "parameters": [
- {
- "name": "prototypeObjectId",
- "description": "Identifier of the prototype to return objects for.",
- "$ref": "RemoteObjectId"
- },
- {
- "name": "objectGroup",
- "description": "Symbolic group name that can be used to release the results.",
- "optional": true,
- "type": "string"
- }
- ],
- "returns": [
- {
- "name": "objects",
- "description": "Array with objects.",
- "$ref": "RemoteObject"
- }
- ]
- },
- {
- "name": "releaseObject",
- "description": "Releases remote object with given id.",
- "parameters": [
- {
- "name": "objectId",
- "description": "Identifier of the object to release.",
- "$ref": "RemoteObjectId"
- }
- ]
- },
- {
- "name": "releaseObjectGroup",
- "description": "Releases all remote objects that belong to a given group.",
- "parameters": [
- {
- "name": "objectGroup",
- "description": "Symbolic object group name.",
- "type": "string"
- }
- ]
- },
- {
- "name": "runIfWaitingForDebugger",
- "description": "Tells inspected instance to run if it was waiting for debugger to attach."
- },
- {
- "name": "runScript",
- "description": "Runs script with given id in a given context.",
- "parameters": [
- {
- "name": "scriptId",
- "description": "Id of the script to run.",
- "$ref": "ScriptId"
- },
- {
- "name": "executionContextId",
- "description": "Specifies in which execution context to perform script run. If the parameter is omitted the\nevaluation will be performed in the context of the inspected page.",
- "optional": true,
- "$ref": "ExecutionContextId"
- },
- {
- "name": "objectGroup",
- "description": "Symbolic group name that can be used to release multiple objects.",
- "optional": true,
- "type": "string"
- },
- {
- "name": "silent",
- "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "includeCommandLineAPI",
- "description": "Determines whether Command Line API should be available during the evaluation.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "returnByValue",
- "description": "Whether the result is expected to be a JSON object which should be sent by value.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "generatePreview",
- "description": "Whether preview should be generated for the result.",
- "optional": true,
- "type": "boolean"
- },
- {
- "name": "awaitPromise",
- "description": "Whether execution should `await` for resulting value and return once awaited promise is\nresolved.",
- "optional": true,
- "type": "boolean"
- }
- ],
- "returns": [
- {
- "name": "result",
- "description": "Run result.",
- "$ref": "RemoteObject"
- },
- {
- "name": "exceptionDetails",
- "description": "Exception details.",
- "optional": true,
- "$ref": "ExceptionDetails"
- }
- ]
- },
- {
- "name": "setAsyncCallStackDepth",
- "description": "Enables or disables async call stacks tracking.",
- "redirect": "Debugger",
- "parameters": [
- {
- "name": "maxDepth",
- "description": "Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async\ncall stacks (default).",
- "type": "integer"
- }
- ]
- },
- {
- "name": "setCustomObjectFormatterEnabled",
- "experimental": true,
- "parameters": [
- {
- "name": "enabled",
- "type": "boolean"
- }
- ]
- },
- {
- "name": "setMaxCallStackSizeToCapture",
- "experimental": true,
- "parameters": [
- {
- "name": "size",
- "type": "integer"
- }
- ]
- },
- {
- "name": "terminateExecution",
- "description": "Terminate current or next JavaScript execution.\nWill cancel the termination when the outer-most script execution ends.",
- "experimental": true
- },
- {
- "name": "addBinding",
-                    "description": "If executionContextId is empty, adds binding with the given name on the\nglobal objects of all inspected contexts, including those created later;\nbindings survive reloads.\nIf executionContextId is specified, adds binding only on the global object of\nthe given execution context.\nThe binding function takes exactly one argument, which must be a string;\nany other input causes the function to throw an exception.\nEach binding function call produces a Runtime.bindingCalled notification.",
- "experimental": true,
- "parameters": [
- {
- "name": "name",
- "type": "string"
- },
- {
- "name": "executionContextId",
- "optional": true,
- "$ref": "ExecutionContextId"
- }
- ]
- },
- {
- "name": "removeBinding",
-                    "description": "This method does not remove the binding function from the global object but\nunsubscribes the current runtime agent from Runtime.bindingCalled notifications.",
- "experimental": true,
- "parameters": [
- {
- "name": "name",
- "type": "string"
- }
- ]
- }
- ],
- "events": [
- {
- "name": "bindingCalled",
-                    "description": "Notification is issued every time a binding is called.",
- "experimental": true,
- "parameters": [
- {
- "name": "name",
- "type": "string"
- },
- {
- "name": "payload",
- "type": "string"
- },
- {
- "name": "executionContextId",
- "description": "Identifier of the context where the call was made.",
- "$ref": "ExecutionContextId"
- }
- ]
- },
- {
- "name": "consoleAPICalled",
- "description": "Issued when console API was called.",
- "parameters": [
- {
- "name": "type",
- "description": "Type of the call.",
- "type": "string",
- "enum": [
- "log",
- "debug",
- "info",
- "error",
- "warning",
- "dir",
- "dirxml",
- "table",
- "trace",
- "clear",
- "startGroup",
- "startGroupCollapsed",
- "endGroup",
- "assert",
- "profile",
- "profileEnd",
- "count",
- "timeEnd"
- ]
- },
- {
- "name": "args",
- "description": "Call arguments.",
- "type": "array",
- "items": {
- "$ref": "RemoteObject"
- }
- },
- {
- "name": "executionContextId",
- "description": "Identifier of the context where the call was made.",
- "$ref": "ExecutionContextId"
- },
- {
- "name": "timestamp",
- "description": "Call timestamp.",
- "$ref": "Timestamp"
- },
- {
- "name": "stackTrace",
- "description": "Stack trace captured when the call was made.",
- "optional": true,
- "$ref": "StackTrace"
- },
- {
- "name": "context",
- "description": "Console context descriptor for calls on non-default console context (not console.*):\n'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call\non named context.",
- "experimental": true,
- "optional": true,
- "type": "string"
- }
- ]
- },
- {
- "name": "exceptionRevoked",
- "description": "Issued when unhandled exception was revoked.",
- "parameters": [
- {
- "name": "reason",
- "description": "Reason describing why exception was revoked.",
- "type": "string"
- },
- {
- "name": "exceptionId",
- "description": "The id of revoked exception, as reported in `exceptionThrown`.",
- "type": "integer"
- }
- ]
- },
- {
- "name": "exceptionThrown",
- "description": "Issued when exception was thrown and unhandled.",
- "parameters": [
- {
- "name": "timestamp",
- "description": "Timestamp of the exception.",
- "$ref": "Timestamp"
- },
- {
- "name": "exceptionDetails",
- "$ref": "ExceptionDetails"
- }
- ]
- },
- {
- "name": "executionContextCreated",
- "description": "Issued when new execution context is created.",
- "parameters": [
- {
- "name": "context",
- "description": "A newly created execution context.",
- "$ref": "ExecutionContextDescription"
- }
- ]
- },
- {
- "name": "executionContextDestroyed",
- "description": "Issued when execution context is destroyed.",
- "parameters": [
- {
- "name": "executionContextId",
-                            "description": "Id of the destroyed context.",
- "$ref": "ExecutionContextId"
- }
- ]
- },
- {
- "name": "executionContextsCleared",
-                    "description": "Issued when all executionContexts were cleared in the browser."
- },
- {
- "name": "inspectRequested",
- "description": "Issued when object should be inspected (for example, as a result of inspect() command line API\ncall).",
- "parameters": [
- {
- "name": "object",
- "$ref": "RemoteObject"
- },
- {
- "name": "hints",
- "type": "object"
- }
- ]
- }
- ]
- },
- {
- "domain": "Schema",
- "description": "This domain is deprecated.",
- "deprecated": true,
- "types": [
- {
- "id": "Domain",
- "description": "Description of the protocol domain.",
- "type": "object",
- "properties": [
- {
- "name": "name",
- "description": "Domain name.",
- "type": "string"
- },
- {
- "name": "version",
- "description": "Domain version.",
- "type": "string"
- }
- ]
- }
- ],
- "commands": [
- {
- "name": "getDomains",
- "description": "Returns supported domains.",
- "returns": [
- {
- "name": "domains",
- "description": "List of supported domains.",
- "type": "array",
- "items": {
- "$ref": "Domain"
- }
- }
- ]
- }
- ]
- }
- ]
-} \ No newline at end of file
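
The deleted file above enumerates every type, command and event in V8's JavaScript
subset of the DevTools protocol (Debugger, HeapProfiler, Profiler, Runtime, Schema).
As a hedged illustration only, not part of this change: an embedder drives these
domains by serializing commands such as Runtime.evaluate to JSON and handing them to
an existing v8_inspector::V8InspectorSession. The session and its channel wiring are
assumed to be set up already.

  #include <cstring>
  #include "v8-inspector.h"

  // Minimal sketch: dispatch a Runtime.evaluate command, as declared in the
  // protocol file above. The response arrives asynchronously on the channel
  // that created `session`.
  void EvaluateViaProtocol(v8_inspector::V8InspectorSession* session) {
    const char* json =
        "{\"id\":1,\"method\":\"Runtime.evaluate\","
        "\"params\":{\"expression\":\"6 * 7\",\"returnByValue\":true}}";
    v8_inspector::StringView message(
        reinterpret_cast<const uint8_t*>(json), std::strlen(json));
    session->dispatchProtocolMessage(message);
  }
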
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 30219a062d..303987dede 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -12,6 +12,7 @@
#include <string>
#include "src/base/platform/platform.h"
+#include "src/base/v8-fallthrough.h"
#include "src/conversions.h"
namespace v8_inspector {
@@ -550,35 +551,33 @@ std::string String16::utf8() const {
// have a good chance of being able to write the string into the
// buffer without reallocing (say, 1.5 x length).
if (length > std::numeric_limits<unsigned>::max() / 3) return std::string();
- std::vector<char> bufferVector(length * 3);
- char* buffer = bufferVector.data();
- const UChar* characters = m_impl.data();
- ConversionResult result =
- convertUTF16ToUTF8(&characters, characters + length, &buffer,
- buffer + bufferVector.size(), false);
- DCHECK(
- result !=
- targetExhausted); // (length * 3) should be sufficient for any conversion
-
- // Only produced from strict conversion.
- DCHECK(result != sourceIllegal);
-
- // Check for an unconverted high surrogate.
- if (result == sourceExhausted) {
- // This should be one unpaired high surrogate. Treat it the same
- // was as an unpaired high surrogate would have been handled in
- // the middle of a string with non-strict conversion - which is
- // to say, simply encode it to UTF-8.
- DCHECK((characters + 1) == (m_impl.data() + length));
- DCHECK((*characters >= 0xD800) && (*characters <= 0xDBFF));
- // There should be room left, since one UChar hasn't been
- // converted.
- DCHECK((buffer + 3) <= (buffer + bufferVector.size()));
- putUTF8Triple(buffer, *characters);
+ std::string output(length * 3, '\0');
+ const UChar* characters = m_impl.data();
+ const UChar* characters_end = characters + length;
+ char* buffer = &*output.begin();
+ char* buffer_end = &*output.end();
+ while (characters < characters_end) {
+ // Use strict conversion to detect unpaired surrogates.
+ ConversionResult result = convertUTF16ToUTF8(
+ &characters, characters_end, &buffer, buffer_end, /* strict= */ true);
+ DCHECK_NE(result, targetExhausted);
+ // Conversion fails when there is an unpaired surrogate. Put
+ // replacement character (U+FFFD) instead of the unpaired
+ // surrogate.
+ if (result != conversionOK) {
+ DCHECK_LE(0xD800, *characters);
+ DCHECK_LE(*characters, 0xDFFF);
+ // There should be room left, since one UChar hasn't been
+ // converted.
+ DCHECK_LE(buffer + 3, buffer_end);
+ putUTF8Triple(buffer, replacementCharacter);
+ ++characters;
+ }
}
- return std::string(bufferVector.data(), buffer - bufferVector.data());
+ output.resize(buffer - output.data());
+ return output;
}
} // namespace v8_inspector
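
The rewrite above switches String16::utf8() to strict conversion and substitutes
U+FFFD for every unpaired surrogate, instead of special-casing only a single
trailing high surrogate. A standalone sketch of that replacement policy follows;
it is illustrative only, since the inspector uses its internal convertUTF16ToUTF8
rather than this loop.

  #include <cstdint>
  #include <string>

  // Encode UTF-16 as UTF-8, mapping any unpaired surrogate to U+FFFD
  // (the three bytes EF BF BD), matching the new String16::utf8() behavior.
  std::string Utf16ToUtf8WithReplacement(const std::u16string& in) {
    std::string out;
    for (size_t i = 0; i < in.size(); ++i) {
      uint32_t cp = in[i];
      if (cp >= 0xD800 && cp <= 0xDBFF && i + 1 < in.size() &&
          in[i + 1] >= 0xDC00 && in[i + 1] <= 0xDFFF) {
        // Valid surrogate pair: combine into a supplementary code point.
        cp = 0x10000 + ((cp - 0xD800) << 10) + (in[i + 1] - 0xDC00);
        ++i;
      } else if (cp >= 0xD800 && cp <= 0xDFFF) {
        cp = 0xFFFD;  // Unpaired surrogate: use the replacement character.
      }
      if (cp < 0x80) {
        out += static_cast<char>(cp);
      } else if (cp < 0x800) {
        out += static_cast<char>(0xC0 | (cp >> 6));
        out += static_cast<char>(0x80 | (cp & 0x3F));
      } else if (cp < 0x10000) {
        out += static_cast<char>(0xE0 | (cp >> 12));
        out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
        out += static_cast<char>(0x80 | (cp & 0x3F));
      } else {
        out += static_cast<char>(0xF0 | (cp >> 18));
        out += static_cast<char>(0x80 | ((cp >> 12) & 0x3F));
        out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
        out += static_cast<char>(0x80 | (cp & 0x3F));
      }
    }
    return out;
  }
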
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index 2992f08530..4dfe8ad352 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -4,6 +4,8 @@
#include "src/inspector/string-util.h"
+#include <cmath>
+
#include "src/base/platform/platform.h"
#include "src/conversions.h"
#include "src/inspector/protocol/Protocol.h"
@@ -123,6 +125,26 @@ std::unique_ptr<protocol::Value> StringUtil::parseJSON(const String16& string) {
}
// static
+std::unique_ptr<protocol::Value> StringUtil::parseProtocolMessage(
+ const ProtocolMessage& message) {
+ return parseJSON(message.json);
+}
+
+// static
+ProtocolMessage StringUtil::jsonToMessage(String message) {
+ ProtocolMessage result;
+ result.json = std::move(message);
+ return result;
+}
+
+// static
+ProtocolMessage StringUtil::binaryToMessage(std::vector<uint8_t> message) {
+ ProtocolMessage result;
+ result.binary = std::move(message);
+ return result;
+}
+
+// static
void StringUtil::builderAppendQuotedString(StringBuilder& builder,
const String& str) {
builder.append('"');
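
The three StringUtil helpers added above give the generated protocol dispatcher a
single ProtocolMessage currency for both encodings. A sketch of how they compose,
assuming the inspector-internal headers and namespaces; note that
parseProtocolMessage consumes only the JSON member here, while binary payloads are
decoded on a separate path.

  #include "src/inspector/string-util.h"

  // Wrap a JSON reply and a CBOR reply in the same struct; exactly one
  // member of ProtocolMessage is populated per message.
  void WrapMessages() {
    using v8_inspector::protocol::ProtocolMessage;
    using v8_inspector::protocol::StringUtil;
    ProtocolMessage as_json = StringUtil::jsonToMessage(
        v8_inspector::String16::fromUTF8("{\"id\":1,\"result\":{}}", 20));
    ProtocolMessage as_binary =
        StringUtil::binaryToMessage({0xD8 /* CBOR-tagged payload... */});
    auto parsed = StringUtil::parseProtocolMessage(as_json);  // parses .json
    (void)parsed;
    (void)as_binary;
  }
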
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 4ab39bd6d1..a9ce4ff424 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -22,6 +22,10 @@ class Value;
using String = v8_inspector::String16;
using StringBuilder = v8_inspector::String16Builder;
+struct ProtocolMessage {
+ String json;
+ std::vector<uint8_t> binary;
+};
class StringUtil {
public:
@@ -59,6 +63,25 @@ class StringUtil {
}
static std::unique_ptr<protocol::Value> parseJSON(const String16& json);
static std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
+ static std::unique_ptr<protocol::Value> parseProtocolMessage(
+ const ProtocolMessage&);
+ static ProtocolMessage jsonToMessage(String message);
+ static ProtocolMessage binaryToMessage(std::vector<uint8_t> message);
+
+ static String fromUTF8(const uint8_t* data, size_t length) {
+ return String16::fromUTF8(reinterpret_cast<const char*>(data), length);
+ }
+
+ static String fromUTF16(const uint16_t* data, size_t length) {
+ return String16(data, length);
+ }
+
+ static const uint8_t* CharactersLatin1(const String& s) { return nullptr; }
+ static const uint8_t* CharactersUTF8(const String& s) { return nullptr; }
+ static const uint16_t* CharactersUTF16(const String& s) {
+ return s.characters16();
+ }
+ static size_t CharacterCount(const String& s) { return s.length(); }
};
// A read-only sequence of uninterpreted bytes with reference-counted storage.
@@ -73,6 +96,7 @@ class Binary {
static Binary fromBase64(const String& base64, bool* success) {
UNIMPLEMENTED();
}
+ static Binary fromSpan(const uint8_t* data, size_t size) { UNIMPLEMENTED(); }
};
} // namespace protocol
@@ -101,6 +125,19 @@ class StringBufferImpl : public StringBuffer {
DISALLOW_COPY_AND_ASSIGN(StringBufferImpl);
};
+class BinaryStringBuffer : public StringBuffer {
+ public:
+ explicit BinaryStringBuffer(std::vector<uint8_t> data)
+ : m_data(std::move(data)), m_string(m_data.data(), m_data.size()) {}
+ const StringView& string() override { return m_string; }
+
+ private:
+ std::vector<uint8_t> m_data;
+ StringView m_string;
+
+ DISALLOW_COPY_AND_ASSIGN(BinaryStringBuffer);
+};
+
String16 debuggerIdToString(const std::pair<int64_t, int64_t>& debuggerId);
String16 stackTraceIdToString(uintptr_t id);
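
The new BinaryStringBuffer lets a CBOR payload travel through the existing
StringBuffer interface: the bytes are owned by a vector and exposed as an 8-bit
StringView, so channels written for strings need no separate binary path. A usage
sketch, with inspector-internal types assumed; 0xD8 is a CBOR tag marker, a byte
no JSON message can start with.

  #include <memory>
  #include <utility>
  #include <vector>

  #include "src/inspector/string-util.h"

  void SendBinary() {
    std::vector<uint8_t> cbor = {0xD8 /* ...rest of encoded message... */};
    std::unique_ptr<v8_inspector::StringBuffer> buffer(
        new v8_inspector::BinaryStringBuffer(std::move(cbor)));
    const v8_inspector::StringView& view = buffer->string();
    // view.is8Bit() is true and view.characters8() aliases the stored bytes,
    // which stay alive as long as `buffer` does.
    (void)view;
  }
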
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 8b87bf4a50..944d18a138 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -704,6 +704,7 @@ Response V8DebuggerAgentImpl::continueToLocation(
InspectedContext* inspected = m_inspector->getContext(contextId);
if (!inspected)
return Response::Error("Cannot continue to specified location");
+ v8::HandleScope handleScope(m_isolate);
v8::Context::Scope contextScope(inspected->context());
return m_debugger->continueToLocation(
m_session->contextGroupId(), script, std::move(location),
@@ -1124,6 +1125,7 @@ Response V8DebuggerAgentImpl::setReturnValue(
std::unique_ptr<protocol::Runtime::CallArgument> protocolNewValue) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ v8::HandleScope handleScope(m_isolate);
auto iterator = v8::debug::StackTraceIterator::Create(m_isolate);
if (iterator->Done()) {
return Response::Error("Could not find top call frame");
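
The v8::HandleScope additions in this file and the next fix code paths that create
v8::Local handles with no scope on the stack; debug builds of V8 abort in that
situation. The pattern, sketched with the same StackTraceIterator the agent uses
(include path per this V8 checkout):

  #include "src/debug/debug-interface.h"

  // Every Local created below is owned by handle_scope and is reclaimed
  // when the scope unwinds; without it, Local allocation CHECK-fails.
  void InspectTopFrame(v8::Isolate* isolate) {
    v8::HandleScope handle_scope(isolate);
    auto iterator = v8::debug::StackTraceIterator::Create(isolate);
    if (iterator->Done()) return;
    v8::Local<v8::Value> return_value = iterator->GetReturnValue();
    (void)return_value;  // valid until handle_scope is destroyed
  }
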
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 1539446452..ccc5676058 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -249,6 +249,7 @@ void V8Debugger::stepOutOfFunction(int targetContextGroupId) {
bool V8Debugger::asyncStepOutOfFunction(int targetContextGroupId,
bool onlyAtReturn) {
+ v8::HandleScope handleScope(m_isolate);
auto iterator = v8::debug::StackTraceIterator::Create(m_isolate);
DCHECK(!iterator->Done());
bool atReturn = !iterator->GetReturnValue().IsEmpty();
@@ -1014,6 +1015,7 @@ std::unique_ptr<V8StackTraceImpl> V8Debugger::captureStackTrace(
int V8Debugger::currentContextGroupId() {
if (!m_isolate->InContext()) return 0;
+ v8::HandleScope handleScope(m_isolate);
return m_inspector->contextGroupId(m_isolate->GetCurrentContext());
}
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index db05b24102..c4c4cc14a1 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -76,6 +76,8 @@ V8InspectorSessionImpl::V8InspectorSessionImpl(V8InspectorImpl* inspector,
m_state = protocol::DictionaryValue::create();
}
+ m_state->getBoolean("use_binary_protocol", &use_binary_protocol_);
+
m_runtimeAgent.reset(new V8RuntimeAgentImpl(
this, this, agentState(protocol::Runtime::Metainfo::domainName)));
protocol::Runtime::Dispatcher::wire(&m_dispatcher, m_runtimeAgent.get());
@@ -137,41 +139,53 @@ namespace {
class MessageBuffer : public StringBuffer {
public:
static std::unique_ptr<MessageBuffer> create(
- std::unique_ptr<protocol::Serializable> message) {
+ std::unique_ptr<protocol::Serializable> message, bool binary) {
return std::unique_ptr<MessageBuffer>(
- new MessageBuffer(std::move(message)));
+ new MessageBuffer(std::move(message), binary));
}
const StringView& string() override {
if (!m_serialized) {
- m_serialized = StringBuffer::create(toStringView(m_message->serialize()));
+ if (m_binary) {
+ // Encode the binary response as an 8-bit string buffer.
+ m_serialized.reset(
+ new BinaryStringBuffer(m_message->serializeToBinary()));
+ } else {
+ m_serialized =
+ StringBuffer::create(toStringView(m_message->serializeToJSON()));
+ }
m_message.reset(nullptr);
}
return m_serialized->string();
}
private:
- explicit MessageBuffer(std::unique_ptr<protocol::Serializable> message)
- : m_message(std::move(message)) {}
+ explicit MessageBuffer(std::unique_ptr<protocol::Serializable> message,
+ bool binary)
+ : m_message(std::move(message)), m_binary(binary) {}
std::unique_ptr<protocol::Serializable> m_message;
std::unique_ptr<StringBuffer> m_serialized;
+ bool m_binary;
};
} // namespace
void V8InspectorSessionImpl::sendProtocolResponse(
int callId, std::unique_ptr<protocol::Serializable> message) {
- m_channel->sendResponse(callId, MessageBuffer::create(std::move(message)));
+ m_channel->sendResponse(
+ callId, MessageBuffer::create(std::move(message), use_binary_protocol_));
}
void V8InspectorSessionImpl::sendProtocolNotification(
std::unique_ptr<protocol::Serializable> message) {
- m_channel->sendNotification(MessageBuffer::create(std::move(message)));
+ m_channel->sendNotification(
+ MessageBuffer::create(std::move(message), use_binary_protocol_));
}
-void V8InspectorSessionImpl::fallThrough(int callId, const String16& method,
- const String16& message) {
+void V8InspectorSessionImpl::fallThrough(
+ int callId, const String16& method,
+ const protocol::ProtocolMessage& message) {
// There's no other layer to handle the command.
UNREACHABLE();
}
@@ -316,19 +330,32 @@ void V8InspectorSessionImpl::reportAllContexts(V8RuntimeAgentImpl* agent) {
void V8InspectorSessionImpl::dispatchProtocolMessage(
const StringView& message) {
+ bool binary_protocol =
+ message.is8Bit() && message.length() && message.characters8()[0] == 0xD8;
+ if (binary_protocol) {
+ use_binary_protocol_ = true;
+ m_state->setBoolean("use_binary_protocol", true);
+ }
+
int callId;
+ std::unique_ptr<protocol::Value> parsed_message;
+ if (binary_protocol) {
+ parsed_message = protocol::Value::parseBinary(
+ message.characters8(), static_cast<unsigned>(message.length()));
+ } else {
+ parsed_message = protocol::StringUtil::parseJSON(message);
+ }
String16 method;
- std::unique_ptr<protocol::Value> parsedMessage =
- protocol::StringUtil::parseJSON(message);
- if (m_dispatcher.parseCommand(parsedMessage.get(), &callId, &method)) {
+ if (m_dispatcher.parseCommand(parsed_message.get(), &callId, &method)) {
// Pass empty string instead of the actual message to save on a conversion.
// We're allowed to do so because fall-through is not implemented.
- m_dispatcher.dispatch(callId, method, std::move(parsedMessage), "");
+ m_dispatcher.dispatch(callId, method, std::move(parsed_message),
+ protocol::ProtocolMessage());
}
}
std::unique_ptr<StringBuffer> V8InspectorSessionImpl::stateJSON() {
- String16 json = m_state->serialize();
+ String16 json = m_state->toJSONString();
return StringBufferImpl::adopt(json);
}
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 461cc0a2f0..8834b56f5d 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -102,7 +102,7 @@ class V8InspectorSessionImpl : public V8InspectorSession,
void sendProtocolNotification(
std::unique_ptr<protocol::Serializable> message) override;
void fallThrough(int callId, const String16& method,
- const String16& message) override;
+ const protocol::ProtocolMessage& message) override;
void flushProtocolNotifications() override;
int m_contextGroupId;
@@ -122,6 +122,7 @@ class V8InspectorSessionImpl : public V8InspectorSession,
std::unique_ptr<V8SchemaAgentImpl> m_schemaAgent;
std::vector<std::unique_ptr<V8InspectorSession::Inspectable>>
m_inspectedObjects;
+ bool use_binary_protocol_ = false;
DISALLOW_COPY_AND_ASSIGN(V8InspectorSessionImpl);
};
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 1e16218e1c..b825397b4d 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -8,7 +8,6 @@
#include "src/base/atomicops.h"
#include "src/debug/debug-interface.h"
-#include "src/flags.h" // TODO(jgruber): Remove include and DEPS entry.
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger.h"
@@ -305,9 +304,10 @@ Response V8ProfilerAgentImpl::startPreciseCoverage(Maybe<bool> callCount,
// each function recompiled after the BlockCount mode has been set); and
// function-granularity coverage data otherwise.
typedef v8::debug::Coverage C;
- C::Mode mode = callCountValue
- ? (detailedValue ? C::kBlockCount : C::kPreciseCount)
- : (detailedValue ? C::kBlockBinary : C::kPreciseBinary);
+ typedef v8::debug::CoverageMode Mode;
+ Mode mode = callCountValue
+ ? (detailedValue ? Mode::kBlockCount : Mode::kPreciseCount)
+ : (detailedValue ? Mode::kBlockBinary : Mode::kPreciseBinary);
C::SelectMode(m_isolate, mode);
return Response::OK();
}
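The two optional booleans select one of four coverage modes; laid out as a table, this is a restatement of the ternary above, with mode names taken from this patch:

  // callCount  detailed   mode
  // true       true       CoverageMode::kBlockCount    (block-level counts)
  // true       false      CoverageMode::kPreciseCount  (function-level counts)
  // false      true       CoverageMode::kBlockBinary   (block-level seen/unseen)
  // false      false      CoverageMode::kPreciseBinary (function-level seen/unseen)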
@@ -317,7 +317,8 @@ Response V8ProfilerAgentImpl::stopPreciseCoverage() {
m_state->setBoolean(ProfilerAgentState::preciseCoverageStarted, false);
m_state->setBoolean(ProfilerAgentState::preciseCoverageCallCount, false);
m_state->setBoolean(ProfilerAgentState::preciseCoverageDetailed, false);
- v8::debug::Coverage::SelectMode(m_isolate, v8::debug::Coverage::kBestEffort);
+ v8::debug::Coverage::SelectMode(m_isolate,
+ v8::debug::CoverageMode::kBestEffort);
return Response::OK();
}
@@ -462,13 +463,14 @@ typeProfileToProtocol(V8InspectorImpl* inspector,
Response V8ProfilerAgentImpl::startTypeProfile() {
m_state->setBoolean(ProfilerAgentState::typeProfileStarted, true);
v8::debug::TypeProfile::SelectMode(m_isolate,
- v8::debug::TypeProfile::kCollect);
+ v8::debug::TypeProfileMode::kCollect);
return Response::OK();
}
Response V8ProfilerAgentImpl::stopTypeProfile() {
m_state->setBoolean(ProfilerAgentState::typeProfileStarted, false);
- v8::debug::TypeProfile::SelectMode(m_isolate, v8::debug::TypeProfile::kNone);
+ v8::debug::TypeProfile::SelectMode(m_isolate,
+ v8::debug::TypeProfileMode::kNone);
return Response::OK();
}
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index cedb637399..97e0267d3a 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -55,7 +55,7 @@ static const char customObjectFormatterEnabled[] =
"customObjectFormatterEnabled";
static const char runtimeEnabled[] = "runtimeEnabled";
static const char bindings[] = "bindings";
-};
+} // namespace V8RuntimeAgentImplState
using protocol::Runtime::RemoteObject;
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index aac6481828..8feb97dd54 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -56,10 +56,14 @@ Response toProtocolValue(v8::Local<v8::Context> context,
}
if (value->IsNumber()) {
double doubleValue = value.As<v8::Number>()->Value();
- int intValue = static_cast<int>(doubleValue);
- if (intValue == doubleValue) {
- *result = protocol::FundamentalValue::create(intValue);
- return Response::OK();
+ if (doubleValue >= std::numeric_limits<int>::min() &&
+ doubleValue <= std::numeric_limits<int>::max() &&
+ bit_cast<int64_t>(doubleValue) != bit_cast<int64_t>(-0.0)) {
+ int intValue = static_cast<int>(doubleValue);
+ if (intValue == doubleValue) {
+ *result = protocol::FundamentalValue::create(intValue);
+ return Response::OK();
+ }
}
*result = protocol::FundamentalValue::create(doubleValue);
return Response::OK();
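The added bit_cast guard exists because -0.0 defeats the naive round-trip test: static_cast<int>(-0.0) is 0, and 0 == -0.0 holds under IEEE-754 comparison, so without a bit-level check the sign would be silently dropped when the value is folded into an integer protocol value. A minimal standalone demonstration:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int main() {
    double neg_zero = -0.0;
    int as_int = static_cast<int>(neg_zero);  // 0
    assert(as_int == neg_zero);               // passes: -0.0 compares equal to 0
    uint64_t neg_bits, pos_bits;
    double pos_zero = 0.0;
    std::memcpy(&neg_bits, &neg_zero, sizeof neg_bits);
    std::memcpy(&pos_bits, &pos_zero, sizeof pos_bits);
    assert(neg_bits != pos_bits);  // only the bit pattern reveals the sign
    return 0;
  }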
@@ -805,6 +809,63 @@ void getInternalPropertiesForPreview(
}
}
+void getPrivateFieldsForPreview(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object, int* nameLimit,
+ bool* overflow,
+ protocol::Array<PropertyPreview>* properties) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::MicrotasksScope microtasksScope(isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::Array> privateFields;
+
+ if (!v8::debug::GetPrivateFields(context, object).ToLocal(&privateFields)) {
+ return;
+ }
+
+ for (uint32_t i = 0; i < privateFields->Length(); i += 2) {
+ v8::Local<v8::Data> name;
+ if (!privateFields->Get(context, i).ToLocal(&name)) {
+ tryCatch.Reset();
+ continue;
+ }
+
+ // Weirdly, v8::Private is a subclass of v8::Data rather than
+ // v8::Value, which means we first have to upcast to v8::Data
+ // and then downcast to v8::Private. Changing the hierarchy would
+ // be a breaking change at this point, so it may not be possible.
+ //
+ // TODO(gsathya): Add an IsPrivate method to the v8::Private and
+ // assert here.
+ v8::Local<v8::Private> private_field = v8::Local<v8::Private>::Cast(name);
+ v8::Local<v8::Value> private_name = private_field->Name();
+ CHECK(!private_name->IsUndefined());
+
+ v8::Local<v8::Value> value;
+ if (!privateFields->Get(context, i + 1).ToLocal(&value)) {
+ tryCatch.Reset();
+ continue;
+ }
+
+ auto wrapper = ValueMirror::create(context, value);
+ if (wrapper) {
+ std::unique_ptr<PropertyPreview> propertyPreview;
+ wrapper->buildPropertyPreview(
+ context,
+ toProtocolStringWithTypeCheck(context->GetIsolate(), private_name),
+ &propertyPreview);
+ if (propertyPreview) {
+ if (!*nameLimit) {
+ *overflow = true;
+ return;
+ }
+ --*nameLimit;
+ properties->addItem(std::move(propertyPreview));
+ }
+ }
+ }
+}
+
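The loop above advances by two because v8::debug::GetPrivateFields returns a flat array of alternating entries: even indices hold the v8::Private names, odd indices the corresponding values. Schematically, for a hypothetical class:

  // class C { #x = 1; #y = 2; } yields:
  //   privateFields[0] = v8::Private for #x, privateFields[1] = 1
  //   privateFields[2] = v8::Private for #y, privateFields[3] = 2
  // so privateFields->Length() is always even.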
class ObjectMirror final : public ValueMirror {
public:
ObjectMirror(v8::Local<v8::Value> value, const String16& description)
@@ -895,6 +956,7 @@ class ObjectMirror final : public ValueMirror {
v8::Local<v8::Value> value = m_value;
while (value->IsProxy()) value = value.As<v8::Proxy>()->GetTarget();
+
if (value->IsObject() && !value->IsProxy()) {
v8::Local<v8::Object> objectForPreview = value.As<v8::Object>();
std::vector<InternalPropertyMirror> internalProperties;
@@ -909,6 +971,9 @@ class ObjectMirror final : public ValueMirror {
}
}
+ getPrivateFieldsForPreview(context, objectForPreview, nameLimit,
+ &overflow, properties.get());
+
std::vector<PropertyMirror> mirrors;
if (getPropertiesForPreview(context, objectForPreview, nameLimit,
indexLimit, &overflow, &mirrors)) {
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 118b2de8ed..8fa7a244ab 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -31,6 +31,7 @@ namespace internal {
V(BigIntToWasmI64) \
V(BinaryOp) \
V(CallForwardVarargs) \
+ V(CallFunctionTemplate) \
V(CallTrampoline) \
V(CallVarargs) \
V(CallWithArrayLike) \
@@ -793,6 +794,14 @@ class CallForwardVarargsDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallForwardVarargsDescriptor, CallInterfaceDescriptor)
};
+class CallFunctionTemplateDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kFunctionTemplateInfo, kArgumentsCount)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunctionTemplateInfo
+ MachineType::IntPtr()) // kArgumentsCount
+ DECLARE_DESCRIPTOR(CallFunctionTemplateDescriptor, CallInterfaceDescriptor)
+};
+
class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kArgumentsCount, kSpread)
@@ -847,7 +856,7 @@ class ConstructStubDescriptor : public CallInterfaceDescriptor {
public:
// TODO(jgruber): Remove the unused allocation site parameter.
DEFINE_JS_PARAMETERS(kAllocationSite)
- DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged());
+ DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged())
// TODO(ishell): Use DECLARE_JS_COMPATIBLE_DESCRIPTOR if registers match
DECLARE_DESCRIPTOR(ConstructStubDescriptor, CallInterfaceDescriptor)
@@ -870,7 +879,7 @@ class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_JS_PARAMETERS(kAllocationSite)
- DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged());
+ DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged())
DECLARE_JS_COMPATIBLE_DESCRIPTOR(ArrayConstructorDescriptor,
CallInterfaceDescriptor, 1)
@@ -993,16 +1002,12 @@ class CEntry1ArgvOnStackDescriptor : public CallInterfaceDescriptor {
class ApiCallbackDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS_NO_CONTEXT(kTargetContext, // register argument
- kApiFunctionAddress, // register argument
- kArgc, // register argument
- kCallData, // stack argument 1
- kHolder) // stack argument 2
- // receiver is implicit stack argument 3
- // argv are implicit stack arguments [4, 4 + kArgc[
- DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTargetContext
- MachineType::Pointer(), // kApiFunctionAddress
- MachineType::IntPtr(), // kArgc
+ DEFINE_PARAMETERS(kApiFunctionAddress, kActualArgumentsCount, kCallData,
+ kHolder)
+ // receiver is implicit stack argument 1
+ // argv are implicit stack arguments [2, 2 + kArgc[
+ DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kApiFunctionAddress
+ MachineType::IntPtr(), // kActualArgumentsCount
MachineType::AnyTagged(), // kCallData
MachineType::AnyTagged()) // kHolder
DECLARE_DESCRIPTOR(ApiCallbackDescriptor, CallInterfaceDescriptor)
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 2183068576..d0a30349ca 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -47,7 +47,6 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
bytecode_generated_(false),
constant_array_builder_(zone),
handler_table_builder_(zone),
- return_seen_in_block_(false),
parameter_count_(parameter_count),
local_register_count_(locals_count),
register_allocator_(fixed_register_count()),
@@ -82,7 +81,7 @@ Register BytecodeArrayBuilder::Local(int index) const {
}
Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
- DCHECK(return_seen_in_block_);
+ DCHECK(RemainderOfBlockIsDead());
DCHECK(!bytecode_generated_);
bytecode_generated_ = true;
@@ -146,6 +145,12 @@ void BytecodeArrayBuilder::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
bytecode_array_writer_.WriteJump(node, label);
}
+void BytecodeArrayBuilder::WriteJumpLoop(BytecodeNode* node,
+ BytecodeLoopHeader* loop_header) {
+ AttachOrEmitDeferredSourceInfo(node);
+ bytecode_array_writer_.WriteJumpLoop(node, loop_header);
+}
+
void BytecodeArrayBuilder::WriteSwitch(BytecodeNode* node,
BytecodeJumpTable* jump_table) {
AttachOrEmitDeferredSourceInfo(node);
@@ -330,21 +335,25 @@ class BytecodeNodeBuilder {
template <typename... Operands> \
void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
Operands... operands) { \
- DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
+ DCHECK(Bytecodes::IsForwardJump(Bytecode::k##name)); \
BytecodeNode node(Create##name##Node(operands...)); \
WriteJump(&node, label); \
- LeaveBasicBlock(); \
}
BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
#undef DEFINE_BYTECODE_OUTPUT
+void BytecodeArrayBuilder::OutputJumpLoop(BytecodeLoopHeader* loop_header,
+ int loop_depth) {
+ BytecodeNode node(CreateJumpLoopNode(0, loop_depth));
+ WriteJumpLoop(&node, loop_header);
+}
+
void BytecodeArrayBuilder::OutputSwitchOnSmiNoFeedback(
BytecodeJumpTable* jump_table) {
BytecodeNode node(CreateSwitchOnSmiNoFeedbackNode(
jump_table->constant_pool_index(), jump_table->size(),
jump_table->case_value_base()));
WriteSwitch(&node, jump_table);
- LeaveBasicBlock();
}
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
@@ -506,17 +515,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
case Token::Value::INSTANCEOF:
OutputTestInstanceOf(reg, feedback_slot);
break;
- default:
- UNREACHABLE();
- }
- return *this;
-}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
- Register reg) {
- switch (op) {
case Token::Value::IN:
- OutputTestIn(reg);
+ OutputTestIn(reg, feedback_slot);
break;
default:
UNREACHABLE();
@@ -1053,18 +1053,23 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ToNumeric(int feedback_slot) {
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
+ // Don't generate code for a label which hasn't had a corresponding forward
+ // jump generated already. For backwards jumps, use BindLoopHeader.
+ if (!label->has_referrer_jump()) return *this;
+
// Flush the register optimizer when binding a label to ensure all
// expected registers are valid when jumping to this label.
if (register_optimizer_) register_optimizer_->Flush();
bytecode_array_writer_.BindLabel(label);
- LeaveBasicBlock();
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
- BytecodeLabel* label) {
- bytecode_array_writer_.BindLabel(target, label);
- LeaveBasicBlock();
+BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(
+ BytecodeLoopHeader* loop_header) {
+ // Flush the register optimizer when starting a loop to ensure all expected
+ // registers are valid when jumping to the loop header.
+ if (register_optimizer_) register_optimizer_->Flush();
+ bytecode_array_writer_.BindLoopHeader(loop_header);
return *this;
}
@@ -1074,7 +1079,33 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeJumpTable* jump_table,
// all expected registers are valid when jumping to this location.
if (register_optimizer_) register_optimizer_->Flush();
bytecode_array_writer_.BindJumpTableEntry(jump_table, case_value);
- LeaveBasicBlock();
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(
+ int handler_id, HandlerTable::CatchPrediction catch_prediction) {
+ // The handler starts a new basic block, and any reasonable try block won't
+ // let control fall through into it.
+ DCHECK_IMPLIES(register_optimizer_,
+ register_optimizer_->EnsureAllRegistersAreFlushed());
+ bytecode_array_writer_.BindHandlerTarget(handler_table_builder(), handler_id);
+ handler_table_builder()->SetPrediction(handler_id, catch_prediction);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
+ Register context) {
+ // Flush registers to make sure everything visible to the handler is
+ // materialized.
+ if (register_optimizer_) register_optimizer_->Flush();
+ bytecode_array_writer_.BindTryRegionStart(handler_table_builder(),
+ handler_id);
+ handler_table_builder()->SetContextRegister(handler_id, context);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
+ bytecode_array_writer_.BindTryRegionEnd(handler_table_builder(), handler_id);
return *this;
}
@@ -1178,10 +1209,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfJSReceiver(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
- int loop_depth) {
- DCHECK(label->is_bound());
- OutputJumpLoop(label, 0, loop_depth);
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(
+ BytecodeLoopHeader* loop_header, int loop_depth) {
+ OutputJumpLoop(loop_header, loop_depth);
return *this;
}
@@ -1233,7 +1263,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Abort(AbortReason reason) {
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
OutputReturn();
- return_seen_in_block_ = true;
return *this;
}
@@ -1321,7 +1350,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnGeneratorState(
BytecodeNode node(CreateSwitchOnGeneratorStateNode(
generator, jump_table->constant_pool_index(), jump_table->size()));
WriteSwitch(&node, jump_table);
- LeaveBasicBlock();
return *this;
}
@@ -1331,33 +1359,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(
- int handler_id, HandlerTable::CatchPrediction catch_prediction) {
- BytecodeLabel handler;
- Bind(&handler);
- handler_table_builder()->SetHandlerTarget(handler_id, handler.offset());
- handler_table_builder()->SetPrediction(handler_id, catch_prediction);
- return *this;
-}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
- Register context) {
- // TODO(leszeks): Do we need to start a new basic block here? Could we simply
- // get the current bytecode offset from the array writer instead?
- BytecodeLabel try_begin;
- Bind(&try_begin);
- handler_table_builder()->SetTryRegionStart(handler_id, try_begin.offset());
- handler_table_builder()->SetContextRegister(handler_id, context);
- return *this;
-}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
- BytecodeLabel try_end;
- Bind(&try_end);
- handler_table_builder()->SetTryRegionEnd(handler_id, try_end.offset());
- return *this;
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CallProperty(Register callable,
RegisterList args,
int feedback_slot) {
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index d362ffffa4..93d108f7be 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -27,6 +27,7 @@ class Isolate;
namespace interpreter {
class BytecodeLabel;
+class BytecodeLoopHeader;
class BytecodeNode;
class BytecodeRegisterOptimizer;
class BytecodeJumpTable;
@@ -379,7 +380,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Tests.
BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
int feedback_slot);
- BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg);
BytecodeArrayBuilder& CompareReference(Register reg);
BytecodeArrayBuilder& CompareUndetectable();
BytecodeArrayBuilder& CompareUndefined();
@@ -397,13 +397,20 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& ToNumber(int feedback_slot);
BytecodeArrayBuilder& ToNumeric(int feedback_slot);
+ // Exception handling.
+ BytecodeArrayBuilder& MarkHandler(int handler_id,
+ HandlerTable::CatchPrediction will_catch);
+ BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context);
+ BytecodeArrayBuilder& MarkTryEnd(int handler_id);
+
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
- BytecodeArrayBuilder& Bind(const BytecodeLabel& target, BytecodeLabel* label);
+ BytecodeArrayBuilder& Bind(BytecodeLoopHeader* label);
BytecodeArrayBuilder& Bind(BytecodeJumpTable* jump_table, int case_value);
BytecodeArrayBuilder& Jump(BytecodeLabel* label);
- BytecodeArrayBuilder& JumpLoop(BytecodeLabel* label, int loop_depth);
+ BytecodeArrayBuilder& JumpLoop(BytecodeLoopHeader* loop_header,
+ int loop_depth);
BytecodeArrayBuilder& JumpIfTrue(ToBooleanMode mode, BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfFalse(ToBooleanMode mode, BytecodeLabel* label);
@@ -458,12 +465,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& ResumeGenerator(Register generator,
RegisterList registers);
- // Exception handling.
- BytecodeArrayBuilder& MarkHandler(int handler_id,
- HandlerTable::CatchPrediction will_catch);
- BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context);
- BytecodeArrayBuilder& MarkTryEnd(int handler_id);
-
// Creates a new handler table entry and returns a {handler_id} identifying the
// entry, so that it can be referenced by the above exception handling support.
int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
@@ -519,7 +520,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
}
}
- bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
bool RemainderOfBlockIsDead() const {
return bytecode_array_writer_.RemainderOfBlockIsDead();
}
@@ -568,6 +568,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT)
#undef DECLARE_OPERAND_TYPE_INFO
+ V8_INLINE void OutputJumpLoop(BytecodeLoopHeader* loop_header,
+ int loop_depth);
V8_INLINE void OutputSwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table);
bool RegisterIsValid(Register reg) const;
@@ -583,6 +585,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Write bytecode to bytecode array.
void Write(BytecodeNode* node);
void WriteJump(BytecodeNode* node, BytecodeLabel* label);
+ void WriteJumpLoop(BytecodeNode* node, BytecodeLoopHeader* loop_header);
void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* label);
// Not implemented as the illegal bytecode is used inside internally
@@ -593,8 +596,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
template <Bytecode bytecode, AccumulatorUse accumulator_use>
void PrepareToOutputBytecode();
- void LeaveBasicBlock() { return_seen_in_block_ = false; }
-
BytecodeArrayWriter* bytecode_array_writer() {
return &bytecode_array_writer_;
}
@@ -613,7 +614,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
bool bytecode_generated_;
ConstantArrayBuilder constant_array_builder_;
HandlerTableBuilder handler_table_builder_;
- bool return_seen_in_block_;
int parameter_count_;
int local_register_count_;
BytecodeRegisterAllocator register_allocator_;
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index a563ff4fc3..05f655b71a 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -11,6 +11,7 @@
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/constant-array-builder.h"
+#include "src/interpreter/handler-table-builder.h"
#include "src/log.h"
#include "src/objects-inl.h"
@@ -45,16 +46,20 @@ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
int frame_size = register_count * kSystemPointerSize;
Handle<FixedArray> constant_pool =
constant_array_builder()->ToFixedArray(isolate);
- Handle<ByteArray> source_position_table =
- source_position_table_builder()->ToSourcePositionTable(isolate);
Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
bytecode_size, &bytecodes()->front(), frame_size, parameter_count,
constant_pool);
bytecode_array->set_handler_table(*handler_table);
- bytecode_array->set_source_position_table(*source_position_table);
- LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(
- bytecode_array->GetFirstBytecodeAddress(),
- *source_position_table));
+ // TODO(v8:8510): Native functions should always have their source
+ // positions suppressed; support that and write empty_byte_array here.
+ if (!source_position_table_builder_.Omit()) {
+ Handle<ByteArray> source_position_table =
+ source_position_table_builder()->ToSourcePositionTable(isolate);
+ bytecode_array->set_source_position_table(*source_position_table);
+ LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(
+ bytecode_array->GetFirstBytecodeAddress(),
+ *source_position_table));
+ }
return bytecode_array;
}
@@ -70,10 +75,8 @@ void BytecodeArrayWriter::Write(BytecodeNode* node) {
}
void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
- DCHECK(Bytecodes::IsJump(node->bytecode()));
+ DCHECK(Bytecodes::IsForwardJump(node->bytecode()));
- // TODO(rmcilroy): For forward jumps we could also mark the label as dead,
- // thereby avoiding emitting dead code when we bind the label.
if (exit_seen_in_block_) return; // Don't emit dead code.
UpdateExitSeenInBlock(node->bytecode());
MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());
@@ -82,12 +85,22 @@ void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
EmitJump(node, label);
}
+void BytecodeArrayWriter::WriteJumpLoop(BytecodeNode* node,
+ BytecodeLoopHeader* loop_header) {
+ DCHECK_EQ(node->bytecode(), Bytecode::kJumpLoop);
+
+ if (exit_seen_in_block_) return; // Don't emit dead code.
+ UpdateExitSeenInBlock(node->bytecode());
+ MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());
+
+ UpdateSourcePositionTable(node);
+ EmitJumpLoop(node, loop_header);
+}
+
void BytecodeArrayWriter::WriteSwitch(BytecodeNode* node,
BytecodeJumpTable* jump_table) {
DCHECK(Bytecodes::IsSwitch(node->bytecode()));
- // TODO(rmcilroy): For jump tables we could also mark the table as dead,
- // thereby avoiding emitting dead code when we bind the entries.
if (exit_seen_in_block_) return; // Don't emit dead code.
UpdateExitSeenInBlock(node->bytecode());
MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());
@@ -97,30 +110,18 @@ void BytecodeArrayWriter::WriteSwitch(BytecodeNode* node,
}
void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
+ DCHECK(label->has_referrer_jump());
size_t current_offset = bytecodes()->size();
- if (label->is_forward_target()) {
- // An earlier jump instruction refers to this label. Update its location.
- PatchJump(current_offset, label->offset());
- // Now treat as if the label will only be back referred to.
- }
- label->bind_to(current_offset);
- InvalidateLastBytecode();
- exit_seen_in_block_ = false; // Starting a new basic block.
+ // Update the jump instruction's location.
+ PatchJump(current_offset, label->jump_offset());
+ label->bind();
+ StartBasicBlock();
}
-void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target,
- BytecodeLabel* label) {
- DCHECK(!label->is_bound());
- DCHECK(target.is_bound());
- if (label->is_forward_target()) {
- // An earlier jump instruction refers to this label. Update its location.
- PatchJump(target.offset(), label->offset());
- // Now treat as if the label will only be back referred to.
- }
- label->bind_to(target.offset());
- InvalidateLastBytecode();
- // exit_seen_in_block_ was reset when target was bound, so shouldn't be
- // changed here.
+void BytecodeArrayWriter::BindLoopHeader(BytecodeLoopHeader* loop_header) {
+ size_t current_offset = bytecodes()->size();
+ loop_header->bind_to(current_offset);
+ StartBasicBlock();
}
void BytecodeArrayWriter::BindJumpTableEntry(BytecodeJumpTable* jump_table,
@@ -135,8 +136,37 @@ void BytecodeArrayWriter::BindJumpTableEntry(BytecodeJumpTable* jump_table,
Smi::FromInt(static_cast<int>(relative_jump)));
jump_table->mark_bound(case_value);
+ StartBasicBlock();
+}
+
+void BytecodeArrayWriter::BindHandlerTarget(
+ HandlerTableBuilder* handler_table_builder, int handler_id) {
+ size_t current_offset = bytecodes()->size();
+ StartBasicBlock();
+ handler_table_builder->SetHandlerTarget(handler_id, current_offset);
+}
+
+void BytecodeArrayWriter::BindTryRegionStart(
+ HandlerTableBuilder* handler_table_builder, int handler_id) {
+ size_t current_offset = bytecodes()->size();
+ // Try blocks don't have to be in a separate basic block, but we do have to
+ // invalidate the bytecode to avoid eliding it and changing the offset.
+ InvalidateLastBytecode();
+ handler_table_builder->SetTryRegionStart(handler_id, current_offset);
+}
+
+void BytecodeArrayWriter::BindTryRegionEnd(
+ HandlerTableBuilder* handler_table_builder, int handler_id) {
+ // Try blocks don't have to be in a separate basic block, but we do have to
+ // invalidate the bytecode to avoid eliding it and changing the offset.
+ InvalidateLastBytecode();
+ size_t current_offset = bytecodes()->size();
+ handler_table_builder->SetTryRegionEnd(handler_id, current_offset);
+}
+
+void BytecodeArrayWriter::StartBasicBlock() {
InvalidateLastBytecode();
- exit_seen_in_block_ = false; // Starting a new basic block.
+ exit_seen_in_block_ = false;
}
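StartBasicBlock() is the other half of the dead-code scheme visible in Write and WriteJump above: once an unconditional exit is written, exit_seen_in_block_ stays set and every subsequent write is dropped until binding a label, loop header, or handler starts a new block. A sketch of the observable behaviour, assuming a builder and a pending forward label:

  builder.Jump(&done);       // unconditional exit: exit_seen_in_block_ = true
  builder.LoadUndefined();   // dropped, dead code after the jump
  builder.Return();          // dropped as well
  builder.Bind(&done);       // StartBasicBlock(): writes are live again
  builder.Return();          // emitted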
void BytecodeArrayWriter::UpdateSourcePositionTable(
@@ -374,50 +404,57 @@ void BytecodeArrayWriter::PatchJump(size_t jump_target, size_t jump_location) {
unbound_jumps_--;
}
+void BytecodeArrayWriter::EmitJumpLoop(BytecodeNode* node,
+ BytecodeLoopHeader* loop_header) {
+ DCHECK_EQ(node->bytecode(), Bytecode::kJumpLoop);
+ DCHECK_EQ(0u, node->operand(0));
+
+ size_t current_offset = bytecodes()->size();
+
+ CHECK_GE(current_offset, loop_header->offset());
+ CHECK_LE(current_offset, static_cast<size_t>(kMaxUInt32));
+ // The loop header has already been bound, so this is always a backwards jump.
+ uint32_t delta =
+ static_cast<uint32_t>(current_offset - loop_header->offset());
+ OperandScale operand_scale = Bytecodes::ScaleForUnsignedOperand(delta);
+ if (operand_scale > OperandScale::kSingle) {
+ // Adjust for scaling byte prefix for wide jump offset.
+ delta += 1;
+ }
+ node->update_operand0(delta);
+ EmitBytecode(node);
+}
+
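The delta arithmetic in EmitJumpLoop is worth a concrete walk-through (offsets assumed for illustration): the operand must also cover any scaling prefix byte emitted ahead of the opcode, since that byte widens the distance back to the header.

  // loop_header->offset() == 4, current_offset == 260
  // delta = 260 - 4 = 256          -> needs a 16-bit operand
  // ScaleForUnsignedOperand(256)   -> OperandScale::kDouble (> kSingle)
  // a one-byte wide prefix precedes the JumpLoop opcode, so the stored
  // operand becomes 256 + 1 = 257.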
void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
- DCHECK(Bytecodes::IsJump(node->bytecode()));
+ DCHECK(Bytecodes::IsForwardJump(node->bytecode()));
DCHECK_EQ(0u, node->operand(0));
size_t current_offset = bytecodes()->size();
- if (label->is_bound()) {
- CHECK_GE(current_offset, label->offset());
- CHECK_LE(current_offset, static_cast<size_t>(kMaxUInt32));
- // Label has been bound already so this is a backwards jump.
- uint32_t delta = static_cast<uint32_t>(current_offset - label->offset());
- OperandScale operand_scale = Bytecodes::ScaleForUnsignedOperand(delta);
- if (operand_scale > OperandScale::kSingle) {
- // Adjust for scaling byte prefix for wide jump offset.
- delta += 1;
- }
- DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
- node->update_operand0(delta);
- } else {
- // The label has not yet been bound so this is a forward reference
- // that will be patched when the label is bound. We create a
- // reservation in the constant pool so the jump can be patched
- // when the label is bound. The reservation means the maximum size
- // of the operand for the constant is known and the jump can
- // be emitted into the bytecode stream with space for the operand.
- unbound_jumps_++;
- label->set_referrer(current_offset);
- OperandSize reserved_operand_size =
- constant_array_builder()->CreateReservedEntry();
- DCHECK_NE(Bytecode::kJumpLoop, node->bytecode());
- switch (reserved_operand_size) {
- case OperandSize::kNone:
- UNREACHABLE();
- break;
- case OperandSize::kByte:
- node->update_operand0(k8BitJumpPlaceholder);
- break;
- case OperandSize::kShort:
- node->update_operand0(k16BitJumpPlaceholder);
- break;
- case OperandSize::kQuad:
- node->update_operand0(k32BitJumpPlaceholder);
- break;
- }
+ // The label has not yet been bound, so this is a forward reference
+ // that will be patched once the label is bound. We create a
+ // reservation in the constant pool so the jump can be patched at
+ // binding time; the reservation fixes the maximum operand size up
+ // front, which lets the jump be emitted into the bytecode stream
+ // with space for the operand already allocated.
+ unbound_jumps_++;
+ label->set_referrer(current_offset);
+ OperandSize reserved_operand_size =
+ constant_array_builder()->CreateReservedEntry();
+ DCHECK_NE(Bytecode::kJumpLoop, node->bytecode());
+ switch (reserved_operand_size) {
+ case OperandSize::kNone:
+ UNREACHABLE();
+ break;
+ case OperandSize::kByte:
+ node->update_operand0(k8BitJumpPlaceholder);
+ break;
+ case OperandSize::kShort:
+ node->update_operand0(k16BitJumpPlaceholder);
+ break;
+ case OperandSize::kQuad:
+ node->update_operand0(k32BitJumpPlaceholder);
+ break;
}
EmitBytecode(node);
}
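For forward jumps the mechanism is the inverse of EmitJumpLoop: the target is unknown, so EmitJump writes a placeholder operand whose width matches a reserved constant pool entry, and PatchJump later fills it in. A plausible timeline under that scheme (offsets assumed; the constant-pool fallback follows the pre-existing PatchJump path):

  // offset 20: JumpIfTrue emitted with k8BitJumpPlaceholder; a byte-sized
  //            constant pool slot is reserved via CreateReservedEntry().
  // offset 90: BindLabel() -> PatchJump(90, 20); delta = 70 fits in the
  //            8-bit operand, so it is written in place and the reserved
  //            slot is discarded. Had it not fit, the jump would switch to
  //            its constant-pool form and read the delta from that slot.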
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index e6db2fce22..d18c62a90f 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -19,9 +19,11 @@ class SourcePositionTableBuilder;
namespace interpreter {
class BytecodeLabel;
+class BytecodeLoopHeader;
class BytecodeNode;
class BytecodeJumpTable;
class ConstantArrayBuilder;
+class HandlerTableBuilder;
namespace bytecode_array_writer_unittest {
class BytecodeArrayWriterUnittest;
@@ -37,10 +39,18 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
void Write(BytecodeNode* node);
void WriteJump(BytecodeNode* node, BytecodeLabel* label);
+ void WriteJumpLoop(BytecodeNode* node, BytecodeLoopHeader* loop_header);
void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table);
void BindLabel(BytecodeLabel* label);
- void BindLabel(const BytecodeLabel& target, BytecodeLabel* label);
+ void BindLoopHeader(BytecodeLoopHeader* loop_header);
void BindJumpTableEntry(BytecodeJumpTable* jump_table, int case_value);
+ void BindHandlerTarget(HandlerTableBuilder* handler_table_builder,
+ int handler_id);
+ void BindTryRegionStart(HandlerTableBuilder* handler_table_builder,
+ int handler_id);
+ void BindTryRegionEnd(HandlerTableBuilder* handler_table_builder,
+ int handler_id);
+
Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate, int register_count,
int parameter_count,
Handle<ByteArray> handler_table);
@@ -71,6 +81,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
void EmitBytecode(const BytecodeNode* const node);
void EmitJump(BytecodeNode* node, BytecodeLabel* label);
+ void EmitJumpLoop(BytecodeNode* node, BytecodeLoopHeader* loop_header);
void EmitSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table);
void UpdateSourcePositionTable(const BytecodeNode* const node);
@@ -79,6 +90,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
void MaybeElideLastBytecode(Bytecode next_bytecode, bool has_source_info);
void InvalidateLastBytecode();
+ void StartBasicBlock();
+
ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
SourcePositionTableBuilder* source_position_table_builder() {
return &source_position_table_builder_;
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 00b1916c92..be142dbd17 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -18,6 +18,7 @@
#include "src/objects/debug-objects.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/smi.h"
+#include "src/objects/template-objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
#include "src/unoptimized-compilation-info.h"
@@ -958,8 +959,7 @@ Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
#ifdef DEBUG
// Unoptimized compilation should be context-independent. Verify that we don't
// access the native context by nulling it out during finalization.
- SaveContext save(isolate);
- isolate->set_context(Context());
+ SaveAndSwitchContext save(isolate, Context());
#endif
AllocateDeferredConstants(isolate, script);
@@ -1093,7 +1093,7 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
}
// Check that we are not falling off the end.
- DCHECK(!builder()->RequiresImplicitReturn());
+ DCHECK(builder()->RemainderOfBlockIsDead());
}
void BytecodeGenerator::GenerateBytecodeBody() {
@@ -1153,7 +1153,7 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Emit an implicit return instruction in case control flow can fall off the
// end of the function without an explicit return being present on all paths.
- if (builder()->RequiresImplicitReturn()) {
+ if (!builder()->RemainderOfBlockIsDead()) {
builder()->LoadUndefined();
BuildReturn();
}
@@ -1221,6 +1221,9 @@ void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
Variable* variable = decl->var();
+ // Unused variables don't need to be visited.
+ if (!variable->is_used()) return;
+
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
@@ -1275,6 +1278,9 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK(variable->mode() == VariableMode::kLet ||
variable->mode() == VariableMode::kVar ||
variable->mode() == VariableMode::kDynamic);
+ // Unused variables don't need to be visited.
+ if (!variable->is_used()) return;
+
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
FeedbackSlot slot =
@@ -2343,7 +2349,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForEffect(property->value());
}
} else {
- RegisterList args = register_allocator()->NewRegisterList(4);
+ RegisterList args = register_allocator()->NewRegisterList(3);
builder()->MoveRegister(literal, args[0]);
builder()->SetExpressionPosition(property->key());
@@ -2351,10 +2357,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[2]);
if (property->emit_store()) {
- builder()
- ->LoadLiteral(Smi::FromEnum(LanguageMode::kSloppy))
- .StoreAccumulatorInRegister(args[3])
- .CallRuntime(Runtime::kSetKeyedProperty, args);
+ builder()->CallRuntime(Runtime::kSetKeyedProperty, args);
Register value = args[2];
VisitSetHomeObject(value, literal, property);
}
@@ -3106,7 +3109,8 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
register_allocator()->NewRegisterList(4);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), super_property_args[0]);
+ BuildThisVariableLoad();
+ builder()->StoreAccumulatorInRegister(super_property_args[0]);
VisitForRegisterValue(super_property->home_object(),
super_property_args[1]);
builder()
@@ -3120,7 +3124,8 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
register_allocator()->NewRegisterList(4);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), super_property_args[0]);
+ BuildThisVariableLoad();
+ builder()->StoreAccumulatorInRegister(super_property_args[0]);
VisitForRegisterValue(super_property->home_object(),
super_property_args[1]);
VisitForRegisterValue(property->key(), super_property_args[2]);
@@ -3639,13 +3644,13 @@ void BytecodeGenerator::BuildAssignment(
case NAMED_SUPER_PROPERTY: {
builder()
->StoreAccumulatorInRegister(lhs_data.super_property_args()[3])
- .CallRuntime(StoreToSuperRuntimeId(), lhs_data.super_property_args());
+ .CallRuntime(Runtime::kStoreToSuper, lhs_data.super_property_args());
break;
}
case KEYED_SUPER_PROPERTY: {
builder()
->StoreAccumulatorInRegister(lhs_data.super_property_args()[3])
- .CallRuntime(StoreKeyedToSuperRuntimeId(),
+ .CallRuntime(Runtime::kStoreKeyedToSuper,
lhs_data.super_property_args());
break;
}
@@ -4163,7 +4168,8 @@ void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
RegisterList args = register_allocator()->NewRegisterList(3);
- VisitForRegisterValue(super_property->this_var(), args[0]);
+ BuildThisVariableLoad();
+ builder()->StoreAccumulatorInRegister(args[0]);
VisitForRegisterValue(super_property->home_object(), args[1]);
builder()->SetExpressionPosition(property);
@@ -4183,7 +4189,8 @@ void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
RegisterList args = register_allocator()->NewRegisterList(3);
- VisitForRegisterValue(super_property->this_var(), args[0]);
+ BuildThisVariableLoad();
+ builder()->StoreAccumulatorInRegister(args[0]);
VisitForRegisterValue(super_property->home_object(), args[1]);
VisitForRegisterValue(property->key(), args[2]);
@@ -4451,8 +4458,8 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// Default constructors don't have to do the assignment because
// 'this' isn't accessed in default constructors.
if (!IsDefaultConstructor(info()->literal()->kind())) {
- BuildVariableAssignment(super->this_var()->var(), Token::INIT,
- HoleCheckMode::kRequired);
+ Variable* var = closure_scope()->GetReceiverScope()->receiver();
+ BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kRequired);
}
// The derived constructor has the correct bit set always, so we
@@ -4582,7 +4589,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* unary) {
Register object = VisitForRegisterValue(property->obj());
VisitForAccumulatorValue(property->key());
builder()->Delete(object, language_mode());
- } else if (expr->IsVariableProxy() && !expr->AsVariableProxy()->is_this() &&
+ } else if (expr->IsVariableProxy() &&
!expr->AsVariableProxy()->is_new_target()) {
// Delete of an unqualified identifier is allowed in sloppy mode but is
// not allowed in strict mode.
@@ -4665,7 +4672,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
RegisterList load_super_args = super_property_args.Truncate(3);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), load_super_args[0]);
+ BuildThisVariableLoad();
+ builder()->StoreAccumulatorInRegister(load_super_args[0]);
VisitForRegisterValue(super_property->home_object(), load_super_args[1]);
builder()
->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
@@ -4678,7 +4686,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
RegisterList load_super_args = super_property_args.Truncate(3);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), load_super_args[0]);
+ BuildThisVariableLoad();
+ builder()->StoreAccumulatorInRegister(load_super_args[0]);
VisitForRegisterValue(super_property->home_object(), load_super_args[1]);
VisitForRegisterValue(property->key(), load_super_args[2]);
builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, load_super_args);
@@ -4741,13 +4750,13 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_SUPER_PROPERTY: {
builder()
->StoreAccumulatorInRegister(super_property_args[3])
- .CallRuntime(StoreToSuperRuntimeId(), super_property_args);
+ .CallRuntime(Runtime::kStoreToSuper, super_property_args);
break;
}
case KEYED_SUPER_PROPERTY: {
builder()
->StoreAccumulatorInRegister(super_property_args[3])
- .CallRuntime(StoreKeyedToSuperRuntimeId(), super_property_args);
+ .CallRuntime(Runtime::kStoreKeyedToSuper, super_property_args);
break;
}
}
@@ -4841,15 +4850,15 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
builder()->SetExpressionPosition(expr);
+ FeedbackSlot slot;
if (expr->op() == Token::IN) {
- builder()->CompareOperation(expr->op(), lhs);
+ slot = feedback_spec()->AddKeyedHasICSlot();
} else if (expr->op() == Token::INSTANCEOF) {
- FeedbackSlot slot = feedback_spec()->AddInstanceOfSlot();
- builder()->CompareOperation(expr->op(), lhs, feedback_index(slot));
+ slot = feedback_spec()->AddInstanceOfSlot();
} else {
- FeedbackSlot slot = feedback_spec()->AddCompareICSlot();
- builder()->CompareOperation(expr->op(), lhs, feedback_index(slot));
+ slot = feedback_spec()->AddCompareICSlot();
}
+ builder()->CompareOperation(expr->op(), lhs, feedback_index(slot));
}
// Always returns a boolean value.
execution_result()->SetResultIsBoolean();
@@ -5132,8 +5141,19 @@ void BytecodeGenerator::VisitTemplateLiteral(TemplateLiteral* expr) {
}
}
-void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
- builder()->LoadAccumulatorWithRegister(Register::function_closure());
+void BytecodeGenerator::BuildThisVariableLoad() {
+ DeclarationScope* receiver_scope = closure_scope()->GetReceiverScope();
+ Variable* var = receiver_scope->receiver();
+ // TODO(littledan): implement 'this' hole check elimination.
+ HoleCheckMode hole_check_mode =
+ IsDerivedConstructor(receiver_scope->function_kind())
+ ? HoleCheckMode::kRequired
+ : HoleCheckMode::kElided;
+ BuildVariableLoad(var, hole_check_mode);
+}
+
+void BytecodeGenerator::VisitThisExpression(ThisExpression* expr) {
+ BuildThisVariableLoad();
}
void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
@@ -5854,16 +5874,6 @@ FeedbackSlot BytecodeGenerator::GetDummyCompareICSlot() {
return dummy_feedback_slot_.Get();
}
-Runtime::FunctionId BytecodeGenerator::StoreToSuperRuntimeId() {
- return is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy;
-}
-
-Runtime::FunctionId BytecodeGenerator::StoreKeyedToSuperRuntimeId() {
- return is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy;
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index a5c573f7ff..045567b1ac 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -7,6 +7,7 @@
#include "src/ast/ast.h"
#include "src/feedback-vector.h"
+#include "src/function-kind.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register.h"
@@ -203,6 +204,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildAssignment(const AssignmentLhsData& data, Token::Value op,
LookupHoistingMode lookup_hoisting_mode);
+ void BuildThisVariableLoad();
+
Expression* GetDestructuringDefaultValue(Expression** target);
void BuildDestructuringArrayAssignment(
ArrayLiteral* pattern, Token::Value op,
diff --git a/deps/v8/src/interpreter/bytecode-label.cc b/deps/v8/src/interpreter/bytecode-label.cc
index da607a2927..df49b03bd4 100644
--- a/deps/v8/src/interpreter/bytecode-label.cc
+++ b/deps/v8/src/interpreter/bytecode-label.cc
@@ -18,18 +18,13 @@ BytecodeLabel* BytecodeLabels::New() {
}
void BytecodeLabels::Bind(BytecodeArrayBuilder* builder) {
+ DCHECK(!is_bound_);
+ is_bound_ = true;
for (auto& label : labels_) {
builder->Bind(&label);
}
}
-void BytecodeLabels::BindToLabel(BytecodeArrayBuilder* builder,
- const BytecodeLabel& target) {
- for (auto& label : labels_) {
- builder->Bind(target, &label);
- }
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-label.h b/deps/v8/src/interpreter/bytecode-label.h
index 9622c1513e..4581f4f4e2 100644
--- a/deps/v8/src/interpreter/bytecode-label.h
+++ b/deps/v8/src/interpreter/bytecode-label.h
@@ -15,42 +15,67 @@ namespace interpreter {
class BytecodeArrayBuilder;
-// A label representing a branch target in a bytecode array. When a
-// label is bound, it represents a known position in the bytecode
-// array. For labels that are forward references there can be at most
-// one reference whilst it is unbound.
-class V8_EXPORT_PRIVATE BytecodeLabel final {
+// A label representing a loop header in a bytecode array. It is bound before
+// the jump is seen, so its position is always known by the time the jump is
+// reached.
+class V8_EXPORT_PRIVATE BytecodeLoopHeader final {
public:
- BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
+ BytecodeLoopHeader() : offset_(kInvalidOffset) {}
- bool is_bound() const { return bound_; }
- size_t offset() const { return offset_; }
+ size_t offset() const {
+ DCHECK_NE(offset_, kInvalidOffset);
+ return offset_;
+ }
private:
static const size_t kInvalidOffset = static_cast<size_t>(-1);
void bind_to(size_t offset) {
- DCHECK(!bound_ && offset != kInvalidOffset);
+ DCHECK_NE(offset, kInvalidOffset);
+ DCHECK_EQ(offset_, kInvalidOffset);
offset_ = offset;
- bound_ = true;
}
- void set_referrer(size_t offset) {
- DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
- offset_ = offset;
+ // The bytecode offset of the loop header.
+ size_t offset_;
+
+ friend class BytecodeArrayWriter;
+};
+
+// A label representing a forward branch target in a bytecode array. When a
+// label is bound, it represents a known position in the bytecode array. A label
+// can have at most one referrer jump.
+class V8_EXPORT_PRIVATE BytecodeLabel final {
+ public:
+ BytecodeLabel() : bound_(false), jump_offset_(kInvalidOffset) {}
+
+ bool is_bound() const { return bound_; }
+ size_t jump_offset() const {
+ DCHECK_NE(jump_offset_, kInvalidOffset);
+ return jump_offset_;
+ }
+
+ bool has_referrer_jump() const { return jump_offset_ != kInvalidOffset; }
+
+ private:
+ static const size_t kInvalidOffset = static_cast<size_t>(-1);
+
+ void bind() {
+ DCHECK(!bound_);
+ bound_ = true;
}
- bool is_forward_target() const {
- return offset() != kInvalidOffset && !is_bound();
+ void set_referrer(size_t offset) {
+ DCHECK(!bound_);
+ DCHECK_NE(offset, kInvalidOffset);
+ DCHECK_EQ(jump_offset_, kInvalidOffset);
+ jump_offset_ = offset;
}
- // There are three states for a label:
- // bound_ offset_
- // UNSET false kInvalidOffset
- // FORWARD_TARGET false Offset of referring jump
- // BACKWARD_TARGET true Offset of label in bytecode array when bound
+ // Set when the label is bound (i.e. the start of the target basic block).
bool bound_;
- size_t offset_;
+ // Set when the jump referrer is set (i.e. the location of the jump).
+ size_t jump_offset_;
friend class BytecodeArrayWriter;
};
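After this split the two directions of control flow use different types: a BytecodeLoopHeader is bound before any jump refers to it, while a BytecodeLabel records its single forward referrer and is patched when bound. A usage sketch against the builder API from this patch:

  // Backward edge: the header is bound first, the jump comes later.
  BytecodeLoopHeader header;
  builder.Bind(&header);
  // ... loop body ...
  builder.JumpLoop(&header, /*loop_depth=*/0);

  // Forward edge: the jump is emitted first, the label bound later.
  BytecodeLabel done;
  builder.Jump(&done);
  // ... skipped code ...
  builder.Bind(&done);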
@@ -58,26 +83,26 @@ class V8_EXPORT_PRIVATE BytecodeLabel final {
// Class representing a branch target of multiple jumps.
class V8_EXPORT_PRIVATE BytecodeLabels {
public:
- explicit BytecodeLabels(Zone* zone) : labels_(zone) {}
+ explicit BytecodeLabels(Zone* zone) : labels_(zone), is_bound_(false) {}
BytecodeLabel* New();
void Bind(BytecodeArrayBuilder* builder);
- void BindToLabel(BytecodeArrayBuilder* builder, const BytecodeLabel& target);
-
bool is_bound() const {
- bool is_bound = !labels_.empty() && labels_.front().is_bound();
- DCHECK(!is_bound ||
- std::all_of(labels_.begin(), labels_.end(),
- [](const BytecodeLabel& l) { return l.is_bound(); }));
- return is_bound;
+ DCHECK_IMPLIES(
+ is_bound_,
+ std::all_of(labels_.begin(), labels_.end(), [](const BytecodeLabel& l) {
+ return !l.has_referrer_jump() || l.is_bound();
+ }));
+ return is_bound_;
}
bool empty() const { return labels_.empty(); }
private:
ZoneLinkedList<BytecodeLabel> labels_;
+ bool is_bound_;
DISALLOW_COPY_AND_ASSIGN(BytecodeLabels);
};
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index 7ba7d3b602..b120741872 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -60,6 +60,7 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
// Materialize all live registers and flush equivalence sets.
void Flush();
+ bool EnsureAllRegistersAreFlushed() const;
// Prepares for |bytecode|.
template <Bytecode bytecode, AccumulatorUse accumulator_use>
@@ -132,8 +133,6 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
RegisterInfo* non_set_member);
void PushToRegistersNeedingFlush(RegisterInfo* reg);
- bool EnsureAllRegistersAreFlushed() const;
-
// Methods for finding and creating metadata for each register.
RegisterInfo* GetRegisterInfo(Register reg) {
size_t index = GetRegisterInfoTableIndex(reg);
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index ae8bbe4275..ca76fcfec4 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -87,7 +87,7 @@ class V8_EXPORT_PRIVATE Register final {
}
private:
- DISALLOW_NEW_AND_DELETE();
+ DISALLOW_NEW_AND_DELETE()
static const int kInvalidIndex = kMaxInt;
static const int kRegisterFileStartOffset =
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 7efcd1ae62..f9713ef79b 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -235,7 +235,7 @@ namespace interpreter {
V(TestReferenceEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx) \
- V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
V(TestUndetectable, AccumulatorUse::kReadWrite) \
V(TestNull, AccumulatorUse::kReadWrite) \
V(TestUndefined, AccumulatorUse::kReadWrite) \
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 6b1bdc3424..8eb44069f6 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -70,7 +70,6 @@ void LoopBuilder::JumpToHeader(int loop_depth) {
int level = Min(loop_depth, AbstractCode::kMaxLoopNestingMarker - 1);
// Loop must have closed form, i.e. all loop elements are within the loop,
// the loop header precedes the body and next elements in the loop.
- DCHECK(loop_header_.is_bound());
builder()->JumpLoop(&loop_header_, level);
}
@@ -79,7 +78,7 @@ void LoopBuilder::BindContinueTarget() { continue_labels_.Bind(builder()); }
SwitchBuilder::~SwitchBuilder() {
#ifdef DEBUG
for (auto site : case_sites_) {
- DCHECK(site.is_bound());
+ DCHECK(!site.has_referrer_jump() || site.is_bound());
}
#endif
}
@@ -108,7 +107,6 @@ void TryCatchBuilder::BeginTry(Register context) {
void TryCatchBuilder::EndTry() {
builder()->MarkTryEnd(handler_id_);
builder()->Jump(&exit_);
- builder()->Bind(&handler_);
builder()->MarkHandler(handler_id_, catch_prediction_);
if (block_coverage_builder_ != nullptr) {
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 8359f0d1ee..d4f2d11e7c 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -121,7 +121,7 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
void ContinueIfNull() { EmitJumpIfNull(&continue_labels_); }
private:
- BytecodeLabel loop_header_;
+ BytecodeLoopHeader loop_header_;
// Unbound labels that identify jumps for continue statements in the code and
// jumps from checking the loop condition to the header for do-while loops.
@@ -188,7 +188,6 @@ class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
private:
int handler_id_;
HandlerTable::CatchPrediction catch_prediction_;
- BytecodeLabel handler_;
BytecodeLabel exit_;
BlockCoverageBuilder* block_coverage_builder_;
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index dadfaa8783..903300a6e1 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -236,18 +236,19 @@ Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
- return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index), LoadSensitivity::kCritical);
+ return LoadFullTagged(GetInterpretedFramePointer(),
+ RegisterFrameOffset(reg_index),
+ LoadSensitivity::kCritical);
}
Node* InterpreterAssembler::LoadRegister(Register reg) {
- return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
- IntPtrConstant(reg.ToOperand() << kSystemPointerSizeLog2));
+ return LoadFullTagged(GetInterpretedFramePointer(),
+ IntPtrConstant(reg.ToOperand() * kSystemPointerSize));
}
Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
return LoadAndUntagSmi(GetInterpretedFramePointer(),
- reg.ToOperand() << kSystemPointerSizeLog2);
+ reg.ToOperand() * kSystemPointerSize);
}
Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
@@ -282,7 +283,7 @@ Node* InterpreterAssembler::LoadRegisterFromRegisterList(
const RegListNodePair& reg_list, int index) {
Node* location = RegisterLocationInRegisterList(reg_list, index);
// Location is already poisoned on speculation, so no need to poison here.
- return Load(MachineType::AnyTagged(), location);
+ return LoadFullTagged(location);
}
Node* InterpreterAssembler::RegisterLocationInRegisterList(
@@ -296,19 +297,18 @@ Node* InterpreterAssembler::RegisterLocationInRegisterList(
}
void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
- StoreNoWriteBarrier(
- MachineRepresentation::kTagged, GetInterpretedFramePointer(),
- IntPtrConstant(reg.ToOperand() << kSystemPointerSizeLog2), value);
+ StoreFullTaggedNoWriteBarrier(
+ GetInterpretedFramePointer(),
+ IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value);
}
void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged,
- GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index), value);
+ StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(),
+ RegisterFrameOffset(reg_index), value);
}
void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
- int offset = reg.ToOperand() << kSystemPointerSizeLog2;
+ int offset = reg.ToOperand() * kSystemPointerSize;
StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
}
@@ -648,8 +648,8 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
TNode<FixedArray> constant_pool = CAST(LoadObjectField(
BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
- return LoadFixedArrayElement(constant_pool, UncheckedCast<IntPtrT>(index),
- LoadSensitivity::kCritical);
+ return UnsafeLoadFixedArrayElement(
+ constant_pool, UncheckedCast<IntPtrT>(index), LoadSensitivity::kCritical);
}
Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
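
Several hunks above replace reg.ToOperand() << kSystemPointerSizeLog2 with reg.ToOperand() * kSystemPointerSize. For a power-of-two pointer size the two forms yield identical offsets, so the multiply is purely a readability change that a compiler lowers back to a shift. A self-contained check, assuming a 64-bit target:

#include <cassert>
#include <cstdint>

constexpr intptr_t kSystemPointerSize = 8;      // assumption: 64-bit target
constexpr intptr_t kSystemPointerSizeLog2 = 3;
static_assert(kSystemPointerSize == (intptr_t{1} << kSystemPointerSizeLog2),
              "the rewrite is an identity only for power-of-two sizes");

int main() {
  for (intptr_t operand = 0; operand <= 64; ++operand) {
    assert((operand << kSystemPointerSizeLog2) ==
           operand * kSystemPointerSize);
  }
}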
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 0ac2146731..e1e3181357 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -176,20 +176,10 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
return CAST(name);
};
- Label miss(this, Label::kDeferred);
ParameterMode slot_mode = CodeStubAssembler::INTPTR_PARAMETERS;
- GotoIf(IsUndefined(maybe_feedback_vector), &miss);
- accessor_asm.LoadGlobalIC(CAST(maybe_feedback_vector), feedback_slot,
+ accessor_asm.LoadGlobalIC(maybe_feedback_vector, feedback_slot,
lazy_context, lazy_name, typeof_mode, &exit_point,
slot_mode);
-
- BIND(&miss);
- {
- exit_point.ReturnCallRuntime(
- Runtime::kLoadGlobalIC_Miss, lazy_context(), lazy_name(),
- ParameterToTagged(feedback_slot, slot_mode), maybe_feedback_vector,
- SmiConstant(typeof_mode));
- }
}
};
@@ -237,10 +227,7 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
Goto(&end);
Bind(&no_feedback);
- TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
- Node* language_mode = GetLanguageMode(closure, context);
- CallRuntime(Runtime::kStoreGlobalICNoFeedback_Miss, context, value, name,
- language_mode);
+ CallRuntime(Runtime::kStoreGlobalICNoFeedback_Miss, context, value, name);
Goto(&end);
Bind(&end);
@@ -571,22 +558,9 @@ IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Label no_feedback(this, Label::kDeferred), end(this);
VARIABLE(var_result, MachineRepresentation::kTagged);
- GotoIf(IsUndefined(feedback_vector), &no_feedback);
var_result.Bind(CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
smi_slot, feedback_vector));
- Goto(&end);
-
- BIND(&no_feedback);
- {
- Comment("KeyedLoadIC_no_feedback");
- var_result.Bind(CallRuntime(Runtime::kKeyedLoadIC_Miss, context, object,
- name, smi_slot, feedback_vector));
- Goto(&end);
- }
-
- BIND(&end);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -609,22 +583,8 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
Node* context = GetContext();
VARIABLE(var_result, MachineRepresentation::kTagged);
- Label no_feedback(this, Label::kDeferred), end(this);
- GotoIf(IsUndefined(maybe_vector), &no_feedback);
var_result.Bind(CallStub(ic.descriptor(), code_target, context, object,
name, value, smi_slot, maybe_vector));
- Goto(&end);
-
- Bind(&no_feedback);
- TNode<JSFunction> closure =
- CAST(LoadRegister(Register::function_closure()));
- Node* language_mode = GetLanguageMode(closure, context);
- var_result.Bind(CallRuntime(Runtime::kStoreICNoFeedback_Miss, context,
- value, object, name, language_mode,
- SmiConstant(property_type)));
- Goto(&end);
-
- Bind(&end);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -664,11 +624,10 @@ IGNITION_HANDLER(StaNamedPropertyNoFeedback,
Node* object = LoadRegisterAtOperandIndex(0);
Node* name = LoadConstantPoolEntryAtOperandIndex(1);
Node* value = GetAccumulator();
- Node* language_mode = SmiFromInt32(BytecodeOperandFlag(2));
Node* context = GetContext();
- Node* result = CallRuntime(Runtime::kSetNamedProperty, context, object, name,
- value, language_mode);
+ Node* result =
+ CallRuntime(Runtime::kSetNamedProperty, context, object, name, value);
SetAccumulator(result);
Dispatch();
}
@@ -686,22 +645,9 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
Node* maybe_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Label no_feedback(this, Label::kDeferred), end(this);
VARIABLE(var_result, MachineRepresentation::kTagged);
- GotoIf(IsUndefined(maybe_vector), &no_feedback);
-
var_result.Bind(CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
value, smi_slot, maybe_vector));
- Goto(&end);
-
- Bind(&no_feedback);
- TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
- Node* language_mode = GetLanguageMode(closure, context);
- var_result.Bind(CallRuntime(Runtime::kKeyedStoreICNoFeedback_Miss, context,
- value, object, name, language_mode));
- Goto(&end);
-
- Bind(&end);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -725,19 +671,8 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
Node* context = GetContext();
VARIABLE(var_result, MachineRepresentation::kTagged);
- Label no_feedback(this, Label::kDeferred), end(this);
- GotoIf(IsUndefined(feedback_vector), &no_feedback);
-
var_result.Bind(CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
index, value, smi_slot, feedback_vector));
- Goto(&end);
-
- BIND(&no_feedback);
- var_result.Bind(CallRuntime(Runtime::kStoreInArrayLiteralIC_Miss, context,
- value, smi_slot, feedback_vector, array, index));
- Goto(&end);
-
- BIND(&end);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -1941,16 +1876,22 @@ IGNITION_HANDLER(TestReferenceEqual, InterpreterAssembler) {
Dispatch();
}
-// TestIn <src>
+// TestIn <src> <feedback_slot>
//
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
IGNITION_HANDLER(TestIn, InterpreterAssembler) {
- Node* property = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadRegisterAtOperandIndex(0);
Node* object = GetAccumulator();
+ Node* raw_slot = BytecodeOperandIdx(1);
+ Node* smi_slot = SmiTag(raw_slot);
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- SetAccumulator(HasProperty(context, object, property, kHasProperty));
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ var_result.Bind(CallBuiltin(Builtins::kKeyedHasIC, context, object, name,
+ smi_slot, feedback_vector));
+ SetAccumulator(var_result.value());
Dispatch();
}
@@ -2463,22 +2404,10 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
Node* context = GetContext();
VARIABLE(result, MachineRepresentation::kTagged);
- Label no_feedback(this, Label::kDeferred), end(this);
- GotoIf(IsUndefined(feedback_vector), &no_feedback);
ConstructorBuiltinsAssembler constructor_assembler(state());
result.Bind(constructor_assembler.EmitCreateRegExpLiteral(
feedback_vector, slot_id, pattern, flags, context));
- Goto(&end);
-
- BIND(&no_feedback);
- {
- result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context,
- feedback_vector, SmiTag(slot_id), pattern, flags));
- Goto(&end);
- }
-
- BIND(&end);
SetAccumulator(result.value());
Dispatch();
}
@@ -2644,18 +2573,8 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
Node* context = GetContext();
Variable var_result(this, MachineRepresentation::kTagged);
- Label no_feedback(this), end(this);
- GotoIf(IsUndefined(maybe_feedback_vector), &no_feedback);
var_result.Bind(CallBuiltin(Builtins::kCloneObjectIC, context, source,
smi_flags, smi_slot, maybe_feedback_vector));
- Goto(&end);
-
- BIND(&no_feedback);
- var_result.Bind(CallRuntime(Runtime::kCloneObjectIC_Miss, context, source,
- smi_flags, smi_slot, maybe_feedback_vector));
- Goto(&end);
-
- BIND(&end);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -2683,9 +2602,13 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
BIND(&call_runtime);
{
Node* description = LoadConstantPoolEntryAtOperandIndex(0);
+ Node* slot_smi = SmiTag(slot);
+ Node* closure = LoadRegister(Register::function_closure());
+ Node* shared_info =
+ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
Node* context = GetContext();
- Node* result =
- CallRuntime(Runtime::kCreateTemplateObject, context, description);
+ Node* result = CallRuntime(Runtime::kGetTemplateObject, context,
+ description, shared_info, slot_smi);
Label end(this);
GotoIf(IsUndefined(feedback_vector), &end);
@@ -2906,8 +2829,7 @@ IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) {
ExternalReference::address_of_pending_message_obj(isolate()));
Node* previous_message = Load(MachineType::TaggedPointer(), pending_message);
Node* new_message = GetAccumulator();
- StoreNoWriteBarrier(MachineRepresentation::kTaggedPointer, pending_message,
- new_message);
+ StoreFullTaggedNoWriteBarrier(pending_message, new_message);
SetAccumulator(previous_message);
Dispatch();
}
@@ -2967,7 +2889,8 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
BIND(&throw_error);
{
Node* name = LoadConstantPoolEntryAtOperandIndex(0);
- CallRuntime(Runtime::kThrowReferenceError, GetContext(), name);
+ CallRuntime(Runtime::kThrowAccessedUninitializedVariable, GetContext(),
+ name);
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
Unreachable();
@@ -3037,7 +2960,7 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
SetAccumulator(return_value); \
DispatchToBytecode(original_bytecode, BytecodeOffset()); \
}
-DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
#undef DEBUG_BREAK
// IncBlockCounter <slot>
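
The refactor repeated throughout interpreter-generator.cc above is uniform: each handler's deferred no_feedback branch disappears because the called IC builtin now tolerates an undefined feedback vector and performs its own runtime fallback. A hedged plain-C++ analogue of that shape, with Lookup and SlowLookup as illustrative stand-ins rather than V8 functions:

#include <optional>
#include <string>

// Before: every call site branched on "no feedback" and invoked the slow
// path itself. After: the callee owns the fallback, so call sites collapse
// to one unconditional call.
static std::string SlowLookup(const std::string& name) {
  return "slow:" + name;
}

static std::string Lookup(const std::optional<std::string>& feedback,
                          const std::string& name) {
  if (!feedback) return SlowLookup(name);  // fallback lives here now
  return *feedback + ":" + name;
}

int main() {
  (void)Lookup(std::nullopt, "x");         // no feedback: slow path inside
  (void)Lookup(std::string("fast"), "x");  // feedback present: fast path
}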
diff --git a/deps/v8/src/isolate-data.h b/deps/v8/src/isolate-data.h
index 269d0883f9..b804aa269e 100644
--- a/deps/v8/src/isolate-data.h
+++ b/deps/v8/src/isolate-data.h
@@ -9,6 +9,7 @@
#include "src/constants-arch.h"
#include "src/external-reference-table.h"
#include "src/roots.h"
+#include "src/thread-local-top.h"
#include "src/utils.h"
namespace v8 {
@@ -91,6 +92,9 @@ class IsolateData final {
return (address - start) < sizeof(*this);
}
+ ThreadLocalTop& thread_local_top() { return thread_local_top_; }
+ ThreadLocalTop const& thread_local_top() const { return thread_local_top_; }
+
RootsTable& roots() { return roots_; }
const RootsTable& roots() const { return roots_; }
@@ -110,6 +114,7 @@ class IsolateData final {
V(kExternalMemoryAtLastMarkCompactOffset, kInt64Size) \
V(kRootsTableOffset, RootsTable::kEntriesCount* kSystemPointerSize) \
V(kExternalReferenceTableOffset, ExternalReferenceTable::kSizeInBytes) \
+ V(kThreadLocalTopOffset, ThreadLocalTop::kSizeInBytes) \
V(kBuiltinEntryTableOffset, Builtins::builtin_count* kSystemPointerSize) \
V(kBuiltinsTableOffset, Builtins::builtin_count* kSystemPointerSize) \
V(kVirtualCallTargetRegisterOffset, kSystemPointerSize) \
@@ -145,6 +150,8 @@ class IsolateData final {
ExternalReferenceTable external_reference_table_;
+ ThreadLocalTop thread_local_top_;
+
// The entry points for all builtins. This corresponds to
// Code::InstructionStart() for each Code object in the builtins table below.
// The entry table is in IsolateData for easy access through kRootRegister.
@@ -190,11 +197,14 @@ class IsolateData final {
// actual V8 code.
void IsolateData::AssertPredictableLayout() {
STATIC_ASSERT(std::is_standard_layout<RootsTable>::value);
+ STATIC_ASSERT(std::is_standard_layout<ThreadLocalTop>::value);
STATIC_ASSERT(std::is_standard_layout<ExternalReferenceTable>::value);
STATIC_ASSERT(std::is_standard_layout<IsolateData>::value);
STATIC_ASSERT(offsetof(IsolateData, roots_) == kRootsTableOffset);
STATIC_ASSERT(offsetof(IsolateData, external_reference_table_) ==
kExternalReferenceTableOffset);
+ STATIC_ASSERT(offsetof(IsolateData, thread_local_top_) ==
+ kThreadLocalTopOffset);
STATIC_ASSERT(offsetof(IsolateData, builtins_) == kBuiltinsTableOffset);
STATIC_ASSERT(offsetof(IsolateData, virtual_call_target_register_) ==
kVirtualCallTargetRegisterOffset);
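
The hunk above embeds ThreadLocalTop directly inside IsolateData and pins its offset with STATIC_ASSERTs, so generated code can reach it at a fixed displacement from the root register. A compilable sketch of that standard-layout/offsetof contract, using simplified stand-in types:

#include <cstddef>
#include <type_traits>

struct ThreadLocalTopLike {
  void* context;
  void* pending_exception;
};

struct IsolateDataLike {
  long roots[2];                        // stand-in for the roots table
  ThreadLocalTopLike thread_local_top;  // reachable at a fixed offset
};

// Standard layout is what makes the offsetof below well-defined, and the
// asserted offset is effectively part of the code generator's ABI.
static_assert(std::is_standard_layout<IsolateDataLike>::value,
              "layout must stay predictable");
static_assert(offsetof(IsolateDataLike, thread_local_top) == 2 * sizeof(long),
              "offset is baked into generated code");

int main() {}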
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index bc70d3dead..552abdd31c 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -5,11 +5,13 @@
#ifndef V8_ISOLATE_INL_H_
#define V8_ISOLATE_INL_H_
-#include "src/heap/heap-inl.h" // Need MemoryChunk from heap/spaces.h
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/cell-inl.h"
+#include "src/objects/oddball.h"
+#include "src/objects/property-cell.h"
#include "src/objects/regexp-match-info.h"
+#include "src/objects/shared-function-info.h"
namespace v8 {
namespace internal {
@@ -18,19 +20,9 @@ IsolateAllocationMode Isolate::isolate_allocation_mode() {
return isolate_allocator_->mode();
}
-bool Isolate::FromWritableHeapObject(HeapObject obj, Isolate** isolate) {
- i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(obj);
- if (chunk->owner()->identity() == i::RO_SPACE) {
- *isolate = nullptr;
- return false;
- }
- *isolate = chunk->heap()->isolate();
- return true;
-}
-
void Isolate::set_context(Context context) {
DCHECK(context.is_null() || context->IsContext());
- thread_local_top_.context_ = context;
+ thread_local_top()->context_ = context;
}
Handle<NativeContext> Isolate::native_context() {
@@ -43,47 +35,49 @@ NativeContext Isolate::raw_native_context() {
Object Isolate::pending_exception() {
DCHECK(has_pending_exception());
- DCHECK(!thread_local_top_.pending_exception_->IsException(this));
- return thread_local_top_.pending_exception_;
+ DCHECK(!thread_local_top()->pending_exception_->IsException(this));
+ return thread_local_top()->pending_exception_;
}
void Isolate::set_pending_exception(Object exception_obj) {
DCHECK(!exception_obj->IsException(this));
- thread_local_top_.pending_exception_ = exception_obj;
+ thread_local_top()->pending_exception_ = exception_obj;
}
void Isolate::clear_pending_exception() {
- DCHECK(!thread_local_top_.pending_exception_->IsException(this));
- thread_local_top_.pending_exception_ = ReadOnlyRoots(this).the_hole_value();
+ DCHECK(!thread_local_top()->pending_exception_->IsException(this));
+ thread_local_top()->pending_exception_ = ReadOnlyRoots(this).the_hole_value();
}
bool Isolate::has_pending_exception() {
- DCHECK(!thread_local_top_.pending_exception_->IsException(this));
- return !thread_local_top_.pending_exception_->IsTheHole(this);
+ DCHECK(!thread_local_top()->pending_exception_->IsException(this));
+ return !thread_local_top()->pending_exception_->IsTheHole(this);
}
void Isolate::clear_pending_message() {
- thread_local_top_.pending_message_obj_ = ReadOnlyRoots(this).the_hole_value();
+ thread_local_top()->pending_message_obj_ =
+ ReadOnlyRoots(this).the_hole_value();
}
Object Isolate::scheduled_exception() {
DCHECK(has_scheduled_exception());
- DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
- return thread_local_top_.scheduled_exception_;
+ DCHECK(!thread_local_top()->scheduled_exception_->IsException(this));
+ return thread_local_top()->scheduled_exception_;
}
bool Isolate::has_scheduled_exception() {
- DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
- return thread_local_top_.scheduled_exception_ !=
+ DCHECK(!thread_local_top()->scheduled_exception_->IsException(this));
+ return thread_local_top()->scheduled_exception_ !=
ReadOnlyRoots(this).the_hole_value();
}
void Isolate::clear_scheduled_exception() {
- DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
- thread_local_top_.scheduled_exception_ = ReadOnlyRoots(this).the_hole_value();
+ DCHECK(!thread_local_top()->scheduled_exception_->IsException(this));
+ thread_local_top()->scheduled_exception_ =
+ ReadOnlyRoots(this).the_hole_value();
}
bool Isolate::is_catchable_by_javascript(Object exception) {
@@ -125,7 +119,8 @@ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
bool Isolate::IsArrayConstructorIntact() {
- Cell array_constructor_cell = heap()->array_constructor_protector();
+ Cell array_constructor_cell =
+ Cell::cast(root(RootIndex::kArrayConstructorProtector));
return array_constructor_cell->value() == Smi::FromInt(kProtectorValid);
}
@@ -142,56 +137,65 @@ bool Isolate::IsArraySpeciesLookupChainIntact() {
// done here. In place, there are mjsunit tests harmony/array-species* which
// ensure that behavior is correct in various invalid protector cases.
- PropertyCell species_cell = heap()->array_species_protector();
+ PropertyCell species_cell =
+ PropertyCell::cast(root(RootIndex::kArraySpeciesProtector));
return species_cell->value()->IsSmi() &&
Smi::ToInt(species_cell->value()) == kProtectorValid;
}
bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
- PropertyCell species_cell = heap()->typed_array_species_protector();
+ PropertyCell species_cell =
+ PropertyCell::cast(root(RootIndex::kTypedArraySpeciesProtector));
return species_cell->value()->IsSmi() &&
Smi::ToInt(species_cell->value()) == kProtectorValid;
}
bool Isolate::IsRegExpSpeciesLookupChainIntact() {
- PropertyCell species_cell = heap()->regexp_species_protector();
+ PropertyCell species_cell =
+ PropertyCell::cast(root(RootIndex::kRegExpSpeciesProtector));
return species_cell->value()->IsSmi() &&
Smi::ToInt(species_cell->value()) == kProtectorValid;
}
bool Isolate::IsPromiseSpeciesLookupChainIntact() {
- PropertyCell species_cell = heap()->promise_species_protector();
+ PropertyCell species_cell =
+ PropertyCell::cast(root(RootIndex::kPromiseSpeciesProtector));
return species_cell->value()->IsSmi() &&
Smi::ToInt(species_cell->value()) == kProtectorValid;
}
bool Isolate::IsStringLengthOverflowIntact() {
- Cell string_length_cell = heap()->string_length_protector();
+ Cell string_length_cell = Cell::cast(root(RootIndex::kStringLengthProtector));
return string_length_cell->value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsArrayBufferDetachingIntact() {
- PropertyCell buffer_detaching = heap()->array_buffer_detaching_protector();
+ PropertyCell buffer_detaching =
+ PropertyCell::cast(root(RootIndex::kArrayBufferDetachingProtector));
return buffer_detaching->value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsArrayIteratorLookupChainIntact() {
- PropertyCell array_iterator_cell = heap()->array_iterator_protector();
+ PropertyCell array_iterator_cell =
+ PropertyCell::cast(root(RootIndex::kArrayIteratorProtector));
return array_iterator_cell->value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsMapIteratorLookupChainIntact() {
- PropertyCell map_iterator_cell = heap()->map_iterator_protector();
+ PropertyCell map_iterator_cell =
+ PropertyCell::cast(root(RootIndex::kMapIteratorProtector));
return map_iterator_cell->value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsSetIteratorLookupChainIntact() {
- PropertyCell set_iterator_cell = heap()->set_iterator_protector();
+ PropertyCell set_iterator_cell =
+ PropertyCell::cast(root(RootIndex::kSetIteratorProtector));
return set_iterator_cell->value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsStringIteratorLookupChainIntact() {
- PropertyCell string_iterator_cell = heap()->string_iterator_protector();
+ PropertyCell string_iterator_cell =
+ PropertyCell::cast(root(RootIndex::kStringIteratorProtector));
return string_iterator_cell->value() == Smi::FromInt(kProtectorValid);
}
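
Every accessor in this hunk now reads its protector cell straight from the root table via a RootIndex instead of a dedicated Heap accessor; the check itself remains one indexed load plus a Smi compare. A self-contained sketch of the protector idiom, with simplified stand-ins for Cell and the roots table:

#include <array>
#include <cassert>

enum RootIndex { kArraySpeciesProtector, kStringLengthProtector, kRootCount };
constexpr int kProtectorValid = 1;
constexpr int kProtectorInvalid = 0;

struct Cell {
  int value = kProtectorValid;  // stays valid until user code breaks it
};

struct RootsTable {
  std::array<Cell, kRootCount> cells{};
  const Cell& root(RootIndex index) const { return cells[index]; }
};

bool IsArraySpeciesLookupChainIntact(const RootsTable& roots) {
  return roots.root(kArraySpeciesProtector).value == kProtectorValid;
}

int main() {
  RootsTable roots;
  assert(IsArraySpeciesLookupChainIntact(roots));
  roots.cells[kArraySpeciesProtector].value = kProtectorInvalid;
  assert(!IsArraySpeciesLookupChainIntact(roots));  // protector tripped
}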
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 8549c08b0b..73aee01a6d 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -35,6 +35,9 @@
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/frames-inl.h"
+#include "src/hash-seed-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/read-only-heap.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
@@ -52,6 +55,7 @@
#include "src/objects/smi.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/ostreams.h"
+#include "src/profiler/heap-profiler.h"
#include "src/profiler/tracing-cpu-profiler.h"
#include "src/prototype.h"
#include "src/ptr-compr.h"
@@ -61,11 +65,14 @@
#include "src/simulator.h"
#include "src/snapshot/embedded-data.h"
#include "src/snapshot/embedded-file-writer.h"
+#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/startup-deserializer.h"
+#include "src/string-stream.h"
#include "src/tracing/tracing-category-observer.h"
#include "src/trap-handler/trap-handler.h"
#include "src/unicode-cache.h"
#include "src/v8.h"
+#include "src/v8threads.h"
#include "src/version.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
@@ -213,7 +220,12 @@ void Isolate::SetEmbeddedBlob(const uint8_t* blob, uint32_t blob_size) {
// Verify that the contents of the embedded blob are unchanged from
// serialization-time, just to ensure the compiler isn't messing with us.
EmbeddedData d = EmbeddedData::FromBlob();
- CHECK_EQ(d.Hash(), d.CreateHash());
+ if (d.EmbeddedBlobHash() != d.CreateEmbeddedBlobHash()) {
+ FATAL(
+ "Embedded blob checksum verification failed. This indicates that the "
+ "embedded blob has been modified since compilation time. A common "
+ "cause is a debugging breakpoint set within builtin code.");
+ }
#endif // DEBUG
}
@@ -244,30 +256,57 @@ uint32_t Isolate::CurrentEmbeddedBlobSize() {
std::memory_order::memory_order_relaxed);
}
-void ThreadLocalTop::Initialize(Isolate* isolate) {
- *this = ThreadLocalTop();
- isolate_ = isolate;
-#ifdef USE_SIMULATOR
- simulator_ = Simulator::current(isolate);
-#endif
- thread_id_ = ThreadId::Current();
- thread_in_wasm_flag_address_ = reinterpret_cast<Address>(
- trap_handler::GetThreadInWasmThreadLocalAddress());
-}
+size_t Isolate::HashIsolateForEmbeddedBlob() {
+ DCHECK(builtins_.is_initialized());
+ DCHECK(FLAG_embedded_builtins);
+ DCHECK(Builtins::AllBuiltinsAreIsolateIndependent());
+
+ DisallowHeapAllocation no_gc;
-void ThreadLocalTop::Free() {
- // Match unmatched PopPromise calls.
- while (promise_on_stack_) isolate_->PopPromise();
+ static constexpr size_t kSeed = 0;
+ size_t hash = kSeed;
+
+ // Hash data sections of builtin code objects.
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code code = heap_.builtin(i);
+
+ DCHECK(Internals::HasHeapObjectTag(code.ptr()));
+ uint8_t* const code_ptr =
+ reinterpret_cast<uint8_t*>(code.ptr() - kHeapObjectTag);
+
+ // These static asserts ensure we don't miss relevant fields. We don't hash
+ // instruction size and flags since they change when creating the off-heap
+ // trampolines. Other data fields must remain the same.
+ STATIC_ASSERT(Code::kInstructionSizeOffset == Code::kDataStart);
+ STATIC_ASSERT(Code::kFlagsOffset == Code::kInstructionSizeOffsetEnd + 1);
+ STATIC_ASSERT(Code::kSafepointTableOffsetOffset ==
+ Code::kFlagsOffsetEnd + 1);
+ static constexpr int kStartOffset = Code::kSafepointTableOffsetOffset;
+
+ for (int j = kStartOffset; j < Code::kUnalignedHeaderSize; j++) {
+ hash = base::hash_combine(hash, size_t{code_ptr[j]});
+ }
+ }
+
+ // The builtins constants table is also tightly tied to embedded builtins.
+ hash = base::hash_combine(
+ hash, static_cast<size_t>(heap_.builtins_constants_table()->length()));
+
+ return hash;
}
base::Thread::LocalStorageKey Isolate::isolate_key_;
base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
-base::Atomic32 Isolate::isolate_counter_ = 0;
#if DEBUG
-base::Atomic32 Isolate::isolate_key_created_ = 0;
+std::atomic<bool> Isolate::isolate_key_created_{false};
#endif
+namespace {
+// A global counter for all generated Isolates; it might overflow.
+std::atomic<int> isolate_counter{0};
+} // namespace
+
Isolate::PerIsolateThreadData*
Isolate::FindOrAllocatePerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
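
HashIsolateForEmbeddedBlob, added in the hunk above, folds the data sections of all builtin Code objects into a single hash via base::hash_combine. A minimal sketch of that byte-range hash chaining; the hash_combine below is a generic golden-ratio mix, not V8's exact implementation:

#include <cstddef>
#include <cstdint>
#include <cstdio>

inline size_t hash_combine(size_t seed, size_t value) {
  // Classic mixing step; any decent combiner works for this sketch.
  return seed ^ (value + 0x9e3779b9u + (seed << 6) + (seed >> 2));
}

size_t HashRange(const uint8_t* begin, const uint8_t* end, size_t seed = 0) {
  size_t hash = seed;
  for (const uint8_t* p = begin; p != end; ++p) {
    hash = hash_combine(hash, static_cast<size_t>(*p));
  }
  return hash;
}

int main() {
  const uint8_t data[] = {0xde, 0xad, 0xbe, 0xef};
  std::printf("%zu\n", HashRange(data, data + sizeof data));
}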
@@ -319,7 +358,9 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
void Isolate::InitializeOncePerProcess() {
isolate_key_ = base::Thread::CreateThreadLocalKey();
#if DEBUG
- base::Relaxed_Store(&isolate_key_created_, 1);
+ bool expected = false;
+ DCHECK_EQ(true, isolate_key_created_.compare_exchange_strong(
+ expected, true, std::memory_order_relaxed));
#endif
per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
}
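
The surrounding hunks also move Isolate bookkeeping from base::Atomic32 to std::atomic: a relaxed fetch_add hands out isolate IDs, and a compare_exchange_strong asserts that the thread-local key is created exactly once. A compilable sketch of both idioms:

#include <atomic>
#include <cassert>

std::atomic<int> isolate_counter{0};
std::atomic<bool> key_created{false};

int NextIsolateId() {
  // Relaxed suffices: the value is only an identifier, not a synchronizer.
  return isolate_counter.fetch_add(1, std::memory_order_relaxed);
}

void MarkKeyCreatedOnce() {
  bool expected = false;
  // Fails (returns false) if another call already flipped the flag.
  bool won = key_created.compare_exchange_strong(expected, true,
                                                 std::memory_order_relaxed);
  assert(won);
  (void)won;
}

int main() {
  MarkKeyCreatedOnce();
  assert(NextIsolateId() == 0);
  assert(NextIsolateId() == 1);
}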
@@ -350,7 +391,7 @@ void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
v->VisitRootPointer(Root::kTop, nullptr,
FullObjectSlot(&thread->scheduled_exception_));
- for (v8::TryCatch* block = thread->try_catch_handler(); block != nullptr;
+ for (v8::TryCatch* block = thread->try_catch_handler_; block != nullptr;
block = block->next_) {
// TODO(3770): Make TryCatch::exception_ an Address (and message_obj_ too).
v->VisitRootPointer(
@@ -409,13 +450,13 @@ bool Isolate::IsDeferredHandle(Address* handle) {
void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
- thread_local_top()->set_try_catch_handler(that);
+ thread_local_top()->try_catch_handler_ = that;
}
void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
- DCHECK(thread_local_top()->try_catch_handler() == that);
- thread_local_top()->set_try_catch_handler(that->next_);
+ DCHECK(thread_local_top()->try_catch_handler_ == that);
+ thread_local_top()->try_catch_handler_ = that->next_;
}
@@ -490,11 +531,56 @@ StackTraceFailureMessage::StackTraceFailureMessage(Isolate* isolate, void* ptr1,
namespace {
+class StackFrameCacheHelper : public AllStatic {
+ public:
+ static MaybeHandle<StackTraceFrame> LookupCachedFrame(
+ Isolate* isolate, Handle<AbstractCode> code, int code_offset) {
+ if (FLAG_optimize_for_size) return MaybeHandle<StackTraceFrame>();
+
+ const auto maybe_cache = handle(code->stack_frame_cache(), isolate);
+ if (!maybe_cache->IsSimpleNumberDictionary())
+ return MaybeHandle<StackTraceFrame>();
+
+ const auto cache = Handle<SimpleNumberDictionary>::cast(maybe_cache);
+ const int entry = cache->FindEntry(isolate, code_offset);
+ if (entry != NumberDictionary::kNotFound) {
+ return handle(StackTraceFrame::cast(cache->ValueAt(entry)), isolate);
+ }
+ return MaybeHandle<StackTraceFrame>();
+ }
+
+ static void CacheFrameAndUpdateCache(Isolate* isolate,
+ Handle<AbstractCode> code,
+ int code_offset,
+ Handle<StackTraceFrame> frame) {
+ if (FLAG_optimize_for_size) return;
+
+ const auto maybe_cache = handle(code->stack_frame_cache(), isolate);
+ const auto cache = maybe_cache->IsSimpleNumberDictionary()
+ ? Handle<SimpleNumberDictionary>::cast(maybe_cache)
+ : SimpleNumberDictionary::New(isolate, 1);
+ Handle<SimpleNumberDictionary> new_cache =
+ SimpleNumberDictionary::Set(isolate, cache, code_offset, frame);
+ if (*new_cache != *cache || !maybe_cache->IsSimpleNumberDictionary()) {
+ AbstractCode::SetStackFrameCache(code, new_cache);
+ }
+ }
+};
+
+} // anonymous namespace
+
class FrameArrayBuilder {
public:
+ enum FrameFilterMode { ALL, CURRENT_SECURITY_CONTEXT };
+
FrameArrayBuilder(Isolate* isolate, FrameSkipMode mode, int limit,
- Handle<Object> caller)
- : isolate_(isolate), mode_(mode), limit_(limit), caller_(caller) {
+ Handle<Object> caller,
+ FrameFilterMode filter_mode = CURRENT_SECURITY_CONTEXT)
+ : isolate_(isolate),
+ mode_(mode),
+ limit_(limit),
+ caller_(caller),
+ check_security_context_(filter_mode == CURRENT_SECURITY_CONTEXT) {
switch (mode_) {
case SKIP_FIRST:
skip_next_frame_ = true;
@@ -525,8 +611,19 @@ class FrameArrayBuilder {
// The stored bytecode offset is relative to a different base than what
// is used in the source position table, hence the subtraction.
offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
+
+ Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
+ if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
+ int param_count = function->shared()->internal_formal_parameter_count();
+ parameters = isolate_->factory()->NewFixedArray(param_count);
+ for (int i = 0; i < param_count; i++) {
+ parameters->set(i,
+ generator_object->parameters_and_registers()->get(i));
+ }
+ }
+
elements_ = FrameArray::AppendJSFrame(elements_, receiver, function, code,
- offset, flags);
+ offset, flags, parameters);
}
void AppendPromiseAllFrame(Handle<Context> context, int offset) {
@@ -539,8 +636,12 @@ class FrameArrayBuilder {
Handle<Object> receiver(native_context->promise_function(), isolate_);
Handle<AbstractCode> code(AbstractCode::cast(function->code()), isolate_);
+
+ // TODO(mmarchini) save Promises list from Promise.all()
+ Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
+
elements_ = FrameArray::AppendJSFrame(elements_, receiver, function, code,
- offset, flags);
+ offset, flags, parameters);
}
void AppendJavaScriptFrame(
@@ -558,9 +659,13 @@ class FrameArrayBuilder {
if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
if (is_constructor) flags |= FrameArray::kIsConstructor;
+ Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
+ if (V8_UNLIKELY(FLAG_detailed_error_stack_trace))
+ parameters = summary.parameters();
+
elements_ = FrameArray::AppendJSFrame(
elements_, TheHoleToUndefined(isolate_, summary.receiver()), function,
- abstract_code, offset, flags);
+ abstract_code, offset, flags, parameters);
}
void AppendWasmCompiledFrame(
@@ -607,9 +712,18 @@ class FrameArrayBuilder {
if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
if (exit_frame->IsConstructor()) flags |= FrameArray::kIsConstructor;
+ Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
+ if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
+ int param_count = exit_frame->ComputeParametersCount();
+ parameters = isolate_->factory()->NewFixedArray(param_count);
+ for (int i = 0; i < param_count; i++) {
+ parameters->set(i, exit_frame->GetParameter(i));
+ }
+ }
+
elements_ = FrameArray::AppendJSFrame(elements_, receiver, function,
Handle<AbstractCode>::cast(code),
- offset, flags);
+ offset, flags, parameters);
}
bool full() { return elements_->FrameCount() >= limit_; }
@@ -619,6 +733,40 @@ class FrameArrayBuilder {
return elements_;
}
+ // Creates a StackTraceFrame object for each frame in the FrameArray.
+ Handle<FixedArray> GetElementsAsStackTraceFrameArray() {
+ elements_->ShrinkToFit(isolate_);
+ const int frame_count = elements_->FrameCount();
+ Handle<FixedArray> stack_trace =
+ isolate_->factory()->NewFixedArray(frame_count);
+
+ for (int i = 0; i < frame_count; ++i) {
+ // Caching stack frames only happens for non-Wasm frames.
+ if (!elements_->IsAnyWasmFrame(i)) {
+ MaybeHandle<StackTraceFrame> maybe_frame =
+ StackFrameCacheHelper::LookupCachedFrame(
+ isolate_, handle(elements_->Code(i), isolate_),
+ Smi::ToInt(elements_->Offset(i)));
+ if (!maybe_frame.is_null()) {
+ Handle<StackTraceFrame> frame = maybe_frame.ToHandleChecked();
+ stack_trace->set(i, *frame);
+ continue;
+ }
+ }
+
+ Handle<StackTraceFrame> frame =
+ isolate_->factory()->NewStackTraceFrame(elements_, i);
+ stack_trace->set(i, *frame);
+
+ if (!elements_->IsAnyWasmFrame(i)) {
+ StackFrameCacheHelper::CacheFrameAndUpdateCache(
+ isolate_, handle(elements_->Code(i), isolate_),
+ Smi::ToInt(elements_->Offset(i)), frame);
+ }
+ }
+ return stack_trace;
+ }
+
private:
// Poison stack frames below the first strict mode frame.
// The stack trace API should not expose receivers and function
@@ -674,6 +822,7 @@ class FrameArrayBuilder {
}
bool IsInSameSecurityContext(Handle<JSFunction> function) {
+ if (!check_security_context_) return true;
return isolate_->context()->HasSameSecurityTokenAs(function->context());
}
@@ -691,6 +840,7 @@ class FrameArrayBuilder {
const Handle<Object> caller_;
bool skip_next_frame_ = true;
bool encountered_strict_function_ = false;
+ const bool check_security_context_;
Handle<FrameArray> elements_;
};
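
GetElementsAsStackTraceFrameArray above memoizes non-Wasm frames through StackFrameCacheHelper: look a frame up by its code offset, and create and record it on a miss. A hypothetical analogue of that lookup-or-create pattern, substituting std::unordered_map for the SimpleNumberDictionary hung off the code object:

#include <memory>
#include <unordered_map>

struct Frame {
  int offset;
};

class FrameCache {
 public:
  std::shared_ptr<Frame> GetOrCreate(int code_offset) {
    auto it = cache_.find(code_offset);
    if (it != cache_.end()) return it->second;  // cache hit
    auto frame = std::make_shared<Frame>(Frame{code_offset});
    cache_.emplace(code_offset, frame);         // record for later hits
    return frame;
  }

 private:
  std::unordered_map<int, std::shared_ptr<Frame>> cache_;
};

int main() {
  FrameCache cache;
  auto first = cache.GetOrCreate(42);
  auto second = cache.GetOrCreate(42);
  return first == second ? 0 : 1;  // same object: the hit path was taken
}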
@@ -808,8 +958,6 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
}
}
-} // namespace
-
Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
FrameSkipMode mode,
Handle<Object> caller) {
@@ -942,10 +1090,12 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
Handle<FixedArray> stack_trace = CaptureCurrentStackTrace(
stack_trace_for_uncaught_exceptions_frame_limit_,
stack_trace_for_uncaught_exceptions_options_);
- RETURN_ON_EXCEPTION(this,
- Object::SetProperty(this, error_object, key,
- stack_trace, LanguageMode::kStrict),
- JSReceiver);
+ RETURN_ON_EXCEPTION(
+ this,
+ Object::SetProperty(this, error_object, key, stack_trace,
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError)),
+ JSReceiver);
}
return error_object;
}
@@ -959,7 +1109,8 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetSimpleStackTrace(
CaptureSimpleStackTrace(error_object, mode, caller);
RETURN_ON_EXCEPTION(this,
Object::SetProperty(this, error_object, key, stack_trace,
- LanguageMode::kStrict),
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError)),
JSReceiver);
return error_object;
}
@@ -1007,133 +1158,54 @@ Address Isolate::GetAbstractPC(int* line, int* column) {
return frame->pc();
}
-class CaptureStackTraceHelper {
- public:
- explicit CaptureStackTraceHelper(Isolate* isolate) : isolate_(isolate) {}
-
- Handle<StackFrameInfo> NewStackFrameObject(FrameSummary& summ) {
- if (summ.IsJavaScript()) return NewStackFrameObject(summ.AsJavaScript());
- if (summ.IsWasm()) return NewStackFrameObject(summ.AsWasm());
- UNREACHABLE();
- }
-
- Handle<StackFrameInfo> NewStackFrameObject(
- const FrameSummary::JavaScriptFrameSummary& summ) {
- int code_offset;
- Handle<ByteArray> source_position_table;
- Handle<Object> maybe_cache;
- Handle<SimpleNumberDictionary> cache;
- if (!FLAG_optimize_for_size) {
- code_offset = summ.code_offset();
- source_position_table =
- handle(summ.abstract_code()->source_position_table(), isolate_);
- maybe_cache = handle(summ.abstract_code()->stack_frame_cache(), isolate_);
- if (maybe_cache->IsSimpleNumberDictionary()) {
- cache = Handle<SimpleNumberDictionary>::cast(maybe_cache);
- } else {
- cache = SimpleNumberDictionary::New(isolate_, 1);
- }
- int entry = cache->FindEntry(isolate_, code_offset);
- if (entry != NumberDictionary::kNotFound) {
- Handle<StackFrameInfo> frame(
- StackFrameInfo::cast(cache->ValueAt(entry)), isolate_);
- return frame;
- }
- }
-
- Handle<StackFrameInfo> frame = factory()->NewStackFrameInfo();
- Handle<Script> script = Handle<Script>::cast(summ.script());
- Script::PositionInfo info;
- bool valid_pos = Script::GetPositionInfo(script, summ.SourcePosition(),
- &info, Script::WITH_OFFSET);
- if (valid_pos) {
- frame->set_line_number(info.line + 1);
- frame->set_column_number(info.column + 1);
- }
- frame->set_script_id(script->id());
- frame->set_script_name(script->name());
- frame->set_script_name_or_source_url(script->GetNameOrSourceURL());
- frame->set_is_eval(script->compilation_type() ==
- Script::COMPILATION_TYPE_EVAL);
- Handle<String> function_name = summ.FunctionName();
- frame->set_function_name(*function_name);
- frame->set_is_constructor(summ.is_constructor());
- frame->set_is_wasm(false);
- if (!FLAG_optimize_for_size) {
- auto new_cache =
- SimpleNumberDictionary::Set(isolate_, cache, code_offset, frame);
- if (*new_cache != *cache || !maybe_cache->IsNumberDictionary()) {
- AbstractCode::SetStackFrameCache(summ.abstract_code(), new_cache);
- }
- }
- frame->set_id(next_id());
- return frame;
- }
-
- Handle<StackFrameInfo> NewStackFrameObject(
- const FrameSummary::WasmFrameSummary& summ) {
- Handle<StackFrameInfo> info = factory()->NewStackFrameInfo();
-
- Handle<WasmModuleObject> module_object(
- summ.wasm_instance()->module_object(), isolate_);
- Handle<String> name = WasmModuleObject::GetFunctionName(
- isolate_, module_object, summ.function_index());
- info->set_function_name(*name);
- // Encode the function index as line number (1-based).
- info->set_line_number(summ.function_index() + 1);
- // Encode the byte offset as column (1-based).
- int position = summ.byte_offset();
- // Make position 1-based.
- if (position >= 0) ++position;
- info->set_column_number(position);
- info->set_script_id(summ.script()->id());
- info->set_is_wasm(true);
- info->set_id(next_id());
- return info;
- }
-
- private:
- inline Factory* factory() { return isolate_->factory(); }
-
- int next_id() const {
- int id = isolate_->last_stack_frame_info_id() + 1;
- isolate_->set_last_stack_frame_info_id(id);
- return id;
- }
-
- Isolate* isolate_;
-};
-
Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options) {
DisallowJavascriptExecution no_js(this);
- CaptureStackTraceHelper helper(this);
// Ensure no negative values.
int limit = Max(frame_limit, 0);
- Handle<FixedArray> stack_trace_elems = factory()->NewFixedArray(limit);
-
- int frames_seen = 0;
- for (StackTraceFrameIterator it(this); !it.done() && (frames_seen < limit);
+ FrameArrayBuilder::FrameFilterMode filter_mode =
+ (options & StackTrace::kExposeFramesAcrossSecurityOrigins)
+ ? FrameArrayBuilder::ALL
+ : FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
+ FrameArrayBuilder builder(this, SKIP_NONE, limit,
+ factory()->undefined_value(), filter_mode);
+
+ for (StackTraceFrameIterator it(this); !it.done() && !builder.full();
it.Advance()) {
StandardFrame* frame = it.frame();
// Set initial size to the maximum inlining level + 1 for the outermost
// function.
std::vector<FrameSummary> frames;
frame->Summarize(&frames);
- for (size_t i = frames.size(); i != 0 && frames_seen < limit; i--) {
+ for (size_t i = frames.size(); i != 0 && !builder.full(); i--) {
FrameSummary& frame = frames[i - 1];
if (!frame.is_subject_to_debugging()) continue;
- // Filter frames from other security contexts.
- if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
- !this->context()->HasSameSecurityTokenAs(*frame.native_context()))
- continue;
- Handle<StackFrameInfo> new_frame_obj = helper.NewStackFrameObject(frame);
- stack_trace_elems->set(frames_seen, *new_frame_obj);
- frames_seen++;
+
+ if (frame.IsJavaScript()) {
+ //=========================================================
+ // Handle a JavaScript frame.
+ //=========================================================
+ auto const& java_script = frame.AsJavaScript();
+ builder.AppendJavaScriptFrame(java_script);
+ } else if (frame.IsWasmCompiled()) {
+ //=========================================================
+ // Handle a WASM compiled frame.
+ //=========================================================
+ auto const& wasm_compiled = frame.AsWasmCompiled();
+ builder.AppendWasmCompiledFrame(wasm_compiled);
+ } else if (frame.IsWasmInterpreted()) {
+ //=========================================================
+ // Handle a WASM interpreted frame.
+ //=========================================================
+ auto const& wasm_interpreted = frame.AsWasmInterpreted();
+ builder.AppendWasmInterpretedFrame(wasm_interpreted);
+ }
}
}
- return FixedArray::ShrinkOrEmpty(this, stack_trace_elems, frames_seen);
+
+ // TODO(yangguo): Queue this structured stack trace for preprocessing on GC.
+ return builder.GetElementsAsStackTraceFrameArray();
}
@@ -1171,8 +1243,6 @@ static void PrintFrames(Isolate* isolate,
}
void Isolate::PrintStack(StringStream* accumulator, PrintStackMode mode) {
- // The MentionedObjectCache is not GC-proof at the moment.
- DisallowHeapAllocation no_gc;
HandleScope scope(this);
DCHECK(accumulator->IsMentionedObjectCacheClear(this));
@@ -1637,9 +1707,18 @@ Object Isolate::UnwindAndFindHandler() {
// Some stubs are able to handle exceptions.
if (!catchable_by_js) break;
StubFrame* stub_frame = static_cast<StubFrame*>(frame);
+ wasm::WasmCode* wasm_code =
+ wasm_engine()->code_manager()->LookupCode(frame->pc());
+ if (wasm_code != nullptr) {
+ // It is safe to skip Wasm runtime stubs as none of them contain local
+ // exception handlers.
+ CHECK_EQ(wasm::WasmCode::kRuntimeStub, wasm_code->kind());
+ CHECK_EQ(0, wasm_code->handler_table_offset());
+ break;
+ }
Code code = stub_frame->LookupCode();
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->handler_table_offset() || !code->is_turbofanned()) {
+ !code->has_handler_table() || !code->is_turbofanned()) {
break;
}
@@ -1703,10 +1782,6 @@ Object Isolate::UnwindAndFindHandler() {
if (trap_handler::IsThreadInWasm()) {
trap_handler::ClearThreadInWasm();
}
- WasmInterpreterEntryFrame* interpreter_frame =
- WasmInterpreterEntryFrame::cast(frame);
- // TODO(wasm): Implement try-catch in the interpreter.
- interpreter_frame->debug_info()->Unwind(frame->fp());
} break;
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH: {
@@ -1830,7 +1905,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
case StackFrame::STUB: {
Handle<Code> code(frame->LookupCode(), this);
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->handler_table_offset() || !code->is_turbofanned()) {
+ !code->has_handler_table() || !code->is_turbofanned()) {
break;
}
@@ -1898,7 +1973,7 @@ void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
clear_scheduled_exception();
}
}
- if (reinterpret_cast<void*>(thread_local_top_.pending_message_obj_.ptr()) ==
+ if (reinterpret_cast<void*>(thread_local_top()->pending_message_obj_.ptr()) ==
handler->message_obj_) {
clear_pending_message();
}
@@ -2132,7 +2207,7 @@ void Isolate::ReportPendingMessagesImpl(bool report_externally) {
Object exception = pending_exception();
// Clear the pending message object early to avoid endless recursion.
- Object message_obj = thread_local_top_.pending_message_obj_;
+ Object message_obj = thread_local_top()->pending_message_obj_;
clear_pending_message();
// For uncatchable exceptions we do nothing. If needed, the exception and the
@@ -2212,27 +2287,27 @@ void Isolate::ReportPendingMessagesFromJavaScript() {
auto PropagateToExternalHandler = [=]() {
if (IsHandledByJavaScript()) {
- thread_local_top_.external_caught_exception_ = false;
+ thread_local_top()->external_caught_exception_ = false;
return false;
}
if (!IsHandledExternally()) {
- thread_local_top_.external_caught_exception_ = false;
+ thread_local_top()->external_caught_exception_ = false;
return true;
}
- thread_local_top_.external_caught_exception_ = true;
+ thread_local_top()->external_caught_exception_ = true;
v8::TryCatch* handler = try_catch_handler();
- DCHECK(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
- thread_local_top_.pending_message_obj_->IsTheHole(this));
+ DCHECK(thread_local_top()->pending_message_obj_->IsJSMessageObject() ||
+ thread_local_top()->pending_message_obj_->IsTheHole(this));
handler->can_continue_ = true;
handler->has_terminated_ = false;
handler->exception_ = reinterpret_cast<void*>(pending_exception().ptr());
// Propagate to the external try-catch only if we got an actual message.
- if (thread_local_top_.pending_message_obj_->IsTheHole(this)) return true;
+ if (thread_local_top()->pending_message_obj_->IsTheHole(this)) return true;
handler->message_obj_ =
- reinterpret_cast<void*>(thread_local_top_.pending_message_obj_.ptr());
+ reinterpret_cast<void*>(thread_local_top()->pending_message_obj_.ptr());
return true;
};
@@ -2245,11 +2320,11 @@ void Isolate::ReportPendingMessagesFromJavaScript() {
MessageLocation Isolate::GetMessageLocation() {
DCHECK(has_pending_exception());
- if (thread_local_top_.pending_exception_ !=
+ if (thread_local_top()->pending_exception_ !=
ReadOnlyRoots(heap()).termination_exception() &&
- !thread_local_top_.pending_message_obj_->IsTheHole(this)) {
+ !thread_local_top()->pending_message_obj_->IsTheHole(this)) {
Handle<JSMessageObject> message_obj(
- JSMessageObject::cast(thread_local_top_.pending_message_obj_), this);
+ JSMessageObject::cast(thread_local_top()->pending_message_obj_), this);
Handle<Script> script(message_obj->script(), this);
int start_pos = message_obj->start_position();
int end_pos = message_obj->end_position();
@@ -2421,7 +2496,7 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
} else if (frame->type() == StackFrame::STUB) {
Code code = frame->LookupCode();
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->handler_table_offset() || !code->is_turbofanned()) {
+ !code->has_handler_table() || !code->is_turbofanned()) {
continue;
}
catch_prediction = code->GetBuiltinCatchPrediction();
@@ -2589,7 +2664,6 @@ void Isolate::SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine) {
DCHECK_NULL(wasm_engine_); // Only call once before {Init}.
wasm_engine_ = std::move(engine);
wasm_engine_->AddIsolate(this);
- wasm::WasmCodeManager::InstallSamplingGCCallback(this);
}
// NOLINTNEXTLINE
@@ -2627,27 +2701,17 @@ void Isolate::ThreadDataTable::RemoveAllThreads() {
class VerboseAccountingAllocator : public AccountingAllocator {
public:
- VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes,
- size_t pool_sample_bytes)
- : heap_(heap),
- last_memory_usage_(0),
- last_pool_size_(0),
- nesting_deepth_(0),
- allocation_sample_bytes_(allocation_sample_bytes),
- pool_sample_bytes_(pool_sample_bytes) {}
-
- v8::internal::Segment* GetSegment(size_t size) override {
- v8::internal::Segment* memory = AccountingAllocator::GetSegment(size);
- if (memory) {
- size_t malloced_current = GetCurrentMemoryUsage();
- size_t pooled_current = GetCurrentPoolSize();
-
- if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current ||
- last_pool_size_ + pool_sample_bytes_ < pooled_current) {
- PrintMemoryJSON(malloced_current, pooled_current);
- last_memory_usage_ = malloced_current;
- last_pool_size_ = pooled_current;
- }
+ VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes)
+ : heap_(heap), allocation_sample_bytes_(allocation_sample_bytes) {}
+
+ v8::internal::Segment* AllocateSegment(size_t size) override {
+ v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size);
+ if (!memory) return nullptr;
+ size_t malloced_current = GetCurrentMemoryUsage();
+
+ if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current) {
+ PrintMemoryJSON(malloced_current);
+ last_memory_usage_ = malloced_current;
}
return memory;
}
@@ -2655,13 +2719,10 @@ class VerboseAccountingAllocator : public AccountingAllocator {
void ReturnSegment(v8::internal::Segment* memory) override {
AccountingAllocator::ReturnSegment(memory);
size_t malloced_current = GetCurrentMemoryUsage();
- size_t pooled_current = GetCurrentPoolSize();
- if (malloced_current + allocation_sample_bytes_ < last_memory_usage_ ||
- pooled_current + pool_sample_bytes_ < last_pool_size_) {
- PrintMemoryJSON(malloced_current, pooled_current);
+ if (malloced_current + allocation_sample_bytes_ < last_memory_usage_) {
+ PrintMemoryJSON(malloced_current);
last_memory_usage_ = malloced_current;
- last_pool_size_ = pooled_current;
}
}
@@ -2693,7 +2754,7 @@ class VerboseAccountingAllocator : public AccountingAllocator {
zone->allocation_size(), nesting_deepth_.load());
}
- void PrintMemoryJSON(size_t malloced, size_t pooled) {
+ void PrintMemoryJSON(size_t malloced) {
// Note: Neither isolate, nor heap is locked, so be careful with accesses
// as the allocator is potentially used on a concurrent thread.
double time = heap_->isolate()->time_millis_since_init();
@@ -2702,17 +2763,14 @@ class VerboseAccountingAllocator : public AccountingAllocator {
"\"type\": \"zone\", "
"\"isolate\": \"%p\", "
"\"time\": %f, "
- "\"allocated\": %" PRIuS
- ","
- "\"pooled\": %" PRIuS "}\n",
- reinterpret_cast<void*>(heap_->isolate()), time, malloced, pooled);
+ "\"allocated\": %" PRIuS "}\n",
+ reinterpret_cast<void*>(heap_->isolate()), time, malloced);
}
Heap* heap_;
- std::atomic<size_t> last_memory_usage_;
- std::atomic<size_t> last_pool_size_;
- std::atomic<size_t> nesting_deepth_;
- size_t allocation_sample_bytes_, pool_sample_bytes_;
+ std::atomic<size_t> last_memory_usage_{0};
+ std::atomic<size_t> nesting_deepth_{0};
+ size_t allocation_sample_bytes_;
};
#ifdef DEBUG
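
With the zone segment pool counters gone, VerboseAccountingAllocator keeps only the sampling throttle: emit the JSON trace line solely when usage has moved by at least allocation_sample_bytes since the last report. A standalone sketch of that throttling pattern:

#include <atomic>
#include <cstddef>
#include <cstdio>

class SamplingLogger {
 public:
  explicit SamplingLogger(size_t sample_bytes) : sample_bytes_(sample_bytes) {}

  void OnUsageChanged(size_t current) {
    size_t last = last_usage_.load(std::memory_order_relaxed);
    // Log on a move of at least sample_bytes in either direction.
    if (last + sample_bytes_ < current || current + sample_bytes_ < last) {
      std::printf("{\"type\": \"zone\", \"allocated\": %zu}\n", current);
      last_usage_.store(current, std::memory_order_relaxed);
    }
  }

 private:
  std::atomic<size_t> last_usage_{0};
  const size_t sample_bytes_;
};

int main() {
  SamplingLogger logger(256 * 1024);
  logger.OnUsageChanged(100);         // below threshold: silent
  logger.OnUsageChanged(512 * 1024);  // crossed threshold: one line logged
}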
@@ -2749,7 +2807,7 @@ void Isolate::Delete(Isolate* isolate) {
// direct pointer. We don't use Enter/Exit here to avoid
// initializing the thread data.
PerIsolateThreadData* saved_data = isolate->CurrentPerIsolateThreadData();
- DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
+ DCHECK_EQ(true, isolate_key_created_.load(std::memory_order_relaxed));
Isolate* saved_isolate = reinterpret_cast<Isolate*>(
base::Thread::GetThreadLocal(isolate->isolate_key_));
SetIsolateThreadLocals(isolate, nullptr);
@@ -2778,11 +2836,11 @@ v8::PageAllocator* Isolate::page_allocator() {
Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
: isolate_allocator_(std::move(isolate_allocator)),
- id_(base::Relaxed_AtomicIncrement(&isolate_counter_, 1)),
+ id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
stack_guard_(this),
- allocator_(FLAG_trace_zone_stats ? new VerboseAccountingAllocator(
- &heap_, 256 * KB, 128 * KB)
- : new AccountingAllocator()),
+ allocator_(FLAG_trace_zone_stats
+ ? new VerboseAccountingAllocator(&heap_, 256 * KB)
+ : new AccountingAllocator()),
builtins_(this),
rail_mode_(PERFORMANCE_ANIMATION),
code_event_dispatcher_(new CodeEventDispatcher()),
@@ -3025,7 +3083,7 @@ Isolate::~Isolate() {
default_microtask_queue_ = nullptr;
}
-void Isolate::InitializeThreadLocal() { thread_local_top_.Initialize(this); }
+void Isolate::InitializeThreadLocal() { thread_local_top()->Initialize(this); }
void Isolate::SetTerminationOnExternalTryCatch() {
if (try_catch_handler() == nullptr) return;
@@ -3039,30 +3097,30 @@ bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
Object exception = pending_exception();
if (IsJavaScriptHandlerOnTop(exception)) {
- thread_local_top_.external_caught_exception_ = false;
+ thread_local_top()->external_caught_exception_ = false;
return false;
}
if (!IsExternalHandlerOnTop(exception)) {
- thread_local_top_.external_caught_exception_ = false;
+ thread_local_top()->external_caught_exception_ = false;
return true;
}
- thread_local_top_.external_caught_exception_ = true;
+ thread_local_top()->external_caught_exception_ = true;
if (!is_catchable_by_javascript(exception)) {
SetTerminationOnExternalTryCatch();
} else {
v8::TryCatch* handler = try_catch_handler();
- DCHECK(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
- thread_local_top_.pending_message_obj_->IsTheHole(this));
+ DCHECK(thread_local_top()->pending_message_obj_->IsJSMessageObject() ||
+ thread_local_top()->pending_message_obj_->IsTheHole(this));
handler->can_continue_ = true;
handler->has_terminated_ = false;
handler->exception_ = reinterpret_cast<void*>(pending_exception().ptr());
// Propagate to the external try-catch only if we got an actual message.
- if (thread_local_top_.pending_message_obj_->IsTheHole(this)) return true;
+ if (thread_local_top()->pending_message_obj_->IsTheHole(this)) return true;
handler->message_obj_ =
- reinterpret_cast<void*>(thread_local_top_.pending_message_obj_.ptr());
+ reinterpret_cast<void*>(thread_local_top()->pending_message_obj_.ptr());
}
return true;
}
@@ -3098,12 +3156,8 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
Handle<Code> trampoline = isolate->factory()->NewOffHeapTrampolineFor(
builtins->builtin_handle(i), instruction_start);
- // Note that references to the old, on-heap code objects may still exist on
- // the heap. This is fine for the sake of serialization, as serialization
- // will canonicalize all builtins in MaybeCanonicalizeBuiltin().
- //
- // From this point onwards, some builtin code objects may be unreachable and
- // thus collected by the GC.
+ // From this point onwards, the old builtin code object is unreachable and
+ // will be collected by the next GC.
builtins->set_builtin(i, *trampoline);
if (isolate->logger()->is_listening_to_code_events() ||
@@ -3113,6 +3167,14 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
}
}
+#ifdef DEBUG
+bool IsolateIsCompatibleWithEmbeddedBlob(Isolate* isolate) {
+ if (!FLAG_embedded_builtins) return true;
+ EmbeddedData d = EmbeddedData::FromBlob(isolate);
+ return (d.IsolateHash() == isolate->HashIsolateForEmbeddedBlob());
+}
+#endif // DEBUG
+
} // namespace
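
The new DEBUG-only helper above compares a hash baked into the embedded blob at build time (EmbeddedData::IsolateHash()) against one recomputed from the running isolate (HashIsolateForEmbeddedBlob()); the FATAL added later in Isolate::Init fires on a mismatch. A toy sketch of the guard pattern, with invented names and std::hash standing in for V8's hashing:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <functional>
    #include <string>

    // Hypothetical stand-ins for both sides of the check: each hashes the
    // same configuration inputs, one at build time, one at run time.
    uint64_t HashConfig(const std::string& flags) {
      return std::hash<std::string>{}(flags);
    }

    int main() {
      const uint64_t build_time_hash =
          HashConfig("--turbo-instruction-scheduling");
      const uint64_t run_time_hash = HashConfig("");  // flags differ

      if (build_time_hash != run_time_hash) {
        // Mirrors the FATAL in Isolate::Init: fail loudly rather than run
        // with an incompatible snapshot or embedded blob.
        std::fprintf(stderr, "snapshot/blob incompatible with binary\n");
        return EXIT_FAILURE;
      }
      return EXIT_SUCCESS;
    }
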
void Isolate::InitializeDefaultEmbeddedBlob() {
@@ -3186,11 +3248,24 @@ void Isolate::TearDownEmbeddedBlob() {
}
}
-bool Isolate::Init(StartupDeserializer* des) {
+bool Isolate::InitWithoutSnapshot() { return Init(nullptr, nullptr); }
+
+bool Isolate::InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
+ StartupDeserializer* startup_deserializer) {
+ DCHECK_NOT_NULL(read_only_deserializer);
+ DCHECK_NOT_NULL(startup_deserializer);
+ return Init(read_only_deserializer, startup_deserializer);
+}
+
+bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
+ StartupDeserializer* startup_deserializer) {
TRACE_ISOLATE(init);
+ const bool create_heap_objects = (read_only_deserializer == nullptr);
+ // We either have both deserializers or neither.
+ DCHECK_EQ(create_heap_objects, startup_deserializer == nullptr);
base::ElapsedTimer timer;
- if (des == nullptr && FLAG_profile_deserialization) timer.Start();
+ if (create_heap_objects && FLAG_profile_deserialization) timer.Start();
time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
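
Splitting the old Init(StartupDeserializer*) into InitWithoutSnapshot() and InitWithSnapshot(...) encodes the "both deserializers or neither" invariant at the call site while keeping a single private code path. A sketch of the same API shape, with placeholder types:

    #include <cassert>

    struct ReadOnlyDeserializer {};
    struct StartupDeserializer {};

    class Engine {
     public:
      // Public entry points encode the two valid combinations...
      bool InitWithoutSnapshot() { return Init(nullptr, nullptr); }
      bool InitWithSnapshot(ReadOnlyDeserializer* ro, StartupDeserializer* su) {
        assert(ro != nullptr && su != nullptr);
        return Init(ro, su);
      }

     private:
      // ...while the private worker derives the mode from its arguments.
      bool Init(ReadOnlyDeserializer* ro, StartupDeserializer* su) {
        const bool create_heap_objects = (ro == nullptr);
        assert(create_heap_objects == (su == nullptr));  // both or neither
        // ... set up the heap, then either create objects or deserialize ...
        return true;
      }
    };

    int main() { return Engine().InitWithoutSnapshot() ? 0 : 1; }
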
@@ -3244,7 +3319,8 @@ bool Isolate::Init(StartupDeserializer* des) {
// SetUp the object heap.
DCHECK(!heap_.HasBeenSetUp());
- heap_.SetUp();
+ auto* read_only_heap = ReadOnlyHeap::GetOrCreateReadOnlyHeap(&heap_);
+ heap_.SetUp(read_only_heap);
isolate_data_.external_reference_table()->Init(this);
@@ -3256,7 +3332,6 @@ bool Isolate::Init(StartupDeserializer* des) {
deoptimizer_data_ = new DeoptimizerData(heap());
- const bool create_heap_objects = (des == nullptr);
if (setup_delegate_ == nullptr) {
setup_delegate_ = new SetupIsolateDelegate(create_heap_objects);
}
@@ -3285,18 +3360,16 @@ bool Isolate::Init(StartupDeserializer* des) {
setup_delegate_->SetupBuiltins(this);
#ifndef V8_TARGET_ARCH_ARM
if (create_heap_objects) {
- // Create a copy of the the interpreter entry trampoline and store it
- // on the root list. It is used as a template for further copies that
- // may later be created to help profile interpreted code.
+ // Store the interpreter entry trampoline on the root list. It is used as a
+ // template for further copies that may later be created to help profile
+ // interpreted code.
// We currently cannot do this on arm due to RELATIVE_CODE_TARGETs
// assuming that all possible Code targets may be addressed with an int24
// offset, effectively limiting code space size to 32MB. We can guarantee
// this at mksnapshot-time, but not at runtime.
// See also: https://crbug.com/v8/8713.
- HandleScope handle_scope(this);
- Handle<Code> code =
- factory()->CopyCode(BUILTIN_CODE(this, InterpreterEntryTrampoline));
- heap_.SetInterpreterEntryTrampolineForProfiling(*code);
+ heap_.SetInterpreterEntryTrampolineForProfiling(
+ heap_.builtin(Builtins::kInterpreterEntryTrampoline));
}
#endif
if (FLAG_embedded_builtins && create_heap_objects) {
@@ -3330,7 +3403,10 @@ bool Isolate::Init(StartupDeserializer* des) {
AlwaysAllocateScope always_allocate(this);
CodeSpaceMemoryModificationScope modification_scope(&heap_);
- if (!create_heap_objects) des->DeserializeInto(this);
+ if (!create_heap_objects) {
+ read_only_heap->MaybeDeserialize(this, read_only_deserializer);
+ startup_deserializer->DeserializeInto(this);
+ }
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
interpreter_->Initialize();
@@ -3342,6 +3418,27 @@ bool Isolate::Init(StartupDeserializer* des) {
// Initialize the builtin entry table.
Builtins::UpdateBuiltinEntryTable(this);
+#ifdef DEBUG
+ // Verify that the current heap state (usually deserialized from the snapshot)
+ // is compatible with the embedded blob. If this check fails, we've likely
+ // loaded a snapshot generated by a different V8 version or build-time
+ // configuration.
+ if (!IsolateIsCompatibleWithEmbeddedBlob(this)) {
+ FATAL(
+ "The Isolate is incompatible with the embedded blob. This is usually "
+ "caused by incorrect usage of mksnapshot. When generating custom "
+ "snapshots, embedders must ensure they pass the same flags as during "
+ "the V8 build process (e.g.: --turbo-instruction-scheduling).");
+ }
+ DCHECK_IMPLIES(FLAG_jitless, FLAG_embedded_builtins);
+#endif // DEBUG
+
+#ifndef V8_TARGET_ARCH_ARM
+ // The IET for profiling should always be a full on-heap Code object.
+ DCHECK(!Code::cast(heap_.interpreter_entry_trampoline_for_profiling())
+ ->is_off_heap_trampoline());
+#endif // V8_TARGET_ARCH_ARM
+
if (FLAG_print_builtin_code) builtins()->PrintBuiltinCode();
if (FLAG_print_builtin_size) builtins()->PrintBuiltinSize();
@@ -3365,10 +3462,10 @@ bool Isolate::Init(StartupDeserializer* des) {
{
HandleScope scope(this);
- ast_string_constants_ = new AstStringConstants(this, heap()->HashSeed());
+ ast_string_constants_ = new AstStringConstants(this, HashSeed(this));
}
- initialized_from_snapshot_ = (des != nullptr);
+ initialized_from_snapshot_ = !create_heap_objects;
if (!FLAG_inline_new) heap_.DisableInlineAllocation();
@@ -3381,7 +3478,7 @@ bool Isolate::Init(StartupDeserializer* des) {
sampling_flags);
}
- if (des == nullptr && FLAG_profile_deserialization) {
+ if (create_heap_objects && FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
}
@@ -3389,7 +3486,6 @@ bool Isolate::Init(StartupDeserializer* des) {
return true;
}
-
void Isolate::Enter() {
Isolate* current_isolate = nullptr;
PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
@@ -4027,17 +4123,16 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
call_completed_callbacks_.erase(pos);
}
-void Isolate::FireCallCompletedCallback() {
+void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
if (!handle_scope_implementer()->CallDepthIsZero()) return;
bool run_microtasks =
- default_microtask_queue()->size() &&
- !default_microtask_queue()->HasMicrotasksSuppressions() &&
- handle_scope_implementer()->microtasks_policy() ==
- v8::MicrotasksPolicy::kAuto;
+ microtask_queue && microtask_queue->size() &&
+ !microtask_queue->HasMicrotasksSuppressions() &&
+ microtask_queue->microtasks_policy() == v8::MicrotasksPolicy::kAuto;
if (run_microtasks) {
- default_microtask_queue()->RunMicrotasks(this);
+ microtask_queue->RunMicrotasks(this);
} else {
// TODO(marja): (spec) The discussion about when to clear the KeepDuringJob
// set is still open (whether to clear it after every microtask or once
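
FireCallCompletedCallback now receives the relevant MicrotaskQueue instead of always consulting the isolate's default queue; microtasks run only when the queue exists, is non-empty, is not suppressed, and uses the automatic policy. A condensed sketch of that decision with simplified stand-in types:

    #include <cstddef>
    #include <cstdio>

    enum class MicrotasksPolicy { kExplicit, kAuto };

    struct MicrotaskQueue {
      std::size_t size = 0;
      bool suppressed = false;
      MicrotasksPolicy policy = MicrotasksPolicy::kAuto;
      void RunMicrotasks() {
        std::puts("running microtasks");
        size = 0;
      }
    };

    // Mirrors the shape of the new check: every condition is asked of the
    // queue that was passed in, so embedder-created queues behave exactly
    // like the default one.
    void FireCallCompleted(MicrotaskQueue* queue) {
      const bool run = queue != nullptr && queue->size > 0 &&
                       !queue->suppressed &&
                       queue->policy == MicrotasksPolicy::kAuto;
      if (run) queue->RunMicrotasks();
    }

    int main() {
      MicrotaskQueue q;
      q.size = 2;
      FireCallCompleted(&q);  // runs and drains the queue
      FireCallCompleted(&q);  // queue now empty: no-op
    }
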
@@ -4407,12 +4502,12 @@ double Isolate::LoadStartTimeMs() {
}
void Isolate::SetRAILMode(RAILMode rail_mode) {
- RAILMode old_rail_mode = rail_mode_.Value();
+ RAILMode old_rail_mode = rail_mode_.load();
if (old_rail_mode != PERFORMANCE_LOAD && rail_mode == PERFORMANCE_LOAD) {
base::MutexGuard guard(&rail_mutex_);
load_start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
}
- rail_mode_.SetValue(rail_mode);
+ rail_mode_.store(rail_mode);
if (old_rail_mode == PERFORMANCE_LOAD && rail_mode != PERFORMANCE_LOAD) {
heap()->incremental_marking()->incremental_marking_job()->ScheduleTask(
heap());
@@ -4478,25 +4573,28 @@ bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
return GetCurrentStackPosition() - gap < stack_guard->real_climit();
}
-SaveContext::SaveContext(Isolate* isolate)
- : isolate_(isolate), prev_(isolate->save_context()) {
+SaveContext::SaveContext(Isolate* isolate) : isolate_(isolate) {
if (!isolate->context().is_null()) {
context_ = Handle<Context>(isolate->context(), isolate);
}
- isolate->set_save_context(this);
c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
}
SaveContext::~SaveContext() {
isolate_->set_context(context_.is_null() ? Context() : *context_);
- isolate_->set_save_context(prev_);
}
bool SaveContext::IsBelowFrame(StandardFrame* frame) {
return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
}
+SaveAndSwitchContext::SaveAndSwitchContext(Isolate* isolate,
+ Context new_context)
+ : SaveContext(isolate) {
+ isolate->set_context(new_context);
+}
+
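
With the save_context_ chain gone from ThreadLocalTop, SaveContext is a plain RAII scope: the constructor snapshots the current context and the destructor restores it, and SaveAndSwitchContext additionally installs a new context for the scope's lifetime. A generic sketch of the pattern, with Context reduced to an int for brevity:

    #include <cassert>

    using Context = int;  // stand-in for V8's Context

    struct Isolate {
      Context context = 0;
    };

    class SaveContext {
     public:
      explicit SaveContext(Isolate* isolate)
          : isolate_(isolate), saved_(isolate->context) {}
      ~SaveContext() { isolate_->context = saved_; }

     private:
      Isolate* const isolate_;
      const Context saved_;
    };

    // Like SaveContext, but also switches to a new context for the scope.
    class SaveAndSwitchContext : public SaveContext {
     public:
      SaveAndSwitchContext(Isolate* isolate, Context new_context)
          : SaveContext(isolate) {
        isolate->context = new_context;
      }
    };

    int main() {
      Isolate iso;
      iso.context = 1;
      {
        SaveAndSwitchContext scope(&iso, 2);
        assert(iso.context == 2);
      }  // destructor restores the previous context
      assert(iso.context == 1);
    }
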
#ifdef DEBUG
AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
: isolate_(isolate), context_(isolate->context(), isolate) {}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 3bf6f3aa85..765089a690 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -16,11 +16,10 @@
#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/allocation.h"
-#include "src/base/atomicops.h"
#include "src/base/macros.h"
#include "src/builtins/builtins.h"
#include "src/contexts.h"
-#include "src/debug/debug-interface.h"
+#include "src/debug/interface-types.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
#include "src/globals.h"
@@ -33,7 +32,6 @@
#include "src/objects/code.h"
#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
-#include "src/thread-id.h"
#include "src/unicode.h"
#ifdef V8_INTL_SUPPORT
@@ -51,6 +49,7 @@ class RandomNumberGenerator;
namespace debug {
class ConsoleDelegate;
+class AsyncEventDelegate;
}
namespace internal {
@@ -76,7 +75,6 @@ class DeoptimizerData;
class DescriptorLookupCache;
class EmbeddedFileWriterInterface;
class EternalHandles;
-class ExternalCallbackScope;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
@@ -86,11 +84,10 @@ class MaterializedObjectStore;
class Microtask;
class MicrotaskQueue;
class OptimizingCompileDispatcher;
-class PromiseOnStack;
+class ReadOnlyDeserializer;
class RegExpStack;
class RootVisitor;
class RuntimeProfiler;
-class SaveContext;
class SetupIsolateDelegate;
class Simulator;
class StartupDeserializer;
@@ -341,102 +338,6 @@ class WasmEngine;
V8_EXPORT_PRIVATE void DisableEmbeddedBlobRefcounting();
V8_EXPORT_PRIVATE void FreeCurrentEmbeddedBlob();
-class ThreadLocalTop {
- public:
- // Does early low-level initialization that does not depend on the
- // isolate being present.
- ThreadLocalTop() = default;
-
- // Initialize the thread data.
- void Initialize(Isolate*);
-
- // Get the top C++ try catch handler or nullptr if none are registered.
- //
- // This method is not guaranteed to return an address that can be
- // used for comparison with addresses into the JS stack. If such an
- // address is needed, use try_catch_handler_address.
- FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)
-
- // Get the address of the top C++ try catch handler or nullptr if
- // none are registered.
- //
- // This method always returns an address that can be compared to
- // pointers into the JavaScript stack. When running on actual
- // hardware, try_catch_handler_address and TryCatchHandler return
- // the same pointer. When running on a simulator with a separate JS
- // stack, try_catch_handler_address returns a JS stack address that
- // corresponds to the place on the JS stack where the C++ handler
- // would have been if the stack were not separate.
- Address try_catch_handler_address() {
- return reinterpret_cast<Address>(
- v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
- }
-
- void Free();
-
- Isolate* isolate_ = nullptr;
- // The context where the current execution method is created and for variable
- // lookups.
- // TODO(3770): This field is read/written from generated code, so it would
- // be cleaner to make it an "Address raw_context_", and construct a Context
- // object in the getter. Same for {pending_handler_context_} below. In the
- // meantime, assert that the memory layout is the same.
- STATIC_ASSERT(sizeof(Context) == kSystemPointerSize);
- Context context_;
- ThreadId thread_id_ = ThreadId::Invalid();
- Object pending_exception_;
-
- // Communication channel between Isolate::FindHandler and the CEntry.
- Context pending_handler_context_;
- Address pending_handler_entrypoint_ = kNullAddress;
- Address pending_handler_constant_pool_ = kNullAddress;
- Address pending_handler_fp_ = kNullAddress;
- Address pending_handler_sp_ = kNullAddress;
-
- // Communication channel between Isolate::Throw and message consumers.
- bool rethrowing_message_ = false;
- Object pending_message_obj_;
-
- // Use a separate value for scheduled exceptions to preserve the
- // invariants that hold about pending_exception. We may want to
- // unify them later.
- Object scheduled_exception_;
- bool external_caught_exception_ = false;
- SaveContext* save_context_ = nullptr;
-
- // Stack.
- // The frame pointer of the top c entry frame.
- Address c_entry_fp_ = kNullAddress;
- // Try-blocks are chained through the stack.
- Address handler_ = kNullAddress;
- // C function that was called at c entry.
- Address c_function_ = kNullAddress;
-
- // Throwing an exception may cause a Promise rejection. For this purpose
- // we keep track of a stack of nested promises and the corresponding
- // try-catch handlers.
- PromiseOnStack* promise_on_stack_ = nullptr;
-
-#ifdef USE_SIMULATOR
- Simulator* simulator_ = nullptr;
-#endif
-
- // The stack pointer of the bottom JS entry frame.
- Address js_entry_sp_ = kNullAddress;
- // The external callback we're currently in.
- ExternalCallbackScope* external_callback_scope_ = nullptr;
- StateTag current_vm_state_ = EXTERNAL;
-
- // Call back function to report unsafe JS accesses.
- v8::FailedAccessCheckCallback failed_access_check_callback_ = nullptr;
-
- // Address of the thread-local "thread in wasm" flag.
- Address thread_in_wasm_flag_address_ = kNullAddress;
-
- private:
- v8::TryCatch* try_catch_handler_ = nullptr;
-};
-
#ifdef DEBUG
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
@@ -457,58 +358,57 @@ class ThreadLocalTop {
V(int, suffix_table, (kBMMaxShift + 1)) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-typedef std::vector<HeapObject> DebugObjectCache;
-
-#define ISOLATE_INIT_LIST(V) \
- /* Assembler state. */ \
- V(FatalErrorCallback, exception_behavior, nullptr) \
- V(OOMErrorCallback, oom_behavior, nullptr) \
- V(LogEventCallback, event_logger, nullptr) \
- V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
- V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
- V(ExtensionCallback, wasm_module_callback, &NoExtension) \
- V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
- V(ApiImplementationCallback, wasm_compile_streaming_callback, nullptr) \
- V(WasmStreamingCallback, wasm_streaming_callback, nullptr) \
- V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr) \
- /* State for Relocatable. */ \
- V(Relocatable*, relocatable_top, nullptr) \
- V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
- V(Object, string_stream_current_security_token, Object()) \
- V(const intptr_t*, api_external_references, nullptr) \
- V(AddressToIndexHashMap*, external_reference_map, nullptr) \
- V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \
- V(MicrotaskQueue*, default_microtask_queue, nullptr) \
- V(CompilationStatistics*, turbo_statistics, nullptr) \
- V(CodeTracer*, code_tracer, nullptr) \
- V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
- V(PromiseRejectCallback, promise_reject_callback, nullptr) \
- V(const v8::StartupData*, snapshot_blob, nullptr) \
- V(int, code_and_metadata_size, 0) \
- V(int, bytecode_and_metadata_size, 0) \
- V(int, external_script_source_size, 0) \
- /* true if being profiled. Causes collection of extra compile info. */ \
- V(bool, is_profiling, false) \
- /* true if a trace is being formatted through Error.prepareStackTrace. */ \
- V(bool, formatting_stack_trace, false) \
- /* Perform side effect checks on function call and API callbacks. */ \
- V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints) \
- /* Current code coverage mode */ \
- V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort) \
- V(debug::TypeProfile::Mode, type_profile_mode, debug::TypeProfile::kNone) \
- V(int, last_stack_frame_info_id, 0) \
- V(int, last_console_context_id, 0) \
- V(v8_inspector::V8Inspector*, inspector, nullptr) \
- V(bool, next_v8_call_is_safe_for_termination, false) \
- V(bool, only_terminate_in_safe_scope, false) \
+using DebugObjectCache = std::vector<Handle<HeapObject>>;
+
+#define ISOLATE_INIT_LIST(V) \
+ /* Assembler state. */ \
+ V(FatalErrorCallback, exception_behavior, nullptr) \
+ V(OOMErrorCallback, oom_behavior, nullptr) \
+ V(LogEventCallback, event_logger, nullptr) \
+ V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
+ V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
+ V(ExtensionCallback, wasm_module_callback, &NoExtension) \
+ V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
+ V(WasmStreamingCallback, wasm_streaming_callback, nullptr) \
+ V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr) \
+ /* State for Relocatable. */ \
+ V(Relocatable*, relocatable_top, nullptr) \
+ V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
+ V(Object, string_stream_current_security_token, Object()) \
+ V(const intptr_t*, api_external_references, nullptr) \
+ V(AddressToIndexHashMap*, external_reference_map, nullptr) \
+ V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \
+ V(MicrotaskQueue*, default_microtask_queue, nullptr) \
+ V(CompilationStatistics*, turbo_statistics, nullptr) \
+ V(CodeTracer*, code_tracer, nullptr) \
+ V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
+ V(PromiseRejectCallback, promise_reject_callback, nullptr) \
+ V(const v8::StartupData*, snapshot_blob, nullptr) \
+ V(int, code_and_metadata_size, 0) \
+ V(int, bytecode_and_metadata_size, 0) \
+ V(int, external_script_source_size, 0) \
+ /* true if being profiled. Causes collection of extra compile info. */ \
+ V(bool, is_profiling, false) \
+ /* true if a trace is being formatted through Error.prepareStackTrace. */ \
+ V(bool, formatting_stack_trace, false) \
+ /* Perform side effect checks on function call and API callbacks. */ \
+ V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints) \
+ /* Current code coverage mode */ \
+ V(debug::CoverageMode, code_coverage_mode, debug::CoverageMode::kBestEffort) \
+ V(debug::TypeProfileMode, type_profile_mode, debug::TypeProfileMode::kNone) \
+ V(int, last_stack_frame_info_id, 0) \
+ V(int, last_console_context_id, 0) \
+ V(v8_inspector::V8Inspector*, inspector, nullptr) \
+ V(bool, next_v8_call_is_safe_for_termination, false) \
+ V(bool, only_terminate_in_safe_scope, false) \
V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info)
-#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
- inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
- inline type name() const { return thread_local_top_.name##_; }
+#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
+ inline void set_##name(type v) { thread_local_top()->name##_ = v; } \
+ inline type name() const { return thread_local_top()->name##_; }
#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
- type* name##_address() { return &thread_local_top_.name##_; }
+ type* name##_address() { return &thread_local_top()->name##_; }
// HiddenFactory exists so Isolate can privately inherit from it without making
// Factory's members available to Isolate directly.
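
The accessor macros above changed only their backing storage: they now reach through thread_local_top(), which points into isolate_data_, instead of a direct thread_local_top_ member. The token-pasting technique itself, one setter/getter pair per (type, name) entry plus the const overload the diff adds elsewhere, looks like this in isolation (fields are illustrative):

    #include <cassert>

    struct ThreadLocalTop {
      bool external_caught_exception_ = false;
      int thread_id_ = -1;
    };

    class Isolate {
     public:
      ThreadLocalTop* thread_local_top() { return &top_; }
      const ThreadLocalTop* thread_local_top() const { return &top_; }

    // Same shape as V8's THREAD_LOCAL_TOP_ACCESSOR: token pasting appends
    // the trailing underscore, so `name` maps onto the `name##_` field.
    #define THREAD_LOCAL_TOP_ACCESSOR(type, name)                  \
      void set_##name(type v) { thread_local_top()->name##_ = v; } \
      type name() const { return thread_local_top()->name##_; }

      THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
      THREAD_LOCAL_TOP_ACCESSOR(int, thread_id)
    #undef THREAD_LOCAL_TOP_ACCESSOR

     private:
      ThreadLocalTop top_;
    };

    int main() {
      Isolate iso;
      iso.set_thread_id(7);
      assert(iso.thread_id() == 7);
    }
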
@@ -599,7 +499,7 @@ class Isolate final : private HiddenFactory {
// Returns the isolate inside which the current thread is running or nullptr.
V8_INLINE static Isolate* TryGetCurrent() {
- DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
+ DCHECK_EQ(true, isolate_key_created_.load(std::memory_order_relaxed));
return reinterpret_cast<Isolate*>(
base::Thread::GetExistingThreadLocal(isolate_key_));
}
@@ -611,11 +511,6 @@ class Isolate final : private HiddenFactory {
return isolate;
}
- // Get the isolate that the given HeapObject lives in, returning true on
- // success. If the object is not writable (i.e. lives in read-only space),
- // return false.
- inline static bool FromWritableHeapObject(HeapObject obj, Isolate** isolate);
-
// Usually called by Init(), but can be called early e.g. to allow
// testing components that require logging but not the whole
// isolate.
@@ -624,7 +519,9 @@ class Isolate final : private HiddenFactory {
void InitializeLoggingAndCounters();
bool InitializeCounters(); // Returns false if already initialized.
- bool Init(StartupDeserializer* des);
+ bool InitWithoutSnapshot();
+ bool InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
+ StartupDeserializer* startup_deserializer);
// True if at least one thread Enter'ed this isolate.
bool IsInUse() { return entry_stack_ != nullptr; }
@@ -668,11 +565,9 @@ class Isolate final : private HiddenFactory {
Address get_address_from_id(IsolateAddressId id);
// Access to top context (where the current function object was created).
- Context context() { return thread_local_top_.context_; }
+ Context context() { return thread_local_top()->context_; }
inline void set_context(Context context);
- Context* context_address() { return &thread_local_top_.context_; }
-
- THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
+ Context* context_address() { return &thread_local_top()->context_; }
// Access to current thread id.
THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
@@ -697,17 +592,17 @@ class Isolate final : private HiddenFactory {
THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
v8::TryCatch* try_catch_handler() {
- return thread_local_top_.try_catch_handler();
+ return thread_local_top()->try_catch_handler_;
}
bool* external_caught_exception_address() {
- return &thread_local_top_.external_caught_exception_;
+ return &thread_local_top()->external_caught_exception_;
}
THREAD_LOCAL_TOP_ADDRESS(Object, scheduled_exception)
inline void clear_pending_message();
Address pending_message_obj_address() {
- return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
+ return reinterpret_cast<Address>(&thread_local_top()->pending_message_obj_);
}
inline Object scheduled_exception();
@@ -724,22 +619,20 @@ class Isolate final : private HiddenFactory {
return thread->c_entry_fp_;
}
static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
- Address c_function() { return thread_local_top_.c_function_; }
+ Address c_function() { return thread_local_top()->c_function_; }
inline Address* c_entry_fp_address() {
- return &thread_local_top_.c_entry_fp_;
+ return &thread_local_top()->c_entry_fp_;
}
- inline Address* handler_address() { return &thread_local_top_.handler_; }
+ inline Address* handler_address() { return &thread_local_top()->handler_; }
inline Address* c_function_address() {
- return &thread_local_top_.c_function_;
+ return &thread_local_top()->c_function_;
}
// Bottom JS entry.
- Address js_entry_sp() {
- return thread_local_top_.js_entry_sp_;
- }
+ Address js_entry_sp() { return thread_local_top()->js_entry_sp_; }
inline Address* js_entry_sp_address() {
- return &thread_local_top_.js_entry_sp_;
+ return &thread_local_top()->js_entry_sp_;
}
// Returns the global object of the current context. It could be
@@ -750,7 +643,7 @@ class Isolate final : private HiddenFactory {
inline Handle<JSObject> global_proxy();
static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
- void FreeThreadResources() { thread_local_top_.Free(); }
+ void FreeThreadResources() { thread_local_top()->Free(); }
// This method is called by the api after operations that may throw
// exceptions. If an exception was thrown and not handled by an external
@@ -975,6 +868,10 @@ class Isolate final : private HiddenFactory {
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
+ static Isolate* FromHeap(Heap* heap) {
+ return reinterpret_cast<Isolate*>(reinterpret_cast<Address>(heap) -
+ OFFSET_OF(Isolate, heap_));
+ }
const IsolateData* isolate_data() const { return &isolate_data_; }
IsolateData* isolate_data() { return &isolate_data_; }
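
Isolate::FromHeap recovers the owning Isolate from a Heap* by subtracting the member's offset within the enclosing object, the classic container-from-member trick. A sketch using standard offsetof (which requires a standard-layout type; V8's OFFSET_OF exists to sidestep that restriction):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Heap {
      int dummy = 0;
    };

    struct Isolate {
      int id = 42;
      Heap heap;  // the member we want to map back from

      static Isolate* FromHeap(Heap* heap) {
        // Address of the member minus its offset within the enclosing
        // object yields the address of the enclosing object.
        return reinterpret_cast<Isolate*>(reinterpret_cast<uintptr_t>(heap) -
                                          offsetof(Isolate, heap));
      }
    };

    int main() {
      Isolate iso;
      assert(Isolate::FromHeap(&iso.heap)->id == 42);
    }
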
@@ -1022,7 +919,12 @@ class Isolate final : private HiddenFactory {
void set_deoptimizer_lazy_throw(bool value) {
deoptimizer_lazy_throw_ = value;
}
- ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
+ ThreadLocalTop* thread_local_top() {
+ return &isolate_data_.thread_local_top_;
+ }
+ ThreadLocalTop const* thread_local_top() const {
+ return &isolate_data_.thread_local_top_;
+ }
static uint32_t thread_in_wasm_flag_address_offset() {
// For WebAssembly trap handlers there is a flag in thread-local storage
@@ -1031,7 +933,7 @@ class Isolate final : private HiddenFactory {
// flag in ThreadLocalTop in thread_in_wasm_flag_address_. This function
// here returns the offset of that member from {isolate_root()}.
return static_cast<uint32_t>(
- OFFSET_OF(Isolate, thread_local_top_.thread_in_wasm_flag_address_) -
+ OFFSET_OF(Isolate, thread_local_top()->thread_in_wasm_flag_address_) -
isolate_root_bias());
}
@@ -1149,23 +1051,23 @@ class Isolate final : private HiddenFactory {
bool NeedsDetailedOptimizedCodeLineInfo() const;
bool is_best_effort_code_coverage() const {
- return code_coverage_mode() == debug::Coverage::kBestEffort;
+ return code_coverage_mode() == debug::CoverageMode::kBestEffort;
}
bool is_precise_count_code_coverage() const {
- return code_coverage_mode() == debug::Coverage::kPreciseCount;
+ return code_coverage_mode() == debug::CoverageMode::kPreciseCount;
}
bool is_precise_binary_code_coverage() const {
- return code_coverage_mode() == debug::Coverage::kPreciseBinary;
+ return code_coverage_mode() == debug::CoverageMode::kPreciseBinary;
}
bool is_block_count_code_coverage() const {
- return code_coverage_mode() == debug::Coverage::kBlockCount;
+ return code_coverage_mode() == debug::CoverageMode::kBlockCount;
}
bool is_block_binary_code_coverage() const {
- return code_coverage_mode() == debug::Coverage::kBlockBinary;
+ return code_coverage_mode() == debug::CoverageMode::kBlockBinary;
}
bool is_block_code_coverage() const {
@@ -1173,7 +1075,7 @@ class Isolate final : private HiddenFactory {
}
bool is_collecting_type_profile() const {
- return type_profile_mode() == debug::TypeProfile::kCollect;
+ return type_profile_mode() == debug::TypeProfileMode::kCollect;
}
// Collect feedback vectors with data for code coverage or type profile.
@@ -1199,6 +1101,8 @@ class Isolate final : private HiddenFactory {
const std::string& default_locale() { return default_locale_; }
+ void ResetDefaultLocale() { default_locale_.clear(); }
+
void set_default_locale(const std::string& locale) {
DCHECK_EQ(default_locale_.length(), 0);
default_locale_ = locale;
@@ -1348,7 +1252,7 @@ class Isolate final : private HiddenFactory {
// compile dispatcher's queue.
void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);
- int id() const { return static_cast<int>(id_); }
+ int id() const { return id_; }
CompilationStatistics* GetTurboStatistics();
CodeTracer* GetCodeTracer();
@@ -1389,7 +1293,7 @@ class Isolate final : private HiddenFactory {
size_t heap_limit);
void AddCallCompletedCallback(CallCompletedCallback callback);
void RemoveCallCompletedCallback(CallCompletedCallback callback);
- void FireCallCompletedCallback();
+ void FireCallCompletedCallback(MicrotaskQueue* microtask_queue);
void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
@@ -1450,18 +1354,13 @@ class Isolate final : private HiddenFactory {
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
- std::vector<Object>* read_only_object_cache() {
- return &read_only_object_cache_;
- }
-
std::vector<Object>* partial_snapshot_cache() {
return &partial_snapshot_cache_;
}
// Off-heap builtins cannot embed constants within the code object itself,
// and thus need to load them from the root list.
- // TODO(jgruber): Rename to IsGeneratingEmbeddedBuiltins().
- bool ShouldLoadConstantsFromRootList() const {
+ bool IsGeneratingEmbeddedBuiltins() const {
return FLAG_embedded_builtins &&
builtins_constants_table_builder() != nullptr;
}
@@ -1470,6 +1369,11 @@ class Isolate final : private HiddenFactory {
return builtins_constants_table_builder_;
}
+ // Hashes bits of the Isolate that are relevant for embedded builtins. In
+ // particular, the embedded blob requires builtin Code object layout and the
+ // builtins constants table to remain unchanged from build-time.
+ size_t HashIsolateForEmbeddedBlob();
+
static const uint8_t* CurrentEmbeddedBlob();
static uint32_t CurrentEmbeddedBlobSize();
static bool CurrentEmbeddedBlobIsBinaryEmbedded();
@@ -1546,7 +1450,7 @@ class Isolate final : private HiddenFactory {
void SetRAILMode(RAILMode rail_mode);
- RAILMode rail_mode() { return rail_mode_.Value(); }
+ RAILMode rail_mode() { return rail_mode_.load(); }
double LoadStartTimeMs();
@@ -1595,6 +1499,9 @@ class Isolate final : private HiddenFactory {
explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
~Isolate();
+ bool Init(ReadOnlyDeserializer* read_only_deserializer,
+ StartupDeserializer* startup_deserializer);
+
void CheckIsolateLayout();
class ThreadDataTable {
@@ -1644,11 +1551,8 @@ class Isolate final : private HiddenFactory {
static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
static base::Thread::LocalStorageKey isolate_key_;
- // A global counter for all generated Isolates, might overflow.
- static base::Atomic32 isolate_counter_;
-
-#if DEBUG
- static base::Atomic32 isolate_key_created_;
+#ifdef DEBUG
+ static std::atomic<bool> isolate_key_created_;
#endif
void Deinit();
@@ -1695,7 +1599,7 @@ class Isolate final : private HiddenFactory {
std::unique_ptr<IsolateAllocator> isolate_allocator_;
Heap heap_;
- base::Atomic32 id_;
+ const int id_;
EntryStackItem* entry_stack_ = nullptr;
int stack_trace_nesting_level_ = 0;
StringStream* incomplete_message_ = nullptr;
@@ -1712,7 +1616,6 @@ class Isolate final : private HiddenFactory {
DeoptimizerData* deoptimizer_data_ = nullptr;
bool deoptimizer_lazy_throw_ = false;
MaterializedObjectStore* materialized_object_store_ = nullptr;
- ThreadLocalTop thread_local_top_;
bool capture_stack_trace_for_uncaught_exceptions_ = false;
int stack_trace_for_uncaught_exceptions_frame_limit_ = 0;
StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_ =
@@ -1738,7 +1641,7 @@ class Isolate final : private HiddenFactory {
DateCache* date_cache_ = nullptr;
base::RandomNumberGenerator* random_number_generator_ = nullptr;
base::RandomNumberGenerator* fuzzer_rng_ = nullptr;
- base::AtomicValue<RAILMode> rail_mode_;
+ std::atomic<RAILMode> rail_mode_;
v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
void* atomics_wait_callback_data_ = nullptr;
PromiseHook promise_hook_ = nullptr;
@@ -1852,7 +1755,6 @@ class Isolate final : private HiddenFactory {
v8::Isolate::UseCounterCallback use_counter_callback_ = nullptr;
- std::vector<Object> read_only_object_cache_;
std::vector<Object> partial_snapshot_cache_;
// Used during builtins compilation to build the builtins constants table,
@@ -1924,11 +1826,9 @@ class Isolate final : private HiddenFactory {
DISALLOW_COPY_AND_ASSIGN(Isolate);
};
-
#undef FIELD_ACCESSOR
#undef THREAD_LOCAL_TOP_ACCESSOR
-
class PromiseOnStack {
public:
PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
@@ -1941,17 +1841,15 @@ class PromiseOnStack {
PromiseOnStack* prev_;
};
-
-// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
-// class as a work around for a bug in the generated code found with these
-// versions of GCC. See V8 issue 122 for details.
+// SaveContext scopes save the current context on the Isolate on creation, and
+// restore it on destruction.
class V8_EXPORT_PRIVATE SaveContext {
public:
explicit SaveContext(Isolate* isolate);
+
~SaveContext();
Handle<Context> context() { return context_; }
- SaveContext* prev() { return prev_; }
// Returns true if this save context is below a given JavaScript frame.
bool IsBelowFrame(StandardFrame* frame);
@@ -1959,10 +1857,16 @@ class V8_EXPORT_PRIVATE SaveContext {
private:
Isolate* const isolate_;
Handle<Context> context_;
- SaveContext* const prev_;
Address c_entry_fp_;
};
+// Like SaveContext, but also switches the Context to a new one in the
+// constructor.
+class V8_EXPORT_PRIVATE SaveAndSwitchContext : public SaveContext {
+ public:
+ SaveAndSwitchContext(Isolate* isolate, Context new_context);
+};
+
class AssertNoContextChange {
#ifdef DEBUG
public:
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 7b0f757b1d..f62cdba880 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -8,6 +8,8 @@
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/field-type.h"
+#include "src/hash-seed-inl.h"
+#include "src/heap/heap-inl.h" // For string_table().
#include "src/message-template.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
@@ -126,7 +128,7 @@ bool JsonParseInternalizer::RecurseAndApply(Handle<JSReceiver> holder,
desc.set_enumerable(true);
desc.set_writable(true);
change_result = JSReceiver::DefineOwnProperty(isolate_, holder, name, &desc,
- kDontThrow);
+ Just(kDontThrow));
}
MAYBE_RETURN(change_result, false);
return true;
@@ -839,8 +841,7 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
int position = position_;
uc32 c0 = c0_;
- uint32_t running_hash =
- static_cast<uint32_t>(isolate()->heap()->HashSeed());
+ uint32_t running_hash = static_cast<uint32_t>(HashSeed(isolate()));
uint32_t index = 0;
bool is_array_index = true;
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 67ad58c206..078399b85d 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -157,6 +157,10 @@ class JsonParser {
ZoneVector<Handle<Object>> properties_;
};
+// Explicit instantiation declarations.
+extern template class JsonParser<true>;
+extern template class JsonParser<false>;
+
} // namespace internal
} // namespace v8
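
The extern template declarations tell every translation unit that includes json-parser.h not to instantiate JsonParser<true>/JsonParser<false> itself; the explicit instantiations in json-parser.cc then provide the single definition, trimming compile time and duplicate object code. A self-contained sketch of the mechanism (header and source collapsed into one file, so the suppression and the instantiation appear side by side):

    // parser.h (sketch)
    template <bool seq_one_byte>
    class JsonParser {
     public:
      int Parse() { return seq_one_byte ? 1 : 2; }
    };

    // Suppress implicit instantiation in every TU including this header...
    extern template class JsonParser<true>;
    extern template class JsonParser<false>;

    // parser.cc (sketch): the one TU that provides the instantiations.
    template class JsonParser<true>;
    template class JsonParser<false>;

    #include <cassert>
    int main() {
      JsonParser<true> p;
      assert(p.Parse() == 1);
    }
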
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index 1d3858f351..271440ef07 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -109,6 +109,15 @@ class JsonStringifier {
Result StackPush(Handle<Object> object, Handle<Object> key);
void StackPop();
+ // Uses the current stack_ to provide a detailed error message describing
+ // the objects involved in the circular structure.
+ Handle<String> ConstructCircularStructureErrorMessage(Handle<Object> last_key,
+ size_t start_index);
+ // The prefix and postfix counts do NOT include the starting and
+ // closing lines of the error message.
+ static const int kCircularErrorMessagePrefixCount = 2;
+ static const int kCircularErrorMessagePostfixCount = 1;
+
Factory* factory() { return isolate_->factory(); }
Isolate* isolate_;
@@ -373,11 +382,13 @@ JsonStringifier::Result JsonStringifier::StackPush(Handle<Object> object,
{
DisallowHeapAllocation no_allocation;
- for (const KeyObject& key_object : stack_) {
- if (*key_object.second == *object) {
+ for (size_t i = 0; i < stack_.size(); ++i) {
+ if (*stack_[i].second == *object) {
AllowHeapAllocation allow_to_return_error;
- Handle<Object> error =
- factory()->NewTypeError(MessageTemplate::kCircularStructure);
+ Handle<String> circle_description =
+ ConstructCircularStructureErrorMessage(key, i);
+ Handle<Object> error = factory()->NewTypeError(
+ MessageTemplate::kCircularStructure, circle_description);
isolate_->Throw(*error);
return EXCEPTION;
}
@@ -389,6 +400,117 @@ JsonStringifier::Result JsonStringifier::StackPush(Handle<Object> object,
void JsonStringifier::StackPop() { stack_.pop_back(); }
+class CircularStructureMessageBuilder {
+ public:
+ explicit CircularStructureMessageBuilder(Isolate* isolate)
+ : builder_(isolate) {}
+
+ void AppendStartLine(Handle<Object> start_object) {
+ builder_.AppendCString(kStartPrefix);
+ builder_.AppendCString("starting at object with constructor ");
+ AppendConstructorName(start_object);
+ }
+
+ void AppendNormalLine(Handle<Object> key, Handle<Object> object) {
+ builder_.AppendCString(kLinePrefix);
+ AppendKey(key);
+ builder_.AppendCString(" -> object with constructor ");
+ AppendConstructorName(object);
+ }
+
+ void AppendClosingLine(Handle<Object> closing_key) {
+ builder_.AppendCString(kEndPrefix);
+ AppendKey(closing_key);
+ builder_.AppendCString(" closes the circle");
+ }
+
+ void AppendEllipsis() {
+ builder_.AppendCString(kLinePrefix);
+ builder_.AppendCString("...");
+ }
+
+ MaybeHandle<String> Finish() { return builder_.Finish(); }
+
+ private:
+ void AppendConstructorName(Handle<Object> object) {
+ builder_.AppendCharacter('\'');
+ Handle<String> constructor_name =
+ JSReceiver::GetConstructorName(Handle<JSReceiver>::cast(object));
+ builder_.AppendString(constructor_name);
+ builder_.AppendCharacter('\'');
+ }
+
+ // A key is either a string (possibly empty) or a Smi.
+ void AppendKey(Handle<Object> key) {
+ if (key->IsSmi()) {
+ builder_.AppendCString("index ");
+ AppendSmi(Smi::cast(*key));
+ return;
+ }
+
+ CHECK(key->IsString());
+ Handle<String> key_as_string = Handle<String>::cast(key);
+ if (key_as_string->length() == 0) {
+ builder_.AppendCString("<anonymous>");
+ } else {
+ builder_.AppendCString("property '");
+ builder_.AppendString(key_as_string);
+ builder_.AppendCharacter('\'');
+ }
+ }
+
+ void AppendSmi(Smi smi) {
+ static const int kBufferSize = 100;
+ char chars[kBufferSize];
+ Vector<char> buffer(chars, kBufferSize);
+ builder_.AppendCString(IntToCString(smi->value(), buffer));
+ }
+
+ IncrementalStringBuilder builder_;
+ static constexpr const char* kStartPrefix = "\n --> ";
+ static constexpr const char* kEndPrefix = "\n --- ";
+ static constexpr const char* kLinePrefix = "\n | ";
+};
+
+Handle<String> JsonStringifier::ConstructCircularStructureErrorMessage(
+ Handle<Object> last_key, size_t start_index) {
+ DCHECK(start_index < stack_.size());
+ CircularStructureMessageBuilder builder(isolate_);
+
+ // We track the index to be printed next for better readability.
+ size_t index = start_index;
+ const size_t stack_size = stack_.size();
+
+ builder.AppendStartLine(stack_[index++].second);
+
+ // Append a maximum of kCircularErrorMessagePrefixCount normal lines.
+ const size_t prefix_end =
+ std::min(stack_size, index + kCircularErrorMessagePrefixCount);
+ for (; index < prefix_end; ++index) {
+ builder.AppendNormalLine(stack_[index].first, stack_[index].second);
+ }
+
+ // If the circle consists of too many objects, we skip them and just
+ // print an ellipsis.
+ if (stack_size > index + kCircularErrorMessagePostfixCount) {
+ builder.AppendEllipsis();
+ }
+
+ // Since we calculate the postfix lines from the back of the stack,
+ // we have to ensure that lines are not printed twice.
+ index = std::max(index, stack_size - kCircularErrorMessagePostfixCount);
+ for (; index < stack_size; ++index) {
+ builder.AppendNormalLine(stack_[index].first, stack_[index].second);
+ }
+
+ builder.AppendClosingLine(last_key);
+
+ Handle<String> result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, result, builder.Finish(),
+ factory()->empty_string());
+ return result;
+}
+
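
ConstructCircularStructureErrorMessage prints the start of the cycle, at most kCircularErrorMessagePrefixCount intermediate lines, an ellipsis when entries were skipped, then the final kCircularErrorMessagePostfixCount lines and the closing key. The windowing logic in isolation, over plain strings and with shortened constant names:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <vector>

    constexpr std::size_t kPrefixCount = 2;   // cf. ...PrefixCount
    constexpr std::size_t kPostfixCount = 1;  // cf. ...PostfixCount

    // Prints entries[start..] with the middle elided, mirroring the shape
    // of the builder logic (strings instead of handles).
    void PrintCycle(const std::vector<std::string>& entries,
                    std::size_t start) {
      std::size_t index = start;
      const std::size_t size = entries.size();
      std::printf(" --> %s\n", entries[index++].c_str());

      // At most kPrefixCount lines after the start line.
      const std::size_t prefix_end = std::min(size, index + kPrefixCount);
      for (; index < prefix_end; ++index)
        std::printf("  |  %s\n", entries[index].c_str());

      // Elide the middle if more than the postfix remains.
      if (size > index + kPostfixCount) std::printf("  |  ...\n");

      // Resume so the last kPostfixCount lines print exactly once.
      index = std::max(index, size - kPostfixCount);
      for (; index < size; ++index)
        std::printf("  |  %s\n", entries[index].c_str());
      std::printf(" --- closes the circle\n");
    }

    int main() { PrintCycle({"a", "b", "c", "d", "e", "f"}, 0); }
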
template <bool deferred_string_key>
JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
bool comma,
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index cc11ac09bf..99fa2be414 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -6,6 +6,7 @@
#include "src/api-arguments-inl.h"
#include "src/elements-inl.h"
+#include "src/field-index-inl.h"
#include "src/handles-inl.h"
#include "src/heap/factory.h"
#include "src/identity-map.h"
@@ -14,6 +15,7 @@
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/ordered-hash-table-inl.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
@@ -67,12 +69,17 @@ void KeyAccumulator::AddKey(Object key, AddKeyConversion convert) {
}
void KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
- if (key->IsSymbol()) {
+ if (filter_ == PRIVATE_NAMES_ONLY) {
+ if (!key->IsSymbol()) return;
+ if (!Symbol::cast(*key)->is_private_name()) return;
+ } else if (key->IsSymbol()) {
if (filter_ & SKIP_SYMBOLS) return;
- if (Handle<Symbol>::cast(key)->is_private()) return;
+
+ if (Symbol::cast(*key)->is_private()) return;
} else if (filter_ & SKIP_STRINGS) {
return;
}
+
if (IsShadowed(key)) return;
if (keys_.is_null()) {
keys_ = OrderedHashSet::Allocate(isolate_, 16);
@@ -489,7 +496,7 @@ void FilterForEnumerableProperties(Handle<JSReceiver> receiver,
// args are invalid after args.Call(), create a new one in every iteration.
PropertyCallbackArguments args(accumulator->isolate(), interceptor->data(),
- *receiver, *object, kDontThrow);
+ *receiver, *object, Just(kDontThrow));
Handle<Object> element = accessor->Get(result, i);
Handle<Object> attributes;
@@ -521,7 +528,7 @@ Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
IndexedOrNamed type) {
Isolate* isolate = accumulator->isolate();
PropertyCallbackArguments enum_args(isolate, interceptor->data(), *receiver,
- *object, kDontThrow);
+ *object, Just(kDontThrow));
Handle<JSObject> result;
if (!interceptor->enumerator()->IsUndefined(isolate)) {
@@ -709,6 +716,23 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
return CollectInterceptorKeys(receiver, object, this, kNamed);
}
+void KeyAccumulator::CollectPrivateNames(Handle<JSReceiver> receiver,
+ Handle<JSObject> object) {
+ if (object->HasFastProperties()) {
+ int limit = object->map()->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> descs(object->map()->instance_descriptors(),
+ isolate_);
+ CollectOwnPropertyNamesInternal<false>(object, this, descs, 0, limit);
+ } else if (object->IsJSGlobalObject()) {
+ GlobalDictionary::CollectKeysTo(
+ handle(JSGlobalObject::cast(*object)->global_dictionary(), isolate_),
+ this);
+ } else {
+ NameDictionary::CollectKeysTo(
+ handle(object->property_dictionary(), isolate_), this);
+ }
+}
+
Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
Handle<JSObject> object) {
@@ -763,6 +787,11 @@ Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
}
filter_ = static_cast<PropertyFilter>(filter_ | ONLY_ALL_CAN_READ);
}
+ if (filter_ & PRIVATE_NAMES_ONLY) {
+ CollectPrivateNames(receiver, object);
+ return Just(true);
+ }
+
MAYBE_RETURN(CollectOwnElementIndices(receiver, object), Nothing<bool>());
MAYBE_RETURN(CollectOwnPropertyNames(receiver, object), Nothing<bool>());
return Just(true);
@@ -801,11 +830,17 @@ class NameComparator {
} // namespace
-// ES6 9.5.12
+// ES6 #sec-proxy-object-internal-methods-and-internal-slots-ownpropertykeys
// Returns |true| on success, |nothing| in case of exception.
Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
Handle<JSProxy> proxy) {
STACK_CHECK(isolate_, Nothing<bool>());
+ if (filter_ == PRIVATE_NAMES_ONLY) {
+ NameDictionary::CollectKeysTo(
+ handle(proxy->property_dictionary(), isolate_), this);
+ return Just(true);
+ }
+
// 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
Handle<Object> handler(proxy->handler(), isolate_);
// 2. If handler is null, throw a TypeError exception.
@@ -843,73 +878,81 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
Object::CreateListFromArrayLike(isolate_, trap_result_array,
ElementTypes::kStringAndSymbol),
Nothing<bool>());
- // 9. Let extensibleTarget be ? IsExtensible(target).
+ // 9. If trapResult contains any duplicate entries, throw a TypeError
+ // exception. Combine with step 18
+ // 18. Let uncheckedResultKeys be a new List which is a copy of trapResult.
+ Zone set_zone(isolate_->allocator(), ZONE_NAME);
+ ZoneAllocationPolicy alloc(&set_zone);
+ const int kPresent = 1;
+ const int kGone = 0;
+ base::TemplateHashMapImpl<Handle<Name>, int, NameComparator,
+ ZoneAllocationPolicy>
+ unchecked_result_keys(ZoneHashMap::kDefaultHashMapCapacity,
+ NameComparator(isolate_), alloc);
+ int unchecked_result_keys_size = 0;
+ for (int i = 0; i < trap_result->length(); ++i) {
+ Handle<Name> key(Name::cast(trap_result->get(i)), isolate_);
+ auto entry = unchecked_result_keys.LookupOrInsert(key, key->Hash(), alloc);
+ if (entry->value != kPresent) {
+ entry->value = kPresent;
+ unchecked_result_keys_size++;
+ } else {
+ // Found a duplicate key: throw a TypeError (spec step 9).
+ isolate_->Throw(*isolate_->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysDuplicateEntries));
+ return Nothing<bool>();
+ }
+ }
+ // 10. Let extensibleTarget be ? IsExtensible(target).
Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
MAYBE_RETURN(maybe_extensible, Nothing<bool>());
bool extensible_target = maybe_extensible.FromJust();
- // 10. Let targetKeys be ? target.[[OwnPropertyKeys]]().
+ // 11. Let targetKeys be ? target.[[OwnPropertyKeys]]().
Handle<FixedArray> target_keys;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, target_keys,
JSReceiver::OwnPropertyKeys(target),
Nothing<bool>());
- // 11. (Assert)
- // 12. Let targetConfigurableKeys be an empty List.
+ // 12, 13. (Assert)
+ // 14. Let targetConfigurableKeys be an empty List.
// To save memory, we're re-using target_keys and will modify it in-place.
Handle<FixedArray> target_configurable_keys = target_keys;
- // 13. Let targetNonconfigurableKeys be an empty List.
+ // 15. Let targetNonconfigurableKeys be an empty List.
Handle<FixedArray> target_nonconfigurable_keys =
isolate_->factory()->NewFixedArray(target_keys->length());
int nonconfigurable_keys_length = 0;
- // 14. Repeat, for each element key of targetKeys:
+ // 16. Repeat, for each element key of targetKeys:
for (int i = 0; i < target_keys->length(); ++i) {
- // 14a. Let desc be ? target.[[GetOwnProperty]](key).
+ // 16a. Let desc be ? target.[[GetOwnProperty]](key).
PropertyDescriptor desc;
Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
isolate_, target, handle(target_keys->get(i), isolate_), &desc);
MAYBE_RETURN(found, Nothing<bool>());
- // 14b. If desc is not undefined and desc.[[Configurable]] is false, then
+ // 16b. If desc is not undefined and desc.[[Configurable]] is false, then
if (found.FromJust() && !desc.configurable()) {
- // 14b i. Append key as an element of targetNonconfigurableKeys.
+ // 16b i. Append key as an element of targetNonconfigurableKeys.
target_nonconfigurable_keys->set(nonconfigurable_keys_length,
target_keys->get(i));
nonconfigurable_keys_length++;
// The key was moved, null it out in the original list.
target_keys->set(i, Smi::kZero);
} else {
- // 14c. Else,
- // 14c i. Append key as an element of targetConfigurableKeys.
+ // 16c. Else,
+ // 16c i. Append key as an element of targetConfigurableKeys.
// (No-op, just keep it in |target_keys|.)
}
}
- // 15. If extensibleTarget is true and targetNonconfigurableKeys is empty,
+ // 17. If extensibleTarget is true and targetNonconfigurableKeys is empty,
// then:
if (extensible_target && nonconfigurable_keys_length == 0) {
- // 15a. Return trapResult.
+ // 17a. Return trapResult.
return AddKeysFromJSProxy(proxy, trap_result);
}
- // 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
- Zone set_zone(isolate_->allocator(), ZONE_NAME);
- ZoneAllocationPolicy alloc(&set_zone);
- const int kPresent = 1;
- const int kGone = 0;
- base::TemplateHashMapImpl<Handle<Name>, int, NameComparator,
- ZoneAllocationPolicy>
- unchecked_result_keys(ZoneHashMap::kDefaultHashMapCapacity,
- NameComparator(isolate_), alloc);
- int unchecked_result_keys_size = 0;
- for (int i = 0; i < trap_result->length(); ++i) {
- Handle<Name> key(Name::cast(trap_result->get(i)), isolate_);
- auto entry = unchecked_result_keys.LookupOrInsert(key, key->Hash(), alloc);
- if (entry->value != kPresent) {
- entry->value = kPresent;
- unchecked_result_keys_size++;
- }
- }
- // 17. Repeat, for each key that is an element of targetNonconfigurableKeys:
+ // 18. (Done in step 9)
+ // 19. Repeat, for each key that is an element of targetNonconfigurableKeys:
for (int i = 0; i < nonconfigurable_keys_length; ++i) {
Object raw_key = target_nonconfigurable_keys->get(i);
Handle<Name> key(Name::cast(raw_key), isolate_);
- // 17a. If key is not an element of uncheckedResultKeys, throw a
+ // 19a. If key is not an element of uncheckedResultKeys, throw a
// TypeError exception.
auto found = unchecked_result_keys.Lookup(key, key->Hash());
if (found == nullptr || found->value == kGone) {
@@ -917,20 +960,20 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
MessageTemplate::kProxyOwnKeysMissing, key));
return Nothing<bool>();
}
- // 17b. Remove key from uncheckedResultKeys.
+ // 19b. Remove key from uncheckedResultKeys.
found->value = kGone;
unchecked_result_keys_size--;
}
- // 18. If extensibleTarget is true, return trapResult.
+ // 20. If extensibleTarget is true, return trapResult.
if (extensible_target) {
return AddKeysFromJSProxy(proxy, trap_result);
}
- // 19. Repeat, for each key that is an element of targetConfigurableKeys:
+ // 21. Repeat, for each key that is an element of targetConfigurableKeys:
for (int i = 0; i < target_configurable_keys->length(); ++i) {
Object raw_key = target_configurable_keys->get(i);
if (raw_key->IsSmi()) continue; // Zapped entry, was nonconfigurable.
Handle<Name> key(Name::cast(raw_key), isolate_);
- // 19a. If key is not an element of uncheckedResultKeys, throw a
+ // 21a. If key is not an element of uncheckedResultKeys, throw a
// TypeError exception.
auto found = unchecked_result_keys.Lookup(key, key->Hash());
if (found == nullptr || found->value == kGone) {
@@ -938,18 +981,18 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
MessageTemplate::kProxyOwnKeysMissing, key));
return Nothing<bool>();
}
- // 19b. Remove key from uncheckedResultKeys.
+ // 21b. Remove key from uncheckedResultKeys.
found->value = kGone;
unchecked_result_keys_size--;
}
- // 20. If uncheckedResultKeys is not empty, throw a TypeError exception.
+ // 22. If uncheckedResultKeys is not empty, throw a TypeError exception.
if (unchecked_result_keys_size != 0) {
DCHECK_GT(unchecked_result_keys_size, 0);
isolate_->Throw(*isolate_->factory()->NewTypeError(
MessageTemplate::kProxyOwnKeysNonExtensible));
return Nothing<bool>();
}
- // 21. Return trapResult.
+ // 23. Return trapResult.
return AddKeysFromJSProxy(proxy, trap_result);
}
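
The rewrite above folds spec step 18 into step 9: uncheckedResultKeys is built while scanning trapResult, so a proxy ownKeys trap that returns the same key twice throws kProxyOwnKeysDuplicateEntries immediately. The duplicate check reduces to set insertion, sketched here with std::unordered_set in place of V8's zone hash map:

    #include <cstdio>
    #include <string>
    #include <unordered_set>
    #include <vector>

    // Returns false (i.e., the point where V8 throws
    // kProxyOwnKeysDuplicateEntries) when the trap result contains a
    // duplicate; otherwise `seen` ends up as the uncheckedResultKeys set
    // consumed by the later spec steps.
    bool CheckNoDuplicates(const std::vector<std::string>& trap_result,
                           std::unordered_set<std::string>* seen) {
      for (const std::string& key : trap_result) {
        if (!seen->insert(key).second) return false;  // key seen before
      }
      return true;
    }

    int main() {
      std::unordered_set<std::string> seen;
      if (!CheckNoDuplicates({"a", "b", "a"}, &seen)) {
        std::puts("TypeError: ownKeys trap returned duplicate entries");
      }
    }
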
diff --git a/deps/v8/src/keys.h b/deps/v8/src/keys.h
index b4eaa3101c..74876122c5 100644
--- a/deps/v8/src/keys.h
+++ b/deps/v8/src/keys.h
@@ -7,7 +7,7 @@
#include "src/objects.h"
#include "src/objects/hash-table.h"
-#include "src/objects/ordered-hash-table.h"
+#include "src/objects/js-objects.h"
namespace v8 {
namespace internal {
@@ -52,6 +52,8 @@ class KeyAccumulator final {
Handle<JSObject> object);
Maybe<bool> CollectOwnPropertyNames(Handle<JSReceiver> receiver,
Handle<JSObject> object);
+ void CollectPrivateNames(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
Maybe<bool> CollectAccessCheckInterceptorKeys(
Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
Handle<JSObject> object);
diff --git a/deps/v8/src/label.h b/deps/v8/src/label.h
index 489f93d76b..a70f17292e 100644
--- a/deps/v8/src/label.h
+++ b/deps/v8/src/label.h
@@ -46,10 +46,12 @@ class Label {
#endif
#endif
+#ifdef DEBUG
V8_INLINE ~Label() {
DCHECK(!is_linked());
DCHECK(!is_near_linked());
}
+#endif
V8_INLINE void Unuse() { pos_ = 0; }
V8_INLINE void UnuseNear() { near_link_pos_ = 0; }
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index 90d3b0d3bc..1b9686c612 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -9,7 +9,7 @@
#include "src/handles-inl.h"
#include "src/objects-inl.h"
-#include "src/objects/descriptor-array.h"
+#include "src/objects/descriptor-array-inl.h"
#include "src/objects/smi.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index d5ce1f9223..0a0523dc60 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -142,7 +142,7 @@ class LayoutDescriptor : public ByteArray {
V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor SetTagged(int field_index,
bool tagged);
- OBJECT_CONSTRUCTORS(LayoutDescriptor, ByteArray)
+ OBJECT_CONSTRUCTORS(LayoutDescriptor, ByteArray);
};
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index e33cf07844..624d8bfaed 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -45,16 +45,6 @@ std::unique_ptr<v8::Platform> NewDefaultPlatform(
return std::move(platform);
}
-v8::Platform* CreateDefaultPlatform(
- int thread_pool_size, IdleTaskSupport idle_task_support,
- InProcessStackDumping in_process_stack_dumping,
- v8::TracingController* tracing_controller) {
- return NewDefaultPlatform(
- thread_pool_size, idle_task_support, in_process_stack_dumping,
- std::unique_ptr<v8::TracingController>(tracing_controller))
- .release();
-}
-
bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate,
MessageLoopBehavior behavior) {
return static_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate,
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index 53fe01e42d..8d25787495 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -16,7 +16,7 @@ namespace v8 {
namespace platform {
namespace tracing {
-#define MAX_CATEGORY_GROUPS 200
+static const size_t kMaxCategoryGroups = 200;
// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
@@ -24,13 +24,13 @@ namespace tracing {
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
-const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
+const char* g_category_groups[kMaxCategoryGroups] = {
"toplevel",
- "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
+ "tracing categories exhausted; must increase kMaxCategoryGroups",
"__metadata"};
// The enabled flag is char instead of bool so that the API can be used from C.
-unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
+unsigned char g_category_group_enabled[kMaxCategoryGroups] = {0};
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_categories_exhausted = 1;
// Metadata category not used in V8.
@@ -78,13 +78,16 @@ uint64_t TracingController::AddTraceEvent(
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags) {
uint64_t handle = 0;
- if (mode_ != DISABLED) {
+ if (recording_.load(std::memory_order_acquire)) {
TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
if (trace_object) {
- trace_object->Initialize(
- phase, category_enabled_flag, name, scope, id, bind_id, num_args,
- arg_names, arg_types, arg_values, arg_convertables, flags,
- CurrentTimestampMicroseconds(), CurrentCpuTimestampMicroseconds());
+ {
+ base::MutexGuard lock(mutex_.get());
+ trace_object->Initialize(
+ phase, category_enabled_flag, name, scope, id, bind_id, num_args,
+ arg_names, arg_types, arg_values, arg_convertables, flags,
+ CurrentTimestampMicroseconds(), CurrentCpuTimestampMicroseconds());
+ }
}
}
return handle;
@@ -98,13 +101,16 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags, int64_t timestamp) {
uint64_t handle = 0;
- if (mode_ != DISABLED) {
+ if (recording_.load(std::memory_order_acquire)) {
TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
if (trace_object) {
- trace_object->Initialize(phase, category_enabled_flag, name, scope, id,
- bind_id, num_args, arg_names, arg_types,
- arg_values, arg_convertables, flags, timestamp,
- CurrentCpuTimestampMicroseconds());
+ {
+ base::MutexGuard lock(mutex_.get());
+ trace_object->Initialize(phase, category_enabled_flag, name, scope, id,
+ bind_id, num_args, arg_names, arg_types,
+ arg_values, arg_convertables, flags, timestamp,
+ CurrentCpuTimestampMicroseconds());
+ }
}
}
return handle;
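
Both AddTraceEvent paths now gate on an acquire load of the new recording_ flag (pairing with the release store in StartTracing, below) and serialize TraceObject::Initialize under the controller mutex. A sketch of the acquire/release pairing with a cheap lock-free fast path; the trace buffer is reduced to a counter:

    #include <atomic>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    std::atomic<bool> recording{false};
    std::mutex mu;
    int events = 0;  // stands in for the trace buffer

    void StartTracing() {
      // release: everything written before this store is visible to any
      // thread that later observes recording == true via an acquire load.
      recording.store(true, std::memory_order_release);
    }

    void AddTraceEvent() {
      if (!recording.load(std::memory_order_acquire)) return;  // fast path
      std::lock_guard<std::mutex> lock(mu);  // writers still serialize
      ++events;
    }

    int main() {
      StartTracing();
      std::thread t1(AddTraceEvent), t2(AddTraceEvent);
      t1.join();
      t2.join();
      std::printf("%d events\n", events);
    }
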
@@ -118,11 +124,6 @@ void TracingController::UpdateTraceEventDuration(
CurrentCpuTimestampMicroseconds());
}
-const uint8_t* TracingController::GetCategoryGroupEnabled(
- const char* category_group) {
- return GetCategoryGroupEnabledInternal(category_group);
-}
-
const char* TracingController::GetCategoryGroupName(
const uint8_t* category_group_enabled) {
// Calculate the index of the category group by finding
@@ -133,7 +134,7 @@ const char* TracingController::GetCategoryGroupName(
// Check for out of bounds category pointers.
DCHECK(category_ptr >= category_begin &&
category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
- MAX_CATEGORY_GROUPS));
+ kMaxCategoryGroups));
uintptr_t category_index =
(category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
return g_category_groups[category_index];
@@ -144,7 +145,7 @@ void TracingController::StartTracing(TraceConfig* trace_config) {
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
base::MutexGuard lock(mutex_.get());
- mode_ = RECORDING_MODE;
+ recording_.store(true, std::memory_order_release);
UpdateCategoryGroupEnabledFlags();
observers_copy = observers_;
}
@@ -154,11 +155,11 @@ void TracingController::StartTracing(TraceConfig* trace_config) {
}
void TracingController::StopTracing() {
- if (mode_ == DISABLED) {
+ bool expected = true;
+ if (!recording_.compare_exchange_strong(expected, false)) {
return;
}
DCHECK(trace_buffer_);
- mode_ = DISABLED;
UpdateCategoryGroupEnabledFlags();
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
@@ -168,13 +169,16 @@ void TracingController::StopTracing() {
for (auto o : observers_copy) {
o->OnTraceDisabled();
}
- trace_buffer_->Flush();
+ {
+ base::MutexGuard lock(mutex_.get());
+ trace_buffer_->Flush();
+ }
}
void TracingController::UpdateCategoryGroupEnabledFlag(size_t category_index) {
unsigned char enabled_flag = 0;
const char* category_group = g_category_groups[category_index];
- if (mode_ == RECORDING_MODE &&
+ if (recording_.load(std::memory_order_acquire) &&
trace_config_->IsCategoryGroupEnabled(category_group)) {
enabled_flag |= ENABLED_FOR_RECORDING;
}
@@ -183,7 +187,8 @@ void TracingController::UpdateCategoryGroupEnabledFlag(size_t category_index) {
// TODO(primiano): this is a temporary workaround for catapult:#2341,
// to guarantee that metadata events are always added even if the category
// filter is "-*". See crbug.com/618054 for more details and long-term fix.
- if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata")) {
+ if (recording_.load(std::memory_order_acquire) &&
+ !strcmp(category_group, "__metadata")) {
enabled_flag |= ENABLED_FOR_RECORDING;
}
@@ -193,13 +198,13 @@ void TracingController::UpdateCategoryGroupEnabledFlag(size_t category_index) {
}
void TracingController::UpdateCategoryGroupEnabledFlags() {
- size_t category_index = base::Relaxed_Load(&g_category_index);
+ size_t category_index = base::Acquire_Load(&g_category_index);
for (size_t i = 0; i < category_index; i++) UpdateCategoryGroupEnabledFlag(i);
}
-const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
+const uint8_t* TracingController::GetCategoryGroupEnabled(
const char* category_group) {
- // Check that category groups does not contain double quote
+ // Check that category group does not contain double quote
DCHECK(!strchr(category_group, '"'));
// The g_category_groups is append only, avoid using a lock for the fast path.
@@ -226,8 +231,8 @@ const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
// Create a new category group.
// Check that there is a slot for the new category_group.
- DCHECK(category_index < MAX_CATEGORY_GROUPS);
- if (category_index < MAX_CATEGORY_GROUPS) {
+ DCHECK(category_index < kMaxCategoryGroups);
+ if (category_index < kMaxCategoryGroups) {
// Don't hold on to the category_group pointer, so that we can create
// category groups with strings not known at compile time (this is
// required by SetWatchEvent).
@@ -253,7 +258,7 @@ void TracingController::AddTraceStateObserver(
{
base::MutexGuard lock(mutex_.get());
observers_.insert(observer);
- if (mode_ != RECORDING_MODE) return;
+ if (!recording_.load(std::memory_order_acquire)) return;
}
// Fire the observer if recording is already in progress.
observer->OnTraceEnabled();
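
[Editor's note] The tracing-controller hunks above replace the mode_ enum with an atomic recording_ flag: it is written under the mutex with release semantics, read lock-free with acquire semantics on the hot AddTraceEvent path, and flipped off with a compare-exchange so only one caller runs teardown. A minimal stand-alone sketch of that protocol (hypothetical Recorder class, not the V8 implementation):

#include <atomic>
#include <mutex>

class Recorder {
 public:
  void Start() {
    std::lock_guard<std::mutex> lock(mutex_);
    // Release pairs with the acquire load in Record(): a thread that sees
    // recording_ == true also sees everything initialized before the store.
    recording_.store(true, std::memory_order_release);
  }

  void Stop() {
    bool expected = true;
    // Only the caller that flips true -> false performs teardown, so
    // concurrent Stop() calls cannot flush the buffer twice.
    if (!recording_.compare_exchange_strong(expected, false)) return;
    std::lock_guard<std::mutex> lock(mutex_);
    // ... flush buffers here ...
  }

  void Record() {
    // Fast path: no lock is taken while tracing is disabled.
    if (!recording_.load(std::memory_order_acquire)) return;
    std::lock_guard<std::mutex> lock(mutex_);
    // ... append an event here ...
  }

 private:
  std::mutex mutex_;
  std::atomic<bool> recording_{false};
};
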
diff --git a/deps/v8/src/libsampler/OWNERS b/deps/v8/src/libsampler/OWNERS
new file mode 100644
index 0000000000..87c96616bc
--- /dev/null
+++ b/deps/v8/src/libsampler/OWNERS
@@ -0,0 +1 @@
+alph@chromium.org
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index eb804a787a..94ad3fd4c5 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -234,6 +234,7 @@ void SamplerManager::DoSample(const v8::RegisterState& state) {
SamplerList& samplers = it->second;
for (Sampler* sampler : samplers) {
+ if (!sampler->ShouldRecordSample()) continue;
Isolate* isolate = sampler->isolate();
// We require a fully initialized and entered isolate.
if (isolate == nullptr || !isolate->IsInUse()) continue;
@@ -542,6 +543,7 @@ void Sampler::Stop() {
void Sampler::DoSample() {
if (!SignalHandler::Installed()) return;
DCHECK(IsActive());
+ SetShouldRecordSample();
pthread_kill(platform_data()->vm_tid(), SIGPROF);
}
diff --git a/deps/v8/src/libsampler/sampler.h b/deps/v8/src/libsampler/sampler.h
index 8e39a95f58..a3f26bc848 100644
--- a/deps/v8/src/libsampler/sampler.h
+++ b/deps/v8/src/libsampler/sampler.h
@@ -49,6 +49,12 @@ class Sampler {
// Whether the sampler is running (start has been called).
bool IsActive() const { return active_.load(std::memory_order_relaxed); }
+ // Returns true and consumes the pending sample bit if a sample should be
+ // dispatched to this sampler.
+ bool ShouldRecordSample() {
+ return record_sample_.exchange(false, std::memory_order_relaxed);
+ }
+
void DoSample();
// Used in tests to make sure that stack sampling is performed.
@@ -73,8 +79,13 @@ class Sampler {
active_.store(value, std::memory_order_relaxed);
}
+ void SetShouldRecordSample() {
+ record_sample_.store(true, std::memory_order_relaxed);
+ }
+
Isolate* isolate_;
std::atomic_bool active_{false};
+ std::atomic_bool record_sample_{false};
std::unique_ptr<PlatformData> data_; // Platform specific data.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
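
[Editor's note] The record_sample_ bit added here is a one-shot handshake between the sampling thread and the SIGPROF handler: DoSample() arms the bit before raising the signal, and the handler consumes it with an atomic exchange, so a stray or coalesced signal cannot record an extra sample. The idiom in isolation (hypothetical names, for illustration only):

#include <atomic>

std::atomic_bool record_sample{false};

void Arm() {      // sampler thread, just before pthread_kill(..., SIGPROF)
  record_sample.store(true, std::memory_order_relaxed);
}

bool Consume() {  // signal handler
  // exchange(false) returns the previous value and clears the flag in one
  // atomic step, so at most one handler invocation observes true per Arm().
  return record_sample.exchange(false, std::memory_order_relaxed);
}
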
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 6aecad98fe..8d7f9f5b81 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -563,8 +563,6 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "ia32";
#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT
const char arch[] = "x64";
-#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
- const char arch[] = "x32";
#elif V8_TARGET_ARCH_ARM
const char arch[] = "arm";
#elif V8_TARGET_ARCH_PPC
@@ -1625,7 +1623,7 @@ void Logger::ICEvent(const char* type, bool keyed, Map map, Object key,
Address pc = isolate_->GetAbstractPC(&line, &column);
msg << type << kNext << reinterpret_cast<void*>(pc) << kNext << line << kNext
<< column << kNext << old_state << kNext << new_state << kNext
- << reinterpret_cast<void*>(map.ptr()) << kNext;
+ << AsHex::Address(map.ptr()) << kNext;
if (key->IsSmi()) {
msg << Smi::ToInt(key);
} else if (key->IsNumber()) {
@@ -1654,10 +1652,9 @@ void Logger::MapEvent(const char* type, Map from, Map to, const char* reason,
}
Log::MessageBuilder msg(log_);
msg << "map" << kNext << type << kNext << timer_.Elapsed().InMicroseconds()
- << kNext << reinterpret_cast<void*>(from.ptr()) << kNext
- << reinterpret_cast<void*>(to.ptr()) << kNext
- << reinterpret_cast<void*>(pc) << kNext << line << kNext << column
- << kNext << reason << kNext;
+ << kNext << AsHex::Address(from.ptr()) << kNext
+ << AsHex::Address(to.ptr()) << kNext << AsHex::Address(pc) << kNext
+ << line << kNext << column << kNext << reason << kNext;
if (!name_or_sfi.is_null()) {
if (name_or_sfi->IsName()) {
@@ -1666,7 +1663,7 @@ void Logger::MapEvent(const char* type, Map from, Map to, const char* reason,
SharedFunctionInfo sfi = SharedFunctionInfo::cast(name_or_sfi);
msg << sfi->DebugName();
#if V8_SFI_HAS_UNIQUE_ID
- msg << " " << sfi->unique_id();
+ msg << " " << SharedFunctionInfoWithID::cast(sfi)->unique_id();
#endif // V8_SFI_HAS_UNIQUE_ID
}
}
@@ -1678,7 +1675,7 @@ void Logger::MapCreate(Map map) {
DisallowHeapAllocation no_gc;
Log::MessageBuilder msg(log_);
msg << "map-create" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << reinterpret_cast<void*>(map.ptr());
+ << AsHex::Address(map.ptr());
msg.WriteToLogFile();
}
@@ -1687,7 +1684,7 @@ void Logger::MapDetails(Map map) {
DisallowHeapAllocation no_gc;
Log::MessageBuilder msg(log_);
msg << "map-details" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << reinterpret_cast<void*>(map.ptr()) << kNext;
+ << AsHex::Address(map.ptr()) << kNext;
if (FLAG_trace_maps_details) {
std::ostringstream buffer;
map->PrintMapDetails(buffer);
@@ -1761,7 +1758,8 @@ static int EnumerateWasmModuleObjects(
if (obj->IsWasmModuleObject()) {
WasmModuleObject module = WasmModuleObject::cast(obj);
if (module_objects != nullptr) {
- module_objects[module_objects_count] = handle(module, heap->isolate());
+ module_objects[module_objects_count] =
+ handle(module, Isolate::FromHeap(heap));
}
module_objects_count++;
}
@@ -1932,6 +1930,7 @@ void Logger::SetCodeEventHandler(uint32_t options,
}
if (event_handler) {
+ isolate_->wasm_engine()->EnableCodeLogging(isolate_);
jit_logger_.reset(new JitLogger(isolate_, event_handler));
AddCodeEventListener(jit_logger_.get());
if (options & kJitCodeEventEnumExisting) {
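
[Editor's note] The log.cc hunks swap reinterpret_cast<void*> stream insertion for AsHex::Address, so tagged addresses print as fixed-width hex rather than whatever the platform's %p formatting produces. A comparable ostream helper might look like this (an illustrative sketch; the real AsHex lives in V8's ostreams utilities):

#include <cstdint>
#include <iomanip>
#include <iostream>

struct AsHexAddress {  // hypothetical minimal analogue of AsHex::Address
  uintptr_t value;
};

inline std::ostream& operator<<(std::ostream& os, AsHexAddress a) {
  // Print 0x-prefixed, zero-padded hex, then restore the stream state.
  std::ios_base::fmtflags saved_flags = os.flags();
  char saved_fill = os.fill('0');
  os << "0x" << std::hex << std::setw(sizeof(uintptr_t) * 2) << a.value;
  os.fill(saved_fill);
  os.flags(saved_flags);
  return os;
}

int main() {
  std::cout << AsHexAddress{0xdeadbeef} << "\n";  // 0x00000000deadbeef on x64
}
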
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 3608aa763d..cc5d13dd6b 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -1082,11 +1082,8 @@ JSReceiver LookupIterator::NextHolder(Map map) {
LookupIterator::State LookupIterator::NotFound(JSReceiver const holder) const {
DCHECK(!IsElement());
if (!holder->IsJSTypedArray() || !name_->IsString()) return NOT_FOUND;
-
- Handle<String> name_string = Handle<String>::cast(name_);
- if (name_string->length() == 0) return NOT_FOUND;
-
- return IsSpecialIndex(*name_string) ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
+ return IsSpecialIndex(String::cast(*name_)) ? INTEGER_INDEXED_EXOTIC
+ : NOT_FOUND;
}
namespace {
diff --git a/deps/v8/src/map-updater.cc b/deps/v8/src/map-updater.cc
index 6825f1ac83..c6209cd9d6 100644
--- a/deps/v8/src/map-updater.cc
+++ b/deps/v8/src/map-updater.cc
@@ -9,6 +9,7 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/property-details.h"
#include "src/transitions.h"
namespace v8 {
@@ -45,8 +46,16 @@ Name MapUpdater::GetKey(int descriptor) const {
PropertyDetails MapUpdater::GetDetails(int descriptor) const {
DCHECK_LE(0, descriptor);
if (descriptor == modified_descriptor_) {
- return PropertyDetails(new_kind_, new_attributes_, new_location_,
- new_constness_, new_representation_);
+ PropertyAttributes attributes = new_attributes_;
+ // If the original map was sealed or frozen, let us use the old
+ // attributes so that we follow the same transition path as before.
+ // Note that the user could not have changed the attributes because
+ // both seal and freeze make the properties non-configurable.
+ if (integrity_level_ == SEALED || integrity_level_ == FROZEN) {
+ attributes = old_descriptors_->GetDetails(descriptor).attributes();
+ }
+ return PropertyDetails(new_kind_, attributes, new_location_, new_constness_,
+ new_representation_);
}
return old_descriptors_->GetDetails(descriptor);
}
@@ -141,10 +150,12 @@ Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
isolate_, old_map_->instance_type(), &new_constness_,
&new_representation_, &new_field_type_);
- if (TryRecofigureToDataFieldInplace() == kEnd) return result_map_;
+ if (TryReconfigureToDataFieldInplace() == kEnd) return result_map_;
if (FindRootMap() == kEnd) return result_map_;
if (FindTargetMap() == kEnd) return result_map_;
- ConstructNewMap();
+ if (ConstructNewMap() == kAtIntegrityLevelSource) {
+ ConstructNewMapWithIntegrityLevelTransition();
+ }
DCHECK_EQ(kEnd, state_);
return result_map_;
}
@@ -157,7 +168,9 @@ Handle<Map> MapUpdater::ReconfigureElementsKind(ElementsKind elements_kind) {
if (FindRootMap() == kEnd) return result_map_;
if (FindTargetMap() == kEnd) return result_map_;
- ConstructNewMap();
+ if (ConstructNewMap() == kAtIntegrityLevelSource) {
+ ConstructNewMapWithIntegrityLevelTransition();
+ }
DCHECK_EQ(kEnd, state_);
return result_map_;
}
@@ -168,7 +181,9 @@ Handle<Map> MapUpdater::Update() {
if (FindRootMap() == kEnd) return result_map_;
if (FindTargetMap() == kEnd) return result_map_;
- ConstructNewMap();
+ if (ConstructNewMap() == kAtIntegrityLevelSource) {
+ ConstructNewMapWithIntegrityLevelTransition();
+ }
DCHECK_EQ(kEnd, state_);
if (FLAG_fast_map_update) {
TransitionsAccessor(isolate_, old_map_).SetMigrationTarget(*result_map_);
@@ -183,7 +198,8 @@ void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index,
Map::GeneralizeField(isolate_, map, modify_index, new_constness,
new_representation, new_field_type);
- DCHECK_EQ(*old_descriptors_, old_map_->instance_descriptors());
+ DCHECK(*old_descriptors_ == old_map_->instance_descriptors() ||
+ *old_descriptors_ == integrity_source_map_->instance_descriptors());
}
MapUpdater::State MapUpdater::CopyGeneralizeAllFields(const char* reason) {
@@ -194,7 +210,7 @@ MapUpdater::State MapUpdater::CopyGeneralizeAllFields(const char* reason) {
return state_; // Done.
}
-MapUpdater::State MapUpdater::TryRecofigureToDataFieldInplace() {
+MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
// If it's just a representation generalization case (i.e. property kind and
// attributes stays unchanged) it's fine to transition from None to anything
// but double without any modification to the object, because the default
@@ -238,12 +254,56 @@ MapUpdater::State MapUpdater::TryRecofigureToDataFieldInplace() {
return state_; // Done.
}
+bool MapUpdater::TrySaveIntegrityLevelTransitions() {
+ // Figure out the most restrictive integrity level transition (it should
+ // be the last one in the transition tree).
+ Handle<Map> previous =
+ handle(Map::cast(old_map_->GetBackPointer()), isolate_);
+ Symbol integrity_level_symbol;
+ TransitionsAccessor last_transitions(isolate_, previous);
+ if (!last_transitions.HasIntegrityLevelTransitionTo(
+ *old_map_, &integrity_level_symbol, &integrity_level_)) {
+ // The last transition was not an integrity level transition - just bail
+ // out.
+ // This can happen in the following cases:
+ // - there are private symbol transitions following the integrity level
+ // transitions (see crbug.com/v8/8854).
+ // - there is a getter added in addition to an existing setter (or a setter
+ // in addition to an existing getter).
+ return false;
+ }
+ integrity_level_symbol_ = handle(integrity_level_symbol, isolate_);
+ integrity_source_map_ = previous;
+
+ // Now walk up the back pointer chain and skip all integrity level
+ // transitions. If we encounter any non-integrity level transition interleaved
+ // with integrity level transitions, just bail out.
+ while (!integrity_source_map_->is_extensible()) {
+ previous =
+ handle(Map::cast(integrity_source_map_->GetBackPointer()), isolate_);
+ TransitionsAccessor transitions(isolate_, previous);
+ if (!transitions.HasIntegrityLevelTransitionTo(*integrity_source_map_)) {
+ return false;
+ }
+ integrity_source_map_ = previous;
+ }
+
+ // Integrity-level transitions never change the number of descriptors.
+ CHECK_EQ(old_map_->NumberOfOwnDescriptors(),
+ integrity_source_map_->NumberOfOwnDescriptors());
+
+ has_integrity_level_transition_ = true;
+ old_descriptors_ =
+ handle(integrity_source_map_->instance_descriptors(), isolate_);
+ return true;
+}
+
MapUpdater::State MapUpdater::FindRootMap() {
DCHECK_EQ(kInitialized, state_);
// Check the state of the root map.
root_map_ = handle(old_map_->FindRootMap(isolate_), isolate_);
ElementsKind from_kind = root_map_->elements_kind();
ElementsKind to_kind = new_elements_kind_;
+
if (root_map_->is_deprecated()) {
state_ = kEnd;
result_map_ = handle(
@@ -252,9 +312,24 @@ MapUpdater::State MapUpdater::FindRootMap() {
DCHECK(result_map_->is_dictionary_map());
return state_;
}
- int root_nof = root_map_->NumberOfOwnDescriptors();
+
if (!old_map_->EquivalentToForTransition(*root_map_)) {
return CopyGeneralizeAllFields("GenAll_NotEquivalent");
+ } else if (old_map_->is_extensible() != root_map_->is_extensible()) {
+ DCHECK(!old_map_->is_extensible());
+ DCHECK(root_map_->is_extensible());
+ // We have an integrity level transition in the tree, let us make a note
+ // of that transition to be able to replay it later.
+ if (!TrySaveIntegrityLevelTransitions()) {
+ return CopyGeneralizeAllFields("GenAll_PrivateSymbolsOnNonExtensible");
+ }
+
+ // We want to build transitions to the original element kind (before
+ // the seal transitions), so change {to_kind} accordingly.
+ DCHECK(to_kind == DICTIONARY_ELEMENTS ||
+ to_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
+ IsFixedTypedArrayElementsKind(to_kind));
+ to_kind = integrity_source_map_->elements_kind();
}
// TODO(ishell): Add a test for SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
@@ -266,6 +341,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
return CopyGeneralizeAllFields("GenAll_InvalidElementsTransition");
}
+ int root_nof = root_map_->NumberOfOwnDescriptors();
if (modified_descriptor_ >= 0 && modified_descriptor_ < root_nof) {
PropertyDetails old_details =
old_descriptors_->GetDetails(modified_descriptor_);
@@ -276,8 +352,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
if (old_details.location() != kField) {
return CopyGeneralizeAllFields("GenAll_RootModification2");
}
- if (new_constness_ != old_details.constness() &&
- (!FLAG_modify_map_inplace || !old_map_->is_prototype_map())) {
+ if (new_constness_ != old_details.constness() && !FLAG_modify_map_inplace) {
return CopyGeneralizeAllFields("GenAll_RootModification3");
}
if (!new_representation_.fits_into(old_details.representation())) {
@@ -295,11 +370,6 @@ MapUpdater::State MapUpdater::FindRootMap() {
// Modify root map in-place.
if (FLAG_modify_map_inplace && new_constness_ != old_details.constness()) {
- // Only prototype root maps are allowed to be updated in-place.
- // TODO(ishell): fix all the stubs that use prototype map check to
- // ensure that the prototype was not modified.
- DCHECK(old_map_->is_prototype_map());
- DCHECK(old_map_->is_stable());
DCHECK(IsGeneralizableTo(old_details.constness(), new_constness_));
GeneralizeField(old_map_, modified_descriptor_, new_constness_,
old_details.representation(),
@@ -379,7 +449,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
PropertyDetails details =
target_descriptors->GetDetails(modified_descriptor_);
DCHECK_EQ(new_kind_, details.kind());
- DCHECK_EQ(new_attributes_, details.attributes());
+ DCHECK_EQ(GetDetails(modified_descriptor_).attributes(),
+ details.attributes());
DCHECK(IsGeneralizableTo(new_constness_, details.constness()));
DCHECK_EQ(new_location_, details.location());
DCHECK(new_representation_.fits_into(details.representation()));
@@ -398,9 +469,20 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (*target_map_ != *old_map_) {
old_map_->NotifyLeafMapLayoutChange(isolate_);
}
- result_map_ = target_map_;
- state_ = kEnd;
- return state_; // Done.
+ if (!has_integrity_level_transition_) {
+ result_map_ = target_map_;
+ state_ = kEnd;
+ return state_; // Done.
+ }
+
+ // We try to replay the integrity level transition here.
+ Map transition = TransitionsAccessor(isolate_, target_map_)
+ .SearchSpecial(*integrity_level_symbol_);
+ if (!transition.is_null()) {
+ result_map_ = handle(transition, isolate_);
+ state_ = kEnd;
+ return state_; // Done.
+ }
}
// Find the last compatible target map in the transition tree.
@@ -572,10 +654,10 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
// If the |new_elements_kind_| is still transitionable then the old map's
// elements kind is also transitionable and therefore the old descriptors
- // array must already have non in-place generalizable fields.
- CHECK_IMPLIES(is_transitionable_fast_elements_kind_,
- !Map::IsInplaceGeneralizableField(
- next_constness, next_representation, *next_field_type));
+ // array must already have generalized field type.
+ CHECK_IMPLIES(
+ is_transitionable_fast_elements_kind_,
+ Map::IsMostGeneralFieldType(next_representation, *next_field_type));
MaybeObjectHandle wrapped_type(
Map::WrapFieldType(isolate_, next_field_type));
@@ -653,7 +735,11 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
Handle<Map> split_map = FindSplitMap(new_descriptors);
int split_nof = split_map->NumberOfOwnDescriptors();
- DCHECK_NE(old_nof_, split_nof);
+ if (old_nof_ == split_nof) {
+ CHECK(has_integrity_level_transition_);
+ state_ = kAtIntegrityLevelSource;
+ return state_;
+ }
PropertyDetails split_details = GetDetails(split_nof);
TransitionsAccessor transitions(isolate_, split_map);
@@ -717,10 +803,31 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
split_map->ReplaceDescriptors(isolate_, *new_descriptors,
*new_layout_descriptor);
- result_map_ = new_map;
- state_ = kEnd;
+ if (has_integrity_level_transition_) {
+ target_map_ = new_map;
+ state_ = kAtIntegrityLevelSource;
+ } else {
+ result_map_ = new_map;
+ state_ = kEnd;
+ }
return state_; // Done.
}
+MapUpdater::State MapUpdater::ConstructNewMapWithIntegrityLevelTransition() {
+ DCHECK_EQ(kAtIntegrityLevelSource, state_);
+
+ TransitionsAccessor transitions(isolate_, target_map_);
+ if (!transitions.CanHaveMoreTransitions()) {
+ return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
+ }
+
+ result_map_ = Map::CopyForPreventExtensions(
+ isolate_, target_map_, integrity_level_, integrity_level_symbol_,
+ "CopyForPreventExtensions");
+
+ state_ = kEnd;
+ return state_;
+}
+
} // namespace internal
} // namespace v8
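
[Editor's note] TrySaveIntegrityLevelTransitions walks the map's back-pointer chain: starting from the non-extensible old map, it keeps stepping to the parent as long as each hop is an integrity level (preventExtensions/seal/freeze) transition, and records the first extensible ancestor as integrity_source_map_; any interleaved non-integrity transition makes it bail out. Abstracted away from V8's Map machinery, the walk is roughly (hypothetical types, illustration only):

struct MapNode {                            // stand-in for v8::internal::Map
  MapNode* back_pointer;                    // parent in the transition tree
  bool is_extensible;
  bool reached_via_integrity_transition;    // seal/freeze/preventExtensions
};

// Returns the first extensible ancestor if every hop from |old_map| up to it
// is an integrity level transition; returns nullptr otherwise (the real code
// then falls back to CopyGeneralizeAllFields).
MapNode* FindIntegritySource(MapNode* old_map) {
  MapNode* source = old_map;
  while (!source->is_extensible) {
    if (!source->reached_via_integrity_transition) return nullptr;
    source = source->back_pointer;
  }
  return source;
}
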
diff --git a/deps/v8/src/map-updater.h b/deps/v8/src/map-updater.h
index 511541e882..13571a3321 100644
--- a/deps/v8/src/map-updater.h
+++ b/deps/v8/src/map-updater.h
@@ -25,12 +25,16 @@ namespace internal {
// rewrite the new type is deduced by merging the current type with any
// potential new (partial) version of the type in the transition tree.
// To do this, on each rewrite:
-// - Search the root of the transition tree using FindRootMap.
+// - Search the root of the transition tree using FindRootMap, remember
+// the integrity level (preventExtensions/seal/freeze) transitions.
// - Find/create a |root_map| with requested |new_elements_kind|.
// - Find |target_map|, the newest matching version of this map using the
// "updated" |old_map|'s descriptor array (i.e. whose entry at |modify_index|
// is considered to be of |new_kind| and having |new_attributes|) to walk
-// the transition tree.
+// the transition tree. If there was an integrity level transition on the path
+// to the old map, use the descriptor array of the map preceding the first
+// integrity level transition (|integrity_source_map|), and try to replay
+// the integrity level transition afterwards.
// - Merge/generalize the "updated" descriptor array of the |old_map| and
// descriptor array of the |target_map|.
// - Generalize the |modify_index| descriptor using |new_representation| and
@@ -38,10 +42,11 @@ namespace internal {
// - Walk the tree again starting from the root towards |target_map|. Stop at
// |split_map|, the first map whose descriptor array does not match the merged
// descriptor array.
-// - If |target_map| == |split_map|, |target_map| is in the expected state.
-// Return it.
+// - If |target_map| == |split_map|, and there are no integrity level
+// transitions, |target_map| is in the expected state. Return it.
// - Otherwise, invalidate the outdated transition target from |target_map|, and
// replace its transition tree with a new branch for the updated descriptors.
+// - If the |old_map| had an integrity level transition, create the new map
+// for it.
class MapUpdater {
public:
MapUpdater(Isolate* isolate, Handle<Map> old_map);
@@ -63,11 +68,17 @@ class MapUpdater {
Handle<Map> Update();
private:
- enum State { kInitialized, kAtRootMap, kAtTargetMap, kEnd };
+ enum State {
+ kInitialized,
+ kAtRootMap,
+ kAtTargetMap,
+ kAtIntegrityLevelSource,
+ kEnd
+ };
// Try to reconfigure property in-place without rebuilding transition tree
// and creating new maps. See implementation for details.
- State TryRecofigureToDataFieldInplace();
+ State TryReconfigureToDataFieldInplace();
// Step 1.
// - Search the root of the transition tree using FindRootMap.
@@ -75,10 +86,14 @@ class MapUpdater {
State FindRootMap();
// Step 2.
- // - Find |target_map_|, the newest matching version of this map using the
+ // - Find |target_map|, the newest matching version of this map using the
// "updated" |old_map|'s descriptor array (i.e. whose entry at
- // |modified_descriptor_| is considered to be of |new_kind| and having
- // |new_attributes|) to walk the transition tree.
+ // |modify_index| is considered to be of |new_kind| and having
+ // |new_attributes|) to walk the transition tree. If there was an integrity
+ // level transition on the path to the old map, use the descriptor array
+ // of the map preceding the first integrity level transition
+ // (|integrity_source_map|), and try to replay the integrity level
+ // transition afterwards.
State FindTargetMap();
// Step 3.
@@ -102,6 +117,11 @@ class MapUpdater {
// descriptors.
State ConstructNewMap();
+ // Step 6 (if there was an integrity level transition).
+ // - If the |old_map| had an integrity level transition, create the new map
+ // for it.
+ State ConstructNewMapWithIntegrityLevelTransition();
+
// When a requested reconfiguration can not be done the result is a copy
// of |old_map_| where every field has |Tagged| representation and |Any|
// field type. This map is disconnected from the transition tree.
@@ -143,6 +163,8 @@ class MapUpdater {
Representation new_representation,
Handle<FieldType> new_field_type);
+ bool TrySaveIntegrityLevelTransitions();
+
Isolate* isolate_;
Handle<Map> old_map_;
Handle<DescriptorArray> old_descriptors_;
@@ -151,6 +173,12 @@ class MapUpdater {
Handle<Map> result_map_;
int old_nof_;
+ // Information about integrity level transitions.
+ bool has_integrity_level_transition_ = false;
+ PropertyAttributes integrity_level_ = NONE;
+ Handle<Symbol> integrity_level_symbol_;
+ Handle<Map> integrity_source_map_;
+
State state_ = kInitialized;
ElementsKind new_elements_kind_;
bool is_transitionable_fast_elements_kind_;
diff --git a/deps/v8/src/memcopy.h b/deps/v8/src/memcopy.h
index e0469a024e..1229ec916d 100644
--- a/deps/v8/src/memcopy.h
+++ b/deps/v8/src/memcopy.h
@@ -25,7 +25,7 @@ void init_memcopy_functions();
#if defined(V8_TARGET_ARCH_IA32)
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
-const int kMinComplexMemCopy = 64;
+const size_t kMinComplexMemCopy = 64;
// Copy memory area. No restrictions.
V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size);
@@ -45,7 +45,7 @@ V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
memcpy(dest, src, chars);
}
// For values < 16, the assembler function is slower than the inlined C code.
-const int kMinComplexMemCopy = 16;
+const size_t kMinComplexMemCopy = 16;
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
(*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
reinterpret_cast<const uint8_t*>(src), size);
@@ -75,7 +75,7 @@ V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
memcpy(dest, src, chars);
}
// For values < 16, the assembler function is slower than the inlined C code.
-const int kMinComplexMemCopy = 16;
+const size_t kMinComplexMemCopy = 16;
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
(*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
reinterpret_cast<const uint8_t*>(src), size);
@@ -93,54 +93,49 @@ V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
size_t size) {
memmove(dest, src, size);
}
-const int kMinComplexMemCopy = 8;
+const size_t kMinComplexMemCopy = 8;
#endif // V8_TARGET_ARCH_IA32
// Copies words from |src| to |dst|. The data spans must not overlap.
-// |src| and |dst| must be kSystemPointerSize-aligned.
-inline void CopyWords(Address dst, const Address src, size_t num_words) {
- constexpr int kSystemPointerSize = sizeof(void*); // to avoid src/globals.h
- DCHECK(IsAligned(dst, kSystemPointerSize));
- DCHECK(IsAligned(src, kSystemPointerSize));
- DCHECK(((src <= dst) && ((src + num_words * kSystemPointerSize) <= dst)) ||
- ((dst <= src) && ((dst + num_words * kSystemPointerSize) <= src)));
+// |src| and |dst| must be TWord-size aligned.
+template <size_t kBlockCopyLimit, typename T>
+inline void CopyImpl(T* dst_ptr, const T* src_ptr, size_t count) {
+ constexpr int kTWordSize = sizeof(T);
+#ifdef DEBUG
+ Address dst = reinterpret_cast<Address>(dst_ptr);
+ Address src = reinterpret_cast<Address>(src_ptr);
+ DCHECK(IsAligned(dst, kTWordSize));
+ DCHECK(IsAligned(src, kTWordSize));
+ DCHECK(((src <= dst) && ((src + count * kTWordSize) <= dst)) ||
+ ((dst <= src) && ((dst + count * kTWordSize) <= src)));
+#endif
// Use block copying MemCopy if the segment we're copying is
// enough to justify the extra call/setup overhead.
- static const size_t kBlockCopyLimit = 16;
-
- Address* dst_ptr = reinterpret_cast<Address*>(dst);
- Address* src_ptr = reinterpret_cast<Address*>(src);
- if (num_words < kBlockCopyLimit) {
+ if (count < kBlockCopyLimit) {
do {
- num_words--;
+ count--;
*dst_ptr++ = *src_ptr++;
- } while (num_words > 0);
+ } while (count > 0);
} else {
- MemCopy(dst_ptr, src_ptr, num_words * kSystemPointerSize);
+ MemCopy(dst_ptr, src_ptr, count * kTWordSize);
}
}
+// Copies kSystemPointerSize-sized words from |src| to |dst|. The data spans
+// must not overlap. |src| and |dst| must be kSystemPointerSize-aligned.
+inline void CopyWords(Address dst, const Address src, size_t num_words) {
+ static const size_t kBlockCopyLimit = 16;
+ CopyImpl<kBlockCopyLimit>(reinterpret_cast<Address*>(dst),
+ reinterpret_cast<const Address*>(src), num_words);
+}
+
// Copies data from |src| to |dst|. The data spans must not overlap.
template <typename T>
inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
STATIC_ASSERT(sizeof(T) == 1);
- DCHECK(((src <= dst) && ((src + num_bytes) <= dst)) ||
- ((dst <= src) && ((dst + num_bytes) <= src)));
if (num_bytes == 0) return;
-
- // Use block copying MemCopy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const int kBlockCopyLimit = kMinComplexMemCopy;
-
- if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
- do {
- num_bytes--;
- *dst++ = *src++;
- } while (num_bytes > 0);
- } else {
- MemCopy(dst, src, num_bytes);
- }
+ CopyImpl<kMinComplexMemCopy>(dst, src, num_bytes);
}
inline void MemsetPointer(Address* dest, Address value, size_t counter) {
@@ -236,7 +231,7 @@ template <typename sourcechar, typename sinkchar>
void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, size_t chars) {
sinkchar* limit = dest + chars;
if ((sizeof(*dest) == sizeof(*src)) &&
- (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest)))) {
+ (chars >= kMinComplexMemCopy / sizeof(*dest))) {
MemCopy(dest, src, chars * sizeof(*dest));
} else {
while (dest < limit) *dest++ = static_cast<sinkchar>(*src++);
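
[Editor's note] The refactored CopyImpl unifies CopyWords and CopyBytes behind one template: below a compile-time threshold it copies element by element, avoiding the call/setup overhead of the block routine; at or above it, it defers to MemCopy. A self-contained version of that shape, with memcpy standing in for MemCopy (a sketch, not the V8 code):

#include <cstddef>
#include <cstring>

template <size_t kBlockCopyLimit, typename T>
inline void CopyImpl(T* dst, const T* src, size_t count) {
  // The spans must not overlap (the real code DCHECKs this).
  if (count < kBlockCopyLimit) {
    // Small copies: a plain loop beats the block routine's setup cost.
    for (size_t i = 0; i < count; ++i) dst[i] = src[i];
  } else {
    memcpy(dst, src, count * sizeof(T));  // MemCopy in V8
  }
}

// Usage mirroring CopyWords: word-sized elements, block-copy limit of 16.
void CopyWords(void** dst, void* const* src, size_t num_words) {
  CopyImpl<16>(dst, src, num_words);
}
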
diff --git a/deps/v8/src/message-template.h b/deps/v8/src/message-template.h
index 05caf14ac8..6f85a8e31c 100644
--- a/deps/v8/src/message-template.h
+++ b/deps/v8/src/message-template.h
@@ -61,7 +61,7 @@ namespace internal {
T(CannotFreezeArrayBufferView, \
"Cannot freeze array buffer views with elements") \
T(CannotSeal, "Cannot seal") \
- T(CircularStructure, "Converting circular structure to JSON") \
+ T(CircularStructure, "Converting circular structure to JSON%") \
T(ConstructAbstractClass, "Abstract class % not directly constructable") \
T(ConstAssign, "Assignment to constant variable.") \
T(ConstructorClassField, "Classes may not have a field named 'constructor'") \
@@ -228,6 +228,8 @@ namespace internal {
T(ProxyOwnKeysNonExtensible, \
"'ownKeys' on proxy: trap returned extra keys but proxy target is " \
"non-extensible") \
+ T(ProxyOwnKeysDuplicateEntries, \
+ "'ownKeys' on proxy: trap returned duplicate entries") \
T(ProxyPreventExtensionsExtensible, \
"'preventExtensions' on proxy: trap returned truish but the proxy target " \
"is extensible") \
@@ -290,6 +292,7 @@ namespace internal {
/* ReferenceError */ \
T(NotDefined, "% is not defined") \
T(SuperAlreadyCalled, "Super constructor may only be called once") \
+ T(AccessedUninitializedVariable, "Cannot access '%' before initialization") \
T(UnsupportedSuper, "Unsupported reference to 'super'") \
/* RangeError */ \
T(BigIntDivZero, "Division by zero") \
@@ -475,6 +478,7 @@ namespace internal {
T(TypedArrayTooShort, \
"Derived TypedArray constructor created an array which was too small") \
T(UnexpectedEOS, "Unexpected end of input") \
+ T(UnexpectedPrivateField, "Unexpected private field") \
T(UnexpectedReserved, "Unexpected reserved word") \
T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
T(UnexpectedSuper, "'super' keyword unexpected here") \
@@ -547,15 +551,15 @@ namespace internal {
T(TraceEventPhaseError, "Trace event phase must be a number.") \
T(TraceEventIDError, "Trace event id must be a number.") \
/* Weak refs */ \
- T(WeakRefsCleanupMustBeCallable, "WeakFactory: cleanup must be callable") \
- T(WeakRefsMakeCellTargetMustBeObject, \
- "WeakFactory.prototype.makeCell: target must be an object") \
- T(WeakRefsMakeCellTargetAndHoldingsMustNotBeSame, \
- "WeakFactory.prototype.makeCell: target and holdings must not be same") \
+ T(WeakRefsCleanupMustBeCallable, \
+ "FinalizationGroup: cleanup must be callable") \
+ T(WeakRefsRegisterTargetMustBeObject, \
+ "FinalizationGroup.prototype.register: target must be an object") \
+ T(WeakRefsRegisterTargetAndHoldingsMustNotBeSame, \
+ "FinalizationGroup.prototype.register: target and holdings must not be " \
+ "same") \
T(WeakRefsWeakRefConstructorTargetMustBeObject, \
- "WeakRef: target must be an object") \
- T(WeakRefsMakeRefTargetAndHoldingsMustNotBeSame, \
- "WeakFactory.prototype.makeRef: target and holdings must not be same")
+ "WeakRef: target must be an object")
enum class MessageTemplate {
#define TEMPLATE(NAME, STRING) k##NAME,
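
[Editor's note] The message table edited above is an X-macro: each T(Name, "string") entry is expanded once with TEMPLATE to build the MessageTemplate enum, and can be re-expanded elsewhere to build the matching format-string table, so the two never drift apart. In miniature (a generic illustration of the pattern, not V8's exact expansion):

#include <cstdio>

// Hypothetical entry list in the style of message-template.h.
#define MESSAGE_TEMPLATES(T)              \
  T(NotDefined, "% is not defined")       \
  T(CannotSeal, "Cannot seal")

// Expansion 1: the enum.
enum class MessageTemplate {
#define TEMPLATE(NAME, STRING) k##NAME,
  MESSAGE_TEMPLATES(TEMPLATE)
#undef TEMPLATE
};

// Expansion 2: the parallel string table, guaranteed to stay in sync.
const char* const kMessageStrings[] = {
#define TEMPLATE(NAME, STRING) STRING,
  MESSAGE_TEMPLATES(TEMPLATE)
#undef TEMPLATE
};

int main() {
  std::printf("%s\n",
              kMessageStrings[static_cast<int>(MessageTemplate::kCannotSeal)]);
}
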
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index f89d3a5746..25922647bb 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -294,6 +294,11 @@ Handle<Object> StackFrameBase::GetEvalOrigin() {
return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
}
+int StackFrameBase::GetScriptId() const {
+ if (!HasScript()) return kNone;
+ return GetScript()->id();
+}
+
bool StackFrameBase::IsEval() {
return HasScript() &&
GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
@@ -462,7 +467,7 @@ Handle<Object> JSStackFrame::GetTypeName() {
int JSStackFrame::GetLineNumber() {
DCHECK_LE(0, GetPosition());
if (HasScript()) return Script::GetLineNumber(GetScript(), GetPosition()) + 1;
- return -1;
+ return kNone;
}
int JSStackFrame::GetColumnNumber() {
@@ -470,11 +475,11 @@ int JSStackFrame::GetColumnNumber() {
if (HasScript()) {
return Script::GetColumnNumber(GetScript(), GetPosition()) + 1;
}
- return -1;
+ return kNone;
}
int JSStackFrame::GetPromiseIndex() const {
- return is_promise_all_ ? offset_ : -1;
+ return is_promise_all_ ? offset_ : kNone;
}
bool JSStackFrame::IsNative() {
@@ -516,14 +521,14 @@ void AppendFileLocation(Isolate* isolate, StackFrameBase* call_site,
}
int line_number = call_site->GetLineNumber();
- if (line_number != -1) {
+ if (line_number != StackFrameBase::kNone) {
builder->AppendCharacter(':');
Handle<String> line_string = isolate->factory()->NumberToString(
handle(Smi::FromInt(line_number), isolate), isolate);
builder->AppendString(line_string);
int column_number = call_site->GetColumnNumber();
- if (column_number != -1) {
+ if (column_number != StackFrameBase::kNone) {
builder->AppendCharacter(':');
Handle<String> column_string = isolate->factory()->NumberToString(
handle(Smi::FromInt(column_number), isolate), isolate);
@@ -838,33 +843,33 @@ MaybeHandle<String> AsmJsWasmStackFrame::ToString() {
FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
Handle<FrameArray> array, int frame_ix)
- : isolate_(isolate), array_(array), next_frame_ix_(frame_ix) {}
+ : isolate_(isolate), array_(array), frame_ix_(frame_ix) {}
-bool FrameArrayIterator::HasNext() const {
- return (next_frame_ix_ < array_->FrameCount());
+bool FrameArrayIterator::HasFrame() const {
+ return (frame_ix_ < array_->FrameCount());
}
-void FrameArrayIterator::Next() { next_frame_ix_++; }
+void FrameArrayIterator::Advance() { frame_ix_++; }
StackFrameBase* FrameArrayIterator::Frame() {
- DCHECK(HasNext());
- const int flags = array_->Flags(next_frame_ix_)->value();
+ DCHECK(HasFrame());
+ const int flags = array_->Flags(frame_ix_)->value();
int flag_mask = FrameArray::kIsWasmFrame |
FrameArray::kIsWasmInterpretedFrame |
FrameArray::kIsAsmJsWasmFrame;
switch (flags & flag_mask) {
case 0:
// JavaScript Frame.
- js_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+ js_frame_.FromFrameArray(isolate_, array_, frame_ix_);
return &js_frame_;
case FrameArray::kIsWasmFrame:
case FrameArray::kIsWasmInterpretedFrame:
// Wasm Frame:
- wasm_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+ wasm_frame_.FromFrameArray(isolate_, array_, frame_ix_);
return &wasm_frame_;
case FrameArray::kIsAsmJsWasmFrame:
// Asm.js Wasm Frame:
- asm_wasm_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+ asm_wasm_frame_.FromFrameArray(isolate_, array_, frame_ix_);
return &asm_wasm_frame_;
default:
UNREACHABLE();
@@ -1040,7 +1045,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
RETURN_ON_EXCEPTION(isolate, AppendErrorString(isolate, error, &builder),
Object);
- for (FrameArrayIterator it(isolate, elems); it.HasNext(); it.Next()) {
+ for (FrameArrayIterator it(isolate, elems); it.HasFrame(); it.Advance()) {
builder.AppendCString("\n at ");
StackFrameBase* frame = it.Frame();
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index f030190aa8..4c3d49c2b4 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -63,6 +63,9 @@ class StackFrameBase {
virtual Handle<Object> GetTypeName() = 0;
virtual Handle<Object> GetEvalOrigin();
+ // Returns the script ID if one is attached, -1 otherwise.
+ int GetScriptId() const;
+
virtual int GetPosition() const = 0;
// Return 1-based line number, including line offset.
virtual int GetLineNumber() = 0;
@@ -82,6 +85,9 @@ class StackFrameBase {
virtual MaybeHandle<String> ToString() = 0;
+ // Used to signal that the requested field is unknown.
+ static const int kNone = -1;
+
protected:
StackFrameBase() = default;
explicit StackFrameBase(Isolate* isolate) : isolate_(isolate) {}
@@ -158,9 +164,9 @@ class WasmStackFrame : public StackFrameBase {
int GetPosition() const override;
int GetLineNumber() override { return wasm_func_index_; }
- int GetColumnNumber() override { return -1; }
+ int GetColumnNumber() override { return kNone; }
- int GetPromiseIndex() const override { return -1; }
+ int GetPromiseIndex() const override { return kNone; }
bool IsNative() override { return false; }
bool IsToplevel() override { return false; }
@@ -222,14 +228,14 @@ class FrameArrayIterator {
StackFrameBase* Frame();
- bool HasNext() const;
- void Next();
+ bool HasFrame() const;
+ void Advance();
private:
Isolate* isolate_;
Handle<FrameArray> array_;
- int next_frame_ix_;
+ int frame_ix_;
WasmStackFrame wasm_frame_;
AsmJsWasmStackFrame asm_wasm_frame_;
diff --git a/deps/v8/src/microtask-queue.cc b/deps/v8/src/microtask-queue.cc
index 5010b0bc25..4b8b0410c6 100644
--- a/deps/v8/src/microtask-queue.cc
+++ b/deps/v8/src/microtask-queue.cc
@@ -7,7 +7,7 @@
#include <stddef.h>
#include <algorithm>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/logging.h"
#include "src/handles-inl.h"
#include "src/isolate.h"
@@ -25,6 +25,8 @@ const size_t MicrotaskQueue::kCapacityOffset =
OFFSET_OF(MicrotaskQueue, capacity_);
const size_t MicrotaskQueue::kSizeOffset = OFFSET_OF(MicrotaskQueue, size_);
const size_t MicrotaskQueue::kStartOffset = OFFSET_OF(MicrotaskQueue, start_);
+const size_t MicrotaskQueue::kFinishedMicrotaskCountOffset =
+ OFFSET_OF(MicrotaskQueue, finished_microtask_count_);
const intptr_t MicrotaskQueue::kMinimumCapacity = 8;
@@ -75,6 +77,26 @@ Address MicrotaskQueue::CallEnqueueMicrotask(Isolate* isolate,
return ReadOnlyRoots(isolate).undefined_value().ptr();
}
+void MicrotaskQueue::EnqueueMicrotask(v8::Isolate* v8_isolate,
+ v8::Local<Function> function) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ HandleScope scope(isolate);
+ Handle<CallableTask> microtask = isolate->factory()->NewCallableTask(
+ Utils::OpenHandle(*function), isolate->native_context());
+ EnqueueMicrotask(*microtask);
+}
+
+void MicrotaskQueue::EnqueueMicrotask(v8::Isolate* v8_isolate,
+ v8::MicrotaskCallback callback,
+ void* data) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ HandleScope scope(isolate);
+ Handle<CallbackTask> microtask = isolate->factory()->NewCallbackTask(
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(callback)),
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(data)));
+ EnqueueMicrotask(*microtask);
+}
+
void MicrotaskQueue::EnqueueMicrotask(Microtask microtask) {
if (size_ == capacity_) {
// Keep the capacity of |ring_buffer_| power of 2, so that the JIT
@@ -88,6 +110,14 @@ void MicrotaskQueue::EnqueueMicrotask(Microtask microtask) {
++size_;
}
+void MicrotaskQueue::PerformCheckpoint(v8::Isolate* v8_isolate) {
+ if (!IsRunningMicrotasks() && !GetMicrotasksScopeDepth() &&
+ !HasMicrotasksSuppressions()) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ RunMicrotasks(isolate);
+ }
+}
+
namespace {
class SetIsRunningMicrotasks {
@@ -114,20 +144,27 @@ int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
return 0;
}
+ intptr_t base_count = finished_microtask_count_;
+
HandleScope handle_scope(isolate);
MaybeHandle<Object> maybe_exception;
MaybeHandle<Object> maybe_result;
+ int processed_microtask_count;
{
SetIsRunningMicrotasks scope(&is_running_microtasks_);
v8::Isolate::SuppressMicrotaskExecutionScope suppress(
reinterpret_cast<v8::Isolate*>(isolate));
HandleScopeImplementer::EnteredContextRewindScope rewind_scope(
isolate->handle_scope_implementer());
- TRACE_EVENT0("v8.execute", "RunMicrotasks");
+ TRACE_EVENT_BEGIN0("v8.execute", "RunMicrotasks");
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunMicrotasks");
maybe_result = Execution::TryRunMicrotasks(isolate, this, &maybe_exception);
+ processed_microtask_count =
+ static_cast<int>(finished_microtask_count_ - base_count);
+ TRACE_EVENT_END1("v8.execute", "RunMicrotasks", "microtask_count",
+ processed_microtask_count);
}
// If execution is terminating, clean up and propagate that to TryCatch scope.
@@ -144,8 +181,7 @@ int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
DCHECK_EQ(0, size());
OnCompleted(isolate);
- // TODO(tzik): Return the number of microtasks run in this round.
- return 0;
+ return processed_microtask_count;
}
void MicrotaskQueue::IterateMicrotasks(RootVisitor* visitor) {
@@ -176,29 +212,38 @@ void MicrotaskQueue::IterateMicrotasks(RootVisitor* visitor) {
}
void MicrotaskQueue::AddMicrotasksCompletedCallback(
- MicrotasksCompletedCallback callback) {
- auto pos = std::find(microtasks_completed_callbacks_.begin(),
- microtasks_completed_callbacks_.end(), callback);
+ MicrotasksCompletedCallbackWithData callback, void* data) {
+ CallbackWithData callback_with_data(callback, data);
+ auto pos =
+ std::find(microtasks_completed_callbacks_.begin(),
+ microtasks_completed_callbacks_.end(), callback_with_data);
if (pos != microtasks_completed_callbacks_.end()) return;
- microtasks_completed_callbacks_.push_back(callback);
+ microtasks_completed_callbacks_.push_back(callback_with_data);
}
void MicrotaskQueue::RemoveMicrotasksCompletedCallback(
- MicrotasksCompletedCallback callback) {
- auto pos = std::find(microtasks_completed_callbacks_.begin(),
- microtasks_completed_callbacks_.end(), callback);
+ MicrotasksCompletedCallbackWithData callback, void* data) {
+ CallbackWithData callback_with_data(callback, data);
+ auto pos =
+ std::find(microtasks_completed_callbacks_.begin(),
+ microtasks_completed_callbacks_.end(), callback_with_data);
if (pos == microtasks_completed_callbacks_.end()) return;
microtasks_completed_callbacks_.erase(pos);
}
void MicrotaskQueue::FireMicrotasksCompletedCallback(Isolate* isolate) const {
- std::vector<MicrotasksCompletedCallback> callbacks(
- microtasks_completed_callbacks_);
+ std::vector<CallbackWithData> callbacks(microtasks_completed_callbacks_);
for (auto& callback : callbacks) {
- callback(reinterpret_cast<v8::Isolate*>(isolate));
+ callback.first(reinterpret_cast<v8::Isolate*>(isolate), callback.second);
}
}
+Microtask MicrotaskQueue::get(intptr_t index) const {
+ DCHECK_LT(index, size_);
+ Object microtask(ring_buffer_[(index + start_) % capacity_]);
+ return Microtask::cast(microtask);
+}
+
void MicrotaskQueue::OnCompleted(Isolate* isolate) {
// TODO(marja): (spec) The discussion about when to clear the KeepDuringJob
// set is still open (whether to clear it after every microtask or once
diff --git a/deps/v8/src/microtask-queue.h b/deps/v8/src/microtask-queue.h
index c4db47ad46..7224794c1d 100644
--- a/deps/v8/src/microtask-queue.h
+++ b/deps/v8/src/microtask-queue.h
@@ -21,7 +21,7 @@ class Microtask;
class Object;
class RootVisitor;
-class V8_EXPORT_PRIVATE MicrotaskQueue {
+class V8_EXPORT_PRIVATE MicrotaskQueue final : public v8::MicrotaskQueue {
public:
static void SetUpDefaultMicrotaskQueue(Isolate* isolate);
static std::unique_ptr<MicrotaskQueue> New(Isolate* isolate);
@@ -35,11 +35,23 @@ class V8_EXPORT_PRIVATE MicrotaskQueue {
intptr_t microtask_queue_pointer,
Address raw_microtask);
- void EnqueueMicrotask(Microtask microtask);
+ // v8::MicrotaskQueue implementations.
+ void EnqueueMicrotask(v8::Isolate* isolate,
+ v8::Local<Function> microtask) override;
+ void EnqueueMicrotask(v8::Isolate* isolate, v8::MicrotaskCallback callback,
+ void* data) override;
+ void PerformCheckpoint(v8::Isolate* isolate) override;
- // Returns -1 if the execution is terminating, otherwise, returns 0.
- // TODO(tzik): Update the implementation to return the number of processed
- // microtasks.
+ void EnqueueMicrotask(Microtask microtask);
+ void AddMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data) override;
+ void RemoveMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data) override;
+ bool IsRunningMicrotasks() const override { return is_running_microtasks_; }
+
+ // Runs all queued Microtasks.
+ // Returns -1 if the execution is terminating, otherwise, returns the number
+ // of microtasks that ran in this round.
int RunMicrotasks(Isolate* isolate);
// Iterate all pending Microtasks in this queue as strong roots, so that
@@ -70,15 +82,21 @@ class V8_EXPORT_PRIVATE MicrotaskQueue {
}
#endif
+ void set_microtasks_policy(v8::MicrotasksPolicy microtasks_policy) {
+ microtasks_policy_ = microtasks_policy;
+ }
+ v8::MicrotasksPolicy microtasks_policy() const { return microtasks_policy_; }
+
void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
void FireMicrotasksCompletedCallback(Isolate* isolate) const;
- bool IsRunningMicrotasks() const { return is_running_microtasks_; }
intptr_t capacity() const { return capacity_; }
intptr_t size() const { return size_; }
intptr_t start() const { return start_; }
+ Microtask get(intptr_t index) const;
+
MicrotaskQueue* next() const { return next_; }
MicrotaskQueue* prev() const { return prev_; }
@@ -86,6 +104,7 @@ class V8_EXPORT_PRIVATE MicrotaskQueue {
static const size_t kCapacityOffset;
static const size_t kSizeOffset;
static const size_t kStartOffset;
+ static const size_t kFinishedMicrotaskCountOffset;
static const intptr_t kMinimumCapacity;
@@ -103,6 +122,9 @@ class V8_EXPORT_PRIVATE MicrotaskQueue {
intptr_t start_ = 0;
Address* ring_buffer_ = nullptr;
+ // The number of finished microtasks.
+ intptr_t finished_microtask_count_ = 0;
+
// MicrotaskQueue instances form a doubly linked list loop, so that all
// instances are reachable through |next_|.
MicrotaskQueue* next_ = nullptr;
@@ -114,8 +136,12 @@ class V8_EXPORT_PRIVATE MicrotaskQueue {
int debug_microtasks_depth_ = 0;
#endif
+ v8::MicrotasksPolicy microtasks_policy_ = v8::MicrotasksPolicy::kAuto;
+
bool is_running_microtasks_ = false;
- std::vector<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
+ using CallbackWithData =
+ std::pair<MicrotasksCompletedCallbackWithData, void*>;
+ std::vector<CallbackWithData> microtasks_completed_callbacks_;
};
} // namespace internal
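
[Editor's note] The queue behind these accessors is a ring buffer whose capacity is kept a power of two, so the (index + start_) % capacity_ addressing in get() compiles down to a bitwise AND, and the exported kStartOffset/kSizeOffset constants let jitted code index it directly. A bare-bones buffer of the same shape (illustrative sketch, hypothetical names):

#include <cstddef>
#include <cstdint>
#include <vector>

class RingBuffer {
 public:
  explicit RingBuffer(size_t capacity_pow2)  // capacity must be a power of 2
      : buffer_(capacity_pow2), capacity_(capacity_pow2) {}

  void Push(uintptr_t value) {
    if (size_ == capacity_) Grow();  // doubling keeps capacity a power of 2
    buffer_[(start_ + size_) & (capacity_ - 1)] = value;  // mask, not %
    ++size_;
  }

  uintptr_t Pop() {  // caller must ensure size() > 0
    uintptr_t value = buffer_[start_];
    start_ = (start_ + 1) & (capacity_ - 1);
    --size_;
    return value;
  }

  size_t size() const { return size_; }

 private:
  void Grow() {
    std::vector<uintptr_t> grown(capacity_ * 2);
    for (size_t i = 0; i < size_; ++i)
      grown[i] = buffer_[(start_ + i) & (capacity_ - 1)];
    buffer_ = std::move(grown);
    capacity_ *= 2;
    start_ = 0;
  }

  std::vector<uintptr_t> buffer_;
  size_t capacity_;
  size_t start_ = 0;
  size_t size_ = 0;
};
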
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 5cdd8808a6..f134fb3e85 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -123,16 +123,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
- if (IsMipsArchVariant(kMips32r6)) {
- // On R6 the address location is shifted by one instruction
- set_target_address_at(
- instruction_payload - (kInstructionsFor32BitConstant - 1) * kInstrSize,
- !code.is_null() ? code->constant_pool() : kNullAddress, target);
- } else {
- set_target_address_at(
- instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
- !code.is_null() ? code->constant_pool() : kNullAddress, target);
- }
+ set_target_address_at(instruction_payload,
+ !code.is_null() ? code->constant_pool() : kNullAddress, target);
}
int Assembler::deserialization_special_target_size(
@@ -288,24 +280,6 @@ Handle<Code> Assembler::relative_code_target_object_handle_at(
return GetCodeTarget(code_target_index);
}
-template <typename ObjectVisitor>
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- Mode mode = rmode();
- if (IsEmbeddedObject(mode)) {
- visitor->VisitEmbeddedPointer(host(), this);
- } else if (IsCodeTargetMode(mode)) {
- visitor->VisitCodeTarget(host(), this);
- } else if (IsExternalReference(mode)) {
- visitor->VisitExternalReference(host(), this);
- } else if (IsInternalReference(mode) || IsInternalReferenceEncoded(mode)) {
- visitor->VisitInternalReference(host(), this);
- } else if (IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(host(), this);
- } else if (IsOffHeapTarget(mode)) {
- visitor->VisitOffHeapTarget(host(), this);
- }
-}
-
// -----------------------------------------------------------------------------
// Assembler.
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 631ba8f9cb..f46a44adda 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -321,7 +321,9 @@ Assembler::Assembler(const AssemblerOptions& options,
block_buffer_growth_ = false;
}
-void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
EmitForbiddenSlotInstruction();
int code_comments_size = WriteCodeComments();
@@ -331,19 +333,27 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_start_;
- desc->buffer_size = buffer_->size();
- desc->instr_size = pc_offset();
- desc->reloc_size =
- (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
- desc->origin = this;
- desc->constant_pool_size = 0;
- desc->unwinding_info_size = 0;
- desc->unwinding_info = nullptr;
- desc->code_comments_size = code_comments_size;
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
}
-
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
EmitForbiddenSlotInstruction();
@@ -4041,14 +4051,6 @@ Address Assembler::target_address_at(Address pc) {
}
-// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
-// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
-// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
-// OS::nan_value() returns a qNaN.
-void Assembler::QuietNaN(HeapObject object) {
- HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
-}
-
// On Mips, a target address is stored in a lui/ori instruction pair, each
// of which load 16 bits of the 32-bit address to a register.
// Patching the address must replace both instr, and flush the i-cache.
@@ -4104,7 +4106,7 @@ void Assembler::set_target_value_at(Address pc, uint32_t target,
}
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, 2 * sizeof(int32_t));
+ FlushInstructionCache(pc, 2 * sizeof(int32_t));
}
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 21409f9bf4..5cbf871630 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -50,6 +50,8 @@
namespace v8 {
namespace internal {
+class SafepointTableBuilder;
+
// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
enum BranchDelaySlot { USE_DELAY_SLOT, PROTECT };
@@ -159,10 +161,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
virtual ~Assembler() { }
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(Isolate* isolate, CodeDesc* desc);
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
+
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
// Label operations & relative jumps (PPUM Appendix D).
//
@@ -254,8 +266,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
- static void QuietNaN(HeapObject nan);
-
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 0eda758193..41bc195003 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -126,6 +126,14 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments (on the stack, not including receiver)
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index e0de62e1da..0e7ab8fa04 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -16,6 +16,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/macro-assembler.h"
#include "src/objects/heap-number.h"
#include "src/register-configuration.h"
@@ -3846,6 +3847,19 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit,
+ Label* on_in_range) {
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Subu(scratch, value, Operand(lower_limit));
+ Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
+ } else {
+ Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
+ }
+}
+
// Note: To call gcc-compiled C code on mips, you must call through t9.
void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
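
The new JumpIfIsInRange relies on the classic unsigned-comparison trick: value lies in [lower_limit, higher_limit] exactly when the unsigned difference value - lower_limit is at most higher_limit - lower_limit, so one Subu plus one unsigned branch (ls) replaces two compares. A standalone sketch of the condition in plain C++, not V8 code:

#include <cassert>
#include <cstdint>

// Emulates the condition the emitted Subu + Branch(ls) pair tests: a value
// below lower wraps around to a huge unsigned number and fails the compare.
bool InRange(uint32_t value, uint32_t lower, uint32_t higher) {
  return (value - lower) <= (higher - lower);
}

int main() {
  assert(InRange(5, 3, 9));
  assert(!InRange(2, 3, 9));   // 2 - 3 wraps to 0xFFFFFFFF
  assert(!InRange(10, 3, 9));
  assert(InRange(0, 0, 4));    // lower_limit == 0 needs no subtraction
  return 0;
}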
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 303fbb76b2..c1636fb9f7 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -413,41 +413,41 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
instr(rd_hi, rd_lo, rs, Operand(j)); \
}
- DEFINE_INSTRUCTION(Addu);
- DEFINE_INSTRUCTION(Subu);
- DEFINE_INSTRUCTION(Mul);
- DEFINE_INSTRUCTION(Div);
- DEFINE_INSTRUCTION(Divu);
- DEFINE_INSTRUCTION(Mod);
- DEFINE_INSTRUCTION(Modu);
- DEFINE_INSTRUCTION(Mulh);
- DEFINE_INSTRUCTION2(Mult);
- DEFINE_INSTRUCTION(Mulhu);
- DEFINE_INSTRUCTION2(Multu);
- DEFINE_INSTRUCTION2(Div);
- DEFINE_INSTRUCTION2(Divu);
-
- DEFINE_INSTRUCTION3(Div);
- DEFINE_INSTRUCTION3(Mul);
- DEFINE_INSTRUCTION3(Mulu);
-
- DEFINE_INSTRUCTION(And);
- DEFINE_INSTRUCTION(Or);
- DEFINE_INSTRUCTION(Xor);
- DEFINE_INSTRUCTION(Nor);
- DEFINE_INSTRUCTION2(Neg);
-
- DEFINE_INSTRUCTION(Slt);
- DEFINE_INSTRUCTION(Sltu);
- DEFINE_INSTRUCTION(Sle);
- DEFINE_INSTRUCTION(Sleu);
- DEFINE_INSTRUCTION(Sgt);
- DEFINE_INSTRUCTION(Sgtu);
- DEFINE_INSTRUCTION(Sge);
- DEFINE_INSTRUCTION(Sgeu);
+ DEFINE_INSTRUCTION(Addu)
+ DEFINE_INSTRUCTION(Subu)
+ DEFINE_INSTRUCTION(Mul)
+ DEFINE_INSTRUCTION(Div)
+ DEFINE_INSTRUCTION(Divu)
+ DEFINE_INSTRUCTION(Mod)
+ DEFINE_INSTRUCTION(Modu)
+ DEFINE_INSTRUCTION(Mulh)
+ DEFINE_INSTRUCTION2(Mult)
+ DEFINE_INSTRUCTION(Mulhu)
+ DEFINE_INSTRUCTION2(Multu)
+ DEFINE_INSTRUCTION2(Div)
+ DEFINE_INSTRUCTION2(Divu)
+
+ DEFINE_INSTRUCTION3(Div)
+ DEFINE_INSTRUCTION3(Mul)
+ DEFINE_INSTRUCTION3(Mulu)
+
+ DEFINE_INSTRUCTION(And)
+ DEFINE_INSTRUCTION(Or)
+ DEFINE_INSTRUCTION(Xor)
+ DEFINE_INSTRUCTION(Nor)
+ DEFINE_INSTRUCTION2(Neg)
+
+ DEFINE_INSTRUCTION(Slt)
+ DEFINE_INSTRUCTION(Sltu)
+ DEFINE_INSTRUCTION(Sle)
+ DEFINE_INSTRUCTION(Sleu)
+ DEFINE_INSTRUCTION(Sgt)
+ DEFINE_INSTRUCTION(Sgtu)
+ DEFINE_INSTRUCTION(Sge)
+ DEFINE_INSTRUCTION(Sgeu)
// MIPS32 R2 instruction macro.
- DEFINE_INSTRUCTION(Ror);
+ DEFINE_INSTRUCTION(Ror)
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
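
The semicolon removal above is not cosmetic churn: DEFINE_INSTRUCTION expands to complete function definitions, so a trailing ';' at the invocation site leaves an empty declaration behind, which stricter warning levels (likely clang's -Wextra-semi in this case) flag. A minimal reproduction, assuming nothing beyond standard C++:

#define DEFINE_FN(name) void name() {}

DEFINE_FN(Foo)    // expands to a full definition; no ';' needed
DEFINE_FN(Bar)    // "DEFINE_FN(Bar);" would leave a stray empty declaration

int main() {
  Foo();
  Bar();
  return 0;
}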
@@ -931,6 +931,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Branch(if_not_equal, ne, with, Operand(scratch));
}
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range);
+
// ---------------------------------------------------------------------------
// GC Support
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index f4d8f354d8..061a96c38a 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -20,12 +20,13 @@
#include "src/mips/constants-mips.h"
#include "src/ostreams.h"
#include "src/runtime/runtime-utils.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
- Simulator::GlobalMonitor::Get);
+ Simulator::GlobalMonitor::Get)
// Util functions.

bool HaveSameSign(int32_t a, int32_t b) {
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 37652d0690..acbf3cb1a1 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -113,8 +113,7 @@ Address Assembler::target_address_from_return_address(Address pc) {
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
- set_target_address_at(
- instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
+ set_target_address_at(instruction_payload,
!code.is_null() ? code->constant_pool() : kNullAddress, target);
}
@@ -237,25 +236,6 @@ void RelocInfo::WipeOut() {
}
}
-template <typename ObjectVisitor>
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTargetMode(mode)) {
- visitor->VisitCodeTarget(host(), this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(host(), this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
- mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
- visitor->VisitInternalReference(host(), this);
- } else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(host(), this);
- } else if (RelocInfo::IsOffHeapTarget(mode)) {
- visitor->VisitOffHeapTarget(host(), this);
- }
-}
-
// -----------------------------------------------------------------------------
// Assembler.
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 10e4806337..902f0e0e57 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -298,7 +298,9 @@ Assembler::Assembler(const AssemblerOptions& options,
block_buffer_growth_ = false;
}
-void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
EmitForbiddenSlotInstruction();
int code_comments_size = WriteCodeComments();
@@ -308,19 +310,27 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_start_;
- desc->buffer_size = buffer_->size();
- desc->instr_size = pc_offset();
- desc->reloc_size = static_cast<int>((buffer_start_ + desc->buffer_size) -
- reloc_info_writer.pos());
- desc->origin = this;
- desc->constant_pool_size = 0;
- desc->unwinding_info_size = 0;
- desc->unwinding_info = nullptr;
- desc->code_comments_size = code_comments_size;
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
}
-
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
EmitForbiddenSlotInstruction();
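
The rewritten GetCode lays the instruction area out back-to-front: code comments sit at the end, the (empty, on MIPS) constant pool directly before them, and each absent table collapses onto the start of the next area so the offsets stay monotonically ordered, which is exactly what Code::CodeVerify checks further down in this patch. A sketch of that bookkeeping, with kNoHandlerTable == 0 as declared in the header; illustrative only, not V8's CodeDesc::Initialize:

#include <cassert>

struct Layout {
  int safepoint_table_offset;
  int handler_table_offset;
  int constant_pool_offset;
  int code_comments_offset;
};

Layout Compute(int instr_size, int code_comments_size,
               int handler_table_offset, bool has_safepoint_table,
               int safepoint_code_offset) {
  const int kConstantPoolSize = 0;  // MIPS emits no constant pool
  Layout l;
  l.code_comments_offset = instr_size - code_comments_size;
  l.constant_pool_offset = l.code_comments_offset - kConstantPoolSize;
  l.handler_table_offset = (handler_table_offset == 0)  // kNoHandlerTable
                               ? l.constant_pool_offset
                               : handler_table_offset;
  l.safepoint_table_offset = has_safepoint_table ? safepoint_code_offset
                                                 : l.handler_table_offset;
  return l;
}

int main() {
  Layout l = Compute(/*instr_size=*/256, /*code_comments_size=*/16,
                     /*handler_table_offset=*/0,
                     /*has_safepoint_table=*/false, 0);
  assert(l.safepoint_table_offset <= l.handler_table_offset);
  assert(l.handler_table_offset <= l.constant_pool_offset);
  assert(l.constant_pool_offset <= l.code_comments_offset);
  return 0;
}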
@@ -4291,14 +4301,6 @@ Address Assembler::target_address_at(Address pc) {
}
-// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
-// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
-// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
-// OS::nan_value() returns a qNaN.
-void Assembler::QuietNaN(HeapObject object) {
- HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
-}
-
// On Mips64, a target address is stored in a 4-instruction sequence:
// 0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
// 1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
@@ -4342,7 +4344,7 @@ void Assembler::set_target_value_at(Address pc, uint64_t target,
(target & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, 4 * kInstrSize);
+ FlushInstructionCache(pc, 4 * kInstrSize);
}
}
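
For reference, the 4-instruction sequence described above materializes a 48-bit address from three 16-bit chunks, following V8's usual lui/ori/dsll/ori pattern. The snippet below mirrors that reassembly in plain C++, ignoring MIPS sign-extension details:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kImm16Mask = 0xFFFF;
  uint64_t target = 0x0000123456789ABCULL;         // 48-bit code address
  uint64_t chunk2 = (target >> 32) & kImm16Mask;   // lui payload
  uint64_t chunk1 = (target >> 16) & kImm16Mask;   // first ori payload
  uint64_t chunk0 = target & kImm16Mask;           // final ori payload
  uint64_t rd = chunk2 << 16;  // lui places its immediate in bits 31..16
  rd |= chunk1;                // ori fills bits 15..0
  rd <<= 16;                   // dsll shifts everything up by 16
  rd |= chunk0;                // final ori completes bits 15..0
  assert(rd == target);
  return 0;
}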
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 09ad7522d6..940f588eba 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -50,6 +50,8 @@
namespace v8 {
namespace internal {
+class SafepointTableBuilder;
+
// -----------------------------------------------------------------------------
// Machine instruction Operands.
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -157,10 +159,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
virtual ~Assembler() { }
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(Isolate* isolate, CodeDesc* desc);
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
+
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
// Label operations & relative jumps (PPUM Appendix D).
//
@@ -255,8 +267,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static void JumpLabelToJumpRegister(Address pc);
- static void QuietNaN(HeapObject nan);
-
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 4446732ea1..fe0fa095ab 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -126,6 +126,14 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a0 : number of arguments (on the stack, not including receiver)
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 748aa18dda..98890d441e 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -16,6 +16,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/macro-assembler.h"
#include "src/objects/heap-number.h"
#include "src/register-configuration.h"
@@ -4259,6 +4260,19 @@ void TurboAssembler::Call(Register target, Condition cond, Register rs,
}
}
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit,
+ Label* on_in_range) {
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Dsubu(scratch, value, Operand(lower_limit));
+ Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
+ } else {
+ Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
+ }
+}
+
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 2e6991c1ba..cc68ac4ea8 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -424,50 +424,50 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
- DEFINE_INSTRUCTION(Addu);
- DEFINE_INSTRUCTION(Daddu);
- DEFINE_INSTRUCTION(Div);
- DEFINE_INSTRUCTION(Divu);
- DEFINE_INSTRUCTION(Ddivu);
- DEFINE_INSTRUCTION(Mod);
- DEFINE_INSTRUCTION(Modu);
- DEFINE_INSTRUCTION(Ddiv);
- DEFINE_INSTRUCTION(Subu);
- DEFINE_INSTRUCTION(Dsubu);
- DEFINE_INSTRUCTION(Dmod);
- DEFINE_INSTRUCTION(Dmodu);
- DEFINE_INSTRUCTION(Mul);
- DEFINE_INSTRUCTION(Mulh);
- DEFINE_INSTRUCTION(Mulhu);
- DEFINE_INSTRUCTION(Dmul);
- DEFINE_INSTRUCTION(Dmulh);
- DEFINE_INSTRUCTION2(Mult);
- DEFINE_INSTRUCTION2(Dmult);
- DEFINE_INSTRUCTION2(Multu);
- DEFINE_INSTRUCTION2(Dmultu);
- DEFINE_INSTRUCTION2(Div);
- DEFINE_INSTRUCTION2(Ddiv);
- DEFINE_INSTRUCTION2(Divu);
- DEFINE_INSTRUCTION2(Ddivu);
-
- DEFINE_INSTRUCTION(And);
- DEFINE_INSTRUCTION(Or);
- DEFINE_INSTRUCTION(Xor);
- DEFINE_INSTRUCTION(Nor);
- DEFINE_INSTRUCTION2(Neg);
-
- DEFINE_INSTRUCTION(Slt);
- DEFINE_INSTRUCTION(Sltu);
- DEFINE_INSTRUCTION(Sle);
- DEFINE_INSTRUCTION(Sleu);
- DEFINE_INSTRUCTION(Sgt);
- DEFINE_INSTRUCTION(Sgtu);
- DEFINE_INSTRUCTION(Sge);
- DEFINE_INSTRUCTION(Sgeu);
+ DEFINE_INSTRUCTION(Addu)
+ DEFINE_INSTRUCTION(Daddu)
+ DEFINE_INSTRUCTION(Div)
+ DEFINE_INSTRUCTION(Divu)
+ DEFINE_INSTRUCTION(Ddivu)
+ DEFINE_INSTRUCTION(Mod)
+ DEFINE_INSTRUCTION(Modu)
+ DEFINE_INSTRUCTION(Ddiv)
+ DEFINE_INSTRUCTION(Subu)
+ DEFINE_INSTRUCTION(Dsubu)
+ DEFINE_INSTRUCTION(Dmod)
+ DEFINE_INSTRUCTION(Dmodu)
+ DEFINE_INSTRUCTION(Mul)
+ DEFINE_INSTRUCTION(Mulh)
+ DEFINE_INSTRUCTION(Mulhu)
+ DEFINE_INSTRUCTION(Dmul)
+ DEFINE_INSTRUCTION(Dmulh)
+ DEFINE_INSTRUCTION2(Mult)
+ DEFINE_INSTRUCTION2(Dmult)
+ DEFINE_INSTRUCTION2(Multu)
+ DEFINE_INSTRUCTION2(Dmultu)
+ DEFINE_INSTRUCTION2(Div)
+ DEFINE_INSTRUCTION2(Ddiv)
+ DEFINE_INSTRUCTION2(Divu)
+ DEFINE_INSTRUCTION2(Ddivu)
+
+ DEFINE_INSTRUCTION(And)
+ DEFINE_INSTRUCTION(Or)
+ DEFINE_INSTRUCTION(Xor)
+ DEFINE_INSTRUCTION(Nor)
+ DEFINE_INSTRUCTION2(Neg)
+
+ DEFINE_INSTRUCTION(Slt)
+ DEFINE_INSTRUCTION(Sltu)
+ DEFINE_INSTRUCTION(Sle)
+ DEFINE_INSTRUCTION(Sleu)
+ DEFINE_INSTRUCTION(Sgt)
+ DEFINE_INSTRUCTION(Sgtu)
+ DEFINE_INSTRUCTION(Sge)
+ DEFINE_INSTRUCTION(Sgeu)
// MIPS32 R2 instruction macro.
- DEFINE_INSTRUCTION(Ror);
- DEFINE_INSTRUCTION(Dror);
+ DEFINE_INSTRUCTION(Ror)
+ DEFINE_INSTRUCTION(Dror)
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
@@ -945,6 +945,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Branch(if_not_equal, ne, with, Operand(scratch));
}
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range);
+
// ---------------------------------------------------------------------------
// GC Support
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index fad35304cd..33a573b6cc 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -19,12 +19,13 @@
#include "src/mips64/constants-mips64.h"
#include "src/ostreams.h"
#include "src/runtime/runtime-utils.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
- Simulator::GlobalMonitor::Get);
+ Simulator::GlobalMonitor::Get)
// Util functions.
inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); }
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index 20742892cf..919953bb61 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -8,6 +8,7 @@
#include "src/feedback-vector.h"
#include "src/objects-body-descriptors.h"
#include "src/objects/cell.h"
+#include "src/objects/data-handler.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-collection.h"
@@ -82,11 +83,8 @@ void BodyDescriptorBase::IterateJSObjectBodyImpl(Map map, HeapObject obj,
// There are embedder fields.
IteratePointers(obj, start_offset, header_size, v);
// Iterate only tagged payload of the embedder slots and skip raw payload.
- int embedder_fields_offset = RoundUp(header_size, kSystemPointerSize);
- DCHECK_EQ(embedder_fields_offset,
- JSObject::GetEmbedderFieldsStartOffset(map));
- for (int offset =
- embedder_fields_offset + EmbedderDataSlot::kTaggedPayloadOffset;
+ DCHECK_EQ(header_size, JSObject::GetEmbedderFieldsStartOffset(map));
+ for (int offset = header_size + EmbedderDataSlot::kTaggedPayloadOffset;
offset < inobject_fields_offset; offset += kEmbedderDataSlotSize) {
IteratePointer(obj, offset, v);
}
@@ -197,19 +195,18 @@ class JSObject::FastBodyDescriptor final : public BodyDescriptorBase {
}
};
-class JSWeakCell::BodyDescriptor final : public BodyDescriptorBase {
+class WeakCell::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- return IsValidJSObjectSlotImpl(map, obj, offset);
+ return offset >= HeapObject::kHeaderSize;
}
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- IteratePointers(obj, JSReceiver::kPropertiesOrHashOffset, kTargetOffset, v);
+ IteratePointers(obj, HeapObject::kHeaderSize, kTargetOffset, v);
IterateCustomWeakPointer(obj, kTargetOffset, v);
- IterateJSObjectBodyImpl(map, obj, kTargetOffset + kTaggedSize, object_size,
- v);
+ IteratePointers(obj, kTargetOffset + kTaggedSize, object_size, v);
}
static inline int SizeOf(Map map, HeapObject object) {
@@ -220,7 +217,7 @@ class JSWeakCell::BodyDescriptor final : public BodyDescriptorBase {
class JSWeakRef::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- return JSObject::BodyDescriptor::IsValidSlot(map, obj, offset);
+ return IsValidJSObjectSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
@@ -228,7 +225,8 @@ class JSWeakRef::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {
IteratePointers(obj, JSReceiver::kPropertiesOrHashOffset, kTargetOffset, v);
IterateCustomWeakPointer(obj, kTargetOffset, v);
- IteratePointers(obj, kTargetOffset + kPointerSize, object_size, v);
+ IterateJSObjectBodyImpl(map, obj, kTargetOffset + kTaggedSize, object_size,
+ v);
}
static inline int SizeOf(Map map, HeapObject object) {
@@ -248,8 +246,7 @@ class SharedFunctionInfo::BodyDescriptor final : public BodyDescriptorBase {
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IterateCustomWeakPointer(obj, kFunctionDataOffset, v);
- IteratePointers(obj,
- SharedFunctionInfo::kStartOfAlwaysStrongPointerFieldsOffset,
+ IteratePointers(obj, SharedFunctionInfo::kStartOfStrongFieldsOffset,
SharedFunctionInfo::kEndOfTaggedFieldsOffset, v);
}
@@ -591,22 +588,22 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
return true;
}
+ static constexpr int kRelocModeMask =
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, ObjectVisitor* v) {
- static constexpr int kModeMask =
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
// GC does not visit data/code in the header and in the body directly.
IteratePointers(obj, kRelocationInfoOffset, kDataStart, v);
- RelocIterator it(Code::cast(obj), kModeMask);
+ RelocIterator it(Code::cast(obj), kRelocModeMask);
v->VisitRelocInfo(&it);
}
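
Hoisting kRelocModeMask to class scope makes the mask reusable, but the underlying idiom is unchanged: each RelocInfo mode owns one bit, so a set of interesting modes is a bitwise OR and membership is a single AND. A reduced sketch; the enumerator values are illustrative, not V8's actual numbering:

#include <cassert>

enum Mode { CODE_TARGET, EMBEDDED_OBJECT, EXTERNAL_REFERENCE, NUMBER_OF_MODES };

constexpr int ModeMask(Mode mode) { return 1 << mode; }

int main() {
  constexpr int kMask = ModeMask(CODE_TARGET) | ModeMask(EMBEDDED_OBJECT);
  assert(kMask & ModeMask(CODE_TARGET));       // in the set
  assert(!(kMask & ModeMask(EXTERNAL_REFERENCE)));  // not in the set
  return 0;
}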
@@ -757,7 +754,7 @@ class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
#ifdef V8_COMPRESS_POINTERS
- STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kSystemPointerSize);
+ STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
STATIC_ASSERT(base::bits::IsPowerOfTwo(kEmbedderDataSlotSize));
return (offset < EmbedderDataArray::kHeaderSize) ||
(((offset - EmbedderDataArray::kHeaderSize) &
@@ -775,7 +772,7 @@ class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
#ifdef V8_COMPRESS_POINTERS
- STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kSystemPointerSize);
+ STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
// Iterate only tagged payload of the embedder slots and skip raw payload.
for (int offset = EmbedderDataArray::OffsetOfElementAt(0) +
EmbedderDataSlot::kTaggedPayloadOffset;
@@ -902,8 +899,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE:
- case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
- case JS_WEAK_FACTORY_TYPE:
+ case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
+ case JS_FINALIZATION_GROUP_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_INTL_V8_BREAK_ITERATOR_TYPE:
case JS_INTL_COLLATOR_TYPE:
@@ -937,8 +934,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<JSTypedArray::BodyDescriptor>(p1, p2, p3, p4);
case JS_FUNCTION_TYPE:
return Op::template apply<JSFunction::BodyDescriptor>(p1, p2, p3, p4);
- case JS_WEAK_CELL_TYPE:
- return Op::template apply<JSWeakCell::BodyDescriptor>(p1, p2, p3, p4);
+ case WEAK_CELL_TYPE:
+ return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3, p4);
case JS_WEAK_REF_TYPE:
return Op::template apply<JSWeakRef::BodyDescriptor>(p1, p2, p3, p4);
case ODDBALL_TYPE:
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index cdf392448b..e810aee48c 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -12,8 +12,11 @@
#include "src/disassembler.h"
#include "src/elements.h"
#include "src/field-type.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/ic/handler-configuration-inl.h"
#include "src/layout-descriptor.h"
#include "src/objects-inl.h"
+#include "src/objects/allocation-site-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
#include "src/objects/cell-inl.h"
@@ -59,7 +62,7 @@
#include "src/objects/struct-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
-#include "src/transitions.h"
+#include "src/transitions-inl.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
@@ -330,18 +333,18 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
JSAsyncFromSyncIterator::cast(*this)->JSAsyncFromSyncIteratorVerify(
isolate);
break;
- case JS_WEAK_CELL_TYPE:
- JSWeakCell::cast(*this)->JSWeakCellVerify(isolate);
+ case WEAK_CELL_TYPE:
+ WeakCell::cast(*this)->WeakCellVerify(isolate);
break;
case JS_WEAK_REF_TYPE:
JSWeakRef::cast(*this)->JSWeakRefVerify(isolate);
break;
- case JS_WEAK_FACTORY_TYPE:
- JSWeakFactory::cast(*this)->JSWeakFactoryVerify(isolate);
+ case JS_FINALIZATION_GROUP_TYPE:
+ JSFinalizationGroup::cast(*this)->JSFinalizationGroupVerify(isolate);
break;
- case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
- JSWeakFactoryCleanupIterator::cast(*this)
- ->JSWeakFactoryCleanupIteratorVerify(isolate);
+ case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
+ JSFinalizationGroupCleanupIterator::cast(*this)
+ ->JSFinalizationGroupCleanupIteratorVerify(isolate);
break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(*this)->JSWeakMapVerify(isolate);
@@ -507,8 +510,8 @@ template <class Traits>
void FixedTypedArray<Traits>::FixedTypedArrayVerify(Isolate* isolate) {
CHECK(IsHeapObject() && map()->instance_type() == Traits::kInstanceType);
if (base_pointer()->ptr() == ptr()) {
- CHECK(reinterpret_cast<Address>(external_pointer()) ==
- ExternalReference::fixed_typed_array_base_data_offset().address());
+ CHECK_EQ(reinterpret_cast<Address>(external_pointer()),
+ FixedTypedArrayBase::kDataOffset - kHeapObjectTag);
} else {
CHECK_EQ(base_pointer(), Smi::kZero);
}
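
The rewritten check is equivalent to the old ExternalReference lookup but spells out the invariant: for an on-heap typed array (base_pointer() pointing at the array itself), external_pointer() stores only the untagged data offset, so tagged base plus external pointer lands on the first element. Schematically, with stand-in constants for FixedTypedArrayBase::kDataOffset and kHeapObjectTag:

#include <cassert>
#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;
constexpr intptr_t kDataOffset = 16;

int main() {
  intptr_t address = 0x1000;                         // object start
  intptr_t tagged_base = address + kHeapObjectTag;   // tagged object pointer
  intptr_t external = kDataOffset - kHeapObjectTag;  // stored external_pointer
  assert(tagged_base + external == address + kDataOffset);  // data start
  return 0;
}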
@@ -591,6 +594,9 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
DCHECK(r.IsDouble());
continue;
}
+ if (COMPRESS_POINTERS_BOOL && index.is_inobject()) {
+ VerifyObjectField(isolate, index.offset());
+ }
Object value = RawFastPropertyAt(index);
if (r.IsDouble()) DCHECK(value->IsMutableHeapNumber());
if (value->IsUninitialized(isolate)) continue;
@@ -605,8 +611,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
CHECK(!field_type->NowStable() || field_type->NowContains(value));
}
CHECK_IMPLIES(is_transitionable_fast_elements_kind,
- !Map::IsInplaceGeneralizableField(details.constness(), r,
- field_type));
+ Map::IsMostGeneralFieldType(r, field_type));
}
}
@@ -635,12 +640,12 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
void Map::MapVerify(Isolate* isolate) {
Heap* heap = isolate->heap();
- CHECK(!Heap::InNewSpace(*this));
+ CHECK(!ObjectInYoungGeneration(*this));
CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
CHECK(instance_size() == kVariableSizeSentinel ||
(kTaggedSize <= instance_size() &&
static_cast<size_t>(instance_size()) < heap->Capacity()));
- CHECK(GetBackPointer()->IsUndefined(heap->isolate()) ||
+ CHECK(GetBackPointer()->IsUndefined(isolate) ||
!Map::cast(GetBackPointer())->is_stable());
HeapObject::VerifyHeapPointer(isolate, prototype());
HeapObject::VerifyHeapPointer(isolate, instance_descriptors());
@@ -667,6 +672,7 @@ void Map::MapVerify(Isolate* isolate) {
CHECK_IMPLIES(IsJSObjectMap() && !CanHaveFastTransitionableElementsKind(),
IsDictionaryElementsKind(elements_kind()) ||
IsTerminalElementsKind(elements_kind()));
+ CHECK_IMPLIES(is_deprecated(), !is_stable());
if (is_prototype_map()) {
DCHECK(prototype_info() == Smi::kZero ||
prototype_info()->IsPrototypeInfo());
@@ -686,7 +692,7 @@ void Map::DictionaryMapVerify(Isolate* isolate) {
}
void AliasedArgumentsEntry::AliasedArgumentsEntryVerify(Isolate* isolate) {
- VerifySmiField(kAliasedContextSlot);
+ VerifySmiField(kAliasedContextSlotOffset);
}
void EmbedderDataArray::EmbedderDataArrayVerify(Isolate* isolate) {
@@ -990,7 +996,7 @@ void String::StringVerify(Isolate* isolate) {
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
CHECK_IMPLIES(length() == 0, *this == ReadOnlyRoots(isolate).empty_string());
if (IsInternalizedString()) {
- CHECK(!Heap::InNewSpace(*this));
+ CHECK(!ObjectInYoungGeneration(*this));
}
if (IsConsString()) {
ConsString::cast(*this)->ConsStringVerify(isolate);
@@ -1097,7 +1103,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
}
int expected_map_index = Context::FunctionMapIndex(
- language_mode(), kind(), true, HasSharedName(), needs_home_object());
+ language_mode(), kind(), HasSharedName(), needs_home_object());
CHECK_EQ(expected_map_index, function_map_index());
if (scope_info()->length() > 0) {
@@ -1118,6 +1124,11 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
CHECK(!construct_as_builtin());
}
}
+
+ // At this point we only support skipping arguments adaptor frames
+ // for strict mode functions (see https://crbug.com/v8/8895).
+ CHECK_IMPLIES(is_safe_to_skip_arguments_adaptor(),
+ language_mode() == LanguageMode::kStrict);
}
void JSGlobalProxy::JSGlobalProxyVerify(Isolate* isolate) {
@@ -1202,6 +1213,11 @@ void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
}
void Code::CodeVerify(Isolate* isolate) {
+ CHECK_IMPLIES(
+ has_safepoint_table(),
+ IsAligned(safepoint_table_offset(), static_cast<unsigned>(kIntSize)));
+ CHECK_LE(safepoint_table_offset(), handler_table_offset());
+ CHECK_LE(handler_table_offset(), constant_pool_offset());
CHECK_LE(constant_pool_offset(), code_comments_offset());
CHECK_LE(code_comments_offset(), InstructionSize());
CHECK(IsAligned(raw_instruction_start(), kCodeAlignment));
@@ -1236,6 +1252,7 @@ void JSArray::JSArrayVerify(Isolate* isolate) {
if (length()->IsSmi() && HasFastElements()) {
if (elements()->length() > 0) {
CHECK_IMPLIES(HasDoubleElements(), elements()->IsFixedDoubleArray());
+ CHECK_IMPLIES(HasSmiOrObjectElements(), elements()->IsFixedArray());
}
int size = Smi::ToInt(length());
// Holey / Packed backing stores might have slack or might have not been
@@ -1293,20 +1310,38 @@ void JSMapIterator::JSMapIteratorVerify(Isolate* isolate) {
CHECK(index()->IsSmi());
}
-void JSWeakCell::JSWeakCellVerify(Isolate* isolate) {
- CHECK(IsJSWeakCell());
- JSObjectVerify(isolate);
+void WeakCell::WeakCellVerify(Isolate* isolate) {
+ CHECK(IsWeakCell());
+
+ CHECK(target()->IsJSReceiver() || target()->IsUndefined(isolate));
- CHECK(next()->IsJSWeakCell() || next()->IsUndefined(isolate));
- if (next()->IsJSWeakCell()) {
- CHECK_EQ(JSWeakCell::cast(next())->prev(), *this);
+ CHECK(prev()->IsWeakCell() || prev()->IsUndefined(isolate));
+ if (prev()->IsWeakCell()) {
+ CHECK_EQ(WeakCell::cast(prev())->next(), *this);
}
- CHECK(prev()->IsJSWeakCell() || prev()->IsUndefined(isolate));
- if (prev()->IsJSWeakCell()) {
- CHECK_EQ(JSWeakCell::cast(prev())->next(), *this);
+
+ CHECK(next()->IsWeakCell() || next()->IsUndefined(isolate));
+ if (next()->IsWeakCell()) {
+ CHECK_EQ(WeakCell::cast(next())->prev(), *this);
+ }
+
+ CHECK_IMPLIES(key()->IsUndefined(isolate),
+ key_list_prev()->IsUndefined(isolate));
+ CHECK_IMPLIES(key()->IsUndefined(isolate),
+ key_list_next()->IsUndefined(isolate));
+
+ CHECK(key_list_prev()->IsWeakCell() || key_list_prev()->IsUndefined(isolate));
+ if (key_list_prev()->IsWeakCell()) {
+ CHECK_EQ(WeakCell::cast(key_list_prev())->key_list_next(), *this);
}
- CHECK(factory()->IsUndefined(isolate) || factory()->IsJSWeakFactory());
+ CHECK(key_list_next()->IsWeakCell() || key_list_next()->IsUndefined(isolate));
+ if (key_list_next()->IsWeakCell()) {
+ CHECK_EQ(WeakCell::cast(key_list_next())->key_list_prev(), *this);
+ }
+
+ CHECK(finalization_group()->IsUndefined(isolate) ||
+ finalization_group()->IsJSFinalizationGroup());
}
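
WeakCellVerify now walks two independent doubly linked lists (the active/cleared chain via prev/next, and the key chain via key_list_prev/key_list_next) and asserts local consistency: any neighbour must point straight back. The core invariant, reduced to a toy model:

#include <cassert>
#include <cstddef>

// Minimal model of the invariant enforced per list: a cell's neighbour, if
// present, must link back to the cell itself.
struct Cell {
  Cell* prev = nullptr;
  Cell* next = nullptr;
};

void VerifyLinks(const Cell* c) {
  if (c->prev) assert(c->prev->next == c);
  if (c->next) assert(c->next->prev == c);
}

int main() {
  Cell a, b;
  a.next = &b;
  b.prev = &a;
  VerifyLinks(&a);
  VerifyLinks(&b);
  return 0;
}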
void JSWeakRef::JSWeakRefVerify(Isolate* isolate) {
@@ -1315,32 +1350,31 @@ void JSWeakRef::JSWeakRefVerify(Isolate* isolate) {
CHECK(target()->IsUndefined(isolate) || target()->IsJSReceiver());
}
-void JSWeakFactory::JSWeakFactoryVerify(Isolate* isolate) {
- CHECK(IsJSWeakFactory());
+void JSFinalizationGroup::JSFinalizationGroupVerify(Isolate* isolate) {
+ CHECK(IsJSFinalizationGroup());
JSObjectVerify(isolate);
VerifyHeapPointer(isolate, cleanup());
- CHECK(active_cells()->IsUndefined(isolate) || active_cells()->IsJSWeakCell());
- if (active_cells()->IsJSWeakCell()) {
- CHECK(JSWeakCell::cast(active_cells())->prev()->IsUndefined(isolate));
+ CHECK(active_cells()->IsUndefined(isolate) || active_cells()->IsWeakCell());
+ if (active_cells()->IsWeakCell()) {
+ CHECK(WeakCell::cast(active_cells())->prev()->IsUndefined(isolate));
}
- CHECK(cleared_cells()->IsUndefined(isolate) ||
- cleared_cells()->IsJSWeakCell());
- if (cleared_cells()->IsJSWeakCell()) {
- CHECK(JSWeakCell::cast(cleared_cells())->prev()->IsUndefined(isolate));
+ CHECK(cleared_cells()->IsUndefined(isolate) || cleared_cells()->IsWeakCell());
+ if (cleared_cells()->IsWeakCell()) {
+ CHECK(WeakCell::cast(cleared_cells())->prev()->IsUndefined(isolate));
}
}
-void JSWeakFactoryCleanupIterator::JSWeakFactoryCleanupIteratorVerify(
- Isolate* isolate) {
- CHECK(IsJSWeakFactoryCleanupIterator());
+void JSFinalizationGroupCleanupIterator::
+ JSFinalizationGroupCleanupIteratorVerify(Isolate* isolate) {
+ CHECK(IsJSFinalizationGroupCleanupIterator());
JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, factory());
+ VerifyHeapPointer(isolate, finalization_group());
}
-void WeakFactoryCleanupJobTask::WeakFactoryCleanupJobTaskVerify(
+void FinalizationGroupCleanupJobTask::FinalizationGroupCleanupJobTaskVerify(
Isolate* isolate) {
- CHECK(IsWeakFactoryCleanupJobTask());
- CHECK(factory()->IsJSWeakFactory());
+ CHECK(IsFinalizationGroupCleanupJobTask());
+ CHECK(finalization_group()->IsJSFinalizationGroup());
}
void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
@@ -1791,6 +1825,12 @@ void Tuple3::Tuple3Verify(Isolate* isolate) {
VerifyObjectField(isolate, kValue3Offset);
}
+void ClassPositions::ClassPositionsVerify(Isolate* isolate) {
+ CHECK(IsClassPositions());
+ VerifySmiField(kStartOffset);
+ VerifySmiField(kEndOffset);
+}
+
void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionVerify(
Isolate* isolate) {
CHECK(IsObjectBoilerplateDescription());
@@ -2024,6 +2064,14 @@ void DebugInfo::DebugInfoVerify(Isolate* isolate) {
VerifyPointer(isolate, break_points());
}
+void StackTraceFrame::StackTraceFrameVerify(Isolate* isolate) {
+ CHECK(IsStackTraceFrame());
+ VerifySmiField(kFrameIndexOffset);
+ VerifySmiField(kIdOffset);
+ VerifyPointer(isolate, frame_array());
+ VerifyPointer(isolate, frame_info());
+}
+
void StackFrameInfo::StackFrameInfoVerify(Isolate* isolate) {
CHECK(IsStackFrameInfo());
VerifyPointer(isolate, script_name());
diff --git a/deps/v8/src/objects-definitions.h b/deps/v8/src/objects-definitions.h
index 08f3ce8c76..f79f16b636 100644
--- a/deps/v8/src/objects-definitions.h
+++ b/deps/v8/src/objects-definitions.h
@@ -43,10 +43,8 @@ namespace internal {
V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE) \
V(UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
V(STRING_TYPE) \
V(CONS_STRING_TYPE) \
V(EXTERNAL_STRING_TYPE) \
@@ -57,10 +55,8 @@ namespace internal {
V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
V(SLICED_ONE_BYTE_STRING_TYPE) \
V(THIN_ONE_BYTE_STRING_TYPE) \
- V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
V(UNCACHED_EXTERNAL_STRING_TYPE) \
V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE) \
- V(UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
\
V(SYMBOL_TYPE) \
V(HEAP_NUMBER_TYPE) \
@@ -98,6 +94,7 @@ namespace internal {
V(ALLOCATION_MEMENTO_TYPE) \
V(ASM_WASM_DATA_TYPE) \
V(ASYNC_GENERATOR_REQUEST_TYPE) \
+ V(CLASS_POSITIONS_TYPE) \
V(DEBUG_INFO_TYPE) \
V(FUNCTION_TEMPLATE_INFO_TYPE) \
V(FUNCTION_TEMPLATE_RARE_DATA_TYPE) \
@@ -111,6 +108,7 @@ namespace internal {
V(PROTOTYPE_INFO_TYPE) \
V(SCRIPT_TYPE) \
V(STACK_FRAME_INFO_TYPE) \
+ V(STACK_TRACE_FRAME_TYPE) \
V(TUPLE2_TYPE) \
V(TUPLE3_TYPE) \
V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
@@ -123,7 +121,7 @@ namespace internal {
V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \
V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \
V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
- V(WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE) \
+ V(FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE) \
\
V(ALLOCATION_SITE_TYPE) \
V(EMBEDDER_DATA_ARRAY_TYPE) \
@@ -175,6 +173,7 @@ namespace internal {
V(UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \
V(UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE) \
V(WEAK_ARRAY_LIST_TYPE) \
+ V(WEAK_CELL_TYPE) \
\
V(JS_PROXY_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
@@ -208,10 +207,9 @@ namespace internal {
V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
V(JS_SET_VALUE_ITERATOR_TYPE) \
V(JS_STRING_ITERATOR_TYPE) \
- V(JS_WEAK_CELL_TYPE) \
V(JS_WEAK_REF_TYPE) \
- V(JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE) \
- V(JS_WEAK_FACTORY_TYPE) \
+ V(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE) \
+ V(JS_FINALIZATION_GROUP_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
V(JS_TYPED_ARRAY_TYPE) \
@@ -263,17 +261,11 @@ namespace internal {
ExternalString) \
V(EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize, \
external_one_byte_string, ExternalOneByteString) \
- V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, ExternalTwoByteString::kSize, \
- external_string_with_one_byte_data, ExternalStringWithOneByteData) \
V(UNCACHED_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kUncachedSize, \
uncached_external_string, UncachedExternalString) \
V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE, \
ExternalOneByteString::kUncachedSize, uncached_external_one_byte_string, \
UncachedExternalOneByteString) \
- V(UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kUncachedSize, \
- uncached_external_string_with_one_byte_data, \
- UncachedExternalStringWithOneByteData) \
\
V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string, \
InternalizedString) \
@@ -283,10 +275,6 @@ namespace internal {
external_internalized_string, ExternalInternalizedString) \
V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \
external_one_byte_internalized_string, ExternalOneByteInternalizedString) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_internalized_string_with_one_byte_data, \
- ExternalInternalizedStringWithOneByteData) \
V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE, \
ExternalTwoByteString::kUncachedSize, \
uncached_external_internalized_string, UncachedExternalInternalizedString) \
@@ -294,10 +282,6 @@ namespace internal {
ExternalOneByteString::kUncachedSize, \
uncached_external_one_byte_internalized_string, \
UncachedExternalOneByteInternalizedString) \
- V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \
- ExternalTwoByteString::kUncachedSize, \
- uncached_external_internalized_string_with_one_byte_data, \
- UncachedExternalInternalizedStringWithOneByteData) \
V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
ThinOneByteString)
@@ -321,6 +305,7 @@ namespace internal {
V(_, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
async_generator_request) \
+ V(_, CLASS_POSITIONS_TYPE, ClassPositions, class_positions) \
V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \
V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \
function_template_info) \
@@ -336,6 +321,7 @@ namespace internal {
V(_, PROTOTYPE_INFO_TYPE, PrototypeInfo, prototype_info) \
V(_, SCRIPT_TYPE, Script, script) \
V(_, STACK_FRAME_INFO_TYPE, StackFrameInfo, stack_frame_info) \
+ V(_, STACK_TRACE_FRAME_TYPE, StackTraceFrame, stack_trace_frame) \
V(_, TUPLE2_TYPE, Tuple2, tuple2) \
V(_, TUPLE3_TYPE, Tuple3, tuple3) \
V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
@@ -352,8 +338,8 @@ namespace internal {
promise_reject_reaction_job_task) \
V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \
promise_resolve_thenable_job_task) \
- V(_, WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE, WeakFactoryCleanupJobTask, \
- weak_factory_cleanup_job_task)
+ V(_, FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE, \
+ FinalizationGroupCleanupJobTask, finalization_group_cleanup_job_task)
// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
#define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 0bad18efa8..4ea613067c 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -17,50 +17,29 @@
#include "src/base/bits.h"
#include "src/base/tsan.h"
#include "src/builtins/builtins.h"
-#include "src/contexts-inl.h"
-#include "src/conversions-inl.h"
-#include "src/feedback-vector-inl.h"
-#include "src/field-index-inl.h"
+#include "src/conversions.h"
+#include "src/double.h"
#include "src/handles-inl.h"
#include "src/heap/factory.h"
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/isolate-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/keys.h"
-#include "src/layout-descriptor-inl.h"
-#include "src/lookup-cache-inl.h"
-#include "src/lookup-inl.h"
-#include "src/maybe-handles-inl.h"
+#include "src/lookup-inl.h" // TODO(jkummerow): Drop.
#include "src/objects/bigint.h"
-#include "src/objects/descriptor-array-inl.h"
-#include "src/objects/embedder-data-array-inl.h"
-#include "src/objects/free-space-inl.h"
#include "src/objects/heap-number-inl.h"
-#include "src/objects/heap-object.h" // TODO(jkummerow): See below [1].
-#include "src/objects/js-proxy-inl.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/js-proxy-inl.h" // TODO(jkummerow): Drop.
#include "src/objects/literal-objects.h"
-#include "src/objects/maybe-object-inl.h"
-#include "src/objects/oddball-inl.h"
-#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/oddball.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi-inl.h"
-#include "src/objects/template-objects.h"
#include "src/objects/templates.h"
#include "src/property-details.h"
#include "src/property.h"
-#include "src/prototype-inl.h"
-#include "src/roots-inl.h"
-#include "src/transitions-inl.h"
#include "src/v8memory.h"
-// [1] This file currently contains the definitions of many
-// HeapObject::IsFoo() predicates, which in turn require #including
-// many other -inl.h files. Find a way to avoid this. Idea:
-// Since e.g. HeapObject::IsSeqString requires things from string-inl.h,
-// and presumably is mostly used from places that require/include string-inl.h
-// anyway, maybe that's where it should be defined?
-
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -84,11 +63,6 @@ int PropertyDetails::field_width_in_words() const {
return representation().IsDouble() ? kDoubleSize / kTaggedSize : 1;
}
-bool HeapObject::IsUncompiledData() const {
- return IsUncompiledDataWithoutPreparseData() ||
- IsUncompiledDataWithPreparseData();
-}
-
bool HeapObject::IsSloppyArgumentsElements() const {
return IsFixedArrayExact();
}
@@ -108,10 +82,6 @@ bool HeapObject::IsDataHandler() const {
bool HeapObject::IsClassBoilerplate() const { return IsFixedArrayExact(); }
-bool HeapObject::IsExternal(Isolate* isolate) const {
- return map()->FindRootMap(isolate) == isolate->heap()->external_map();
-}
-
#define IS_TYPE_FUNCTION_DEF(type_) \
bool Object::Is##type_() const { \
return IsHeapObject() && HeapObject::cast(*this)->Is##type_(); \
@@ -330,11 +300,6 @@ bool HeapObject::IsSymbolWrapper() const {
return IsJSValue() && JSValue::cast(*this)->value()->IsSymbol();
}
-bool HeapObject::IsBoolean() const {
- return IsOddball() &&
- ((Oddball::cast(*this)->kind() & Oddball::kNotBooleanMask) == 0);
-}
-
bool HeapObject::IsJSArrayBufferView() const {
return IsJSDataView() || IsJSTypedArray();
}
@@ -343,10 +308,6 @@ bool HeapObject::IsStringSet() const { return IsHashTable(); }
bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }
-bool HeapObject::IsNormalizedMapCache() const {
- return NormalizedMapCache::IsNormalizedMapCache(*this);
-}
-
bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
bool HeapObject::IsMapCache() const { return IsHashTable(); }
@@ -431,88 +392,32 @@ bool Object::IsMinusZero() const {
i::IsMinusZero(HeapNumber::cast(*this)->value());
}
-OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object)
-OBJECT_CONSTRUCTORS_IMPL(HashTableBase, FixedArray)
-
-template <typename Derived, typename Shape>
-HashTable<Derived, Shape>::HashTable(Address ptr) : HashTableBase(ptr) {
- SLOW_DCHECK(IsHashTable());
-}
-
-template <typename Derived, typename Shape>
-ObjectHashTableBase<Derived, Shape>::ObjectHashTableBase(Address ptr)
- : HashTable<Derived, Shape>(ptr) {}
-
-ObjectHashTable::ObjectHashTable(Address ptr)
- : ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape>(ptr) {
- SLOW_DCHECK(IsObjectHashTable());
-}
-
-EphemeronHashTable::EphemeronHashTable(Address ptr)
- : ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape>(ptr) {
- SLOW_DCHECK(IsEphemeronHashTable());
-}
-
-ObjectHashSet::ObjectHashSet(Address ptr)
- : HashTable<ObjectHashSet, ObjectHashSetShape>(ptr) {
- SLOW_DCHECK(IsObjectHashSet());
-}
-
OBJECT_CONSTRUCTORS_IMPL(RegExpMatchInfo, FixedArray)
OBJECT_CONSTRUCTORS_IMPL(ScopeInfo, FixedArray)
-
-NormalizedMapCache::NormalizedMapCache(Address ptr) : WeakFixedArray(ptr) {
- // TODO(jkummerow): Introduce IsNormalizedMapCache() and use
- // OBJECT_CONSTRUCTORS_IMPL macro?
-}
-
OBJECT_CONSTRUCTORS_IMPL(BigIntBase, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(BigInt, BigIntBase)
OBJECT_CONSTRUCTORS_IMPL(FreshlyAllocatedBigInt, BigIntBase)
-OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription, Tuple2)
-
// ------------------------------------
// Cast operations
CAST_ACCESSOR(BigInt)
-CAST_ACCESSOR(ObjectBoilerplateDescription)
-CAST_ACCESSOR(EphemeronHashTable)
-CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(NormalizedMapCache)
-CAST_ACCESSOR(Object)
-CAST_ACCESSOR(ObjectHashSet)
-CAST_ACCESSOR(ObjectHashTable)
CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
-CAST_ACCESSOR(TemplateObjectDescription)
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}
-bool Object::KeyEquals(Object second) {
- Object first = *this;
- if (second->IsNumber()) {
- if (first->IsNumber()) return first->Number() == second->Number();
- Object temp = first;
- first = second;
- second = temp;
- }
- if (first->IsNumber()) {
- DCHECK_LE(0, first->Number());
- uint32_t expected = static_cast<uint32_t>(first->Number());
- uint32_t index;
- return Name::cast(second)->AsArrayIndex(&index) && index == expected;
- }
- return Name::cast(first)->Equals(Name::cast(second));
-}
-
bool Object::FilterKey(PropertyFilter filter) {
DCHECK(!IsPropertyCell());
- if (IsSymbol()) {
+ if (filter == PRIVATE_NAMES_ONLY) {
+ if (!IsSymbol()) return true;
+ return !Symbol::cast(*this)->is_private_name();
+ } else if (IsSymbol()) {
if (filter & SKIP_SYMBOLS) return true;
+
if (Symbol::cast(*this)->is_private()) return true;
} else {
if (filter & SKIP_STRINGS) return true;
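
The new PRIVATE_NAMES_ONLY branch inverts the usual filtering: instead of dropping categories via the SKIP_* bits, it keeps only private-name symbols. A simplified model of the patched logic; note it collapses V8's separate is_private/is_private_name predicates into one flag:

#include <cassert>

enum Filter {
  ALL = 0,
  SKIP_STRINGS = 1,
  SKIP_SYMBOLS = 2,
  PRIVATE_NAMES_ONLY = 4
};

struct Key {
  bool is_symbol;
  bool is_private_name;
};

// Returns true when the key should be dropped.
bool FilterKey(const Key& k, int filter) {
  if (filter == PRIVATE_NAMES_ONLY) {
    // Keep only symbols that are private names; drop everything else.
    return !(k.is_symbol && k.is_private_name);
  }
  if (k.is_symbol) {
    if (filter & SKIP_SYMBOLS) return true;
    return k.is_private_name;  // private symbols never escape enumeration
  }
  return (filter & SKIP_STRINGS) != 0;
}

int main() {
  assert(FilterKey({false, false}, PRIVATE_NAMES_ONLY));  // string dropped
  assert(!FilterKey({true, true}, PRIVATE_NAMES_ONLY));   // private name kept
  assert(FilterKey({true, false}, SKIP_SYMBOLS));
  assert(!FilterKey({false, false}, ALL));
  return 0;
}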
@@ -520,33 +425,6 @@ bool Object::FilterKey(PropertyFilter filter) {
return false;
}
-Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
- Representation representation) {
- if (!representation.IsDouble()) return object;
- auto result = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
- if (object->IsUninitialized(isolate)) {
- result->set_value_as_bits(kHoleNanInt64);
- } else if (object->IsMutableHeapNumber()) {
- // Ensure that all bits of the double value are preserved.
- result->set_value_as_bits(
- MutableHeapNumber::cast(*object)->value_as_bits());
- } else {
- result->set_value(object->Number());
- }
- return result;
-}
-
-Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
- Representation representation) {
- DCHECK(!object->IsUninitialized(isolate));
- if (!representation.IsDouble()) {
- DCHECK(object->FitsRepresentation(representation));
- return object;
- }
- return isolate->factory()->NewHeapNumber(
- MutableHeapNumber::cast(*object)->value());
-}
-
Representation Object::OptimalRepresentation() {
if (!FLAG_track_fields) return Representation::Tagged();
if (IsSmi()) {
@@ -603,7 +481,7 @@ MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
Handle<Object> object,
const char* method_name) {
if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
- return ToObject(isolate, object, isolate->native_context(), method_name);
+ return ToObjectImpl(isolate, object, method_name);
}
@@ -695,15 +573,15 @@ MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
uint32_t index, Handle<Object> value,
- LanguageMode language_mode) {
+ ShouldThrow should_throw) {
LookupIterator it(isolate, object, index);
MAYBE_RETURN_NULL(
- SetProperty(&it, value, language_mode, StoreOrigin::kMaybeKeyed));
+ SetProperty(&it, value, StoreOrigin::kMaybeKeyed, Just(should_throw)));
return value;
}
ObjectSlot HeapObject::RawField(int byte_offset) const {
- return ObjectSlot(FIELD_ADDR(this, byte_offset));
+ return ObjectSlot(FIELD_ADDR(*this, byte_offset));
}
ObjectSlot HeapObject::RawField(const HeapObject obj, int byte_offset) {
@@ -711,7 +589,7 @@ ObjectSlot HeapObject::RawField(const HeapObject obj, int byte_offset) {
}
MaybeObjectSlot HeapObject::RawMaybeWeakField(int byte_offset) const {
- return MaybeObjectSlot(FIELD_ADDR(this, byte_offset));
+ return MaybeObjectSlot(FIELD_ADDR(*this, byte_offset));
}
MaybeObjectSlot HeapObject::RawMaybeWeakField(HeapObject obj, int byte_offset) {
@@ -735,15 +613,34 @@ HeapObject MapWord::ToForwardingAddress() {
#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(Isolate* isolate, int offset) {
- VerifyPointer(isolate, READ_FIELD(this, offset));
+ VerifyPointer(isolate, READ_FIELD(*this, offset));
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ // Ensure upper 32-bits are zeros.
+ Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
+ CHECK_EQ(kNullAddress, RoundDown<kPtrComprIsolateRootAlignment>(value));
+#endif
}
void HeapObject::VerifyMaybeObjectField(Isolate* isolate, int offset) {
- MaybeObject::VerifyMaybeObjectPointer(isolate, READ_WEAK_FIELD(this, offset));
+ MaybeObject::VerifyMaybeObjectPointer(isolate,
+ READ_WEAK_FIELD(*this, offset));
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ // Ensure upper 32-bits are zeros.
+ Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
+ CHECK_EQ(kNullAddress, RoundDown<kPtrComprIsolateRootAlignment>(value));
+#endif
}
void HeapObject::VerifySmiField(int offset) {
- CHECK(READ_FIELD(this, offset)->IsSmi());
+ CHECK(READ_FIELD(*this, offset)->IsSmi());
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ // Ensure upper 32-bits are zeros.
+ Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
+ CHECK_EQ(kNullAddress, RoundDown<kPtrComprIsolateRootAlignment>(value));
+#endif
}
#endif
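
Each of the three verifiers above gains the same pointer-compression sanity check: read the slot as a full word and require everything above the isolate-root alignment to be zero. The arithmetic, with an illustrative 2^32 alignment standing in for kPtrComprIsolateRootAlignment:

#include <cassert>
#include <cstdint>

constexpr uint64_t kIsolateRootAlignment = uint64_t{1} << 32;

uint64_t RoundDown(uint64_t value, uint64_t alignment) {
  return value & ~(alignment - 1);
}

int main() {
  uint64_t clean_slot = 0x00000000DEADBEEFULL;    // upper half zero: passes
  assert(RoundDown(clean_slot, kIsolateRootAlignment) == 0);
  uint64_t corrupt_slot = 0x00000001DEADBEEFULL;  // stray upper bits: caught
  assert(RoundDown(corrupt_slot, kIsolateRootAlignment) != 0);
  return 0;
}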
@@ -751,7 +648,7 @@ void HeapObject::VerifySmiField(int offset) {
ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
// TODO(v8:7464): When RO_SPACE is embedded, this will access a global
// variable instead.
- return ReadOnlyRoots(MemoryChunk::FromHeapObject(*this)->heap());
+ return ReadOnlyRoots(GetHeapFromWritableObject(*this));
}
Map HeapObject::map() const { return map_word().ToMap(); }
@@ -759,7 +656,7 @@ Map HeapObject::map() const { return map_word().ToMap(); }
void HeapObject::set_map(Map value) {
if (!value.is_null()) {
#ifdef VERIFY_HEAP
- Heap::FromWritableHeapObject(*this)->VerifyObjectLayoutChange(*this, value);
+ GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
#endif
}
set_map_word(MapWord::FromMap(value));
@@ -777,7 +674,7 @@ Map HeapObject::synchronized_map() const {
void HeapObject::synchronized_set_map(Map value) {
if (!value.is_null()) {
#ifdef VERIFY_HEAP
- Heap::FromWritableHeapObject(*this)->VerifyObjectLayoutChange(*this, value);
+ GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
#endif
}
synchronized_set_map_word(MapWord::FromMap(value));
@@ -793,7 +690,7 @@ void HeapObject::synchronized_set_map(Map value) {
void HeapObject::set_map_no_write_barrier(Map value) {
if (!value.is_null()) {
#ifdef VERIFY_HEAP
- Heap::FromWritableHeapObject(*this)->VerifyObjectLayoutChange(*this, value);
+ GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
#endif
}
set_map_word(MapWord::FromMap(value));
@@ -878,17 +775,6 @@ HeapObject Object::GetHeapObject() const {
return HeapObject::cast(*this);
}
-void Object::VerifyApiCallResultType() {
-#if DEBUG
- if (IsSmi()) return;
- DCHECK(IsHeapObject());
- if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
- IsBigInt() || IsUndefined() || IsTrue() || IsFalse() || IsNull())) {
- FATAL("API call returned invalid object");
- }
-#endif // DEBUG
-}
-
int RegExpMatchInfo::NumberOfCaptureRegisters() {
DCHECK_GE(length(), kLastMatchOverhead);
Object obj = get(kNumberOfCapturesIndex);
@@ -933,13 +819,16 @@ void RegExpMatchInfo::SetCapture(int i, int value) {
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
- Heap* heap = Heap::FromWritableHeapObject(*this);
- if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
- if (Heap::InNewSpace(*this)) return SKIP_WRITE_BARRIER;
- return UPDATE_WRITE_BARRIER;
+ return GetWriteBarrierModeForObject(*this, &promise);
}
AllocationAlignment HeapObject::RequiredAlignment(Map map) {
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): Consider using aligned allocations once the
+ // allocation alignment inconsistency is fixed. For now we keep using
+ // unaligned access since both x64 and arm64 architectures (where pointer
+ // compression is supported) allow unaligned access to doubles and full words.
+#endif // V8_COMPRESS_POINTERS
#ifdef V8_HOST_ARCH_32_BIT
int instance_type = map->instance_type();
if (instance_type == FIXED_FLOAT64_ARRAY_TYPE ||
@@ -951,154 +840,10 @@ AllocationAlignment HeapObject::RequiredAlignment(Map map) {
return kWordAligned;
}
-bool HeapObject::NeedsRehashing() const {
- switch (map()->instance_type()) {
- case DESCRIPTOR_ARRAY_TYPE:
- return DescriptorArray::cast(*this)->number_of_descriptors() > 1;
- case TRANSITION_ARRAY_TYPE:
- return TransitionArray::cast(*this)->number_of_entries() > 1;
- case ORDERED_HASH_MAP_TYPE:
- return OrderedHashMap::cast(*this)->NumberOfElements() > 0;
- case ORDERED_HASH_SET_TYPE:
- return OrderedHashSet::cast(*this)->NumberOfElements() > 0;
- case NAME_DICTIONARY_TYPE:
- case GLOBAL_DICTIONARY_TYPE:
- case NUMBER_DICTIONARY_TYPE:
- case SIMPLE_NUMBER_DICTIONARY_TYPE:
- case STRING_TABLE_TYPE:
- case HASH_TABLE_TYPE:
- case SMALL_ORDERED_HASH_MAP_TYPE:
- case SMALL_ORDERED_HASH_SET_TYPE:
- case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
- return true;
- default:
- return false;
- }
-}
-
Address HeapObject::GetFieldAddress(int field_offset) const {
- return FIELD_ADDR(this, field_offset);
-}
-
-ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
-ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
-
-DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
-
-DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
-DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
-DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
-
-int HeapObject::SizeFromMap(Map map) const {
- int instance_size = map->instance_size();
- if (instance_size != kVariableSizeSentinel) return instance_size;
- // Only inline the most frequent cases.
- InstanceType instance_type = map->instance_type();
- if (IsInRange(instance_type, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE)) {
- return FixedArray::SizeFor(
- FixedArray::unchecked_cast(*this)->synchronized_length());
- }
- if (IsInRange(instance_type, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE)) {
- // Native context has fixed size.
- DCHECK_NE(instance_type, NATIVE_CONTEXT_TYPE);
- return Context::SizeFor(Context::unchecked_cast(*this)->length());
- }
- if (instance_type == ONE_BYTE_STRING_TYPE ||
- instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
- // Strings may get concurrently truncated, hence we have to access its
- // length synchronized.
- return SeqOneByteString::SizeFor(
- SeqOneByteString::unchecked_cast(*this)->synchronized_length());
- }
- if (instance_type == BYTE_ARRAY_TYPE) {
- return ByteArray::SizeFor(
- ByteArray::unchecked_cast(*this)->synchronized_length());
- }
- if (instance_type == BYTECODE_ARRAY_TYPE) {
- return BytecodeArray::SizeFor(
- BytecodeArray::unchecked_cast(*this)->synchronized_length());
- }
- if (instance_type == FREE_SPACE_TYPE) {
- return FreeSpace::unchecked_cast(*this)->relaxed_read_size();
- }
- if (instance_type == STRING_TYPE ||
- instance_type == INTERNALIZED_STRING_TYPE) {
- // Strings may get concurrently truncated, hence we have to access their
- // length with synchronized semantics.
- return SeqTwoByteString::SizeFor(
- SeqTwoByteString::unchecked_cast(*this)->synchronized_length());
- }
- if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
- return FixedDoubleArray::SizeFor(
- FixedDoubleArray::unchecked_cast(*this)->synchronized_length());
- }
- if (instance_type == FEEDBACK_METADATA_TYPE) {
- return FeedbackMetadata::SizeFor(
- FeedbackMetadata::unchecked_cast(*this)->synchronized_slot_count());
- }
- if (instance_type == DESCRIPTOR_ARRAY_TYPE) {
- return DescriptorArray::SizeFor(
- DescriptorArray::unchecked_cast(*this)->number_of_all_descriptors());
- }
- if (IsInRange(instance_type, FIRST_WEAK_FIXED_ARRAY_TYPE,
- LAST_WEAK_FIXED_ARRAY_TYPE)) {
- return WeakFixedArray::SizeFor(
- WeakFixedArray::unchecked_cast(*this)->synchronized_length());
- }
- if (instance_type == WEAK_ARRAY_LIST_TYPE) {
- return WeakArrayList::SizeForCapacity(
- WeakArrayList::unchecked_cast(*this)->synchronized_capacity());
- }
- if (IsInRange(instance_type, FIRST_FIXED_TYPED_ARRAY_TYPE,
- LAST_FIXED_TYPED_ARRAY_TYPE)) {
- return FixedTypedArrayBase::unchecked_cast(*this)->TypedArraySize(
- instance_type);
- }
- if (instance_type == SMALL_ORDERED_HASH_SET_TYPE) {
- return SmallOrderedHashSet::SizeFor(
- SmallOrderedHashSet::unchecked_cast(*this)->Capacity());
- }
- if (instance_type == SMALL_ORDERED_HASH_MAP_TYPE) {
- return SmallOrderedHashMap::SizeFor(
- SmallOrderedHashMap::unchecked_cast(*this)->Capacity());
- }
- if (instance_type == SMALL_ORDERED_NAME_DICTIONARY_TYPE) {
- return SmallOrderedNameDictionary::SizeFor(
- SmallOrderedNameDictionary::unchecked_cast(*this)->Capacity());
- }
- if (instance_type == PROPERTY_ARRAY_TYPE) {
- return PropertyArray::SizeFor(
- PropertyArray::cast(*this)->synchronized_length());
- }
- if (instance_type == FEEDBACK_VECTOR_TYPE) {
- return FeedbackVector::SizeFor(
- FeedbackVector::unchecked_cast(*this)->length());
- }
- if (instance_type == BIGINT_TYPE) {
- return BigInt::SizeFor(BigInt::unchecked_cast(*this)->length());
- }
- if (instance_type == PREPARSE_DATA_TYPE) {
- PreparseData data = PreparseData::unchecked_cast(*this);
- return PreparseData::SizeFor(data->data_length(), data->children_length());
- }
- if (instance_type == CODE_TYPE) {
- return Code::unchecked_cast(*this)->CodeSize();
- }
- DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
- return EmbedderDataArray::SizeFor(
- EmbedderDataArray::unchecked_cast(*this)->length());
+ return FIELD_ADDR(*this, field_offset);
}
-ACCESSORS(TemplateObjectDescription, raw_strings, FixedArray, kRawStringsOffset)
-ACCESSORS(TemplateObjectDescription, cooked_strings, FixedArray,
- kCookedStringsOffset)
-
// static
Maybe<bool> Object::GreaterThan(Isolate* isolate, Handle<Object> x,
Handle<Object> y) {
@@ -1177,14 +922,12 @@ MaybeHandle<Object> Object::GetPropertyOrElement(Isolate* isolate,
return GetProperty(&it);
}
-MaybeHandle<Object> Object::SetPropertyOrElement(Isolate* isolate,
- Handle<Object> object,
- Handle<Name> name,
- Handle<Object> value,
- LanguageMode language_mode,
- StoreOrigin store_origin) {
+MaybeHandle<Object> Object::SetPropertyOrElement(
+ Isolate* isolate, Handle<Object> object, Handle<Name> name,
+ Handle<Object> value, Maybe<ShouldThrow> should_throw,
+ StoreOrigin store_origin) {
LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
- MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_origin));
+ MAYBE_RETURN_NULL(SetProperty(&it, value, store_origin, should_throw));
return value;
}
@@ -1196,8 +939,6 @@ MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
return GetProperty(&it);
}
-
-
// static
Object Object::GetSimpleHash(Object object) {
DisallowHeapAllocation no_gc;
@@ -1231,6 +972,10 @@ Object Object::GetSimpleHash(Object object) {
uint32_t hash = BigInt::cast(object)->Hash();
return Smi::FromInt(hash & Smi::kMaxValue);
}
+ if (object->IsSharedFunctionInfo()) {
+ uint32_t hash = SharedFunctionInfo::cast(object)->Hash();
+ return Smi::FromInt(hash & Smi::kMaxValue);
+ }
DCHECK(object->IsJSReceiver());
return object;
}
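The new SharedFunctionInfo branch follows the convention of the String and BigInt cases above: a 32-bit hash is masked with Smi::kMaxValue so the result always fits in a tagged small integer. Illustrated with an assumed 31-bit Smi payload:

#include <cstdint>

constexpr uint32_t kSmiMaxValueSketch = (uint32_t{1} << 31) - 1;  // assumption

uint32_t HashToSmiPayload(uint32_t hash) {
  return hash & kSmiMaxValueSketch;  // drop bits a Smi cannot represent
}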
@@ -1265,7 +1010,7 @@ Relocatable::~Relocatable() {
// offset of the address in respective MemoryChunk.
static inline uint32_t ObjectAddressForHashing(Address object) {
uint32_t value = static_cast<uint32_t>(object);
- return value & MemoryChunk::kAlignmentMask;
+ return value & kPageAlignmentMask;
}
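kPageAlignmentMask here plays the role MemoryChunk::kAlignmentMask used to: masking an address with it yields, per the comment above, the offset of the address within its (power-of-two sized, aligned) memory chunk. In isolation, with an assumed 256 KB page:

#include <cstdint>

constexpr uintptr_t kPageSizeSketch = uintptr_t{1} << 18;  // assumed 256 KB
constexpr uintptr_t kPageAlignmentMaskSketch = kPageSizeSketch - 1;

uint32_t OffsetInChunk(uintptr_t address) {
  return static_cast<uint32_t>(address & kPageAlignmentMaskSketch);
}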
static inline Handle<Object> MakeEntryPair(Isolate* isolate, uint32_t index,
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 60931395a9..d3603631f2 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -10,6 +10,8 @@
#include "src/bootstrapper.h"
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/heap/heap-inl.h" // For InOldSpace.
+#include "src/heap/heap-write-barrier-inl.h" // For GetIsolateFromWritableObj.
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
@@ -69,6 +71,11 @@ namespace internal {
#ifdef OBJECT_PRINT
void Object::Print() const {
+ // Output into debugger's command window if a debugger is attached.
+ DbgStdoutStream dbg_os;
+ this->Print(dbg_os);
+ dbg_os << std::flush;
+
StdoutStream os;
this->Print(os);
os << std::flush;
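The new DbgStdoutStream mirrors Print() output to an attached native debugger in addition to stdout. V8's actual helper is not shown in this diff; as an assumption about its shape, a stream buffer that forwards flushed text to the debugger (e.g. via OutputDebugStringA on Windows) could look like:

#include <ostream>
#include <sstream>

class DebuggerStreamBufSketch : public std::stringbuf {
 protected:
  int sync() override {
    // On Windows: OutputDebugStringA(str().c_str()); shows up in the
    // debugger's output window. Stubbed out to keep this sketch portable.
    str("");  // discard the flushed contents
    return 0;
  }
};
// Usage: DebuggerStreamBufSketch buf; std::ostream dbg(&buf);
//        dbg << "hello" << std::flush;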
@@ -91,8 +98,9 @@ void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
os << map()->instance_type();
}
os << "]";
- MemoryChunk* chunk = MemoryChunk::FromAddress(ptr());
- if (chunk->owner()->identity() == OLD_SPACE) os << " in OldSpace";
+ if (GetHeapFromWritableObject(*this)->InOldSpace(*this)) {
+ os << " in OldSpace";
+ }
if (!IsMap()) os << "\n - map: " << Brief(map());
}
@@ -288,18 +296,18 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_MAP_VALUE_ITERATOR_TYPE:
JSMapIterator::cast(*this)->JSMapIteratorPrint(os);
break;
- case JS_WEAK_CELL_TYPE:
- JSWeakCell::cast(*this)->JSWeakCellPrint(os);
+ case WEAK_CELL_TYPE:
+ WeakCell::cast(*this)->WeakCellPrint(os);
break;
case JS_WEAK_REF_TYPE:
JSWeakRef::cast(*this)->JSWeakRefPrint(os);
break;
- case JS_WEAK_FACTORY_TYPE:
- JSWeakFactory::cast(*this)->JSWeakFactoryPrint(os);
+ case JS_FINALIZATION_GROUP_TYPE:
+ JSFinalizationGroup::cast(*this)->JSFinalizationGroupPrint(os);
break;
- case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
- JSWeakFactoryCleanupIterator::cast(*this)
- ->JSWeakFactoryCleanupIteratorPrint(os);
+ case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
+ JSFinalizationGroupCleanupIterator::cast(*this)
+ ->JSFinalizationGroupCleanupIteratorPrint(os);
break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(*this)->JSWeakMapPrint(os);
@@ -415,10 +423,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE:
case UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case STRING_TYPE:
case CONS_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
@@ -429,10 +435,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case EXTERNAL_ONE_BYTE_STRING_TYPE:
case SLICED_ONE_BYTE_STRING_TYPE:
case THIN_ONE_BYTE_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
case UNCACHED_EXTERNAL_STRING_TYPE:
case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
- case UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
@@ -454,19 +458,27 @@ void ByteArray::ByteArrayPrint(std::ostream& os) { // NOLINT
void BytecodeArray::BytecodeArrayPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "BytecodeArray");
+ os << "\n";
Disassemble(os);
}
void FreeSpace::FreeSpacePrint(std::ostream& os) { // NOLINT
- os << "free space, size " << Size();
+ os << "free space, size " << Size() << "\n";
}
template <class Traits>
void FixedTypedArray<Traits>::FixedTypedArrayPrint(
std::ostream& os) { // NOLINT
- os << "fixed " << Traits::Designator();
+ PrintHeader(os, Traits::ArrayTypeName());
+ os << "\n - length: " << length() << "\n - base_pointer: ";
+ if (base_pointer().ptr() == kNullAddress) {
+ os << "<nullptr>";
+ } else {
+ os << Brief(base_pointer());
+ }
+ os << "\n - external_pointer: " << external_pointer() << "\n";
}
bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
@@ -776,16 +788,25 @@ void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
SharedFunctionInfo fun_info = function()->shared();
if (fun_info->HasSourceCode()) {
Script script = Script::cast(fun_info->script());
- int lin = script->GetLineNumber(source_position()) + 1;
- int col = script->GetColumnNumber(source_position()) + 1;
String script_name = script->name()->IsString()
? String::cast(script->name())
: GetReadOnlyRoots().empty_string();
- os << "\n - source position: " << source_position();
- os << " (";
- script_name->PrintUC16(os);
- os << ", lin " << lin;
- os << ", col " << col;
+
+ os << "\n - source position: ";
+ // The source position can't be collected here if it is not already
+ // available, as collecting it would allocate memory.
+ if (fun_info->HasBytecodeArray() &&
+ fun_info->GetBytecodeArray()->HasSourcePositionTable()) {
+ os << source_position();
+ os << " (";
+ script_name->PrintUC16(os);
+ int lin = script->GetLineNumber(source_position()) + 1;
+ int col = script->GetColumnNumber(source_position()) + 1;
+ os << ", lin " << lin;
+ os << ", col " << col;
+ } else {
+ os << "unavailable";
+ }
os << ")";
}
}
@@ -940,8 +961,8 @@ void PrintContextWithHeader(std::ostream& os, Context context,
context->PrintHeader(os, type);
os << "\n - length: " << context->length();
os << "\n - scope_info: " << Brief(context->scope_info());
- os << "\n - previous: " << Brief(context->previous());
- os << "\n - extension_object: " << Brief(context->extension_object());
+ os << "\n - previous: " << Brief(context->unchecked_previous());
+ os << "\n - extension: " << Brief(context->extension());
os << "\n - native_context: " << Brief(context->native_context());
PrintFixedArrayElements(os, context);
os << "\n";
@@ -1110,22 +1131,23 @@ void FeedbackVector::FeedbackSlotPrint(std::ostream& os,
void FeedbackNexus::Print(std::ostream& os) { // NOLINT
switch (kind()) {
case FeedbackSlotKind::kCall:
- case FeedbackSlotKind::kLoadProperty:
- case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kCloneObject:
+ case FeedbackSlotKind::kHasKeyed:
+ case FeedbackSlotKind::kInstanceOf:
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kLoadKeyed:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kInstanceOf:
- case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kStoreKeyedStrict:
- case FeedbackSlotKind::kStoreInArrayLiteral:
- case FeedbackSlotKind::kCloneObject: {
- os << InlineCacheState2String(StateFromFeedback());
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed: {
+ os << InlineCacheState2String(ic_state());
break;
}
case FeedbackSlotKind::kBinaryOp: {
@@ -1170,7 +1192,7 @@ void JSMessageObject::JSMessageObjectPrint(std::ostream& os) { // NOLINT
void String::StringPrint(std::ostream& os) { // NOLINT
- if (!HasOnlyOneByteChars()) {
+ if (!IsOneByteRepresentation()) {
os << "u";
}
if (StringShape(*this).IsInternalized()) {
@@ -1273,14 +1295,16 @@ void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
JSCollectionIteratorPrint(os, "JSMapIterator");
}
-void JSWeakCell::JSWeakCellPrint(std::ostream& os) {
- JSObjectPrintHeader(os, *this, "JSWeakCell");
- os << "\n - factory: " << Brief(factory());
+void WeakCell::WeakCellPrint(std::ostream& os) {
+ PrintHeader(os, "WeakCell");
+ os << "\n - finalization_group: " << Brief(finalization_group());
os << "\n - target: " << Brief(target());
os << "\n - holdings: " << Brief(holdings());
os << "\n - prev: " << Brief(prev());
os << "\n - next: " << Brief(next());
- JSObjectPrintBody(os, *this);
+ os << "\n - key: " << Brief(key());
+ os << "\n - key_list_prev: " << Brief(key_list_prev());
+ os << "\n - key_list_next: " << Brief(key_list_next());
}
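Beyond the JSWeakCell to WeakCell rename, the printer exposes three new fields: a cell is linked both into its finalization group's active/cleared list (prev/next) and, when registered with an unregister token, into a per-key list hanging off key_map (key, key_list_prev, key_list_next). As a plain-struct sketch (field names taken from the printout; the types are stand-ins):

struct WeakCellSketch {
  void* finalization_group;       // owning JSFinalizationGroup
  void* target;                   // the weakly held object
  void* holdings;                 // value surfaced to the cleanup callback
  WeakCellSketch* prev;           // active_cells / cleared_cells linkage
  WeakCellSketch* next;
  void* key;                      // unregister token, if any
  WeakCellSketch* key_list_prev;  // key_map bucket linkage
  WeakCellSketch* key_list_next;
};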
void JSWeakRef::JSWeakRefPrint(std::ostream& os) {
@@ -1289,26 +1313,27 @@ void JSWeakRef::JSWeakRefPrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
-void JSWeakFactory::JSWeakFactoryPrint(std::ostream& os) {
- JSObjectPrintHeader(os, *this, "JSWeakFactory");
+void JSFinalizationGroup::JSFinalizationGroupPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSFinalizationGroup");
os << "\n - native_context: " << Brief(native_context());
os << "\n - cleanup: " << Brief(cleanup());
os << "\n - active_cells: " << Brief(active_cells());
os << "\n - cleared_cells: " << Brief(cleared_cells());
+ os << "\n - key_map: " << Brief(key_map());
JSObjectPrintBody(os, *this);
}
-void JSWeakFactoryCleanupIterator::JSWeakFactoryCleanupIteratorPrint(
- std::ostream& os) {
- JSObjectPrintHeader(os, *this, "JSWeakFactoryCleanupIterator");
- os << "\n - factory: " << Brief(factory());
+void JSFinalizationGroupCleanupIterator::
+ JSFinalizationGroupCleanupIteratorPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSFinalizationGroupCleanupIterator");
+ os << "\n - finalization_group: " << Brief(finalization_group());
JSObjectPrintBody(os, *this);
}
-void WeakFactoryCleanupJobTask::WeakFactoryCleanupJobTaskPrint(
+void FinalizationGroupCleanupJobTask::FinalizationGroupCleanupJobTaskPrint(
std::ostream& os) {
- PrintHeader(os, "WeakFactoryCleanupJobTask");
- os << "\n - factory: " << Brief(factory());
+ PrintHeader(os, "FinalizationGroupCleanupJobTask");
+ os << "\n - finalization_group: " << Brief(finalization_group());
}
void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
@@ -1342,6 +1367,10 @@ void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
os << "\n - byte_offset: " << byte_offset();
os << "\n - byte_length: " << byte_length();
os << "\n - length: " << Brief(length());
+ if (!buffer()->IsJSArrayBuffer()) {
+ os << "\n <invalid buffer>\n";
+ return;
+ }
if (WasDetached()) os << "\n - detached";
JSObjectPrintBody(os, *this, !WasDetached());
}
@@ -1359,6 +1388,10 @@ void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
os << "\n - buffer =" << Brief(buffer());
os << "\n - byte_offset: " << byte_offset();
os << "\n - byte_length: " << byte_length();
+ if (!buffer()->IsJSArrayBuffer()) {
+ os << "\n <invalid buffer>";
+ return;
+ }
if (WasDetached()) os << "\n - detached";
JSObjectPrintBody(os, *this, !WasDetached());
}
@@ -1398,6 +1431,9 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "\n - formal_parameter_count: "
<< shared()->internal_formal_parameter_count();
+ if (shared()->is_safe_to_skip_arguments_adaptor()) {
+ os << "\n - safe_to_skip_arguments_adaptor";
+ }
os << "\n - kind: " << shared()->kind();
os << "\n - context: " << Brief(context());
os << "\n - code: " << Brief(code());
@@ -1454,6 +1490,9 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
}
os << "\n - function_map_index: " << function_map_index();
os << "\n - formal_parameter_count: " << internal_formal_parameter_count();
+ if (is_safe_to_skip_arguments_adaptor()) {
+ os << "\n - safe_to_skip_arguments_adaptor";
+ }
os << "\n - expected_nof_properties: " << expected_nof_properties();
os << "\n - language_mode: " << language_mode();
os << "\n - data: " << Brief(function_data());
@@ -1581,7 +1620,8 @@ void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) { // NOLINT
}
void Foreign::ForeignPrint(std::ostream& os) { // NOLINT
- os << "foreign address : " << reinterpret_cast<void*>(foreign_address());
+ PrintHeader(os, "Foreign");
+ os << "\n - foreign address : " << reinterpret_cast<void*>(foreign_address());
os << "\n";
}
@@ -1735,6 +1775,13 @@ void Tuple3::Tuple3Print(std::ostream& os) { // NOLINT
os << "\n";
}
+void ClassPositions::ClassPositionsPrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "ClassPositions");
+ os << "\n - start position: " << start();
+ os << "\n - end position: " << end();
+ os << "\n";
+}
+
void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionPrint(
std::ostream& os) { // NOLINT
PrintHeader(os, "ArrayBoilerplateDescription");
@@ -2191,6 +2238,12 @@ void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
os << "\n - coverage_info: " << Brief(coverage_info());
}
+void StackTraceFrame::StackTraceFramePrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "StackTraceFrame");
+ os << "\n - frame_index: " << frame_index();
+ os << "\n - id: " << id();
+ os << "\n - frame_info: " << Brief(frame_info());
+}
void StackFrameInfo::StackFrameInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "StackFrame");
@@ -2415,7 +2468,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
Isolate* isolate;
// Read-only maps can't have transitions, which is fortunate because we need
// the isolate to iterate over the transitions.
- if (Isolate::FromWritableHeapObject(*this, &isolate)) {
+ if (GetIsolateFromWritableObject(*this, &isolate)) {
DisallowHeapAllocation no_gc;
TransitionsAccessor transitions(isolate, *this, &no_gc);
int nof_transitions = transitions.NumberOfTransitions();
@@ -2618,11 +2671,29 @@ void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
} // namespace internal
} // namespace v8
+namespace {
+
+inline i::Object GetObjectFromRaw(void* object) {
+ i::Address object_ptr = reinterpret_cast<i::Address>(object);
+#ifdef V8_COMPRESS_POINTERS
+ if (RoundDown<i::kPtrComprIsolateRootAlignment>(object_ptr) ==
+ i::kNullAddress) {
+ // Try to decompress pointer.
+ i::Isolate* isolate = i::Isolate::Current();
+ object_ptr = i::DecompressTaggedAny(isolate->isolate_root(),
+ static_cast<i::Tagged_t>(object_ptr));
+ }
+#endif
+ return i::Object(object_ptr);
+}
+
+} // namespace
+
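GetObjectFromRaw keeps the gdb helpers usable under pointer compression: if nothing is set above the low 32 bits, the argument is taken to be a compressed tagged value and is rebased on the current isolate's root before use. The heuristic in standalone form, assuming a 4 GB-aligned isolate root as the RoundDown over kPtrComprIsolateRootAlignment suggests:

#include <cstdint>

constexpr uint64_t kIsolateRootAlignmentSketch = uint64_t{1} << 32;  // 4 GB

uint64_t MaybeDecompressSketch(uint64_t raw, uint64_t isolate_root) {
  if ((raw & ~(kIsolateRootAlignmentSketch - 1)) == 0) {
    // Only the low 32 bits are populated: treat the value as compressed and
    // rebase it on the isolate root.
    return isolate_root + static_cast<uint32_t>(raw);
  }
  return raw;  // already a full-width pointer
}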
//
// The following functions are used by our gdb macros.
//
V8_EXPORT_PRIVATE extern void _v8_internal_Print_Object(void* object) {
- i::Object(reinterpret_cast<i::Address>(object))->Print();
+ GetObjectFromRaw(object)->Print();
}
V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
@@ -2662,7 +2733,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
V8_EXPORT_PRIVATE extern void _v8_internal_Print_LayoutDescriptor(
void* object) {
- i::Object o(reinterpret_cast<i::Address>(object));
+ i::Object o(GetObjectFromRaw(object));
if (!o->IsLayoutDescriptor()) {
printf("Please provide a layout descriptor\n");
} else {
@@ -2676,7 +2747,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_StackTrace() {
}
V8_EXPORT_PRIVATE extern void _v8_internal_Print_TransitionTree(void* object) {
- i::Object o(reinterpret_cast<i::Address>(object));
+ i::Object o(GetObjectFromRaw(object));
if (!o->IsMap()) {
printf("Please provide a valid Map\n");
} else {
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 119c6aed72..0dc72661c0 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -6,7 +6,6 @@
#include <algorithm>
#include <cmath>
-#include <iomanip>
#include <memory>
#include <sstream>
#include <vector>
@@ -19,10 +18,10 @@
#include "src/api-natives.h"
#include "src/api.h"
#include "src/arguments.h"
-#include "src/assembler-inl.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/bits.h"
+#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/builtins/builtins.h"
@@ -31,19 +30,17 @@
#include "src/counters.h"
#include "src/date.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/execution.h"
#include "src/field-index-inl.h"
#include "src/field-index.h"
#include "src/field-type.h"
#include "src/frames-inl.h"
+#include "src/function-kind.h"
#include "src/globals.h"
+#include "src/heap/heap-inl.h"
#include "src/ic/ic.h"
#include "src/identity-map.h"
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/interpreter/bytecode-decoder.h"
-#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
#include "src/log.h"
@@ -52,6 +49,7 @@
#include "src/message-template.h"
#include "src/microtask-queue.h"
#include "src/objects-body-descriptors-inl.h"
+#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
@@ -59,8 +57,10 @@
#include "src/objects/code-inl.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#ifdef V8_INTL_SUPPORT
@@ -85,28 +85,27 @@
#include "src/objects/js-segment-iterator.h"
#include "src/objects/js-segmenter.h"
#endif // V8_INTL_SUPPORT
-#include "src/code-comments.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/map-inl.h"
#include "src/objects/map.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/string-comparator.h"
#include "src/objects/struct-inl.h"
#include "src/ostreams.h"
#include "src/parsing/preparse-data.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
#include "src/regexp/jsregexp.h"
-#include "src/safepoint-table.h"
-#include "src/snapshot/code-serializer.h"
-#include "src/snapshot/snapshot.h"
#include "src/source-position-table.h"
#include "src/string-builder-inl.h"
#include "src/string-search.h"
#include "src/string-stream.h"
+#include "src/transitions-inl.h"
#include "src/unicode-decoder.h"
#include "src/unicode-inl.h"
#include "src/utils-inl.h"
@@ -114,15 +113,33 @@
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
-#ifdef ENABLE_DISASSEMBLER
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/eh-frame.h"
-#endif
-
namespace v8 {
namespace internal {
+ShouldThrow GetShouldThrow(Isolate* isolate, Maybe<ShouldThrow> should_throw) {
+ if (should_throw.IsJust()) return should_throw.FromJust();
+
+ LanguageMode mode = isolate->context()->scope_info()->language_mode();
+ if (mode == LanguageMode::kStrict) return kThrowOnError;
+
+ for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
+ if (!(it.frame()->is_optimized() || it.frame()->is_interpreted())) {
+ continue;
+ }
+ // Get the language mode from closure.
+ JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(it.frame());
+ std::vector<SharedFunctionInfo> functions;
+ js_frame->GetFunctions(&functions);
+ LanguageMode closure_language_mode = functions.back()->language_mode();
+ if (closure_language_mode > mode) {
+ mode = closure_language_mode;
+ }
+ break;
+ }
+
+ return is_sloppy(mode) ? kDontThrow : kThrowOnError;
+}
+
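GetShouldThrow is the resolution step for the Maybe<ShouldThrow> parameters threaded through this diff (SetPropertyOrElement, SetPropertyWithAccessor, and friends): an explicit policy from the caller wins; otherwise the policy falls out of the language mode of the innermost JavaScript frame, with strict code throwing on failure. The same decision with std::optional standing in for v8::Maybe:

#include <optional>

enum ShouldThrowSketch { kDontThrowSketch, kThrowOnErrorSketch };

ShouldThrowSketch ResolveShouldThrow(
    std::optional<ShouldThrowSketch> requested, bool closure_is_strict) {
  if (requested.has_value()) return *requested;  // explicit policy wins
  return closure_is_strict ? kThrowOnErrorSketch : kDontThrowSketch;
}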
bool ComparisonResultToBool(Operation op, ComparisonResult result) {
switch (op) {
case Operation::kLessThan:
@@ -167,11 +184,38 @@ Handle<FieldType> Object::OptimalType(Isolate* isolate,
return FieldType::Any(isolate);
}
-MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
- Handle<Object> object,
- Handle<Context> native_context,
- const char* method_name) {
- if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
+Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
+ Representation representation) {
+ if (!representation.IsDouble()) return object;
+ auto result = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+ if (object->IsUninitialized(isolate)) {
+ result->set_value_as_bits(kHoleNanInt64);
+ } else if (object->IsMutableHeapNumber()) {
+ // Ensure that all bits of the double value are preserved.
+ result->set_value_as_bits(
+ MutableHeapNumber::cast(*object)->value_as_bits());
+ } else {
+ result->set_value(object->Number());
+ }
+ return result;
+}
+
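NewStorageFor copies the payload via value_as_bits() rather than value() because, per the comment, every bit of the double must survive: round-tripping through a double value may canonicalize NaN payloads, and it would destroy the hole-NaN sentinel that marks uninitialized double fields. Bit-preserving copies in portable C++:

#include <cstdint>
#include <cstring>

uint64_t DoubleBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);  // keeps every NaN payload bit intact
  return bits;
}

double DoubleFromBits(uint64_t bits) {
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}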
+Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
+ Representation representation) {
+ DCHECK(!object->IsUninitialized(isolate));
+ if (!representation.IsDouble()) {
+ DCHECK(object->FitsRepresentation(representation));
+ return object;
+ }
+ return isolate->factory()->NewHeapNumber(
+ MutableHeapNumber::cast(*object)->value());
+}
+
+MaybeHandle<JSReceiver> Object::ToObjectImpl(Isolate* isolate,
+ Handle<Object> object,
+ const char* method_name) {
+ DCHECK(!object->IsJSReceiver()); // Use ToObject() for fast path.
+ Handle<Context> native_context = isolate->native_context();
Handle<JSFunction> constructor;
if (object->IsSmi()) {
constructor = handle(native_context->number_function(), isolate);
@@ -415,6 +459,10 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
// -- S y m b o l
Handle<Symbol> symbol = Handle<Symbol>::cast(input);
+ if (symbol->is_private_name()) {
+ return Handle<String>(String::cast(symbol->name()), isolate);
+ }
+
IncrementalStringBuilder builder(isolate);
builder.AppendCString("Symbol(");
if (symbol->name()->IsString()) {
@@ -476,8 +524,7 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
return isolate->factory()->NewStringFromAsciiChecked("[object Unknown]");
}
- receiver = Object::ToObject(isolate, input, isolate->native_context())
- .ToHandleChecked();
+ receiver = Object::ToObjectImpl(isolate, input).ToHandleChecked();
}
Handle<String> builtin_tag = handle(receiver->class_name(), isolate);
@@ -538,6 +585,11 @@ bool Object::BooleanValue(Isolate* isolate) {
return true;
}
+Object Object::ToBoolean(Isolate* isolate) {
+ if (IsBoolean()) return *this;
+ return isolate->heap()->ToBoolean(BooleanValue(isolate));
+}
+
namespace {
// TODO(bmeurer): Maybe we should introduce a marker interface Number,
@@ -988,62 +1040,6 @@ MaybeHandle<Object> Object::GetLengthFromArrayLike(Isolate* isolate,
}
// static
-Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
- for (; it->IsFound(); it->Next()) {
- switch (it->state()) {
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
- case LookupIterator::JSPROXY:
- return JSProxy::HasProperty(it->isolate(), it->GetHolder<JSProxy>(),
- it->GetName());
- case LookupIterator::INTERCEPTOR: {
- Maybe<PropertyAttributes> result =
- JSObject::GetPropertyAttributesWithInterceptor(it);
- if (result.IsNothing()) return Nothing<bool>();
- if (result.FromJust() != ABSENT) return Just(true);
- break;
- }
- case LookupIterator::ACCESS_CHECK: {
- if (it->HasAccess()) break;
- Maybe<PropertyAttributes> result =
- JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
- if (result.IsNothing()) return Nothing<bool>();
- return Just(result.FromJust() != ABSENT);
- }
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- // TypedArray out-of-bounds access.
- return Just(false);
- case LookupIterator::ACCESSOR:
- case LookupIterator::DATA:
- return Just(true);
- }
- }
- return Just(false);
-}
-
-// static
-Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
- Handle<Name> name) {
- if (object->IsJSModuleNamespace()) {
- PropertyDescriptor desc;
- return JSReceiver::GetOwnPropertyDescriptor(object->GetIsolate(), object,
- name, &desc);
- }
-
- if (object->IsJSObject()) { // Shortcut.
- LookupIterator it = LookupIterator::PropertyOrElement(
- object->GetIsolate(), object, name, object, LookupIterator::OWN);
- return HasProperty(&it);
- }
-
- Maybe<PropertyAttributes> attributes =
- JSReceiver::GetOwnPropertyAttributes(object, name);
- MAYBE_RETURN(attributes, Nothing<bool>());
- return Just(attributes.FromJust() != ABSENT);
-}
-
-// static
MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
OnNonExistent on_non_existent) {
for (; it->IsFound(); it->Next()) {
@@ -1215,35 +1211,6 @@ MaybeHandle<Object> JSProxy::CheckGetSetTrapResult(Isolate* isolate,
}
-Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
- for (; it->IsFound(); it->Next()) {
- switch (it->state()) {
- case LookupIterator::INTERCEPTOR:
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
- case LookupIterator::ACCESS_CHECK:
- // Support calling this method without an active context, but refuse
- // access to access-checked objects in that case.
- if (!it->isolate()->context().is_null() && it->HasAccess()) continue;
- V8_FALLTHROUGH;
- case LookupIterator::JSPROXY:
- it->NotFound();
- return it->isolate()->factory()->undefined_value();
- case LookupIterator::ACCESSOR:
- // TODO(verwaest): For now this doesn't call into AccessorInfo, since
- // clients don't need it. Update once relevant.
- it->NotFound();
- return it->isolate()->factory()->undefined_value();
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return it->isolate()->factory()->undefined_value();
- case LookupIterator::DATA:
- return it->GetDataValue();
- }
- }
- return it->isolate()->factory()->undefined_value();
-}
-
bool Object::ToInt32(int32_t* value) {
if (IsSmi()) {
@@ -1356,188 +1323,6 @@ Handle<TemplateList> TemplateList::Add(Isolate* isolate,
return Handle<TemplateList>::cast(fixed_array);
}
-// static
-MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
- Handle<JSReceiver> new_target,
- Handle<AllocationSite> site) {
- // If called through new, new.target can be:
- // - a subclass of constructor,
- // - a proxy wrapper around constructor, or
- // - the constructor itself.
- // If called through Reflect.construct, it's guaranteed to be a constructor.
- Isolate* const isolate = constructor->GetIsolate();
- DCHECK(constructor->IsConstructor());
- DCHECK(new_target->IsConstructor());
- DCHECK(!constructor->has_initial_map() ||
- constructor->initial_map()->instance_type() != JS_FUNCTION_TYPE);
-
- Handle<Map> initial_map;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, initial_map,
- JSFunction::GetDerivedMap(isolate, constructor, new_target), JSObject);
- Handle<JSObject> result =
- isolate->factory()->NewJSObjectFromMap(initial_map, NOT_TENURED, site);
- if (initial_map->is_dictionary_map()) {
- Handle<NameDictionary> dictionary =
- NameDictionary::New(isolate, NameDictionary::kInitialCapacity);
- result->SetProperties(*dictionary);
- }
- isolate->counters()->constructed_objects()->Increment();
- isolate->counters()->constructed_objects_runtime()->Increment();
- return result;
-}
-
-// 9.1.12 ObjectCreate ( proto [ , internalSlotsList ] )
-// Notice: This is NOT 19.1.2.2 Object.create ( O, Properties )
-MaybeHandle<JSObject> JSObject::ObjectCreate(Isolate* isolate,
- Handle<Object> prototype) {
- // Generate the map with the specified {prototype} based on the Object
- // function's initial map from the current native context.
- // TODO(bmeurer): Use a dedicated cache for Object.create; think about
- // slack tracking for Object.create.
- Handle<Map> map =
- Map::GetObjectCreateMap(isolate, Handle<HeapObject>::cast(prototype));
-
- // Actually allocate the object.
- Handle<JSObject> object;
- if (map->is_dictionary_map()) {
- object = isolate->factory()->NewSlowJSObjectFromMap(map);
- } else {
- object = isolate->factory()->NewJSObjectFromMap(map);
- }
- return object;
-}
-
-void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
- DCHECK(object->HasSmiOrObjectElements() ||
- object->HasFastStringWrapperElements());
- FixedArray raw_elems = FixedArray::cast(object->elements());
- Heap* heap = object->GetHeap();
- if (raw_elems->map() != ReadOnlyRoots(heap).fixed_cow_array_map()) return;
- Isolate* isolate = heap->isolate();
- Handle<FixedArray> elems(raw_elems, isolate);
- Handle<FixedArray> writable_elems = isolate->factory()->CopyFixedArrayWithMap(
- elems, isolate->factory()->fixed_array_map());
- object->set_elements(*writable_elems);
- isolate->counters()->cow_arrays_converted()->Increment();
-}
-
-int JSObject::GetHeaderSize(InstanceType type,
- bool function_has_prototype_slot) {
- switch (type) {
- case JS_OBJECT_TYPE:
- case JS_API_OBJECT_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
- return JSObject::kHeaderSize;
- case JS_GENERATOR_OBJECT_TYPE:
- return JSGeneratorObject::kSize;
- case JS_ASYNC_FUNCTION_OBJECT_TYPE:
- return JSAsyncFunctionObject::kSize;
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- return JSAsyncGeneratorObject::kSize;
- case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- return JSAsyncFromSyncIterator::kSize;
- case JS_GLOBAL_PROXY_TYPE:
- return JSGlobalProxy::kSize;
- case JS_GLOBAL_OBJECT_TYPE:
- return JSGlobalObject::kSize;
- case JS_BOUND_FUNCTION_TYPE:
- return JSBoundFunction::kSize;
- case JS_FUNCTION_TYPE:
- return JSFunction::GetHeaderSize(function_has_prototype_slot);
- case JS_VALUE_TYPE:
- return JSValue::kSize;
- case JS_DATE_TYPE:
- return JSDate::kSize;
- case JS_ARRAY_TYPE:
- return JSArray::kSize;
- case JS_ARRAY_BUFFER_TYPE:
- return JSArrayBuffer::kHeaderSize;
- case JS_ARRAY_ITERATOR_TYPE:
- return JSArrayIterator::kSize;
- case JS_TYPED_ARRAY_TYPE:
- return JSTypedArray::kHeaderSize;
- case JS_DATA_VIEW_TYPE:
- return JSDataView::kHeaderSize;
- case JS_SET_TYPE:
- return JSSet::kSize;
- case JS_MAP_TYPE:
- return JSMap::kSize;
- case JS_SET_KEY_VALUE_ITERATOR_TYPE:
- case JS_SET_VALUE_ITERATOR_TYPE:
- return JSSetIterator::kSize;
- case JS_MAP_KEY_ITERATOR_TYPE:
- case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
- case JS_MAP_VALUE_ITERATOR_TYPE:
- return JSMapIterator::kSize;
- case JS_WEAK_CELL_TYPE:
- return JSWeakCell::kSize;
- case JS_WEAK_REF_TYPE:
- return JSWeakRef::kSize;
- case JS_WEAK_FACTORY_TYPE:
- return JSWeakFactory::kSize;
- case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
- return JSWeakFactoryCleanupIterator::kSize;
- case JS_WEAK_MAP_TYPE:
- return JSWeakMap::kSize;
- case JS_WEAK_SET_TYPE:
- return JSWeakSet::kSize;
- case JS_PROMISE_TYPE:
- return JSPromise::kSize;
- case JS_REGEXP_TYPE:
- return JSRegExp::kSize;
- case JS_REGEXP_STRING_ITERATOR_TYPE:
- return JSRegExpStringIterator::kSize;
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- return JSObject::kHeaderSize;
- case JS_MESSAGE_OBJECT_TYPE:
- return JSMessageObject::kSize;
- case JS_ARGUMENTS_TYPE:
- return JSObject::kHeaderSize;
- case JS_ERROR_TYPE:
- return JSObject::kHeaderSize;
- case JS_STRING_ITERATOR_TYPE:
- return JSStringIterator::kSize;
- case JS_MODULE_NAMESPACE_TYPE:
- return JSModuleNamespace::kHeaderSize;
-#ifdef V8_INTL_SUPPORT
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
- return JSV8BreakIterator::kSize;
- case JS_INTL_COLLATOR_TYPE:
- return JSCollator::kSize;
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
- return JSDateTimeFormat::kSize;
- case JS_INTL_LIST_FORMAT_TYPE:
- return JSListFormat::kSize;
- case JS_INTL_LOCALE_TYPE:
- return JSLocale::kSize;
- case JS_INTL_NUMBER_FORMAT_TYPE:
- return JSNumberFormat::kSize;
- case JS_INTL_PLURAL_RULES_TYPE:
- return JSPluralRules::kSize;
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- return JSRelativeTimeFormat::kSize;
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
- return JSSegmentIterator::kSize;
- case JS_INTL_SEGMENTER_TYPE:
- return JSSegmenter::kSize;
-#endif // V8_INTL_SUPPORT
- case WASM_GLOBAL_TYPE:
- return WasmGlobalObject::kSize;
- case WASM_INSTANCE_TYPE:
- return WasmInstanceObject::kSize;
- case WASM_MEMORY_TYPE:
- return WasmMemoryObject::kSize;
- case WASM_MODULE_TYPE:
- return WasmModuleObject::kSize;
- case WASM_TABLE_TYPE:
- return WasmTableObject::kSize;
- case WASM_EXCEPTION_TYPE:
- return WasmExceptionObject::kSize;
- default:
- UNREACHABLE();
- }
-}
// ES6 9.5.1
// static
@@ -1634,7 +1419,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
}
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
- kDontThrow);
+ Just(kDontThrow));
Handle<Object> result = args.CallAccessorGetter(info, name);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.is_null()) return isolate->factory()->undefined_value();
@@ -1656,8 +1441,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
// Regular accessor.
Handle<Object> getter(AccessorPair::cast(*structure)->getter(), isolate);
if (getter->IsFunctionTemplateInfo()) {
- SaveContext save(isolate);
- isolate->set_context(*holder->GetCreationContext());
+ SaveAndSwitchContext save(isolate, *holder->GetCreationContext());
return Builtins::InvokeApiFunction(
isolate, false, Handle<FunctionTemplateInfo>::cast(getter), receiver, 0,
nullptr, isolate->factory()->undefined_value());
@@ -1699,9 +1483,9 @@ bool AccessorInfo::IsCompatibleReceiverMap(Handle<AccessorInfo> info,
->IsTemplateFor(*map);
}
-Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
- Handle<Object> value,
- ShouldThrow should_throw) {
+Maybe<bool> Object::SetPropertyWithAccessor(
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> maybe_should_throw) {
Isolate* isolate = it->isolate();
Handle<Object> structure = it->GetAccessors();
Handle<Object> receiver = it->GetReceiver();
@@ -1746,7 +1530,7 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
// Here we handle both cases using GenericNamedPropertySetterCallback and
// its Call method.
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
- should_throw);
+ maybe_should_throw);
Handle<Object> result = args.CallAccessorSetter(info, name, value);
// In the case of AccessorNameSetterCallback, we know that the result value
// cannot have been set, so the result of Call will be null. In the case of
@@ -1754,15 +1538,15 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
// (signalling an exception) or a boolean Oddball.
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
if (result.is_null()) return Just(true);
- DCHECK(result->BooleanValue(isolate) || should_throw == kDontThrow);
+ DCHECK(result->BooleanValue(isolate) ||
+ GetShouldThrow(isolate, maybe_should_throw) == kDontThrow);
return Just(result->BooleanValue(isolate));
}
// Regular accessor.
Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
if (setter->IsFunctionTemplateInfo()) {
- SaveContext save(isolate);
- isolate->set_context(*holder->GetCreationContext());
+ SaveAndSwitchContext save(isolate, *holder->GetCreationContext());
Handle<Object> argv[] = {value};
RETURN_ON_EXCEPTION_VALUE(
isolate, Builtins::InvokeApiFunction(
@@ -1774,15 +1558,14 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
} else if (setter->IsCallable()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return SetPropertyWithDefinedSetter(
- receiver, Handle<JSReceiver>::cast(setter), value, should_throw);
+ receiver, Handle<JSReceiver>::cast(setter), value, maybe_should_throw);
}
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, maybe_should_throw),
NewTypeError(MessageTemplate::kNoSetterInCallback,
it->GetName(), it->GetHolder<JSObject>()));
}
-
MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
Handle<Object> receiver,
Handle<JSReceiver> getter) {
@@ -1805,11 +1588,9 @@ MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
return Execution::Call(isolate, getter, receiver, 0, nullptr);
}
-
-Maybe<bool> Object::SetPropertyWithDefinedSetter(Handle<Object> receiver,
- Handle<JSReceiver> setter,
- Handle<Object> value,
- ShouldThrow should_throw) {
+Maybe<bool> Object::SetPropertyWithDefinedSetter(
+ Handle<Object> receiver, Handle<JSReceiver> setter, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw) {
Isolate* isolate = setter->GetIsolate();
Handle<Object> argv[] = { value };
@@ -1819,545 +1600,6 @@ Maybe<bool> Object::SetPropertyWithDefinedSetter(Handle<Object> receiver,
return Just(true);
}
-
-// static
-bool JSObject::AllCanRead(LookupIterator* it) {
- // Skip current iteration, it's in state ACCESS_CHECK or INTERCEPTOR, both of
- // which have already been checked.
- DCHECK(it->state() == LookupIterator::ACCESS_CHECK ||
- it->state() == LookupIterator::INTERCEPTOR);
- for (it->Next(); it->IsFound(); it->Next()) {
- if (it->state() == LookupIterator::ACCESSOR) {
- auto accessors = it->GetAccessors();
- if (accessors->IsAccessorInfo()) {
- if (AccessorInfo::cast(*accessors)->all_can_read()) return true;
- }
- } else if (it->state() == LookupIterator::INTERCEPTOR) {
- if (it->GetInterceptor()->all_can_read()) return true;
- } else if (it->state() == LookupIterator::JSPROXY) {
- // Stop lookupiterating. And no, AllCanNotRead.
- return false;
- }
- }
- return false;
-}
-
-namespace {
-
-MaybeHandle<Object> GetPropertyWithInterceptorInternal(
- LookupIterator* it, Handle<InterceptorInfo> interceptor, bool* done) {
- *done = false;
- Isolate* isolate = it->isolate();
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- if (interceptor->getter()->IsUndefined(isolate)) {
- return isolate->factory()->undefined_value();
- }
-
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- Handle<Object> result;
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, receiver, Object::ConvertReceiver(isolate, receiver), Object);
- }
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, kDontThrow);
-
- if (it->IsElement()) {
- result = args.CallIndexedGetter(interceptor, it->index());
- } else {
- result = args.CallNamedGetter(interceptor, it->name());
- }
-
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (result.is_null()) return isolate->factory()->undefined_value();
- *done = true;
- // Rebox handle before return
- return handle(*result, isolate);
-}
-
-Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
- LookupIterator* it, Handle<InterceptorInfo> interceptor) {
- Isolate* isolate = it->isolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc(isolate);
- HandleScope scope(isolate);
-
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- DCHECK_IMPLIES(!it->IsElement() && it->name()->IsSymbol(),
- interceptor->can_intercept_symbols());
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
- Object::ConvertReceiver(isolate, receiver),
- Nothing<PropertyAttributes>());
- }
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, kDontThrow);
- if (!interceptor->query()->IsUndefined(isolate)) {
- Handle<Object> result;
- if (it->IsElement()) {
- result = args.CallIndexedQuery(interceptor, it->index());
- } else {
- result = args.CallNamedQuery(interceptor, it->name());
- }
- if (!result.is_null()) {
- int32_t value;
- CHECK(result->ToInt32(&value));
- return Just(static_cast<PropertyAttributes>(value));
- }
- } else if (!interceptor->getter()->IsUndefined(isolate)) {
- // TODO(verwaest): Use GetPropertyWithInterceptor?
- Handle<Object> result;
- if (it->IsElement()) {
- result = args.CallIndexedGetter(interceptor, it->index());
- } else {
- result = args.CallNamedGetter(interceptor, it->name());
- }
- if (!result.is_null()) return Just(DONT_ENUM);
- }
-
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
- return Just(ABSENT);
-}
-
-Maybe<bool> SetPropertyWithInterceptorInternal(
- LookupIterator* it, Handle<InterceptorInfo> interceptor,
- ShouldThrow should_throw, Handle<Object> value) {
- Isolate* isolate = it->isolate();
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- if (interceptor->setter()->IsUndefined(isolate)) return Just(false);
-
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- bool result;
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
- Object::ConvertReceiver(isolate, receiver),
- Nothing<bool>());
- }
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, should_throw);
-
- if (it->IsElement()) {
- // TODO(neis): In the future, we may want to actually return the
- // interceptor's result, which then should be a boolean.
- result = !args.CallIndexedSetter(interceptor, it->index(), value).is_null();
- } else {
- result = !args.CallNamedSetter(interceptor, it->name(), value).is_null();
- }
-
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
- return Just(result);
-}
-
-Maybe<bool> DefinePropertyWithInterceptorInternal(
- LookupIterator* it, Handle<InterceptorInfo> interceptor,
- ShouldThrow should_throw, PropertyDescriptor& desc) {
- Isolate* isolate = it->isolate();
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- if (interceptor->definer()->IsUndefined(isolate)) return Just(false);
-
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- bool result;
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
- Object::ConvertReceiver(isolate, receiver),
- Nothing<bool>());
- }
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, should_throw);
-
- std::unique_ptr<v8::PropertyDescriptor> descriptor(
- new v8::PropertyDescriptor());
- if (PropertyDescriptor::IsAccessorDescriptor(&desc)) {
- descriptor.reset(new v8::PropertyDescriptor(
- v8::Utils::ToLocal(desc.get()), v8::Utils::ToLocal(desc.set())));
- } else if (PropertyDescriptor::IsDataDescriptor(&desc)) {
- if (desc.has_writable()) {
- descriptor.reset(new v8::PropertyDescriptor(
- v8::Utils::ToLocal(desc.value()), desc.writable()));
- } else {
- descriptor.reset(
- new v8::PropertyDescriptor(v8::Utils::ToLocal(desc.value())));
- }
- }
- if (desc.has_enumerable()) {
- descriptor->set_enumerable(desc.enumerable());
- }
- if (desc.has_configurable()) {
- descriptor->set_configurable(desc.configurable());
- }
-
- if (it->IsElement()) {
- result = !args.CallIndexedDefiner(interceptor, it->index(), *descriptor)
- .is_null();
- } else {
- result =
- !args.CallNamedDefiner(interceptor, it->name(), *descriptor).is_null();
- }
-
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
- return Just(result);
-}
-
-} // namespace
-
-MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
- LookupIterator* it) {
- Isolate* isolate = it->isolate();
- Handle<JSObject> checked = it->GetHolder<JSObject>();
- Handle<InterceptorInfo> interceptor =
- it->GetInterceptorForFailedAccessCheck();
- if (interceptor.is_null()) {
- while (AllCanRead(it)) {
- if (it->state() == LookupIterator::ACCESSOR) {
- return Object::GetPropertyWithAccessor(it);
- }
- DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
- bool done;
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- GetPropertyWithInterceptor(it, &done), Object);
- if (done) return result;
- }
-
- } else {
- Handle<Object> result;
- bool done;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- GetPropertyWithInterceptorInternal(it, interceptor, &done), Object);
- if (done) return result;
- }
-
- // Cross-Origin [[Get]] of Well-Known Symbols does not throw, and returns
- // undefined.
- Handle<Name> name = it->GetName();
- if (name->IsSymbol() && Symbol::cast(*name)->is_well_known_symbol()) {
- return it->factory()->undefined_value();
- }
-
- isolate->ReportFailedAccessCheck(checked);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return it->factory()->undefined_value();
-}
-
-
-Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
- LookupIterator* it) {
- Isolate* isolate = it->isolate();
- Handle<JSObject> checked = it->GetHolder<JSObject>();
- Handle<InterceptorInfo> interceptor =
- it->GetInterceptorForFailedAccessCheck();
- if (interceptor.is_null()) {
- while (AllCanRead(it)) {
- if (it->state() == LookupIterator::ACCESSOR) {
- return Just(it->property_attributes());
- }
- DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
- auto result = GetPropertyAttributesWithInterceptor(it);
- if (isolate->has_scheduled_exception()) break;
- if (result.IsJust() && result.FromJust() != ABSENT) return result;
- }
- } else {
- Maybe<PropertyAttributes> result =
- GetPropertyAttributesWithInterceptorInternal(it, interceptor);
- if (isolate->has_pending_exception()) return Nothing<PropertyAttributes>();
- if (result.FromMaybe(ABSENT) != ABSENT) return result;
- }
- isolate->ReportFailedAccessCheck(checked);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
- return Just(ABSENT);
-}
-
-
-// static
-bool JSObject::AllCanWrite(LookupIterator* it) {
- for (; it->IsFound() && it->state() != LookupIterator::JSPROXY; it->Next()) {
- if (it->state() == LookupIterator::ACCESSOR) {
- Handle<Object> accessors = it->GetAccessors();
- if (accessors->IsAccessorInfo()) {
- if (AccessorInfo::cast(*accessors)->all_can_write()) return true;
- }
- }
- }
- return false;
-}
-
-
-Maybe<bool> JSObject::SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value, ShouldThrow should_throw) {
- Isolate* isolate = it->isolate();
- Handle<JSObject> checked = it->GetHolder<JSObject>();
- Handle<InterceptorInfo> interceptor =
- it->GetInterceptorForFailedAccessCheck();
- if (interceptor.is_null()) {
- if (AllCanWrite(it)) {
- return Object::SetPropertyWithAccessor(it, value, should_throw);
- }
- } else {
- Maybe<bool> result = SetPropertyWithInterceptorInternal(
- it, interceptor, should_throw, value);
- if (isolate->has_pending_exception()) return Nothing<bool>();
- if (result.IsJust()) return result;
- }
- isolate->ReportFailedAccessCheck(checked);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- return Just(true);
-}
-
-
-void JSObject::SetNormalizedProperty(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyDetails details) {
- DCHECK(!object->HasFastProperties());
- DCHECK(name->IsUniqueName());
- Isolate* isolate = object->GetIsolate();
-
- uint32_t hash = name->Hash();
-
- if (object->IsJSGlobalObject()) {
- Handle<JSGlobalObject> global_obj = Handle<JSGlobalObject>::cast(object);
- Handle<GlobalDictionary> dictionary(global_obj->global_dictionary(),
- isolate);
- int entry = dictionary->FindEntry(ReadOnlyRoots(isolate), name, hash);
-
- if (entry == GlobalDictionary::kNotFound) {
- DCHECK_IMPLIES(global_obj->map()->is_prototype_map(),
- Map::IsPrototypeChainInvalidated(global_obj->map()));
- auto cell = isolate->factory()->NewPropertyCell(name);
- cell->set_value(*value);
- auto cell_type = value->IsUndefined(isolate)
- ? PropertyCellType::kUndefined
- : PropertyCellType::kConstant;
- details = details.set_cell_type(cell_type);
- value = cell;
- dictionary =
- GlobalDictionary::Add(isolate, dictionary, name, value, details);
- global_obj->set_global_dictionary(*dictionary);
- } else {
- Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
- isolate, dictionary, entry, value, details);
- cell->set_value(*value);
- }
- } else {
- Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
-
- int entry = dictionary->FindEntry(isolate, name);
- if (entry == NameDictionary::kNotFound) {
- DCHECK_IMPLIES(object->map()->is_prototype_map(),
- Map::IsPrototypeChainInvalidated(object->map()));
- dictionary =
- NameDictionary::Add(isolate, dictionary, name, value, details);
- object->SetProperties(*dictionary);
- } else {
- PropertyDetails original_details = dictionary->DetailsAt(entry);
- int enumeration_index = original_details.dictionary_index();
- DCHECK_GT(enumeration_index, 0);
- details = details.set_index(enumeration_index);
- dictionary->SetEntry(isolate, entry, *name, *value, details);
- }
- }
-}
-
-// static
-Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
- Handle<JSReceiver> object,
- Handle<Object> proto) {
- PrototypeIterator iter(isolate, object, kStartAtReceiver);
- while (true) {
- if (!iter.AdvanceFollowingProxies()) return Nothing<bool>();
- if (iter.IsAtEnd()) return Just(false);
- if (PrototypeIterator::GetCurrent(iter).is_identical_to(proto)) {
- return Just(true);
- }
- }
-}
-
-namespace {
-
-bool HasExcludedProperty(
- const ScopedVector<Handle<Object>>* excluded_properties,
- Handle<Object> search_element) {
- // TODO(gsathya): Change this to be a hashtable.
- for (int i = 0; i < excluded_properties->length(); i++) {
- if (search_element->SameValue(*excluded_properties->at(i))) {
- return true;
- }
- }
-
- return false;
-}
-
-V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
- Handle<JSReceiver> target, Handle<Object> source,
- const ScopedVector<Handle<Object>>* excluded_properties, bool use_set) {
- // Non-empty strings are the only non-JSReceivers that need to be handled
- // explicitly by Object.assign.
- if (!source->IsJSReceiver()) {
- return Just(!source->IsString() || String::cast(*source)->length() == 0);
- }
-
- // If the target is deprecated, the object will be updated on first store. If
- // the source for that store equals the target, this will invalidate the
- // cached representation of the source. Preventively upgrade the target.
- // Do this on each iteration since any property load could cause deprecation.
- if (target->map()->is_deprecated()) {
- JSObject::MigrateInstance(Handle<JSObject>::cast(target));
- }
-
- Isolate* isolate = target->GetIsolate();
- Handle<Map> map(JSReceiver::cast(*source)->map(), isolate);
-
- if (!map->IsJSObjectMap()) return Just(false);
- if (!map->OnlyHasSimpleProperties()) return Just(false);
-
- Handle<JSObject> from = Handle<JSObject>::cast(source);
- if (from->elements() != ReadOnlyRoots(isolate).empty_fixed_array()) {
- return Just(false);
- }
-
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- int length = map->NumberOfOwnDescriptors();
-
- bool stable = true;
-
- for (int i = 0; i < length; i++) {
- Handle<Name> next_key(descriptors->GetKey(i), isolate);
- Handle<Object> prop_value;
- // Directly decode from the descriptor array if |from| did not change shape.
- if (stable) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (!details.IsEnumerable()) continue;
- if (details.kind() == kData) {
- if (details.location() == kDescriptor) {
- prop_value = handle(descriptors->GetStrongValue(i), isolate);
- } else {
- Representation representation = details.representation();
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- prop_value = JSObject::FastPropertyAt(from, representation, index);
- }
- } else {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value,
- JSReceiver::GetProperty(isolate, from, next_key), Nothing<bool>());
- stable = from->map() == *map;
- }
- } else {
- // If the map did change, do a slower lookup. We are still guaranteed that
- // the object has a simple shape, and that the key is a name.
- LookupIterator it(from, next_key, from,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (!it.IsFound()) continue;
- DCHECK(it.state() == LookupIterator::DATA ||
- it.state() == LookupIterator::ACCESSOR);
- if (!it.IsEnumerable()) continue;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
- }
-
- if (use_set) {
- LookupIterator it(target, next_key, target);
- Maybe<bool> result = Object::SetProperty(
- &it, prop_value, LanguageMode::kStrict, StoreOrigin::kNamed);
- if (result.IsNothing()) return result;
- if (stable) stable = from->map() == *map;
- } else {
- if (excluded_properties != nullptr &&
- HasExcludedProperty(excluded_properties, next_key)) {
- continue;
- }
-
- // 4a ii 2. Perform ? CreateDataProperty(target, nextKey, propValue).
- bool success;
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, target, next_key, &success, LookupIterator::OWN);
- CHECK(success);
- CHECK(JSObject::CreateDataProperty(&it, prop_value, kThrowOnError)
- .FromJust());
- }
- }
-
- return Just(true);
-}
-} // namespace
-
-// static
-Maybe<bool> JSReceiver::SetOrCopyDataProperties(
- Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
- const ScopedVector<Handle<Object>>* excluded_properties, bool use_set) {
- Maybe<bool> fast_assign =
- FastAssign(target, source, excluded_properties, use_set);
- if (fast_assign.IsNothing()) return Nothing<bool>();
- if (fast_assign.FromJust()) return Just(true);
-
- Handle<JSReceiver> from = Object::ToObject(isolate, source).ToHandleChecked();
- // 3b. Let keys be ? from.[[OwnPropertyKeys]]().
- Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, keys,
- KeyAccumulator::GetKeys(from, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
- GetKeysConversion::kKeepNumbers),
- Nothing<bool>());
-
- // 4. Repeat for each element nextKey of keys in List order,
- for (int j = 0; j < keys->length(); ++j) {
- Handle<Object> next_key(keys->get(j), isolate);
- // 4a i. Let desc be ? from.[[GetOwnProperty]](nextKey).
- PropertyDescriptor desc;
- Maybe<bool> found =
- JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
- if (found.IsNothing()) return Nothing<bool>();
- // 4a ii. If desc is not undefined and desc.[[Enumerable]] is true, then
- if (found.FromJust() && desc.enumerable()) {
- // 4a ii 1. Let propValue be ? Get(from, nextKey).
- Handle<Object> prop_value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value,
- Runtime::GetObjectProperty(isolate, from, next_key), Nothing<bool>());
-
- if (use_set) {
- // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
- Handle<Object> status;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, status,
- Runtime::SetObjectProperty(isolate, target, next_key, prop_value,
- LanguageMode::kStrict,
- StoreOrigin::kMaybeKeyed),
- Nothing<bool>());
- } else {
- if (excluded_properties != nullptr &&
- HasExcludedProperty(excluded_properties, next_key)) {
- continue;
- }
-
- // 4a ii 2. Perform ! CreateDataProperty(target, nextKey, propValue).
- bool success;
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, target, next_key, &success, LookupIterator::OWN);
- CHECK(success);
- CHECK(JSObject::CreateDataProperty(&it, prop_value, kThrowOnError)
- .FromJust());
- }
- }
- }
-
- return Just(true);
-}
-
Map Object::GetPrototypeChainRootMap(Isolate* isolate) const {
DisallowHeapAllocation no_alloc;
if (IsSmi()) {
@@ -2369,21 +1611,6 @@ Map Object::GetPrototypeChainRootMap(Isolate* isolate) const {
return heap_object->map()->GetPrototypeChainRootMap(isolate);
}
-Map Map::GetPrototypeChainRootMap(Isolate* isolate) const {
- DisallowHeapAllocation no_alloc;
- if (IsJSReceiverMap()) {
- return *this;
- }
- int constructor_function_index = GetConstructorFunctionIndex();
- if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
- Context native_context = isolate->context()->native_context();
- JSFunction constructor_function =
- JSFunction::cast(native_context->get(constructor_function_index));
- return constructor_function->initial_map();
- }
- return ReadOnlyRoots(isolate).null_value()->map();
-}
-
Smi Object::GetOrCreateHash(Isolate* isolate) {
DisallowHeapAllocation no_gc;
Object hash = Object::GetSimpleHash(*this);
@@ -2615,842 +1842,10 @@ void Smi::SmiPrint(std::ostream& os) const { // NOLINT
os << value();
}
-Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
- PretenureFlag pretenure) {
- DCHECK_NE(cons->second()->length(), 0);
-
- // TurboFan can create cons strings with empty first parts.
- while (cons->first()->length() == 0) {
- // We do not want to call this function recursively. Therefore we call
- // String::Flatten only in those cases where String::SlowFlatten is not
- // called again.
- if (cons->second()->IsConsString() && !cons->second()->IsFlat()) {
- cons = handle(ConsString::cast(cons->second()), isolate);
- } else {
- return String::Flatten(isolate, handle(cons->second(), isolate));
- }
- }
-
- DCHECK(AllowHeapAllocation::IsAllowed());
- int length = cons->length();
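- // A cons string that is already in old space gets a tenured flat result;
- // only new-space strings honor the requested pretenure flag.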
- PretenureFlag tenure = Heap::InNewSpace(*cons) ? pretenure : TENURED;
- Handle<SeqString> result;
- if (cons->IsOneByteRepresentation()) {
- Handle<SeqOneByteString> flat = isolate->factory()->NewRawOneByteString(
- length, tenure).ToHandleChecked();
- DisallowHeapAllocation no_gc;
- WriteToFlat(*cons, flat->GetChars(no_gc), 0, length);
- result = flat;
- } else {
- Handle<SeqTwoByteString> flat = isolate->factory()->NewRawTwoByteString(
- length, tenure).ToHandleChecked();
- DisallowHeapAllocation no_gc;
- WriteToFlat(*cons, flat->GetChars(no_gc), 0, length);
- result = flat;
- }
- cons->set_first(isolate, *result);
- cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
- DCHECK(result->IsFlat());
- return result;
-}
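For intuition, flattening a cons string is an in-order copy of the rope's leaves into one contiguous buffer. A minimal standalone sketch with toy types (not V8's actual representation):

#include <string>

struct Rope {
  std::string leaf;              // meaningful only when both children are null
  const Rope* first = nullptr;   // left child
  const Rope* second = nullptr;  // right child
};

// Appends all leaves, left to right, into |out|.
void WriteToFlat(const Rope* node, std::string* out) {
  if (node == nullptr) return;
  if (node->first == nullptr && node->second == nullptr) {
    *out += node->leaf;
    return;
  }
  WriteToFlat(node->first, out);
  WriteToFlat(node->second, out);
}

V8's String::WriteToFlat performs the equivalent traversal over ConsString halves.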
-
-
-
-bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
- DisallowHeapAllocation no_allocation;
- // Externalizing twice leaks the external resource, so it's
- // prohibited by the API.
- DCHECK(this->SupportsExternalization());
- DCHECK(resource->IsCacheable());
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- // Assert that the resource and the string are equivalent.
- DCHECK(static_cast<size_t>(this->length()) == resource->length());
- ScopedVector<uc16> smart_chars(this->length());
- String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
- DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
- resource->length() * sizeof(smart_chars[0])));
- }
-#endif  // ENABLE_SLOW_DCHECKS
- int size = this->Size(); // Byte size of the original string.
- // Abort if size does not allow in-place conversion.
- if (size < ExternalString::kUncachedSize) return false;
- Isolate* isolate;
- // Read-only strings cannot be made external, since that would mutate the
- // string.
- if (!Isolate::FromWritableHeapObject(*this, &isolate)) return false;
- Heap* heap = isolate->heap();
- bool is_one_byte = this->IsOneByteRepresentation();
- bool is_internalized = this->IsInternalizedString();
- bool has_pointers = StringShape(*this).IsIndirect();
- if (has_pointers) {
- heap->NotifyObjectLayoutChange(*this, size, no_allocation);
- }
- // Morph the string to an external string by replacing the map and
- // reinitializing the fields. This won't work if the space the existing
- // string occupies is too small for a regular external string. In that case
- // we resort to an uncached external string, omitting the field that caches
- // the address of the backing store. When we encounter uncached external
- // strings in generated code, we need to bail out to the runtime.
- Map new_map;
- ReadOnlyRoots roots(heap);
- if (size < ExternalString::kSize) {
- if (is_internalized) {
- if (is_one_byte) {
- new_map =
- roots
- .uncached_external_internalized_string_with_one_byte_data_map();
- } else {
- new_map = roots.uncached_external_internalized_string_map();
- }
- } else {
- new_map = is_one_byte
- ? roots.uncached_external_string_with_one_byte_data_map()
- : roots.uncached_external_string_map();
- }
- } else {
- new_map =
- is_internalized
- ? (is_one_byte
- ? roots.external_internalized_string_with_one_byte_data_map()
- : roots.external_internalized_string_map())
- : (is_one_byte ? roots.external_string_with_one_byte_data_map()
- : roots.external_string_map());
- }
-
- // Byte size of the external String object.
- int new_size = this->SizeFromMap(new_map);
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
- ClearRecordedSlots::kNo);
- if (has_pointers) {
- heap->ClearRecordedSlotRange(this->address(), this->address() + new_size);
- }
-
- // We are storing the new map using release store after creating a filler for
- // the left-over space to avoid races with the sweeper thread.
- this->synchronized_set_map(new_map);
-
- ExternalTwoByteString self = ExternalTwoByteString::cast(*this);
- self->SetResource(isolate, resource);
- heap->RegisterExternalString(*this);
- if (is_internalized) self->Hash(); // Force regeneration of the hash value.
- return true;
-}
-
-bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
- DisallowHeapAllocation no_allocation;
- // Externalizing twice leaks the external resource, so it's
- // prohibited by the API.
- DCHECK(this->SupportsExternalization());
- DCHECK(resource->IsCacheable());
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- // Assert that the resource and the string are equivalent.
- DCHECK(static_cast<size_t>(this->length()) == resource->length());
- if (this->IsTwoByteRepresentation()) {
- ScopedVector<uint16_t> smart_chars(this->length());
- String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
- DCHECK(String::IsOneByte(smart_chars.start(), this->length()));
- }
- ScopedVector<char> smart_chars(this->length());
- String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
- DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
- resource->length() * sizeof(smart_chars[0])));
- }
-#endif  // ENABLE_SLOW_DCHECKS
- int size = this->Size(); // Byte size of the original string.
- // Abort if size does not allow in-place conversion.
- if (size < ExternalString::kUncachedSize) return false;
- Isolate* isolate;
- // Read-only strings cannot be made external, since that would mutate the
- // string.
- if (!Isolate::FromWritableHeapObject(*this, &isolate)) return false;
- Heap* heap = isolate->heap();
- bool is_internalized = this->IsInternalizedString();
- bool has_pointers = StringShape(*this).IsIndirect();
-
- if (has_pointers) {
- heap->NotifyObjectLayoutChange(*this, size, no_allocation);
- }
-
- // Morph the string to an external string by replacing the map and
- // reinitializing the fields. This won't work if the space the existing
- // string occupies is too small for a regular external string. In that case
- // we resort to an uncached external string, omitting the field that caches
- // the address of the backing store. When we encounter uncached external
- // strings in generated code, we need to bail out to the runtime.
- Map new_map;
- ReadOnlyRoots roots(heap);
- if (size < ExternalString::kSize) {
- new_map = is_internalized
- ? roots.uncached_external_one_byte_internalized_string_map()
- : roots.uncached_external_one_byte_string_map();
- } else {
- new_map = is_internalized
- ? roots.external_one_byte_internalized_string_map()
- : roots.external_one_byte_string_map();
- }
-
- // Byte size of the external String object.
- int new_size = this->SizeFromMap(new_map);
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
- ClearRecordedSlots::kNo);
- if (has_pointers) {
- heap->ClearRecordedSlotRange(this->address(), this->address() + new_size);
- }
-
- // We are storing the new map using release store after creating a filler for
- // the left-over space to avoid races with the sweeper thread.
- this->synchronized_set_map(new_map);
-
- ExternalOneByteString self = ExternalOneByteString::cast(*this);
- self->SetResource(isolate, resource);
- heap->RegisterExternalString(*this);
- if (is_internalized) self->Hash(); // Force regeneration of the hash value.
- return true;
-}
-
-bool String::SupportsExternalization() {
- if (this->IsThinString()) {
- return i::ThinString::cast(*this)->actual()->SupportsExternalization();
- }
-
- Isolate* isolate;
- // RO_SPACE strings cannot be externalized.
- if (!Isolate::FromWritableHeapObject(*this, &isolate)) {
- return false;
- }
-
- // Already an external string.
- if (StringShape(*this).IsExternal()) {
- return false;
- }
-
- return !isolate->heap()->IsInGCPostProcessing();
-}
-
-void String::StringShortPrint(StringStream* accumulator, bool show_details) {
- const char* internalized_marker = this->IsInternalizedString() ? "#" : "";
-
- int len = length();
- if (len > kMaxShortPrintLength) {
- accumulator->Add("<Very long string[%s%u]>", internalized_marker, len);
- return;
- }
-
- if (!LooksValid()) {
- accumulator->Add("<Invalid String>");
- return;
- }
-
- StringCharacterStream stream(*this);
-
- bool truncated = false;
- if (len > kMaxShortPrintLength) {
- len = kMaxShortPrintLength;
- truncated = true;
- }
- bool one_byte = true;
- for (int i = 0; i < len; i++) {
- uint16_t c = stream.GetNext();
-
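- // Only printable ASCII is emitted verbatim; any other character forces
- // the escaping branch below.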
- if (c < 32 || c >= 127) {
- one_byte = false;
- }
- }
- stream.Reset(*this);
- if (one_byte) {
- if (show_details)
- accumulator->Add("<String[%s%u]: ", internalized_marker, length());
- for (int i = 0; i < len; i++) {
- accumulator->Put(static_cast<char>(stream.GetNext()));
- }
- if (show_details) accumulator->Put('>');
- } else {
- // Backslash indicates that the string contains control
- // characters and that backslashes are therefore escaped.
- if (show_details)
- accumulator->Add("<String[%s%u]\\: ", internalized_marker, length());
- for (int i = 0; i < len; i++) {
- uint16_t c = stream.GetNext();
- if (c == '\n') {
- accumulator->Add("\\n");
- } else if (c == '\r') {
- accumulator->Add("\\r");
- } else if (c == '\\') {
- accumulator->Add("\\\\");
- } else if (c < 32 || c > 126) {
- accumulator->Add("\\x%02x", c);
- } else {
- accumulator->Put(static_cast<char>(c));
- }
- }
- if (truncated) {
- accumulator->Put('.');
- accumulator->Put('.');
- accumulator->Put('.');
- }
- if (show_details) accumulator->Put('>');
- }
- return;
-}
-
-
-void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
- if (end < 0) end = length();
- StringCharacterStream stream(*this, start);
- for (int i = start; i < end && stream.HasMore(); i++) {
- os << AsUC16(stream.GetNext());
- }
-}
-
-
-void JSObject::JSObjectShortPrint(StringStream* accumulator) {
- switch (map()->instance_type()) {
- case JS_ARRAY_TYPE: {
- double length = JSArray::cast(*this)->length()->IsUndefined()
- ? 0
- : JSArray::cast(*this)->length()->Number();
- accumulator->Add("<JSArray[%u]>", static_cast<uint32_t>(length));
- break;
- }
- case JS_BOUND_FUNCTION_TYPE: {
- JSBoundFunction bound_function = JSBoundFunction::cast(*this);
- accumulator->Add("<JSBoundFunction");
- accumulator->Add(" (BoundTargetFunction %p)>",
- reinterpret_cast<void*>(
- bound_function->bound_target_function().ptr()));
- break;
- }
- case JS_WEAK_MAP_TYPE: {
- accumulator->Add("<JSWeakMap>");
- break;
- }
- case JS_WEAK_SET_TYPE: {
- accumulator->Add("<JSWeakSet>");
- break;
- }
- case JS_REGEXP_TYPE: {
- accumulator->Add("<JSRegExp");
- JSRegExp regexp = JSRegExp::cast(*this);
- if (regexp->source()->IsString()) {
- accumulator->Add(" ");
- String::cast(regexp->source())->StringShortPrint(accumulator);
- }
- accumulator->Add(">");
-
- break;
- }
- case JS_FUNCTION_TYPE: {
- JSFunction function = JSFunction::cast(*this);
- Object fun_name = function->shared()->DebugName();
- bool printed = false;
- if (fun_name->IsString()) {
- String str = String::cast(fun_name);
- if (str->length() > 0) {
- accumulator->Add("<JSFunction ");
- accumulator->Put(str);
- printed = true;
- }
- }
- if (!printed) {
- accumulator->Add("<JSFunction");
- }
- if (FLAG_trace_file_names) {
- Object source_name = Script::cast(function->shared()->script())->name();
- if (source_name->IsString()) {
- String str = String::cast(source_name);
- if (str->length() > 0) {
- accumulator->Add(" <");
- accumulator->Put(str);
- accumulator->Add(">");
- }
- }
- }
- accumulator->Add(" (sfi = %p)",
- reinterpret_cast<void*>(function->shared().ptr()));
- accumulator->Put('>');
- break;
- }
- case JS_GENERATOR_OBJECT_TYPE: {
- accumulator->Add("<JSGenerator>");
- break;
- }
- case JS_ASYNC_FUNCTION_OBJECT_TYPE: {
- accumulator->Add("<JSAsyncFunctionObject>");
- break;
- }
- case JS_ASYNC_GENERATOR_OBJECT_TYPE: {
- accumulator->Add("<JS AsyncGenerator>");
- break;
- }
-
- // All other JSObjects are rather similar to each other (JSObject,
- // JSGlobalProxy, JSGlobalObject, JSUndetectable, JSValue).
- default: {
- Map map_of_this = map();
- Heap* heap = GetHeap();
- Object constructor = map_of_this->GetConstructor();
- bool printed = false;
- if (constructor->IsHeapObject() &&
- !heap->Contains(HeapObject::cast(constructor))) {
- accumulator->Add("!!!INVALID CONSTRUCTOR!!!");
- } else {
- bool global_object = IsJSGlobalProxy();
- if (constructor->IsJSFunction()) {
- if (!heap->Contains(JSFunction::cast(constructor)->shared())) {
- accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
- } else {
- String constructor_name =
- JSFunction::cast(constructor)->shared()->Name();
- if (constructor_name->length() > 0) {
- accumulator->Add(global_object ? "<GlobalObject " : "<");
- accumulator->Put(constructor_name);
- accumulator->Add(
- " %smap = %p",
- map_of_this->is_deprecated() ? "deprecated-" : "",
- map_of_this);
- printed = true;
- }
- }
- } else if (constructor->IsFunctionTemplateInfo()) {
- accumulator->Add("<RemoteObject>");
- printed = true;
- }
- if (!printed) {
- accumulator->Add("<JS%sObject", global_object ? "Global " : "");
- }
- }
- if (IsJSValue()) {
- accumulator->Add(" value = ");
- JSValue::cast(*this)->value()->ShortPrint(accumulator);
- }
- accumulator->Put('>');
- break;
- }
- }
-}
-
-
-void JSObject::PrintElementsTransition(
- FILE* file, Handle<JSObject> object,
- ElementsKind from_kind, Handle<FixedArrayBase> from_elements,
- ElementsKind to_kind, Handle<FixedArrayBase> to_elements) {
- if (from_kind != to_kind) {
- OFStream os(file);
- os << "elements transition [" << ElementsKindToString(from_kind) << " -> "
- << ElementsKindToString(to_kind) << "] in ";
- JavaScriptFrame::PrintTop(object->GetIsolate(), file, false, true);
- PrintF(file, " for ");
- object->ShortPrint(file);
- PrintF(file, " from ");
- from_elements->ShortPrint(file);
- PrintF(file, " to ");
- to_elements->ShortPrint(file);
- PrintF(file, "\n");
- }
-}
-
-
-// static
-MaybeHandle<JSFunction> Map::GetConstructorFunction(
- Handle<Map> map, Handle<Context> native_context) {
- if (map->IsPrimitiveMap()) {
- int const constructor_function_index = map->GetConstructorFunctionIndex();
- if (constructor_function_index != kNoConstructorFunctionIndex) {
- return handle(
- JSFunction::cast(native_context->get(constructor_function_index)),
- native_context->GetIsolate());
- }
- }
- return MaybeHandle<JSFunction>();
-}
-
-void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
- PropertyKind kind,
- PropertyAttributes attributes) {
- OFStream os(file);
- os << "[reconfiguring]";
- Name name = instance_descriptors()->GetKey(modify_index);
- if (name->IsString()) {
- String::cast(name)->PrintOn(file);
- } else {
- os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
- }
- os << ": " << (kind == kData ? "kData" : "kAccessor") << ", attrs: ";
- os << attributes << " [";
- JavaScriptFrame::PrintTop(isolate, file, false, true);
- os << "]\n";
-}
-
-VisitorId Map::GetVisitorId(Map map) {
- STATIC_ASSERT(kVisitorIdCount <= 256);
-
- const int instance_type = map->instance_type();
-
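- // String instance types encode both representation (seq, cons, sliced,
- // external, thin) and encoding (one- or two-byte) in their type bits.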
- if (instance_type < FIRST_NONSTRING_TYPE) {
- switch (instance_type & kStringRepresentationMask) {
- case kSeqStringTag:
- if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
- return kVisitSeqOneByteString;
- } else {
- return kVisitSeqTwoByteString;
- }
-
- case kConsStringTag:
- if (IsShortcutCandidate(instance_type)) {
- return kVisitShortcutCandidate;
- } else {
- return kVisitConsString;
- }
-
- case kSlicedStringTag:
- return kVisitSlicedString;
-
- case kExternalStringTag:
- return kVisitDataObject;
-
- case kThinStringTag:
- return kVisitThinString;
- }
- UNREACHABLE();
- }
-
- switch (instance_type) {
- case BYTE_ARRAY_TYPE:
- return kVisitByteArray;
-
- case BYTECODE_ARRAY_TYPE:
- return kVisitBytecodeArray;
-
- case FREE_SPACE_TYPE:
- return kVisitFreeSpace;
-
- case EMBEDDER_DATA_ARRAY_TYPE:
- return kVisitEmbedderDataArray;
-
- case FIXED_ARRAY_TYPE:
- case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
- case HASH_TABLE_TYPE:
- case ORDERED_HASH_MAP_TYPE:
- case ORDERED_HASH_SET_TYPE:
- case ORDERED_NAME_DICTIONARY_TYPE:
- case NAME_DICTIONARY_TYPE:
- case GLOBAL_DICTIONARY_TYPE:
- case NUMBER_DICTIONARY_TYPE:
- case SIMPLE_NUMBER_DICTIONARY_TYPE:
- case STRING_TABLE_TYPE:
- case SCOPE_INFO_TYPE:
- case SCRIPT_CONTEXT_TABLE_TYPE:
- return kVisitFixedArray;
-
- case AWAIT_CONTEXT_TYPE:
- case BLOCK_CONTEXT_TYPE:
- case CATCH_CONTEXT_TYPE:
- case DEBUG_EVALUATE_CONTEXT_TYPE:
- case EVAL_CONTEXT_TYPE:
- case FUNCTION_CONTEXT_TYPE:
- case MODULE_CONTEXT_TYPE:
- case SCRIPT_CONTEXT_TYPE:
- case WITH_CONTEXT_TYPE:
- return kVisitContext;
-
- case NATIVE_CONTEXT_TYPE:
- return kVisitNativeContext;
-
- case EPHEMERON_HASH_TABLE_TYPE:
- return kVisitEphemeronHashTable;
-
- case WEAK_FIXED_ARRAY_TYPE:
- case WEAK_ARRAY_LIST_TYPE:
- return kVisitWeakArray;
-
- case FIXED_DOUBLE_ARRAY_TYPE:
- return kVisitFixedDoubleArray;
-
- case PROPERTY_ARRAY_TYPE:
- return kVisitPropertyArray;
-
- case FEEDBACK_CELL_TYPE:
- return kVisitFeedbackCell;
-
- case FEEDBACK_VECTOR_TYPE:
- return kVisitFeedbackVector;
-
- case ODDBALL_TYPE:
- return kVisitOddball;
-
- case MAP_TYPE:
- return kVisitMap;
-
- case CODE_TYPE:
- return kVisitCode;
-
- case CELL_TYPE:
- return kVisitCell;
-
- case PROPERTY_CELL_TYPE:
- return kVisitPropertyCell;
-
- case DESCRIPTOR_ARRAY_TYPE:
- return kVisitDescriptorArray;
-
- case TRANSITION_ARRAY_TYPE:
- return kVisitTransitionArray;
-
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- return kVisitJSWeakCollection;
-
- case CALL_HANDLER_INFO_TYPE:
- return kVisitStruct;
-
- case SHARED_FUNCTION_INFO_TYPE:
- return kVisitSharedFunctionInfo;
-
- case JS_PROXY_TYPE:
- return kVisitStruct;
-
- case SYMBOL_TYPE:
- return kVisitSymbol;
-
- case JS_ARRAY_BUFFER_TYPE:
- return kVisitJSArrayBuffer;
-
- case JS_DATA_VIEW_TYPE:
- return kVisitJSDataView;
-
- case JS_FUNCTION_TYPE:
- return kVisitJSFunction;
-
- case JS_TYPED_ARRAY_TYPE:
- return kVisitJSTypedArray;
-
- case SMALL_ORDERED_HASH_MAP_TYPE:
- return kVisitSmallOrderedHashMap;
-
- case SMALL_ORDERED_HASH_SET_TYPE:
- return kVisitSmallOrderedHashSet;
-
- case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
- return kVisitSmallOrderedNameDictionary;
-
- case CODE_DATA_CONTAINER_TYPE:
- return kVisitCodeDataContainer;
-
- case WASM_INSTANCE_TYPE:
- return kVisitWasmInstanceObject;
-
- case PREPARSE_DATA_TYPE:
- return kVisitPreparseData;
-
- case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
- return kVisitUncompiledDataWithoutPreparseData;
-
- case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
- return kVisitUncompiledDataWithPreparseData;
-
- case JS_OBJECT_TYPE:
- case JS_ERROR_TYPE:
- case JS_ARGUMENTS_TYPE:
- case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
- case JS_ASYNC_FUNCTION_OBJECT_TYPE:
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_NAMESPACE_TYPE:
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_ARRAY_ITERATOR_TYPE:
- case JS_ARRAY_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_SET_KEY_VALUE_ITERATOR_TYPE:
- case JS_SET_VALUE_ITERATOR_TYPE:
- case JS_MAP_KEY_ITERATOR_TYPE:
- case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
- case JS_MAP_VALUE_ITERATOR_TYPE:
- case JS_STRING_ITERATOR_TYPE:
- case JS_PROMISE_TYPE:
- case JS_REGEXP_TYPE:
- case JS_REGEXP_STRING_ITERATOR_TYPE:
- case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
- case JS_WEAK_FACTORY_TYPE:
-#ifdef V8_INTL_SUPPORT
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
- case JS_INTL_COLLATOR_TYPE:
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
- case JS_INTL_LIST_FORMAT_TYPE:
- case JS_INTL_LOCALE_TYPE:
- case JS_INTL_NUMBER_FORMAT_TYPE:
- case JS_INTL_PLURAL_RULES_TYPE:
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
- case JS_INTL_SEGMENTER_TYPE:
-#endif // V8_INTL_SUPPORT
- case WASM_EXCEPTION_TYPE:
- case WASM_GLOBAL_TYPE:
- case WASM_MEMORY_TYPE:
- case WASM_MODULE_TYPE:
- case WASM_TABLE_TYPE:
- case JS_BOUND_FUNCTION_TYPE: {
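- // Raw data fields (unboxed doubles, embedder slots) mean not every word
- // of the object is a tagged pointer, so the fast visitor cannot be used.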
- const bool has_raw_data_fields =
- (FLAG_unbox_double_fields && !map->HasFastPointerLayout()) ||
- (COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0);
- return has_raw_data_fields ? kVisitJSObject : kVisitJSObjectFast;
- }
- case JS_API_OBJECT_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
- return kVisitJSApiObject;
-
- case JS_WEAK_REF_TYPE:
- return kVisitJSWeakRef;
-
- case JS_WEAK_CELL_TYPE:
- return kVisitJSWeakCell;
-
- case FILLER_TYPE:
- case FOREIGN_TYPE:
- case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
- case FEEDBACK_METADATA_TYPE:
- return kVisitDataObject;
-
- case BIGINT_TYPE:
- return kVisitBigInt;
-
- case FIXED_UINT8_ARRAY_TYPE:
- case FIXED_INT8_ARRAY_TYPE:
- case FIXED_UINT16_ARRAY_TYPE:
- case FIXED_INT16_ARRAY_TYPE:
- case FIXED_UINT32_ARRAY_TYPE:
- case FIXED_INT32_ARRAY_TYPE:
- case FIXED_FLOAT32_ARRAY_TYPE:
- case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
- case FIXED_BIGUINT64_ARRAY_TYPE:
- case FIXED_BIGINT64_ARRAY_TYPE:
- return kVisitFixedTypedArrayBase;
-
- case FIXED_FLOAT64_ARRAY_TYPE:
- return kVisitFixedFloat64Array;
-
- case ALLOCATION_SITE_TYPE:
- return kVisitAllocationSite;
-
-#define MAKE_STRUCT_CASE(TYPE, Name, name) case TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- if (instance_type == PROTOTYPE_INFO_TYPE) {
- return kVisitPrototypeInfo;
- }
- return kVisitStruct;
-
- case LOAD_HANDLER_TYPE:
- case STORE_HANDLER_TYPE:
- return kVisitDataHandler;
-
- default:
- UNREACHABLE();
- }
-}
-
-void Map::PrintGeneralization(
- Isolate* isolate, FILE* file, const char* reason, int modify_index,
- int split, int descriptors, bool descriptor_to_field,
- Representation old_representation, Representation new_representation,
- MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
- MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value) {
- OFStream os(file);
- os << "[generalizing]";
- Name name = instance_descriptors()->GetKey(modify_index);
- if (name->IsString()) {
- String::cast(name)->PrintOn(file);
- } else {
- os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
- }
- os << ":";
- if (descriptor_to_field) {
- os << "c";
- } else {
- os << old_representation.Mnemonic() << "{";
- if (old_field_type.is_null()) {
- os << Brief(*(old_value.ToHandleChecked()));
- } else {
- old_field_type.ToHandleChecked()->PrintTo(os);
- }
- os << "}";
- }
- os << "->" << new_representation.Mnemonic() << "{";
- if (new_field_type.is_null()) {
- os << Brief(*(new_value.ToHandleChecked()));
- } else {
- new_field_type.ToHandleChecked()->PrintTo(os);
- }
- os << "} (";
- if (strlen(reason) > 0) {
- os << reason;
- } else {
- os << "+" << (descriptors - split) << " maps";
- }
- os << ") [";
- JavaScriptFrame::PrintTop(isolate, file, false, true);
- os << "]\n";
-}
-
-void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
- Map new_map) {
- if (new_map->is_dictionary_map()) {
- PrintF(file, "[migrating to slow]\n");
- return;
- }
- PrintF(file, "[migrating]");
- DescriptorArray o = original_map->instance_descriptors();
- DescriptorArray n = new_map->instance_descriptors();
- for (int i = 0; i < original_map->NumberOfOwnDescriptors(); i++) {
- Representation o_r = o->GetDetails(i).representation();
- Representation n_r = n->GetDetails(i).representation();
- if (!o_r.Equals(n_r)) {
- String::cast(o->GetKey(i))->PrintOn(file);
- PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
- } else if (o->GetDetails(i).location() == kDescriptor &&
- n->GetDetails(i).location() == kField) {
- Name name = o->GetKey(i);
- if (name->IsString()) {
- String::cast(name)->PrintOn(file);
- } else {
- PrintF(file, "{symbol %p}", reinterpret_cast<void*>(name.ptr()));
- }
- PrintF(file, " ");
- }
- }
- if (original_map->elements_kind() != new_map->elements_kind()) {
- PrintF(file, "elements_kind[%i->%i]", original_map->elements_kind(),
- new_map->elements_kind());
- }
- PrintF(file, "\n");
-}
-
-bool JSObject::IsUnmodifiedApiObject(FullObjectSlot o) {
- Object object = *o;
- if (object->IsSmi()) return false;
- HeapObject heap_object = HeapObject::cast(object);
- if (!object->IsJSObject()) return false;
- JSObject js_object = JSObject::cast(object);
- if (!js_object->IsDroppableApiWrapper()) return false;
- Object maybe_constructor = js_object->map()->GetConstructor();
- if (!maybe_constructor->IsJSFunction()) return false;
- JSFunction constructor = JSFunction::cast(maybe_constructor);
- if (js_object->elements()->length() != 0) return false;
-
- return constructor->initial_map() == heap_object->map();
-}
void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
- os << AsHex(this->ptr(), kSystemPointerHexDigits, true) << " ";
+ os << AsHex::Address(this->ptr()) << " ";
if (IsString()) {
HeapStringAllocator allocator;
@@ -3772,6 +2167,10 @@ void Tuple3::BriefPrintDetails(std::ostream& os) {
<< Brief(value3());
}
+void ClassPositions::BriefPrintDetails(std::ostream& os) {
+ os << " " << start() << ", " << end();
+}
+
void ArrayBoilerplateDescription::BriefPrintDetails(std::ostream& os) {
os << " " << elements_kind() << ", " << Brief(constant_elements());
}
@@ -3806,66 +2205,129 @@ bool HeapObject::IsValidSlot(Map map, int offset) {
*this, offset, 0);
}
-String JSReceiver::class_name() {
- ReadOnlyRoots roots = GetReadOnlyRoots();
- if (IsFunction()) return roots.Function_string();
- if (IsJSArgumentsObject()) return roots.Arguments_string();
- if (IsJSArray()) return roots.Array_string();
- if (IsJSArrayBuffer()) {
- if (JSArrayBuffer::cast(*this)->is_shared()) {
- return roots.SharedArrayBuffer_string();
- }
- return roots.ArrayBuffer_string();
- }
- if (IsJSArrayIterator()) return roots.ArrayIterator_string();
- if (IsJSDate()) return roots.Date_string();
- if (IsJSError()) return roots.Error_string();
- if (IsJSGeneratorObject()) return roots.Generator_string();
- if (IsJSMap()) return roots.Map_string();
- if (IsJSMapIterator()) return roots.MapIterator_string();
- if (IsJSProxy()) {
- return map()->is_callable() ? roots.Function_string()
- : roots.Object_string();
- }
- if (IsJSRegExp()) return roots.RegExp_string();
- if (IsJSSet()) return roots.Set_string();
- if (IsJSSetIterator()) return roots.SetIterator_string();
- if (IsJSTypedArray()) {
-#define SWITCH_KIND(Type, type, TYPE, ctype) \
- if (map()->elements_kind() == TYPE##_ELEMENTS) { \
- return roots.Type##Array_string(); \
- }
- TYPED_ARRAYS(SWITCH_KIND)
-#undef SWITCH_KIND
- }
- if (IsJSValue()) {
- Object value = JSValue::cast(*this)->value();
- if (value->IsBoolean()) return roots.Boolean_string();
- if (value->IsString()) return roots.String_string();
- if (value->IsNumber()) return roots.Number_string();
- if (value->IsBigInt()) return roots.BigInt_string();
- if (value->IsSymbol()) return roots.Symbol_string();
- if (value->IsScript()) return roots.Script_string();
- UNREACHABLE();
+int HeapObject::SizeFromMap(Map map) const {
+ int instance_size = map->instance_size();
+ if (instance_size != kVariableSizeSentinel) return instance_size;
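+ // Otherwise the size depends on a per-instance length field, so dispatch
+ // on the instance type to locate it.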
+ // Only inline the most frequent cases.
+ InstanceType instance_type = map->instance_type();
+ if (IsInRange(instance_type, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE)) {
+ return FixedArray::SizeFor(
+ FixedArray::unchecked_cast(*this)->synchronized_length());
}
- if (IsJSWeakMap()) return roots.WeakMap_string();
- if (IsJSWeakSet()) return roots.WeakSet_string();
- if (IsJSGlobalProxy()) return roots.global_string();
-
- Object maybe_constructor = map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
- JSFunction constructor = JSFunction::cast(maybe_constructor);
- if (constructor->shared()->IsApiFunction()) {
- maybe_constructor = constructor->shared()->get_api_func_data();
- }
+ if (IsInRange(instance_type, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE)) {
+ // Native context has fixed size.
+ DCHECK_NE(instance_type, NATIVE_CONTEXT_TYPE);
+ return Context::SizeFor(Context::unchecked_cast(*this)->length());
}
-
- if (maybe_constructor->IsFunctionTemplateInfo()) {
- FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
- if (info->class_name()->IsString()) return String::cast(info->class_name());
+ if (instance_type == ONE_BYTE_STRING_TYPE ||
+ instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
+ // Strings may get concurrently truncated, hence we have to access their
+ // length synchronized.
+ return SeqOneByteString::SizeFor(
+ SeqOneByteString::unchecked_cast(*this)->synchronized_length());
+ }
+ if (instance_type == BYTE_ARRAY_TYPE) {
+ return ByteArray::SizeFor(
+ ByteArray::unchecked_cast(*this)->synchronized_length());
+ }
+ if (instance_type == BYTECODE_ARRAY_TYPE) {
+ return BytecodeArray::SizeFor(
+ BytecodeArray::unchecked_cast(*this)->synchronized_length());
+ }
+ if (instance_type == FREE_SPACE_TYPE) {
+ return FreeSpace::unchecked_cast(*this)->relaxed_read_size();
+ }
+ if (instance_type == STRING_TYPE ||
+ instance_type == INTERNALIZED_STRING_TYPE) {
+ // Strings may get concurrently truncated, hence we have to access their
+ // length synchronized.
+ return SeqTwoByteString::SizeFor(
+ SeqTwoByteString::unchecked_cast(*this)->synchronized_length());
+ }
+ if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
+ return FixedDoubleArray::SizeFor(
+ FixedDoubleArray::unchecked_cast(*this)->synchronized_length());
+ }
+ if (instance_type == FEEDBACK_METADATA_TYPE) {
+ return FeedbackMetadata::SizeFor(
+ FeedbackMetadata::unchecked_cast(*this)->synchronized_slot_count());
+ }
+ if (instance_type == DESCRIPTOR_ARRAY_TYPE) {
+ return DescriptorArray::SizeFor(
+ DescriptorArray::unchecked_cast(*this)->number_of_all_descriptors());
+ }
+ if (IsInRange(instance_type, FIRST_WEAK_FIXED_ARRAY_TYPE,
+ LAST_WEAK_FIXED_ARRAY_TYPE)) {
+ return WeakFixedArray::SizeFor(
+ WeakFixedArray::unchecked_cast(*this)->synchronized_length());
+ }
+ if (instance_type == WEAK_ARRAY_LIST_TYPE) {
+ return WeakArrayList::SizeForCapacity(
+ WeakArrayList::unchecked_cast(*this)->synchronized_capacity());
+ }
+ if (IsInRange(instance_type, FIRST_FIXED_TYPED_ARRAY_TYPE,
+ LAST_FIXED_TYPED_ARRAY_TYPE)) {
+ return FixedTypedArrayBase::unchecked_cast(*this)->TypedArraySize(
+ instance_type);
+ }
+ if (instance_type == SMALL_ORDERED_HASH_SET_TYPE) {
+ return SmallOrderedHashSet::SizeFor(
+ SmallOrderedHashSet::unchecked_cast(*this)->Capacity());
+ }
+ if (instance_type == SMALL_ORDERED_HASH_MAP_TYPE) {
+ return SmallOrderedHashMap::SizeFor(
+ SmallOrderedHashMap::unchecked_cast(*this)->Capacity());
+ }
+ if (instance_type == SMALL_ORDERED_NAME_DICTIONARY_TYPE) {
+ return SmallOrderedNameDictionary::SizeFor(
+ SmallOrderedNameDictionary::unchecked_cast(*this)->Capacity());
+ }
+ if (instance_type == PROPERTY_ARRAY_TYPE) {
+ return PropertyArray::SizeFor(
+ PropertyArray::cast(*this)->synchronized_length());
+ }
+ if (instance_type == FEEDBACK_VECTOR_TYPE) {
+ return FeedbackVector::SizeFor(
+ FeedbackVector::unchecked_cast(*this)->length());
+ }
+ if (instance_type == BIGINT_TYPE) {
+ return BigInt::SizeFor(BigInt::unchecked_cast(*this)->length());
+ }
+ if (instance_type == PREPARSE_DATA_TYPE) {
+ PreparseData data = PreparseData::unchecked_cast(*this);
+ return PreparseData::SizeFor(data->data_length(), data->children_length());
+ }
+ if (instance_type == CODE_TYPE) {
+ return Code::unchecked_cast(*this)->CodeSize();
+ }
+ DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
+ return EmbedderDataArray::SizeFor(
+ EmbedderDataArray::unchecked_cast(*this)->length());
+}
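Each SizeFor helper used above follows the same pattern: a fixed header plus a per-element payload derived from the object's length field. A hedged illustration with made-up constants (not V8's actual layout):

#include <cstdint>

constexpr int kTaggedSize = 8;   // hypothetical tagged-pointer width
constexpr int kHeaderSize = 16;  // hypothetical map word + length word

constexpr int FixedArraySizeFor(int length) {
  return kHeaderSize + length * kTaggedSize;
}

static_assert(FixedArraySizeFor(0) == kHeaderSize,
              "an empty array is just its header");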
+
+bool HeapObject::NeedsRehashing() const {
+ switch (map()->instance_type()) {
+ case DESCRIPTOR_ARRAY_TYPE:
+ return DescriptorArray::cast(*this)->number_of_descriptors() > 1;
+ case TRANSITION_ARRAY_TYPE:
+ return TransitionArray::cast(*this)->number_of_entries() > 1;
+ case ORDERED_HASH_MAP_TYPE:
+ return OrderedHashMap::cast(*this)->NumberOfElements() > 0;
+ case ORDERED_HASH_SET_TYPE:
+ return OrderedHashSet::cast(*this)->NumberOfElements() > 0;
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ case HASH_TABLE_TYPE:
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ return true;
+ default:
+ return false;
}
-
- return roots.Object_string();
}
bool HeapObject::CanBeRehashed() const {
@@ -3898,25 +2360,25 @@ bool HeapObject::CanBeRehashed() const {
return false;
}
-void HeapObject::RehashBasedOnMap(Isolate* isolate) {
+void HeapObject::RehashBasedOnMap(ReadOnlyRoots roots) {
switch (map()->instance_type()) {
case HASH_TABLE_TYPE:
UNREACHABLE();
break;
case NAME_DICTIONARY_TYPE:
- NameDictionary::cast(*this)->Rehash(isolate);
+ NameDictionary::cast(*this)->Rehash(roots);
break;
case GLOBAL_DICTIONARY_TYPE:
- GlobalDictionary::cast(*this)->Rehash(isolate);
+ GlobalDictionary::cast(*this)->Rehash(roots);
break;
case NUMBER_DICTIONARY_TYPE:
- NumberDictionary::cast(*this)->Rehash(isolate);
+ NumberDictionary::cast(*this)->Rehash(roots);
break;
case SIMPLE_NUMBER_DICTIONARY_TYPE:
- SimpleNumberDictionary::cast(*this)->Rehash(isolate);
+ SimpleNumberDictionary::cast(*this)->Rehash(roots);
break;
case STRING_TABLE_TYPE:
- StringTable::cast(*this)->Rehash(isolate);
+ StringTable::cast(*this)->Rehash(roots);
break;
case DESCRIPTOR_ARRAY_TYPE:
DCHECK_LE(1, DescriptorArray::cast(*this)->number_of_descriptors());
@@ -3939,758 +2401,8 @@ void HeapObject::RehashBasedOnMap(Isolate* isolate) {
}
}
-namespace {
-std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
- Handle<JSReceiver> receiver) {
- Isolate* isolate = receiver->GetIsolate();
-
- // If the object was instantiated simply with base == new.target, the
- // constructor on the map provides the most accurate name.
- // Don't provide the info for prototypes, since their constructors are
- // reclaimed and replaced by Object in OptimizeAsPrototype.
- if (!receiver->IsJSProxy() && receiver->map()->new_target_is_base() &&
- !receiver->map()->is_prototype_map()) {
- Object maybe_constructor = receiver->map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
- JSFunction constructor = JSFunction::cast(maybe_constructor);
- String name = constructor->shared()->DebugName();
- if (name->length() != 0 &&
- !name->Equals(ReadOnlyRoots(isolate).Object_string())) {
- return std::make_pair(handle(constructor, isolate),
- handle(name, isolate));
- }
- } else if (maybe_constructor->IsFunctionTemplateInfo()) {
- FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
- if (info->class_name()->IsString()) {
- return std::make_pair(
- MaybeHandle<JSFunction>(),
- handle(String::cast(info->class_name()), isolate));
- }
- }
- }
-
- Handle<Object> maybe_tag = JSReceiver::GetDataProperty(
- receiver, isolate->factory()->to_string_tag_symbol());
- if (maybe_tag->IsString())
- return std::make_pair(MaybeHandle<JSFunction>(),
- Handle<String>::cast(maybe_tag));
-
- PrototypeIterator iter(isolate, receiver);
- if (iter.IsAtEnd()) {
- return std::make_pair(MaybeHandle<JSFunction>(),
- handle(receiver->class_name(), isolate));
- }
-
- Handle<JSReceiver> start = PrototypeIterator::GetCurrent<JSReceiver>(iter);
- LookupIterator it(receiver, isolate->factory()->constructor_string(), start,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- Handle<Object> maybe_constructor = JSReceiver::GetDataProperty(&it);
- if (maybe_constructor->IsJSFunction()) {
- JSFunction constructor = JSFunction::cast(*maybe_constructor);
- String name = constructor->shared()->DebugName();
-
- if (name->length() != 0 &&
- !name->Equals(ReadOnlyRoots(isolate).Object_string())) {
- return std::make_pair(handle(constructor, isolate),
- handle(name, isolate));
- }
- }
-
- return std::make_pair(MaybeHandle<JSFunction>(),
- handle(receiver->class_name(), isolate));
-}
-} // anonymous namespace
-
-// static
-MaybeHandle<JSFunction> JSReceiver::GetConstructor(
- Handle<JSReceiver> receiver) {
- return GetConstructorHelper(receiver).first;
-}
-
-// static
-Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
- return GetConstructorHelper(receiver).second;
-}
-
-Handle<Context> JSReceiver::GetCreationContext() {
- JSReceiver receiver = *this;
- // Externals are JSObjects with null as a constructor.
- DCHECK(!receiver->IsExternal(GetIsolate()));
- Object constructor = receiver->map()->GetConstructor();
- JSFunction function;
- if (constructor->IsJSFunction()) {
- function = JSFunction::cast(constructor);
- } else if (constructor->IsFunctionTemplateInfo()) {
- // Remote objects don't have a creation context.
- return Handle<Context>::null();
- } else if (receiver->IsJSGeneratorObject()) {
- function = JSGeneratorObject::cast(receiver)->function();
- } else {
- // Functions have null as a constructor,
- // but any JSFunction knows its context immediately.
- CHECK(receiver->IsJSFunction());
- function = JSFunction::cast(receiver);
- }
-
- return function->has_context()
- ? Handle<Context>(function->context()->native_context(),
- receiver->GetIsolate())
- : Handle<Context>::null();
-}
-
-// static
-MaybeObjectHandle Map::WrapFieldType(Isolate* isolate, Handle<FieldType> type) {
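- // Class field types are wrapped weakly so that a field type alone does not
- // keep the referenced map alive.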
- if (type->IsClass()) {
- return MaybeObjectHandle::Weak(type->AsClass(), isolate);
- }
- return MaybeObjectHandle(type);
-}
-
-// static
-FieldType Map::UnwrapFieldType(MaybeObject wrapped_type) {
- if (wrapped_type->IsCleared()) {
- return FieldType::None();
- }
- HeapObject heap_object;
- if (wrapped_type->GetHeapObjectIfWeak(&heap_object)) {
- return FieldType::cast(heap_object);
- }
- return wrapped_type->cast<FieldType>();
-}
-
-MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
- Handle<Name> name, Handle<FieldType> type,
- PropertyAttributes attributes,
- PropertyConstness constness,
- Representation representation,
- TransitionFlag flag) {
- DCHECK(DescriptorArray::kNotFound ==
- map->instance_descriptors()->Search(
- *name, map->NumberOfOwnDescriptors()));
-
- // Ensure the descriptor array does not get too big.
- if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) {
- return MaybeHandle<Map>();
- }
-
- // Compute the new index for the new field.
- int index = map->NextFreePropertyIndex();
-
- if (map->instance_type() == JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
- constness = PropertyConstness::kMutable;
- representation = Representation::Tagged();
- type = FieldType::Any(isolate);
- } else {
- Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
- isolate, map->instance_type(), &constness, &representation, &type);
- }
-
- MaybeObjectHandle wrapped_type = WrapFieldType(isolate, type);
-
- DCHECK_IMPLIES(!FLAG_track_constant_fields,
- constness == PropertyConstness::kMutable);
- Descriptor d = Descriptor::DataField(name, index, attributes, constness,
- representation, wrapped_type);
- Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
- new_map->AccountAddedPropertyField();
- return new_map;
-}
-
-MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
- Handle<Name> name,
- Handle<Object> constant,
- PropertyAttributes attributes,
- TransitionFlag flag) {
- // Ensure the descriptor array does not get too big.
- if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) {
- return MaybeHandle<Map>();
- }
-
- if (FLAG_track_constant_fields) {
- Representation representation = constant->OptimalRepresentation();
- Handle<FieldType> type = constant->OptimalType(isolate, representation);
- return CopyWithField(isolate, map, name, type, attributes,
- PropertyConstness::kConst, representation, flag);
- } else {
- // Allocate new instance descriptors with (name, constant) added.
- Descriptor d =
- Descriptor::DataConstant(isolate, name, 0, constant, attributes);
- Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
- return new_map;
- }
-}
-
-const char* Representation::Mnemonic() const {
- switch (kind_) {
- case kNone: return "v";
- case kTagged: return "t";
- case kSmi: return "s";
- case kDouble: return "d";
- case kInteger32: return "i";
- case kHeapObject: return "h";
- case kExternal: return "x";
- default:
- UNREACHABLE();
- }
-}
-
-bool Map::TransitionRemovesTaggedField(Map target) const {
- int inobject = NumberOfFields();
- int target_inobject = target->NumberOfFields();
- for (int i = target_inobject; i < inobject; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(*this, i);
- if (!IsUnboxedDoubleField(index)) return true;
- }
- return false;
-}
-
-bool Map::TransitionChangesTaggedFieldToUntaggedField(Map target) const {
- int inobject = NumberOfFields();
- int target_inobject = target->NumberOfFields();
- int limit = Min(inobject, target_inobject);
- for (int i = 0; i < limit; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(target, i);
- if (!IsUnboxedDoubleField(index) && target->IsUnboxedDoubleField(index)) {
- return true;
- }
- }
- return false;
-}
-
-bool Map::TransitionRequiresSynchronizationWithGC(Map target) const {
- return TransitionRemovesTaggedField(target) ||
- TransitionChangesTaggedFieldToUntaggedField(target);
-}
-
-bool Map::InstancesNeedRewriting(Map target) const {
- int target_number_of_fields = target->NumberOfFields();
- int target_inobject = target->GetInObjectProperties();
- int target_unused = target->UnusedPropertyFields();
- int old_number_of_fields;
-
- return InstancesNeedRewriting(target, target_number_of_fields,
- target_inobject, target_unused,
- &old_number_of_fields);
-}
-
-bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
- int target_inobject, int target_unused,
- int* old_number_of_fields) const {
- // If fields were added (or removed), rewrite the instance.
- *old_number_of_fields = NumberOfFields();
- DCHECK(target_number_of_fields >= *old_number_of_fields);
- if (target_number_of_fields != *old_number_of_fields) return true;
-
- // If smi descriptors were replaced by double descriptors, rewrite.
- DescriptorArray old_desc = instance_descriptors();
- DescriptorArray new_desc = target->instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if (new_desc->GetDetails(i).representation().IsDouble() !=
- old_desc->GetDetails(i).representation().IsDouble()) {
- return true;
- }
- }
-
- // If no fields were added, and no inobject properties were removed, setting
- // the map is sufficient.
- if (target_inobject == GetInObjectProperties()) return false;
- // In-object slack tracking may have reduced the object size of the new map.
- // In that case, succeed if all existing fields were inobject, and they still
- // fit within the new inobject size.
- DCHECK(target_inobject < GetInObjectProperties());
- if (target_number_of_fields <= target_inobject) {
- DCHECK(target_number_of_fields + target_unused == target_inobject);
- return false;
- }
- // Otherwise, properties will need to be moved to the backing store.
- return true;
-}
-
-
-// static
-void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
- Handle<Map> new_map,
- Isolate* isolate) {
- DCHECK(old_map->is_prototype_map());
- DCHECK(new_map->is_prototype_map());
- bool was_registered = JSObject::UnregisterPrototypeUser(old_map, isolate);
- new_map->set_prototype_info(old_map->prototype_info());
- old_map->set_prototype_info(Smi::kZero);
- if (FLAG_trace_prototype_users) {
- PrintF("Moving prototype_info %p from map %p to map %p.\n",
- reinterpret_cast<void*>(new_map->prototype_info()->ptr()),
- reinterpret_cast<void*>(old_map->ptr()),
- reinterpret_cast<void*>(new_map->ptr()));
- }
- if (was_registered) {
- if (new_map->prototype_info()->IsPrototypeInfo()) {
- // The new map isn't registered with its prototype yet; reflect this fact
- // in the PrototypeInfo it just inherited from the old map.
- PrototypeInfo::cast(new_map->prototype_info())
- ->set_registry_slot(PrototypeInfo::UNREGISTERED);
- }
- JSObject::LazyRegisterPrototypeUser(new_map, isolate);
- }
-}
-
-namespace {
-// To migrate a fast instance to a fast map:
-// - First check whether the instance needs to be rewritten. If not, simply
-// change the map.
-// - Otherwise, allocate a fixed array large enough to hold all fields, in
-// addition to unused space.
-// - Copy all existing properties in, in the following order: backing store
-// properties, unused fields, inobject properties.
-// - If all allocation succeeded, commit the state atomically:
-// * Copy inobject properties from the backing store back into the object.
-// * Trim the difference in instance size of the object. This also cleanly
-// frees inobject properties that moved to the backing store.
-// * If there are properties left in the backing store, trim off the space used
-// to temporarily store the inobject properties.
-// * If there are properties left in the backing store, install the backing
-// store.
-void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
- Isolate* isolate = object->GetIsolate();
- Handle<Map> old_map(object->map(), isolate);
- // In case of a regular transition.
- if (new_map->GetBackPointer() == *old_map) {
- // If the map does not add named properties, simply set the map.
- if (old_map->NumberOfOwnDescriptors() ==
- new_map->NumberOfOwnDescriptors()) {
- object->synchronized_set_map(*new_map);
- return;
- }
-
- PropertyDetails details = new_map->GetLastDescriptorDetails();
- int target_index = details.field_index() - new_map->GetInObjectProperties();
- int property_array_length = object->property_array()->length();
- bool have_space = old_map->UnusedPropertyFields() > 0 ||
- (details.location() == kField && target_index >= 0 &&
- property_array_length > target_index);
- // Either new_map adds a kDescriptor property, or a kField property for
- // which there is still space, and which does not require a mutable double
- // box (an out-of-object double).
- if (details.location() == kDescriptor ||
- (have_space && ((FLAG_unbox_double_fields && target_index < 0) ||
- !details.representation().IsDouble()))) {
- object->synchronized_set_map(*new_map);
- return;
- }
-
- // If there is still space in the object, we need to allocate a mutable
- // double box.
- if (have_space) {
- FieldIndex index =
- FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
- DCHECK(details.representation().IsDouble());
- DCHECK(!new_map->IsUnboxedDoubleField(index));
- auto value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
- object->RawFastPropertyAtPut(index, *value);
- object->synchronized_set_map(*new_map);
- return;
- }
-
- // This migration is a transition from a map that has run out of property
- // space. Extend the backing store.
- int grow_by = new_map->UnusedPropertyFields() + 1;
- Handle<PropertyArray> old_storage(object->property_array(), isolate);
- Handle<PropertyArray> new_storage =
- isolate->factory()->CopyPropertyArrayAndGrow(old_storage, grow_by);
-
- // Properly initialize the newly added property.
- Handle<Object> value;
- if (details.representation().IsDouble()) {
- value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
- } else {
- value = isolate->factory()->uninitialized_value();
- }
- DCHECK_EQ(kField, details.location());
- DCHECK_EQ(kData, details.kind());
- DCHECK_GE(target_index, 0); // Must be a backing store index.
- new_storage->set(target_index, *value);
-
- // From here on we cannot fail and we shouldn't GC anymore.
- DisallowHeapAllocation no_allocation;
-
- // Set the new property value and do the map transition.
- object->SetProperties(*new_storage);
- object->synchronized_set_map(*new_map);
- return;
- }
-
- int old_number_of_fields;
- int number_of_fields = new_map->NumberOfFields();
- int inobject = new_map->GetInObjectProperties();
- int unused = new_map->UnusedPropertyFields();
-
- // Nothing to do if no functions were converted to fields and no smis were
- // converted to doubles.
- if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject,
- unused, &old_number_of_fields)) {
- object->synchronized_set_map(*new_map);
- return;
- }
-
- int total_size = number_of_fields + unused;
- int external = total_size - inobject;
- Handle<PropertyArray> array = isolate->factory()->NewPropertyArray(external);
-
- // We use this array to temporarily store the inobject properties.
- Handle<FixedArray> inobject_props =
- isolate->factory()->NewFixedArray(inobject);
-
- Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors(),
- isolate);
- Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors(),
- isolate);
- int old_nof = old_map->NumberOfOwnDescriptors();
- int new_nof = new_map->NumberOfOwnDescriptors();
-
- // This method only supports generalizing instances to at least the same
- // number of properties.
- DCHECK(old_nof <= new_nof);
-
- for (int i = 0; i < old_nof; i++) {
- PropertyDetails details = new_descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- DCHECK_EQ(kData, details.kind());
- PropertyDetails old_details = old_descriptors->GetDetails(i);
- Representation old_representation = old_details.representation();
- Representation representation = details.representation();
- Handle<Object> value;
- if (old_details.location() == kDescriptor) {
- if (old_details.kind() == kAccessor) {
- // In case of kAccessor -> kData property reconfiguration, the property
- // must already be prepared for data of a certain type.
- DCHECK(!details.representation().IsNone());
- if (details.representation().IsDouble()) {
- value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
- } else {
- value = isolate->factory()->uninitialized_value();
- }
- } else {
- DCHECK_EQ(kData, old_details.kind());
- value = handle(old_descriptors->GetStrongValue(i), isolate);
- DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
- }
- } else {
- DCHECK_EQ(kField, old_details.location());
- FieldIndex index = FieldIndex::ForDescriptor(*old_map, i);
- if (object->IsUnboxedDoubleField(index)) {
- uint64_t old_bits = object->RawFastDoublePropertyAsBitsAt(index);
- if (representation.IsDouble()) {
- value = isolate->factory()->NewMutableHeapNumberFromBits(old_bits);
- } else {
- value = isolate->factory()->NewHeapNumberFromBits(old_bits);
- }
- } else {
- value = handle(object->RawFastPropertyAt(index), isolate);
- if (!old_representation.IsDouble() && representation.IsDouble()) {
- DCHECK_IMPLIES(old_representation.IsNone(),
- value->IsUninitialized(isolate));
- value = Object::NewStorageFor(isolate, value, representation);
- } else if (old_representation.IsDouble() &&
- !representation.IsDouble()) {
- value = Object::WrapForRead(isolate, value, old_representation);
- }
- }
- }
- DCHECK(!(representation.IsDouble() && value->IsSmi()));
- int target_index = new_descriptors->GetFieldIndex(i);
- if (target_index < inobject) {
- inobject_props->set(target_index, *value);
- } else {
- array->set(target_index - inobject, *value);
- }
- }
-
- for (int i = old_nof; i < new_nof; i++) {
- PropertyDetails details = new_descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- DCHECK_EQ(kData, details.kind());
- Handle<Object> value;
- if (details.representation().IsDouble()) {
- value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
- } else {
- value = isolate->factory()->uninitialized_value();
- }
- int target_index = new_descriptors->GetFieldIndex(i);
- if (target_index < inobject) {
- inobject_props->set(target_index, *value);
- } else {
- array->set(target_index - inobject, *value);
- }
- }
-
- // From here on we cannot fail and we shouldn't GC anymore.
- DisallowHeapAllocation no_allocation;
-
- Heap* heap = isolate->heap();
-
- int old_instance_size = old_map->instance_size();
-
- heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
-
- // Copy (real) inobject properties. If necessary, stop at number_of_fields to
- // avoid overwriting |one_pointer_filler_map|.
- int limit = Min(inobject, number_of_fields);
- for (int i = 0; i < limit; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
- Object value = inobject_props->get(i);
- // Can't use JSObject::FastPropertyAtPut() because the proper map has not
- // been set yet.
- if (new_map->IsUnboxedDoubleField(index)) {
- DCHECK(value->IsMutableHeapNumber());
- // Ensure that all bits of the double value are preserved.
- object->RawFastDoublePropertyAsBitsAtPut(
- index, MutableHeapNumber::cast(value)->value_as_bits());
- if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
- // Transition from tagged to untagged slot.
- heap->ClearRecordedSlot(*object,
- HeapObject::RawField(*object, index.offset()));
- } else {
-#ifdef DEBUG
- heap->VerifyClearedSlot(*object,
- HeapObject::RawField(*object, index.offset()));
-#endif
- }
- } else {
- object->RawFastPropertyAtPut(index, value);
- }
- }
-
- object->SetProperties(*array);
-
- // Create filler object past the new instance size.
- int new_instance_size = new_map->instance_size();
- int instance_size_delta = old_instance_size - new_instance_size;
- DCHECK_GE(instance_size_delta, 0);
-
- if (instance_size_delta > 0) {
- Address address = object->address();
- heap->CreateFillerObjectAt(address + new_instance_size, instance_size_delta,
- ClearRecordedSlots::kYes);
- }
-
- // We store the new map using a release store after creating a filler for
- // the left-over space, to avoid races with the sweeper thread.
- object->synchronized_set_map(*new_map);
-}
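// Illustrative note, not part of this diff: the ordering at the end of
// MigrateFastToFast() above is deliberate. The filler is written before the
// release store of the new map, so a concurrent sweeper never observes the
// shrunk object with a stale tail:
//
//   heap->CreateFillerObjectAt(address + new_instance_size,
//                              instance_size_delta, ClearRecordedSlots::kYes);
//   object->synchronized_set_map(*new_map);  // Release store publishes layout.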
-
-void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
- int expected_additional_properties) {
- // The global object is always normalized.
- DCHECK(!object->IsJSGlobalObject());
- // JSGlobalProxy must never be normalized.
- DCHECK(!object->IsJSGlobalProxy());
-
- DCHECK_IMPLIES(new_map->is_prototype_map(),
- Map::IsPrototypeChainInvalidated(*new_map));
-
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- Handle<Map> map(object->map(), isolate);
-
- // Allocate new content.
- int real_size = map->NumberOfOwnDescriptors();
- int property_count = real_size;
- if (expected_additional_properties > 0) {
- property_count += expected_additional_properties;
- } else {
- // Make space for a few more properties.
- property_count += NameDictionary::kInitialCapacity;
- }
- Handle<NameDictionary> dictionary =
- NameDictionary::New(isolate, property_count);
-
- Handle<DescriptorArray> descs(map->instance_descriptors(), isolate);
- for (int i = 0; i < real_size; i++) {
- PropertyDetails details = descs->GetDetails(i);
- Handle<Name> key(descs->GetKey(i), isolate);
- Handle<Object> value;
- if (details.location() == kField) {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- if (details.kind() == kData) {
- if (object->IsUnboxedDoubleField(index)) {
- double old_value = object->RawFastDoublePropertyAt(index);
- value = isolate->factory()->NewHeapNumber(old_value);
- } else {
- value = handle(object->RawFastPropertyAt(index), isolate);
- if (details.representation().IsDouble()) {
- DCHECK(value->IsMutableHeapNumber());
- double old_value = Handle<MutableHeapNumber>::cast(value)->value();
- value = isolate->factory()->NewHeapNumber(old_value);
- }
- }
- } else {
- DCHECK_EQ(kAccessor, details.kind());
- value = handle(object->RawFastPropertyAt(index), isolate);
- }
-
- } else {
- DCHECK_EQ(kDescriptor, details.location());
- value = handle(descs->GetStrongValue(i), isolate);
- }
- DCHECK(!value.is_null());
- PropertyDetails d(details.kind(), details.attributes(),
- PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(isolate, dictionary, key, value, d);
- }
-
- // Copy the next enumeration index from the instance descriptors.
- dictionary->SetNextEnumerationIndex(real_size + 1);
-
- // From here on we cannot fail and we shouldn't GC anymore.
- DisallowHeapAllocation no_allocation;
-
- Heap* heap = isolate->heap();
- int old_instance_size = map->instance_size();
- heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
-
- // Resize the object in the heap if necessary.
- int new_instance_size = new_map->instance_size();
- int instance_size_delta = old_instance_size - new_instance_size;
- DCHECK_GE(instance_size_delta, 0);
-
- if (instance_size_delta > 0) {
- heap->CreateFillerObjectAt(object->address() + new_instance_size,
- instance_size_delta, ClearRecordedSlots::kYes);
- }
-
- // We store the new map using a release store after creating a filler for
- // the left-over space, to avoid races with the sweeper thread.
- object->synchronized_set_map(*new_map);
-
- object->SetProperties(*dictionary);
-
- // Ensure that the in-object space of the slow-mode object does not contain
- // random garbage.
- int inobject_properties = new_map->GetInObjectProperties();
- if (inobject_properties) {
- Heap* heap = isolate->heap();
- heap->ClearRecordedSlotRange(
- object->address() + map->GetInObjectPropertyOffset(0),
- object->address() + new_instance_size);
-
- for (int i = 0; i < inobject_properties; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
- object->RawFastPropertyAtPut(index, Smi::kZero);
- }
- }
-
- isolate->counters()->props_to_dictionary()->Increment();
-
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- StdoutStream os;
- os << "Object properties have been normalized:\n";
- object->Print(os);
- }
-#endif
-}
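// Minimal sketch, not part of this diff; |key|, |value| and |details| are
// hypothetical stand-ins. The slow path above boils down to building a
// NameDictionary from the descriptor array and publishing it after the map:
//
//   Handle<NameDictionary> dict = NameDictionary::New(isolate, property_count);
//   dict = NameDictionary::Add(isolate, dict, key, value, details);
//   object->synchronized_set_map(*new_map);
//   object->SetProperties(*dict);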
-
-} // namespace
-
-// static
-void JSObject::NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
- Isolate* isolate) {
- if (!old_map->is_prototype_map()) return;
-
- InvalidatePrototypeChains(*old_map);
-
- // If the map was registered with its prototype before, ensure that it
- // registers with its new prototype now. This preserves the invariant that
- // when a map on a prototype chain is registered with its prototype, then
- // all prototypes further up the chain are also registered with their
- // respective prototypes.
- UpdatePrototypeUserRegistration(old_map, new_map, isolate);
-}
-
-void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
- int expected_additional_properties) {
- if (object->map() == *new_map) return;
- Handle<Map> old_map(object->map(), object->GetIsolate());
- NotifyMapChange(old_map, new_map, object->GetIsolate());
-
- if (old_map->is_dictionary_map()) {
- // For slow-to-fast migrations, JSObject::MigrateSlowToFast() must be used
- // instead.
- CHECK(new_map->is_dictionary_map());
-
- // Slow-to-slow migration is trivial.
- object->synchronized_set_map(*new_map);
- } else if (!new_map->is_dictionary_map()) {
- MigrateFastToFast(object, new_map);
- if (old_map->is_prototype_map()) {
- DCHECK(!old_map->is_stable());
- DCHECK(new_map->is_stable());
- DCHECK(new_map->owns_descriptors());
- DCHECK(old_map->owns_descriptors());
- // Transfer ownership to the new map. Keep the descriptor pointer of the
- // old map intact because the concurrent marker might be iterating the
- // object with the old map.
- old_map->set_owns_descriptors(false);
- DCHECK(old_map->is_abandoned_prototype_map());
- // Ensure that no transition was inserted for prototype migrations.
- DCHECK_EQ(0, TransitionsAccessor(object->GetIsolate(), old_map)
- .NumberOfTransitions());
- DCHECK(new_map->GetBackPointer()->IsUndefined());
- DCHECK(object->map() != *old_map);
- }
- } else {
- MigrateFastToSlow(object, new_map, expected_additional_properties);
- }
-
- // Careful: Don't allocate here!
- // For some callers of this method, |object| might be in an inconsistent
- // state now: the new map might have a new elements_kind, but the object's
- // elements pointer hasn't been updated yet. Callers will fix this, but in
- // the meantime, (indirectly) calling JSObjectVerify() must be avoided.
- // When adding code here, add a DisallowHeapAllocation too.
-}
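// Usage sketch, an illustration rather than part of this diff: callers
// typically obtain an up-to-date map first and then migrate, exactly as
// JSObject::MigrateInstance() further below does:
//
//   Handle<Map> updated = Map::Update(isolate, handle(object->map(), isolate));
//   JSObject::MigrateToMap(object, updated);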
-
-void JSObject::ForceSetPrototype(Handle<JSObject> object,
- Handle<Object> proto) {
- // object.__proto__ = proto;
- Handle<Map> old_map = Handle<Map>(object->map(), object->GetIsolate());
- Handle<Map> new_map =
- Map::Copy(object->GetIsolate(), old_map, "ForceSetPrototype");
- Map::SetPrototype(object->GetIsolate(), new_map, proto);
- JSObject::MigrateToMap(object, new_map);
-}
-
-int Map::NumberOfFields() const {
- DescriptorArray descriptors = instance_descriptors();
- int result = 0;
- for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
- if (descriptors->GetDetails(i).location() == kField) result++;
- }
- return result;
-}
-
-Map::FieldCounts Map::GetFieldCounts() const {
- DescriptorArray descriptors = instance_descriptors();
- int mutable_count = 0;
- int const_count = 0;
- for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() == kField) {
- switch (details.constness()) {
- case PropertyConstness::kMutable:
- mutable_count++;
- break;
- case PropertyConstness::kConst:
- const_count++;
- break;
- }
- }
- }
- return FieldCounts(mutable_count, const_count);
-}
-
-bool Map::HasOutOfObjectProperties() const {
- return GetInObjectProperties() < NumberOfFields();
+bool HeapObject::IsExternal(Isolate* isolate) const {
+ return map()->FindRootMap(isolate) == isolate->heap()->external_map();
}
void DescriptorArray::GeneralizeAllFields() {
@@ -4707,494 +2419,21 @@ void DescriptorArray::GeneralizeAllFields() {
}
}
-Handle<Map> Map::CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
- ElementsKind elements_kind,
- int modify_index, PropertyKind kind,
- PropertyAttributes attributes,
- const char* reason) {
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> descriptors = DescriptorArray::CopyUpTo(
- isolate, old_descriptors, number_of_own_descriptors);
- descriptors->GeneralizeAllFields();
-
- Handle<LayoutDescriptor> new_layout_descriptor(
- LayoutDescriptor::FastPointerLayout(), isolate);
- Handle<Map> new_map = CopyReplaceDescriptors(
- isolate, map, descriptors, new_layout_descriptor, OMIT_TRANSITION,
- MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
-
- // Unless the instance is being migrated, ensure that modify_index is a field.
- if (modify_index >= 0) {
- PropertyDetails details = descriptors->GetDetails(modify_index);
- if (details.constness() != PropertyConstness::kMutable ||
- details.location() != kField || details.attributes() != attributes) {
- int field_index = details.location() == kField
- ? details.field_index()
- : new_map->NumberOfFields();
- Descriptor d = Descriptor::DataField(
- isolate, handle(descriptors->GetKey(modify_index), isolate),
- field_index, attributes, Representation::Tagged());
- descriptors->Replace(modify_index, &d);
- if (details.location() != kField) {
- new_map->AccountAddedPropertyField();
- }
- } else {
- DCHECK(details.attributes() == attributes);
- }
-
- if (FLAG_trace_generalization) {
- MaybeHandle<FieldType> field_type = FieldType::None(isolate);
- if (details.location() == kField) {
- field_type = handle(
- map->instance_descriptors()->GetFieldType(modify_index), isolate);
- }
- map->PrintGeneralization(
- isolate, stdout, reason, modify_index,
- new_map->NumberOfOwnDescriptors(), new_map->NumberOfOwnDescriptors(),
- details.location() == kDescriptor, details.representation(),
- Representation::Tagged(), field_type, MaybeHandle<Object>(),
- FieldType::Any(isolate), MaybeHandle<Object>());
- }
- }
- new_map->set_elements_kind(elements_kind);
- return new_map;
-}
-
-void Map::DeprecateTransitionTree(Isolate* isolate) {
- if (is_deprecated()) return;
- DisallowHeapAllocation no_gc;
- TransitionsAccessor transitions(isolate, *this, &no_gc);
- int num_transitions = transitions.NumberOfTransitions();
- for (int i = 0; i < num_transitions; ++i) {
- transitions.GetTarget(i)->DeprecateTransitionTree(isolate);
- }
- DCHECK(!constructor_or_backpointer()->IsFunctionTemplateInfo());
- set_is_deprecated(true);
- if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Deprecate", *this, Map()));
- }
- dependent_code()->DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kTransitionGroup);
- NotifyLeafMapLayoutChange(isolate);
-}
-
-
-// Installs |new_descriptors| over the current instance_descriptors to ensure
-// proper sharing of descriptor arrays.
-void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
- LayoutDescriptor new_layout_descriptor) {
- // Don't overwrite the empty descriptor array or initial map's descriptors.
- if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined(isolate)) {
- return;
- }
-
- DescriptorArray to_replace = instance_descriptors();
- // Replace the descriptors by |new_descriptors| in all maps that share them.
- // The old descriptors will not be trimmed by the mark-compactor, so we need
- // to mark all of their elements.
- Map current = *this;
- MarkingBarrierForDescriptorArray(isolate->heap(), current, to_replace,
- to_replace->number_of_descriptors());
- while (current->instance_descriptors() == to_replace) {
- Object next = current->GetBackPointer();
- if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
- current->SetEnumLength(kInvalidEnumCacheSentinel);
- current->UpdateDescriptors(isolate, new_descriptors, new_layout_descriptor,
- current->NumberOfOwnDescriptors());
- current = Map::cast(next);
- }
- set_owns_descriptors(false);
-}
-
-Map Map::FindRootMap(Isolate* isolate) const {
- Map result = *this;
- while (true) {
- Object back = result->GetBackPointer();
- if (back->IsUndefined(isolate)) {
- // The initial map always owns its descriptors and doesn't have unused
- // entries in the descriptor array.
- DCHECK(result->owns_descriptors());
- DCHECK_EQ(result->NumberOfOwnDescriptors(),
- result->instance_descriptors()->number_of_descriptors());
- return result;
- }
- result = Map::cast(back);
- }
-}
-
-Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
- DisallowHeapAllocation no_allocation;
- DCHECK_EQ(kField, instance_descriptors()->GetDetails(descriptor).location());
- Map result = *this;
- while (true) {
- Object back = result->GetBackPointer();
- if (back->IsUndefined(isolate)) break;
- const Map parent = Map::cast(back);
- if (parent->NumberOfOwnDescriptors() <= descriptor) break;
- result = parent;
- }
- return result;
-}
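// Worked example (illustrative): for a transition chain
//   {} -> {a} -> {a, b} -> {a, b, c}
// calling FindFieldOwner(isolate, 1) on the last map walks the back pointers
// until the parent no longer owns descriptor 1 and returns the {a, b} map,
// i.e. the map that introduced field |b|.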
-
-void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
- PropertyConstness new_constness,
- Representation new_representation,
- const MaybeObjectHandle& new_wrapped_type) {
- DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
- // We store raw pointers in the queue, so no allocations are allowed.
- DisallowHeapAllocation no_allocation;
- PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
- if (details.location() != kField) return;
- DCHECK_EQ(kData, details.kind());
-
- Zone zone(isolate->allocator(), ZONE_NAME);
- ZoneQueue<Map> backlog(&zone);
- backlog.push(*this);
-
- while (!backlog.empty()) {
- Map current = backlog.front();
- backlog.pop();
-
- TransitionsAccessor transitions(isolate, current, &no_allocation);
- int num_transitions = transitions.NumberOfTransitions();
- for (int i = 0; i < num_transitions; ++i) {
- Map target = transitions.GetTarget(i);
- backlog.push(target);
- }
- DescriptorArray descriptors = current->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
-
- // Currently, a constness change implies a map change.
- DCHECK_IMPLIES(new_constness != details.constness(),
- FLAG_modify_map_inplace);
-
- // The representation may only be changed here from None to something else.
- DCHECK(details.representation().Equals(new_representation) ||
- details.representation().IsNone());
-
- // Skip if the shared descriptor has already been updated.
- if ((FLAG_modify_map_inplace && new_constness != details.constness()) ||
- descriptors->GetFieldType(descriptor) != *new_wrapped_type.object()) {
- DCHECK_IMPLIES(!FLAG_track_constant_fields,
- new_constness == PropertyConstness::kMutable);
- Descriptor d = Descriptor::DataField(
- name, descriptors->GetFieldIndex(descriptor), details.attributes(),
- new_constness, new_representation, new_wrapped_type);
- descriptors->Replace(descriptor, &d);
- }
- }
-}
-
-bool FieldTypeIsCleared(Representation rep, FieldType type) {
- return type->IsNone() && rep.IsHeapObject();
-}
-
-
-// static
-Handle<FieldType> Map::GeneralizeFieldType(Representation rep1,
- Handle<FieldType> type1,
- Representation rep2,
- Handle<FieldType> type2,
- Isolate* isolate) {
- // Cleared field types need special treatment: they represent lost knowledge,
- // so we must be conservative, and their generalization with any other type
- // is "Any".
- if (FieldTypeIsCleared(rep1, *type1) || FieldTypeIsCleared(rep2, *type2)) {
- return FieldType::Any(isolate);
- }
- if (type1->NowIs(type2)) return type2;
- if (type2->NowIs(type1)) return type1;
- return FieldType::Any(isolate);
-}
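// Worked example (illustrative): if type1 is FieldType::Class(A) and type2 is
// FieldType::Any, then type1->NowIs(type2) holds and Any is returned. Two
// unrelated class types satisfy neither NowIs() direction, so they also
// generalize to Any, and a cleared type (None with a HeapObject
// representation) forces Any unconditionally.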
-
-// static
-void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index,
- PropertyConstness new_constness,
- Representation new_representation,
- Handle<FieldType> new_field_type) {
- // Check if we actually need to generalize the field type at all.
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
- PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
- PropertyConstness old_constness = old_details.constness();
- Representation old_representation = old_details.representation();
- Handle<FieldType> old_field_type(old_descriptors->GetFieldType(modify_index),
- isolate);
-
- // Return if the current map is already general enough to hold the requested
- // constness and representation/field type.
- if (((FLAG_modify_map_inplace &&
- IsGeneralizableTo(new_constness, old_constness)) ||
- (!FLAG_modify_map_inplace && (old_constness == new_constness))) &&
- old_representation.Equals(new_representation) &&
- !FieldTypeIsCleared(new_representation, *new_field_type) &&
- // Checking old_field_type for being cleared is not necessary because
- // the NowIs check below would fail anyway in that case.
- new_field_type->NowIs(old_field_type)) {
- DCHECK(GeneralizeFieldType(old_representation, old_field_type,
- new_representation, new_field_type, isolate)
- ->NowIs(old_field_type));
- return;
- }
-
- // Determine the field owner.
- Handle<Map> field_owner(map->FindFieldOwner(isolate, modify_index), isolate);
- Handle<DescriptorArray> descriptors(field_owner->instance_descriptors(),
- isolate);
- DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
-
- new_field_type =
- Map::GeneralizeFieldType(old_representation, old_field_type,
- new_representation, new_field_type, isolate);
- if (FLAG_modify_map_inplace) {
- new_constness = GeneralizeConstness(old_constness, new_constness);
- }
-
- PropertyDetails details = descriptors->GetDetails(modify_index);
- Handle<Name> name(descriptors->GetKey(modify_index), isolate);
-
- MaybeObjectHandle wrapped_type(WrapFieldType(isolate, new_field_type));
- field_owner->UpdateFieldType(isolate, modify_index, name, new_constness,
- new_representation, wrapped_type);
- field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kFieldOwnerGroup);
-
- if (FLAG_trace_generalization) {
- map->PrintGeneralization(
- isolate, stdout, "field type generalization", modify_index,
- map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false,
- details.representation(), details.representation(), old_field_type,
- MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
- }
-}
-
-// TODO(ishell): remove.
-// static
-Handle<Map> Map::ReconfigureProperty(Isolate* isolate, Handle<Map> map,
- int modify_index, PropertyKind new_kind,
- PropertyAttributes new_attributes,
- Representation new_representation,
- Handle<FieldType> new_field_type) {
- DCHECK_EQ(kData, new_kind); // Only kData case is supported.
- MapUpdater mu(isolate, map);
- return mu.ReconfigureToDataField(modify_index, new_attributes,
- PropertyConstness::kConst,
- new_representation, new_field_type);
-}
-
-// TODO(ishell): remove.
-// static
-Handle<Map> Map::ReconfigureElementsKind(Isolate* isolate, Handle<Map> map,
- ElementsKind new_elements_kind) {
- MapUpdater mu(isolate, map);
- return mu.ReconfigureElementsKind(new_elements_kind);
-}
-
-namespace {
-
-Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
- DisallowHeapAllocation no_allocation;
- DisallowDeoptimization no_deoptimization(isolate);
-
- Map target = old_map;
- do {
- target = TransitionsAccessor(isolate, target, &no_allocation)
- .GetMigrationTarget();
- } while (!target.is_null() && target->is_deprecated());
- if (target.is_null()) return Map();
-
- // TODO(ishell): if this validation ever becomes a bottleneck, consider adding
- // a bit to the Map telling whether it contains fields whose field types may
- // be cleared.
- // TODO(ishell): revisit handling of cleared field types in
- // TryReplayPropertyTransitions() and consider checking the target map's field
- // types instead of old_map's types.
- // Fall back to slow map updating if the old_map has fast properties with
- // cleared field types.
- int old_nof = old_map->NumberOfOwnDescriptors();
- DescriptorArray old_descriptors = old_map->instance_descriptors();
- for (int i = 0; i < old_nof; i++) {
- PropertyDetails old_details = old_descriptors->GetDetails(i);
- if (old_details.location() == kField && old_details.kind() == kData) {
- FieldType old_type = old_descriptors->GetFieldType(i);
- if (FieldTypeIsCleared(old_details.representation(), old_type)) {
- return Map();
- }
- }
- }
-
- SLOW_DCHECK(Map::TryUpdateSlow(isolate, old_map) == target);
- return target;
-}
-} // namespace
-
-// TODO(ishell): Move TryUpdate() and friends to MapUpdater
-// static
-MaybeHandle<Map> Map::TryUpdate(Isolate* isolate, Handle<Map> old_map) {
- DisallowHeapAllocation no_allocation;
- DisallowDeoptimization no_deoptimization(isolate);
-
- if (!old_map->is_deprecated()) return old_map;
-
- if (FLAG_fast_map_update) {
- Map target_map = SearchMigrationTarget(isolate, *old_map);
- if (!target_map.is_null()) {
- return handle(target_map, isolate);
- }
- }
-
- Map new_map = TryUpdateSlow(isolate, *old_map);
- if (new_map.is_null()) return MaybeHandle<Map>();
- if (FLAG_fast_map_update) {
- TransitionsAccessor(isolate, *old_map, &no_allocation)
- .SetMigrationTarget(new_map);
- }
- return handle(new_map, isolate);
-}
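// Usage sketch (illustrative): TryUpdate() can fail without throwing, so
// callers test the MaybeHandle, as JSObject::TryMigrateInstance() further
// below does:
//
//   Handle<Map> new_map;
//   if (!Map::TryUpdate(isolate, old_map).ToHandle(&new_map)) return false;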
-
-Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
- DisallowHeapAllocation no_allocation;
- DisallowDeoptimization no_deoptimization(isolate);
-
- // Check the state of the root map.
- Map root_map = old_map->FindRootMap(isolate);
- if (root_map->is_deprecated()) {
- JSFunction constructor = JSFunction::cast(root_map->GetConstructor());
- DCHECK(constructor->has_initial_map());
- DCHECK(constructor->initial_map()->is_dictionary_map());
- if (constructor->initial_map()->elements_kind() !=
- old_map->elements_kind()) {
- return Map();
- }
- return constructor->initial_map();
- }
- if (!old_map->EquivalentToForTransition(root_map)) return Map();
-
- ElementsKind from_kind = root_map->elements_kind();
- ElementsKind to_kind = old_map->elements_kind();
- if (from_kind != to_kind) {
- // Try to follow existing elements kind transitions.
- root_map = root_map->LookupElementsTransitionMap(isolate, to_kind);
- if (root_map.is_null()) return Map();
- // From here on, use the map with correct elements kind as root map.
- }
- return root_map->TryReplayPropertyTransitions(isolate, old_map);
-}
-
-Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
- DisallowHeapAllocation no_allocation;
- DisallowDeoptimization no_deoptimization(isolate);
-
- int root_nof = NumberOfOwnDescriptors();
-
- int old_nof = old_map->NumberOfOwnDescriptors();
- DescriptorArray old_descriptors = old_map->instance_descriptors();
-
- Map new_map = *this;
- for (int i = root_nof; i < old_nof; ++i) {
- PropertyDetails old_details = old_descriptors->GetDetails(i);
- Map transition =
- TransitionsAccessor(isolate, new_map, &no_allocation)
- .SearchTransition(old_descriptors->GetKey(i), old_details.kind(),
- old_details.attributes());
- if (transition.is_null()) return Map();
- new_map = transition;
- DescriptorArray new_descriptors = new_map->instance_descriptors();
-
- PropertyDetails new_details = new_descriptors->GetDetails(i);
- DCHECK_EQ(old_details.kind(), new_details.kind());
- DCHECK_EQ(old_details.attributes(), new_details.attributes());
- if (!IsGeneralizableTo(old_details.constness(), new_details.constness())) {
- return Map();
- }
- DCHECK(IsGeneralizableTo(old_details.location(), new_details.location()));
- if (!old_details.representation().fits_into(new_details.representation())) {
- return Map();
- }
- if (new_details.location() == kField) {
- if (new_details.kind() == kData) {
- FieldType new_type = new_descriptors->GetFieldType(i);
- // Cleared field types need special treatment. They represent lost
- // knowledge, so we must first generalize the new_type to "Any".
- if (FieldTypeIsCleared(new_details.representation(), new_type)) {
- return Map();
- }
- DCHECK_EQ(kData, old_details.kind());
- if (old_details.location() == kField) {
- FieldType old_type = old_descriptors->GetFieldType(i);
- if (FieldTypeIsCleared(old_details.representation(), old_type) ||
- !old_type->NowIs(new_type)) {
- return Map();
- }
- } else {
- DCHECK_EQ(kDescriptor, old_details.location());
- DCHECK(!FLAG_track_constant_fields);
- Object old_value = old_descriptors->GetStrongValue(i);
- if (!new_type->NowContains(old_value)) {
- return Map();
- }
- }
-
- } else {
- DCHECK_EQ(kAccessor, new_details.kind());
-#ifdef DEBUG
- FieldType new_type = new_descriptors->GetFieldType(i);
- DCHECK(new_type->IsAny());
-#endif
- UNREACHABLE();
- }
- } else {
- DCHECK_EQ(kDescriptor, new_details.location());
- if (old_details.location() == kField ||
- old_descriptors->GetStrongValue(i) !=
- new_descriptors->GetStrongValue(i)) {
- return Map();
- }
- }
- }
- if (new_map->NumberOfOwnDescriptors() != old_nof) return Map();
- return new_map;
-}
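// Worked example (illustrative): replaying an old map with field |a| stored
// as Smi over a tree whose corresponding transition now records |a| as Double
// succeeds, because Smi fits_into Double. The reverse direction fails the
// fits_into() check above and returns an empty Map(), pushing the caller onto
// the slower MapUpdater path.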
-
-
-// static
-Handle<Map> Map::Update(Isolate* isolate, Handle<Map> map) {
- if (!map->is_deprecated()) return map;
- if (FLAG_fast_map_update) {
- Map target_map = SearchMigrationTarget(isolate, *map);
- if (!target_map.is_null()) {
- return handle(target_map, isolate);
- }
- }
- MapUpdater mu(isolate, map);
- return mu.Update();
-}
-
-Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
- ShouldThrow should_throw,
- Handle<Object> value) {
- DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
- return SetPropertyWithInterceptorInternal(it, it->GetInterceptor(),
- should_throw, value);
-}
-
MaybeHandle<Object> Object::SetProperty(Isolate* isolate, Handle<Object> object,
Handle<Name> name, Handle<Object> value,
- LanguageMode language_mode,
- StoreOrigin store_origin) {
+ StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw) {
LookupIterator it(isolate, object, name);
- MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_origin));
+ MAYBE_RETURN_NULL(SetProperty(&it, value, store_origin, should_throw));
return value;
}
Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
Handle<Object> value,
- LanguageMode language_mode,
+ Maybe<ShouldThrow> should_throw,
StoreOrigin store_origin, bool* found) {
it->UpdateProtector();
DCHECK(it->IsFound());
- ShouldThrow should_throw =
- is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -5221,7 +2460,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
it->isolate());
}
return JSProxy::SetProperty(it->GetHolder<JSProxy>(), it->GetName(),
- value, receiver, language_mode);
+ value, receiver, should_throw);
}
case LookupIterator::INTERCEPTOR: {
@@ -5304,38 +2543,38 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
}
Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
- LanguageMode language_mode,
- StoreOrigin store_origin) {
+ StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw) {
if (it->IsFound()) {
bool found = true;
Maybe<bool> result =
- SetPropertyInternal(it, value, language_mode, store_origin, &found);
+ SetPropertyInternal(it, value, should_throw, store_origin, &found);
if (found) return result;
}
// If the receiver is the JSGlobalObject, the store was contextual. In case
// the property did not exist yet on the global object itself, we have to
// throw a reference error in strict mode. In sloppy mode, we continue.
- if (is_strict(language_mode) && it->GetReceiver()->IsJSGlobalObject()) {
+ if (it->GetReceiver()->IsJSGlobalObject() &&
+ (GetShouldThrow(it->isolate(), should_throw) ==
+ ShouldThrow::kThrowOnError)) {
it->isolate()->Throw(*it->isolate()->factory()->NewReferenceError(
MessageTemplate::kNotDefined, it->name()));
return Nothing<bool>();
}
- ShouldThrow should_throw =
- is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
return AddDataProperty(it, value, NONE, should_throw, store_origin);
}
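// Worked example (illustrative): in strict mode, a top-level `undeclared = 1`
// reaches the branch above with the JSGlobalObject as receiver and no
// existing property, so a "not defined" ReferenceError is thrown. In sloppy
// mode the store falls through to AddDataProperty() and creates the global.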
Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
- LanguageMode language_mode,
- StoreOrigin store_origin) {
+ StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw) {
Isolate* isolate = it->isolate();
if (it->IsFound()) {
bool found = true;
Maybe<bool> result =
- SetPropertyInternal(it, value, language_mode, store_origin, &found);
+ SetPropertyInternal(it, value, should_throw, store_origin, &found);
if (found) return result;
}
@@ -5344,8 +2583,6 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
// The property either doesn't exist on the holder or exists there as a data
// property.
- ShouldThrow should_throw =
- is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
if (!it->GetReceiver()->IsJSReceiver()) {
return WriteToReadOnlyProperty(it, value, should_throw);
@@ -5421,22 +2658,30 @@ Maybe<bool> Object::CannotCreateProperty(Isolate* isolate,
Handle<Object> receiver,
Handle<Object> name,
Handle<Object> value,
- ShouldThrow should_throw) {
+ Maybe<ShouldThrow> should_throw) {
RETURN_FAILURE(
- isolate, should_throw,
+ isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kStrictCannotCreateProperty, name,
Object::TypeOf(isolate, receiver), receiver));
}
-
-Maybe<bool> Object::WriteToReadOnlyProperty(LookupIterator* it,
- Handle<Object> value,
- ShouldThrow should_throw) {
+Maybe<bool> Object::WriteToReadOnlyProperty(
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> maybe_should_throw) {
+ ShouldThrow should_throw = GetShouldThrow(it->isolate(), maybe_should_throw);
+ if (it->IsFound() && !it->HolderIsReceiver()) {
+ // "Override mistake" attempted, record a use count to track this per
+ // v8:8175
+ v8::Isolate::UseCounterFeature feature =
+ should_throw == kThrowOnError
+ ? v8::Isolate::kAttemptOverrideReadOnlyOnPrototypeStrict
+ : v8::Isolate::kAttemptOverrideReadOnlyOnPrototypeSloppy;
+ it->isolate()->CountUsage(feature);
+ }
return WriteToReadOnlyProperty(it->isolate(), it->GetReceiver(),
it->GetName(), value, should_throw);
}
-
Maybe<bool> Object::WriteToReadOnlyProperty(Isolate* isolate,
Handle<Object> receiver,
Handle<Object> name,
@@ -5447,16 +2692,13 @@ Maybe<bool> Object::WriteToReadOnlyProperty(Isolate* isolate,
Object::TypeOf(isolate, receiver), receiver));
}
-
-Maybe<bool> Object::RedefineIncompatibleProperty(Isolate* isolate,
- Handle<Object> name,
- Handle<Object> value,
- ShouldThrow should_throw) {
- RETURN_FAILURE(isolate, should_throw,
+Maybe<bool> Object::RedefineIncompatibleProperty(
+ Isolate* isolate, Handle<Object> name, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw) {
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed, name));
}
-
Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
DCHECK_IMPLIES(it->GetReceiver()->IsJSProxy(),
it->GetName()->IsPrivateName());
@@ -5513,7 +2755,7 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
PropertyAttributes attributes,
- ShouldThrow should_throw,
+ Maybe<ShouldThrow> should_throw,
StoreOrigin store_origin) {
if (!it->GetReceiver()->IsJSReceiver()) {
return CannotCreateProperty(it->isolate(), it->GetReceiver(), it->GetName(),
@@ -5524,7 +2766,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
// JSProxy::SetPrivateSymbol.
if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate() &&
!it->GetName()->IsPrivateName()) {
- RETURN_FAILURE(it->isolate(), should_throw,
+ RETURN_FAILURE(it->isolate(), GetShouldThrow(it->isolate(), should_throw),
NewTypeError(MessageTemplate::kProxyPrivate));
}
@@ -5543,7 +2785,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
if (it->ExtendingNonExtensible(receiver)) {
RETURN_FAILURE(
- isolate, should_throw,
+ isolate, GetShouldThrow(it->isolate(), should_throw),
NewTypeError(MessageTemplate::kObjectNotExtensible, it->GetName()));
}
@@ -5551,7 +2793,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
if (receiver->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
if (JSArray::WouldChangeReadOnlyLength(array, it->index())) {
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(it->isolate(), should_throw),
NewTypeError(MessageTemplate::kStrictReadOnlyProperty,
isolate->factory()->length_string(),
Object::TypeOf(isolate, array), array));
@@ -5593,99 +2835,6 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
return Just(true);
}
-void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
- // Only supports adding slack to owned descriptors.
- DCHECK(map->owns_descriptors());
-
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- int old_size = map->NumberOfOwnDescriptors();
- if (slack <= descriptors->number_of_slack_descriptors()) return;
-
- Handle<DescriptorArray> new_descriptors =
- DescriptorArray::CopyUpTo(isolate, descriptors, old_size, slack);
-
- DisallowHeapAllocation no_allocation;
- // The descriptors are still the same, so keep the layout descriptor.
- LayoutDescriptor layout_descriptor = map->GetLayoutDescriptor();
-
- if (old_size == 0) {
- map->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
- map->NumberOfOwnDescriptors());
- return;
- }
-
- // If the source descriptors had an enum cache, we copy it. This ensures
- // that the maps to which we push the new descriptor array back can rely
- // on a cache always being available once it is set. If the map has more
- // enumerated descriptors than are available in the original cache, the
- // cache will be lazily replaced by the extended cache when needed.
- new_descriptors->CopyEnumCacheFrom(*descriptors);
-
- // Replace the descriptors by |new_descriptors| in all maps that share them.
- // The old descriptors will not be trimmed by the mark-compactor, so we need
- // to mark all of their elements.
- MarkingBarrierForDescriptorArray(isolate->heap(), *map, *descriptors,
- descriptors->number_of_descriptors());
-
- Map current = *map;
- while (current->instance_descriptors() == *descriptors) {
- Object next = current->GetBackPointer();
- if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
- current->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
- current->NumberOfOwnDescriptors());
- current = Map::cast(next);
- }
- map->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
- map->NumberOfOwnDescriptors());
-}
-
-// static
-Handle<Map> Map::GetObjectCreateMap(Isolate* isolate,
- Handle<HeapObject> prototype) {
- Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
- isolate);
- if (map->prototype() == *prototype) return map;
- if (prototype->IsNull(isolate)) {
- return isolate->slow_object_with_null_prototype_map();
- }
- if (prototype->IsJSObject()) {
- Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
- if (!js_prototype->map()->is_prototype_map()) {
- JSObject::OptimizeAsPrototype(js_prototype);
- }
- Handle<PrototypeInfo> info =
- Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
- // TODO(verwaest): Use inobject slack tracking for this map.
- if (info->HasObjectCreateMap()) {
- map = handle(info->ObjectCreateMap(), isolate);
- } else {
- map = Map::CopyInitialMap(isolate, map);
- Map::SetPrototype(isolate, map, prototype);
- PrototypeInfo::SetObjectCreateMap(info, map);
- }
- return map;
- }
-
- return Map::TransitionToPrototype(isolate, map, prototype);
-}
-
-// static
-MaybeHandle<Map> Map::TryGetObjectCreateMap(Isolate* isolate,
- Handle<HeapObject> prototype) {
- Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
- isolate);
- if (map->prototype() == *prototype) return map;
- if (prototype->IsNull(isolate)) {
- return isolate->slow_object_with_null_prototype_map();
- }
- if (!prototype->IsJSObject()) return MaybeHandle<Map>();
- Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
- if (!js_prototype->map()->is_prototype_map()) return MaybeHandle<Map>();
- Handle<PrototypeInfo> info =
- Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
- if (!info->HasObjectCreateMap()) return MaybeHandle<Map>();
- return handle(info->ObjectCreateMap(), isolate);
-}
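// Illustrative note, an assumption based on the code above: both helpers
// cache the map used for Object.create(proto) in the prototype's
// PrototypeInfo, so repeated Object.create() calls with the same prototype
// share one map. TryGetObjectCreateMap() is the variant that never creates a
// new map and only consults the cache.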
template <class T>
static int AppendUniqueCallbacks(Isolate* isolate,
@@ -5740,191 +2889,8 @@ int AccessorInfo::AppendUnique(Isolate* isolate, Handle<Object> descriptors,
valid_descriptors);
}
-static bool ContainsMap(MapHandles const& maps, Map map) {
- DCHECK(!map.is_null());
- for (Handle<Map> current : maps) {
- if (!current.is_null() && *current == map) return true;
- }
- return false;
-}
-
-Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
- MapHandles const& candidates) {
- DisallowHeapAllocation no_allocation;
- DisallowDeoptimization no_deoptimization(isolate);
-
- if (is_prototype_map()) return Map();
-
- ElementsKind kind = elements_kind();
- bool packed = IsFastPackedElementsKind(kind);
-
- Map transition;
- if (IsTransitionableFastElementsKind(kind)) {
- // Check the state of the root map.
- Map root_map = FindRootMap(isolate);
- if (!EquivalentToForElementsKindTransition(root_map)) return Map();
- root_map = root_map->LookupElementsTransitionMap(isolate, kind);
- DCHECK(!root_map.is_null());
- // Starting from the next existing elements kind transition, try to
- // replay the property transitions that do not involve instance rewriting
- // (ElementsTransitionAndStoreStub does not support that).
- for (root_map = root_map->ElementsTransitionMap();
- !root_map.is_null() && root_map->has_fast_elements();
- root_map = root_map->ElementsTransitionMap()) {
- Map current = root_map->TryReplayPropertyTransitions(isolate, *this);
- if (current.is_null()) continue;
- if (InstancesNeedRewriting(current)) continue;
-
- if (ContainsMap(candidates, current) &&
- (packed || !IsFastPackedElementsKind(current->elements_kind()))) {
- transition = current;
- packed = packed && IsFastPackedElementsKind(current->elements_kind());
- }
- }
- }
- return transition;
-}
-
-static Map FindClosestElementsTransition(Isolate* isolate, Map map,
- ElementsKind to_kind) {
- // Ensure we are requested to search for an elements kind transition
- // "near the root".
- DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
- map->NumberOfOwnDescriptors());
- Map current_map = map;
-
- ElementsKind kind = map->elements_kind();
- while (kind != to_kind) {
- Map next_map = current_map->ElementsTransitionMap();
- if (next_map.is_null()) return current_map;
- kind = next_map->elements_kind();
- current_map = next_map;
- }
-
- DCHECK_EQ(to_kind, current_map->elements_kind());
- return current_map;
-}
-
-Map Map::LookupElementsTransitionMap(Isolate* isolate, ElementsKind to_kind) {
- Map to_map = FindClosestElementsTransition(isolate, *this, to_kind);
- if (to_map->elements_kind() == to_kind) return to_map;
- return Map();
-}
-
-bool Map::IsMapInArrayPrototypeChain(Isolate* isolate) const {
- if (isolate->initial_array_prototype()->map() == *this) {
- return true;
- }
-
- if (isolate->initial_object_prototype()->map() == *this) {
- return true;
- }
-
- return false;
-}
-
-static Handle<Map> AddMissingElementsTransitions(Isolate* isolate,
- Handle<Map> map,
- ElementsKind to_kind) {
- DCHECK(IsTransitionElementsKind(map->elements_kind()));
-
- Handle<Map> current_map = map;
-
- ElementsKind kind = map->elements_kind();
- TransitionFlag flag;
- if (map->is_prototype_map()) {
- flag = OMIT_TRANSITION;
- } else {
- flag = INSERT_TRANSITION;
- if (IsFastElementsKind(kind)) {
- while (kind != to_kind && !IsTerminalElementsKind(kind)) {
- kind = GetNextTransitionElementsKind(kind);
- current_map = Map::CopyAsElementsKind(isolate, current_map, kind, flag);
- }
- }
- }
- // In case we are exiting the fast elements kind system, just add the map at
- // the end.
- if (kind != to_kind) {
- current_map = Map::CopyAsElementsKind(isolate, current_map, to_kind, flag);
- }
- DCHECK(current_map->elements_kind() == to_kind);
- return current_map;
-}
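// Illustrative note, not part of this diff: missing transitions are created
// one elements kind at a time via GetNextTransitionElementsKind() and
// CopyAsElementsKind(), so maps reaching the same target kind converge on a
// shared chain; prototype maps use OMIT_TRANSITION, so no transition is
// recorded on them.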
-
-Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
- ElementsKind to_kind) {
- ElementsKind from_kind = map->elements_kind();
- if (from_kind == to_kind) return map;
-
- Context native_context = isolate->context()->native_context();
- if (from_kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
- if (*map == native_context->fast_aliased_arguments_map()) {
- DCHECK_EQ(SLOW_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
- return handle(native_context->slow_aliased_arguments_map(), isolate);
- }
- } else if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
- if (*map == native_context->slow_aliased_arguments_map()) {
- DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
- return handle(native_context->fast_aliased_arguments_map(), isolate);
- }
- } else if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
- // Reuse map transitions for JSArrays.
- DisallowHeapAllocation no_gc;
- if (native_context->GetInitialJSArrayMap(from_kind) == *map) {
- Object maybe_transitioned_map =
- native_context->get(Context::ArrayMapIndex(to_kind));
- if (maybe_transitioned_map->IsMap()) {
- return handle(Map::cast(maybe_transitioned_map), isolate);
- }
- }
- }
-
- DCHECK(!map->IsUndefined(isolate));
- // Check if we can go back in the elements kind transition chain.
- if (IsHoleyElementsKind(from_kind) &&
- to_kind == GetPackedElementsKind(from_kind) &&
- map->GetBackPointer()->IsMap() &&
- Map::cast(map->GetBackPointer())->elements_kind() == to_kind) {
- return handle(Map::cast(map->GetBackPointer()), isolate);
- }
-
- bool allow_store_transition = IsTransitionElementsKind(from_kind);
- // Only store fast element maps in ascending generality.
- if (IsFastElementsKind(to_kind)) {
- allow_store_transition =
- allow_store_transition && IsTransitionableFastElementsKind(from_kind) &&
- IsMoreGeneralElementsKindTransition(from_kind, to_kind);
- }
-
- if (!allow_store_transition) {
- return Map::CopyAsElementsKind(isolate, map, to_kind, OMIT_TRANSITION);
- }
-
- return Map::ReconfigureElementsKind(isolate, map, to_kind);
-}
-
-
-// static
-Handle<Map> Map::AsElementsKind(Isolate* isolate, Handle<Map> map,
- ElementsKind kind) {
- Handle<Map> closest_map(FindClosestElementsTransition(isolate, *map, kind),
- isolate);
-
- if (closest_map->elements_kind() == kind) {
- return closest_map;
- }
-
- return AddMissingElementsTransitions(isolate, closest_map, kind);
-}
-
-
-Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
- ElementsKind to_kind) {
- Handle<Map> map(object->map(), object->GetIsolate());
- return Map::TransitionElementsTo(object->GetIsolate(), map, to_kind);
-}
void JSProxy::Revoke(Handle<JSProxy> proxy) {
@@ -6035,14 +3001,12 @@ Maybe<bool> JSProxy::CheckHasTrap(Isolate* isolate, Handle<Name> name,
Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
Handle<Object> value, Handle<Object> receiver,
- LanguageMode language_mode) {
+ Maybe<ShouldThrow> should_throw) {
DCHECK(!name->IsPrivate());
Isolate* isolate = proxy->GetIsolate();
STACK_CHECK(isolate, Nothing<bool>());
Factory* factory = isolate->factory();
Handle<String> trap_name = factory->set_string();
- ShouldThrow should_throw =
- is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
if (proxy->IsRevoked()) {
isolate->Throw(
@@ -6058,8 +3022,9 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
if (trap->IsUndefined(isolate)) {
LookupIterator it =
LookupIterator::PropertyOrElement(isolate, receiver, name, target);
- return Object::SetSuperProperty(&it, value, language_mode,
- StoreOrigin::kMaybeKeyed);
+
+ return Object::SetSuperProperty(&it, value, StoreOrigin::kMaybeKeyed,
+ should_throw);
}
Handle<Object> trap_result;
@@ -6069,7 +3034,7 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
Execution::Call(isolate, trap, handler, arraysize(args), args),
Nothing<bool>());
if (!trap_result->BooleanValue(isolate)) {
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
trap_name, name));
}
@@ -6083,7 +3048,6 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
return Just(true);
}
-
Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
Handle<Name> name,
LanguageMode language_mode) {
@@ -6163,142 +3127,18 @@ MaybeHandle<JSProxy> JSProxy::New(Isolate* isolate, Handle<Object> target,
// static
-MaybeHandle<Context> JSProxy::GetFunctionRealm(Handle<JSProxy> proxy) {
+MaybeHandle<NativeContext> JSProxy::GetFunctionRealm(Handle<JSProxy> proxy) {
DCHECK(proxy->map()->is_constructor());
if (proxy->IsRevoked()) {
THROW_NEW_ERROR(proxy->GetIsolate(),
- NewTypeError(MessageTemplate::kProxyRevoked), Context);
+ NewTypeError(MessageTemplate::kProxyRevoked),
+ NativeContext);
}
Handle<JSReceiver> target(JSReceiver::cast(proxy->target()),
proxy->GetIsolate());
return JSReceiver::GetFunctionRealm(target);
}
-
-// static
-MaybeHandle<Context> JSBoundFunction::GetFunctionRealm(
- Handle<JSBoundFunction> function) {
- DCHECK(function->map()->is_constructor());
- return JSReceiver::GetFunctionRealm(
- handle(function->bound_target_function(), function->GetIsolate()));
-}
-
-// static
-MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
- Handle<JSBoundFunction> function) {
- Handle<String> prefix = isolate->factory()->bound__string();
- Handle<String> target_name = prefix;
- Factory* factory = isolate->factory();
- // Concatenate the "bound " up to the last non-bound target.
- while (function->bound_target_function()->IsJSBoundFunction()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, target_name,
- factory->NewConsString(prefix, target_name),
- String);
- function = handle(JSBoundFunction::cast(function->bound_target_function()),
- isolate);
- }
- if (function->bound_target_function()->IsJSFunction()) {
- Handle<JSFunction> target(
- JSFunction::cast(function->bound_target_function()), isolate);
- Handle<Object> name = JSFunction::GetName(isolate, target);
- if (!name->IsString()) return target_name;
- return factory->NewConsString(target_name, Handle<String>::cast(name));
- }
- // This will omit the proper target name for bound JSProxies.
- return target_name;
-}
-
-// static
-Maybe<int> JSBoundFunction::GetLength(Isolate* isolate,
- Handle<JSBoundFunction> function) {
- int nof_bound_arguments = function->bound_arguments()->length();
- while (function->bound_target_function()->IsJSBoundFunction()) {
- function = handle(JSBoundFunction::cast(function->bound_target_function()),
- isolate);
- // Make sure we never overflow {nof_bound_arguments}: the number of
- // arguments of a function is strictly limited by the maximum length of a
- // JSArray, so Smi::kMaxValue is a reasonably good overestimate.
- int length = function->bound_arguments()->length();
- if (V8_LIKELY(Smi::kMaxValue - nof_bound_arguments > length)) {
- nof_bound_arguments += length;
- } else {
- nof_bound_arguments = Smi::kMaxValue;
- }
- }
- // All non-JSFunction targets get a direct property and don't use this
- // accessor.
- Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
- isolate);
- Maybe<int> target_length = JSFunction::GetLength(isolate, target);
- if (target_length.IsNothing()) return target_length;
-
- int length = Max(0, target_length.FromJust() - nof_bound_arguments);
- return Just(length);
-}
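// Worked example (illustrative): for function f(a, b, c) {} with
//   g = f.bind(null, 1);  h = g.bind(null, 2);
// the loop above accumulates nof_bound_arguments == 2 while unwrapping to f,
// whose length is 3, so JSBoundFunction::GetLength() returns
// Max(0, 3 - 2) == 1, matching h.length in JavaScript.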
-
-// static
-Handle<Object> JSFunction::GetName(Isolate* isolate,
- Handle<JSFunction> function) {
- if (function->shared()->name_should_print_as_anonymous()) {
- return isolate->factory()->anonymous_string();
- }
- return handle(function->shared()->Name(), isolate);
-}
-
-// static
-Maybe<int> JSFunction::GetLength(Isolate* isolate,
- Handle<JSFunction> function) {
- int length = 0;
- IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
- if (is_compiled_scope.is_compiled()) {
- length = function->shared()->GetLength();
- } else {
- // If the function isn't compiled yet, its length is not known yet.
- // Compile it now and return the correct length.
- if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION,
- &is_compiled_scope)) {
- length = function->shared()->GetLength();
- }
- if (isolate->has_pending_exception()) return Nothing<int>();
- }
- DCHECK_GE(length, 0);
- return Just(length);
-}
-
-// static
-Handle<Context> JSFunction::GetFunctionRealm(Handle<JSFunction> function) {
- DCHECK(function->map()->is_constructor());
- return handle(function->context()->native_context(), function->GetIsolate());
-}
-
-
-// static
-MaybeHandle<Context> JSObject::GetFunctionRealm(Handle<JSObject> object) {
- DCHECK(object->map()->is_constructor());
- DCHECK(!object->IsJSFunction());
- return object->GetCreationContext();
-}
-
-
-// static
-MaybeHandle<Context> JSReceiver::GetFunctionRealm(Handle<JSReceiver> receiver) {
- if (receiver->IsJSProxy()) {
- return JSProxy::GetFunctionRealm(Handle<JSProxy>::cast(receiver));
- }
-
- if (receiver->IsJSFunction()) {
- return JSFunction::GetFunctionRealm(Handle<JSFunction>::cast(receiver));
- }
-
- if (receiver->IsJSBoundFunction()) {
- return JSBoundFunction::GetFunctionRealm(
- Handle<JSBoundFunction>::cast(receiver));
- }
-
- return JSObject::GetFunctionRealm(Handle<JSObject>::cast(receiver));
-}
-
-
Maybe<PropertyAttributes> JSProxy::GetPropertyAttributes(LookupIterator* it) {
PropertyDescriptor desc;
Maybe<bool> found = JSProxy::GetOwnPropertyDescriptor(
@@ -6308,1402 +3148,6 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributes(LookupIterator* it) {
return Just(desc.ToAttributes());
}
-
-void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
- DCHECK(object->map()->GetInObjectProperties() ==
- map->GetInObjectProperties());
- ElementsKind obj_kind = object->map()->elements_kind();
- ElementsKind map_kind = map->elements_kind();
- if (map_kind != obj_kind) {
- ElementsKind to_kind = GetMoreGeneralElementsKind(map_kind, obj_kind);
- if (IsDictionaryElementsKind(obj_kind)) {
- to_kind = obj_kind;
- }
- if (IsDictionaryElementsKind(to_kind)) {
- NormalizeElements(object);
- } else {
- TransitionElementsKind(object, to_kind);
- }
- map = Map::ReconfigureElementsKind(object->GetIsolate(), map, to_kind);
- }
- int number_of_fields = map->NumberOfFields();
- int inobject = map->GetInObjectProperties();
- int unused = map->UnusedPropertyFields();
- int total_size = number_of_fields + unused;
- int external = total_size - inobject;
- // Allocate mutable double boxes if necessary. This is always necessary if
- // we have external properties, but it is also necessary if we only have
- // inobject properties and don't unbox double fields.
- if (!FLAG_unbox_double_fields || external > 0) {
- Isolate* isolate = object->GetIsolate();
-
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- Handle<FixedArray> storage;
- if (!FLAG_unbox_double_fields) {
- storage = isolate->factory()->NewFixedArray(inobject);
- }
-
- Handle<PropertyArray> array =
- isolate->factory()->NewPropertyArray(external);
-
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- Representation representation = details.representation();
- if (!representation.IsDouble()) continue;
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- if (map->IsUnboxedDoubleField(index)) continue;
- auto box = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
- if (index.is_inobject()) {
- storage->set(index.property_index(), *box);
- } else {
- array->set(index.outobject_array_index(), *box);
- }
- }
-
- object->SetProperties(*array);
-
- if (!FLAG_unbox_double_fields) {
- for (int i = 0; i < inobject; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
- Object value = storage->get(i);
- object->RawFastPropertyAtPut(index, value);
- }
- }
- }
- object->synchronized_set_map(*map);
-}
-
-
-void JSObject::MigrateInstance(Handle<JSObject> object) {
- Handle<Map> original_map(object->map(), object->GetIsolate());
- Handle<Map> map = Map::Update(object->GetIsolate(), original_map);
- map->set_is_migration_target(true);
- MigrateToMap(object, map);
- if (FLAG_trace_migration) {
- object->PrintInstanceMigration(stdout, *original_map, *map);
- }
-#if VERIFY_HEAP
- if (FLAG_verify_heap) {
- object->JSObjectVerify(object->GetIsolate());
- }
-#endif
-}
-
-
-// static
-bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
- Isolate* isolate = object->GetIsolate();
- DisallowDeoptimization no_deoptimization(isolate);
- Handle<Map> original_map(object->map(), isolate);
- Handle<Map> new_map;
- if (!Map::TryUpdate(isolate, original_map).ToHandle(&new_map)) {
- return false;
- }
- JSObject::MigrateToMap(object, new_map);
- if (FLAG_trace_migration && *original_map != object->map()) {
- object->PrintInstanceMigration(stdout, *original_map, object->map());
- }
-#if VERIFY_HEAP
- if (FLAG_verify_heap) {
- object->JSObjectVerify(isolate);
- }
-#endif
- return true;
-}
-
-void JSObject::AddProperty(Isolate* isolate, Handle<JSObject> object,
- Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes) {
- LookupIterator it(isolate, object, name, object,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
-#ifdef DEBUG
- uint32_t index;
- DCHECK(!object->IsJSProxy());
- DCHECK(!name->AsArrayIndex(&index));
- Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
- DCHECK(maybe.IsJust());
- DCHECK(!it.IsFound());
- DCHECK(object->map()->is_extensible() || name->IsPrivate());
-#endif
- CHECK(Object::AddDataProperty(&it, value, attributes, kThrowOnError,
- StoreOrigin::kNamed)
- .IsJust());
-}
-
-void JSObject::AddProperty(Isolate* isolate, Handle<JSObject> object,
- const char* name, Handle<Object> value,
- PropertyAttributes attributes) {
- JSObject::AddProperty(isolate, object,
- isolate->factory()->InternalizeUtf8String(name), value,
- attributes);
-}
-
-// Reconfigures a property to a data property with attributes, even if it is not
-// reconfigurable.
-// Requires a LookupIterator that does not look at the prototype chain beyond
-// hidden prototypes.
-MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
- LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- AccessorInfoHandling handling) {
- MAYBE_RETURN_NULL(DefineOwnPropertyIgnoreAttributes(it, value, attributes,
- kThrowOnError, handling));
- return value;
-}
-
-
-Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
- LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- ShouldThrow should_throw, AccessorInfoHandling handling) {
- it->UpdateProtector();
- Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
-
- for (; it->IsFound(); it->Next()) {
- switch (it->state()) {
- case LookupIterator::JSPROXY:
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
-
- case LookupIterator::ACCESS_CHECK:
- if (!it->HasAccess()) {
- it->isolate()->ReportFailedAccessCheck(it->GetHolder<JSObject>());
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
- return Just(true);
- }
- break;
-
- // If there's an interceptor, try to store the property with the
- // interceptor.
- // In case of success, the attributes will have been reset to the default
- // attributes of the interceptor, rather than the incoming attributes.
- //
- // TODO(verwaest): JSProxy afterwards verifies the attributes that the
- // JSProxy claims it has, and checks that they are compatible. If not,
- // it throws. Here we should do the same.
- case LookupIterator::INTERCEPTOR:
- if (handling == DONT_FORCE_FIELD) {
- Maybe<bool> result =
- JSObject::SetPropertyWithInterceptor(it, should_throw, value);
- if (result.IsNothing() || result.FromJust()) return result;
- }
- break;
-
- case LookupIterator::ACCESSOR: {
- Handle<Object> accessors = it->GetAccessors();
-
- // Special handling for AccessorInfo, which behaves like a data
- // property.
- if (accessors->IsAccessorInfo() && handling == DONT_FORCE_FIELD) {
- PropertyAttributes current_attributes = it->property_attributes();
- // Ensure the context isn't changed after calling into accessors.
- AssertNoContextChange ncc(it->isolate());
-
- // Update the attributes before calling the setter. The setter may
- // later change the shape of the property.
- if (current_attributes != attributes) {
- it->TransitionToAccessorPair(accessors, attributes);
- }
-
- return Object::SetPropertyWithAccessor(it, value, should_throw);
- }
-
- it->ReconfigureDataProperty(value, attributes);
- return Just(true);
- }
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return Object::RedefineIncompatibleProperty(
- it->isolate(), it->GetName(), value, should_throw);
-
- case LookupIterator::DATA: {
- // Regular property update if the attributes match.
- if (it->property_attributes() == attributes) {
- return Object::SetDataProperty(it, value);
- }
-
- // Special case: properties of typed arrays cannot be reconfigured to
- // non-writable nor to non-enumerable.
- if (it->IsElement() && object->HasFixedTypedArrayElements()) {
- return Object::RedefineIncompatibleProperty(
- it->isolate(), it->GetName(), value, should_throw);
- }
-
- // Reconfigure the data property if the attributes mismatch.
- it->ReconfigureDataProperty(value, attributes);
-
- return Just(true);
- }
- }
- }
-
- return Object::AddDataProperty(it, value, attributes, should_throw,
- StoreOrigin::kNamed);
-}
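// A sketch (not from this patch) of the control flow above: walk the
// lookup states in order and stop as soon as one fully handles the store;
// only when nothing is found does the definition fall through to an add.
// The State enum and handler are hypothetical stand-ins for LookupIterator.
#include <optional>

enum class State { kAccessCheck, kInterceptor, kAccessor, kData };

std::optional<bool> TryHandle(State s) {
  switch (s) {
    case State::kAccessCheck: return std::nullopt;  // access granted, walk on
    case State::kInterceptor: return std::nullopt;  // did not intercept
    case State::kAccessor:    return true;          // setter consumed the store
    case State::kData:        return true;          // reconfigured in place
  }
  return std::nullopt;
}

bool DefineIgnoringAttributes(const State* states, int n) {
  for (int i = 0; i < n; i++) {
    if (std::optional<bool> done = TryHandle(states[i])) return *done;
  }
  return true;  // not found anywhere: add a fresh data property
}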
-
-MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
- Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes) {
- DCHECK(!value->IsTheHole());
- LookupIterator it(object, name, object, LookupIterator::OWN);
- return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
-}
-
-MaybeHandle<Object> JSObject::SetOwnElementIgnoreAttributes(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes) {
- Isolate* isolate = object->GetIsolate();
- LookupIterator it(isolate, object, index, object, LookupIterator::OWN);
- return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
-}
-
-MaybeHandle<Object> JSObject::DefinePropertyOrElementIgnoreAttributes(
- Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes) {
- Isolate* isolate = object->GetIsolate();
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, name, object, LookupIterator::OWN);
- return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
-}
-
-Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
- LookupIterator* it) {
- return GetPropertyAttributesWithInterceptorInternal(it, it->GetInterceptor());
-}
-
-Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
- LookupIterator* it) {
- for (; it->IsFound(); it->Next()) {
- switch (it->state()) {
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
- case LookupIterator::JSPROXY:
- return JSProxy::GetPropertyAttributes(it);
- case LookupIterator::INTERCEPTOR: {
- Maybe<PropertyAttributes> result =
- JSObject::GetPropertyAttributesWithInterceptor(it);
- if (result.IsNothing()) return result;
- if (result.FromJust() != ABSENT) return result;
- break;
- }
- case LookupIterator::ACCESS_CHECK:
- if (it->HasAccess()) break;
- return JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return Just(ABSENT);
- case LookupIterator::ACCESSOR:
- if (it->GetHolder<Object>()->IsJSModuleNamespace()) {
- return JSModuleNamespace::GetPropertyAttributes(it);
- } else {
- return Just(it->property_attributes());
- }
- case LookupIterator::DATA:
- return Just(it->property_attributes());
- }
- }
- return Just(ABSENT);
-}
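// A sketch (not from this patch): PropertyAttributes is a small bitmask,
// and queries like the one above answer Just(ABSENT) when the walk finds
// no own property. The flag values below are illustrative, not V8's.
#include <cstdio>

enum Attrs : unsigned {
  kNone = 0,
  kReadOnly = 1u << 0,    // property is non-writable
  kDontEnum = 1u << 1,    // hidden from enumeration
  kDontDelete = 1u << 2,  // property is non-configurable
  kAbsent = 1u << 6       // sentinel: no own property was found
};

int main() {
  unsigned frozen = kReadOnly | kDontDelete;
  std::printf("configurable: %s\n", (frozen & kDontDelete) ? "no" : "yes");
  std::printf("writable:     %s\n", (frozen & kReadOnly) ? "no" : "yes");
}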
-
-
-Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) {
- Handle<WeakFixedArray> array(
- isolate->factory()->NewWeakFixedArray(kEntries, TENURED));
- return Handle<NormalizedMapCache>::cast(array);
-}
-
-
-MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
- PropertyNormalizationMode mode) {
- DisallowHeapAllocation no_gc;
- MaybeObject value = WeakFixedArray::Get(GetIndex(fast_map));
- HeapObject heap_object;
- if (!value->GetHeapObjectIfWeak(&heap_object)) {
- return MaybeHandle<Map>();
- }
-
- Map normalized_map = Map::cast(heap_object);
- if (!normalized_map->EquivalentToForNormalization(*fast_map, mode)) {
- return MaybeHandle<Map>();
- }
- return handle(normalized_map, GetIsolate());
-}
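// A sketch (not from this patch) of the lookup discipline above: the cache
// holds weak references, so a hit must both still be alive and still be
// equivalent to what the caller asked for; anything else is a miss.
// std::weak_ptr stands in for V8's weak heap references.
#include <array>
#include <memory>

struct Map { int shape; };

struct WeakMapCache {
  static constexpr int kEntries = 64;
  std::array<std::weak_ptr<Map>, kEntries> slots;

  std::shared_ptr<Map> Get(int index, int wanted_shape) {
    std::shared_ptr<Map> hit = slots[index % kEntries].lock();
    if (!hit) return nullptr;                        // collected: miss
    if (hit->shape != wanted_shape) return nullptr;  // stale: miss
    return hit;
  }

  void Set(int index, const std::shared_ptr<Map>& map) {
    slots[index % kEntries] = map;  // weak store; GC may reclaim it
  }
};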
-
-void NormalizedMapCache::Set(Handle<Map> fast_map, Handle<Map> normalized_map) {
- DisallowHeapAllocation no_gc;
- DCHECK(normalized_map->is_dictionary_map());
- WeakFixedArray::Set(GetIndex(fast_map),
- HeapObjectReference::Weak(*normalized_map));
-}
-
-void JSObject::NormalizeProperties(Handle<JSObject> object,
- PropertyNormalizationMode mode,
- int expected_additional_properties,
- const char* reason) {
- if (!object->HasFastProperties()) return;
-
- Handle<Map> map(object->map(), object->GetIsolate());
- Handle<Map> new_map = Map::Normalize(object->GetIsolate(), map, mode, reason);
-
- MigrateToMap(object, new_map, expected_additional_properties);
-}
-
-
-void JSObject::MigrateSlowToFast(Handle<JSObject> object,
- int unused_property_fields,
- const char* reason) {
- if (object->HasFastProperties()) return;
- DCHECK(!object->IsJSGlobalObject());
- Isolate* isolate = object->GetIsolate();
- Factory* factory = isolate->factory();
- Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
-
- // Make sure we preserve dictionary representation if there are too many
- // descriptors.
- int number_of_elements = dictionary->NumberOfElements();
- if (number_of_elements > kMaxNumberOfDescriptors) return;
-
- Handle<FixedArray> iteration_order =
- NameDictionary::IterationIndices(isolate, dictionary);
-
- int instance_descriptor_length = iteration_order->length();
- int number_of_fields = 0;
-
- // Compute the length of the instance descriptor.
- ReadOnlyRoots roots(isolate);
- for (int i = 0; i < instance_descriptor_length; i++) {
- int index = Smi::ToInt(iteration_order->get(i));
- DCHECK(dictionary->IsKey(roots, dictionary->KeyAt(index)));
-
- PropertyKind kind = dictionary->DetailsAt(index).kind();
- if (kind == kData) {
- if (FLAG_track_constant_fields) {
- number_of_fields += 1;
- } else {
- Object value = dictionary->ValueAt(index);
- if (!value->IsJSFunction()) {
- number_of_fields += 1;
- }
- }
- }
- }
-
- Handle<Map> old_map(object->map(), isolate);
-
- int inobject_props = old_map->GetInObjectProperties();
-
- // Allocate new map.
- Handle<Map> new_map = Map::CopyDropDescriptors(isolate, old_map);
- // We should set this bit only if we need to, and we should not retain the
- // old bit, because turning a map into a dictionary always sets this bit.
- new_map->set_may_have_interesting_symbols(new_map->has_named_interceptor() ||
- new_map->is_access_check_needed());
- new_map->set_is_dictionary_map(false);
-
- NotifyMapChange(old_map, new_map, isolate);
-
-
- if (instance_descriptor_length == 0) {
- DisallowHeapAllocation no_gc;
- DCHECK_LE(unused_property_fields, inobject_props);
- // Transform the object.
- new_map->SetInObjectUnusedPropertyFields(inobject_props);
- object->synchronized_set_map(*new_map);
- object->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
- // Check that it really works.
- DCHECK(object->HasFastProperties());
- if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("SlowToFast", *old_map, *new_map, reason));
- }
- return;
- }
-
- // Allocate the instance descriptor.
- Handle<DescriptorArray> descriptors = DescriptorArray::Allocate(
- isolate, instance_descriptor_length, 0, TENURED);
-
- int number_of_allocated_fields =
- number_of_fields + unused_property_fields - inobject_props;
- if (number_of_allocated_fields < 0) {
- // There is enough inobject space for all fields (including unused).
- number_of_allocated_fields = 0;
- unused_property_fields = inobject_props - number_of_fields;
- }
-
- // Allocate the property array for the fields.
- Handle<PropertyArray> fields =
- factory->NewPropertyArray(number_of_allocated_fields);
-
- bool is_transitionable_elements_kind =
- IsTransitionableFastElementsKind(old_map->elements_kind());
-
- // Fill in the instance descriptor and the fields.
- int current_offset = 0;
- for (int i = 0; i < instance_descriptor_length; i++) {
- int index = Smi::ToInt(iteration_order->get(i));
- Name k = dictionary->NameAt(index);
- // Dictionary keys are internalized upon insertion.
- // TODO(jkummerow): Turn this into a DCHECK if it's not hit in the wild.
- CHECK(k->IsUniqueName());
- Handle<Name> key(k, isolate);
-
- // Properly mark the {new_map} if the {key} is an "interesting symbol".
- if (key->IsInterestingSymbol()) {
- new_map->set_may_have_interesting_symbols(true);
- }
-
- Object value = dictionary->ValueAt(index);
-
- PropertyDetails details = dictionary->DetailsAt(index);
- DCHECK_EQ(kField, details.location());
- DCHECK_EQ(PropertyConstness::kMutable, details.constness());
-
- Descriptor d;
- if (details.kind() == kData) {
- if (!FLAG_track_constant_fields && value->IsJSFunction()) {
- d = Descriptor::DataConstant(key, handle(value, isolate),
- details.attributes());
- } else {
- // Ensure that we make the field constant only when the elements kind
- // is not transitionable.
- PropertyConstness constness =
- FLAG_track_constant_fields && !is_transitionable_elements_kind
- ? PropertyConstness::kConst
- : PropertyConstness::kMutable;
- d = Descriptor::DataField(
- key, current_offset, details.attributes(), constness,
- // TODO(verwaest): value->OptimalRepresentation();
- Representation::Tagged(),
- MaybeObjectHandle(FieldType::Any(isolate)));
- }
- } else {
- DCHECK_EQ(kAccessor, details.kind());
- d = Descriptor::AccessorConstant(key, handle(value, isolate),
- details.attributes());
- }
- details = d.GetDetails();
- if (details.location() == kField) {
- if (current_offset < inobject_props) {
- object->InObjectPropertyAtPut(current_offset, value,
- UPDATE_WRITE_BARRIER);
- } else {
- int offset = current_offset - inobject_props;
- fields->set(offset, value);
- }
- current_offset += details.field_width_in_words();
- }
- descriptors->Set(i, &d);
- }
- DCHECK(current_offset == number_of_fields);
-
- descriptors->Sort();
-
- Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::New(
- isolate, new_map, descriptors, descriptors->number_of_descriptors());
-
- DisallowHeapAllocation no_gc;
- new_map->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
- if (number_of_allocated_fields == 0) {
- new_map->SetInObjectUnusedPropertyFields(unused_property_fields);
- } else {
- new_map->SetOutOfObjectUnusedPropertyFields(unused_property_fields);
- }
-
- if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("SlowToFast", *old_map, *new_map, reason));
- }
- // Transform the object.
- object->synchronized_set_map(*new_map);
-
- object->SetProperties(*fields);
- DCHECK(object->IsJSObject());
-
- // Check that it really works.
- DCHECK(object->HasFastProperties());
-}
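// A worked example (not from this patch) of the allocation arithmetic in
// MigrateSlowToFast above: fields are placed in-object first, and only
// the overflow needs an out-of-object property array.
#include <cstdio>

int main() {
  int number_of_fields = 5, unused_property_fields = 2, inobject_props = 10;
  int overflow = number_of_fields + unused_property_fields - inobject_props;
  if (overflow < 0) {
    // Everything, including the slack, fits in-object: no backing store.
    overflow = 0;
    unused_property_fields = inobject_props - number_of_fields;  // 5
  }
  std::printf("out-of-object slots: %d, unused: %d\n", overflow,
              unused_property_fields);  // prints "0" and "5"
}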
-
-void JSObject::RequireSlowElements(NumberDictionary dictionary) {
- if (dictionary->requires_slow_elements()) return;
- dictionary->set_requires_slow_elements();
- if (map()->is_prototype_map()) {
- // If this object is a prototype (the callee will check), invalidate any
- // prototype chains involving it.
- InvalidatePrototypeChains(map());
- }
-}
-
-Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
- DCHECK(!object->HasFixedTypedArrayElements());
- Isolate* isolate = object->GetIsolate();
- bool is_sloppy_arguments = object->HasSloppyArgumentsElements();
- {
- DisallowHeapAllocation no_gc;
- FixedArrayBase elements = object->elements();
-
- if (is_sloppy_arguments) {
- elements = SloppyArgumentsElements::cast(elements)->arguments();
- }
-
- if (elements->IsNumberDictionary()) {
- return handle(NumberDictionary::cast(elements), isolate);
- }
- }
-
- DCHECK(object->HasSmiOrObjectElements() || object->HasDoubleElements() ||
- object->HasFastArgumentsElements() ||
- object->HasFastStringWrapperElements());
-
- Handle<NumberDictionary> dictionary =
- object->GetElementsAccessor()->Normalize(object);
-
- // Switch to using the dictionary as the backing storage for elements.
- ElementsKind target_kind = is_sloppy_arguments
- ? SLOW_SLOPPY_ARGUMENTS_ELEMENTS
- : object->HasFastStringWrapperElements()
- ? SLOW_STRING_WRAPPER_ELEMENTS
- : DICTIONARY_ELEMENTS;
- Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, target_kind);
- // Set the new map first to satisfy the elements type assert in set_elements().
- JSObject::MigrateToMap(object, new_map);
-
- if (is_sloppy_arguments) {
- SloppyArgumentsElements::cast(object->elements())
- ->set_arguments(*dictionary);
- } else {
- object->set_elements(*dictionary);
- }
-
- isolate->counters()->elements_to_dictionary()->Increment();
-
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- StdoutStream os;
- os << "Object elements have been normalized:\n";
- object->Print(os);
- }
-#endif
-
- DCHECK(object->HasDictionaryElements() ||
- object->HasSlowArgumentsElements() ||
- object->HasSlowStringWrapperElements());
- return dictionary;
-}
-
-namespace {
-
-Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
- DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
- DCHECK(PropertyArray::HashField::is_valid(hash));
-
- ReadOnlyRoots roots = properties->GetReadOnlyRoots();
- if (properties == roots.empty_fixed_array() ||
- properties == roots.empty_property_array() ||
- properties == roots.empty_property_dictionary()) {
- return Smi::FromInt(hash);
- }
-
- if (properties->IsPropertyArray()) {
- PropertyArray::cast(properties)->SetHash(hash);
- DCHECK_LT(0, PropertyArray::cast(properties)->length());
- return properties;
- }
-
- if (properties->IsGlobalDictionary()) {
- GlobalDictionary::cast(properties)->SetHash(hash);
- return properties;
- }
-
- DCHECK(properties->IsNameDictionary());
- NameDictionary::cast(properties)->SetHash(hash);
- return properties;
-}
-
-int GetIdentityHashHelper(JSReceiver object) {
- DisallowHeapAllocation no_gc;
- Object properties = object->raw_properties_or_hash();
- if (properties->IsSmi()) {
- return Smi::ToInt(properties);
- }
-
- if (properties->IsPropertyArray()) {
- return PropertyArray::cast(properties)->Hash();
- }
-
- if (properties->IsNameDictionary()) {
- return NameDictionary::cast(properties)->Hash();
- }
-
- if (properties->IsGlobalDictionary()) {
- return GlobalDictionary::cast(properties)->Hash();
- }
-
-#ifdef DEBUG
- ReadOnlyRoots roots = object->GetReadOnlyRoots();
- DCHECK(properties == roots.empty_fixed_array() ||
- properties == roots.empty_property_dictionary());
-#endif
-
- return PropertyArray::kNoHashSentinel;
-}
-} // namespace
-
-void JSReceiver::SetIdentityHash(int hash) {
- DisallowHeapAllocation no_gc;
- DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
- DCHECK(PropertyArray::HashField::is_valid(hash));
-
- HeapObject existing_properties = HeapObject::cast(raw_properties_or_hash());
- Object new_properties = SetHashAndUpdateProperties(existing_properties, hash);
- set_raw_properties_or_hash(new_properties);
-}
-
-void JSReceiver::SetProperties(HeapObject properties) {
- DCHECK_IMPLIES(properties->IsPropertyArray() &&
- PropertyArray::cast(properties)->length() == 0,
- properties == GetReadOnlyRoots().empty_property_array());
- DisallowHeapAllocation no_gc;
- int hash = GetIdentityHashHelper(*this);
- Object new_properties = properties;
-
- // TODO(cbruni): Make GetIdentityHashHelper return a bool so that we
- // don't have to manually compare against kNoHashSentinel.
- if (hash != PropertyArray::kNoHashSentinel) {
- new_properties = SetHashAndUpdateProperties(properties, hash);
- }
-
- set_raw_properties_or_hash(new_properties);
-}
-
-Object JSReceiver::GetIdentityHash() {
- DisallowHeapAllocation no_gc;
-
- int hash = GetIdentityHashHelper(*this);
- if (hash == PropertyArray::kNoHashSentinel) {
- return GetReadOnlyRoots().undefined_value();
- }
-
- return Smi::FromInt(hash);
-}
-
-// static
-Smi JSReceiver::CreateIdentityHash(Isolate* isolate, JSReceiver key) {
- DisallowHeapAllocation no_gc;
- int hash = isolate->GenerateIdentityHash(PropertyArray::HashField::kMax);
- DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
-
- key->SetIdentityHash(hash);
- return Smi::FromInt(hash);
-}
-
-Smi JSReceiver::GetOrCreateIdentityHash(Isolate* isolate) {
- DisallowHeapAllocation no_gc;
-
- int hash = GetIdentityHashHelper(*this);
- if (hash != PropertyArray::kNoHashSentinel) {
- return Smi::FromInt(hash);
- }
-
- return JSReceiver::CreateIdentityHash(isolate, *this);
-}
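// A sketch (not from this patch) of the lazy identity-hash protocol above:
// return a cached hash when one exists, otherwise generate one and memoize
// it, with a sentinel playing the "not yet assigned" role.
#include <cstdio>

struct Receiver {
  static constexpr int kNoHashSentinel = 0;
  int hash = kNoHashSentinel;

  int GetOrCreateIdentityHash(int* counter) {
    if (hash != kNoHashSentinel) return hash;  // fast path: already cached
    hash = ++*counter;                         // slow path: create and store
    return hash;
  }
};

int main() {
  int counter = 0;
  Receiver r;
  std::printf("%d %d\n", r.GetOrCreateIdentityHash(&counter),
              r.GetOrCreateIdentityHash(&counter));  // same value twice
}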
-
-Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
- ShouldThrow should_throw) {
- Isolate* isolate = it->isolate();
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
- Handle<InterceptorInfo> interceptor(it->GetInterceptor());
- if (interceptor->deleter()->IsUndefined(isolate)) return Nothing<bool>();
-
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
- Object::ConvertReceiver(isolate, receiver),
- Nothing<bool>());
- }
-
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, should_throw);
- Handle<Object> result;
- if (it->IsElement()) {
- result = args.CallIndexedDeleter(interceptor, it->index());
- } else {
- result = args.CallNamedDeleter(interceptor, it->name());
- }
-
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- if (result.is_null()) return Nothing<bool>();
-
- DCHECK(result->IsBoolean());
- // Rebox CustomArguments::kReturnValueOffset before returning.
- return Just(result->IsTrue(isolate));
-}
-
-void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
- int entry) {
- DCHECK(!object->HasFastProperties());
- Isolate* isolate = object->GetIsolate();
-
- if (object->IsJSGlobalObject()) {
- // If we have a global object, invalidate the cell and swap in a new one.
- Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*object)->global_dictionary(), isolate);
- DCHECK_NE(GlobalDictionary::kNotFound, entry);
-
- auto cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
- cell->set_value(ReadOnlyRoots(isolate).the_hole_value());
- cell->set_property_details(
- PropertyDetails::Empty(PropertyCellType::kUninitialized));
- } else {
- Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
- DCHECK_NE(NameDictionary::kNotFound, entry);
-
- dictionary = NameDictionary::DeleteEntry(isolate, dictionary, entry);
- object->SetProperties(*dictionary);
- }
- if (object->map()->is_prototype_map()) {
- // Invalidate prototype validity cell as this may invalidate transitioning
- // store IC handlers.
- JSObject::InvalidatePrototypeChains(object->map());
- }
-}
-
-
-Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
- LanguageMode language_mode) {
- it->UpdateProtector();
-
- Isolate* isolate = it->isolate();
-
- if (it->state() == LookupIterator::JSPROXY) {
- return JSProxy::DeletePropertyOrElement(it->GetHolder<JSProxy>(),
- it->GetName(), language_mode);
- }
-
- if (it->GetReceiver()->IsJSProxy()) {
- if (it->state() != LookupIterator::NOT_FOUND) {
- DCHECK_EQ(LookupIterator::DATA, it->state());
- DCHECK(it->name()->IsPrivate());
- it->Delete();
- }
- return Just(true);
- }
- Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
-
- for (; it->IsFound(); it->Next()) {
- switch (it->state()) {
- case LookupIterator::JSPROXY:
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
- case LookupIterator::ACCESS_CHECK:
- if (it->HasAccess()) break;
- isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- return Just(false);
- case LookupIterator::INTERCEPTOR: {
- ShouldThrow should_throw =
- is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
- Maybe<bool> result =
- JSObject::DeletePropertyWithInterceptor(it, should_throw);
- // If an exception was thrown in the interceptor, propagate it.
- if (isolate->has_pending_exception()) return Nothing<bool>();
- // If the interceptor handled the deletion, return its result.
- // TODO(neis): In strict mode, we should probably throw if the
- // interceptor returns false.
- if (result.IsJust()) return result;
- break;
- }
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return Just(true);
- case LookupIterator::DATA:
- case LookupIterator::ACCESSOR: {
- if (!it->IsConfigurable()) {
- // Fail if the property is not configurable.
- if (is_strict(language_mode)) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kStrictDeleteProperty, it->GetName(),
- receiver));
- return Nothing<bool>();
- }
- return Just(false);
- }
-
- it->Delete();
-
- return Just(true);
- }
- }
- }
-
- return Just(true);
-}
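// A sketch (not from this patch) of the language-mode split above: deleting
// a non-configurable property quietly yields false in sloppy mode and
// becomes a TypeError in strict mode.
#include <stdexcept>

enum class LanguageMode { kSloppy, kStrict };

bool DeleteNonConfigurable(LanguageMode mode) {
  if (mode == LanguageMode::kStrict) {
    throw std::runtime_error("TypeError: cannot delete property");
  }
  return false;  // sloppy: `delete o.x` simply evaluates to false
}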
-
-
-Maybe<bool> JSReceiver::DeleteElement(Handle<JSReceiver> object, uint32_t index,
- LanguageMode language_mode) {
- LookupIterator it(object->GetIsolate(), object, index, object,
- LookupIterator::OWN);
- return DeleteProperty(&it, language_mode);
-}
-
-
-Maybe<bool> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
- Handle<Name> name,
- LanguageMode language_mode) {
- LookupIterator it(object, name, object, LookupIterator::OWN);
- return DeleteProperty(&it, language_mode);
-}
-
-
-Maybe<bool> JSReceiver::DeletePropertyOrElement(Handle<JSReceiver> object,
- Handle<Name> name,
- LanguageMode language_mode) {
- LookupIterator it = LookupIterator::PropertyOrElement(
- object->GetIsolate(), object, name, object, LookupIterator::OWN);
- return DeleteProperty(&it, language_mode);
-}
-
-// ES6 19.1.2.4
-// static
-Object JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
- Handle<Object> key,
- Handle<Object> attributes) {
- // 1. If Type(O) is not Object, throw a TypeError exception.
- if (!object->IsJSReceiver()) {
- Handle<String> fun_name =
- isolate->factory()->InternalizeUtf8String("Object.defineProperty");
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, fun_name));
- }
- // 2. Let key be ToPropertyKey(P).
- // 3. ReturnIfAbrupt(key).
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
- Object::ToPropertyKey(isolate, key));
- // 4. Let desc be ToPropertyDescriptor(Attributes).
- // 5. ReturnIfAbrupt(desc).
- PropertyDescriptor desc;
- if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
- return ReadOnlyRoots(isolate).exception();
- }
- // 6. Let success be DefinePropertyOrThrow(O,key, desc).
- Maybe<bool> success = DefineOwnProperty(
- isolate, Handle<JSReceiver>::cast(object), key, &desc, kThrowOnError);
- // 7. ReturnIfAbrupt(success).
- MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception());
- CHECK(success.FromJust());
- // 8. Return O.
- return *object;
-}
-
-// ES6 19.1.2.3.1
-// static
-MaybeHandle<Object> JSReceiver::DefineProperties(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> properties) {
- // 1. If Type(O) is not Object, throw a TypeError exception.
- if (!object->IsJSReceiver()) {
- Handle<String> fun_name =
- isolate->factory()->InternalizeUtf8String("Object.defineProperties");
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kCalledOnNonObject, fun_name),
- Object);
- }
- // 2. Let props be ToObject(Properties).
- // 3. ReturnIfAbrupt(props).
- Handle<JSReceiver> props;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, props,
- Object::ToObject(isolate, properties), Object);
-
- // 4. Let keys be props.[[OwnPropertyKeys]]().
- // 5. ReturnIfAbrupt(keys).
- Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, keys, KeyAccumulator::GetKeys(props, KeyCollectionMode::kOwnOnly,
- ALL_PROPERTIES),
- Object);
- // 6. Let descriptors be an empty List.
- int capacity = keys->length();
- std::vector<PropertyDescriptor> descriptors(capacity);
- size_t descriptors_index = 0;
- // 7. Repeat for each element nextKey of keys in List order,
- for (int i = 0; i < keys->length(); ++i) {
- Handle<Object> next_key(keys->get(i), isolate);
- // 7a. Let propDesc be props.[[GetOwnProperty]](nextKey).
- // 7b. ReturnIfAbrupt(propDesc).
- bool success = false;
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, props, next_key, &success, LookupIterator::OWN);
- DCHECK(success);
- Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (maybe.IsNothing()) return MaybeHandle<Object>();
- PropertyAttributes attrs = maybe.FromJust();
- // 7c. If propDesc is not undefined and propDesc.[[Enumerable]] is true:
- if (attrs == ABSENT) continue;
- if (attrs & DONT_ENUM) continue;
- // 7c i. Let descObj be Get(props, nextKey).
- // 7c ii. ReturnIfAbrupt(descObj).
- Handle<Object> desc_obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, desc_obj, Object::GetProperty(&it),
- Object);
- // 7c iii. Let desc be ToPropertyDescriptor(descObj).
- success = PropertyDescriptor::ToPropertyDescriptor(
- isolate, desc_obj, &descriptors[descriptors_index]);
- // 7c iv. ReturnIfAbrupt(desc).
- if (!success) return MaybeHandle<Object>();
- // 7c v. Append the pair (a two element List) consisting of nextKey and
- // desc to the end of descriptors.
- descriptors[descriptors_index].set_name(next_key);
- descriptors_index++;
- }
- // 8. For each pair from descriptors in list order,
- for (size_t i = 0; i < descriptors_index; ++i) {
- PropertyDescriptor* desc = &descriptors[i];
- // 8a. Let P be the first element of pair.
- // 8b. Let desc be the second element of pair.
- // 8c. Let status be DefinePropertyOrThrow(O, P, desc).
- Maybe<bool> status =
- DefineOwnProperty(isolate, Handle<JSReceiver>::cast(object),
- desc->name(), desc, kThrowOnError);
- // 8d. ReturnIfAbrupt(status).
- if (status.IsNothing()) return MaybeHandle<Object>();
- CHECK(status.FromJust());
- }
- // 9. Return O.
- return object;
-}
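// A sketch (not from this patch) of the two-phase shape the spec requires
// above: first convert every attributes object into a descriptor, then
// apply them all, so a conversion error leaves the target untouched.
#include <string>
#include <vector>

struct Desc { std::string name; int value; };

bool DefineAll(std::vector<Desc>& target, const std::vector<Desc>& inputs) {
  std::vector<Desc> validated;
  for (const Desc& d : inputs) {
    if (d.name.empty()) return false;  // phase 1 failed: nothing applied yet
    validated.push_back(d);
  }
  for (const Desc& d : validated) target.push_back(d);  // phase 2: apply
  return true;
}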
-
-// static
-Maybe<bool> JSReceiver::DefineOwnProperty(Isolate* isolate,
- Handle<JSReceiver> object,
- Handle<Object> key,
- PropertyDescriptor* desc,
- ShouldThrow should_throw) {
- if (object->IsJSArray()) {
- return JSArray::DefineOwnProperty(isolate, Handle<JSArray>::cast(object),
- key, desc, should_throw);
- }
- if (object->IsJSProxy()) {
- return JSProxy::DefineOwnProperty(isolate, Handle<JSProxy>::cast(object),
- key, desc, should_throw);
- }
- if (object->IsJSTypedArray()) {
- return JSTypedArray::DefineOwnProperty(
- isolate, Handle<JSTypedArray>::cast(object), key, desc, should_throw);
- }
-
- // OrdinaryDefineOwnProperty, by virtue of calling
- // DefineOwnPropertyIgnoreAttributes, can handle arguments
- // (ES#sec-arguments-exotic-objects-defineownproperty-p-desc).
- return OrdinaryDefineOwnProperty(isolate, Handle<JSObject>::cast(object), key,
- desc, should_throw);
-}
-
-
-// static
-Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
- Handle<JSObject> object,
- Handle<Object> key,
- PropertyDescriptor* desc,
- ShouldThrow should_throw) {
- bool success = false;
- DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, key, &success, LookupIterator::OWN);
- DCHECK(success); // ...so creating a LookupIterator can't fail.
-
- // Deal with access checks first.
- if (it.state() == LookupIterator::ACCESS_CHECK) {
- if (!it.HasAccess()) {
- isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- return Just(true);
- }
- it.Next();
- }
-
- return OrdinaryDefineOwnProperty(&it, desc, should_throw);
-}
-
-
-// ES6 9.1.6.1
-// static
-Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
- PropertyDescriptor* desc,
- ShouldThrow should_throw) {
- Isolate* isolate = it->isolate();
- // 1. Let current be O.[[GetOwnProperty]](P).
- // 2. ReturnIfAbrupt(current).
- PropertyDescriptor current;
- MAYBE_RETURN(GetOwnPropertyDescriptor(it, &current), Nothing<bool>());
-
- it->Restart();
- // Handle interceptor
- for (; it->IsFound(); it->Next()) {
- if (it->state() == LookupIterator::INTERCEPTOR) {
- if (it->HolderIsReceiverOrHiddenPrototype()) {
- Maybe<bool> result = DefinePropertyWithInterceptorInternal(
- it, it->GetInterceptor(), should_throw, *desc);
- if (result.IsNothing() || result.FromJust()) {
- return result;
- }
- }
- }
- }
-
- // TODO(jkummerow/verwaest): It would be nice if we didn't have to reset
- // the iterator every time. Currently, the reasons why we need it are:
- // - handle interceptors correctly
- // - handle accessors correctly (which might change the holder's map)
- it->Restart();
- // 3. Let extensible be the value of the [[Extensible]] internal slot of O.
- Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
- bool extensible = JSObject::IsExtensible(object);
-
- return ValidateAndApplyPropertyDescriptor(
- isolate, it, extensible, desc, &current, should_throw, Handle<Name>());
-}
-
-
-// ES6 9.1.6.2
-// static
-Maybe<bool> JSReceiver::IsCompatiblePropertyDescriptor(
- Isolate* isolate, bool extensible, PropertyDescriptor* desc,
- PropertyDescriptor* current, Handle<Name> property_name,
- ShouldThrow should_throw) {
- // 1. Return ValidateAndApplyPropertyDescriptor(undefined, undefined,
- // Extensible, Desc, Current).
- return ValidateAndApplyPropertyDescriptor(
- isolate, nullptr, extensible, desc, current, should_throw, property_name);
-}
-
-
-// ES6 9.1.6.3
-// static
-Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
- Isolate* isolate, LookupIterator* it, bool extensible,
- PropertyDescriptor* desc, PropertyDescriptor* current,
- ShouldThrow should_throw, Handle<Name> property_name) {
- // We either need a LookupIterator, or a property name.
- DCHECK((it == nullptr) != property_name.is_null());
- Handle<JSObject> object;
- if (it != nullptr) object = Handle<JSObject>::cast(it->GetReceiver());
- bool desc_is_data_descriptor = PropertyDescriptor::IsDataDescriptor(desc);
- bool desc_is_accessor_descriptor =
- PropertyDescriptor::IsAccessorDescriptor(desc);
- bool desc_is_generic_descriptor =
- PropertyDescriptor::IsGenericDescriptor(desc);
- // 1. (Assert)
- // 2. If current is undefined, then
- if (current->is_empty()) {
- // 2a. If extensible is false, return false.
- if (!extensible) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kDefineDisallowed,
- it != nullptr ? it->GetName() : property_name));
- }
- // 2c. If IsGenericDescriptor(Desc) or IsDataDescriptor(Desc) is true, then:
- // (This is equivalent to !IsAccessorDescriptor(desc).)
- DCHECK((desc_is_generic_descriptor || desc_is_data_descriptor) ==
- !desc_is_accessor_descriptor);
- if (!desc_is_accessor_descriptor) {
- // 2c i. If O is not undefined, create an own data property named P of
- // object O whose [[Value]], [[Writable]], [[Enumerable]] and
- // [[Configurable]] attribute values are described by Desc. If the value
- // of an attribute field of Desc is absent, the attribute of the newly
- // created property is set to its default value.
- if (it != nullptr) {
- if (!desc->has_writable()) desc->set_writable(false);
- if (!desc->has_enumerable()) desc->set_enumerable(false);
- if (!desc->has_configurable()) desc->set_configurable(false);
- Handle<Object> value(
- desc->has_value()
- ? desc->value()
- : Handle<Object>::cast(isolate->factory()->undefined_value()));
- MaybeHandle<Object> result =
- JSObject::DefineOwnPropertyIgnoreAttributes(it, value,
- desc->ToAttributes());
- if (result.is_null()) return Nothing<bool>();
- }
- } else {
- // 2d. Else Desc must be an accessor Property Descriptor,
- DCHECK(desc_is_accessor_descriptor);
- // 2d i. If O is not undefined, create an own accessor property named P
- // of object O whose [[Get]], [[Set]], [[Enumerable]] and
- // [[Configurable]] attribute values are described by Desc. If the value
- // of an attribute field of Desc is absent, the attribute of the newly
- // created property is set to its default value.
- if (it != nullptr) {
- if (!desc->has_enumerable()) desc->set_enumerable(false);
- if (!desc->has_configurable()) desc->set_configurable(false);
- Handle<Object> getter(
- desc->has_get()
- ? desc->get()
- : Handle<Object>::cast(isolate->factory()->null_value()));
- Handle<Object> setter(
- desc->has_set()
- ? desc->set()
- : Handle<Object>::cast(isolate->factory()->null_value()));
- MaybeHandle<Object> result =
- JSObject::DefineAccessor(it, getter, setter, desc->ToAttributes());
- if (result.is_null()) return Nothing<bool>();
- }
- }
- // 2e. Return true.
- return Just(true);
- }
- // 3. Return true, if every field in Desc is absent.
- // 4. Return true, if every field in Desc also occurs in current and the
- // value of every field in Desc is the same value as the corresponding field
- // in current when compared using the SameValue algorithm.
- if ((!desc->has_enumerable() ||
- desc->enumerable() == current->enumerable()) &&
- (!desc->has_configurable() ||
- desc->configurable() == current->configurable()) &&
- (!desc->has_value() ||
- (current->has_value() && current->value()->SameValue(*desc->value()))) &&
- (!desc->has_writable() ||
- (current->has_writable() && current->writable() == desc->writable())) &&
- (!desc->has_get() ||
- (current->has_get() && current->get()->SameValue(*desc->get()))) &&
- (!desc->has_set() ||
- (current->has_set() && current->set()->SameValue(*desc->set())))) {
- return Just(true);
- }
- // 5. If the [[Configurable]] field of current is false, then
- if (!current->configurable()) {
- // 5a. Return false, if the [[Configurable]] field of Desc is true.
- if (desc->has_configurable() && desc->configurable()) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != nullptr ? it->GetName() : property_name));
- }
- // 5b. Return false, if the [[Enumerable]] field of Desc is present and the
- // [[Enumerable]] fields of current and Desc are the Boolean negation of
- // each other.
- if (desc->has_enumerable() && desc->enumerable() != current->enumerable()) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != nullptr ? it->GetName() : property_name));
- }
- }
-
- bool current_is_data_descriptor =
- PropertyDescriptor::IsDataDescriptor(current);
- // 6. If IsGenericDescriptor(Desc) is true, no further validation is required.
- if (desc_is_generic_descriptor) {
- // Nothing to see here.
-
- // 7. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) have
- // different results, then:
- } else if (current_is_data_descriptor != desc_is_data_descriptor) {
- // 7a. Return false, if the [[Configurable]] field of current is false.
- if (!current->configurable()) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != nullptr ? it->GetName() : property_name));
- }
- // 7b. If IsDataDescriptor(current) is true, then:
- if (current_is_data_descriptor) {
- // 7b i. If O is not undefined, convert the property named P of object O
- // from a data property to an accessor property. Preserve the existing
- // values of the converted property's [[Configurable]] and [[Enumerable]]
- // attributes and set the rest of the property's attributes to their
- // default values.
- // --> Folded into step 10.
- } else {
- // 7c i. If O is not undefined, convert the property named P of object O
- // from an accessor property to a data property. Preserve the existing
- // values of the converted property's [[Configurable]] and [[Enumerable]]
- // attributes and set the rest of the property's attributes to their
- // default values.
- // --> Folded into step 10.
- }
-
- // 8. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) are both
- // true, then:
- } else if (current_is_data_descriptor && desc_is_data_descriptor) {
- // 8a. If the [[Configurable]] field of current is false, then:
- if (!current->configurable()) {
- // 8a i. Return false, if the [[Writable]] field of current is false and
- // the [[Writable]] field of Desc is true.
- if (!current->writable() && desc->has_writable() && desc->writable()) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != nullptr ? it->GetName() : property_name));
- }
- // 8a ii. If the [[Writable]] field of current is false, then:
- if (!current->writable()) {
- // 8a ii 1. Return false, if the [[Value]] field of Desc is present and
- // SameValue(Desc.[[Value]], current.[[Value]]) is false.
- if (desc->has_value() && !desc->value()->SameValue(*current->value())) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != nullptr ? it->GetName() : property_name));
- }
- }
- }
- } else {
- // 9. Else IsAccessorDescriptor(current) and IsAccessorDescriptor(Desc)
- // are both true,
- DCHECK(PropertyDescriptor::IsAccessorDescriptor(current) &&
- desc_is_accessor_descriptor);
- // 9a. If the [[Configurable]] field of current is false, then:
- if (!current->configurable()) {
- // 9a i. Return false, if the [[Set]] field of Desc is present and
- // SameValue(Desc.[[Set]], current.[[Set]]) is false.
- if (desc->has_set() && !desc->set()->SameValue(*current->set())) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != nullptr ? it->GetName() : property_name));
- }
- // 9a ii. Return false, if the [[Get]] field of Desc is present and
- // SameValue(Desc.[[Get]], current.[[Get]]) is false.
- if (desc->has_get() && !desc->get()->SameValue(*current->get())) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != nullptr ? it->GetName() : property_name));
- }
- }
- }
-
- // 10. If O is not undefined, then:
- if (it != nullptr) {
- // 10a. For each field of Desc that is present, set the corresponding
- // attribute of the property named P of object O to the value of the field.
- PropertyAttributes attrs = NONE;
-
- if (desc->has_enumerable()) {
- attrs = static_cast<PropertyAttributes>(
- attrs | (desc->enumerable() ? NONE : DONT_ENUM));
- } else {
- attrs = static_cast<PropertyAttributes>(
- attrs | (current->enumerable() ? NONE : DONT_ENUM));
- }
- if (desc->has_configurable()) {
- attrs = static_cast<PropertyAttributes>(
- attrs | (desc->configurable() ? NONE : DONT_DELETE));
- } else {
- attrs = static_cast<PropertyAttributes>(
- attrs | (current->configurable() ? NONE : DONT_DELETE));
- }
- if (desc_is_data_descriptor ||
- (desc_is_generic_descriptor && current_is_data_descriptor)) {
- if (desc->has_writable()) {
- attrs = static_cast<PropertyAttributes>(
- attrs | (desc->writable() ? NONE : READ_ONLY));
- } else {
- attrs = static_cast<PropertyAttributes>(
- attrs | (current->writable() ? NONE : READ_ONLY));
- }
- Handle<Object> value(
- desc->has_value() ? desc->value()
- : current->has_value()
- ? current->value()
- : Handle<Object>::cast(
- isolate->factory()->undefined_value()));
- return JSObject::DefineOwnPropertyIgnoreAttributes(it, value, attrs,
- should_throw);
- } else {
- DCHECK(desc_is_accessor_descriptor ||
- (desc_is_generic_descriptor &&
- PropertyDescriptor::IsAccessorDescriptor(current)));
- Handle<Object> getter(
- desc->has_get()
- ? desc->get()
- : current->has_get()
- ? current->get()
- : Handle<Object>::cast(isolate->factory()->null_value()));
- Handle<Object> setter(
- desc->has_set()
- ? desc->set()
- : current->has_set()
- ? current->set()
- : Handle<Object>::cast(isolate->factory()->null_value()));
- MaybeHandle<Object> result =
- JSObject::DefineAccessor(it, getter, setter, attrs);
- if (result.is_null()) return Nothing<bool>();
- }
- }
-
- // 11. Return true.
- return Just(true);
-}
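// A sketch (not from this patch) of step 10's merge rule above: every field
// the incoming descriptor supplies wins, and every absent field keeps the
// current property's value. std::optional stands in for "field is present".
#include <optional>

struct DescBits {
  std::optional<bool> enumerable, configurable, writable;
};

struct AppliedBits { bool enumerable, configurable, writable; };

AppliedBits Merge(const DescBits& desc, const AppliedBits& current) {
  return AppliedBits{
      desc.enumerable.value_or(current.enumerable),
      desc.configurable.value_or(current.configurable),
      desc.writable.value_or(current.writable),
  };
}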
-
-// static
-Maybe<bool> JSReceiver::CreateDataProperty(Isolate* isolate,
- Handle<JSReceiver> object,
- Handle<Name> key,
- Handle<Object> value,
- ShouldThrow should_throw) {
- LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, key,
- LookupIterator::OWN);
- return CreateDataProperty(&it, value, should_throw);
-}
-
-// static
-Maybe<bool> JSReceiver::CreateDataProperty(LookupIterator* it,
- Handle<Object> value,
- ShouldThrow should_throw) {
- DCHECK(!it->check_prototype_chain());
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
- Isolate* isolate = receiver->GetIsolate();
-
- if (receiver->IsJSObject()) {
- return JSObject::CreateDataProperty(it, value, should_throw); // Shortcut.
- }
-
- PropertyDescriptor new_desc;
- new_desc.set_value(value);
- new_desc.set_writable(true);
- new_desc.set_enumerable(true);
- new_desc.set_configurable(true);
-
- return JSReceiver::DefineOwnProperty(isolate, receiver, it->GetName(),
- &new_desc, should_throw);
-}
-
-Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
- Handle<Object> value,
- ShouldThrow should_throw) {
- DCHECK(it->GetReceiver()->IsJSObject());
- MAYBE_RETURN(JSReceiver::GetPropertyAttributes(it), Nothing<bool>());
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
- Isolate* isolate = receiver->GetIsolate();
-
- if (it->IsFound()) {
- Maybe<PropertyAttributes> attributes = GetPropertyAttributes(it);
- MAYBE_RETURN(attributes, Nothing<bool>());
- if ((attributes.FromJust() & DONT_DELETE) != 0) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed, it->GetName()));
- }
- } else {
- if (!JSObject::IsExtensible(Handle<JSObject>::cast(it->GetReceiver()))) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kDefineDisallowed, it->GetName()));
- }
- }
-
- RETURN_ON_EXCEPTION_VALUE(it->isolate(),
- DefineOwnPropertyIgnoreAttributes(it, value, NONE),
- Nothing<bool>());
-
- return Just(true);
-}
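// A sketch (not from this patch): the generic path above is the spec's
// CreateDataProperty -- a data descriptor with all three attributes true --
// funneled through DefineOwnProperty. The types below are hypothetical.
struct DataDesc {
  int value;
  bool writable, enumerable, configurable;
};

DataDesc FullyPermissive(int value) {
  return DataDesc{value, /*writable=*/true, /*enumerable=*/true,
                  /*configurable=*/true};
}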
-
// TODO(jkummerow): Consider unification with FastAsArrayLength() in
// accessors.cc.
bool PropertyKeyToArrayLength(Handle<Object> value, uint32_t* length) {
@@ -7717,13 +3161,12 @@ bool PropertyKeyToArrayIndex(Handle<Object> index_obj, uint32_t* output) {
return PropertyKeyToArrayLength(index_obj, output) && *output != kMaxUInt32;
}
-
// ES6 9.4.2.1
// static
Maybe<bool> JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
Handle<Object> name,
PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+ Maybe<ShouldThrow> should_throw) {
// 1. Assert: IsPropertyKey(P) is true. ("P" is |name|.)
// 2. If P is "length", then:
// TODO(jkummerow): Check if we need slow string comparison.
@@ -7751,7 +3194,7 @@ Maybe<bool> JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
// return false.
if (index >= old_len && old_len_desc.has_writable() &&
!old_len_desc.writable()) {
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kDefineDisallowed, name));
}
// 3g. Let succeeded be OrdinaryDefineOwnProperty(A, P, Desc).
@@ -7782,7 +3225,6 @@ Maybe<bool> JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
return OrdinaryDefineOwnProperty(isolate, o, name, desc, should_throw);
}
-
// Part of ES6 9.4.2.4 ArraySetLength.
// static
bool JSArray::AnythingToArrayLength(Isolate* isolate,
@@ -7819,12 +3261,11 @@ bool JSArray::AnythingToArrayLength(Isolate* isolate,
return true;
}
-
// ES6 9.4.2.4
// static
Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+ Maybe<ShouldThrow> should_throw) {
// 1. If the [[Value]] field of Desc is absent, then
if (!desc->has_value()) {
// 1a. Return OrdinaryDefineOwnProperty(A, "length", Desc).
@@ -7863,7 +3304,7 @@ Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
}
// 13. If oldLenDesc.[[Writable]] is false, return false.
if (!old_len_desc.writable()) {
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed,
isolate->factory()->length_string()));
}
@@ -7898,7 +3339,7 @@ Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
bool result = actual_new_len == new_len;
if (!result) {
RETURN_FAILURE(
- isolate, should_throw,
+ isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kStrictDeleteProperty,
isolate->factory()->NewNumberFromUint(actual_new_len - 1),
a));
@@ -7906,13 +3347,12 @@ Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
return Just(result);
}
-
// ES6 9.5.6
// static
Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Object> key,
PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+ Maybe<ShouldThrow> should_throw) {
STACK_CHECK(isolate, Nothing<bool>());
if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
DCHECK(!Handle<Symbol>::cast(key)->IsPrivateName());
@@ -7963,7 +3403,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
Nothing<bool>());
// 10. If booleanTrapResult is false, return false.
if (!trap_result_obj->BooleanValue(isolate)) {
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
trap_name, property_name));
}
@@ -7999,9 +3439,9 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
// 16. Else targetDesc is not undefined,
// 16a. If IsCompatiblePropertyDescriptor(extensibleTarget, Desc,
// targetDesc) is false, throw a TypeError exception.
- Maybe<bool> valid =
- IsCompatiblePropertyDescriptor(isolate, extensible_target, desc,
- &target_desc, property_name, kDontThrow);
+ Maybe<bool> valid = IsCompatiblePropertyDescriptor(
+ isolate, extensible_target, desc, &target_desc, property_name,
+ Just(kDontThrow));
MAYBE_RETURN(valid, Nothing<bool>());
if (!valid.FromJust()) {
isolate->Throw(*isolate->factory()->NewTypeError(
@@ -8024,12 +3464,12 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Symbol> private_name,
PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+ Maybe<ShouldThrow> should_throw) {
DCHECK(!private_name->IsPrivateName());
// Despite the generic name, this can only add private data properties.
if (!PropertyDescriptor::IsDataDescriptor(desc) ||
desc->ToAttributes() != DONT_ENUM) {
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kProxyPrivate));
}
DCHECK(proxy->map()->is_dictionary_map());
@@ -8055,140 +3495,6 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
return Just(true);
}
-// static
-Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
- Handle<JSReceiver> object,
- Handle<Object> key,
- PropertyDescriptor* desc) {
- bool success = false;
- DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, key, &success, LookupIterator::OWN);
- DCHECK(success); // ...so creating a LookupIterator can't fail.
- return GetOwnPropertyDescriptor(&it, desc);
-}
-
-namespace {
-
-Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
- PropertyDescriptor* desc) {
- if (it->state() == LookupIterator::ACCESS_CHECK) {
- if (it->HasAccess()) {
- it->Next();
- } else if (!JSObject::AllCanRead(it) ||
- it->state() != LookupIterator::INTERCEPTOR) {
- it->Restart();
- return Just(false);
- }
- }
-
- if (it->state() != LookupIterator::INTERCEPTOR) return Just(false);
-
- Isolate* isolate = it->isolate();
- Handle<InterceptorInfo> interceptor = it->GetInterceptor();
- if (interceptor->descriptor()->IsUndefined(isolate)) return Just(false);
-
- Handle<Object> result;
- Handle<JSObject> holder = it->GetHolder<JSObject>();
-
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
- Object::ConvertReceiver(isolate, receiver),
- Nothing<bool>());
- }
-
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, kDontThrow);
- if (it->IsElement()) {
- result = args.CallIndexedDescriptor(interceptor, it->index());
- } else {
- result = args.CallNamedDescriptor(interceptor, it->name());
- }
- if (!result.is_null()) {
- // Request successfully intercepted; try to set the property
- // descriptor.
- Utils::ApiCheck(
- PropertyDescriptor::ToPropertyDescriptor(isolate, result, desc),
- it->IsElement() ? "v8::IndexedPropertyDescriptorCallback"
- : "v8::NamedPropertyDescriptorCallback",
- "Invalid property descriptor.");
-
- return Just(true);
- }
-
- it->Next();
- return Just(false);
-}
-} // namespace
-
-// ES6 9.1.5.1
-// Returns true on success, false if the property didn't exist, nothing if
-// an exception was thrown.
-// static
-Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
- PropertyDescriptor* desc) {
- Isolate* isolate = it->isolate();
- // "Virtual" dispatch.
- if (it->IsFound() && it->GetHolder<JSReceiver>()->IsJSProxy()) {
- return JSProxy::GetOwnPropertyDescriptor(isolate, it->GetHolder<JSProxy>(),
- it->GetName(), desc);
- }
-
- Maybe<bool> intercepted = GetPropertyDescriptorWithInterceptor(it, desc);
- MAYBE_RETURN(intercepted, Nothing<bool>());
- if (intercepted.FromJust()) {
- return Just(true);
- }
-
- // Request was not intercepted, continue as normal.
- // 1. (Assert)
- // 2. If O does not have an own property with key P, return undefined.
- Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(it);
- MAYBE_RETURN(maybe, Nothing<bool>());
- PropertyAttributes attrs = maybe.FromJust();
- if (attrs == ABSENT) return Just(false);
- DCHECK(!isolate->has_pending_exception());
-
- // 3. Let D be a newly created Property Descriptor with no fields.
- DCHECK(desc->is_empty());
- // 4. Let X be O's own property whose key is P.
- // 5. If X is a data property, then
- bool is_accessor_pair = it->state() == LookupIterator::ACCESSOR &&
- it->GetAccessors()->IsAccessorPair();
- if (!is_accessor_pair) {
- // 5a. Set D.[[Value]] to the value of X's [[Value]] attribute.
- Handle<Object> value;
- if (!Object::GetProperty(it).ToHandle(&value)) {
- DCHECK(isolate->has_pending_exception());
- return Nothing<bool>();
- }
- desc->set_value(value);
- // 5b. Set D.[[Writable]] to the value of X's [[Writable]] attribute
- desc->set_writable((attrs & READ_ONLY) == 0);
- } else {
- // 6. Else X is an accessor property, so
- Handle<AccessorPair> accessors =
- Handle<AccessorPair>::cast(it->GetAccessors());
- // 6a. Set D.[[Get]] to the value of X's [[Get]] attribute.
- desc->set_get(
- AccessorPair::GetComponent(isolate, accessors, ACCESSOR_GETTER));
- // 6b. Set D.[[Set]] to the value of X's [[Set]] attribute.
- desc->set_set(
- AccessorPair::GetComponent(isolate, accessors, ACCESSOR_SETTER));
- }
-
- // 7. Set D.[[Enumerable]] to the value of X's [[Enumerable]] attribute.
- desc->set_enumerable((attrs & DONT_ENUM) == 0);
- // 8. Set D.[[Configurable]] to the value of X's [[Configurable]] attribute.
- desc->set_configurable((attrs & DONT_DELETE) == 0);
- // 9. Return D.
- DCHECK(PropertyDescriptor::IsAccessorDescriptor(desc) !=
- PropertyDescriptor::IsDataDescriptor(desc));
- return Just(true);
-}
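// A sketch (not from this patch) of steps 5b, 7, and 8 above: each boolean
// descriptor field is simply the negation of the corresponding attribute
// bit. The flag values are illustrative, not V8's actual ones.
enum : unsigned { kReadOnly = 1, kDontEnum = 2, kDontDelete = 4 };

struct Descriptor { bool writable, enumerable, configurable; };

Descriptor FromAttributes(unsigned attrs) {
  return Descriptor{(attrs & kReadOnly) == 0, (attrs & kDontEnum) == 0,
                    (attrs & kDontDelete) == 0};
}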
-
-
// ES6 9.5.5
// static
Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
@@ -8280,9 +3586,9 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
PropertyDescriptor::CompletePropertyDescriptor(isolate, desc);
// 15. Let valid be IsCompatiblePropertyDescriptor (extensibleTarget,
// resultDesc, targetDesc).
- Maybe<bool> valid =
- IsCompatiblePropertyDescriptor(isolate, extensible_target.FromJust(),
- desc, &target_desc, name, kDontThrow);
+ Maybe<bool> valid = IsCompatiblePropertyDescriptor(
+ isolate, extensible_target.FromJust(), desc, &target_desc, name,
+ Just(kDontThrow));
MAYBE_RETURN(valid, Nothing<bool>());
// 16. If valid is false, throw a TypeError exception.
if (!valid.FromJust()) {
@@ -8305,220 +3611,6 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
return Just(true);
}
-
-Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
- IntegrityLevel level,
- ShouldThrow should_throw) {
- DCHECK(level == SEALED || level == FROZEN);
-
- if (receiver->IsJSObject()) {
- Handle<JSObject> object = Handle<JSObject>::cast(receiver);
-
- if (!object->HasSloppyArgumentsElements() &&
- !object->IsJSModuleNamespace()) { // Fast path.
- // Prevent memory leaks by not adding unnecessary transitions.
- Maybe<bool> test = JSObject::TestIntegrityLevel(object, level);
- MAYBE_RETURN(test, Nothing<bool>());
- if (test.FromJust()) return test;
-
- if (level == SEALED) {
- return JSObject::PreventExtensionsWithTransition<SEALED>(object,
- should_throw);
- } else {
- return JSObject::PreventExtensionsWithTransition<FROZEN>(object,
- should_throw);
- }
- }
- }
-
- Isolate* isolate = receiver->GetIsolate();
-
- MAYBE_RETURN(JSReceiver::PreventExtensions(receiver, should_throw),
- Nothing<bool>());
-
- Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, keys, JSReceiver::OwnPropertyKeys(receiver), Nothing<bool>());
-
- PropertyDescriptor no_conf;
- no_conf.set_configurable(false);
-
- PropertyDescriptor no_conf_no_write;
- no_conf_no_write.set_configurable(false);
- no_conf_no_write.set_writable(false);
-
- if (level == SEALED) {
- for (int i = 0; i < keys->length(); ++i) {
- Handle<Object> key(keys->get(i), isolate);
- MAYBE_RETURN(
- DefineOwnProperty(isolate, receiver, key, &no_conf, kThrowOnError),
- Nothing<bool>());
- }
- return Just(true);
- }
-
- for (int i = 0; i < keys->length(); ++i) {
- Handle<Object> key(keys->get(i), isolate);
- PropertyDescriptor current_desc;
- Maybe<bool> owned = JSReceiver::GetOwnPropertyDescriptor(
- isolate, receiver, key, &current_desc);
- MAYBE_RETURN(owned, Nothing<bool>());
- if (owned.FromJust()) {
- PropertyDescriptor desc =
- PropertyDescriptor::IsAccessorDescriptor(&current_desc)
- ? no_conf
- : no_conf_no_write;
- MAYBE_RETURN(
- DefineOwnProperty(isolate, receiver, key, &desc, kThrowOnError),
- Nothing<bool>());
- }
- }
- return Just(true);
-}
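// A sketch (not from this patch) of the descriptor choice above: sealing
// clears [[Configurable]] on every key, while freezing additionally clears
// [[Writable]] -- but only for data properties, since accessors have no
// [[Writable]] to clear.
#include <optional>

struct LevelDesc {
  bool configurable;
  std::optional<bool> writable;  // left absent for accessor properties
};

LevelDesc ForKey(bool frozen, bool is_accessor) {
  if (frozen && !is_accessor) return LevelDesc{false, false};  // no_conf_no_write
  return LevelDesc{false, std::nullopt};                       // no_conf
}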
-
-namespace {
-
-template <typename Dictionary>
-bool TestDictionaryPropertiesIntegrityLevel(Dictionary dict,
- ReadOnlyRoots roots,
- PropertyAttributes level) {
- DCHECK(level == SEALED || level == FROZEN);
-
- uint32_t capacity = dict->Capacity();
- for (uint32_t i = 0; i < capacity; i++) {
- Object key;
- if (!dict->ToKey(roots, i, &key)) continue;
- if (key->FilterKey(ALL_PROPERTIES)) continue;
- PropertyDetails details = dict->DetailsAt(i);
- if (details.IsConfigurable()) return false;
- if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
- return false;
- }
- }
- return true;
-}
-
-bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
- DCHECK(level == SEALED || level == FROZEN);
- DCHECK(!map->IsCustomElementsReceiverMap());
- DCHECK(!map->is_dictionary_map());
-
- DescriptorArray descriptors = map->instance_descriptors();
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- for (int i = 0; i < number_of_own_descriptors; i++) {
- if (descriptors->GetKey(i)->IsPrivate()) continue;
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.IsConfigurable()) return false;
- if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
- return false;
- }
- }
- return true;
-}
-
-bool TestPropertiesIntegrityLevel(JSObject object, PropertyAttributes level) {
- DCHECK(!object->map()->IsCustomElementsReceiverMap());
-
- if (object->HasFastProperties()) {
- return TestFastPropertiesIntegrityLevel(object->map(), level);
- }
-
- return TestDictionaryPropertiesIntegrityLevel(
- object->property_dictionary(), object->GetReadOnlyRoots(), level);
-}
-
-bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) {
- DCHECK(!object->HasSloppyArgumentsElements());
-
- ElementsKind kind = object->GetElementsKind();
-
- if (IsDictionaryElementsKind(kind)) {
- return TestDictionaryPropertiesIntegrityLevel(
- NumberDictionary::cast(object->elements()), object->GetReadOnlyRoots(),
- level);
- }
- if (IsFixedTypedArrayElementsKind(kind)) {
- return TestPropertiesIntegrityLevel(object, level);
- }
-
- ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
- // Only DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS carry
- // PropertyAttributes, so for other kinds just test that no elements exist.
- return accessor->NumberOfElements(object) == 0;
-}
-
-bool FastTestIntegrityLevel(JSObject object, PropertyAttributes level) {
- DCHECK(!object->map()->IsCustomElementsReceiverMap());
-
- return !object->map()->is_extensible() &&
- TestElementsIntegrityLevel(object, level) &&
- TestPropertiesIntegrityLevel(object, level);
-}
-
-Maybe<bool> GenericTestIntegrityLevel(Handle<JSReceiver> receiver,
- PropertyAttributes level) {
- DCHECK(level == SEALED || level == FROZEN);
-
- Maybe<bool> extensible = JSReceiver::IsExtensible(receiver);
- MAYBE_RETURN(extensible, Nothing<bool>());
- if (extensible.FromJust()) return Just(false);
-
- Isolate* isolate = receiver->GetIsolate();
-
- Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, keys, JSReceiver::OwnPropertyKeys(receiver), Nothing<bool>());
-
- for (int i = 0; i < keys->length(); ++i) {
- Handle<Object> key(keys->get(i), isolate);
- PropertyDescriptor current_desc;
- Maybe<bool> owned = JSReceiver::GetOwnPropertyDescriptor(
- isolate, receiver, key, &current_desc);
- MAYBE_RETURN(owned, Nothing<bool>());
- if (owned.FromJust()) {
- if (current_desc.configurable()) return Just(false);
- if (level == FROZEN &&
- PropertyDescriptor::IsDataDescriptor(&current_desc) &&
- current_desc.writable()) {
- return Just(false);
- }
- }
- }
- return Just(true);
-}
-
-} // namespace
-
-Maybe<bool> JSReceiver::TestIntegrityLevel(Handle<JSReceiver> receiver,
- IntegrityLevel level) {
- if (!receiver->map()->IsCustomElementsReceiverMap()) {
- return JSObject::TestIntegrityLevel(Handle<JSObject>::cast(receiver),
- level);
- }
- return GenericTestIntegrityLevel(receiver, level);
-}
-
-Maybe<bool> JSObject::TestIntegrityLevel(Handle<JSObject> object,
- IntegrityLevel level) {
- if (!object->map()->IsCustomElementsReceiverMap() &&
- !object->HasSloppyArgumentsElements()) {
- return Just(FastTestIntegrityLevel(*object, level));
- }
- return GenericTestIntegrityLevel(Handle<JSReceiver>::cast(object), level);
-}
-
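The integrity-level machinery removed above backs Object.isSealed and Object.isFrozen: sealed means non-extensible with every own property non-configurable; frozen additionally requires data properties to be non-writable. A minimal JavaScript sketch of the observable behavior (standard ECMAScript semantics, not V8-specific):

    const o = Object.seal({ a: 1 });
    console.log(Object.isSealed(o));  // true: non-extensible, "a" non-configurable
    console.log(Object.isFrozen(o));  // false: "a" is still writable
    o.a = 2;                          // allowed while merely sealed
    Object.freeze(o);
    console.log(Object.isFrozen(o));  // true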
-Maybe<bool> JSReceiver::PreventExtensions(Handle<JSReceiver> object,
- ShouldThrow should_throw) {
- if (object->IsJSProxy()) {
- return JSProxy::PreventExtensions(Handle<JSProxy>::cast(object),
- should_throw);
- }
- DCHECK(object->IsJSObject());
- return JSObject::PreventExtensions(Handle<JSObject>::cast(object),
- should_throw);
-}
-
-
Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
ShouldThrow should_throw) {
Isolate* isolate = proxy->GetIsolate();
@@ -8564,71 +3656,6 @@ Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
return Just(true);
}
-
-Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
- ShouldThrow should_throw) {
- Isolate* isolate = object->GetIsolate();
-
- if (!object->HasSloppyArgumentsElements()) {
- return PreventExtensionsWithTransition<NONE>(object, should_throw);
- }
-
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
- isolate->ReportFailedAccessCheck(object);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kNoAccess));
- }
-
- if (!object->map()->is_extensible()) return Just(true);
-
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return Just(true);
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return PreventExtensions(PrototypeIterator::GetCurrent<JSObject>(iter),
- should_throw);
- }
-
- if (object->map()->has_named_interceptor() ||
- object->map()->has_indexed_interceptor()) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kCannotPreventExt));
- }
-
- if (!object->HasFixedTypedArrayElements()) {
- // If there are fast elements we normalize.
- Handle<NumberDictionary> dictionary = NormalizeElements(object);
- DCHECK(object->HasDictionaryElements() ||
- object->HasSlowArgumentsElements());
-
-    // Make sure that we never go back to the fast case.
- object->RequireSlowElements(*dictionary);
- }
-
-  // Do a map transition; other objects with this map may still
-  // be extensible.
- // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
- Handle<Map> new_map =
- Map::Copy(isolate, handle(object->map(), isolate), "PreventExtensions");
-
- new_map->set_is_extensible(false);
- JSObject::MigrateToMap(object, new_map);
- DCHECK(!object->map()->is_extensible());
-
- return Just(true);
-}
-
-
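JSObject::PreventExtensions above implements Object.preventExtensions: the object is moved to a copied, non-extensible map (other objects may still share the original map), and its elements are normalized so no fast-case transition can re-enable growth. A sketch of the observable JS contract:

    const o = { x: 1 };
    Object.preventExtensions(o);
    o.y = 2;                               // silently ignored in sloppy mode
    console.log("y" in o);                 // false
    console.log(Object.isExtensible(o));   // false
    o.x = 3;                               // existing properties are untouched
    console.log(o.x);                      // 3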
-Maybe<bool> JSReceiver::IsExtensible(Handle<JSReceiver> object) {
- if (object->IsJSProxy()) {
- return JSProxy::IsExtensible(Handle<JSProxy>::cast(object));
- }
- return Just(JSObject::IsExtensible(Handle<JSObject>::cast(object)));
-}
-
-
Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
STACK_CHECK(isolate, Nothing<bool>());
@@ -8669,1612 +3696,6 @@ Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
return target_result;
}
-
-bool JSObject::IsExtensible(Handle<JSObject> object) {
- Isolate* isolate = object->GetIsolate();
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
- return true;
- }
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, *object);
- if (iter.IsAtEnd()) return false;
- DCHECK(iter.GetCurrent()->IsJSGlobalObject());
- return iter.GetCurrent<JSObject>()->map()->is_extensible();
- }
- return object->map()->is_extensible();
-}
-
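For proxies (JSProxy::IsExtensible above), the spec requires the isExtensible trap result to agree with the target's actual extensibility; a mismatch is a TypeError. An illustrative sketch of that invariant in plain JavaScript:

    const target = {};                      // extensible
    const p = new Proxy(target, { isExtensible: () => false });
    try {
      Object.isExtensible(p);               // trap result contradicts target
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }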
-namespace {
-
-template <typename Dictionary>
-void ApplyAttributesToDictionary(Isolate* isolate, ReadOnlyRoots roots,
- Handle<Dictionary> dictionary,
- const PropertyAttributes attributes) {
- int capacity = dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object k;
- if (!dictionary->ToKey(roots, i, &k)) continue;
- if (k->FilterKey(ALL_PROPERTIES)) continue;
- PropertyDetails details = dictionary->DetailsAt(i);
- int attrs = attributes;
- // READ_ONLY is an invalid attribute for JS setters/getters.
- if ((attributes & READ_ONLY) && details.kind() == kAccessor) {
- Object v = dictionary->ValueAt(i);
- if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
- }
- details = details.CopyAddAttributes(static_cast<PropertyAttributes>(attrs));
- dictionary->DetailsAtPut(isolate, i, details);
- }
-}
-
-} // namespace
-
-template <PropertyAttributes attrs>
-Maybe<bool> JSObject::PreventExtensionsWithTransition(
- Handle<JSObject> object, ShouldThrow should_throw) {
- STATIC_ASSERT(attrs == NONE || attrs == SEALED || attrs == FROZEN);
-
- // Sealing/freezing sloppy arguments or namespace objects should be handled
- // elsewhere.
- DCHECK(!object->HasSloppyArgumentsElements());
- DCHECK_IMPLIES(object->IsJSModuleNamespace(), attrs == NONE);
-
- Isolate* isolate = object->GetIsolate();
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
- isolate->ReportFailedAccessCheck(object);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kNoAccess));
- }
-
- if (attrs == NONE && !object->map()->is_extensible()) return Just(true);
-
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return Just(true);
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return PreventExtensionsWithTransition<attrs>(
- PrototypeIterator::GetCurrent<JSObject>(iter), should_throw);
- }
-
- if (object->map()->has_named_interceptor() ||
- object->map()->has_indexed_interceptor()) {
- MessageTemplate message = MessageTemplate::kNone;
- switch (attrs) {
- case NONE:
- message = MessageTemplate::kCannotPreventExt;
- break;
-
- case SEALED:
- message = MessageTemplate::kCannotSeal;
- break;
-
- case FROZEN:
- message = MessageTemplate::kCannotFreeze;
- break;
- }
- RETURN_FAILURE(isolate, should_throw, NewTypeError(message));
- }
-
- Handle<NumberDictionary> new_element_dictionary;
- if (!object->HasFixedTypedArrayElements() &&
- !object->HasDictionaryElements() &&
- !object->HasSlowStringWrapperElements()) {
- int length = object->IsJSArray()
- ? Smi::ToInt(Handle<JSArray>::cast(object)->length())
- : object->elements()->length();
- new_element_dictionary =
- length == 0 ? isolate->factory()->empty_slow_element_dictionary()
- : object->GetElementsAccessor()->Normalize(object);
- }
-
- Handle<Symbol> transition_marker;
- if (attrs == NONE) {
- transition_marker = isolate->factory()->nonextensible_symbol();
- } else if (attrs == SEALED) {
- transition_marker = isolate->factory()->sealed_symbol();
- } else {
- DCHECK(attrs == FROZEN);
- transition_marker = isolate->factory()->frozen_symbol();
- }
-
- Handle<Map> old_map(object->map(), isolate);
- TransitionsAccessor transitions(isolate, old_map);
- Map transition = transitions.SearchSpecial(*transition_marker);
- if (!transition.is_null()) {
- Handle<Map> transition_map(transition, isolate);
- DCHECK(transition_map->has_dictionary_elements() ||
- transition_map->has_fixed_typed_array_elements() ||
- transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
- DCHECK(!transition_map->is_extensible());
- JSObject::MigrateToMap(object, transition_map);
- } else if (transitions.CanHaveMoreTransitions()) {
-    // Create a new descriptor array with the appropriate property attributes.
- Handle<Map> new_map = Map::CopyForPreventExtensions(
- isolate, old_map, attrs, transition_marker, "CopyForPreventExtensions");
- JSObject::MigrateToMap(object, new_map);
- } else {
- DCHECK(old_map->is_dictionary_map() || !old_map->is_prototype_map());
-    // Slow path: need to normalize properties for safety.
- NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0,
- "SlowPreventExtensions");
-
- // Create a new map, since other objects with this map may be extensible.
- // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
- Handle<Map> new_map = Map::Copy(isolate, handle(object->map(), isolate),
- "SlowCopyForPreventExtensions");
- new_map->set_is_extensible(false);
- if (!new_element_dictionary.is_null()) {
- ElementsKind new_kind =
- IsStringWrapperElementsKind(old_map->elements_kind())
- ? SLOW_STRING_WRAPPER_ELEMENTS
- : DICTIONARY_ELEMENTS;
- new_map->set_elements_kind(new_kind);
- }
- JSObject::MigrateToMap(object, new_map);
-
- if (attrs != NONE) {
- ReadOnlyRoots roots(isolate);
- if (object->IsJSGlobalObject()) {
- Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*object)->global_dictionary(), isolate);
- ApplyAttributesToDictionary(isolate, roots, dictionary, attrs);
- } else {
- Handle<NameDictionary> dictionary(object->property_dictionary(),
- isolate);
- ApplyAttributesToDictionary(isolate, roots, dictionary, attrs);
- }
- }
- }
-
- // Both seal and preventExtensions always go through without modifications to
- // typed array elements. Freeze works only if there are no actual elements.
- if (object->HasFixedTypedArrayElements()) {
- if (attrs == FROZEN &&
- JSArrayBufferView::cast(*object)->byte_length() > 0) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kCannotFreezeArrayBufferView));
- return Nothing<bool>();
- }
- return Just(true);
- }
-
- DCHECK(object->map()->has_dictionary_elements() ||
- object->map()->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
- if (!new_element_dictionary.is_null()) {
- object->set_elements(*new_element_dictionary);
- }
-
- if (object->elements() !=
- ReadOnlyRoots(isolate).empty_slow_element_dictionary()) {
- Handle<NumberDictionary> dictionary(object->element_dictionary(), isolate);
-    // Make sure we never go back to the fast case.
- object->RequireSlowElements(*dictionary);
- if (attrs != NONE) {
- ApplyAttributesToDictionary(isolate, ReadOnlyRoots(isolate), dictionary,
- attrs);
- }
- }
-
- return Just(true);
-}
-
-
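PreventExtensionsWithTransition is the shared path behind Object.preventExtensions, Object.seal and Object.freeze (attrs NONE, SEALED, FROZEN). Note the typed-array special case at the end: per the comments above, in this V8 version seal and preventExtensions leave typed-array elements alone, while freezing a non-empty view throws. A sketch of the observable behavior:

    const ta = new Int32Array(4);
    Object.seal(ta);                        // succeeds; elements stay writable
    try {
      Object.freeze(ta);                    // non-empty view cannot be frozen
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }
    Object.freeze(new Int32Array(0));       // fine: no elements to make read-only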
-Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
- Representation representation,
- FieldIndex index) {
- Isolate* isolate = object->GetIsolate();
- if (object->IsUnboxedDoubleField(index)) {
- double value = object->RawFastDoublePropertyAt(index);
- return isolate->factory()->NewHeapNumber(value);
- }
- Handle<Object> raw_value(object->RawFastPropertyAt(index), isolate);
- return Object::WrapForRead(isolate, raw_value, representation);
-}
-
-// static
-MaybeHandle<Object> JSReceiver::ToPrimitive(Handle<JSReceiver> receiver,
- ToPrimitiveHint hint) {
- Isolate* const isolate = receiver->GetIsolate();
- Handle<Object> exotic_to_prim;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, exotic_to_prim,
- Object::GetMethod(receiver, isolate->factory()->to_primitive_symbol()),
- Object);
- if (!exotic_to_prim->IsUndefined(isolate)) {
- Handle<Object> hint_string =
- isolate->factory()->ToPrimitiveHintString(hint);
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, exotic_to_prim, receiver, 1, &hint_string),
- Object);
- if (result->IsPrimitive()) return result;
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kCannotConvertToPrimitive),
- Object);
- }
- return OrdinaryToPrimitive(receiver, (hint == ToPrimitiveHint::kString)
- ? OrdinaryToPrimitiveHint::kString
- : OrdinaryToPrimitiveHint::kNumber);
-}
-
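JSReceiver::ToPrimitive implements the ES2015 ToPrimitive protocol: a Symbol.toPrimitive method, if present, is called with a hint string and must return a primitive. A minimal sketch:

    const obj = {
      [Symbol.toPrimitive](hint) {   // hint is "number", "string" or "default"
        return hint === "string" ? "forty-two" : 42;
      }
    };
    console.log(+obj);               // 42
    console.log(`${obj}`);           // "forty-two"
    // Returning a non-primitive from the method makes the conversion throw.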
-
-// static
-MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
- Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint) {
- Isolate* const isolate = receiver->GetIsolate();
- Handle<String> method_names[2];
- switch (hint) {
- case OrdinaryToPrimitiveHint::kNumber:
- method_names[0] = isolate->factory()->valueOf_string();
- method_names[1] = isolate->factory()->toString_string();
- break;
- case OrdinaryToPrimitiveHint::kString:
- method_names[0] = isolate->factory()->toString_string();
- method_names[1] = isolate->factory()->valueOf_string();
- break;
- }
- for (Handle<String> name : method_names) {
- Handle<Object> method;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, method,
- JSReceiver::GetProperty(isolate, receiver, name),
- Object);
- if (method->IsCallable()) {
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, method, receiver, 0, nullptr), Object);
- if (result->IsPrimitive()) return result;
- }
- }
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kCannotConvertToPrimitive),
- Object);
-}
-
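OrdinaryToPrimitive tries valueOf then toString for a number hint, and the reverse order for a string hint, keeping the first primitive result; if neither yields a primitive it throws. For example:

    const o = {
      valueOf() { return 7; },
      toString() { return "seven"; }
    };
    console.log(Number(o));  // 7       (number hint: valueOf wins)
    console.log(String(o));  // "seven" (string hint: toString wins)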
-
-// TODO(cbruni/jkummerow): Consider moving this into elements.cc.
-bool JSObject::HasEnumerableElements() {
- // TODO(cbruni): cleanup
- JSObject object = *this;
- switch (object->GetElementsKind()) {
- case PACKED_SMI_ELEMENTS:
- case PACKED_ELEMENTS:
- case PACKED_DOUBLE_ELEMENTS: {
- int length = object->IsJSArray()
- ? Smi::ToInt(JSArray::cast(object)->length())
- : object->elements()->length();
- return length > 0;
- }
- case HOLEY_SMI_ELEMENTS:
- case HOLEY_ELEMENTS: {
- FixedArray elements = FixedArray::cast(object->elements());
- int length = object->IsJSArray()
- ? Smi::ToInt(JSArray::cast(object)->length())
- : elements->length();
- Isolate* isolate = GetIsolate();
- for (int i = 0; i < length; i++) {
- if (!elements->is_the_hole(isolate, i)) return true;
- }
- return false;
- }
- case HOLEY_DOUBLE_ELEMENTS: {
- int length = object->IsJSArray()
- ? Smi::ToInt(JSArray::cast(object)->length())
- : object->elements()->length();
- // Zero-length arrays would use the empty FixedArray...
- if (length == 0) return false;
- // ...so only cast to FixedDoubleArray otherwise.
- FixedDoubleArray elements = FixedDoubleArray::cast(object->elements());
- for (int i = 0; i < length; i++) {
- if (!elements->is_the_hole(i)) return true;
- }
- return false;
- }
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- {
- int length = object->elements()->length();
- return length > 0;
- }
- case DICTIONARY_ELEMENTS: {
- NumberDictionary elements = NumberDictionary::cast(object->elements());
- return elements->NumberOfEnumerableProperties() > 0;
- }
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- // We're approximating non-empty arguments objects here.
- return true;
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- if (String::cast(JSValue::cast(object)->value())->length() > 0) {
- return true;
- }
- return object->elements()->length() > 0;
- case NO_ELEMENTS:
- return false;
- }
- UNREACHABLE();
-}
-
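HasEnumerableElements distinguishes packed from holey backing stores: holes are not own properties, so they never show up in enumeration. A quick example of the behavior being approximated:

    const a = [1, , 3];           // holey: index 1 is a hole, not undefined
    console.log(Object.keys(a));  // ["0", "2"]
    console.log(1 in a);          // false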
-int Map::NumberOfEnumerableProperties() const {
- int result = 0;
- DescriptorArray descs = instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if ((descs->GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
- !descs->GetKey(i)->FilterKey(ENUMERABLE_STRINGS)) {
- result++;
- }
- }
- return result;
-}
-
-int Map::NextFreePropertyIndex() const {
- int free_index = 0;
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- DescriptorArray descs = instance_descriptors();
- for (int i = 0; i < number_of_own_descriptors; i++) {
- PropertyDetails details = descs->GetDetails(i);
- if (details.location() == kField) {
- int candidate = details.field_index() + details.field_width_in_words();
- if (candidate > free_index) free_index = candidate;
- }
- }
- return free_index;
-}
-
-bool Map::OnlyHasSimpleProperties() const {
- // Wrapped string elements aren't explicitly stored in the elements backing
- // store, but are loaded indirectly from the underlying string.
- return !IsStringWrapperElementsKind(elements_kind()) &&
- !IsSpecialReceiverMap() && !has_hidden_prototype() &&
- !is_dictionary_map();
-}
-
-V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
- Isolate* isolate, Handle<JSReceiver> receiver, bool get_entries,
- Handle<FixedArray>* result) {
- Handle<Map> map(JSReceiver::cast(*receiver)->map(), isolate);
-
- if (!map->IsJSObjectMap()) return Just(false);
- if (!map->OnlyHasSimpleProperties()) return Just(false);
-
- Handle<JSObject> object(JSObject::cast(*receiver), isolate);
-
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- int number_of_own_elements =
- object->GetElementsAccessor()->GetCapacity(*object, object->elements());
- Handle<FixedArray> values_or_entries = isolate->factory()->NewFixedArray(
- number_of_own_descriptors + number_of_own_elements);
- int count = 0;
-
- if (object->elements() != ReadOnlyRoots(isolate).empty_fixed_array()) {
- MAYBE_RETURN(object->GetElementsAccessor()->CollectValuesOrEntries(
- isolate, object, values_or_entries, get_entries, &count,
- ENUMERABLE_STRINGS),
- Nothing<bool>());
- }
-
- bool stable = object->map() == *map;
-
- for (int index = 0; index < number_of_own_descriptors; index++) {
- Handle<Name> next_key(descriptors->GetKey(index), isolate);
- if (!next_key->IsString()) continue;
- Handle<Object> prop_value;
-
-    // Directly decode from the descriptor array if |object| did not change
-    // shape.
- if (stable) {
- PropertyDetails details = descriptors->GetDetails(index);
- if (!details.IsEnumerable()) continue;
- if (details.kind() == kData) {
- if (details.location() == kDescriptor) {
- prop_value = handle(descriptors->GetStrongValue(index), isolate);
- } else {
- Representation representation = details.representation();
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, index);
- prop_value =
- JSObject::FastPropertyAt(object, representation, field_index);
- }
- } else {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value,
- JSReceiver::GetProperty(isolate, object, next_key),
- Nothing<bool>());
- stable = object->map() == *map;
- }
- } else {
- // If the map did change, do a slower lookup. We are still guaranteed that
- // the object has a simple shape, and that the key is a name.
- LookupIterator it(isolate, object, next_key,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (!it.IsFound()) continue;
- DCHECK(it.state() == LookupIterator::DATA ||
- it.state() == LookupIterator::ACCESSOR);
- if (!it.IsEnumerable()) continue;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
- }
-
- if (get_entries) {
- prop_value = MakeEntryPair(isolate, next_key, prop_value);
- }
-
- values_or_entries->set(count, *prop_value);
- count++;
- }
-
- DCHECK_LE(count, values_or_entries->length());
- *result = FixedArray::ShrinkOrEmpty(isolate, values_or_entries, count);
- return Just(true);
-}
-
-MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
- Handle<JSReceiver> object,
- PropertyFilter filter,
- bool try_fast_path,
- bool get_entries) {
- Handle<FixedArray> values_or_entries;
- if (try_fast_path && filter == ENUMERABLE_STRINGS) {
- Maybe<bool> fast_values_or_entries = FastGetOwnValuesOrEntries(
- isolate, object, get_entries, &values_or_entries);
- if (fast_values_or_entries.IsNothing()) return MaybeHandle<FixedArray>();
- if (fast_values_or_entries.FromJust()) return values_or_entries;
- }
-
- PropertyFilter key_filter =
- static_cast<PropertyFilter>(filter & ~ONLY_ENUMERABLE);
-
- Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, keys,
- KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly, key_filter,
- GetKeysConversion::kConvertToString),
- MaybeHandle<FixedArray>());
-
- values_or_entries = isolate->factory()->NewFixedArray(keys->length());
- int length = 0;
-
- for (int i = 0; i < keys->length(); ++i) {
- Handle<Name> key = Handle<Name>::cast(handle(keys->get(i), isolate));
-
- if (filter & ONLY_ENUMERABLE) {
- PropertyDescriptor descriptor;
- Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
- isolate, object, key, &descriptor);
- MAYBE_RETURN(did_get_descriptor, MaybeHandle<FixedArray>());
- if (!did_get_descriptor.FromJust() || !descriptor.enumerable()) continue;
- }
-
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, Object::GetPropertyOrElement(isolate, object, key),
- MaybeHandle<FixedArray>());
-
- if (get_entries) {
- Handle<FixedArray> entry_storage =
- isolate->factory()->NewUninitializedFixedArray(2);
- entry_storage->set(0, *key);
- entry_storage->set(1, *value);
- value = isolate->factory()->NewJSArrayWithElements(entry_storage,
- PACKED_ELEMENTS, 2);
- }
-
- values_or_entries->set(length, *value);
- length++;
- }
- DCHECK_LE(length, values_or_entries->length());
- return FixedArray::ShrinkOrEmpty(isolate, values_or_entries, length);
-}
-
-MaybeHandle<FixedArray> JSReceiver::GetOwnValues(Handle<JSReceiver> object,
- PropertyFilter filter,
- bool try_fast_path) {
- return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
- try_fast_path, false);
-}
-
-MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
- PropertyFilter filter,
- bool try_fast_path) {
- return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
- try_fast_path, true);
-}
-
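FastGetOwnValuesOrEntries is the fast path behind Object.values and Object.entries. Because accessor getters run during collection and may change the receiver's shape, the code re-checks |stable| after every such call and falls back to a LookupIterator once the map has changed; the slow path still produces spec-correct results:

    const o = { a: 1, get b() { delete this.c; return 2; }, c: 3 };
    // The getter for "b" removes "c" mid-iteration; "c" is then skipped.
    console.log(Object.entries(o));  // [["a", 1], ["b", 2]]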
-Handle<FixedArray> JSReceiver::GetOwnElementIndices(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<JSObject> object) {
- KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
- ALL_PROPERTIES);
- accumulator.CollectOwnElementIndices(receiver, object);
- Handle<FixedArray> keys =
- accumulator.GetKeys(GetKeysConversion::kKeepNumbers);
- DCHECK(keys->ContainsSortedNumbers());
- return keys;
-}
-
-bool Map::DictionaryElementsInPrototypeChainOnly(Isolate* isolate) {
- if (IsDictionaryElementsKind(elements_kind())) {
- return false;
- }
-
- for (PrototypeIterator iter(isolate, *this); !iter.IsAtEnd();
- iter.Advance()) {
-    // Be conservative; don't walk into proxies.
- if (iter.GetCurrent()->IsJSProxy()) return true;
- // String wrappers have non-configurable, non-writable elements.
- if (iter.GetCurrent()->IsStringWrapper()) return true;
- JSObject current = iter.GetCurrent<JSObject>();
-
- if (current->HasDictionaryElements() &&
- current->element_dictionary()->requires_slow_elements()) {
- return true;
- }
-
- if (current->HasSlowArgumentsElements()) {
- FixedArray parameter_map = FixedArray::cast(current->elements());
- Object arguments = parameter_map->get(1);
- if (NumberDictionary::cast(arguments)->requires_slow_elements()) {
- return true;
- }
- }
- }
-
- return false;
-}
-
-
-MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes) {
- Isolate* isolate = object->GetIsolate();
-
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
- return DefineAccessor(&it, getter, setter, attributes);
-}
-
-
-MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes) {
- Isolate* isolate = it->isolate();
-
- it->UpdateProtector();
-
- if (it->state() == LookupIterator::ACCESS_CHECK) {
- if (!it->HasAccess()) {
- isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
- }
- it->Next();
- }
-
- Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
- // Ignore accessors on typed arrays.
- if (it->IsElement() && object->HasFixedTypedArrayElements()) {
- return it->factory()->undefined_value();
- }
-
- DCHECK(getter->IsCallable() || getter->IsUndefined(isolate) ||
- getter->IsNull(isolate) || getter->IsFunctionTemplateInfo());
- DCHECK(setter->IsCallable() || setter->IsUndefined(isolate) ||
- setter->IsNull(isolate) || setter->IsFunctionTemplateInfo());
- it->TransitionToAccessorProperty(getter, setter, attributes);
-
- return isolate->factory()->undefined_value();
-}
-
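JSObject::DefineAccessor is the engine-side entry point for defining getter/setter properties, reached e.g. from Object.defineProperty. A minimal sketch of the JS-visible effect:

    const o = {};
    Object.defineProperty(o, "now", {
      get() { return Date.now(); },  // transitions the map to an accessor property
      configurable: true
    });
    console.log(typeof o.now);       // "number"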
-MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
- Handle<Name> name,
- Handle<AccessorInfo> info,
- PropertyAttributes attributes) {
- Isolate* isolate = object->GetIsolate();
-
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
-
- // Duplicate ACCESS_CHECK outside of GetPropertyAttributes for the case that
- // the FailedAccessCheckCallbackFunction doesn't throw an exception.
- //
- // TODO(verwaest): Force throw an exception if the callback doesn't, so we can
- // remove reliance on default return values.
- if (it.state() == LookupIterator::ACCESS_CHECK) {
- if (!it.HasAccess()) {
- isolate->ReportFailedAccessCheck(object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return it.factory()->undefined_value();
- }
- it.Next();
- }
-
- // Ignore accessors on typed arrays.
- if (it.IsElement() && object->HasFixedTypedArrayElements()) {
- return it.factory()->undefined_value();
- }
-
- CHECK(GetPropertyAttributes(&it).IsJust());
-
- // ES5 forbids turning a property into an accessor if it's not
- // configurable. See 8.6.1 (Table 5).
- if (it.IsFound() && !it.IsConfigurable()) {
- return it.factory()->undefined_value();
- }
-
- it.TransitionToAccessorPair(info, attributes);
-
- return object;
-}
-
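The non-configurability check in SetAccessor mirrors the ES5 rule (8.6.1, Table 5) that a non-configurable property cannot be converted into an accessor; the JS-level analogue throws rather than silently returning:

    const o = {};
    Object.defineProperty(o, "x", { value: 1, configurable: false });
    try {
      Object.defineProperty(o, "x", { get() { return 2; } });
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }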
-Object JSObject::SlowReverseLookup(Object value) {
- if (HasFastProperties()) {
- int number_of_own_descriptors = map()->NumberOfOwnDescriptors();
- DescriptorArray descs = map()->instance_descriptors();
- bool value_is_number = value->IsNumber();
- for (int i = 0; i < number_of_own_descriptors; i++) {
- PropertyDetails details = descs->GetDetails(i);
- if (details.location() == kField) {
- DCHECK_EQ(kData, details.kind());
- FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
- if (IsUnboxedDoubleField(field_index)) {
- if (value_is_number) {
- double property = RawFastDoublePropertyAt(field_index);
- if (property == value->Number()) {
- return descs->GetKey(i);
- }
- }
- } else {
- Object property = RawFastPropertyAt(field_index);
- if (field_index.is_double()) {
- DCHECK(property->IsMutableHeapNumber());
- if (value_is_number && property->Number() == value->Number()) {
- return descs->GetKey(i);
- }
- } else if (property == value) {
- return descs->GetKey(i);
- }
- }
- } else {
- DCHECK_EQ(kDescriptor, details.location());
- if (details.kind() == kData) {
- if (descs->GetStrongValue(i) == value) {
- return descs->GetKey(i);
- }
- }
- }
- }
- return GetReadOnlyRoots().undefined_value();
- } else if (IsJSGlobalObject()) {
- return JSGlobalObject::cast(*this)->global_dictionary()->SlowReverseLookup(
- value);
- } else {
- return property_dictionary()->SlowReverseLookup(value);
- }
-}
-
-Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
- int inobject_properties) {
- Handle<Map> result = isolate->factory()->NewMap(
- map->instance_type(), instance_size, TERMINAL_FAST_ELEMENTS_KIND,
- inobject_properties);
- Handle<Object> prototype(map->prototype(), isolate);
- Map::SetPrototype(isolate, result, prototype);
- result->set_constructor_or_backpointer(map->GetConstructor());
- result->set_bit_field(map->bit_field());
- result->set_bit_field2(map->bit_field2());
- int new_bit_field3 = map->bit_field3();
- new_bit_field3 = OwnsDescriptorsBit::update(new_bit_field3, true);
- new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
- new_bit_field3 = EnumLengthBits::update(new_bit_field3,
- kInvalidEnumCacheSentinel);
- new_bit_field3 = IsDeprecatedBit::update(new_bit_field3, false);
- if (!map->is_dictionary_map()) {
- new_bit_field3 = IsUnstableBit::update(new_bit_field3, false);
- }
- result->set_bit_field3(new_bit_field3);
- return result;
-}
-
-Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
- PropertyNormalizationMode mode, const char* reason) {
- DCHECK(!fast_map->is_dictionary_map());
-
- Handle<Object> maybe_cache(isolate->native_context()->normalized_map_cache(),
- isolate);
- bool use_cache =
- !fast_map->is_prototype_map() && !maybe_cache->IsUndefined(isolate);
- Handle<NormalizedMapCache> cache;
- if (use_cache) cache = Handle<NormalizedMapCache>::cast(maybe_cache);
-
- Handle<Map> new_map;
- if (use_cache && cache->Get(fast_map, mode).ToHandle(&new_map)) {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) new_map->DictionaryMapVerify(isolate);
-#endif
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
-      // The cached map should match the newly created normalized map
-      // bit-by-bit, except for the code cache, which can contain some ICs
-      // which can be applied to the shared map, dependent code and weak cell
-      // cache.
- Handle<Map> fresh = Map::CopyNormalized(isolate, fast_map, mode);
-
- if (new_map->is_prototype_map()) {
- // For prototype maps, the PrototypeInfo is not copied.
- DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
- reinterpret_cast<void*>(new_map->address()),
- kTransitionsOrPrototypeInfoOffset));
- DCHECK_EQ(fresh->raw_transitions(),
- MaybeObject::FromObject(Smi::kZero));
- STATIC_ASSERT(kDescriptorsOffset ==
- kTransitionsOrPrototypeInfoOffset + kTaggedSize);
- DCHECK_EQ(
- 0,
- memcmp(
- HeapObject::RawField(*fresh, kDescriptorsOffset).ToVoidPtr(),
- HeapObject::RawField(*new_map, kDescriptorsOffset).ToVoidPtr(),
- kDependentCodeOffset - kDescriptorsOffset));
- } else {
- DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
- reinterpret_cast<void*>(new_map->address()),
- Map::kDependentCodeOffset));
- }
- STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
- Map::kDependentCodeOffset + kTaggedSize);
- int offset = Map::kPrototypeValidityCellOffset + kTaggedSize;
- DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address() + offset),
- reinterpret_cast<void*>(new_map->address() + offset),
- Map::kSize - offset));
- }
-#endif
- } else {
- new_map = Map::CopyNormalized(isolate, fast_map, mode);
- if (use_cache) {
- cache->Set(fast_map, new_map);
- isolate->counters()->maps_normalized()->Increment();
- }
- if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Normalize", *fast_map, *new_map, reason));
- }
- }
- fast_map->NotifyLeafMapLayoutChange(isolate);
- return new_map;
-}
-
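Map::Normalize converts a fast map to dictionary (slow) mode, using a per-context NormalizedMapCache to share the results. One common trigger is property deletion. A d8 sketch, assuming --allow-natives-syntax (the %-intrinsics are debug-only helpers, not part of JavaScript):

    // d8 --allow-natives-syntax
    const o = { a: 1, b: 2 };
    console.log(%HasFastProperties(o));  // true
    delete o.a;                          // deleting a non-last property
    console.log(%HasFastProperties(o));  // typically false: normalized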
-Handle<Map> Map::CopyNormalized(Isolate* isolate, Handle<Map> map,
- PropertyNormalizationMode mode) {
- int new_instance_size = map->instance_size();
- if (mode == CLEAR_INOBJECT_PROPERTIES) {
- new_instance_size -= map->GetInObjectProperties() * kTaggedSize;
- }
-
- Handle<Map> result = RawCopy(
- isolate, map, new_instance_size,
- mode == CLEAR_INOBJECT_PROPERTIES ? 0 : map->GetInObjectProperties());
- // Clear the unused_property_fields explicitly as this field should not
- // be accessed for normalized maps.
- result->SetInObjectUnusedPropertyFields(0);
- result->set_is_dictionary_map(true);
- result->set_is_migration_target(false);
- result->set_may_have_interesting_symbols(true);
- result->set_construction_counter(kNoSlackTracking);
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) result->DictionaryMapVerify(isolate);
-#endif
-
- return result;
-}
-
-// Return an immutable prototype exotic object version of the input map.
-// Never even try to cache it in the transition tree, as it is intended
-// for the global object and its prototype chain, and excluding it saves
-// memory on the map transition tree.
-
-// static
-Handle<Map> Map::TransitionToImmutableProto(Isolate* isolate, Handle<Map> map) {
- Handle<Map> new_map = Map::Copy(isolate, map, "ImmutablePrototype");
- new_map->set_is_immutable_proto(true);
- return new_map;
-}
-
-namespace {
-void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
-#ifdef DEBUG
-  // Strict function maps have Function as a constructor, but the
-  // Function's initial map is a sloppy function map. The same holds for
-  // GeneratorFunction / AsyncFunction and their initial maps.
- Object constructor = map->GetConstructor();
- DCHECK(constructor->IsJSFunction());
- DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
- *map == *isolate->strict_function_map() ||
- *map == *isolate->strict_function_with_name_map() ||
- *map == *isolate->generator_function_map() ||
- *map == *isolate->generator_function_with_name_map() ||
- *map == *isolate->generator_function_with_home_object_map() ||
- *map == *isolate->generator_function_with_name_and_home_object_map() ||
- *map == *isolate->async_function_map() ||
- *map == *isolate->async_function_with_name_map() ||
- *map == *isolate->async_function_with_home_object_map() ||
- *map == *isolate->async_function_with_name_and_home_object_map());
-#endif
-  // Initial maps must always own their descriptors, and their descriptor
-  // array must not contain descriptors that do not belong to the map.
- DCHECK(map->owns_descriptors());
- DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors()->number_of_descriptors());
-}
-} // namespace
-
-// static
-Handle<Map> Map::CopyInitialMapNormalized(Isolate* isolate, Handle<Map> map,
- PropertyNormalizationMode mode) {
- EnsureInitialMap(isolate, map);
- return CopyNormalized(isolate, map, mode);
-}
-
-// static
-Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
- int instance_size, int inobject_properties,
- int unused_property_fields) {
- EnsureInitialMap(isolate, map);
- Handle<Map> result =
- RawCopy(isolate, map, instance_size, inobject_properties);
-
- // Please note instance_type and instance_size are set when allocated.
- result->SetInObjectUnusedPropertyFields(unused_property_fields);
-
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- if (number_of_own_descriptors > 0) {
- // The copy will use the same descriptors array.
- result->UpdateDescriptors(isolate, map->instance_descriptors(),
- map->GetLayoutDescriptor(),
- number_of_own_descriptors);
-
- DCHECK_EQ(result->NumberOfFields(),
- result->GetInObjectProperties() - result->UnusedPropertyFields());
- }
-
- return result;
-}
-
-Handle<Map> Map::CopyDropDescriptors(Isolate* isolate, Handle<Map> map) {
- Handle<Map> result =
- RawCopy(isolate, map, map->instance_size(),
- map->IsJSObjectMap() ? map->GetInObjectProperties() : 0);
-
- // Please note instance_type and instance_size are set when allocated.
- if (map->IsJSObjectMap()) {
- result->CopyUnusedPropertyFields(*map);
- }
- map->NotifyLeafMapLayoutChange(isolate);
- return result;
-}
-
-Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
- Handle<DescriptorArray> descriptors,
- Descriptor* descriptor) {
- // Sanity check. This path is only to be taken if the map owns its descriptor
- // array, implying that its NumberOfOwnDescriptors equals the number of
- // descriptors in the descriptor array.
- DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors()->number_of_descriptors());
-
- Handle<Map> result = CopyDropDescriptors(isolate, map);
- Handle<Name> name = descriptor->GetKey();
-
- // Properly mark the {result} if the {name} is an "interesting symbol".
- if (name->IsInterestingSymbol()) {
- result->set_may_have_interesting_symbols(true);
- }
-
- // Ensure there's space for the new descriptor in the shared descriptor array.
- if (descriptors->number_of_slack_descriptors() == 0) {
- int old_size = descriptors->number_of_descriptors();
- if (old_size == 0) {
- descriptors = DescriptorArray::Allocate(isolate, 0, 1);
- } else {
- int slack = SlackForArraySize(old_size, kMaxNumberOfDescriptors);
- EnsureDescriptorSlack(isolate, map, slack);
- descriptors = handle(map->instance_descriptors(), isolate);
- }
- }
-
- Handle<LayoutDescriptor> layout_descriptor =
- FLAG_unbox_double_fields
- ? LayoutDescriptor::ShareAppend(isolate, map,
- descriptor->GetDetails())
- : handle(LayoutDescriptor::FastPointerLayout(), isolate);
-
- {
- DisallowHeapAllocation no_gc;
- descriptors->Append(descriptor);
- result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
- }
-
- DCHECK(result->NumberOfOwnDescriptors() == map->NumberOfOwnDescriptors() + 1);
- ConnectTransition(isolate, map, result, name, SIMPLE_PROPERTY_TRANSITION);
-
- return result;
-}
-
-void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
- Handle<Map> child, Handle<Name> name,
- SimpleTransitionFlag flag) {
- DCHECK_IMPLIES(name->IsInterestingSymbol(),
- child->may_have_interesting_symbols());
- DCHECK_IMPLIES(parent->may_have_interesting_symbols(),
- child->may_have_interesting_symbols());
- // Do not track transitions during bootstrap except for element transitions.
- if (isolate->bootstrapper()->IsActive() &&
- !name.is_identical_to(isolate->factory()->elements_transition_symbol())) {
- if (FLAG_trace_maps) {
- LOG(isolate,
- MapEvent("Transition", *parent, *child,
- child->is_prototype_map() ? "prototype" : "", *name));
- }
- return;
- }
- if (!parent->GetBackPointer()->IsUndefined(isolate)) {
- parent->set_owns_descriptors(false);
- } else {
-    // |parent| is an initial map and it must keep the ownership; there must
-    // be no descriptors in the descriptors array that do not belong to the
-    // map.
- DCHECK(parent->owns_descriptors());
- DCHECK_EQ(parent->NumberOfOwnDescriptors(),
- parent->instance_descriptors()->number_of_descriptors());
- }
- if (parent->is_prototype_map()) {
- DCHECK(child->is_prototype_map());
- if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Transition", *parent, *child, "prototype", *name));
- }
- } else {
- TransitionsAccessor(isolate, parent).Insert(name, child, flag);
- if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Transition", *parent, *child, "", *name));
- }
- }
-}
-
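ConnectTransition wires a child map into its parent's transition tree, which is what lets objects that gain the same properties in the same order share a map (hidden class). This is observable with debug intrinsics, assuming a d8 shell with --allow-natives-syntax:

    // d8 --allow-natives-syntax
    const a = {}; a.x = 1; a.y = 2;
    const b = {}; b.x = 3; b.y = 4;
    console.log(%HaveSameMap(a, b));  // true: same transition path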
-Handle<Map> Map::CopyReplaceDescriptors(
- Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> layout_descriptor, TransitionFlag flag,
- MaybeHandle<Name> maybe_name, const char* reason,
- SimpleTransitionFlag simple_flag) {
- DCHECK(descriptors->IsSortedNoDuplicates());
-
- Handle<Map> result = CopyDropDescriptors(isolate, map);
-
- // Properly mark the {result} if the {name} is an "interesting symbol".
- Handle<Name> name;
- if (maybe_name.ToHandle(&name) && name->IsInterestingSymbol()) {
- result->set_may_have_interesting_symbols(true);
- }
-
- if (!map->is_prototype_map()) {
- if (flag == INSERT_TRANSITION &&
- TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
- result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
-
- DCHECK(!maybe_name.is_null());
- ConnectTransition(isolate, map, result, name, simple_flag);
- } else {
- descriptors->GeneralizeAllFields();
- result->InitializeDescriptors(isolate, *descriptors,
- LayoutDescriptor::FastPointerLayout());
- }
- } else {
- result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
- }
- if (FLAG_trace_maps &&
- // Mirror conditions above that did not call ConnectTransition().
- (map->is_prototype_map() ||
- !(flag == INSERT_TRANSITION &&
- TransitionsAccessor(isolate, map).CanHaveMoreTransitions()))) {
- LOG(isolate, MapEvent("ReplaceDescriptors", *map, *result, reason,
- maybe_name.is_null() ? Name() : *name));
- }
- return result;
-}
-
-
-// Creates a transition tree starting from |split_map| and adds all
-// descriptors starting from the descriptor with index
-// |split_map|.NumberOfOwnDescriptors(). The way it is done is tricky because
-// of GC and the special descriptor marking logic.
-Handle<Map> Map::AddMissingTransitions(
- Isolate* isolate, Handle<Map> split_map,
- Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor) {
- DCHECK(descriptors->IsSortedNoDuplicates());
- int split_nof = split_map->NumberOfOwnDescriptors();
- int nof_descriptors = descriptors->number_of_descriptors();
- DCHECK_LT(split_nof, nof_descriptors);
-
-  // Start by creating the last map, which will own the full descriptors
-  // array. This is necessary to guarantee that GC will mark the whole
-  // descriptor array if any of the allocations happening below fail.
-  // The number of unused properties is temporarily incorrect and the layout
-  // descriptor could unnecessarily be in slow mode, but we will fix that
-  // after all the other intermediate maps are created.
-  // Also, the last map might have interesting symbols, so we temporarily set
-  // the flag and clear it right before the descriptors are installed. This
-  // makes heap verification happy and ensures the flag ends up accurate.
- Handle<Map> last_map = CopyDropDescriptors(isolate, split_map);
- last_map->InitializeDescriptors(isolate, *descriptors,
- *full_layout_descriptor);
- last_map->SetInObjectUnusedPropertyFields(0);
- last_map->set_may_have_interesting_symbols(true);
-
-  // During creation of the intermediate maps we violate the descriptor
-  // sharing invariant, since the last map is not yet connected to the
-  // transition tree we create here. But this is safe because GC never trims
-  // a map's descriptors if there are no dead transitions from that map, and
-  // this is exactly the case for all the intermediate maps we create here.
- Handle<Map> map = split_map;
- for (int i = split_nof; i < nof_descriptors - 1; ++i) {
- Handle<Map> new_map = CopyDropDescriptors(isolate, map);
- InstallDescriptors(isolate, map, new_map, i, descriptors,
- full_layout_descriptor);
-
- map = new_map;
- }
- map->NotifyLeafMapLayoutChange(isolate);
- last_map->set_may_have_interesting_symbols(false);
- InstallDescriptors(isolate, map, last_map, nof_descriptors - 1, descriptors,
- full_layout_descriptor);
- return last_map;
-}
-
-
-// Since this method is used to rewrite an existing transition tree, it can
-// always insert transitions without checking.
-void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
- Handle<Map> child, int new_descriptor,
- Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor) {
- DCHECK(descriptors->IsSortedNoDuplicates());
-
- child->SetInstanceDescriptors(isolate, *descriptors, new_descriptor + 1);
- child->CopyUnusedPropertyFields(*parent);
- PropertyDetails details = descriptors->GetDetails(new_descriptor);
- if (details.location() == kField) {
- child->AccountAddedPropertyField();
- }
-
- if (FLAG_unbox_double_fields) {
- Handle<LayoutDescriptor> layout_descriptor =
- LayoutDescriptor::AppendIfFastOrUseFull(isolate, parent, details,
- full_layout_descriptor);
- child->set_layout_descriptor(*layout_descriptor);
-#ifdef VERIFY_HEAP
- // TODO(ishell): remove these checks from VERIFY_HEAP mode.
- if (FLAG_verify_heap) {
- CHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
- }
-#else
- SLOW_DCHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
-#endif
- child->set_visitor_id(Map::GetVisitorId(*child));
- }
-
- Handle<Name> name = handle(descriptors->GetKey(new_descriptor), isolate);
- if (parent->may_have_interesting_symbols() || name->IsInterestingSymbol()) {
- child->set_may_have_interesting_symbols(true);
- }
- ConnectTransition(isolate, parent, child, name, SIMPLE_PROPERTY_TRANSITION);
-}
-
-Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
- ElementsKind kind, TransitionFlag flag) {
- // Only certain objects are allowed to have non-terminal fast transitional
- // elements kinds.
- DCHECK(map->IsJSObjectMap());
- DCHECK_IMPLIES(
- !map->CanHaveFastTransitionableElementsKind(),
- IsDictionaryElementsKind(kind) || IsTerminalElementsKind(kind));
-
- Map maybe_elements_transition_map;
- if (flag == INSERT_TRANSITION) {
-    // Ensure we are requested to add an elements kind transition "near the
-    // root".
- DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
- map->NumberOfOwnDescriptors());
-
- maybe_elements_transition_map = map->ElementsTransitionMap();
- DCHECK(maybe_elements_transition_map.is_null() ||
- (maybe_elements_transition_map->elements_kind() ==
- DICTIONARY_ELEMENTS &&
- kind == DICTIONARY_ELEMENTS));
- DCHECK(!IsFastElementsKind(kind) ||
- IsMoreGeneralElementsKindTransition(map->elements_kind(), kind));
- DCHECK(kind != map->elements_kind());
- }
-
- bool insert_transition =
- flag == INSERT_TRANSITION &&
- TransitionsAccessor(isolate, map).CanHaveMoreTransitions() &&
- maybe_elements_transition_map.is_null();
-
- if (insert_transition) {
- Handle<Map> new_map = CopyForElementsTransition(isolate, map);
- new_map->set_elements_kind(kind);
-
- Handle<Name> name = isolate->factory()->elements_transition_symbol();
- ConnectTransition(isolate, map, new_map, name, SPECIAL_TRANSITION);
- return new_map;
- }
-
- // Create a new free-floating map only if we are not allowed to store it.
- Handle<Map> new_map = Copy(isolate, map, "CopyAsElementsKind");
- new_map->set_elements_kind(kind);
- return new_map;
-}
-
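CopyAsElementsKind backs elements-kind transitions, which only move toward more general kinds (Smi -> double -> tagged, packed -> holey) and never back. For instance:

    const arr = [1, 2, 3];  // PACKED_SMI_ELEMENTS
    arr.push(4.5);          // transitions to PACKED_DOUBLE_ELEMENTS
    arr.push("s");          // transitions to PACKED_ELEMENTS
    arr.pop();              // stays PACKED_ELEMENTS; transitions are one-way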
-Handle<Map> Map::AsLanguageMode(Isolate* isolate, Handle<Map> initial_map,
- Handle<SharedFunctionInfo> shared_info) {
- DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
-  // The initial map for a sloppy-mode function is stored in the function
-  // constructor. Initial maps for strict mode are cached as special
-  // transitions using |strict_function_transition_symbol| as a key.
- if (is_sloppy(shared_info->language_mode())) return initial_map;
-
- Handle<Map> function_map(Map::cast(isolate->native_context()->get(
- shared_info->function_map_index())),
- isolate);
-
- STATIC_ASSERT(LanguageModeSize == 2);
- DCHECK_EQ(LanguageMode::kStrict, shared_info->language_mode());
- Handle<Symbol> transition_symbol =
- isolate->factory()->strict_function_transition_symbol();
- Map maybe_transition = TransitionsAccessor(isolate, initial_map)
- .SearchSpecial(*transition_symbol);
- if (!maybe_transition.is_null()) {
- return handle(maybe_transition, isolate);
- }
- initial_map->NotifyLeafMapLayoutChange(isolate);
-
-  // Create a new map, taking the descriptors from the |function_map| and all
-  // the other details from the |initial_map|.
- Handle<Map> map =
- Map::CopyInitialMap(isolate, function_map, initial_map->instance_size(),
- initial_map->GetInObjectProperties(),
- initial_map->UnusedPropertyFields());
- map->SetConstructor(initial_map->GetConstructor());
- map->set_prototype(initial_map->prototype());
- map->set_construction_counter(initial_map->construction_counter());
-
- if (TransitionsAccessor(isolate, initial_map).CanHaveMoreTransitions()) {
- Map::ConnectTransition(isolate, initial_map, map, transition_symbol,
- SPECIAL_TRANSITION);
- }
- return map;
-}
-
-Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
- DCHECK(!map->is_prototype_map());
- Handle<Map> new_map = CopyDropDescriptors(isolate, map);
-
- if (map->owns_descriptors()) {
- // In case the map owned its own descriptors, share the descriptors and
- // transfer ownership to the new map.
- // The properties did not change, so reuse descriptors.
- new_map->InitializeDescriptors(isolate, map->instance_descriptors(),
- map->GetLayoutDescriptor());
- } else {
-    // In case the map did not own its own descriptors, a split is forced by
-    // copying the map, which creates a new descriptor array cell.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
- isolate, descriptors, number_of_own_descriptors);
- Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
- isolate);
- new_map->InitializeDescriptors(isolate, *new_descriptors,
- *new_layout_descriptor);
- }
- return new_map;
-}
-
-Handle<Map> Map::Copy(Isolate* isolate, Handle<Map> map, const char* reason) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
- isolate, descriptors, number_of_own_descriptors);
- Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
- isolate);
- return CopyReplaceDescriptors(
- isolate, map, new_descriptors, new_layout_descriptor, OMIT_TRANSITION,
- MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
-}
-
-
-Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
- Handle<Map> copy =
- Copy(isolate, handle(isolate->object_function()->initial_map(), isolate),
- "MapCreate");
-
- // Check that we do not overflow the instance size when adding the extra
- // inobject properties. If the instance size overflows, we allocate as many
- // properties as we can as inobject properties.
- if (inobject_properties > JSObject::kMaxInObjectProperties) {
- inobject_properties = JSObject::kMaxInObjectProperties;
- }
-
- int new_instance_size =
- JSObject::kHeaderSize + kTaggedSize * inobject_properties;
-
- // Adjust the map with the extra inobject properties.
- copy->set_instance_size(new_instance_size);
- copy->SetInObjectPropertiesStartInWords(JSObject::kHeaderSize / kTaggedSize);
- DCHECK_EQ(copy->GetInObjectProperties(), inobject_properties);
- copy->SetInObjectUnusedPropertyFields(inobject_properties);
- copy->set_visitor_id(Map::GetVisitorId(*copy));
- return copy;
-}
-
-Handle<Map> Map::CopyForPreventExtensions(Isolate* isolate, Handle<Map> map,
- PropertyAttributes attrs_to_add,
- Handle<Symbol> transition_marker,
- const char* reason) {
- int num_descriptors = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
- isolate, handle(map->instance_descriptors(), isolate), num_descriptors,
- attrs_to_add);
- Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
- isolate);
- Handle<Map> new_map = CopyReplaceDescriptors(
- isolate, map, new_desc, new_layout_descriptor, INSERT_TRANSITION,
- transition_marker, reason, SPECIAL_TRANSITION);
- new_map->set_is_extensible(false);
- if (!IsFixedTypedArrayElementsKind(map->elements_kind())) {
- ElementsKind new_kind = IsStringWrapperElementsKind(map->elements_kind())
- ? SLOW_STRING_WRAPPER_ELEMENTS
- : DICTIONARY_ELEMENTS;
- new_map->set_elements_kind(new_kind);
- }
- return new_map;
-}
-
-namespace {
-
-bool CanHoldValue(DescriptorArray descriptors, int descriptor,
- PropertyConstness constness, Object value) {
- PropertyDetails details = descriptors->GetDetails(descriptor);
- if (details.location() == kField) {
- if (details.kind() == kData) {
- return IsGeneralizableTo(constness, details.constness()) &&
- value->FitsRepresentation(details.representation()) &&
- descriptors->GetFieldType(descriptor)->NowContains(value);
- } else {
- DCHECK_EQ(kAccessor, details.kind());
- return false;
- }
-
- } else {
- DCHECK_EQ(kDescriptor, details.location());
- DCHECK_EQ(PropertyConstness::kConst, details.constness());
- if (details.kind() == kData) {
- DCHECK(!FLAG_track_constant_fields);
- DCHECK(descriptors->GetStrongValue(descriptor) != value ||
- value->FitsRepresentation(details.representation()));
- return descriptors->GetStrongValue(descriptor) == value;
- } else {
- DCHECK_EQ(kAccessor, details.kind());
- return false;
- }
- }
- UNREACHABLE();
-}
-
-Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
- int descriptor,
- PropertyConstness constness,
- Handle<Object> value) {
- if (CanHoldValue(map->instance_descriptors(), descriptor, constness,
- *value)) {
- return map;
- }
-
- PropertyAttributes attributes =
- map->instance_descriptors()->GetDetails(descriptor).attributes();
- Representation representation = value->OptimalRepresentation();
- Handle<FieldType> type = value->OptimalType(isolate, representation);
-
- MapUpdater mu(isolate, map);
- return mu.ReconfigureToDataField(descriptor, attributes, constness,
- representation, type);
-}
-
-} // namespace
-
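CanHoldValue/UpdateDescriptorForValue decide whether a stored value fits a field's recorded representation and field type; if not, the map is updated to a more general representation. Roughly, at the JS level:

    const o = { x: 1 };  // "x" starts with a Smi (small integer) representation
    o.x = 1.5;           // generalizes the field to Double
    o.x = "s";           // generalizes again to Tagged
    // Generalization is one-way; the map remembers the most general type seen.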
-// static
-Handle<Map> Map::PrepareForDataProperty(Isolate* isolate, Handle<Map> map,
- int descriptor,
- PropertyConstness constness,
- Handle<Object> value) {
- // Dictionaries can store any property value.
- DCHECK(!map->is_dictionary_map());
- // Update to the newest map before storing the property.
- return UpdateDescriptorForValue(isolate, Update(isolate, map), descriptor,
- constness, value);
-}
-
-Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- PropertyConstness constness,
- StoreOrigin store_origin) {
- RuntimeCallTimerScope stats_scope(
- isolate, *map,
- map->is_prototype_map()
- ? RuntimeCallCounterId::kPrototypeMap_TransitionToDataProperty
- : RuntimeCallCounterId::kMap_TransitionToDataProperty);
-
- DCHECK(name->IsUniqueName());
- DCHECK(!map->is_dictionary_map());
-
- // Migrate to the newest map before storing the property.
- map = Update(isolate, map);
-
- Map maybe_transition = TransitionsAccessor(isolate, map)
- .SearchTransition(*name, kData, attributes);
- if (!maybe_transition.is_null()) {
- Handle<Map> transition(maybe_transition, isolate);
- int descriptor = transition->LastAdded();
-
- DCHECK_EQ(attributes, transition->instance_descriptors()
- ->GetDetails(descriptor)
- .attributes());
-
- return UpdateDescriptorForValue(isolate, transition, descriptor, constness,
- value);
- }
-
- TransitionFlag flag = INSERT_TRANSITION;
- MaybeHandle<Map> maybe_map;
- if (!map->TooManyFastProperties(store_origin)) {
- if (!FLAG_track_constant_fields && value->IsJSFunction()) {
- maybe_map =
- Map::CopyWithConstant(isolate, map, name, value, attributes, flag);
- } else {
- Representation representation = value->OptimalRepresentation();
- Handle<FieldType> type = value->OptimalType(isolate, representation);
- maybe_map = Map::CopyWithField(isolate, map, name, type, attributes,
- constness, representation, flag);
- }
- }
-
- Handle<Map> result;
- if (!maybe_map.ToHandle(&result)) {
- const char* reason = "TooManyFastProperties";
-#if V8_TRACE_MAPS
- std::unique_ptr<ScopedVector<char>> buffer;
- if (FLAG_trace_maps) {
- ScopedVector<char> name_buffer(100);
- name->NameShortPrint(name_buffer);
- buffer.reset(new ScopedVector<char>(128));
- SNPrintF(*buffer, "TooManyFastProperties %s", name_buffer.start());
- reason = buffer->start();
- }
-#endif
- Handle<Object> maybe_constructor(map->GetConstructor(), isolate);
- if (FLAG_feedback_normalization && map->new_target_is_base() &&
- maybe_constructor->IsJSFunction() &&
- !JSFunction::cast(*maybe_constructor)->shared()->native()) {
- Handle<JSFunction> constructor =
- Handle<JSFunction>::cast(maybe_constructor);
- DCHECK_NE(*constructor,
- constructor->context()->native_context()->object_function());
- Handle<Map> initial_map(constructor->initial_map(), isolate);
- result = Map::Normalize(isolate, initial_map, CLEAR_INOBJECT_PROPERTIES,
- reason);
- initial_map->DeprecateTransitionTree(isolate);
- Handle<Object> prototype(result->prototype(), isolate);
- JSFunction::SetInitialMap(constructor, result, prototype);
-
- // Deoptimize all code that embeds the previous initial map.
- initial_map->dependent_code()->DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kInitialMapChangedGroup);
- if (!result->EquivalentToForNormalization(*map,
- CLEAR_INOBJECT_PROPERTIES)) {
- result =
- Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, reason);
- }
- } else {
- result = Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, reason);
- }
- }
-
- return result;
-}
-
-Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
- int descriptor, PropertyKind kind,
- PropertyAttributes attributes) {
- // Dictionaries have to be reconfigured in-place.
- DCHECK(!map->is_dictionary_map());
-
- if (!map->GetBackPointer()->IsMap()) {
-    // There is no benefit from reconstructing the transition tree for maps
-    // without back pointers.
- return CopyGeneralizeAllFields(isolate, map, map->elements_kind(),
- descriptor, kind, attributes,
- "GenAll_AttributesMismatchProtoMap");
- }
-
- if (FLAG_trace_generalization) {
- map->PrintReconfiguration(isolate, stdout, descriptor, kind, attributes);
- }
-
- MapUpdater mu(isolate, map);
- DCHECK_EQ(kData, kind); // Only kData case is supported so far.
- Handle<Map> new_map = mu.ReconfigureToDataField(
- descriptor, attributes, kDefaultFieldConstness, Representation::None(),
- FieldType::None(isolate));
- return new_map;
-}
-
-Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
- Handle<Name> name, int descriptor,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes) {
- RuntimeCallTimerScope stats_scope(
- isolate,
- map->is_prototype_map()
- ? RuntimeCallCounterId::kPrototypeMap_TransitionToAccessorProperty
- : RuntimeCallCounterId::kMap_TransitionToAccessorProperty);
-
- // At least one of the accessors needs to be a new value.
- DCHECK(!getter->IsNull(isolate) || !setter->IsNull(isolate));
- DCHECK(name->IsUniqueName());
-
- // Dictionary maps can always have additional data properties.
- if (map->is_dictionary_map()) return map;
-
- // Migrate to the newest map before transitioning to the new property.
- map = Update(isolate, map);
-
- PropertyNormalizationMode mode = map->is_prototype_map()
- ? KEEP_INOBJECT_PROPERTIES
- : CLEAR_INOBJECT_PROPERTIES;
-
- Map maybe_transition = TransitionsAccessor(isolate, map)
- .SearchTransition(*name, kAccessor, attributes);
- if (!maybe_transition.is_null()) {
- Handle<Map> transition(maybe_transition, isolate);
- DescriptorArray descriptors = transition->instance_descriptors();
- int descriptor = transition->LastAdded();
- DCHECK(descriptors->GetKey(descriptor)->Equals(*name));
-
- DCHECK_EQ(kAccessor, descriptors->GetDetails(descriptor).kind());
- DCHECK_EQ(attributes, descriptors->GetDetails(descriptor).attributes());
-
- Handle<Object> maybe_pair(descriptors->GetStrongValue(descriptor), isolate);
- if (!maybe_pair->IsAccessorPair()) {
- return Map::Normalize(isolate, map, mode,
- "TransitionToAccessorFromNonPair");
- }
-
- Handle<AccessorPair> pair = Handle<AccessorPair>::cast(maybe_pair);
- if (!pair->Equals(*getter, *setter)) {
- return Map::Normalize(isolate, map, mode,
- "TransitionToDifferentAccessor");
- }
-
- return transition;
- }
-
- Handle<AccessorPair> pair;
- DescriptorArray old_descriptors = map->instance_descriptors();
- if (descriptor != DescriptorArray::kNotFound) {
- if (descriptor != map->LastAdded()) {
- return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
- }
- PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
- if (old_details.kind() != kAccessor) {
- return Map::Normalize(isolate, map, mode,
- "AccessorsOverwritingNonAccessors");
- }
-
- if (old_details.attributes() != attributes) {
- return Map::Normalize(isolate, map, mode, "AccessorsWithAttributes");
- }
-
- Handle<Object> maybe_pair(old_descriptors->GetStrongValue(descriptor),
- isolate);
- if (!maybe_pair->IsAccessorPair()) {
- return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonPair");
- }
-
- Handle<AccessorPair> current_pair = Handle<AccessorPair>::cast(maybe_pair);
- if (current_pair->Equals(*getter, *setter)) return map;
-
- bool overwriting_accessor = false;
- if (!getter->IsNull(isolate) &&
- !current_pair->get(ACCESSOR_GETTER)->IsNull(isolate) &&
- current_pair->get(ACCESSOR_GETTER) != *getter) {
- overwriting_accessor = true;
- }
- if (!setter->IsNull(isolate) &&
- !current_pair->get(ACCESSOR_SETTER)->IsNull(isolate) &&
- current_pair->get(ACCESSOR_SETTER) != *setter) {
- overwriting_accessor = true;
- }
- if (overwriting_accessor) {
- return Map::Normalize(isolate, map, mode,
- "AccessorsOverwritingAccessors");
- }
-
- pair = AccessorPair::Copy(isolate, Handle<AccessorPair>::cast(maybe_pair));
- } else if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors ||
- map->TooManyFastProperties(StoreOrigin::kNamed)) {
- return Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES,
- "TooManyAccessors");
- } else {
- pair = isolate->factory()->NewAccessorPair();
- }
-
- pair->SetComponents(*getter, *setter);
-
- TransitionFlag flag = INSERT_TRANSITION;
- Descriptor d = Descriptor::AccessorConstant(name, pair, attributes);
- return Map::CopyInsertDescriptor(isolate, map, &d, flag);
-}
-
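-// Illustrative behavior (hypothetical, for exposition only): redefining an
-// accessor property with the exact same (getter, setter) pair reuses the
-// existing accessor transition, while installing a different accessor over
-// an existing non-null one takes the "AccessorsOverwritingAccessors" path
-// and normalizes the map.
-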
-Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
- Descriptor* descriptor,
- TransitionFlag flag) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
-
- // Share descriptors only if the map owns descriptors and is not an
- // initial map.
- if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
- !map->GetBackPointer()->IsUndefined(isolate) &&
- TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
- return ShareDescriptor(isolate, map, descriptors, descriptor);
- }
-
- int nof = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> new_descriptors =
- DescriptorArray::CopyUpTo(isolate, descriptors, nof, 1);
- new_descriptors->Append(descriptor);
-
- Handle<LayoutDescriptor> new_layout_descriptor =
- FLAG_unbox_double_fields
- ? LayoutDescriptor::New(isolate, map, new_descriptors, nof + 1)
- : handle(LayoutDescriptor::FastPointerLayout(), isolate);
-
- return CopyReplaceDescriptors(
- isolate, map, new_descriptors, new_layout_descriptor, flag,
- descriptor->GetKey(), "CopyAddDescriptor", SIMPLE_PROPERTY_TRANSITION);
-}
-
-Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map,
- Descriptor* descriptor,
- TransitionFlag flag) {
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
-
- // We replace the key if it is already present.
- int index =
- old_descriptors->SearchWithCache(isolate, *descriptor->GetKey(), *map);
- if (index != DescriptorArray::kNotFound) {
- return CopyReplaceDescriptor(isolate, map, old_descriptors, descriptor,
- index, flag);
- }
- return CopyAddDescriptor(isolate, map, descriptor, flag);
-}
-
Handle<DescriptorArray> DescriptorArray::CopyUpTo(Isolate* isolate,
Handle<DescriptorArray> desc,
int enumeration_index,
@@ -10382,34 +3803,6 @@ bool DescriptorArray::IsEqualUpTo(DescriptorArray desc, int nof_descriptors) {
return true;
}
-Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
- Handle<DescriptorArray> descriptors,
- Descriptor* descriptor,
- int insertion_index,
- TransitionFlag flag) {
- Handle<Name> key = descriptor->GetKey();
- DCHECK_EQ(*key, descriptors->GetKey(insertion_index));
- // This function does not support replacing property fields as
- // that would break property field counters.
- DCHECK_NE(kField, descriptor->GetDetails().location());
- DCHECK_NE(kField, descriptors->GetDetails(insertion_index).location());
-
- Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
- isolate, descriptors, map->NumberOfOwnDescriptors());
-
- new_descriptors->Replace(insertion_index, descriptor);
- Handle<LayoutDescriptor> new_layout_descriptor = LayoutDescriptor::New(
- isolate, map, new_descriptors, new_descriptors->number_of_descriptors());
-
- SimpleTransitionFlag simple_flag =
- (insertion_index == descriptors->number_of_descriptors() - 1)
- ? SIMPLE_PROPERTY_TRANSITION
- : PROPERTY_TRANSITION;
- return CopyReplaceDescriptors(isolate, map, new_descriptors,
- new_layout_descriptor, flag, key,
- "CopyReplaceDescriptor", simple_flag);
-}
-
Handle<FixedArray> FixedArray::SetAndGrow(Isolate* isolate,
Handle<FixedArray> array, int index,
Handle<Object> value,
@@ -10473,16 +3866,6 @@ void FixedArray::CopyTo(int pos, FixedArray dest, int dest_pos, int len) const {
}
}
-void JSObject::PrototypeRegistryCompactionCallback(HeapObject value,
- int old_index,
- int new_index) {
- DCHECK(value->IsMap() && Map::cast(value)->is_prototype_map());
- Map map = Map::cast(value);
- DCHECK(map->prototype_info()->IsPrototypeInfo());
- PrototypeInfo proto_info = PrototypeInfo::cast(map->prototype_info());
- DCHECK_EQ(old_index, proto_info->registry_slot());
- proto_info->set_registry_slot(new_index);
-}
// static
Handle<ArrayList> ArrayList::Add(Isolate* isolate, Handle<ArrayList> array,
@@ -10714,7 +4097,8 @@ Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
Handle<Object> receiver,
Handle<JSFunction> function,
Handle<AbstractCode> code,
- int offset, int flags) {
+ int offset, int flags,
+ Handle<FixedArray> parameters) {
const int frame_count = in->FrameCount();
const int new_length = LengthFor(frame_count + 1);
Handle<FrameArray> array =
@@ -10724,6 +4108,7 @@ Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
array->SetCode(frame_count, *code);
array->SetOffset(frame_count, Smi::FromInt(offset));
array->SetFlags(frame_count, Smi::FromInt(flags));
+ array->SetParameters(frame_count, *parameters);
array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
return array;
}
@@ -10763,11 +4148,11 @@ Handle<FrameArray> FrameArray::EnsureSpace(Isolate* isolate,
Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
int nof_descriptors,
int slack,
- PretenureFlag pretenure) {
+ AllocationType type) {
return nof_descriptors + slack == 0
? isolate->factory()->empty_descriptor_array()
: isolate->factory()->NewDescriptorArray(nof_descriptors, slack,
- pretenure);
+ type);
}
void DescriptorArray::Initialize(EnumCache enum_cache,
@@ -10912,26 +4297,6 @@ Handle<Object> AccessorPair::GetComponent(Isolate* isolate,
return handle(accessor, isolate);
}
-Handle<DeoptimizationData> DeoptimizationData::New(Isolate* isolate,
- int deopt_entry_count,
- PretenureFlag pretenure) {
- return Handle<DeoptimizationData>::cast(isolate->factory()->NewFixedArray(
- LengthFor(deopt_entry_count), pretenure));
-}
-
-Handle<DeoptimizationData> DeoptimizationData::Empty(Isolate* isolate) {
- return Handle<DeoptimizationData>::cast(
- isolate->factory()->empty_fixed_array());
-}
-
-SharedFunctionInfo DeoptimizationData::GetInlinedFunction(int index) {
- if (index == -1) {
- return SharedFunctionInfo::cast(SharedFunctionInfo());
- } else {
- return SharedFunctionInfo::cast(LiteralArray()->get(index));
- }
-}
-
#ifdef DEBUG
bool DescriptorArray::IsEqualTo(DescriptorArray other) {
if (number_of_all_descriptors() != other->number_of_all_descriptors()) {
@@ -10945,42 +4310,6 @@ bool DescriptorArray::IsEqualTo(DescriptorArray other) {
#endif
// static
-Handle<String> String::Trim(Isolate* isolate, Handle<String> string,
- TrimMode mode) {
- string = String::Flatten(isolate, string);
- int const length = string->length();
-
- // Perform left trimming if requested.
- int left = 0;
- if (mode == kTrim || mode == kTrimStart) {
- while (left < length && IsWhiteSpaceOrLineTerminator(string->Get(left))) {
- left++;
- }
- }
-
- // Perform right trimming if requested.
- int right = length;
- if (mode == kTrim || mode == kTrimEnd) {
- while (right > left &&
- IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
- right--;
- }
- }
-
- return isolate->factory()->NewSubString(string, left, right);
-}
-
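-// Illustrative results (hypothetical, for exposition only):
-// Trim(" \tfoo ", kTrimStart) -> "foo "
-// Trim(" \tfoo ", kTrimEnd) -> " \tfoo"
-// Trim(" \tfoo ", kTrim) -> "foo"
-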
-bool String::LooksValid() {
- // TODO(leszeks): Maybe remove this check entirely; Heap::Contains uses
- // basically the same logic as the way we access the heap in the first place.
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(*this);
- // RO_SPACE objects should always be valid.
- if (chunk->owner()->identity() == RO_SPACE) return true;
- if (chunk->heap() == nullptr) return false;
- return chunk->heap()->Contains(*this);
-}
-
-// static
MaybeHandle<String> Name::ToFunctionName(Isolate* isolate, Handle<Name> name) {
if (name->IsString()) return Handle<String>::cast(name);
// ES6 section 9.2.11 SetFunctionName, step 4.
@@ -11008,191 +4337,6 @@ MaybeHandle<String> Name::ToFunctionName(Isolate* isolate, Handle<Name> name,
return builder.Finish();
}
-namespace {
-
-bool AreDigits(const uint8_t* s, int from, int to) {
- for (int i = from; i < to; i++) {
- if (s[i] < '0' || s[i] > '9') return false;
- }
-
- return true;
-}
-
-
-int ParseDecimalInteger(const uint8_t* s, int from, int to) {
- DCHECK_LT(to - from, 10); // Overflow is not possible.
- DCHECK(from < to);
- int d = s[from] - '0';
-
- for (int i = from + 1; i < to; i++) {
- d = 10 * d + (s[i] - '0');
- }
-
- return d;
-}
-
-} // namespace
-
-// static
-Handle<Object> String::ToNumber(Isolate* isolate, Handle<String> subject) {
- // Flatten {subject} string first.
- subject = String::Flatten(isolate, subject);
-
- // Fast array index case.
- uint32_t index;
- if (subject->AsArrayIndex(&index)) {
- return isolate->factory()->NewNumberFromUint(index);
- }
-
- // Fast case: short integer or some sorts of junk values.
- if (subject->IsSeqOneByteString()) {
- int len = subject->length();
- if (len == 0) return handle(Smi::kZero, isolate);
-
- DisallowHeapAllocation no_gc;
- uint8_t const* data =
- Handle<SeqOneByteString>::cast(subject)->GetChars(no_gc);
- bool minus = (data[0] == '-');
- int start_pos = (minus ? 1 : 0);
-
- if (start_pos == len) {
- return isolate->factory()->nan_value();
- } else if (data[start_pos] > '9') {
- // Fast check for a junk value. A valid string may start with whitespace,
- // a sign ('+' or '-'), the decimal point, a decimal digit or the 'I'
- // character ('Infinity'). All of those have character codes not greater
- // than '9' except 'I' and the non-breaking space (0xA0).
- if (data[start_pos] != 'I' && data[start_pos] != 0xA0) {
- return isolate->factory()->nan_value();
- }
- } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
- // The maximal/minimal smi has 10 digits. If the string has fewer digits,
- // we know it will fit into the smi data type.
- int d = ParseDecimalInteger(data, start_pos, len);
- if (minus) {
- if (d == 0) return isolate->factory()->minus_zero_value();
- d = -d;
- } else if (!subject->HasHashCode() && len <= String::kMaxArrayIndexSize &&
- (len == 1 || data[0] != '0')) {
- // The string hash is not calculated yet but all the data is present.
- // Update the hash field to speed up sequential conversions.
- uint32_t hash = StringHasher::MakeArrayIndexHash(d, len);
-#ifdef DEBUG
- subject->Hash(); // Force hash calculation.
- DCHECK_EQ(static_cast<int>(subject->hash_field()),
- static_cast<int>(hash));
-#endif
- subject->set_hash_field(hash);
- }
- return handle(Smi::FromInt(d), isolate);
- }
- }
-
- // Slower case.
- int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- return isolate->factory()->NewNumber(StringToDouble(isolate, subject, flags));
-}
-
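-// Illustrative conversions (hypothetical, for exposition only):
-// ToNumber("42") -> Smi 42 (fast short-integer path)
-// ToNumber("-0") -> -0.0 (minus_zero_value)
-// ToNumber("0x1A") -> 26 (slow path via StringToDouble with ALLOW_HEX)
-// ToNumber("junk") -> NaN (fast junk-value check)
-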
-String::FlatContent String::GetFlatContent(
- const DisallowHeapAllocation& no_gc) {
- USE(no_gc);
- int length = this->length();
- StringShape shape(*this);
- String string = *this;
- int offset = 0;
- if (shape.representation_tag() == kConsStringTag) {
- ConsString cons = ConsString::cast(string);
- if (cons->second()->length() != 0) {
- return FlatContent();
- }
- string = cons->first();
- shape = StringShape(string);
- } else if (shape.representation_tag() == kSlicedStringTag) {
- SlicedString slice = SlicedString::cast(string);
- offset = slice->offset();
- string = slice->parent();
- shape = StringShape(string);
- DCHECK(shape.representation_tag() != kConsStringTag &&
- shape.representation_tag() != kSlicedStringTag);
- }
- if (shape.representation_tag() == kThinStringTag) {
- ThinString thin = ThinString::cast(string);
- string = thin->actual();
- shape = StringShape(string);
- DCHECK(!shape.IsCons());
- DCHECK(!shape.IsSliced());
- }
- if (shape.encoding_tag() == kOneByteStringTag) {
- const uint8_t* start;
- if (shape.representation_tag() == kSeqStringTag) {
- start = SeqOneByteString::cast(string)->GetChars(no_gc);
- } else {
- start = ExternalOneByteString::cast(string)->GetChars();
- }
- return FlatContent(start + offset, length);
- } else {
- DCHECK_EQ(shape.encoding_tag(), kTwoByteStringTag);
- const uc16* start;
- if (shape.representation_tag() == kSeqStringTag) {
- start = SeqTwoByteString::cast(string)->GetChars(no_gc);
- } else {
- start = ExternalTwoByteString::cast(string)->GetChars();
- }
- return FlatContent(start + offset, length);
- }
-}
-
-std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robust_flag,
- int offset, int length,
- int* length_return) {
- if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
- return std::unique_ptr<char[]>();
- }
- // A negative length means up to the end of the string.
- if (length < 0) length = kMaxInt - offset;
-
- // Compute the size of the UTF-8 string. Start at the specified offset.
- StringCharacterStream stream(*this, offset);
- int character_position = offset;
- int utf8_bytes = 0;
- int last = unibrow::Utf16::kNoPreviousCharacter;
- while (stream.HasMore() && character_position++ < offset + length) {
- uint16_t character = stream.GetNext();
- utf8_bytes += unibrow::Utf8::Length(character, last);
- last = character;
- }
-
- if (length_return) {
- *length_return = utf8_bytes;
- }
-
- char* result = NewArray<char>(utf8_bytes + 1);
-
- // Convert the UTF-16 string to a UTF-8 buffer. Start at the specified offset.
- stream.Reset(*this, offset);
- character_position = offset;
- int utf8_byte_position = 0;
- last = unibrow::Utf16::kNoPreviousCharacter;
- while (stream.HasMore() && character_position++ < offset + length) {
- uint16_t character = stream.GetNext();
- if (allow_nulls == DISALLOW_NULLS && character == 0) {
- character = ' ';
- }
- utf8_byte_position +=
- unibrow::Utf8::Encode(result + utf8_byte_position, character, last);
- last = character;
- }
- result[utf8_byte_position] = 0;
- return std::unique_ptr<char[]>(result);
-}
-
-std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robust_flag,
- int* length_return) {
- return ToCString(allow_nulls, robust_flag, 0, -1, length_return);
-}
-
void Relocatable::PostGarbageCollectionProcessing(Isolate* isolate) {
Relocatable* current = isolate->relocatable_top();
@@ -11242,348 +4386,8 @@ void Relocatable::Iterate(RootVisitor* v, Relocatable* top) {
}
-FlatStringReader::FlatStringReader(Isolate* isolate, Handle<String> str)
- : Relocatable(isolate),
- str_(str.location()),
- length_(str->length()) {
- PostGarbageCollection();
-}
-
-FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
- : Relocatable(isolate),
- str_(nullptr),
- is_one_byte_(true),
- length_(input.length()),
- start_(input.start()) {}
-
-void FlatStringReader::PostGarbageCollection() {
- if (str_ == nullptr) return;
- Handle<String> str(str_);
- DCHECK(str->IsFlat());
- DisallowHeapAllocation no_gc;
- // This does not actually prevent the vector from being relocated later.
- String::FlatContent content = str->GetFlatContent(no_gc);
- DCHECK(content.IsFlat());
- is_one_byte_ = content.IsOneByte();
- if (is_one_byte_) {
- start_ = content.ToOneByteVector().start();
- } else {
- start_ = content.ToUC16Vector().start();
- }
-}
-
-void ConsStringIterator::Initialize(ConsString cons_string, int offset) {
- DCHECK(!cons_string.is_null());
- root_ = cons_string;
- consumed_ = offset;
- // Force stack blown condition to trigger restart.
- depth_ = 1;
- maximum_depth_ = kStackSize + depth_;
- DCHECK(StackBlown());
-}
-
-String ConsStringIterator::Continue(int* offset_out) {
- DCHECK_NE(depth_, 0);
- DCHECK_EQ(0, *offset_out);
- bool blew_stack = StackBlown();
- String string;
- // Get the next leaf if there is one.
- if (!blew_stack) string = NextLeaf(&blew_stack);
- // Restart search from root.
- if (blew_stack) {
- DCHECK(string.is_null());
- string = Search(offset_out);
- }
- // Ensure future calls return null immediately.
- if (string.is_null()) Reset(ConsString());
- return string;
-}
-
-String ConsStringIterator::Search(int* offset_out) {
- ConsString cons_string = root_;
- // Reset the stack, pushing the root string.
- depth_ = 1;
- maximum_depth_ = 1;
- frames_[0] = cons_string;
- const int consumed = consumed_;
- int offset = 0;
- while (true) {
- // Loop until the string is found which contains the target offset.
- String string = cons_string->first();
- int length = string->length();
- int32_t type;
- if (consumed < offset + length) {
- // Target offset is in the left branch.
- // Keep going if we're still in a ConsString.
- type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) == kConsStringTag) {
- cons_string = ConsString::cast(string);
- PushLeft(cons_string);
- continue;
- }
- // Tell the stack we're done descending.
- AdjustMaximumDepth();
- } else {
- // Descend right.
- // Update progress through the string.
- offset += length;
- // Keep going if we're still in a ConsString.
- string = cons_string->second();
- type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) == kConsStringTag) {
- cons_string = ConsString::cast(string);
- PushRight(cons_string);
- continue;
- }
- // Need this to be updated for the current string.
- length = string->length();
- // Account for the possibility of an empty right leaf.
- // This happens only if we have asked for an offset outside the string.
- if (length == 0) {
- // Reset so future operations will return null immediately.
- Reset(ConsString());
- return String();
- }
- // Tell the stack we're done descending.
- AdjustMaximumDepth();
- // Pop stack so next iteration is in correct place.
- Pop();
- }
- DCHECK_NE(length, 0);
- // Adjust return values and exit.
- consumed_ = offset + length;
- *offset_out = consumed - offset;
- return string;
- }
- UNREACHABLE();
-}
-
-String ConsStringIterator::NextLeaf(bool* blew_stack) {
- while (true) {
- // Tree traversal complete.
- if (depth_ == 0) {
- *blew_stack = false;
- return String();
- }
- // We've lost track of higher nodes.
- if (StackBlown()) {
- *blew_stack = true;
- return String();
- }
- // Go right.
- ConsString cons_string = frames_[OffsetForDepth(depth_ - 1)];
- String string = cons_string->second();
- int32_t type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) != kConsStringTag) {
- // Pop stack so next iteration is in correct place.
- Pop();
- int length = string->length();
- // Could be a flattened ConsString.
- if (length == 0) continue;
- consumed_ += length;
- return string;
- }
- cons_string = ConsString::cast(string);
- PushRight(cons_string);
- // Need to traverse all the way left.
- while (true) {
- // Continue left.
- string = cons_string->first();
- type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) != kConsStringTag) {
- AdjustMaximumDepth();
- int length = string->length();
- if (length == 0) break; // Skip empty left-hand sides of ConsStrings.
- consumed_ += length;
- return string;
- }
- cons_string = ConsString::cast(string);
- PushLeft(cons_string);
- }
- }
- UNREACHABLE();
-}
-
-uint16_t ConsString::ConsStringGet(int index) {
- DCHECK(index >= 0 && index < this->length());
-
- // Check for a flattened cons string
- if (second()->length() == 0) {
- String left = first();
- return left->Get(index);
- }
-
- String string = String::cast(*this);
-
- while (true) {
- if (StringShape(string).IsCons()) {
- ConsString cons_string = ConsString::cast(string);
- String left = cons_string->first();
- if (left->length() > index) {
- string = left;
- } else {
- index -= left->length();
- string = cons_string->second();
- }
- } else {
- return string->Get(index);
- }
- }
-
- UNREACHABLE();
-}
-
-uint16_t ThinString::ThinStringGet(int index) { return actual()->Get(index); }
-
-uint16_t SlicedString::SlicedStringGet(int index) {
- return parent()->Get(offset() + index);
-}
-template <typename sinkchar>
-void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
- DisallowHeapAllocation no_gc;
- String source = src;
- int from = f;
- int to = t;
- while (true) {
- DCHECK(0 <= from && from <= to && to <= source->length());
- switch (StringShape(source).full_representation_tag()) {
- case kOneByteStringTag | kExternalStringTag: {
- CopyChars(sink, ExternalOneByteString::cast(source)->GetChars() + from,
- to - from);
- return;
- }
- case kTwoByteStringTag | kExternalStringTag: {
- const uc16* data = ExternalTwoByteString::cast(source)->GetChars();
- CopyChars(sink, data + from, to - from);
- return;
- }
- case kOneByteStringTag | kSeqStringTag: {
- CopyChars(sink, SeqOneByteString::cast(source)->GetChars(no_gc) + from,
- to - from);
- return;
- }
- case kTwoByteStringTag | kSeqStringTag: {
- CopyChars(sink, SeqTwoByteString::cast(source)->GetChars(no_gc) + from,
- to - from);
- return;
- }
- case kOneByteStringTag | kConsStringTag:
- case kTwoByteStringTag | kConsStringTag: {
- ConsString cons_string = ConsString::cast(source);
- String first = cons_string->first();
- int boundary = first->length();
- if (to - boundary >= boundary - from) {
- // Right hand side is longer. Recurse over left.
- if (from < boundary) {
- WriteToFlat(first, sink, from, boundary);
- if (from == 0 && cons_string->second() == first) {
- CopyChars(sink + boundary, sink, boundary);
- return;
- }
- sink += boundary - from;
- from = 0;
- } else {
- from -= boundary;
- }
- to -= boundary;
- source = cons_string->second();
- } else {
- // Left hand side is longer. Recurse over right.
- if (to > boundary) {
- String second = cons_string->second();
- // When repeatedly appending to a string, we get a cons string that
- // is unbalanced to the left, essentially a list. We inline the
- // common case of a sequential one-byte right child.
- if (to - boundary == 1) {
- sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
- } else if (second->IsSeqOneByteString()) {
- CopyChars(sink + boundary - from,
- SeqOneByteString::cast(second)->GetChars(no_gc),
- to - boundary);
- } else {
- WriteToFlat(second, sink + boundary - from, 0, to - boundary);
- }
- to = boundary;
- }
- source = first;
- }
- break;
- }
- case kOneByteStringTag | kSlicedStringTag:
- case kTwoByteStringTag | kSlicedStringTag: {
- SlicedString slice = SlicedString::cast(source);
- unsigned offset = slice->offset();
- WriteToFlat(slice->parent(), sink, from + offset, to + offset);
- return;
- }
- case kOneByteStringTag | kThinStringTag:
- case kTwoByteStringTag | kThinStringTag:
- source = ThinString::cast(source)->actual();
- break;
- }
- }
-}
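-
-// Illustrative call (hypothetical, for exposition only): for a ConsString
-// c = "ab" + "cd", String::WriteToFlat(c, buffer, 1, 3) copies "bc" into
-// buffer, descending into both children without allocating.
-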
-template <typename SourceChar>
-static void CalculateLineEndsImpl(Isolate* isolate, std::vector<int>* line_ends,
- Vector<const SourceChar> src,
- bool include_ending_line) {
- const int src_len = src.length();
- for (int i = 0; i < src_len - 1; i++) {
- SourceChar current = src[i];
- SourceChar next = src[i + 1];
- if (IsLineTerminatorSequence(current, next)) line_ends->push_back(i);
- }
-
- if (src_len > 0 && IsLineTerminatorSequence(src[src_len - 1], 0)) {
- line_ends->push_back(src_len - 1);
- }
- if (include_ending_line) {
- // Include one character beyond the end of script. The rewriter uses that
- // position for the implicit return statement.
- line_ends->push_back(src_len);
- }
-}
-
-Handle<FixedArray> String::CalculateLineEnds(Isolate* isolate,
- Handle<String> src,
- bool include_ending_line) {
- src = Flatten(isolate, src);
- // Rough estimate of the line count, assuming an average (unpacked) source
- // line length of about 16 characters.
- int line_count_estimate = src->length() >> 4;
- std::vector<int> line_ends;
- line_ends.reserve(line_count_estimate);
- {
- DisallowHeapAllocation no_allocation; // Ensure vectors stay valid.
- // Dispatch on type of strings.
- String::FlatContent content = src->GetFlatContent(no_allocation);
- DCHECK(content.IsFlat());
- if (content.IsOneByte()) {
- CalculateLineEndsImpl(isolate, &line_ends, content.ToOneByteVector(),
- include_ending_line);
- } else {
- CalculateLineEndsImpl(isolate, &line_ends, content.ToUC16Vector(),
- include_ending_line);
- }
- }
- int line_count = static_cast<int>(line_ends.size());
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count);
- for (int i = 0; i < line_count; i++) {
- array->set(i, Smi::FromInt(line_ends[i]));
- }
- return array;
-}
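-
-// Illustrative result (hypothetical, for exposition only): for the source
-// "a\nb\r\nc" with include_ending_line == true, the returned array holds
-// [1, 4, 6]: "\r\n" counts as a single terminator sequence ending at the
-// '\n', and the final entry is one position past the end of the source.
-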
namespace {
@@ -11695,820 +4499,8 @@ Address JSArray::ArrayJoinConcatToSequentialString(Isolate* isolate,
return dest->ptr();
}
-// Compares the contents of two strings by reading and comparing
-// int-sized blocks of characters.
-template <typename Char>
-static inline bool CompareRawStringContents(const Char* const a,
- const Char* const b,
- int length) {
- return CompareChars(a, b, length) == 0;
-}
-
-
-template<typename Chars1, typename Chars2>
-class RawStringComparator : public AllStatic {
- public:
- static inline bool compare(const Chars1* a, const Chars2* b, int len) {
- DCHECK(sizeof(Chars1) != sizeof(Chars2));
- for (int i = 0; i < len; i++) {
- if (a[i] != b[i]) {
- return false;
- }
- }
- return true;
- }
-};
-
-
-template<>
-class RawStringComparator<uint16_t, uint16_t> {
- public:
- static inline bool compare(const uint16_t* a, const uint16_t* b, int len) {
- return CompareRawStringContents(a, b, len);
- }
-};
-
-
-template<>
-class RawStringComparator<uint8_t, uint8_t> {
- public:
- static inline bool compare(const uint8_t* a, const uint8_t* b, int len) {
- return CompareRawStringContents(a, b, len);
- }
-};
-
-
-class StringComparator {
- class State {
- public:
- State() : is_one_byte_(true), length_(0), buffer8_(nullptr) {}
-
- void Init(String string) {
- ConsString cons_string = String::VisitFlat(this, string);
- iter_.Reset(cons_string);
- if (!cons_string.is_null()) {
- int offset;
- string = iter_.Next(&offset);
- String::VisitFlat(this, string, offset);
- }
- }
-
- inline void VisitOneByteString(const uint8_t* chars, int length) {
- is_one_byte_ = true;
- buffer8_ = chars;
- length_ = length;
- }
-
- inline void VisitTwoByteString(const uint16_t* chars, int length) {
- is_one_byte_ = false;
- buffer16_ = chars;
- length_ = length;
- }
-
- void Advance(int consumed) {
- DCHECK(consumed <= length_);
- // Still in buffer.
- if (length_ != consumed) {
- if (is_one_byte_) {
- buffer8_ += consumed;
- } else {
- buffer16_ += consumed;
- }
- length_ -= consumed;
- return;
- }
- // Advance state.
- int offset;
- String next = iter_.Next(&offset);
- DCHECK_EQ(0, offset);
- DCHECK(!next.is_null());
- String::VisitFlat(this, next);
- }
-
- ConsStringIterator iter_;
- bool is_one_byte_;
- int length_;
- union {
- const uint8_t* buffer8_;
- const uint16_t* buffer16_;
- };
-
- private:
- DISALLOW_COPY_AND_ASSIGN(State);
- };
-
- public:
- inline StringComparator() = default;
-
- template<typename Chars1, typename Chars2>
- static inline bool Equals(State* state_1, State* state_2, int to_check) {
- const Chars1* a = reinterpret_cast<const Chars1*>(state_1->buffer8_);
- const Chars2* b = reinterpret_cast<const Chars2*>(state_2->buffer8_);
- return RawStringComparator<Chars1, Chars2>::compare(a, b, to_check);
- }
-
- bool Equals(String string_1, String string_2) {
- int length = string_1->length();
- state_1_.Init(string_1);
- state_2_.Init(string_2);
- while (true) {
- int to_check = Min(state_1_.length_, state_2_.length_);
- DCHECK(to_check > 0 && to_check <= length);
- bool is_equal;
- if (state_1_.is_one_byte_) {
- if (state_2_.is_one_byte_) {
- is_equal = Equals<uint8_t, uint8_t>(&state_1_, &state_2_, to_check);
- } else {
- is_equal = Equals<uint8_t, uint16_t>(&state_1_, &state_2_, to_check);
- }
- } else {
- if (state_2_.is_one_byte_) {
- is_equal = Equals<uint16_t, uint8_t>(&state_1_, &state_2_, to_check);
- } else {
- is_equal = Equals<uint16_t, uint16_t>(&state_1_, &state_2_, to_check);
- }
- }
- // Looping done.
- if (!is_equal) return false;
- length -= to_check;
- // Exit condition. Strings are equal.
- if (length == 0) return true;
- state_1_.Advance(to_check);
- state_2_.Advance(to_check);
- }
- }
-
- private:
- State state_1_;
- State state_2_;
-
- DISALLOW_COPY_AND_ASSIGN(StringComparator);
-};
-
-bool String::SlowEquals(String other) {
- DisallowHeapAllocation no_gc;
- // Fast check: negative check with lengths.
- int len = length();
- if (len != other->length()) return false;
- if (len == 0) return true;
-
- // Fast check: if at least one ThinString is involved, dereference it/them
- // and restart.
- if (this->IsThinString() || other->IsThinString()) {
- if (other->IsThinString()) other = ThinString::cast(other)->actual();
- if (this->IsThinString()) {
- return ThinString::cast(*this)->actual()->Equals(other);
- } else {
- return this->Equals(other);
- }
- }
-
- // Fast check: if the hash code has been computed for both strings,
- // a fast negative check can be performed.
- if (HasHashCode() && other->HasHashCode()) {
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- if (Hash() != other->Hash()) {
- bool found_difference = false;
- for (int i = 0; i < len; i++) {
- if (Get(i) != other->Get(i)) {
- found_difference = true;
- break;
- }
- }
- DCHECK(found_difference);
- }
- }
-#endif
- if (Hash() != other->Hash()) return false;
- }
-
- // We know the strings are both non-empty. Compare the first chars
- // before we try to flatten the strings.
- if (this->Get(0) != other->Get(0)) return false;
-
- if (IsSeqOneByteString() && other->IsSeqOneByteString()) {
- const uint8_t* str1 = SeqOneByteString::cast(*this)->GetChars(no_gc);
- const uint8_t* str2 = SeqOneByteString::cast(other)->GetChars(no_gc);
- return CompareRawStringContents(str1, str2, len);
- }
-
- StringComparator comparator;
- return comparator.Equals(*this, other);
-}
-
-bool String::SlowEquals(Isolate* isolate, Handle<String> one,
- Handle<String> two) {
- // Fast check: negative check with lengths.
- int one_length = one->length();
- if (one_length != two->length()) return false;
- if (one_length == 0) return true;
-
- // Fast check: if at least one ThinString is involved, dereference it/them
- // and restart.
- if (one->IsThinString() || two->IsThinString()) {
- if (one->IsThinString())
- one = handle(ThinString::cast(*one)->actual(), isolate);
- if (two->IsThinString())
- two = handle(ThinString::cast(*two)->actual(), isolate);
- return String::Equals(isolate, one, two);
- }
-
- // Fast check: if the hash code has been computed for both strings,
- // a fast negative check can be performed.
- if (one->HasHashCode() && two->HasHashCode()) {
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- if (one->Hash() != two->Hash()) {
- bool found_difference = false;
- for (int i = 0; i < one_length; i++) {
- if (one->Get(i) != two->Get(i)) {
- found_difference = true;
- break;
- }
- }
- DCHECK(found_difference);
- }
- }
-#endif
- if (one->Hash() != two->Hash()) return false;
- }
-
- // We know the strings are both non-empty. Compare the first chars
- // before we try to flatten the strings.
- if (one->Get(0) != two->Get(0)) return false;
-
- one = String::Flatten(isolate, one);
- two = String::Flatten(isolate, two);
-
- DisallowHeapAllocation no_gc;
- String::FlatContent flat1 = one->GetFlatContent(no_gc);
- String::FlatContent flat2 = two->GetFlatContent(no_gc);
-
- if (flat1.IsOneByte() && flat2.IsOneByte()) {
- return CompareRawStringContents(flat1.ToOneByteVector().start(),
- flat2.ToOneByteVector().start(),
- one_length);
- } else {
- for (int i = 0; i < one_length; i++) {
- if (flat1.Get(i) != flat2.Get(i)) return false;
- }
- return true;
- }
-}
-
-
-// static
-ComparisonResult String::Compare(Isolate* isolate, Handle<String> x,
- Handle<String> y) {
- // A few fast case tests before we flatten.
- if (x.is_identical_to(y)) {
- return ComparisonResult::kEqual;
- } else if (y->length() == 0) {
- return x->length() == 0 ? ComparisonResult::kEqual
- : ComparisonResult::kGreaterThan;
- } else if (x->length() == 0) {
- return ComparisonResult::kLessThan;
- }
-
- int const d = x->Get(0) - y->Get(0);
- if (d < 0) {
- return ComparisonResult::kLessThan;
- } else if (d > 0) {
- return ComparisonResult::kGreaterThan;
- }
-
- // Slow case.
- x = String::Flatten(isolate, x);
- y = String::Flatten(isolate, y);
-
- DisallowHeapAllocation no_gc;
- ComparisonResult result = ComparisonResult::kEqual;
- int prefix_length = x->length();
- if (y->length() < prefix_length) {
- prefix_length = y->length();
- result = ComparisonResult::kGreaterThan;
- } else if (y->length() > prefix_length) {
- result = ComparisonResult::kLessThan;
- }
- int r;
- String::FlatContent x_content = x->GetFlatContent(no_gc);
- String::FlatContent y_content = y->GetFlatContent(no_gc);
- if (x_content.IsOneByte()) {
- Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
- if (y_content.IsOneByte()) {
- Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- } else {
- Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- }
- } else {
- Vector<const uc16> x_chars = x_content.ToUC16Vector();
- if (y_content.IsOneByte()) {
- Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- } else {
- Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- }
- }
- if (r < 0) {
- result = ComparisonResult::kLessThan;
- } else if (r > 0) {
- result = ComparisonResult::kGreaterThan;
- }
- return result;
-}
-
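-// Illustrative results (hypothetical, for exposition only):
-// Compare("abc", "abd") -> kLessThan (first differing character decides)
-// Compare("abc", "ab") -> kGreaterThan (equal prefix, longer string wins)
-// Compare("x", "x") -> kEqual
-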
-Object String::IndexOf(Isolate* isolate, Handle<Object> receiver,
- Handle<Object> search, Handle<Object> position) {
- if (receiver->IsNullOrUndefined(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
- isolate->factory()->NewStringFromAsciiChecked(
- "String.prototype.indexOf")));
- }
- Handle<String> receiver_string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_string,
- Object::ToString(isolate, receiver));
-
- Handle<String> search_string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
- Object::ToString(isolate, search));
-
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
- Object::ToInteger(isolate, position));
-
- uint32_t index = receiver_string->ToValidIndex(*position);
- return Smi::FromInt(
- String::IndexOf(isolate, receiver_string, search_string, index));
-}
-
-namespace {
-
-template <typename T>
-int SearchString(Isolate* isolate, String::FlatContent receiver_content,
- Vector<T> pat_vector, int start_index) {
- if (receiver_content.IsOneByte()) {
- return SearchString(isolate, receiver_content.ToOneByteVector(), pat_vector,
- start_index);
- }
- return SearchString(isolate, receiver_content.ToUC16Vector(), pat_vector,
- start_index);
-}
-
-} // namespace
-
-int String::IndexOf(Isolate* isolate, Handle<String> receiver,
- Handle<String> search, int start_index) {
- DCHECK_LE(0, start_index);
- DCHECK(start_index <= receiver->length());
-
- uint32_t search_length = search->length();
- if (search_length == 0) return start_index;
-
- uint32_t receiver_length = receiver->length();
- if (start_index + search_length > receiver_length) return -1;
-
- receiver = String::Flatten(isolate, receiver);
- search = String::Flatten(isolate, search);
-
- DisallowHeapAllocation no_gc; // ensure vectors stay valid
- // Extract flattened substrings of cons strings before getting encoding.
- String::FlatContent receiver_content = receiver->GetFlatContent(no_gc);
- String::FlatContent search_content = search->GetFlatContent(no_gc);
-
- // Dispatch on the encoding of the strings.
- if (search_content.IsOneByte()) {
- Vector<const uint8_t> pat_vector = search_content.ToOneByteVector();
- return SearchString<const uint8_t>(isolate, receiver_content, pat_vector,
- start_index);
- }
- Vector<const uc16> pat_vector = search_content.ToUC16Vector();
- return SearchString<const uc16>(isolate, receiver_content, pat_vector,
- start_index);
-}
-
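-// Illustrative results (hypothetical, for exposition only):
-// IndexOf(isolate, "hello", "ll", 0) -> 2
-// IndexOf(isolate, "hello", "", 3) -> 3 (empty search returns start_index)
-// IndexOf(isolate, "hello", "lo", 4) -> -1 (would run past the end)
-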
-MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
- Handle<String> replacement,
- int start_index) {
- DCHECK_GE(start_index, 0);
-
- Factory* factory = isolate->factory();
-
- const int replacement_length = replacement->length();
- const int captures_length = match->CaptureCount();
-
- replacement = String::Flatten(isolate, replacement);
-
- Handle<String> dollar_string =
- factory->LookupSingleCharacterStringFromCode('$');
- int next_dollar_ix =
- String::IndexOf(isolate, replacement, dollar_string, start_index);
- if (next_dollar_ix < 0) {
- return replacement;
- }
-
- IncrementalStringBuilder builder(isolate);
-
- if (next_dollar_ix > 0) {
- builder.AppendString(factory->NewSubString(replacement, 0, next_dollar_ix));
- }
-
- while (true) {
- const int peek_ix = next_dollar_ix + 1;
- if (peek_ix >= replacement_length) {
- builder.AppendCharacter('$');
- return builder.Finish();
- }
-
- int continue_from_ix = -1;
- const uint16_t peek = replacement->Get(peek_ix);
- switch (peek) {
- case '$': // $$
- builder.AppendCharacter('$');
- continue_from_ix = peek_ix + 1;
- break;
- case '&': // $& - match
- builder.AppendString(match->GetMatch());
- continue_from_ix = peek_ix + 1;
- break;
- case '`': // $` - prefix
- builder.AppendString(match->GetPrefix());
- continue_from_ix = peek_ix + 1;
- break;
- case '\'': // $' - suffix
- builder.AppendString(match->GetSuffix());
- continue_from_ix = peek_ix + 1;
- break;
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9': {
- // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
- int scaled_index = (peek - '0');
- int advance = 1;
-
- if (peek_ix + 1 < replacement_length) {
- const uint16_t next_peek = replacement->Get(peek_ix + 1);
- if (next_peek >= '0' && next_peek <= '9') {
- const int new_scaled_index = scaled_index * 10 + (next_peek - '0');
- if (new_scaled_index < captures_length) {
- scaled_index = new_scaled_index;
- advance = 2;
- }
- }
- }
-
- if (scaled_index == 0 || scaled_index >= captures_length) {
- builder.AppendCharacter('$');
- continue_from_ix = peek_ix;
- break;
- }
-
- bool capture_exists;
- Handle<String> capture;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, capture, match->GetCapture(scaled_index, &capture_exists),
- String);
- if (capture_exists) builder.AppendString(capture);
- continue_from_ix = peek_ix + advance;
- break;
- }
- case '<': { // $<name> - named capture
- typedef String::Match::CaptureState CaptureState;
-
- if (!match->HasNamedCaptures()) {
- builder.AppendCharacter('$');
- continue_from_ix = peek_ix;
- break;
- }
-
- Handle<String> bracket_string =
- factory->LookupSingleCharacterStringFromCode('>');
- const int closing_bracket_ix =
- String::IndexOf(isolate, replacement, bracket_string, peek_ix + 1);
-
- if (closing_bracket_ix == -1) {
- // No closing bracket was found; treat '$<' as a string literal.
- builder.AppendCharacter('$');
- continue_from_ix = peek_ix;
- break;
- }
-
- Handle<String> capture_name =
- factory->NewSubString(replacement, peek_ix + 1, closing_bracket_ix);
- Handle<String> capture;
- CaptureState capture_state;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, capture,
- match->GetNamedCapture(capture_name, &capture_state), String);
-
- switch (capture_state) {
- case CaptureState::INVALID:
- case CaptureState::UNMATCHED:
- break;
- case CaptureState::MATCHED:
- builder.AppendString(capture);
- break;
- }
-
- continue_from_ix = closing_bracket_ix + 1;
- break;
- }
- default:
- builder.AppendCharacter('$');
- continue_from_ix = peek_ix;
- break;
- }
-
- // Go to the next '$' in the replacement.
- // TODO(jgruber): Single-char lookups could be much more efficient.
- DCHECK_NE(continue_from_ix, -1);
- next_dollar_ix =
- String::IndexOf(isolate, replacement, dollar_string, continue_from_ix);
-
- // Return if there are no more $ characters in the replacement. If we
- // haven't reached the end, we need to append the suffix.
- if (next_dollar_ix < 0) {
- if (continue_from_ix < replacement_length) {
- builder.AppendString(factory->NewSubString(
- replacement, continue_from_ix, replacement_length));
- }
- return builder.Finish();
- }
-
- // Append substring between the previous and the next $ character.
- if (next_dollar_ix > continue_from_ix) {
- builder.AppendString(
- factory->NewSubString(replacement, continue_from_ix, next_dollar_ix));
- }
- }
-
- UNREACHABLE();
-}
-
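-// Illustrative expansion (hypothetical values, for exposition only): with a
-// match of "abc" in "xabcy" and a single capture "b", the replacement
-// "[$`|$&|$'|$1|$$]" expands to "[x|abc|y|b|$]".
-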
-namespace { // for String.Prototype.lastIndexOf
-
-template <typename schar, typename pchar>
-int StringMatchBackwards(Vector<const schar> subject,
- Vector<const pchar> pattern, int idx) {
- int pattern_length = pattern.length();
- DCHECK_GE(pattern_length, 1);
- DCHECK(idx + pattern_length <= subject.length());
-
- if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
- for (int i = 0; i < pattern_length; i++) {
- uc16 c = pattern[i];
- if (c > String::kMaxOneByteCharCode) {
- return -1;
- }
- }
- }
-
- pchar pattern_first_char = pattern[0];
- for (int i = idx; i >= 0; i--) {
- if (subject[i] != pattern_first_char) continue;
- int j = 1;
- while (j < pattern_length) {
- if (pattern[j] != subject[i + j]) {
- break;
- }
- j++;
- }
- if (j == pattern_length) {
- return i;
- }
- }
- return -1;
-}
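-
-// Illustrative search (hypothetical, for exposition only): with subject
-// "abcabc", pattern "abc" and idx == 3 (the largest start index to try),
-// StringMatchBackwards returns 3; with idx == 2 it returns 0.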
-
-} // namespace
-
-Object String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
- Handle<Object> search, Handle<Object> position) {
- if (receiver->IsNullOrUndefined(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
- isolate->factory()->NewStringFromAsciiChecked(
- "String.prototype.lastIndexOf")));
- }
- Handle<String> receiver_string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_string,
- Object::ToString(isolate, receiver));
-
- Handle<String> search_string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
- Object::ToString(isolate, search));
-
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
- Object::ToNumber(isolate, position));
-
- uint32_t start_index;
-
- if (position->IsNaN()) {
- start_index = receiver_string->length();
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
- Object::ToInteger(isolate, position));
- start_index = receiver_string->ToValidIndex(*position);
- }
-
- uint32_t pattern_length = search_string->length();
- uint32_t receiver_length = receiver_string->length();
-
- if (start_index + pattern_length > receiver_length) {
- start_index = receiver_length - pattern_length;
- }
-
- if (pattern_length == 0) {
- return Smi::FromInt(start_index);
- }
-
- receiver_string = String::Flatten(isolate, receiver_string);
- search_string = String::Flatten(isolate, search_string);
-
- int last_index = -1;
- DisallowHeapAllocation no_gc; // ensure vectors stay valid
-
- String::FlatContent receiver_content = receiver_string->GetFlatContent(no_gc);
- String::FlatContent search_content = search_string->GetFlatContent(no_gc);
-
- if (search_content.IsOneByte()) {
- Vector<const uint8_t> pat_vector = search_content.ToOneByteVector();
- if (receiver_content.IsOneByte()) {
- last_index = StringMatchBackwards(receiver_content.ToOneByteVector(),
- pat_vector, start_index);
- } else {
- last_index = StringMatchBackwards(receiver_content.ToUC16Vector(),
- pat_vector, start_index);
- }
- } else {
- Vector<const uc16> pat_vector = search_content.ToUC16Vector();
- if (receiver_content.IsOneByte()) {
- last_index = StringMatchBackwards(receiver_content.ToOneByteVector(),
- pat_vector, start_index);
- } else {
- last_index = StringMatchBackwards(receiver_content.ToUC16Vector(),
- pat_vector, start_index);
- }
- }
- return Smi::FromInt(last_index);
-}
-
-bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
- int slen = length();
- // Can't check exact length equality, but we can check bounds.
- int str_len = str.length();
- if (!allow_prefix_match &&
- (str_len < slen ||
- str_len > slen*static_cast<int>(unibrow::Utf8::kMaxEncodedSize))) {
- return false;
- }
- int i = 0;
- unibrow::Utf8Iterator it = unibrow::Utf8Iterator(str);
- while (i < slen && !it.Done()) {
- if (Get(i++) != *it) return false;
- ++it;
- }
- return (allow_prefix_match || i == slen) && it.Done();
-}
-
-template <>
-bool String::IsEqualTo(Vector<const uint8_t> str) {
- return IsOneByteEqualTo(str);
-}
-
-template <>
-bool String::IsEqualTo(Vector<const uc16> str) {
- return IsTwoByteEqualTo(str);
-}
-
-bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
- int slen = length();
- if (str.length() != slen) return false;
- DisallowHeapAllocation no_gc;
- FlatContent content = GetFlatContent(no_gc);
- if (content.IsOneByte()) {
- return CompareChars(content.ToOneByteVector().start(),
- str.start(), slen) == 0;
- }
- return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
-}
-
-
-bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
- int slen = length();
- if (str.length() != slen) return false;
- DisallowHeapAllocation no_gc;
- FlatContent content = GetFlatContent(no_gc);
- if (content.IsOneByte()) {
- return CompareChars(content.ToOneByteVector().start(), str.start(), slen) ==
- 0;
- }
- return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
-}
-
-uint32_t String::ComputeAndSetHash(Isolate* isolate) {
- DisallowHeapAllocation no_gc;
- // Should only be called if hash code has not yet been computed.
- DCHECK(!HasHashCode());
-
- // Store the hash code in the object.
- uint32_t field =
- IteratingStringHasher::Hash(*this, isolate->heap()->HashSeed());
- set_hash_field(field);
-
- // Check the hash code is there.
- DCHECK(HasHashCode());
- uint32_t result = field >> kHashShift;
- DCHECK_NE(result, 0); // Ensure that the hash value of 0 is never computed.
- return result;
-}
-
-
-bool String::ComputeArrayIndex(uint32_t* index) {
- int length = this->length();
- if (length == 0 || length > kMaxArrayIndexSize) return false;
- StringCharacterStream stream(*this);
- return StringToArrayIndex(&stream, index);
-}
-
-
-bool String::SlowAsArrayIndex(uint32_t* index) {
- DisallowHeapAllocation no_gc;
- if (length() <= kMaxCachedArrayIndexLength) {
- Hash(); // force computation of hash code
- uint32_t field = hash_field();
- if ((field & kIsNotArrayIndexMask) != 0) return false;
- // Isolate the array index from the full hash field.
- *index = ArrayIndexValueBits::decode(field);
- return true;
- } else {
- return ComputeArrayIndex(index);
- }
-}
-
-
-Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
- if (new_length == 0) return string->GetReadOnlyRoots().empty_string_handle();
-
- int new_size, old_size;
- int old_length = string->length();
- if (old_length <= new_length) return string;
-
- if (string->IsSeqOneByteString()) {
- old_size = SeqOneByteString::SizeFor(old_length);
- new_size = SeqOneByteString::SizeFor(new_length);
- } else {
- DCHECK(string->IsSeqTwoByteString());
- old_size = SeqTwoByteString::SizeFor(old_length);
- new_size = SeqTwoByteString::SizeFor(new_length);
- }
-
- int delta = old_size - new_size;
-
- Address start_of_string = string->address();
- DCHECK_OBJECT_ALIGNED(start_of_string);
- DCHECK_OBJECT_ALIGNED(start_of_string + new_size);
-
- Heap* heap = Heap::FromWritableHeapObject(*string);
- // Sizes are pointer size aligned, so that we can use filler objects
- // that are a multiple of pointer size.
- heap->CreateFillerObjectAt(start_of_string + new_size, delta,
- ClearRecordedSlots::kNo);
- // We are storing the new length using release store after creating a filler
- // for the left-over space to avoid races with the sweeper thread.
- string->synchronized_set_length(new_length);
-
- return string;
-}
-
-void SeqOneByteString::clear_padding() {
- int data_size = SeqString::kHeaderSize + length() * kOneByteSize;
- memset(reinterpret_cast<void*>(address() + data_size), 0,
- SizeFor(length()) - data_size);
-}
-
-void SeqTwoByteString::clear_padding() {
- int data_size = SeqString::kHeaderSize + length() * kUC16Size;
- memset(reinterpret_cast<void*>(address() + data_size), 0,
- SizeFor(length()) - data_size);
-}
-
-int ExternalString::ExternalPayloadSize() const {
- int length_multiplier = IsTwoByteRepresentation() ? i::kShortSize : kCharSize;
- return length() * length_multiplier;
-}
uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
// For array indexes mix the length into the hash as an array index could
@@ -12596,7 +4588,7 @@ void IteratingStringHasher::VisitConsString(ConsString cons_string) {
// Slow case.
const int max_length = String::kMaxHashCalcLength;
int length = std::min(cons_string->length(), max_length);
- if (cons_string->HasOnlyOneByteChars()) {
+ if (cons_string->IsOneByteRepresentation()) {
uint8_t* buffer = new uint8_t[length];
String::WriteToFlat(cons_string, buffer, 0, length);
AddCharacters(buffer, length);
@@ -12609,551 +4601,6 @@ void IteratingStringHasher::VisitConsString(ConsString cons_string) {
}
}
-void String::PrintOn(FILE* file) {
- int length = this->length();
- for (int i = 0; i < length; i++) {
- PrintF(file, "%c", Get(i));
- }
-}
-
-
-int Map::Hash() {
- // For performance reasons we only hash the 3 most variable fields of a map:
- // constructor, prototype and bit_field2. For predictability reasons we
- // use objects' offsets in their respective pages for hashing instead of raw
- // addresses.
-
- // Shift away the tag.
- int hash = ObjectAddressForHashing(GetConstructor().ptr()) >> 2;
-
- // XOR-ing the prototype and constructor directly yields too many zero bits
- // when the two pointers are close (which is fairly common).
- // To avoid this we shift the prototype bits relatively to the constructor.
- hash ^= ObjectAddressForHashing(prototype().ptr()) << (32 - kPageSizeBits);
-
- return hash ^ (hash >> 16) ^ bit_field2();
-}
-
-
-namespace {
-
-bool CheckEquivalent(const Map first, const Map second) {
- return first->GetConstructor() == second->GetConstructor() &&
- first->prototype() == second->prototype() &&
- first->instance_type() == second->instance_type() &&
- first->bit_field() == second->bit_field() &&
- first->is_extensible() == second->is_extensible() &&
- first->new_target_is_base() == second->new_target_is_base() &&
- first->has_hidden_prototype() == second->has_hidden_prototype();
-}
-
-} // namespace
-
-bool Map::EquivalentToForTransition(const Map other) const {
- if (!CheckEquivalent(*this, other)) return false;
- if (instance_type() == JS_FUNCTION_TYPE) {
- // JSFunctions require more checks to ensure that a sloppy function is
- // not equivalent to a strict function.
- int nof = Min(NumberOfOwnDescriptors(), other->NumberOfOwnDescriptors());
- return instance_descriptors()->IsEqualUpTo(other->instance_descriptors(),
- nof);
- }
- return true;
-}
-
-bool Map::EquivalentToForElementsKindTransition(const Map other) const {
- if (!EquivalentToForTransition(other)) return false;
-#ifdef DEBUG
- // Ensure that we don't try to generate elements kind transitions from maps
- // with fields that may be generalized in-place. This must already be handled
- // during addition of a new field.
- DescriptorArray descriptors = instance_descriptors();
- int nof = NumberOfOwnDescriptors();
- for (int i = 0; i < nof; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() == kField) {
- DCHECK(!IsInplaceGeneralizableField(details.constness(),
- details.representation(),
- descriptors->GetFieldType(i)));
- }
- }
-#endif
- return true;
-}
-
-bool Map::EquivalentToForNormalization(const Map other,
- PropertyNormalizationMode mode) const {
- int properties =
- mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other->GetInObjectProperties();
- return CheckEquivalent(*this, other) && bit_field2() == other->bit_field2() &&
- GetInObjectProperties() == properties &&
- JSObject::GetEmbedderFieldCount(*this) ==
- JSObject::GetEmbedderFieldCount(other);
-}
-
-
-void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
- Isolate* isolate = GetIsolate();
- if (!isolate->concurrent_recompilation_enabled() ||
- isolate->bootstrapper()->IsActive()) {
- mode = ConcurrencyMode::kNotConcurrent;
- }
-
- DCHECK(!is_compiled() || IsInterpreted());
- DCHECK(shared()->IsInterpreted());
- DCHECK(!IsOptimized());
- DCHECK(!HasOptimizedCode());
- DCHECK(shared()->allows_lazy_compilation() ||
- !shared()->optimization_disabled());
-
- if (mode == ConcurrencyMode::kConcurrent) {
- if (IsInOptimizationQueue()) {
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Not marking ");
- ShortPrint();
- PrintF(" -- already in optimization queue.\n");
- }
- return;
- }
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Marking ");
- ShortPrint();
- PrintF(" for concurrent recompilation.\n");
- }
- }
-
- SetOptimizationMarker(mode == ConcurrencyMode::kConcurrent
- ? OptimizationMarker::kCompileOptimizedConcurrent
- : OptimizationMarker::kCompileOptimized);
-}
-
-// static
-void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
- Isolate* const isolate = function->GetIsolate();
- DCHECK(function->shared()->is_compiled());
- DCHECK(FLAG_lite_mode || function->shared()->HasFeedbackMetadata());
- if (!function->has_feedback_vector() &&
- function->shared()->HasFeedbackMetadata()) {
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- if (!shared->HasAsmWasmData()) {
- DCHECK(function->shared()->HasBytecodeArray());
- Handle<FeedbackVector> feedback_vector =
- FeedbackVector::New(isolate, shared);
- if (function->raw_feedback_cell() ==
- isolate->heap()->many_closures_cell()) {
- Handle<FeedbackCell> feedback_cell =
- isolate->factory()->NewOneClosureCell(feedback_vector);
- function->set_raw_feedback_cell(*feedback_cell);
- } else {
- function->raw_feedback_cell()->set_value(*feedback_vector);
- }
- }
- }
-}
-
-static void GetMinInobjectSlack(Map map, void* data) {
- int slack = map->UnusedPropertyFields();
- if (*reinterpret_cast<int*>(data) > slack) {
- *reinterpret_cast<int*>(data) = slack;
- }
-}
-
-int Map::InstanceSizeFromSlack(int slack) const {
- return instance_size() - slack * kTaggedSize;
-}
-
-static void ShrinkInstanceSize(Map map, void* data) {
- int slack = *reinterpret_cast<int*>(data);
- DCHECK_GE(slack, 0);
-#ifdef DEBUG
- int old_visitor_id = Map::GetVisitorId(map);
- int new_unused = map->UnusedPropertyFields() - slack;
-#endif
- map->set_instance_size(map->InstanceSizeFromSlack(slack));
- map->set_construction_counter(Map::kNoSlackTracking);
- DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
- DCHECK_EQ(new_unused, map->UnusedPropertyFields());
-}
-
-static void StopSlackTracking(Map map, void* data) {
- map->set_construction_counter(Map::kNoSlackTracking);
-}
-
-int Map::ComputeMinObjectSlack(Isolate* isolate) {
- DisallowHeapAllocation no_gc;
- // Has to be an initial map.
- DCHECK(GetBackPointer()->IsUndefined(isolate));
-
- int slack = UnusedPropertyFields();
- TransitionsAccessor transitions(isolate, *this, &no_gc);
- transitions.TraverseTransitionTree(&GetMinInobjectSlack, &slack);
- return slack;
-}
-
-void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
- DisallowHeapAllocation no_gc;
- // Has to be an initial map.
- DCHECK(GetBackPointer()->IsUndefined(isolate));
-
- int slack = ComputeMinObjectSlack(isolate);
- TransitionsAccessor transitions(isolate, *this, &no_gc);
- if (slack != 0) {
- // Resize the initial map and all maps in its transition tree.
- transitions.TraverseTransitionTree(&ShrinkInstanceSize, &slack);
- } else {
- transitions.TraverseTransitionTree(&StopSlackTracking, nullptr);
- }
-}
-
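Aside: the slack-tracking machinery removed above boils down to tagged-slot arithmetic over a transition tree. A minimal standalone sketch; kTaggedSize and MapSketch below are simplified stand-ins for V8's internals, not its API:

#include <algorithm>
#include <cassert>
#include <cstdio>

namespace {

constexpr int kTaggedSize = 8;  // Simplified: one tagged slot on 64-bit.

struct MapSketch {
  int instance_size;           // Total object size in bytes.
  int unused_property_fields;  // In-object slots no instance ever used.
};

// Mirrors Map::InstanceSizeFromSlack(): drop |slack| unused tagged slots.
int InstanceSizeFromSlack(const MapSketch& map, int slack) {
  return map.instance_size - slack * kTaggedSize;
}

// Mirrors GetMinInobjectSlack() + ShrinkInstanceSize(): the transition tree
// can only shrink by the minimum slack observed on any map in it.
void ShrinkTree(MapSketch* maps, int count) {
  int slack = maps[0].unused_property_fields;
  for (int i = 1; i < count; i++) {
    slack = std::min(slack, maps[i].unused_property_fields);
  }
  for (int i = 0; i < count; i++) {
    maps[i].instance_size = InstanceSizeFromSlack(maps[i], slack);
    maps[i].unused_property_fields -= slack;
  }
}

}  // namespace

int main() {
  MapSketch maps[] = {{96, 4}, {96, 2}, {96, 3}};
  ShrinkTree(maps, 3);
  assert(maps[0].instance_size == 80);  // 96 - 2 slots * 8 bytes.
  std::printf("shrunk to %d bytes\n", maps[0].instance_size);
}
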
-void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
- int number_of_own_descriptors) {
- set_synchronized_instance_descriptors(descriptors);
- SetNumberOfOwnDescriptors(number_of_own_descriptors);
- MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
- number_of_own_descriptors);
-}
-
-static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
- DisallowHeapAllocation no_gc;
- if (!object->HasFastProperties()) return false;
- if (object->IsJSGlobalProxy()) return false;
- if (object->GetIsolate()->bootstrapper()->IsActive()) return false;
- return !object->map()->is_prototype_map() ||
- !object->map()->should_be_fast_prototype_map();
-}
-
-// static
-void JSObject::MakePrototypesFast(Handle<Object> receiver,
- WhereToStart where_to_start,
- Isolate* isolate) {
- if (!receiver->IsJSReceiver()) return;
- for (PrototypeIterator iter(isolate, Handle<JSReceiver>::cast(receiver),
- where_to_start);
- !iter.IsAtEnd(); iter.Advance()) {
- Handle<Object> current = PrototypeIterator::GetCurrent(iter);
- if (!current->IsJSObject()) return;
- Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
- Map current_map = current_obj->map();
- if (current_map->is_prototype_map()) {
- // If the map is already marked as should be fast, we're done. Its
- // prototypes will have been marked already as well.
- if (current_map->should_be_fast_prototype_map()) return;
- Handle<Map> map(current_map, isolate);
- Map::SetShouldBeFastPrototypeMap(map, true, isolate);
- JSObject::OptimizeAsPrototype(current_obj);
- }
- }
-}
-
-// static
-void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
- bool enable_setup_mode) {
- if (object->IsJSGlobalObject()) return;
- if (enable_setup_mode && PrototypeBenefitsFromNormalization(object)) {
- // First normalize to ensure all JSFunctions are DATA_CONSTANT.
- JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
- "NormalizeAsPrototype");
- }
- if (object->map()->is_prototype_map()) {
- if (object->map()->should_be_fast_prototype_map() &&
- !object->HasFastProperties()) {
- JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
- }
- } else {
- Handle<Map> new_map = Map::Copy(object->GetIsolate(),
- handle(object->map(), object->GetIsolate()),
- "CopyAsPrototype");
- JSObject::MigrateToMap(object, new_map);
- object->map()->set_is_prototype_map(true);
-
- // Replace the pointer to the exact constructor with the Object function
- // from the same context if undetectable from JS. This is to avoid keeping
- // memory alive unnecessarily.
- Object maybe_constructor = object->map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
- JSFunction constructor = JSFunction::cast(maybe_constructor);
- if (!constructor->shared()->IsApiFunction()) {
- Context context = constructor->context()->native_context();
- JSFunction object_function = context->object_function();
- object->map()->SetConstructor(object_function);
- }
- }
- }
-}
-
-
-// static
-void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) {
- if (!object->map()->is_prototype_map()) return;
- if (!object->map()->should_be_fast_prototype_map()) return;
- OptimizeAsPrototype(object);
-}
-
-
-// static
-void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
- // Contract: In line with InvalidatePrototypeChains()'s requirements,
- // leaf maps don't need to register as users, only prototypes do.
- DCHECK(user->is_prototype_map());
-
- Handle<Map> current_user = user;
- Handle<PrototypeInfo> current_user_info =
- Map::GetOrCreatePrototypeInfo(user, isolate);
- for (PrototypeIterator iter(isolate, user); !iter.IsAtEnd(); iter.Advance()) {
- // Walk up the prototype chain as long as the links haven't been registered yet.
- if (current_user_info->registry_slot() != PrototypeInfo::UNREGISTERED) {
- break;
- }
- Handle<Object> maybe_proto = PrototypeIterator::GetCurrent(iter);
- // Proxies on the prototype chain are not supported. They make it
- // impossible to make any assumptions about the prototype chain anyway.
- if (maybe_proto->IsJSProxy()) return;
- Handle<JSObject> proto = Handle<JSObject>::cast(maybe_proto);
- Handle<PrototypeInfo> proto_info =
- Map::GetOrCreatePrototypeInfo(proto, isolate);
- Handle<Object> maybe_registry(proto_info->prototype_users(), isolate);
- Handle<WeakArrayList> registry =
- maybe_registry->IsSmi()
- ? handle(ReadOnlyRoots(isolate->heap()).empty_weak_array_list(),
- isolate)
- : Handle<WeakArrayList>::cast(maybe_registry);
- int slot = 0;
- Handle<WeakArrayList> new_array =
- PrototypeUsers::Add(isolate, registry, current_user, &slot);
- current_user_info->set_registry_slot(slot);
- if (!maybe_registry.is_identical_to(new_array)) {
- proto_info->set_prototype_users(*new_array);
- }
- if (FLAG_trace_prototype_users) {
- PrintF("Registering %p as a user of prototype %p (map=%p).\n",
- reinterpret_cast<void*>(current_user->ptr()),
- reinterpret_cast<void*>(proto->ptr()),
- reinterpret_cast<void*>(proto->map()->ptr()));
- }
-
- current_user = handle(proto->map(), isolate);
- current_user_info = proto_info;
- }
-}
-
-
-// Can be called regardless of whether |user| was actually registered with
-// |prototype|. Returns true when there was a registration.
-// static
-bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
- DCHECK(user->is_prototype_map());
- // If it doesn't have a PrototypeInfo, it was never registered.
- if (!user->prototype_info()->IsPrototypeInfo()) return false;
- // If it had no prototype before, see if it had users that might expect
- // registration.
- if (!user->prototype()->IsJSObject()) {
- Object users =
- PrototypeInfo::cast(user->prototype_info())->prototype_users();
- return users->IsWeakArrayList();
- }
- Handle<JSObject> prototype(JSObject::cast(user->prototype()), isolate);
- Handle<PrototypeInfo> user_info =
- Map::GetOrCreatePrototypeInfo(user, isolate);
- int slot = user_info->registry_slot();
- if (slot == PrototypeInfo::UNREGISTERED) return false;
- DCHECK(prototype->map()->is_prototype_map());
- Object maybe_proto_info = prototype->map()->prototype_info();
- // Since the user knows its registry slot, the prototype info and user registry must exist.
- DCHECK(maybe_proto_info->IsPrototypeInfo());
- Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(maybe_proto_info),
- isolate);
- Handle<WeakArrayList> prototype_users(
- WeakArrayList::cast(proto_info->prototype_users()), isolate);
- DCHECK_EQ(prototype_users->Get(slot), HeapObjectReference::Weak(*user));
- PrototypeUsers::MarkSlotEmpty(*prototype_users, slot);
- if (FLAG_trace_prototype_users) {
- PrintF("Unregistering %p as a user of prototype %p.\n",
- reinterpret_cast<void*>(user->ptr()),
- reinterpret_cast<void*>(prototype->ptr()));
- }
- return true;
-}
-
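Aside: the prototype-user registration above works because each user map remembers its slot in the prototype's registry, which makes unregistration O(1). A minimal sketch of that slot protocol; std::vector<User*> stands in for V8's WeakArrayList and nullptr for a cleared weak slot:

#include <cassert>
#include <vector>

namespace {

constexpr int kUnregistered = -1;

struct User {
  int registry_slot = kUnregistered;
};

// Mirrors PrototypeUsers::Add(): reuse an empty slot if one exists.
int AddUser(std::vector<User*>* registry, User* user) {
  for (size_t i = 0; i < registry->size(); i++) {
    if ((*registry)[i] == nullptr) {
      (*registry)[i] = user;
      return static_cast<int>(i);
    }
  }
  registry->push_back(user);
  return static_cast<int>(registry->size() - 1);
}

// Mirrors UnregisterPrototypeUser(): O(1) via the remembered slot.
bool RemoveUser(std::vector<User*>* registry, User* user) {
  if (user->registry_slot == kUnregistered) return false;
  assert((*registry)[user->registry_slot] == user);
  (*registry)[user->registry_slot] = nullptr;  // MarkSlotEmpty.
  user->registry_slot = kUnregistered;
  return true;
}

}  // namespace

int main() {
  std::vector<User*> registry;
  User a, b;
  a.registry_slot = AddUser(&registry, &a);
  b.registry_slot = AddUser(&registry, &b);
  assert(RemoveUser(&registry, &a));
  assert(!RemoveUser(&registry, &a));  // Second removal is a no-op.
  User c;
  c.registry_slot = AddUser(&registry, &c);
  assert(c.registry_slot == 0);  // a's emptied slot was recycled.
}
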
-namespace {
-
-// This function must be kept in sync with
-// AccessorAssembler::InvalidateValidityCellIfPrototype() which does pre-checks
-// before jumping here.
-void InvalidateOnePrototypeValidityCellInternal(Map map) {
- DCHECK(map->is_prototype_map());
- if (FLAG_trace_prototype_users) {
- PrintF("Invalidating prototype map %p 's cell\n",
- reinterpret_cast<void*>(map.ptr()));
- }
- Object maybe_cell = map->prototype_validity_cell();
- if (maybe_cell->IsCell()) {
- // Just set the value; the cell will be replaced lazily.
- Cell cell = Cell::cast(maybe_cell);
- cell->set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
- }
-}
-
-void InvalidatePrototypeChainsInternal(Map map) {
- InvalidateOnePrototypeValidityCellInternal(map);
-
- Object maybe_proto_info = map->prototype_info();
- if (!maybe_proto_info->IsPrototypeInfo()) return;
- PrototypeInfo proto_info = PrototypeInfo::cast(maybe_proto_info);
- if (!proto_info->prototype_users()->IsWeakArrayList()) {
- return;
- }
- WeakArrayList prototype_users =
- WeakArrayList::cast(proto_info->prototype_users());
- // For now, only maps register themselves as users.
- for (int i = PrototypeUsers::kFirstIndex; i < prototype_users->length();
- ++i) {
- HeapObject heap_object;
- if (prototype_users->Get(i)->GetHeapObjectIfWeak(&heap_object) &&
- heap_object->IsMap()) {
- // Walk the prototype chain (backwards, towards leaf objects) if
- // necessary.
- InvalidatePrototypeChainsInternal(Map::cast(heap_object));
- }
- }
-}
-
-} // namespace
-
-// static
-Map JSObject::InvalidatePrototypeChains(Map map) {
- DisallowHeapAllocation no_gc;
- InvalidatePrototypeChainsInternal(map);
- return map;
-}
-
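Aside: invalidation is a depth-first walk from a prototype's map towards leaf maps through the registered users. A minimal sketch; a bool stands in for the Smi-valued validity Cell and raw pointers for the weak user list:

#include <cassert>
#include <vector>

namespace {

struct MapSketch {
  bool prototype_chain_valid = true;        // Stands in for the validity Cell.
  std::vector<MapSketch*> prototype_users;  // Maps using this map's object as
                                            // a prototype.
};

// Mirrors InvalidatePrototypeChainsInternal(): flip this map's cell, then
// walk towards leaf objects through the registered users.
void Invalidate(MapSketch* map) {
  map->prototype_chain_valid = false;
  for (MapSketch* user : map->prototype_users) Invalidate(user);
}

}  // namespace

int main() {
  MapSketch root, mid, leaf;
  root.prototype_users.push_back(&mid);
  mid.prototype_users.push_back(&leaf);
  Invalidate(&root);
  assert(!leaf.prototype_chain_valid);  // Invalidation reached the leaf map.
}
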
-// We also invalidate the global object's validity cell when a new lexical
-// environment variable is added. This is necessary to ensure that
-// Load/StoreGlobalIC handlers that load/store from the global object's
-// prototype get properly invalidated.
-// Note that the normal Load/StoreICs that load/store through the global object
-// in the prototype chain are not affected by the appearance of a new lexical
-// variable, and therefore we don't propagate invalidation down.
-// static
-void JSObject::InvalidatePrototypeValidityCell(JSGlobalObject global) {
- DisallowHeapAllocation no_gc;
- InvalidateOnePrototypeValidityCellInternal(global->map());
-}
-
-// static
-Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
- Isolate* isolate) {
- Object maybe_proto_info = prototype->map()->prototype_info();
- if (maybe_proto_info->IsPrototypeInfo()) {
- return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
- }
- Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
- prototype->map()->set_prototype_info(*proto_info);
- return proto_info;
-}
-
-
-// static
-Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
- Isolate* isolate) {
- Object maybe_proto_info = prototype_map->prototype_info();
- if (maybe_proto_info->IsPrototypeInfo()) {
- return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
- }
- Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
- prototype_map->set_prototype_info(*proto_info);
- return proto_info;
-}
-
-// static
-void Map::SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
- Isolate* isolate) {
- if (value == false && !map->prototype_info()->IsPrototypeInfo()) {
- // "False" is the implicit default value, so there's nothing to do.
- return;
- }
- GetOrCreatePrototypeInfo(map, isolate)->set_should_be_fast_map(value);
-}
-
-// static
-Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
- Isolate* isolate) {
- Handle<Object> maybe_prototype;
- if (map->IsJSGlobalObjectMap()) {
- DCHECK(map->is_prototype_map());
- // The global object is the prototype of a global proxy, so we can use its
- // validity cell to guard against changes to the global object's prototype.
- maybe_prototype = isolate->global_object();
- } else {
- maybe_prototype =
- handle(map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
- }
- if (!maybe_prototype->IsJSObject()) {
- return handle(Smi::FromInt(Map::kPrototypeChainValid), isolate);
- }
- Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
- // Ensure the prototype is registered with its own prototypes so its cell
- // will be invalidated when necessary.
- JSObject::LazyRegisterPrototypeUser(handle(prototype->map(), isolate),
- isolate);
-
- Object maybe_cell = prototype->map()->prototype_validity_cell();
- // Return existing cell if it's still valid.
- if (maybe_cell->IsCell()) {
- Handle<Cell> cell(Cell::cast(maybe_cell), isolate);
- if (cell->value() == Smi::FromInt(Map::kPrototypeChainValid)) {
- return cell;
- }
- }
- // Otherwise create a new cell.
- Handle<Cell> cell = isolate->factory()->NewCell(
- handle(Smi::FromInt(Map::kPrototypeChainValid), isolate));
- prototype->map()->set_prototype_validity_cell(*cell);
- return cell;
-}
-
-// static
-bool Map::IsPrototypeChainInvalidated(Map map) {
- DCHECK(map->is_prototype_map());
- Object maybe_cell = map->prototype_validity_cell();
- if (maybe_cell->IsCell()) {
- Cell cell = Cell::cast(maybe_cell);
- return cell->value() != Smi::FromInt(Map::kPrototypeChainValid);
- }
- return true;
-}
-
-// static
-void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
- Handle<Object> prototype,
- bool enable_prototype_setup_mode) {
- RuntimeCallTimerScope stats_scope(isolate, *map,
- RuntimeCallCounterId::kMap_SetPrototype);
-
- bool is_hidden = false;
- if (prototype->IsJSObject()) {
- Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
- JSObject::OptimizeAsPrototype(prototype_jsobj, enable_prototype_setup_mode);
-
- Object maybe_constructor = prototype_jsobj->map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
- JSFunction constructor = JSFunction::cast(maybe_constructor);
- Object data = constructor->shared()->function_data();
- is_hidden = (data->IsFunctionTemplateInfo() &&
- FunctionTemplateInfo::cast(data)->hidden_prototype()) ||
- prototype->IsJSGlobalObject();
- } else if (maybe_constructor->IsFunctionTemplateInfo()) {
- is_hidden =
- FunctionTemplateInfo::cast(maybe_constructor)->hidden_prototype() ||
- prototype->IsJSGlobalObject();
- }
- }
- map->set_has_hidden_prototype(is_hidden);
-
- WriteBarrierMode wb_mode =
- prototype->IsNull(isolate) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
- map->set_prototype(*prototype, wb_mode);
-}
-
Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
Handle<Map> initial_map) {
// Replace all of the cached initial array maps in the native context with
@@ -13181,513 +4628,6 @@ Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
return initial_map;
}
-namespace {
-
-void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
- Handle<JSReceiver> value) {
- // What follows is logic for the maps of the objects that are created by
- // using this function as a constructor.
- if (function->has_initial_map()) {
- // If the function has allocated the initial map replace it with a
- // copy containing the new prototype. Also complete any in-object
- // slack tracking that is in progress at this point because it is
- // still tracking the old copy.
- function->CompleteInobjectSlackTrackingIfActive();
-
- Handle<Map> initial_map(function->initial_map(), isolate);
-
- if (!isolate->bootstrapper()->IsActive() &&
- initial_map->instance_type() == JS_OBJECT_TYPE) {
- // Put the value in the initial map field until an initial map is needed.
- // At that point, a new initial map is created and the prototype is put
- // into the initial map where it belongs.
- function->set_prototype_or_initial_map(*value);
- } else {
- Handle<Map> new_map =
- Map::Copy(isolate, initial_map, "SetInstancePrototype");
- JSFunction::SetInitialMap(function, new_map, value);
-
- // If the function is used as the global Array function, cache the
- // updated initial maps (and transitioned versions) in the native context.
- Handle<Context> native_context(function->context()->native_context(),
- isolate);
- Handle<Object> array_function(
- native_context->get(Context::ARRAY_FUNCTION_INDEX), isolate);
- if (array_function->IsJSFunction() &&
- *function == JSFunction::cast(*array_function)) {
- CacheInitialJSArrayMaps(native_context, new_map);
- }
- }
-
- // Deoptimize all code that embeds the previous initial map.
- initial_map->dependent_code()->DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kInitialMapChangedGroup);
- } else {
- // Put the value in the initial map field until an initial map is
- // needed. At that point, a new initial map is created and the
- // prototype is put into the initial map where it belongs.
- function->set_prototype_or_initial_map(*value);
- if (value->IsJSObject()) {
- // Optimize as prototype to detach it from its transition tree.
- JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
- }
- }
-}
-
-} // anonymous namespace
-
-void JSFunction::SetPrototype(Handle<JSFunction> function,
- Handle<Object> value) {
- DCHECK(function->IsConstructor() ||
- IsGeneratorFunction(function->shared()->kind()));
- Isolate* isolate = function->GetIsolate();
- Handle<JSReceiver> construct_prototype;
-
- // If the value is not a JSReceiver, store the value in the map's
- // constructor field so it can be accessed. Also, set the prototype
- // used for constructing objects to the original object prototype.
- // See ECMA-262 13.2.2.
- if (!value->IsJSReceiver()) {
- // Copy the map so this does not affect unrelated functions.
- // Remove map transitions because they point to maps with a
- // different prototype.
- Handle<Map> new_map =
- Map::Copy(isolate, handle(function->map(), isolate), "SetPrototype");
-
- JSObject::MigrateToMap(function, new_map);
- new_map->SetConstructor(*value);
- new_map->set_has_non_instance_prototype(true);
-
- FunctionKind kind = function->shared()->kind();
- Handle<Context> native_context(function->context()->native_context(),
- isolate);
-
- construct_prototype = Handle<JSReceiver>(
- IsGeneratorFunction(kind)
- ? IsAsyncFunction(kind)
- ? native_context->initial_async_generator_prototype()
- : native_context->initial_generator_prototype()
- : native_context->initial_object_prototype(),
- isolate);
- } else {
- construct_prototype = Handle<JSReceiver>::cast(value);
- function->map()->set_has_non_instance_prototype(false);
- }
-
- SetInstancePrototype(isolate, function, construct_prototype);
-}
-
-void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
- Handle<Object> prototype) {
- if (map->prototype() != *prototype)
- Map::SetPrototype(function->GetIsolate(), map, prototype);
- function->set_prototype_or_initial_map(*map);
- map->SetConstructor(*function);
- if (FLAG_trace_maps) {
- LOG(function->GetIsolate(), MapEvent("InitialMap", Map(), *map, "",
- function->shared()->DebugName()));
- }
-}
-
-
-#ifdef DEBUG
-namespace {
-
-bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
- switch (instance_type) {
- case JS_API_OBJECT_TYPE:
- case JS_ARRAY_BUFFER_TYPE:
- case JS_ARRAY_TYPE:
- case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_DATE_TYPE:
- case JS_FUNCTION_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
-#ifdef V8_INTL_SUPPORT
- case JS_INTL_COLLATOR_TYPE:
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
- case JS_INTL_LIST_FORMAT_TYPE:
- case JS_INTL_LOCALE_TYPE:
- case JS_INTL_NUMBER_FORMAT_TYPE:
- case JS_INTL_PLURAL_RULES_TYPE:
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
- case JS_INTL_SEGMENTER_TYPE:
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
-#endif
- case JS_ASYNC_FUNCTION_OBJECT_TYPE:
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- case JS_MAP_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- case JS_OBJECT_TYPE:
- case JS_ERROR_TYPE:
- case JS_ARGUMENTS_TYPE:
- case JS_PROMISE_TYPE:
- case JS_REGEXP_TYPE:
- case JS_SET_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_VALUE_TYPE:
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- case WASM_GLOBAL_TYPE:
- case WASM_INSTANCE_TYPE:
- case WASM_MEMORY_TYPE:
- case WASM_MODULE_TYPE:
- case WASM_TABLE_TYPE:
- return true;
-
- case BIGINT_TYPE:
- case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
- case BYTECODE_ARRAY_TYPE:
- case BYTE_ARRAY_TYPE:
- case CELL_TYPE:
- case CODE_TYPE:
- case FILLER_TYPE:
- case FIXED_ARRAY_TYPE:
- case SCRIPT_CONTEXT_TABLE_TYPE:
- case FIXED_DOUBLE_ARRAY_TYPE:
- case FEEDBACK_METADATA_TYPE:
- case FOREIGN_TYPE:
- case FREE_SPACE_TYPE:
- case HASH_TABLE_TYPE:
- case ORDERED_HASH_MAP_TYPE:
- case ORDERED_HASH_SET_TYPE:
- case ORDERED_NAME_DICTIONARY_TYPE:
- case NAME_DICTIONARY_TYPE:
- case GLOBAL_DICTIONARY_TYPE:
- case NUMBER_DICTIONARY_TYPE:
- case SIMPLE_NUMBER_DICTIONARY_TYPE:
- case STRING_TABLE_TYPE:
- case HEAP_NUMBER_TYPE:
- case JS_BOUND_FUNCTION_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_PROXY_TYPE:
- case MAP_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
- case ODDBALL_TYPE:
- case PROPERTY_CELL_TYPE:
- case SHARED_FUNCTION_INFO_TYPE:
- case SYMBOL_TYPE:
- case ALLOCATION_SITE_TYPE:
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE:
-#undef TYPED_ARRAY_CASE
-
-#define MAKE_STRUCT_CASE(TYPE, Name, name) case TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- // We must not end up here for these instance types at all.
- UNREACHABLE();
- // Fall through.
- default:
- return false;
- }
-}
-
-} // namespace
-#endif
-
-
-void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
- DCHECK(function->has_prototype_slot());
- DCHECK(function->IsConstructor() ||
- IsResumableFunction(function->shared()->kind()));
- if (function->has_initial_map()) return;
- Isolate* isolate = function->GetIsolate();
-
- // First create a new map with the size and number of in-object properties
- // suggested by the function.
- InstanceType instance_type;
- if (IsResumableFunction(function->shared()->kind())) {
- instance_type = IsAsyncGeneratorFunction(function->shared()->kind())
- ? JS_ASYNC_GENERATOR_OBJECT_TYPE
- : JS_GENERATOR_OBJECT_TYPE;
- } else {
- instance_type = JS_OBJECT_TYPE;
- }
-
- // The constructor should be compiled for the optimization hints to be
- // available.
- int expected_nof_properties = 0;
- IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
- if (is_compiled_scope.is_compiled() ||
- Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope)) {
- DCHECK(function->shared()->is_compiled());
- expected_nof_properties = function->shared()->expected_nof_properties();
- }
-
- int instance_size;
- int inobject_properties;
- CalculateInstanceSizeHelper(instance_type, false, 0, expected_nof_properties,
- &instance_size, &inobject_properties);
-
- Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size,
- TERMINAL_FAST_ELEMENTS_KIND,
- inobject_properties);
-
- // Fetch or allocate prototype.
- Handle<Object> prototype;
- if (function->has_instance_prototype()) {
- prototype = handle(function->instance_prototype(), isolate);
- } else {
- prototype = isolate->factory()->NewFunctionPrototype(function);
- }
- DCHECK(map->has_fast_object_elements());
-
- // Finally link initial map and constructor function.
- DCHECK(prototype->IsJSReceiver());
- JSFunction::SetInitialMap(function, map, prototype);
- map->StartInobjectSlackTracking();
-}
-
-namespace {
-bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
- Handle<JSFunction> constructor,
- Handle<Map> constructor_initial_map) {
- // Use the default intrinsic prototype instead.
- if (!new_target->has_prototype_slot()) return false;
- // Check that |function|'s initial map is still in sync with the |constructor|,
- // otherwise we must create a new initial map for |function|.
- if (new_target->has_initial_map() &&
- new_target->initial_map()->GetConstructor() == *constructor) {
- DCHECK(new_target->instance_prototype()->IsJSReceiver());
- return true;
- }
- InstanceType instance_type = constructor_initial_map->instance_type();
- DCHECK(CanSubclassHaveInobjectProperties(instance_type));
- // Create a new map with the size and number of in-object properties
- // suggested by |function|.
-
- // Link initial map and constructor function if the new.target is actually a
- // subclass constructor.
- if (!IsDerivedConstructor(new_target->shared()->kind())) return false;
-
- int instance_size;
- int in_object_properties;
- int embedder_fields =
- JSObject::GetEmbedderFieldCount(*constructor_initial_map);
- bool success = JSFunction::CalculateInstanceSizeForDerivedClass(
- new_target, instance_type, embedder_fields, &instance_size,
- &in_object_properties);
-
- Handle<Map> map;
- if (success) {
- int pre_allocated = constructor_initial_map->GetInObjectProperties() -
- constructor_initial_map->UnusedPropertyFields();
- CHECK_LE(constructor_initial_map->UsedInstanceSize(), instance_size);
- int unused_property_fields = in_object_properties - pre_allocated;
- map = Map::CopyInitialMap(isolate, constructor_initial_map, instance_size,
- in_object_properties, unused_property_fields);
- } else {
- map = Map::CopyInitialMap(isolate, constructor_initial_map);
- }
- map->set_new_target_is_base(false);
- Handle<Object> prototype(new_target->instance_prototype(), isolate);
- JSFunction::SetInitialMap(new_target, map, prototype);
- DCHECK(new_target->instance_prototype()->IsJSReceiver());
- map->SetConstructor(*constructor);
- map->set_construction_counter(Map::kNoSlackTracking);
- map->StartInobjectSlackTracking();
- return true;
-}
-
-} // namespace
-
-// static
-MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
- Handle<JSFunction> constructor,
- Handle<JSReceiver> new_target) {
- EnsureHasInitialMap(constructor);
-
- Handle<Map> constructor_initial_map(constructor->initial_map(), isolate);
- if (*new_target == *constructor) return constructor_initial_map;
-
- Handle<Map> result_map;
- // Fast case, new.target is a subclass of constructor. The map is cacheable
- // (and may already have been cached). new.target.prototype is guaranteed to
- // be a JSReceiver.
- if (new_target->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
- if (FastInitializeDerivedMap(isolate, function, constructor,
- constructor_initial_map)) {
- return handle(function->initial_map(), isolate);
- }
- }
-
- // Slow path: new.target is either a proxy or a function whose map can't be cached.
- // new.target.prototype is not guaranteed to be a JSReceiver, and may need to
- // fall back to the intrinsicDefaultProto.
- Handle<Object> prototype;
- if (new_target->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
- if (function->has_prototype_slot()) {
- // Make sure the new.target.prototype is cached.
- EnsureHasInitialMap(function);
- prototype = handle(function->prototype(), isolate);
- } else {
- // No prototype property, use the intrinsic default proto further down.
- prototype = isolate->factory()->undefined_value();
- }
- } else {
- Handle<String> prototype_string = isolate->factory()->prototype_string();
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, prototype,
- JSReceiver::GetProperty(isolate, new_target, prototype_string), Map);
- // The above prototype lookup might change the constructor and its
- // prototype, hence we have to reload the initial map.
- EnsureHasInitialMap(constructor);
- constructor_initial_map = handle(constructor->initial_map(), isolate);
- }
-
- // If prototype is not a JSReceiver, fetch the intrinsicDefaultProto from the
- // correct realm. Rather than directly fetching the .prototype, we fetch the
- // constructor that points to the .prototype. This relies on
- // constructor.prototype being FROZEN for those constructors.
- if (!prototype->IsJSReceiver()) {
- Handle<Context> context;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, context,
- JSReceiver::GetFunctionRealm(new_target), Map);
- DCHECK(context->IsNativeContext());
- Handle<Object> maybe_index = JSReceiver::GetDataProperty(
- constructor, isolate->factory()->native_context_index_symbol());
- int index = maybe_index->IsSmi() ? Smi::ToInt(*maybe_index)
- : Context::OBJECT_FUNCTION_INDEX;
- Handle<JSFunction> realm_constructor(JSFunction::cast(context->get(index)),
- isolate);
- prototype = handle(realm_constructor->prototype(), isolate);
- }
-
- Handle<Map> map = Map::CopyInitialMap(isolate, constructor_initial_map);
- map->set_new_target_is_base(false);
- CHECK(prototype->IsJSReceiver());
- if (map->prototype() != *prototype)
- Map::SetPrototype(isolate, map, prototype);
- map->SetConstructor(*constructor);
- return map;
-}
-
-int JSFunction::ComputeInstanceSizeWithMinSlack(Isolate* isolate) {
- CHECK(has_initial_map());
- if (initial_map()->IsInobjectSlackTrackingInProgress()) {
- int slack = initial_map()->ComputeMinObjectSlack(isolate);
- return initial_map()->InstanceSizeFromSlack(slack);
- }
- return initial_map()->instance_size();
-}
-
-void JSFunction::PrintName(FILE* out) {
- std::unique_ptr<char[]> name = shared()->DebugName()->ToCString();
- PrintF(out, "%s", name.get());
-}
-
-
-Handle<String> JSFunction::GetName(Handle<JSFunction> function) {
- Isolate* isolate = function->GetIsolate();
- Handle<Object> name =
- JSReceiver::GetDataProperty(function, isolate->factory()->name_string());
- if (name->IsString()) return Handle<String>::cast(name);
- return handle(function->shared()->DebugName(), isolate);
-}
-
-
-Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
- Isolate* isolate = function->GetIsolate();
- Handle<Object> name = JSReceiver::GetDataProperty(
- function, isolate->factory()->display_name_string());
- if (name->IsString()) return Handle<String>::cast(name);
- return JSFunction::GetName(function);
-}
-
-bool JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
- Handle<String> prefix) {
- Isolate* isolate = function->GetIsolate();
- Handle<String> function_name;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, function_name,
- Name::ToFunctionName(isolate, name), false);
- if (prefix->length() > 0) {
- IncrementalStringBuilder builder(isolate);
- builder.AppendString(prefix);
- builder.AppendCharacter(' ');
- builder.AppendString(function_name);
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, function_name, builder.Finish(),
- false);
- }
- RETURN_ON_EXCEPTION_VALUE(
- isolate,
- JSObject::DefinePropertyOrElementIgnoreAttributes(
- function, isolate->factory()->name_string(), function_name,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY)),
- false);
- return true;
-}
-
-namespace {
-
-Handle<String> NativeCodeFunctionSourceString(
- Handle<SharedFunctionInfo> shared_info) {
- Isolate* const isolate = shared_info->GetIsolate();
- IncrementalStringBuilder builder(isolate);
- builder.AppendCString("function ");
- builder.AppendString(handle(shared_info->Name(), isolate));
- builder.AppendCString("() { [native code] }");
- return builder.Finish().ToHandleChecked();
-}
-
-} // namespace
-
-
-// static
-Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
- Isolate* const isolate = function->GetIsolate();
- return isolate->factory()->function_native_code_string();
-}
-
-
-// static
-Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
- Isolate* const isolate = function->GetIsolate();
- Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
-
- // Check if {function} should hide its source code.
- if (!shared_info->IsUserJavaScript()) {
- return NativeCodeFunctionSourceString(shared_info);
- }
-
- // Check if we should print {function} as a class.
- Handle<Object> maybe_class_positions = JSReceiver::GetDataProperty(
- function, isolate->factory()->class_positions_symbol());
- if (maybe_class_positions->IsTuple2()) {
- Tuple2 class_positions = Tuple2::cast(*maybe_class_positions);
- int start_position = Smi::ToInt(class_positions->value1());
- int end_position = Smi::ToInt(class_positions->value2());
- Handle<String> script_source(
- String::cast(Script::cast(shared_info->script())->source()), isolate);
- return isolate->factory()->NewSubString(script_source, start_position,
- end_position);
- }
-
- // Check if we have source code for the {function}.
- if (!shared_info->HasSourceCode()) {
- return NativeCodeFunctionSourceString(shared_info);
- }
-
- if (shared_info->function_token_position() == kNoSourcePosition) {
- // If the function token position isn't valid, return [native code] to
- // ensure calling eval on the returned source code throws rather than
- // giving inconsistent call behaviour.
- isolate->CountUsage(
- v8::Isolate::UseCounterFeature::kFunctionTokenOffsetTooLongForToString);
- return NativeCodeFunctionSourceString(shared_info);
- }
- return Handle<String>::cast(
- SharedFunctionInfo::GetSourceCodeHarmony(shared_info));
-}
-
STATIC_ASSERT(Oddball::kToNumberRawOffset == HeapNumber::kValueOffset);
void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
@@ -13910,7 +4850,7 @@ Object Script::GetNameOrSourceURL() {
MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
Isolate* isolate, const FunctionLiteral* fun) {
- CHECK_NE(fun->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
+ CHECK_NE(fun->function_literal_id(), kFunctionLiteralIdInvalid);
// If this check fails, the problem is most probably the function id
// renumbering done by AstFunctionLiteralIdReindexer; in particular, that
// AstTraversalVisitor doesn't recurse properly in the construct which
@@ -13936,6 +4876,15 @@ Script Script::Iterator::Next() {
return Script();
}
+uint32_t SharedFunctionInfo::Hash() {
+ // Hash SharedFunctionInfo based on its start position and script id. Note: we
+ // don't use the function's literal id since getting that is slow for compiled
+ // functions.
+ int start_pos = StartPosition();
+ int script_id = script()->IsScript() ? Script::cast(script())->id() : 0;
+ return static_cast<uint32_t>(base::hash_combine(start_pos, script_id));
+}
+
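Aside: the new SharedFunctionInfo::Hash() deliberately keys on fields that remain cheap to read once a function is compiled. A standalone sketch of the same shape; the boost-style combiner is an illustrative stand-in for v8::base::hash_combine, not its actual definition:

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <functional>

namespace {

// Illustrative boost-style combiner; a stand-in for v8::base::hash_combine.
uint32_t HashCombine(uint32_t seed, uint32_t value) {
  return seed ^ (static_cast<uint32_t>(std::hash<uint32_t>{}(value)) +
                 0x9e3779b9u + (seed << 6) + (seed >> 2));
}

// Same shape as SharedFunctionInfo::Hash(): start position and script id are
// both readable without touching bytecode or the function literal.
uint32_t SharedFunctionInfoHash(int start_position, int script_id) {
  return HashCombine(static_cast<uint32_t>(start_position),
                     static_cast<uint32_t>(script_id));
}

}  // namespace

int main() {
  // Deterministic: equal inputs always hash equal.
  assert(SharedFunctionInfoHash(10, 3) == SharedFunctionInfoHash(10, 3));
  std::printf("%u\n", SharedFunctionInfoHash(10, 3));
}
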
Code SharedFunctionInfo::GetCode() const {
// ======
// NOTE: This chain of checks MUST be kept in sync with the equivalent CSA
@@ -14251,34 +5200,63 @@ Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
return builder.Finish().ToHandleChecked();
}
+namespace {
+void TraceInlining(SharedFunctionInfo shared, const char* msg) {
+ if (FLAG_trace_turbo_inlining) {
+ StdoutStream os;
+ os << Brief(shared) << ": IsInlineable? " << msg << "\n";
+ }
+}
+} // namespace
+
bool SharedFunctionInfo::IsInlineable() {
- // Check that the function has a script associated with it.
- if (!script()->IsScript()) return false;
+ if (!script()->IsScript()) {
+ TraceInlining(*this, "false (no Script associated with it)");
+ return false;
+ }
if (GetIsolate()->is_precise_binary_code_coverage() &&
!has_reported_binary_coverage()) {
// We may miss invocations if this function is inlined.
+ TraceInlining(*this, "false (requires precise binary coverage)");
return false;
}
- if (optimization_disabled()) return false;
+ if (optimization_disabled()) {
+ TraceInlining(*this, "false (optimization disabled)");
+ return false;
+ }
// Built-in functions are handled by the JSCallReducer.
- if (HasBuiltinFunctionId()) return false;
+ if (HasBuiltinFunctionId()) {
+ TraceInlining(*this, "false (is a builtin)");
+ return false;
+ }
- // Only choose user code for inlining.
- if (!IsUserJavaScript()) return false;
+ if (!IsUserJavaScript()) {
+ TraceInlining(*this, "false (is not user code)");
+ return false;
+ }
// If there is no bytecode array, it is either not compiled or it is compiled
// with WebAssembly for the asm.js pipeline. In either case we don't want to
// inline.
- if (!HasBytecodeArray()) return false;
+ if (!HasBytecodeArray()) {
+ TraceInlining(*this, "false (has no BytecodeArray)");
+ return false;
+ }
- // Quick check on the size of the bytecode to avoid inlining large functions.
if (GetBytecodeArray()->length() > FLAG_max_inlined_bytecode_size) {
+ TraceInlining(*this, "false (length > FLAG_max_inlined_bytecode_size)");
return false;
}
+ if (HasBreakInfo()) {
+ TraceInlining(*this, "false (may contain break points)");
+ return false;
+ }
+
+ TraceInlining(*this, "true");
return true;
}
@@ -14288,7 +5266,7 @@ int SharedFunctionInfo::FindIndexInScript(Isolate* isolate) const {
DisallowHeapAllocation no_gc;
Object script_obj = script();
- if (!script_obj->IsScript()) return FunctionLiteral::kIdTypeInvalid;
+ if (!script_obj->IsScript()) return kFunctionLiteralIdInvalid;
WeakFixedArray shared_info_list =
Script::cast(script_obj)->shared_function_infos();
@@ -14303,81 +5281,7 @@ int SharedFunctionInfo::FindIndexInScript(Isolate* isolate) const {
}
}
- return FunctionLiteral::kIdTypeInvalid;
-}
-
-void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
- bool has_prototype_slot,
- int requested_embedder_fields,
- int requested_in_object_properties,
- int* instance_size,
- int* in_object_properties) {
- DCHECK_LE(static_cast<unsigned>(requested_embedder_fields),
- JSObject::kMaxEmbedderFields);
- int header_size = JSObject::GetHeaderSize(instance_type, has_prototype_slot);
- if (requested_embedder_fields) {
- // If there are embedder fields, then the embedder fields start offset must
- // be properly aligned (embedder fields are located between object header
- // and inobject fields).
- header_size = RoundUp<kSystemPointerSize>(header_size);
- requested_embedder_fields *= kEmbedderDataSlotSizeInTaggedSlots;
- }
- int max_nof_fields =
- (JSObject::kMaxInstanceSize - header_size) >> kTaggedSizeLog2;
- CHECK_LE(max_nof_fields, JSObject::kMaxInObjectProperties);
- CHECK_LE(static_cast<unsigned>(requested_embedder_fields),
- static_cast<unsigned>(max_nof_fields));
- *in_object_properties = Min(requested_in_object_properties,
- max_nof_fields - requested_embedder_fields);
- *instance_size =
- header_size +
- ((requested_embedder_fields + *in_object_properties) << kTaggedSizeLog2);
- CHECK_EQ(*in_object_properties,
- ((*instance_size - header_size) >> kTaggedSizeLog2) -
- requested_embedder_fields);
- CHECK_LE(static_cast<unsigned>(*instance_size),
- static_cast<unsigned>(JSObject::kMaxInstanceSize));
-}
-
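Aside: CalculateInstanceSizeHelper() is bounded arithmetic: clamp the requested in-object property count so that header plus embedder fields plus properties stays under the maximum instance size. A simplified standalone version with illustrative constants in place of V8's:

#include <algorithm>
#include <cassert>

namespace {

constexpr int kTaggedSize = 8;
constexpr int kHeaderSize = 3 * kTaggedSize;  // Illustrative JSObject header.
constexpr int kMaxInstanceSize = 2048;        // Illustrative cap.

// Mirrors CalculateInstanceSizeHelper(): clamp the requested in-object
// property count so header + embedder fields + properties fits the cap.
void CalculateInstanceSize(int requested_embedder_fields,
                           int requested_in_object_properties,
                           int* instance_size, int* in_object_properties) {
  int max_fields = (kMaxInstanceSize - kHeaderSize) / kTaggedSize;
  assert(requested_embedder_fields <= max_fields);
  *in_object_properties = std::min(requested_in_object_properties,
                                   max_fields - requested_embedder_fields);
  *instance_size =
      kHeaderSize +
      (requested_embedder_fields + *in_object_properties) * kTaggedSize;
  assert(*instance_size <= kMaxInstanceSize);
}

}  // namespace

int main() {
  int size = 0, props = 0;
  CalculateInstanceSize(2, 1000, &size, &props);  // Oversized request...
  assert(props == (kMaxInstanceSize - kHeaderSize) / kTaggedSize - 2);
  assert(size == kMaxInstanceSize);  // ...is clamped to the cap.
}
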
-// static
-bool JSFunction::CalculateInstanceSizeForDerivedClass(
- Handle<JSFunction> function, InstanceType instance_type,
- int requested_embedder_fields, int* instance_size,
- int* in_object_properties) {
- Isolate* isolate = function->GetIsolate();
- int expected_nof_properties = 0;
- for (PrototypeIterator iter(isolate, function, kStartAtReceiver);
- !iter.IsAtEnd(); iter.Advance()) {
- Handle<JSReceiver> current =
- PrototypeIterator::GetCurrent<JSReceiver>(iter);
- if (!current->IsJSFunction()) break;
- Handle<JSFunction> func(Handle<JSFunction>::cast(current));
- // The super constructor should be compiled for the number of expected
- // properties to be available.
- Handle<SharedFunctionInfo> shared(func->shared(), isolate);
- IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
- if (is_compiled_scope.is_compiled() ||
- Compiler::Compile(func, Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope)) {
- DCHECK(shared->is_compiled());
- int count = shared->expected_nof_properties();
- // Check that the estimate is sane.
- if (expected_nof_properties <= JSObject::kMaxInObjectProperties - count) {
- expected_nof_properties += count;
- } else {
- expected_nof_properties = JSObject::kMaxInObjectProperties;
- }
- } else {
- // In case there was a compilation error for the constructor we will
- // throw an error during instantiation. Hence we directly return false.
- return false;
- }
- if (!IsDerivedConstructor(shared->kind())) break;
- }
- CalculateInstanceSizeHelper(instance_type, true, requested_embedder_fields,
- expected_nof_properties, instance_size,
- in_object_properties);
- return true;
+ return kFunctionLiteralIdInvalid;
}
@@ -14478,6 +5382,8 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_length(lit->function_length());
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
shared_info->SetExpectedNofPropertiesFromEstimate(lit);
+ shared_info->set_is_safe_to_skip_arguments_adaptor(
+ lit->SafeToSkipArgumentsAdaptor());
DCHECK_NULL(lit->produced_preparse_data());
// If we're about to eager compile, we'll have the function literal
// available, so there's no need to wastefully allocate an uncompiled data.
@@ -14489,6 +5395,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
// value after compiling, but avoid overwriting values set manually by the
// bootstrapper.
shared_info->set_length(SharedFunctionInfo::kInvalidLength);
+ shared_info->set_is_safe_to_skip_arguments_adaptor(false);
ProducedPreparseData* scope_data = lit->produced_preparse_data();
if (scope_data != nullptr) {
Handle<PreparseData> preparse_data =
@@ -14518,10 +5425,6 @@ void SharedFunctionInfo::SetExpectedNofPropertiesFromEstimate(
// to be added later.
if (estimate == 0) estimate = 2;
- // Inobject slack tracking will reclaim redundant inobject space later,
- // so we can afford to adjust the estimate generously.
- estimate += 8;
-
// Limit actual estimate to fit in an 8 bit field; we will never allocate
// more than this in any case.
STATIC_ASSERT(JSObject::kMaxInObjectProperties <= kMaxUInt8);
@@ -14582,7 +5485,7 @@ int SharedFunctionInfo::EndPosition() const {
int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
// Fast path for the common case when the SFI is uncompiled and so the
// function literal id is already in the uncompiled data.
- if (HasUncompiledData()) {
+ if (HasUncompiledData() && uncompiled_data()->has_function_literal_id()) {
int id = uncompiled_data()->function_literal_id();
// Make sure the id is what we should have found with the slow path.
DCHECK_EQ(id, FindIndexInScript(isolate));
@@ -14590,7 +5493,7 @@ int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
}
// Otherwise, search for the function in the SFI's script's function list,
- // and return its index in that list.e
+ // and return its index in that list.
return FindIndexInScript(isolate);
}
@@ -14614,846 +5517,23 @@ void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
}
}
-void Map::StartInobjectSlackTracking() {
- DCHECK(!IsInobjectSlackTrackingInProgress());
- if (UnusedPropertyFields() == 0) return;
- set_construction_counter(Map::kSlackTrackingCounterStart);
-}
-
-void ObjectVisitor::VisitRelocInfo(RelocIterator* it) {
- for (; !it->done(); it->next()) {
- it->rinfo()->Visit(this);
- }
-}
-
-void Code::ClearEmbeddedObjects(Heap* heap) {
- HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER);
- }
- }
- set_embedded_objects_cleared(true);
-}
-
-
-void Code::Relocate(intptr_t delta) {
- for (RelocIterator it(*this, RelocInfo::kApplyMask); !it.done(); it.next()) {
- it.rinfo()->apply(delta);
- }
- FlushICache();
-}
-
-void Code::FlushICache() const {
- Assembler::FlushICache(raw_instruction_start(), raw_instruction_size());
-}
-
-void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
- // Copy code.
- CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
- static_cast<size_t>(desc.instr_size));
-
- // Copy unwinding info, if any.
- if (desc.unwinding_info) {
- DCHECK_GT(desc.unwinding_info_size, 0);
- set_unwinding_info_size(desc.unwinding_info_size);
- CopyBytes(reinterpret_cast<byte*>(unwinding_info_start()),
- desc.unwinding_info,
- static_cast<size_t>(desc.unwinding_info_size));
- }
-
- // Copy reloc info.
- CopyRelocInfoToByteArray(unchecked_relocation_info(), desc);
-
- // Unbox handles and relocate.
- Assembler* origin = desc.origin;
- AllowDeferredHandleDereference embedding_raw_address;
- const int mode_mask = RelocInfo::PostCodegenRelocationMask();
- for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
- it.rinfo()->set_target_object(heap, *p, UPDATE_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
- } else if (RelocInfo::IsCodeTargetMode(mode)) {
- // Rewrite code handles to direct pointers to the first instruction in the
- // code object.
- Handle<Object> p = it.rinfo()->target_object_handle(origin);
- Code code = Code::cast(*p);
- it.rinfo()->set_target_address(code->raw_instruction_start(),
- UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
- } else if (RelocInfo::IsRuntimeEntry(mode)) {
- Address p = it.rinfo()->target_runtime_entry(origin);
- it.rinfo()->set_target_runtime_entry(p, UPDATE_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
- } else {
- intptr_t delta =
- raw_instruction_start() - reinterpret_cast<Address>(desc.buffer);
- it.rinfo()->apply(delta);
- }
- }
-}
-
-
-SafepointEntry Code::GetSafepointEntry(Address pc) {
- SafepointTable table(*this);
- return table.FindEntry(pc);
-}
-
-int Code::OffHeapInstructionSize() const {
- DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_size();
- EmbeddedData d = EmbeddedData::FromBlob();
- return d.InstructionSizeOfBuiltin(builtin_index());
-}
-
-Address Code::OffHeapInstructionStart() const {
- DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_start();
- EmbeddedData d = EmbeddedData::FromBlob();
- return d.InstructionStartOfBuiltin(builtin_index());
-}
-
-Address Code::OffHeapInstructionEnd() const {
- DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_end();
- EmbeddedData d = EmbeddedData::FromBlob();
- return d.InstructionStartOfBuiltin(builtin_index()) +
- d.InstructionSizeOfBuiltin(builtin_index());
-}
-
-namespace {
-template <typename Code>
-void SetStackFrameCacheCommon(Isolate* isolate, Handle<Code> code,
- Handle<SimpleNumberDictionary> cache) {
- Handle<Object> maybe_table(code->source_position_table(), isolate);
- if (maybe_table->IsSourcePositionTableWithFrameCache()) {
- Handle<SourcePositionTableWithFrameCache>::cast(maybe_table)
- ->set_stack_frame_cache(*cache);
- return;
- }
- DCHECK(maybe_table->IsByteArray());
- Handle<ByteArray> table(Handle<ByteArray>::cast(maybe_table));
- Handle<SourcePositionTableWithFrameCache> table_with_cache =
- isolate->factory()->NewSourcePositionTableWithFrameCache(table, cache);
- code->set_source_position_table(*table_with_cache);
-}
-} // namespace
-
// static
-void AbstractCode::SetStackFrameCache(Handle<AbstractCode> abstract_code,
- Handle<SimpleNumberDictionary> cache) {
- if (abstract_code->IsCode()) {
- SetStackFrameCacheCommon(
- abstract_code->GetIsolate(),
- handle(abstract_code->GetCode(), abstract_code->GetIsolate()), cache);
- } else {
- SetStackFrameCacheCommon(
- abstract_code->GetIsolate(),
- handle(abstract_code->GetBytecodeArray(), abstract_code->GetIsolate()),
- cache);
- }
-}
-
-namespace {
-template <typename Code>
-void DropStackFrameCacheCommon(Code code) {
- i::Object maybe_table = code->source_position_table();
- if (maybe_table->IsByteArray()) return;
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
- code->set_source_position_table(
- i::SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table());
-}
-} // namespace
-
-void AbstractCode::DropStackFrameCache() {
- if (IsCode()) {
- DropStackFrameCacheCommon(GetCode());
- } else {
- DropStackFrameCacheCommon(GetBytecodeArray());
- }
-}
-
-int AbstractCode::SourcePosition(int offset) {
- int position = 0;
- // Subtract one because the current PC is one instruction after the call site.
- if (IsCode()) offset--;
- for (SourcePositionTableIterator iterator(source_position_table());
- !iterator.done() && iterator.code_offset() <= offset;
- iterator.Advance()) {
- position = iterator.source_position().ScriptOffset();
- }
- return position;
-}
-
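Aside: the position lookup is a linear scan that keeps the last table entry whose code offset is at or before the query offset. A standalone sketch; a sorted vector of (code offset, script offset) pairs stands in for V8's encoded source position table:

#include <cassert>
#include <utility>
#include <vector>

namespace {

// Mirrors AbstractCode::SourcePosition(): entries are (code offset, script
// offset) pairs sorted by code offset; take the last entry <= |offset|.
int SourcePosition(const std::vector<std::pair<int, int>>& table, int offset) {
  int position = 0;
  for (const auto& entry : table) {
    if (entry.first > offset) break;
    position = entry.second;
  }
  return position;
}

}  // namespace

int main() {
  std::vector<std::pair<int, int>> table = {{0, 100}, {8, 120}, {20, 150}};
  assert(SourcePosition(table, 10) == 120);  // Last entry at or before 10.
  assert(SourcePosition(table, 20) == 150);
}
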
-int AbstractCode::SourceStatementPosition(int offset) {
- // First find the closest position.
- int position = SourcePosition(offset);
- // Now find the closest statement position before the position.
- int statement_position = 0;
- for (SourcePositionTableIterator it(source_position_table()); !it.done();
- it.Advance()) {
- if (it.is_statement()) {
- int p = it.source_position().ScriptOffset();
- if (statement_position < p && p <= position) {
- statement_position = p;
- }
- }
- }
- return statement_position;
-}
-
-void JSFunction::ClearTypeFeedbackInfo() {
- ResetIfBytecodeFlushed();
- if (has_feedback_vector()) {
- FeedbackVector vector = feedback_vector();
- Isolate* isolate = GetIsolate();
- if (vector->ClearSlots(isolate)) {
- IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(), *this,
- "ClearTypeFeedbackInfo");
- }
- }
-}
-
-void Code::PrintDeoptLocation(FILE* out, const char* str, Address pc) {
- Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(*this, pc);
- class SourcePosition pos = info.position;
- if (info.deopt_reason != DeoptimizeReason::kUnknown || pos.IsKnown()) {
- PrintF(out, "%s", str);
- OFStream outstr(out);
- pos.Print(outstr, *this);
- PrintF(out, ", %s\n", DeoptimizeReasonToString(info.deopt_reason));
- }
-}
-
-
-bool Code::CanDeoptAt(Address pc) {
- DeoptimizationData deopt_data =
- DeoptimizationData::cast(deoptimization_data());
- Address code_start_address = InstructionStart();
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- Address address = code_start_address + deopt_data->Pc(i)->value();
- if (address == pc && deopt_data->BytecodeOffset(i) != BailoutId::None()) {
- return true;
- }
- }
- return false;
-}
-
-
-// Identify kind of code.
-const char* Code::Kind2String(Kind kind) {
- switch (kind) {
-#define CASE(name) case name: return #name;
- CODE_KIND_LIST(CASE)
-#undef CASE
- case NUMBER_OF_KINDS: break;
- }
- UNREACHABLE();
-}
-
-// Identify kind of code.
-const char* AbstractCode::Kind2String(Kind kind) {
- if (kind < AbstractCode::INTERPRETED_FUNCTION)
- return Code::Kind2String(static_cast<Code::Kind>(kind));
- if (kind == AbstractCode::INTERPRETED_FUNCTION) return "INTERPRETED_FUNCTION";
- UNREACHABLE();
-}
-
-bool Code::IsIsolateIndependent(Isolate* isolate) {
- constexpr int all_real_modes_mask =
- (1 << (RelocInfo::LAST_REAL_RELOC_MODE + 1)) - 1;
- constexpr int mode_mask = all_real_modes_mask &
- ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
- ~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
- ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
- STATIC_ASSERT(RelocInfo::LAST_REAL_RELOC_MODE == RelocInfo::VENEER_POOL);
- STATIC_ASSERT(mode_mask ==
- (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
- RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
- RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
-
- bool is_process_independent = true;
- for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
-#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
- defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
- defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32)
- // On these platforms we emit relative builtin-to-builtin
- // jumps for isolate independent builtins in the snapshot. They are later
- // rewritten as pc-relative jumps to the off-heap instruction stream and are
- // thus process-independent. See also: FinalizeEmbeddedCodeTargets.
- if (RelocInfo::IsCodeTargetMode(it.rinfo()->rmode())) {
- Address target_address = it.rinfo()->target_address();
- if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue;
-
- Code target = Code::GetCodeFromTargetAddress(target_address);
- CHECK(target->IsCode());
- if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
- }
-#endif
- is_process_independent = false;
- }
-
- return is_process_independent;
-}
-
-bool Code::Inlines(SharedFunctionInfo sfi) {
- // We can only check for inlining for optimized code.
- DCHECK(is_optimized_code());
- DisallowHeapAllocation no_gc;
- DeoptimizationData const data =
- DeoptimizationData::cast(deoptimization_data());
- if (data->length() == 0) return false;
- if (data->SharedFunctionInfo() == sfi) return true;
- FixedArray const literals = data->LiteralArray();
- int const inlined_count = data->InlinedFunctionCount()->value();
- for (int i = 0; i < inlined_count; ++i) {
- if (SharedFunctionInfo::cast(literals->get(i)) == sfi) return true;
- }
- return false;
-}
-
-Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
- isolate_ = isolate;
- Object list = isolate->heap()->native_contexts_list();
- next_context_ = list->IsUndefined(isolate_) ? Context() : Context::cast(list);
-}
-
-Code Code::OptimizedCodeIterator::Next() {
- do {
- Object next;
- if (!current_code_.is_null()) {
- // Get next code in the linked list.
- next = current_code_->next_code_link();
- } else if (!next_context_.is_null()) {
- // Linked list of code exhausted. Get list of next context.
- next = next_context_->OptimizedCodeListHead();
- Object next_context = next_context_->next_context_link();
- next_context_ = next_context->IsUndefined(isolate_)
- ? Context()
- : Context::cast(next_context);
- } else {
- // Exhausted contexts.
- return Code();
- }
- current_code_ = next->IsUndefined(isolate_) ? Code() : Code::cast(next);
- } while (current_code_.is_null());
- DCHECK_EQ(Code::OPTIMIZED_FUNCTION, current_code_->kind());
- return current_code_;
-}
-
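Aside: OptimizedCodeIterator flattens a list of per-context code lists into one stream. A minimal sketch of the same two-level advance; vectors stand in for the native-context list and the per-context linked lists:

#include <cassert>
#include <vector>

namespace {

// Stands in for one native context's optimized-code list.
using CodeList = std::vector<int>;

// Mirrors Code::OptimizedCodeIterator: exhaust the current inner list, then
// move to the next context's list, until all contexts are exhausted.
class FlattenIterator {
 public:
  explicit FlattenIterator(const std::vector<CodeList>& lists)
      : lists_(lists) {}

  // Writes the next element to |out|; returns false once exhausted.
  bool Next(int* out) {
    while (outer_ < lists_.size()) {
      if (inner_ < lists_[outer_].size()) {
        *out = lists_[outer_][inner_++];
        return true;
      }
      outer_++;  // Inner list exhausted: advance to the next context.
      inner_ = 0;
    }
    return false;
  }

 private:
  const std::vector<CodeList>& lists_;
  size_t outer_ = 0;
  size_t inner_ = 0;
};

}  // namespace

int main() {
  std::vector<CodeList> lists = {{1, 2}, {}, {3}};
  FlattenIterator it(lists);
  int v = 0, count = 0, sum = 0;
  while (it.Next(&v)) {
    count++;
    sum += v;
  }
  assert(count == 3 && sum == 6);  // Empty middle list is skipped.
}
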
-#ifdef ENABLE_DISASSEMBLER
-
-namespace {
-void print_pc(std::ostream& os, int pc) {
- if (pc == -1) {
- os << "NA";
- } else {
- os << std::hex << pc << std::dec;
- }
-}
-} // anonymous namespace
-
-void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
- if (length() == 0) {
- os << "Deoptimization Input Data invalidated by lazy deoptimization\n";
- return;
- }
-
- disasm::NameConverter converter;
- int const inlined_function_count = InlinedFunctionCount()->value();
- os << "Inlined functions (count = " << inlined_function_count << ")\n";
- for (int id = 0; id < inlined_function_count; ++id) {
- Object info = LiteralArray()->get(id);
- os << " " << Brief(SharedFunctionInfo::cast(info)) << "\n";
- }
- os << "\n";
- int deopt_count = DeoptCount();
- os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n";
- if (0 != deopt_count) {
- os << " index bytecode-offset pc";
- if (FLAG_print_code_verbose) os << " commands";
- os << "\n";
- }
- for (int i = 0; i < deopt_count; i++) {
- os << std::setw(6) << i << " " << std::setw(15)
- << BytecodeOffset(i).ToInt() << " " << std::setw(4);
- print_pc(os, Pc(i)->value());
- os << std::setw(2);
-
- if (!FLAG_print_code_verbose) {
- os << "\n";
- continue;
- }
-
- // Print details of the frame translation.
- int translation_index = TranslationIndex(i)->value();
- TranslationIterator iterator(TranslationByteArray(), translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- DCHECK(Translation::BEGIN == opcode);
- int frame_count = iterator.Next();
- int jsframe_count = iterator.Next();
- int update_feedback_count = iterator.Next();
- os << " " << Translation::StringFor(opcode)
- << " {frame count=" << frame_count
- << ", js frame count=" << jsframe_count
- << ", update_feedback_count=" << update_feedback_count << "}\n";
-
- while (iterator.HasNext() &&
- Translation::BEGIN !=
- (opcode = static_cast<Translation::Opcode>(iterator.Next()))) {
- os << std::setw(31) << " " << Translation::StringFor(opcode) << " ";
-
- switch (opcode) {
- case Translation::BEGIN:
- UNREACHABLE();
- break;
-
- case Translation::INTERPRETED_FRAME: {
- int bytecode_offset = iterator.Next();
- int shared_info_id = iterator.Next();
- unsigned height = iterator.Next();
- int return_value_offset = iterator.Next();
- int return_value_count = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
- os << "{bytecode_offset=" << bytecode_offset << ", function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
- << ", height=" << height << ", retval=@" << return_value_offset
- << "(#" << return_value_count << ")}";
- break;
- }
-
- case Translation::CONSTRUCT_STUB_FRAME: {
- int bailout_id = iterator.Next();
- int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
- unsigned height = iterator.Next();
- os << "{bailout_id=" << bailout_id << ", function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
- << ", height=" << height << "}";
- break;
- }
-
- case Translation::BUILTIN_CONTINUATION_FRAME:
- case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
- case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
- int bailout_id = iterator.Next();
- int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
- unsigned height = iterator.Next();
- os << "{bailout_id=" << bailout_id << ", function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
- << ", height=" << height << "}";
- break;
- }
-
- case Translation::ARGUMENTS_ADAPTOR_FRAME: {
- int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
- unsigned height = iterator.Next();
- os << "{function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
- << ", height=" << height << "}";
- break;
- }
-
- case Translation::REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code) << "}";
- break;
- }
-
- case Translation::INT32_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code)
- << " (int32)}";
- break;
- }
-
- case Translation::INT64_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code)
- << " (int64)}";
- break;
- }
-
- case Translation::UINT32_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code)
- << " (uint32)}";
- break;
- }
-
- case Translation::BOOL_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code)
- << " (bool)}";
- break;
- }
-
- case Translation::FLOAT_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << FloatRegister::from_code(reg_code) << "}";
- break;
- }
-
- case Translation::DOUBLE_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << DoubleRegister::from_code(reg_code) << "}";
- break;
- }
-
- case Translation::STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << "}";
- break;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << " (int32)}";
- break;
- }
-
- case Translation::INT64_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << " (int64)}";
- break;
- }
-
- case Translation::UINT32_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << " (uint32)}";
- break;
- }
-
- case Translation::BOOL_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << " (bool)}";
- break;
- }
-
- case Translation::FLOAT_STACK_SLOT:
- case Translation::DOUBLE_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << "}";
- break;
- }
-
- case Translation::LITERAL: {
- int literal_index = iterator.Next();
- Object literal_value = LiteralArray()->get(literal_index);
- os << "{literal_id=" << literal_index << " (" << Brief(literal_value)
- << ")}";
- break;
- }
-
- case Translation::DUPLICATED_OBJECT: {
- int object_index = iterator.Next();
- os << "{object_index=" << object_index << "}";
- break;
- }
-
- case Translation::ARGUMENTS_ELEMENTS:
- case Translation::ARGUMENTS_LENGTH: {
- CreateArgumentsType arguments_type =
- static_cast<CreateArgumentsType>(iterator.Next());
- os << "{arguments_type=" << arguments_type << "}";
- break;
- }
-
- case Translation::CAPTURED_OBJECT: {
- int args_length = iterator.Next();
- os << "{length=" << args_length << "}";
- break;
- }
-
- case Translation::UPDATE_FEEDBACK: {
- int literal_index = iterator.Next();
- FeedbackSlot slot(iterator.Next());
- os << "{feedback={vector_index=" << literal_index << ", slot=" << slot
- << "}}";
- break;
- }
- }
- os << "\n";
- }
- }
-}
-
-const char* Code::GetName(Isolate* isolate) const {
- if (kind() == BYTECODE_HANDLER) {
- return isolate->interpreter()->LookupNameOfBytecodeHandler(*this);
- } else {
- // There are some handlers and ICs that we can also find names for with
- // Builtins::Lookup.
- return isolate->builtins()->Lookup(raw_instruction_start());
- }
-}
-
-namespace {
-
-inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os, Code code,
- Address begin, size_t size,
- Address current_pc) {
- Address end = begin + size;
- // TODO(mstarzinger): Refactor CodeReference to avoid the
- // unhandlified->handlified transition.
- AllowHandleAllocation allow_handles;
- DisallowHeapAllocation no_gc;
- HandleScope handle_scope(isolate);
- Disassembler::Decode(isolate, &os, reinterpret_cast<byte*>(begin),
- reinterpret_cast<byte*>(end),
- CodeReference(handle(code, isolate)), current_pc);
-}
-
-} // namespace
-
-void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
- Isolate* isolate = GetIsolate();
- os << "kind = " << Kind2String(kind()) << "\n";
- if (name == nullptr) {
- name = GetName(isolate);
- }
- if ((name != nullptr) && (name[0] != '\0')) {
- os << "name = " << name << "\n";
- }
- if (kind() == OPTIMIZED_FUNCTION) {
- os << "stack_slots = " << stack_slots() << "\n";
- }
- os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
- os << "address = " << static_cast<const void*>(this) << "\n\n";
-
- if (is_off_heap_trampoline()) {
- int trampoline_size = raw_instruction_size();
- os << "Trampoline (size = " << trampoline_size << ")\n";
- DisassembleCodeRange(isolate, os, *this, raw_instruction_start(),
- trampoline_size, current_pc);
- os << "\n";
- }
-
- {
- int size = InstructionSize();
- int safepoint_offset =
- has_safepoint_info() ? safepoint_table_offset() : size;
- int const_pool_offset = constant_pool_offset();
- int handler_offset = handler_table_offset() ? handler_table_offset() : size;
- int comments_offset = code_comments_offset();
-
- // Stop before reaching any embedded tables.
- int code_size = std::min(
- {handler_offset, safepoint_offset, const_pool_offset, comments_offset});
- os << "Instructions (size = " << code_size << ")\n";
- DisassembleCodeRange(isolate, os, *this, InstructionStart(), code_size,
- current_pc);
-
- if (int pool_size = constant_pool_size()) {
- DCHECK_EQ(pool_size & kPointerAlignmentMask, 0);
- os << "\nConstant Pool (size = " << pool_size << ")\n";
- Vector<char> buf = Vector<char>::New(50);
- intptr_t* ptr =
- reinterpret_cast<intptr_t*>(InstructionStart() + const_pool_offset);
- for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) {
- SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
- os << static_cast<const void*>(ptr) << " " << buf.start() << "\n";
- }
- }
- }
- os << "\n";
-
- {
- SourcePositionTableIterator it(
- SourcePositionTable(), SourcePositionTableIterator::kJavaScriptOnly);
- if (!it.done()) {
- os << "Source positions:\n pc offset position\n";
- for (; !it.done(); it.Advance()) {
- os << std::setw(10) << std::hex << it.code_offset() << std::dec
- << std::setw(10) << it.source_position().ScriptOffset()
- << (it.is_statement() ? " statement" : "") << "\n";
- }
- os << "\n";
- }
- }
-
- {
- SourcePositionTableIterator it(SourcePositionTable(),
- SourcePositionTableIterator::kExternalOnly);
- if (!it.done()) {
- os << "External Source positions:\n pc offset fileid line\n";
- for (; !it.done(); it.Advance()) {
- DCHECK(it.source_position().IsExternal());
- os << std::setw(10) << std::hex << it.code_offset() << std::dec
- << std::setw(10) << it.source_position().ExternalFileId()
- << std::setw(10) << it.source_position().ExternalLine() << "\n";
- }
- os << "\n";
- }
- }
-
- if (kind() == OPTIMIZED_FUNCTION) {
- DeoptimizationData data =
- DeoptimizationData::cast(this->deoptimization_data());
- data->DeoptimizationDataPrint(os);
- }
- os << "\n";
-
- if (has_safepoint_info()) {
- SafepointTable table(*this);
- os << "Safepoints (size = " << table.size() << ")\n";
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
- os << reinterpret_cast<const void*>(InstructionStart() + pc_offset)
- << " ";
- os << std::setw(6) << std::hex << pc_offset << " " << std::setw(4);
- int trampoline_pc = table.GetTrampolinePcOffset(i);
- print_pc(os, trampoline_pc);
- os << std::dec << " ";
- table.PrintEntry(i, os);
- os << " (sp -> fp) ";
- SafepointEntry entry = table.GetEntry(i);
- if (entry.has_deoptimization_index()) {
- os << std::setw(6) << entry.deoptimization_index();
- } else {
- os << "<none>";
- }
- os << "\n";
- }
- os << "\n";
- }
-
- if (handler_table_offset() > 0) {
- HandlerTable table(*this);
- os << "Handler Table (size = " << table.NumberOfReturnEntries() << ")\n";
- if (kind() == OPTIMIZED_FUNCTION) {
- table.HandlerTableReturnPrint(os);
- }
- os << "\n";
- }
-
- os << "RelocInfo (size = " << relocation_size() << ")\n";
- for (RelocIterator it(*this); !it.done(); it.next()) {
- it.rinfo()->Print(isolate, os);
- }
- os << "\n";
-
- if (has_unwinding_info()) {
- os << "UnwindingInfo (size = " << unwinding_info_size() << ")\n";
- EhFrameDisassembler eh_frame_disassembler(
- reinterpret_cast<byte*>(unwinding_info_start()),
- reinterpret_cast<byte*>(unwinding_info_end()));
- eh_frame_disassembler.DisassembleToStream(os);
- os << "\n";
- }
-
- if (code_comments_offset() < InstructionSize()) {
- PrintCodeCommentsSection(os, code_comments());
- }
-}
-#endif // ENABLE_DISASSEMBLER
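
A hedged sketch of invoking the printer above from V8-internal code; it assumes a Code object in hand and uses kNullAddress to mean "no current pc" (the real call sites may differ):

  StdoutStream os;
  code->Disassemble(nullptr, os, kNullAddress);  // nullptr: name resolved via GetName()
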
-
-void BytecodeArray::Disassemble(std::ostream& os) {
- DisallowHeapAllocation no_gc;
-
- os << "Parameter count " << parameter_count() << "\n";
- os << "Frame size " << frame_size() << "\n";
-
- Address base_address = GetFirstBytecodeAddress();
- SourcePositionTableIterator source_positions(SourcePositionTable());
-
- // Storage for backing the handle passed to the iterator. This handle won't be
- // updated by the GC, but that's OK because we've disallowed GCs anyway.
- BytecodeArray handle_storage = *this;
- Handle<BytecodeArray> handle(reinterpret_cast<Address*>(&handle_storage));
- interpreter::BytecodeArrayIterator iterator(handle);
- while (!iterator.done()) {
- if (!source_positions.done() &&
- iterator.current_offset() == source_positions.code_offset()) {
- os << std::setw(5) << source_positions.source_position().ScriptOffset();
- os << (source_positions.is_statement() ? " S> " : " E> ");
- source_positions.Advance();
- } else {
- os << " ";
- }
- Address current_address = base_address + iterator.current_offset();
- os << reinterpret_cast<const void*>(current_address) << " @ "
- << std::setw(4) << iterator.current_offset() << " : ";
- interpreter::BytecodeDecoder::Decode(
- os, reinterpret_cast<byte*>(current_address), parameter_count());
- if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
- Address jump_target = base_address + iterator.GetJumpTargetOffset();
- os << " (" << reinterpret_cast<void*>(jump_target) << " @ "
- << iterator.GetJumpTargetOffset() << ")";
- }
- if (interpreter::Bytecodes::IsSwitch(iterator.current_bytecode())) {
- os << " {";
- bool first_entry = true;
- for (const auto& entry : iterator.GetJumpTableTargetOffsets()) {
- if (first_entry) {
- first_entry = false;
- } else {
- os << ",";
- }
- os << " " << entry.case_value << ": @" << entry.target_offset;
- }
- os << " }";
- }
- os << std::endl;
- iterator.Advance();
- }
-
- os << "Constant pool (size = " << constant_pool()->length() << ")\n";
-#ifdef OBJECT_PRINT
- if (constant_pool()->length() > 0) {
- constant_pool()->Print();
- }
-#endif
-
- os << "Handler Table (size = " << handler_table()->length() << ")\n";
-#ifdef ENABLE_DISASSEMBLER
- if (handler_table()->length() > 0) {
- HandlerTable table(*this);
- table.HandlerTableRangePrint(os);
- }
-#endif
-}
+
+void SharedFunctionInfo::EnsureSourcePositionsAvailable(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
+ if (FLAG_enable_lazy_source_positions && shared_info->HasBytecodeArray() &&
+ !shared_info->GetBytecodeArray()->HasSourcePositionTable()) {
+ Compiler::CollectSourcePositions(isolate, shared_info);
+ }
+}
-void BytecodeArray::CopyBytecodesTo(BytecodeArray to) {
- BytecodeArray from = *this;
- DCHECK_EQ(from->length(), to->length());
- CopyBytes(reinterpret_cast<byte*>(to->GetFirstBytecodeAddress()),
- reinterpret_cast<byte*>(from->GetFirstBytecodeAddress()),
- from->length());
-}
-
-void BytecodeArray::MakeOlder() {
- // BytecodeArray is aged in the concurrent marker.
- // The word must be completely within the bytecode array.
- Address age_addr = address() + kBytecodeAgeOffset;
- DCHECK_LE(RoundDown(age_addr, kSystemPointerSize) + kSystemPointerSize,
- address() + Size());
- Age age = bytecode_age();
- if (age < kLastBytecodeAge) {
- base::AsAtomic8::Release_CompareAndSwap(reinterpret_cast<byte*>(age_addr),
- age, age + 1);
- }
- DCHECK_GE(bytecode_age(), kFirstBytecodeAge);
- DCHECK_LE(bytecode_age(), kLastBytecodeAge);
-}
-
-bool BytecodeArray::IsOld() const {
- return bytecode_age() >= kIsOldBytecodeAge;
-}
+
+bool BytecodeArray::IsBytecodeEqual(const BytecodeArray other) const {
+ if (length() != other->length()) return false;
+ for (int i = 0; i < length(); ++i) {
+ if (get(i) != other->get(i)) return false;
+ }
+ return true;
+}
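
The release-CAS in MakeOlder above lets the main thread and the concurrent marker race on the age byte safely: ages only move forward, and a lost race just means someone else already advanced it. A standalone analogue, with std::atomic standing in for base::AsAtomic8 (an assumption for illustration):

  #include <atomic>
  #include <cstdint>

  void MakeOlder(std::atomic<uint8_t>& age, uint8_t last_age) {
    uint8_t current = age.load(std::memory_order_relaxed);
    if (current < last_age) {
      uint8_t expected = current;
      // On failure another thread already bumped the age; dropping the
      // update is safe because ages are monotonic.
      age.compare_exchange_strong(expected, current + 1,
                                  std::memory_order_release,
                                  std::memory_order_relaxed);
    }
  }
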
// static
@@ -15472,222 +5552,6 @@ void JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
array->GetElementsAccessor()->SetLength(array, new_length);
}
-DependentCode DependentCode::GetDependentCode(Handle<HeapObject> object) {
- if (object->IsMap()) {
- return Handle<Map>::cast(object)->dependent_code();
- } else if (object->IsPropertyCell()) {
- return Handle<PropertyCell>::cast(object)->dependent_code();
- } else if (object->IsAllocationSite()) {
- return Handle<AllocationSite>::cast(object)->dependent_code();
- }
- UNREACHABLE();
-}
-
-void DependentCode::SetDependentCode(Handle<HeapObject> object,
- Handle<DependentCode> dep) {
- if (object->IsMap()) {
- Handle<Map>::cast(object)->set_dependent_code(*dep);
- } else if (object->IsPropertyCell()) {
- Handle<PropertyCell>::cast(object)->set_dependent_code(*dep);
- } else if (object->IsAllocationSite()) {
- Handle<AllocationSite>::cast(object)->set_dependent_code(*dep);
- } else {
- UNREACHABLE();
- }
-}
-
-void DependentCode::InstallDependency(Isolate* isolate,
- const MaybeObjectHandle& code,
- Handle<HeapObject> object,
- DependencyGroup group) {
- Handle<DependentCode> old_deps(DependentCode::GetDependentCode(object),
- isolate);
- Handle<DependentCode> new_deps =
- InsertWeakCode(isolate, old_deps, group, code);
- // Update the list head if necessary.
- if (!new_deps.is_identical_to(old_deps))
- DependentCode::SetDependentCode(object, new_deps);
-}
-
-Handle<DependentCode> DependentCode::InsertWeakCode(
- Isolate* isolate, Handle<DependentCode> entries, DependencyGroup group,
- const MaybeObjectHandle& code) {
- if (entries->length() == 0 || entries->group() > group) {
- // There is no such group.
- return DependentCode::New(isolate, group, code, entries);
- }
- if (entries->group() < group) {
- // The group comes later in the list.
- Handle<DependentCode> old_next(entries->next_link(), isolate);
- Handle<DependentCode> new_next =
- InsertWeakCode(isolate, old_next, group, code);
- if (!old_next.is_identical_to(new_next)) {
- entries->set_next_link(*new_next);
- }
- return entries;
- }
- DCHECK_EQ(group, entries->group());
- int count = entries->count();
- // Check for existing entry to avoid duplicates.
- for (int i = 0; i < count; i++) {
- if (entries->object_at(i) == *code) return entries;
- }
- if (entries->length() < kCodesStartIndex + count + 1) {
- entries = EnsureSpace(isolate, entries);
- // Count could have changed; reload it.
- count = entries->count();
- }
- entries->set_object_at(count, *code);
- entries->set_count(count + 1);
- return entries;
-}
-
-Handle<DependentCode> DependentCode::New(Isolate* isolate,
- DependencyGroup group,
- const MaybeObjectHandle& object,
- Handle<DependentCode> next) {
- Handle<DependentCode> result = Handle<DependentCode>::cast(
- isolate->factory()->NewWeakFixedArray(kCodesStartIndex + 1, TENURED));
- result->set_next_link(*next);
- result->set_flags(GroupField::encode(group) | CountField::encode(1));
- result->set_object_at(0, *object);
- return result;
-}
-
-Handle<DependentCode> DependentCode::EnsureSpace(
- Isolate* isolate, Handle<DependentCode> entries) {
- if (entries->Compact()) return entries;
- int capacity = kCodesStartIndex + DependentCode::Grow(entries->count());
- int grow_by = capacity - entries->length();
- return Handle<DependentCode>::cast(
- isolate->factory()->CopyWeakFixedArrayAndGrow(entries, grow_by, TENURED));
-}
-
-
-bool DependentCode::Compact() {
- int old_count = count();
- int new_count = 0;
- for (int i = 0; i < old_count; i++) {
- MaybeObject obj = object_at(i);
- if (!obj->IsCleared()) {
- if (i != new_count) {
- copy(i, new_count);
- }
- new_count++;
- }
- }
- set_count(new_count);
- for (int i = new_count; i < old_count; i++) {
- clear_at(i);
- }
- return new_count < old_count;
-}
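
Compact above is a two-finger sweep: live entries slide left over cleared weak slots, the tail is wiped, and the return value says whether anything was dropped. A standalone sketch with nullptr playing the cleared-slot role:

  #include <cstddef>
  #include <vector>

  bool Compact(std::vector<void*>& slots) {
    size_t new_count = 0;
    for (size_t i = 0; i < slots.size(); ++i) {
      if (slots[i] == nullptr) continue;               // skip cleared slots
      if (i != new_count) slots[new_count] = slots[i];
      ++new_count;
    }
    bool dropped_any = new_count < slots.size();
    // Clear the tail, mirroring clear_at() in the original.
    for (size_t i = new_count; i < slots.size(); ++i) slots[i] = nullptr;
    return dropped_any;
  }
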
-
-bool DependentCode::MarkCodeForDeoptimization(
- Isolate* isolate,
- DependentCode::DependencyGroup group) {
- if (this->length() == 0 || this->group() > group) {
- // There is no such group.
- return false;
- }
- if (this->group() < group) {
- // The group comes later in the list.
- return next_link()->MarkCodeForDeoptimization(isolate, group);
- }
- DCHECK_EQ(group, this->group());
- DisallowHeapAllocation no_allocation_scope;
- // Mark all the code that needs to be deoptimized.
- bool marked = false;
- int count = this->count();
- for (int i = 0; i < count; i++) {
- MaybeObject obj = object_at(i);
- if (obj->IsCleared()) continue;
- Code code = Code::cast(obj->GetHeapObjectAssumeWeak());
- if (!code->marked_for_deoptimization()) {
- code->SetMarkedForDeoptimization(DependencyGroupName(group));
- marked = true;
- }
- }
- for (int i = 0; i < count; i++) {
- clear_at(i);
- }
- set_count(0);
- return marked;
-}
-
-
-void DependentCode::DeoptimizeDependentCodeGroup(
- Isolate* isolate,
- DependentCode::DependencyGroup group) {
- DisallowHeapAllocation no_allocation_scope;
- bool marked = MarkCodeForDeoptimization(isolate, group);
- if (marked) {
- DCHECK(AllowCodeDependencyChange::IsAllowed());
- Deoptimizer::DeoptimizeMarkedCode(isolate);
- }
-}
-
-void Code::SetMarkedForDeoptimization(const char* reason) {
- set_marked_for_deoptimization(true);
- if (FLAG_trace_deopt &&
- (deoptimization_data() != GetReadOnlyRoots().empty_fixed_array())) {
- DeoptimizationData deopt_data =
- DeoptimizationData::cast(deoptimization_data());
- CodeTracer::Scope scope(GetHeap()->isolate()->GetCodeTracer());
- PrintF(scope.file(),
- "[marking dependent code " V8PRIxPTR_FMT
- " (opt #%d) for deoptimization, reason: %s]\n",
- ptr(), deopt_data->OptimizationId()->value(), reason);
- }
-}
-
-
-const char* DependentCode::DependencyGroupName(DependencyGroup group) {
- switch (group) {
- case kTransitionGroup:
- return "transition";
- case kPrototypeCheckGroup:
- return "prototype-check";
- case kPropertyCellChangedGroup:
- return "property-cell-changed";
- case kFieldOwnerGroup:
- return "field-owner";
- case kInitialMapChangedGroup:
- return "initial-map-changed";
- case kAllocationSiteTenuringChangedGroup:
- return "allocation-site-tenuring-changed";
- case kAllocationSiteTransitionChangedGroup:
- return "allocation-site-transition-changed";
- }
- UNREACHABLE();
-}
-
-Handle<Map> Map::TransitionToPrototype(Isolate* isolate, Handle<Map> map,
- Handle<Object> prototype) {
- Handle<Map> new_map =
- TransitionsAccessor(isolate, map).GetPrototypeTransition(prototype);
- if (new_map.is_null()) {
- new_map = Copy(isolate, map, "TransitionToPrototype");
- TransitionsAccessor(isolate, map)
- .PutPrototypeTransition(prototype, new_map);
- Map::SetPrototype(isolate, new_map, prototype);
- }
- return new_map;
-}
-
-
-Maybe<bool> JSReceiver::SetPrototype(Handle<JSReceiver> object,
- Handle<Object> value, bool from_javascript,
- ShouldThrow should_throw) {
- if (object->IsJSProxy()) {
- return JSProxy::SetPrototype(Handle<JSProxy>::cast(object), value,
- from_javascript, should_throw);
- }
- return JSObject::SetPrototype(Handle<JSObject>::cast(object), value,
- from_javascript, should_throw);
-}
-
-
// ES6: 9.5.2 [[SetPrototypeOf]] (V)
// static
Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
@@ -15760,278 +5624,7 @@ Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
}
-Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
- Handle<Object> value, bool from_javascript,
- ShouldThrow should_throw) {
- Isolate* isolate = object->GetIsolate();
-
-#ifdef DEBUG
- int size = object->Size();
-#endif
-
- if (from_javascript) {
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
- isolate->ReportFailedAccessCheck(object);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kNoAccess));
- }
- } else {
- DCHECK(!object->IsAccessCheckNeeded());
- }
-
- // Silently ignore the change if value is not a JSReceiver or null.
- // SpiderMonkey behaves this way.
- if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true);
-
- bool all_extensible = object->map()->is_extensible();
- Handle<JSObject> real_receiver = object;
- if (from_javascript) {
- // Find the first object in the chain whose prototype object is not
- // hidden.
- PrototypeIterator iter(isolate, real_receiver, kStartAtPrototype,
- PrototypeIterator::END_AT_NON_HIDDEN);
- while (!iter.IsAtEnd()) {
- // Casting to JSObject is fine because hidden prototypes are never
- // JSProxies.
- real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
- iter.Advance();
- all_extensible = all_extensible && real_receiver->map()->is_extensible();
- }
- }
- Handle<Map> map(real_receiver->map(), isolate);
-
- // Nothing to do if the prototype is already set to this value.
- if (map->prototype() == *value) return Just(true);
-
- bool immutable_proto = map->is_immutable_proto();
- if (immutable_proto) {
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kImmutablePrototypeSet, object));
- }
-
- // From 8.6.2 Object Internal Methods
- // ...
- // In addition, if [[Extensible]] is false the value of the [[Class]] and
- // [[Prototype]] internal properties of the object may not be modified.
- // ...
- // Implementation specific extensions that modify [[Class]], [[Prototype]]
- // or [[Extensible]] must not violate the invariants defined in the preceding
- // paragraph.
- if (!all_extensible) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kNonExtensibleProto, object));
- }
-
- // Before we can set the prototype we need to be sure prototype cycles are
- // prevented. It is sufficient to validate that the receiver is not in the
- // new prototype chain.
- if (value->IsJSReceiver()) {
- for (PrototypeIterator iter(isolate, JSReceiver::cast(*value),
- kStartAtReceiver);
- !iter.IsAtEnd(); iter.Advance()) {
- if (iter.GetCurrent<JSReceiver>() == *object) {
- // Cycle detected.
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kCyclicProto));
- }
- }
- }
-
- // Set the new prototype of the object.
-
- isolate->UpdateNoElementsProtectorOnSetPrototype(real_receiver);
-
- Handle<Map> new_map = Map::TransitionToPrototype(isolate, map, value);
- DCHECK(new_map->prototype() == *value);
- JSObject::MigrateToMap(real_receiver, new_map);
-
- DCHECK(size == object->Size());
- return Just(true);
-}
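
The cycle check above walks the prospective prototype chain looking for the receiver itself; a standalone analogue over a simple linked structure:

  struct Proto { const Proto* parent; };

  // Returns true if setting new_proto on receiver would create a cycle.
  bool WouldCreateCycle(const Proto* receiver, const Proto* new_proto) {
    for (const Proto* p = new_proto; p != nullptr; p = p->parent) {
      if (p == receiver) return true;
    }
    return false;
  }
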
-
-// static
-void JSObject::SetImmutableProto(Handle<JSObject> object) {
- DCHECK(!object->IsAccessCheckNeeded()); // Never called from JS
- Handle<Map> map(object->map(), object->GetIsolate());
-
- // Nothing to do if the prototype is already immutable.
- if (map->is_immutable_proto()) return;
-
- Handle<Map> new_map =
- Map::TransitionToImmutableProto(object->GetIsolate(), map);
- object->synchronized_set_map(*new_map);
-}
-
-void JSObject::EnsureCanContainElements(Handle<JSObject> object,
- Arguments* args,
- uint32_t first_arg,
- uint32_t arg_count,
- EnsureElementsMode mode) {
- // Elements in |Arguments| are ordered backwards (because they're on the
- // stack), but the method that's called here iterates over them in the
- // forward direction.
- return EnsureCanContainElements(
- object, args->slot_at(first_arg + arg_count - 1), arg_count, mode);
-}
-
-
-ElementsAccessor* JSObject::GetElementsAccessor() {
- return ElementsAccessor::ForKind(GetElementsKind());
-}
-
-void JSObject::ValidateElements(JSObject object) {
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- object->GetElementsAccessor()->Validate(object);
- }
-#endif
-}
-
-static bool ShouldConvertToSlowElements(JSObject object, uint32_t capacity,
- uint32_t index,
- uint32_t* new_capacity) {
- STATIC_ASSERT(JSObject::kMaxUncheckedOldFastElementsLength <=
- JSObject::kMaxUncheckedFastElementsLength);
- if (index < capacity) {
- *new_capacity = capacity;
- return false;
- }
- if (index - capacity >= JSObject::kMaxGap) return true;
- *new_capacity = JSObject::NewElementsCapacity(index + 1);
- DCHECK_LT(index, *new_capacity);
- if (*new_capacity <= JSObject::kMaxUncheckedOldFastElementsLength ||
- (*new_capacity <= JSObject::kMaxUncheckedFastElementsLength &&
- Heap::InNewSpace(object))) {
- return false;
- }
- // If the fast-case backing storage takes up much more memory than a
- // dictionary backing storage would, the object should have slow elements.
- int used_elements = object->GetFastElementsUsage();
- uint32_t size_threshold = NumberDictionary::kPreferFastElementsSizeFactor *
- NumberDictionary::ComputeCapacity(used_elements) *
- NumberDictionary::kEntrySize;
- return size_threshold <= *new_capacity;
-}
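
To make the threshold above concrete, a runnable toy with assumed stand-ins for NumberDictionary's constants and a crude ComputeCapacity() model (none of these values are taken from the V8 headers):

  #include <cstdint>
  #include <iostream>

  int main() {
    const uint32_t kPreferFastElementsSizeFactor = 3;  // assumed
    const uint32_t kEntrySize = 3;                     // assumed
    uint32_t used_elements = 10;
    uint32_t dict_capacity = 2 * used_elements;        // toy capacity model
    uint32_t size_threshold =
        kPreferFastElementsSizeFactor * dict_capacity * kEntrySize;  // 180
    uint32_t new_capacity = 200;
    // 180 <= 200: the fast store would dwarf a dictionary, so go slow.
    std::cout << (size_threshold <= new_capacity) << "\n";  // prints 1
  }
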
-
-bool JSObject::WouldConvertToSlowElements(uint32_t index) {
- if (!HasFastElements()) return false;
- uint32_t capacity = static_cast<uint32_t>(elements()->length());
- uint32_t new_capacity;
- return ShouldConvertToSlowElements(*this, capacity, index, &new_capacity);
-}
-
-static ElementsKind BestFittingFastElementsKind(JSObject object) {
- if (!object->map()->CanHaveFastTransitionableElementsKind()) {
- return HOLEY_ELEMENTS;
- }
- if (object->HasSloppyArgumentsElements()) {
- return FAST_SLOPPY_ARGUMENTS_ELEMENTS;
- }
- if (object->HasStringWrapperElements()) {
- return FAST_STRING_WRAPPER_ELEMENTS;
- }
- DCHECK(object->HasDictionaryElements());
- NumberDictionary dictionary = object->element_dictionary();
- ElementsKind kind = HOLEY_SMI_ELEMENTS;
- for (int i = 0; i < dictionary->Capacity(); i++) {
- Object key = dictionary->KeyAt(i);
- if (key->IsNumber()) {
- Object value = dictionary->ValueAt(i);
- if (!value->IsNumber()) return HOLEY_ELEMENTS;
- if (!value->IsSmi()) {
- if (!FLAG_unbox_double_arrays) return HOLEY_ELEMENTS;
- kind = HOLEY_DOUBLE_ELEMENTS;
- }
- }
- }
- return kind;
-}
-
-static bool ShouldConvertToFastElements(JSObject object,
- NumberDictionary dictionary,
- uint32_t index,
- uint32_t* new_capacity) {
- // If properties with non-standard attributes or accessors were added, we
- // cannot go back to fast elements.
- if (dictionary->requires_slow_elements()) return false;
-
- // Adding a property with this index will require slow elements.
- if (index >= static_cast<uint32_t>(Smi::kMaxValue)) return false;
- if (object->IsJSArray()) {
- Object length = JSArray::cast(object)->length();
- if (!length->IsSmi()) return false;
- *new_capacity = static_cast<uint32_t>(Smi::ToInt(length));
- } else if (object->IsJSSloppyArgumentsObject()) {
- return false;
- } else {
- *new_capacity = dictionary->max_number_key() + 1;
- }
- *new_capacity = Max(index + 1, *new_capacity);
-
- uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
- NumberDictionary::kEntrySize;
-
- // Convert to fast elements if the dictionary would save only 50% of the space.
- return 2 * dictionary_size >= *new_capacity;
-}
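
The inverse heuristic worked through with toy numbers (kEntrySize again an assumed stand-in): a fast backing store of at most twice the dictionary's footprint is considered worth converting back.

  #include <cstdint>
  #include <iostream>

  int main() {
    const uint32_t kEntrySize = 3;                          // assumed
    uint32_t dict_capacity = 16;
    uint32_t dictionary_size = dict_capacity * kEntrySize;  // 48
    uint32_t new_capacity = 90;
    // 2 * 48 = 96 >= 90: the dictionary saves less than half, so go fast.
    std::cout << (2 * dictionary_size >= new_capacity) << "\n";  // prints 1
  }
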
-
-// static
-void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes) {
- DCHECK(object->map()->is_extensible());
-
- Isolate* isolate = object->GetIsolate();
-
- uint32_t old_length = 0;
- uint32_t new_capacity = 0;
-
- if (object->IsJSArray()) {
- CHECK(JSArray::cast(*object)->length()->ToArrayLength(&old_length));
- }
-
- ElementsKind kind = object->GetElementsKind();
- FixedArrayBase elements = object->elements();
- ElementsKind dictionary_kind = DICTIONARY_ELEMENTS;
- if (IsSloppyArgumentsElementsKind(kind)) {
- elements = SloppyArgumentsElements::cast(elements)->arguments();
- dictionary_kind = SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
- } else if (IsStringWrapperElementsKind(kind)) {
- dictionary_kind = SLOW_STRING_WRAPPER_ELEMENTS;
- }
-
- if (attributes != NONE) {
- kind = dictionary_kind;
- } else if (elements->IsNumberDictionary()) {
- kind = ShouldConvertToFastElements(
- *object, NumberDictionary::cast(elements), index, &new_capacity)
- ? BestFittingFastElementsKind(*object)
- : dictionary_kind;
- } else if (ShouldConvertToSlowElements(
- *object, static_cast<uint32_t>(elements->length()), index,
- &new_capacity)) {
- kind = dictionary_kind;
- }
-
- ElementsKind to = value->OptimalElementsKind();
- if (IsHoleyElementsKind(kind) || !object->IsJSArray() || index > old_length) {
- to = GetHoleyElementsKind(to);
- kind = GetHoleyElementsKind(kind);
- }
- to = GetMoreGeneralElementsKind(kind, to);
- ElementsAccessor* accessor = ElementsAccessor::ForKind(to);
- accessor->Add(object, index, value, attributes, new_capacity);
-
- if (object->IsJSArray() && index >= old_length) {
- Handle<Object> new_length =
- isolate->factory()->NewNumberFromUint(index + 1);
- JSArray::cast(*object)->set_length(*new_length);
- }
-}
bool JSArray::SetLengthWouldNormalize(uint32_t new_length) {
@@ -16072,63 +5665,6 @@ bool AllocationSite::IsNested() {
return false;
}
-template <AllocationSiteUpdateMode update_or_check>
-bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
- ElementsKind to_kind) {
- Isolate* isolate = site->GetIsolate();
- bool result = false;
-
- if (site->PointsToLiteral() && site->boilerplate()->IsJSArray()) {
- Handle<JSArray> boilerplate(JSArray::cast(site->boilerplate()), isolate);
- ElementsKind kind = boilerplate->GetElementsKind();
- // If kind is holey, ensure that to_kind is as well.
- if (IsHoleyElementsKind(kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- }
- if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
- // If the array is huge, it's not likely to be defined in a local
- // function, so we shouldn't make new instances of it very often.
- uint32_t length = 0;
- CHECK(boilerplate->length()->ToArrayLength(&length));
- if (length <= kMaximumArrayBytesToPretransition) {
- if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) {
- return true;
- }
- if (FLAG_trace_track_allocation_sites) {
- bool is_nested = site->IsNested();
- PrintF("AllocationSite: JSArray %p boilerplate %supdated %s->%s\n",
- reinterpret_cast<void*>(site->ptr()),
- is_nested ? "(nested)" : " ", ElementsKindToString(kind),
- ElementsKindToString(to_kind));
- }
- JSObject::TransitionElementsKind(boilerplate, to_kind);
- site->dependent_code()->DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
- result = true;
- }
- }
- } else {
- // The AllocationSite is for a constructed Array.
- ElementsKind kind = site->GetElementsKind();
- // If kind is holey, ensure that to_kind is as well.
- if (IsHoleyElementsKind(kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- }
- if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
- if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) return true;
- if (FLAG_trace_track_allocation_sites) {
- PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
- reinterpret_cast<void*>(site->ptr()), ElementsKindToString(kind),
- ElementsKindToString(to_kind));
- }
- site->SetElementsKind(to_kind);
- site->dependent_code()->DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
- result = true;
- }
- }
- return result;
-}
bool AllocationSite::ShouldTrack(ElementsKind from, ElementsKind to) {
return IsSmiElementsKind(from) &&
@@ -16147,69 +5683,6 @@ const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
return nullptr;
}
-template <AllocationSiteUpdateMode update_or_check>
-bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
- ElementsKind to_kind) {
- if (!object->IsJSArray()) return false;
-
- if (!Heap::InNewSpace(*object)) return false;
-
- Handle<AllocationSite> site;
- {
- DisallowHeapAllocation no_allocation;
-
- Heap* heap = object->GetHeap();
- AllocationMemento memento =
- heap->FindAllocationMemento<Heap::kForRuntime>(object->map(), *object);
- if (memento.is_null()) return false;
-
- // Walk through the memento to its AllocationSite.
- site = handle(memento->GetAllocationSite(), heap->isolate());
- }
- return AllocationSite::DigestTransitionFeedback<update_or_check>(site,
- to_kind);
-}
-
-template bool
-JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kCheckOnly>(
- Handle<JSObject> object, ElementsKind to_kind);
-
-template bool JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kUpdate>(
- Handle<JSObject> object, ElementsKind to_kind);
-
-void JSObject::TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind) {
- ElementsKind from_kind = object->GetElementsKind();
-
- if (IsHoleyElementsKind(from_kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- }
-
- if (from_kind == to_kind) return;
-
- // This method should never be called for any other case.
- DCHECK(IsFastElementsKind(from_kind));
- DCHECK(IsFastElementsKind(to_kind));
- DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
-
- UpdateAllocationSite(object, to_kind);
- if (object->elements() == object->GetReadOnlyRoots().empty_fixed_array() ||
- IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
- // No change is needed to the elements() buffer, the transition
- // only requires a map change.
- Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
- MigrateToMap(object, new_map);
- if (FLAG_trace_elements_transitions) {
- Handle<FixedArrayBase> elms(object->elements(), object->GetIsolate());
- PrintElementsTransition(stdout, object, from_kind, elms, to_kind, elms);
- }
- } else {
- DCHECK((IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
- (IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
- uint32_t c = static_cast<uint32_t>(object->elements()->length());
- ElementsAccessor::ForKind(to_kind)->GrowCapacityAndConvert(object, c);
- }
-}
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
@@ -16238,49 +5711,6 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
return false;
}
-template <typename BackingStore>
-static int HoleyElementsUsage(JSObject object, BackingStore store) {
- Isolate* isolate = object->GetIsolate();
- int limit = object->IsJSArray() ? Smi::ToInt(JSArray::cast(object)->length())
- : store->length();
- int used = 0;
- for (int i = 0; i < limit; ++i) {
- if (!store->is_the_hole(isolate, i)) ++used;
- }
- return used;
-}
-
-int JSObject::GetFastElementsUsage() {
- FixedArrayBase store = elements();
- switch (GetElementsKind()) {
- case PACKED_SMI_ELEMENTS:
- case PACKED_DOUBLE_ELEMENTS:
- case PACKED_ELEMENTS:
- return IsJSArray() ? Smi::ToInt(JSArray::cast(*this)->length())
- : store->length();
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- store = SloppyArgumentsElements::cast(store)->arguments();
- V8_FALLTHROUGH;
- case HOLEY_SMI_ELEMENTS:
- case HOLEY_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- return HoleyElementsUsage(*this, FixedArray::cast(store));
- case HOLEY_DOUBLE_ELEMENTS:
- if (elements()->length() == 0) return 0;
- return HoleyElementsUsage(*this, FixedDoubleArray::cast(store));
-
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NO_ELEMENTS:
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- UNREACHABLE();
- }
- return 0;
-}
// Certain compilers request function template instantiation when they
@@ -16317,37 +5747,6 @@ void Dictionary<Derived, Shape>::Print() {
#endif
-MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
- bool* done) {
- DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
- return GetPropertyWithInterceptorInternal(it, it->GetInterceptor(), done);
-}
-
-Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
- Handle<Name> name) {
- LookupIterator it = LookupIterator::PropertyOrElement(
- object->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
- return HasProperty(&it);
-}
-
-
-Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
- uint32_t index) {
- Isolate* isolate = object->GetIsolate();
- LookupIterator it(isolate, object, index, object,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- return HasProperty(&it);
-}
-
-
-Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
- Handle<Name> name) {
- LookupIterator it = LookupIterator::PropertyOrElement(
- object->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
- return maybe_result.IsJust() ? Just(it.state() == LookupIterator::ACCESSOR)
- : Nothing<bool>();
-}
int FixedArrayBase::GetMaxLengthForNewSpaceAllocation(ElementsKind kind) {
return ((kMaxRegularHeapObjectSize - FixedArrayBase::kHeaderSize) >>
@@ -16358,23 +5757,6 @@ bool FixedArrayBase::IsCowArray() const {
return map() == GetReadOnlyRoots().fixed_cow_array_map();
}
-bool JSObject::IsApiWrapper() {
- // These object types can carry information relevant for embedders. The
- // *_API_* types are generated through templates which can have embedder
- // fields. The other types have their embedder fields added at compile time.
- auto instance_type = map()->instance_type();
- return instance_type == JS_API_OBJECT_TYPE ||
- instance_type == JS_ARRAY_BUFFER_TYPE ||
- instance_type == JS_DATA_VIEW_TYPE ||
- instance_type == JS_SPECIAL_API_OBJECT_TYPE ||
- instance_type == JS_TYPED_ARRAY_TYPE;
-}
-
-bool JSObject::IsDroppableApiWrapper() {
- auto instance_type = map()->instance_type();
- return instance_type == JS_API_OBJECT_TYPE ||
- instance_type == JS_SPECIAL_API_OBJECT_TYPE;
-}
const char* Symbol::PrivateSymbolToName() const {
ReadOnlyRoots roots = GetReadOnlyRoots();
@@ -16498,7 +5880,7 @@ Handle<Object> JSPromise::Fulfill(Handle<JSPromise> promise,
Isolate* const isolate = promise->GetIsolate();
// 1. Assert: The value of promise.[[PromiseState]] is "pending".
- DCHECK_EQ(Promise::kPending, promise->status());
+ CHECK_EQ(Promise::kPending, promise->status());
// 2. Let reactions be promise.[[PromiseFulfillReactions]].
Handle<Object> reactions(promise->reactions(), isolate);
@@ -16526,7 +5908,7 @@ Handle<Object> JSPromise::Reject(Handle<JSPromise> promise,
isolate->factory()->undefined_value());
// 1. Assert: The value of promise.[[PromiseState]] is "pending".
- DCHECK_EQ(Promise::kPending, promise->status());
+ CHECK_EQ(Promise::kPending, promise->status());
// 2. Let reactions be promise.[[PromiseRejectReactions]].
Handle<Object> reactions(promise->reactions(), isolate);
@@ -16614,7 +5996,7 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
// Mark the dependency of the new {promise} on the {resolution}.
Object::SetProperty(isolate, resolution,
isolate->factory()->promise_handled_by_symbol(),
- promise, LanguageMode::kStrict)
+ promise)
.Check();
}
isolate->native_context()->microtask_queue()->EnqueueMicrotask(*task);
@@ -16628,7 +6010,7 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
Handle<Object> reactions,
Handle<Object> argument,
PromiseReaction::Type type) {
- DCHECK(reactions->IsSmi() || reactions->IsPromiseReaction());
+ CHECK(reactions->IsSmi() || reactions->IsPromiseReaction());
// We need to reverse the {reactions} here, since we record them
// on the JSPromise in the reverse order.
@@ -16652,15 +6034,25 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
Handle<PromiseReaction> reaction = Handle<PromiseReaction>::cast(task);
reactions = handle(reaction->next(), isolate);
+ Handle<NativeContext> handler_context;
+
STATIC_ASSERT(static_cast<int>(PromiseReaction::kSize) ==
static_cast<int>(PromiseReactionJobTask::kSize));
if (type == PromiseReaction::kFulfill) {
+ Handle<HeapObject> handler = handle(reaction->fulfill_handler(), isolate);
+ if (handler->IsJSReceiver()) {
+ JSReceiver::GetContextForMicrotask(Handle<JSReceiver>::cast(handler))
+ .ToHandle(&handler_context);
+ }
+ if (handler_context.is_null())
+ handler_context = isolate->native_context();
+
task->synchronized_set_map(
ReadOnlyRoots(isolate).promise_fulfill_reaction_job_task_map());
Handle<PromiseFulfillReactionJobTask>::cast(task)->set_argument(
*argument);
Handle<PromiseFulfillReactionJobTask>::cast(task)->set_context(
- *isolate->native_context());
+ *handler_context);
STATIC_ASSERT(
static_cast<int>(PromiseReaction::kFulfillHandlerOffset) ==
static_cast<int>(PromiseFulfillReactionJobTask::kHandlerOffset));
@@ -16670,20 +6062,26 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
PromiseFulfillReactionJobTask::kPromiseOrCapabilityOffset));
} else {
DisallowHeapAllocation no_gc;
- HeapObject handler = reaction->reject_handler();
+ Handle<HeapObject> handler = handle(reaction->reject_handler(), isolate);
+ if (handler->IsJSReceiver()) {
+ JSReceiver::GetContextForMicrotask(Handle<JSReceiver>::cast(handler))
+ .ToHandle(&handler_context);
+ }
+ if (handler_context.is_null())
+ handler_context = isolate->native_context();
task->synchronized_set_map(
ReadOnlyRoots(isolate).promise_reject_reaction_job_task_map());
Handle<PromiseRejectReactionJobTask>::cast(task)->set_argument(*argument);
Handle<PromiseRejectReactionJobTask>::cast(task)->set_context(
- *isolate->native_context());
- Handle<PromiseRejectReactionJobTask>::cast(task)->set_handler(handler);
+ *handler_context);
+ Handle<PromiseRejectReactionJobTask>::cast(task)->set_handler(*handler);
STATIC_ASSERT(
static_cast<int>(PromiseReaction::kPromiseOrCapabilityOffset) ==
static_cast<int>(
PromiseRejectReactionJobTask::kPromiseOrCapabilityOffset));
}
- isolate->native_context()->microtask_queue()->EnqueueMicrotask(
+ handler_context->microtask_queue()->EnqueueMicrotask(
*Handle<PromiseReactionJobTask>::cast(task));
}
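
The behavioral change in this hunk: reaction job tasks are now enqueued on the microtask queue of the handler's own native context when one can be derived, with the isolate's native context as fallback. The pattern appears twice above; factored out, it reads (V8-internal types):

  Handle<NativeContext> handler_context;
  if (handler->IsJSReceiver()) {
    JSReceiver::GetContextForMicrotask(Handle<JSReceiver>::cast(handler))
        .ToHandle(&handler_context);
  }
  if (handler_context.is_null()) handler_context = isolate->native_context();
  handler_context->microtask_queue()->EnqueueMicrotask(*task);
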
@@ -16937,8 +6335,7 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
RETURN_ON_EXCEPTION(
isolate,
Object::SetProperty(isolate, regexp, factory->lastIndex_string(),
- Handle<Smi>(Smi::zero(), isolate),
- LanguageMode::kStrict),
+ Handle<Smi>(Smi::zero(), isolate)),
JSRegExp);
}
@@ -17079,7 +6476,7 @@ Handle<Derived> HashTable<Derived, Shape>::NewInternal(
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash(Isolate* isolate, Derived new_table) {
+void HashTable<Derived, Shape>::Rehash(ReadOnlyRoots roots, Derived new_table) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
@@ -17092,12 +6489,11 @@ void HashTable<Derived, Shape>::Rehash(Isolate* isolate, Derived new_table) {
// Rehash the elements.
int capacity = this->Capacity();
- ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
uint32_t from_index = EntryToIndex(i);
Object k = this->get(from_index);
if (!Shape::IsLive(roots, k)) continue;
- uint32_t hash = Shape::HashForObject(isolate, k);
+ uint32_t hash = Shape::HashForObject(roots, k);
uint32_t insertion_index =
EntryToIndex(new_table->FindInsertionEntry(hash));
for (int j = 0; j < Shape::kEntrySize; j++) {
@@ -17109,10 +6505,10 @@ void HashTable<Derived, Shape>::Rehash(Isolate* isolate, Derived new_table) {
}
template <typename Derived, typename Shape>
-uint32_t HashTable<Derived, Shape>::EntryForProbe(Isolate* isolate, Object k,
+uint32_t HashTable<Derived, Shape>::EntryForProbe(ReadOnlyRoots roots, Object k,
int probe,
uint32_t expected) {
- uint32_t hash = Shape::HashForObject(isolate, k);
+ uint32_t hash = Shape::HashForObject(roots, k);
uint32_t capacity = this->Capacity();
uint32_t entry = FirstProbe(hash, capacity);
for (int i = 1; i < probe; i++) {
@@ -17140,10 +6536,9 @@ void HashTable<Derived, Shape>::Swap(uint32_t entry1, uint32_t entry2,
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash(Isolate* isolate) {
+void HashTable<Derived, Shape>::Rehash(ReadOnlyRoots roots) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
- ReadOnlyRoots roots(isolate);
uint32_t capacity = Capacity();
bool done = false;
for (int probe = 1; !done; probe++) {
@@ -17153,11 +6548,11 @@ void HashTable<Derived, Shape>::Rehash(Isolate* isolate) {
for (uint32_t current = 0; current < capacity; current++) {
Object current_key = KeyAt(current);
if (!Shape::IsLive(roots, current_key)) continue;
- uint32_t target = EntryForProbe(isolate, current_key, probe, current);
+ uint32_t target = EntryForProbe(roots, current_key, probe, current);
if (current == target) continue;
Object target_key = KeyAt(target);
if (!Shape::IsLive(roots, target_key) ||
- EntryForProbe(isolate, target_key, probe, target) != target) {
+ EntryForProbe(roots, target_key, probe, target) != target) {
// Put the current element into the correct position.
Swap(current, target, mode);
// The other element will be processed on the next iteration.
@@ -17190,12 +6585,12 @@ Handle<Derived> HashTable<Derived, Shape>::EnsureCapacity(
const int kMinCapacityForPretenure = 256;
bool should_pretenure =
- pretenure == TENURED ||
- ((capacity > kMinCapacityForPretenure) && !Heap::InNewSpace(*table));
+ pretenure == TENURED || ((capacity > kMinCapacityForPretenure) &&
+ !Heap::InYoungGeneration(*table));
Handle<Derived> new_table = HashTable::New(
isolate, new_nof, should_pretenure ? TENURED : NOT_TENURED);
- table->Rehash(isolate, *new_table);
+ table->Rehash(ReadOnlyRoots(isolate), *new_table);
return new_table;
}
@@ -17239,12 +6634,12 @@ Handle<Derived> HashTable<Derived, Shape>::Shrink(Isolate* isolate,
const int kMinCapacityForPretenure = 256;
bool pretenure = (at_least_room_for > kMinCapacityForPretenure) &&
- !Heap::InNewSpace(*table);
+ !Heap::InYoungGeneration(*table);
Handle<Derived> new_table =
HashTable::New(isolate, new_capacity, pretenure ? TENURED : NOT_TENURED,
USE_CUSTOM_MINIMUM_CAPACITY);
- table->Rehash(isolate, *new_table);
+ table->Rehash(ReadOnlyRoots(isolate), *new_table);
return new_table;
}
@@ -17262,50 +6657,6 @@ uint32_t HashTable<Derived, Shape>::FindInsertionEntry(uint32_t hash) {
return entry;
}
-void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
- Handle<Name> name) {
- // Regardless of whether the property is there or not invalidate
- // Load/StoreGlobalICs that load/store through global object's prototype.
- JSObject::InvalidatePrototypeValidityCell(*global);
-
- DCHECK(!global->HasFastProperties());
- auto dictionary = handle(global->global_dictionary(), global->GetIsolate());
- int entry = dictionary->FindEntry(global->GetIsolate(), name);
- if (entry == GlobalDictionary::kNotFound) return;
- PropertyCell::InvalidateEntry(global->GetIsolate(), dictionary, entry);
-}
-
-Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
- Handle<JSGlobalObject> global, Handle<Name> name,
- PropertyCellType cell_type, int* entry_out) {
- Isolate* isolate = global->GetIsolate();
- DCHECK(!global->HasFastProperties());
- Handle<GlobalDictionary> dictionary(global->global_dictionary(), isolate);
- int entry = dictionary->FindEntry(isolate, name);
- Handle<PropertyCell> cell;
- if (entry != GlobalDictionary::kNotFound) {
- if (entry_out) *entry_out = entry;
- cell = handle(dictionary->CellAt(entry), isolate);
- PropertyCellType original_cell_type = cell->property_details().cell_type();
- DCHECK(original_cell_type == PropertyCellType::kInvalidated ||
- original_cell_type == PropertyCellType::kUninitialized);
- DCHECK(cell->value()->IsTheHole(isolate));
- if (original_cell_type == PropertyCellType::kInvalidated) {
- cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
- }
- PropertyDetails details(kData, NONE, cell_type);
- cell->set_property_details(details);
- return cell;
- }
- cell = isolate->factory()->NewPropertyCell(name);
- PropertyDetails details(kData, NONE, cell_type);
- dictionary = GlobalDictionary::Add(isolate, dictionary, name, cell, details,
- entry_out);
- // {*entry_out} is initialized inside GlobalDictionary::Add().
- global->SetProperties(*dictionary);
- return cell;
-}
-
// This class is used for looking up two character strings in the string table.
// If we don't have a hit we don't want to waste much time so we unroll the
@@ -17367,7 +6718,7 @@ MaybeHandle<String> StringTable::LookupTwoCharsStringIfExists(
Isolate* isolate,
uint16_t c1,
uint16_t c2) {
- TwoCharHashTableKey key(c1, c2, isolate->heap()->HashSeed());
+ TwoCharHashTableKey key(c1, c2, HashSeed(isolate));
Handle<StringTable> string_table = isolate->factory()->string_table();
int entry = string_table->FindEntry(isolate, &key);
if (entry == kNotFound) return MaybeHandle<String>();
@@ -17538,7 +6889,7 @@ class StringTableNoAllocateKey : public StringTableKey {
StringTableNoAllocateKey(String string, uint64_t seed)
: StringTableKey(0), string_(string) {
StringShape shape(string);
- one_byte_ = shape.HasOnlyOneByteChars();
+ one_byte_ = shape.encoding_tag() == kOneByteStringTag;
DCHECK(!shape.IsInternalized());
DCHECK(!shape.IsThin());
int length = string->length();
@@ -17672,7 +7023,7 @@ Address StringTable::LookupStringIfExists_NoAllocate(Isolate* isolate,
Heap* heap = isolate->heap();
StringTable table = heap->string_table();
- StringTableNoAllocateKey key(string, heap->HashSeed());
+ StringTableNoAllocateKey key(string, HashSeed(isolate));
// String could be an array index.
uint32_t hash = string->hash_field();
@@ -18260,10 +7611,8 @@ template <typename Dictionary>
struct EnumIndexComparator {
explicit EnumIndexComparator(Dictionary dict) : dict(dict) {}
bool operator()(Tagged_t a, Tagged_t b) {
- // TODO(ishell): revisit the code below
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
- PropertyDetails da(dict->DetailsAt(Smi(a).value()));
- PropertyDetails db(dict->DetailsAt(Smi(b).value()));
+ PropertyDetails da(dict->DetailsAt(Smi(static_cast<Address>(a)).value()));
+ PropertyDetails db(dict->DetailsAt(Smi(static_cast<Address>(b)).value()));
return da.dictionary_index() < db.dictionary_index();
}
Dictionary dict;
@@ -18500,7 +7849,7 @@ Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Isolate* isolate,
// Rehash if more than 33% of the entries are deleted.
// TODO(jochen): Consider shrinking the fixed array in place.
if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
- table->Rehash(isolate);
+ table->Rehash(roots);
}
// If we're out of luck, we didn't get a GC recently, and so rehashing
// isn't enough to avoid a crash.
@@ -18512,7 +7861,7 @@ Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Isolate* isolate,
isolate->heap()->CollectAllGarbage(
Heap::kNoGCFlags, GarbageCollectionReason::kFullHashtable);
}
- table->Rehash(isolate);
+ table->Rehash(roots);
}
}
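
The trigger condition follows from the shift: with L live and D deleted entries, (D << 1) > L is equivalent to D > (L + D) / 3, i.e. deleted slots exceed a third of all occupied slots. A worked check:

  #include <iostream>

  int main() {
    int live = 30, deleted = 16;  // 16 > (30 + 16) / 3 = 15.33..., so rehash
    std::cout << ((deleted << 1) > live) << "\n";  // 32 > 30, prints 1
  }
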
@@ -18674,223 +8023,6 @@ Handle<JSArray> JSWeakCollection::GetEntries(Handle<JSWeakCollection> holder,
return isolate->factory()->NewJSArrayWithElements(entries);
}
-// static
-MaybeHandle<JSDate> JSDate::New(Handle<JSFunction> constructor,
- Handle<JSReceiver> new_target, double tv) {
- Isolate* const isolate = constructor->GetIsolate();
- Handle<JSObject> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- JSObject::New(constructor, new_target, Handle<AllocationSite>::null()),
- JSDate);
- if (-DateCache::kMaxTimeInMs <= tv && tv <= DateCache::kMaxTimeInMs) {
- tv = DoubleToInteger(tv) + 0.0;
- } else {
- tv = std::numeric_limits<double>::quiet_NaN();
- }
- Handle<Object> value = isolate->factory()->NewNumber(tv);
- Handle<JSDate>::cast(result)->SetValue(*value, std::isnan(tv));
- return Handle<JSDate>::cast(result);
-}
-
-
-// static
-double JSDate::CurrentTimeValue(Isolate* isolate) {
- if (FLAG_log_internal_timer_events) LOG(isolate, CurrentTimeEvent());
-
- // According to ECMA-262, section 15.9.1, page 117, the precision of
- // the number in a Date object representing a particular instant in
- // time is milliseconds. Therefore, we floor the result of getting
- // the OS time.
- return Floor(V8::GetCurrentPlatform()->CurrentClockTimeMillis());
-}
-
-
-// static
-Address JSDate::GetField(Address raw_object, Address smi_index) {
- Object object(raw_object);
- Smi index(smi_index);
- return JSDate::cast(object)
- ->DoGetField(static_cast<FieldIndex>(index->value()))
- ->ptr();
-}
-
-Object JSDate::DoGetField(FieldIndex index) {
- DCHECK_NE(index, kDateValue);
-
- DateCache* date_cache = GetIsolate()->date_cache();
-
- if (index < kFirstUncachedField) {
- Object stamp = cache_stamp();
- if (stamp != date_cache->stamp() && stamp->IsSmi()) {
- // Since the stamp is not NaN, the value is also not NaN.
- int64_t local_time_ms =
- date_cache->ToLocal(static_cast<int64_t>(value()->Number()));
- SetCachedFields(local_time_ms, date_cache);
- }
- switch (index) {
- case kYear: return year();
- case kMonth: return month();
- case kDay: return day();
- case kWeekday: return weekday();
- case kHour: return hour();
- case kMinute: return min();
- case kSecond: return sec();
- default: UNREACHABLE();
- }
- }
-
- if (index >= kFirstUTCField) {
- return GetUTCField(index, value()->Number(), date_cache);
- }
-
- double time = value()->Number();
- if (std::isnan(time)) return GetReadOnlyRoots().nan_value();
-
- int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(time));
- int days = DateCache::DaysFromTime(local_time_ms);
-
- if (index == kDays) return Smi::FromInt(days);
-
- int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
- if (index == kMillisecond) return Smi::FromInt(time_in_day_ms % 1000);
- DCHECK_EQ(index, kTimeInDay);
- return Smi::FromInt(time_in_day_ms);
-}
-
-Object JSDate::GetUTCField(FieldIndex index, double value,
- DateCache* date_cache) {
- DCHECK_GE(index, kFirstUTCField);
-
- if (std::isnan(value)) return GetReadOnlyRoots().nan_value();
-
- int64_t time_ms = static_cast<int64_t>(value);
-
- if (index == kTimezoneOffset) {
- GetIsolate()->CountUsage(v8::Isolate::kDateGetTimezoneOffset);
- return Smi::FromInt(date_cache->TimezoneOffset(time_ms));
- }
-
- int days = DateCache::DaysFromTime(time_ms);
-
- if (index == kWeekdayUTC) return Smi::FromInt(date_cache->Weekday(days));
-
- if (index <= kDayUTC) {
- int year, month, day;
- date_cache->YearMonthDayFromDays(days, &year, &month, &day);
- if (index == kYearUTC) return Smi::FromInt(year);
- if (index == kMonthUTC) return Smi::FromInt(month);
- DCHECK_EQ(index, kDayUTC);
- return Smi::FromInt(day);
- }
-
- int time_in_day_ms = DateCache::TimeInDay(time_ms, days);
- switch (index) {
- case kHourUTC: return Smi::FromInt(time_in_day_ms / (60 * 60 * 1000));
- case kMinuteUTC: return Smi::FromInt((time_in_day_ms / (60 * 1000)) % 60);
- case kSecondUTC: return Smi::FromInt((time_in_day_ms / 1000) % 60);
- case kMillisecondUTC: return Smi::FromInt(time_in_day_ms % 1000);
- case kDaysUTC: return Smi::FromInt(days);
- case kTimeInDayUTC: return Smi::FromInt(time_in_day_ms);
- default: UNREACHABLE();
- }
-
- UNREACHABLE();
-}
-
-// static
-Handle<Object> JSDate::SetValue(Handle<JSDate> date, double v) {
- Isolate* const isolate = date->GetIsolate();
- Handle<Object> value = isolate->factory()->NewNumber(v);
- bool value_is_nan = std::isnan(v);
- date->SetValue(*value, value_is_nan);
- return value;
-}
-
-void JSDate::SetValue(Object value, bool is_value_nan) {
- set_value(value);
- if (is_value_nan) {
- HeapNumber nan = GetReadOnlyRoots().nan_value();
- set_cache_stamp(nan, SKIP_WRITE_BARRIER);
- set_year(nan, SKIP_WRITE_BARRIER);
- set_month(nan, SKIP_WRITE_BARRIER);
- set_day(nan, SKIP_WRITE_BARRIER);
- set_hour(nan, SKIP_WRITE_BARRIER);
- set_min(nan, SKIP_WRITE_BARRIER);
- set_sec(nan, SKIP_WRITE_BARRIER);
- set_weekday(nan, SKIP_WRITE_BARRIER);
- } else {
- set_cache_stamp(Smi::FromInt(DateCache::kInvalidStamp), SKIP_WRITE_BARRIER);
- }
-}
-
-void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
- int days = DateCache::DaysFromTime(local_time_ms);
- int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
- int year, month, day;
- date_cache->YearMonthDayFromDays(days, &year, &month, &day);
- int weekday = date_cache->Weekday(days);
- int hour = time_in_day_ms / (60 * 60 * 1000);
- int min = (time_in_day_ms / (60 * 1000)) % 60;
- int sec = (time_in_day_ms / 1000) % 60;
- set_cache_stamp(date_cache->stamp());
- set_year(Smi::FromInt(year), SKIP_WRITE_BARRIER);
- set_month(Smi::FromInt(month), SKIP_WRITE_BARRIER);
- set_day(Smi::FromInt(day), SKIP_WRITE_BARRIER);
- set_weekday(Smi::FromInt(weekday), SKIP_WRITE_BARRIER);
- set_hour(Smi::FromInt(hour), SKIP_WRITE_BARRIER);
- set_min(Smi::FromInt(min), SKIP_WRITE_BARRIER);
- set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
-}
-
-int JSMessageObject::GetLineNumber() const {
- if (start_position() == -1) return Message::kNoLineNumberInfo;
-
- Handle<Script> the_script(script(), GetIsolate());
-
- Script::PositionInfo info;
- const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
- if (!Script::GetPositionInfo(the_script, start_position(), &info,
- offset_flag)) {
- return Message::kNoLineNumberInfo;
- }
-
- return info.line + 1;
-}
-
-int JSMessageObject::GetColumnNumber() const {
- if (start_position() == -1) return -1;
-
- Handle<Script> the_script(script(), GetIsolate());
-
- Script::PositionInfo info;
- const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
- if (!Script::GetPositionInfo(the_script, start_position(), &info,
- offset_flag)) {
- return -1;
- }
-
- return info.column; // Note: No '+1' in contrast to GetLineNumber.
-}
-
-Handle<String> JSMessageObject::GetSourceLine() const {
- Isolate* isolate = GetIsolate();
- Handle<Script> the_script(script(), isolate);
-
- if (the_script->type() == Script::TYPE_WASM) {
- return isolate->factory()->empty_string();
- }
-
- Script::PositionInfo info;
- const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
- if (!Script::GetPositionInfo(the_script, start_position(), &info,
- offset_flag)) {
- return isolate->factory()->empty_string();
- }
-
- Handle<String> src = handle(String::cast(the_script->source()), isolate);
- return isolate->factory()->NewSubString(src, info.line_start, info.line_end);
-}
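
The JSDate machinery removed above relies on a stamp-based field cache: the broken-down local-time fields are only trusted while cache_stamp matches the isolate's DateCache stamp, and the stamp slot doubles as the NaN sentinel. Summarizing the states that SetValue establishes and DoGetField tests:

  // States of cache_stamp, as set up by SetValue above:
  //   date_cache->stamp()            -> cached year/month/... are current
  //   Smi(DateCache::kInvalidStamp)  -> value is a number, cache is stale
  //   nan_value (a HeapNumber)       -> date value is NaN; fields hold NaN
  // So "stamp != date_cache->stamp() && stamp->IsSmi()" reads as
  // "stale but valid", which is exactly when the fields are recomputed.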
Handle<PropertyCell> PropertyCell::InvalidateEntry(
Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry) {
@@ -19039,6 +8171,7 @@ void PropertyCell::SetValueWithInvalidation(Isolate* isolate,
int JSGeneratorObject::source_position() const {
CHECK(is_suspended());
DCHECK(function()->shared()->HasBytecodeArray());
+ DCHECK(function()->shared()->GetBytecodeArray()->HasSourcePositionTable());
int code_offset = Smi::ToInt(input_or_debug_pos());
@@ -19075,24 +8208,6 @@ AccessCheckInfo AccessCheckInfo::Get(Isolate* isolate,
return AccessCheckInfo::cast(data_obj);
}
-bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
- for (PrototypeIterator iter(isolate, *this, kStartAtReceiver,
- PrototypeIterator::END_AT_NULL);
- !iter.IsAtEnd(); iter.AdvanceIgnoringProxies()) {
- if (iter.GetCurrent()->IsJSProxy()) return true;
- }
- return false;
-}
-
-bool JSReceiver::HasComplexElements() {
- if (IsJSProxy()) return true;
- JSObject this_object = JSObject::cast(*this);
- if (this_object->HasIndexedInterceptor()) {
- return true;
- }
- if (!this_object->HasDictionaryElements()) return false;
- return this_object->element_dictionary()->HasComplexElements();
-}
MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
Isolate* isolate, Handle<Object> getter) {
@@ -19131,11 +8246,15 @@ Address Smi::LexicographicCompare(Isolate* isolate, Smi x, Smi y) {
// architectures using 32-bit Smis.
uint32_t x_scaled = x_value;
uint32_t y_scaled = y_value;
- if (x_value < 0 || y_value < 0) {
- if (y_value >= 0) return Smi::FromInt(-1).ptr();
- if (x_value >= 0) return Smi::FromInt(1).ptr();
- x_scaled = -x_value;
- y_scaled = -y_value;
+ if (x_value < 0) {
+ if (y_value >= 0) {
+ return Smi::FromInt(-1).ptr();
+ } else {
+ y_scaled = base::NegateWithWraparound(y_value);
+ }
+ x_scaled = base::NegateWithWraparound(x_value);
+ } else if (y_value < 0) {
+ return Smi::FromInt(1).ptr();
}
// clang-format off
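
Beyond the branch restructuring, the change above swaps plain negation for base::NegateWithWraparound. That matters because -x_value is undefined behavior when x_value is the minimum signed value, which is reachable on the 32-bit-Smi configurations the surrounding comment mentions. A sketch of the helper's usual shape (an assumption about src/base, not a quote):

  // Negating via the unsigned type is well defined even for INT32_MIN:
  inline int32_t NegateWithWraparound(int32_t v) {
    return static_cast<int32_t>(0u - static_cast<uint32_t>(v));
  }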
@@ -19250,7 +8369,7 @@ BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::Add(
PropertyDetails, int*);
template void HashTable<GlobalDictionary, GlobalDictionaryShape>::Rehash(
- Isolate* isolate);
+ ReadOnlyRoots roots);
template Handle<NameDictionary>
BaseNameDictionary<NameDictionary, NameDictionaryShape>::EnsureCapacity(
@@ -19282,25 +8401,26 @@ template void
BaseNameDictionary<NameDictionary, NameDictionaryShape>::CollectKeysTo(
Handle<NameDictionary> dictionary, KeyAccumulator* keys);
-void JSWeakFactory::Cleanup(Handle<JSWeakFactory> weak_factory,
- Isolate* isolate) {
+void JSFinalizationGroup::Cleanup(
+ Handle<JSFinalizationGroup> finalization_group, Isolate* isolate) {
// It's possible that the cleared_cells list is empty, since
- // WeakCell.clear() was called on all its elements before this task ran. In
- // that case, don't call the cleanup function.
- if (!weak_factory->cleared_cells()->IsUndefined(isolate)) {
+ // FinalizationGroup.unregister() removed all its elements before this task
+ // ran. In that case, don't call the cleanup function.
+ if (!finalization_group->cleared_cells()->IsUndefined(isolate)) {
// Construct the iterator.
- Handle<JSWeakFactoryCleanupIterator> iterator;
+ Handle<JSFinalizationGroupCleanupIterator> iterator;
{
Handle<Map> cleanup_iterator_map(
- isolate->native_context()->js_weak_factory_cleanup_iterator_map(),
+ isolate->native_context()
+ ->js_finalization_group_cleanup_iterator_map(),
isolate);
- iterator = Handle<JSWeakFactoryCleanupIterator>::cast(
+ iterator = Handle<JSFinalizationGroupCleanupIterator>::cast(
isolate->factory()->NewJSObjectFromMap(
cleanup_iterator_map, NOT_TENURED,
Handle<AllocationSite>::null()));
- iterator->set_factory(*weak_factory);
+ iterator->set_finalization_group(*finalization_group);
}
- Handle<Object> cleanup(weak_factory->cleanup(), isolate);
+ Handle<Object> cleanup(finalization_group->cleanup(), isolate);
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
v8::Local<v8::Value> result;
@@ -19320,5 +8440,36 @@ void JSWeakFactory::Cleanup(Handle<JSWeakFactory> weak_factory,
}
}
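
The rename is mechanical but wide, tracking the WeakRefs proposal's change of terminology. The correspondence applied throughout this diff (see also the type-list hunk in objects.h below):

  // JSWeakFactory                 -> JSFinalizationGroup
  // JSWeakFactoryCleanupIterator  -> JSFinalizationGroupCleanupIterator
  // JSWeakCell                    -> WeakCell
  // On the JS side, cells are detached via FinalizationGroup.unregister(),
  // which is why cleared_cells can legitimately be empty by the time this
  // cleanup task runs.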
+MaybeHandle<FixedArray> JSReceiver::GetPrivateEntries(
+ Isolate* isolate, Handle<JSReceiver> receiver) {
+ PropertyFilter key_filter = static_cast<PropertyFilter>(PRIVATE_NAMES_ONLY);
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys,
+ KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly, key_filter,
+ GetKeysConversion::kConvertToString),
+ MaybeHandle<FixedArray>());
+
+ Handle<FixedArray> entries =
+ isolate->factory()->NewFixedArray(keys->length() * 2);
+ int length = 0;
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> obj_key = handle(keys->get(i), isolate);
+ Handle<Symbol> key(Symbol::cast(*obj_key), isolate);
+ CHECK(key->is_private_name());
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, Object::GetProperty(isolate, receiver, key),
+ MaybeHandle<FixedArray>());
+
+ entries->set(length++, *key);
+ entries->set(length++, *value);
+ }
+ DCHECK_EQ(length, entries->length());
+ return FixedArray::ShrinkOrEmpty(isolate, entries, length);
+}
+
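The new JSReceiver::GetPrivateEntries collects the receiver's private-name symbols interleaved with their values. A hypothetical consumer (names invented for illustration) would walk the result in steps of two:

  // entries = [name0, value0, name1, value1, ...]
  Handle<FixedArray> entries;
  if (JSReceiver::GetPrivateEntries(isolate, receiver).ToHandle(&entries)) {
    for (int i = 0; i < entries->length(); i += 2) {
      Symbol name = Symbol::cast(entries->get(i));  // a private_name symbol
      Object value = entries->get(i + 1);           // its current value
      // ... e.g. hand the pair to an inspector front-end ...
    }
  }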
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 72d3511c6f..4a9b5f33e2 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -159,6 +159,7 @@
// - BreakPoint
// - BreakPointInfo
// - StackFrameInfo
+// - StackTraceFrame
// - SourcePositionTableWithFrameCache
// - CodeCache
// - PrototypeInfo
@@ -257,6 +258,7 @@ class AccessorPair;
class AccessCheckInfo;
class AllocationSite;
class ByteArray;
+class CachedTemplateObject;
class Cell;
class ConsString;
class DependentCode;
@@ -271,6 +273,7 @@ class JSAsyncGeneratorObject;
class JSGlobalProxy;
class JSPromise;
class JSProxy;
+class JSProxyRevocableResult;
class KeyAccumulator;
class LayoutDescriptor;
class LookupIterator;
@@ -334,6 +337,7 @@ class ZoneForwardList;
V(BreakPointInfo) \
V(ByteArray) \
V(BytecodeArray) \
+ V(CachedTemplateObject) \
V(CallHandlerInfo) \
V(Callable) \
V(Cell) \
@@ -419,11 +423,10 @@ class ZoneForwardList;
V(JSStringIterator) \
V(JSTypedArray) \
V(JSValue) \
- V(JSWeakCell) \
V(JSWeakRef) \
V(JSWeakCollection) \
- V(JSWeakFactory) \
- V(JSWeakFactoryCleanupIterator) \
+ V(JSFinalizationGroup) \
+ V(JSFinalizationGroupCleanupIterator) \
V(JSWeakMap) \
V(JSWeakSet) \
V(LoadHandler) \
@@ -489,7 +492,8 @@ class ZoneForwardList;
V(WasmModuleObject) \
V(WasmTableObject) \
V(WeakFixedArray) \
- V(WeakArrayList)
+ V(WeakArrayList) \
+ V(WeakCell)
#ifdef V8_INTL_SUPPORT
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
@@ -531,6 +535,9 @@ class ZoneForwardList;
// The element types selection for CreateListFromArrayLike.
enum class ElementTypes { kAll, kStringAndSymbol };
+// TODO(mythria): Move this to a better place.
+ShouldThrow GetShouldThrow(Isolate* isolate, Maybe<ShouldThrow> should_throw);
+
// Object is the abstract superclass for all classes in the
// object hierarchy.
// Object does not use any virtual functions to avoid the
@@ -631,23 +638,16 @@ class Object {
inline bool FitsRepresentation(Representation representation);
- // Checks whether two valid primitive encodings of a property name resolve to
- // the same logical property. E.g., the smi 1, the string "1" and the double
- // 1 all refer to the same property, so this helper will return true.
- inline bool KeyEquals(Object other);
-
inline bool FilterKey(PropertyFilter filter);
Handle<FieldType> OptimalType(Isolate* isolate,
Representation representation);
- inline static Handle<Object> NewStorageFor(Isolate* isolate,
- Handle<Object> object,
- Representation representation);
+ static Handle<Object> NewStorageFor(Isolate* isolate, Handle<Object> object,
+ Representation representation);
- inline static Handle<Object> WrapForRead(Isolate* isolate,
- Handle<Object> object,
- Representation representation);
+ static Handle<Object> WrapForRead(Isolate* isolate, Handle<Object> object,
+ Representation representation);
// Returns true if the object is of the correct type to be used as an
// implementation of a JSObject's elements.
@@ -655,6 +655,7 @@ class Object {
// ECMA-262 9.2.
bool BooleanValue(Isolate* isolate);
+ Object ToBoolean(Isolate* isolate);
// ES6 section 7.2.11 Abstract Relational Comparison
V8_WARN_UNUSED_RESULT static Maybe<ComparisonResult> Compare(
@@ -678,8 +679,8 @@ class Object {
V8_WARN_UNUSED_RESULT static inline MaybeHandle<JSReceiver> ToObject(
Isolate* isolate, Handle<Object> object,
const char* method_name = nullptr);
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> ToObject(
- Isolate* isolate, Handle<Object> object, Handle<Context> native_context,
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> ToObjectImpl(
+ Isolate* isolate, Handle<Object> object,
const char* method_name = nullptr);
// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
@@ -784,37 +785,39 @@ class Object {
// argument. These cases are either in accordance with the spec or not
// covered by it (eg., concerning API callbacks).
V8_WARN_UNUSED_RESULT static Maybe<bool> SetProperty(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
- StoreOrigin store_origin);
+ LookupIterator* it, Handle<Object> value, StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>());
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SetProperty(
Isolate* isolate, Handle<Object> object, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode,
- StoreOrigin store_origin = StoreOrigin::kMaybeKeyed);
+ Handle<Object> value, StoreOrigin store_origin = StoreOrigin::kMaybeKeyed,
+ Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>());
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> SetPropertyOrElement(
Isolate* isolate, Handle<Object> object, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode,
+ Handle<Object> value,
+ Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>(),
StoreOrigin store_origin = StoreOrigin::kMaybeKeyed);
V8_WARN_UNUSED_RESULT static Maybe<bool> SetSuperProperty(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
- StoreOrigin store_origin);
+ LookupIterator* it, Handle<Object> value, StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>());
V8_WARN_UNUSED_RESULT static Maybe<bool> CannotCreateProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
- Handle<Object> value, ShouldThrow should_throw);
+ Handle<Object> value, Maybe<ShouldThrow> should_throw);
V8_WARN_UNUSED_RESULT static Maybe<bool> WriteToReadOnlyProperty(
- LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw);
V8_WARN_UNUSED_RESULT static Maybe<bool> WriteToReadOnlyProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
Handle<Object> value, ShouldThrow should_throw);
V8_WARN_UNUSED_RESULT static Maybe<bool> RedefineIncompatibleProperty(
Isolate* isolate, Handle<Object> name, Handle<Object> value,
- ShouldThrow should_throw);
+ Maybe<ShouldThrow> should_throw);
V8_WARN_UNUSED_RESULT static Maybe<bool> SetDataProperty(
LookupIterator* it, Handle<Object> value);
V8_WARN_UNUSED_RESULT static Maybe<bool> AddDataProperty(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- ShouldThrow should_throw, StoreOrigin store_origin);
+ Maybe<ShouldThrow> should_throw, StoreOrigin store_origin);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Isolate* isolate, Handle<Object> object, Handle<Name> name);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
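
The recurring signature change in this hunk drops the threaded LanguageMode in favour of Maybe<ShouldThrow>, where Nothing means "decide at the failure site". A plausible body for the GetShouldThrow helper declared near the top of this file, for illustration only (the committed implementation derives the mode from the code that is actually running):

  ShouldThrow GetShouldThrow(Isolate* isolate, Maybe<ShouldThrow> should_throw) {
    if (should_throw.IsJust()) return should_throw.FromJust();
    // Fallback: strict-mode code throws on failed stores, sloppy code
    // fails silently. GetActiveLanguageMode is a hypothetical stand-in.
    return is_strict(GetActiveLanguageMode(isolate)) ? kThrowOnError
                                                     : kDontThrow;
  }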
@@ -825,20 +828,21 @@ class Object {
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
LookupIterator* it);
V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithAccessor(
- LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter(
Handle<Object> receiver, Handle<JSReceiver> getter);
V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithDefinedSetter(
Handle<Object> receiver, Handle<JSReceiver> setter, Handle<Object> value,
- ShouldThrow should_throw);
+ Maybe<ShouldThrow> should_throw);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetElement(
Isolate* isolate, Handle<Object> object, uint32_t index);
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> SetElement(
Isolate* isolate, Handle<Object> object, uint32_t index,
- Handle<Object> value, LanguageMode language_mode);
+ Handle<Object> value, ShouldThrow should_throw);
// Returns the permanent hash code associated with this object. May return
// undefined if not yet created.
@@ -928,7 +932,8 @@ class Object {
void ShortPrint(std::ostream& os) const; // NOLINT
- DECL_CAST(Object)
+ inline static Object cast(Object object) { return object; }
+ inline static Object unchecked_cast(Object object) { return object; }
// Layout description.
static const int kHeaderSize = 0; // Object does not take up any space.
@@ -979,7 +984,7 @@ class Object {
// Helper for SetProperty and SetSuperProperty.
// Return value is only meaningful if [found] is set to true on return.
V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyInternal(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
+ LookupIterator* it, Handle<Object> value, Maybe<ShouldThrow> should_throw,
StoreOrigin store_origin, bool* found);
V8_WARN_UNUSED_RESULT static MaybeHandle<Name> ConvertToName(
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index 8e2dc29d9b..ac0a16c944 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/allocation-site.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/js-objects-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -194,6 +194,64 @@ Address AllocationMemento::GetAllocationSiteUnchecked() const {
return allocation_site()->ptr();
}
+template <AllocationSiteUpdateMode update_or_check>
+bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
+ ElementsKind to_kind) {
+ Isolate* isolate = site->GetIsolate();
+ bool result = false;
+
+ if (site->PointsToLiteral() && site->boilerplate()->IsJSArray()) {
+ Handle<JSArray> boilerplate(JSArray::cast(site->boilerplate()), isolate);
+ ElementsKind kind = boilerplate->GetElementsKind();
+ // if kind is holey ensure that to_kind is as well.
+ if (IsHoleyElementsKind(kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+ if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
+ // If the array is huge, it's not likely to be defined in a local
+ // function, so we shouldn't make new instances of it very often.
+ uint32_t length = 0;
+ CHECK(boilerplate->length()->ToArrayLength(&length));
+ if (length <= kMaximumArrayBytesToPretransition) {
+ if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) {
+ return true;
+ }
+ if (FLAG_trace_track_allocation_sites) {
+ bool is_nested = site->IsNested();
+ PrintF("AllocationSite: JSArray %p boilerplate %supdated %s->%s\n",
+ reinterpret_cast<void*>(site->ptr()),
+ is_nested ? "(nested)" : " ", ElementsKindToString(kind),
+ ElementsKindToString(to_kind));
+ }
+ JSObject::TransitionElementsKind(boilerplate, to_kind);
+ site->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
+ result = true;
+ }
+ }
+ } else {
+ // The AllocationSite is for a constructed Array.
+ ElementsKind kind = site->GetElementsKind();
+ // if kind is holey ensure that to_kind is as well.
+ if (IsHoleyElementsKind(kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+ if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
+ if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) return true;
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
+ reinterpret_cast<void*>(site->ptr()), ElementsKindToString(kind),
+ ElementsKindToString(to_kind));
+ }
+ site->SetElementsKind(to_kind);
+ site->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
+ result = true;
+ }
+ }
+ return result;
+}
+
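In JS terms this is the feedback path behind array allocation sites: when instances created from a literal such as [1, 2, 3] keep transitioning to a more general elements kind (for example to doubles), the site itself is updated so future instances start out general, and dependent optimized code is deoptimized. The kCheckOnly template mode lets callers probe without committing; an illustrative pairing (call sites live elsewhere in the tree):

  bool would_change = AllocationSite::DigestTransitionFeedback<
      AllocationSiteUpdateMode::kCheckOnly>(site, to_kind);
  if (would_change) {
    AllocationSite::DigestTransitionFeedback<
        AllocationSiteUpdateMode::kUpdate>(site, to_kind);  // mutates + deopts
  }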
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index 894f13ffe8..7cd08f7052 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -7,9 +7,10 @@
#include "src/objects/api-callbacks.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/objects/foreign-inl.h"
+#include "src/objects/js-objects-inl.h"
#include "src/objects/name.h"
#include "src/objects/templates.h"
@@ -37,7 +38,7 @@ ACCESSORS(AccessorInfo, expected_receiver_type, Object,
ACCESSORS_CHECKED2(AccessorInfo, getter, Object, kGetterOffset, true,
Foreign::IsNormalized(value))
ACCESSORS_CHECKED2(AccessorInfo, setter, Object, kSetterOffset, true,
- Foreign::IsNormalized(value));
+ Foreign::IsNormalized(value))
ACCESSORS(AccessorInfo, js_getter, Object, kJsGetterOffset)
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index 33f68d56fc..0bbb8ce35d 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -126,17 +126,8 @@ class AccessCheckInfo : public Struct {
static AccessCheckInfo Get(Isolate* isolate, Handle<JSObject> receiver);
-// Layout description.
-#define ACCESS_CHECK_INFO_FIELDS(V) \
- V(kCallbackOffset, kTaggedSize) \
- V(kNamedInterceptorOffset, kTaggedSize) \
- V(kIndexedInterceptorOffset, kTaggedSize) \
- V(kDataOffset, kTaggedSize) \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- ACCESS_CHECK_INFO_FIELDS)
-#undef ACCESS_CHECK_INFO_FIELDS
+ TORQUE_GENERATED_ACCESS_CHECK_INFO_FIELDS)
OBJECT_CONSTRUCTORS(AccessCheckInfo, Struct);
};
@@ -166,22 +157,8 @@ class InterceptorInfo : public Struct {
DECL_PRINTER(InterceptorInfo)
DECL_VERIFIER(InterceptorInfo)
-// Layout description.
-#define INTERCEPTOR_INFO_FIELDS(V) \
- V(kGetterOffset, kTaggedSize) \
- V(kSetterOffset, kTaggedSize) \
- V(kQueryOffset, kTaggedSize) \
- V(kDescriptorOffset, kTaggedSize) \
- V(kDeleterOffset, kTaggedSize) \
- V(kEnumeratorOffset, kTaggedSize) \
- V(kDefinerOffset, kTaggedSize) \
- V(kDataOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- INTERCEPTOR_INFO_FIELDS)
-#undef INTERCEPTOR_INFO_FIELDS
+ TORQUE_GENERATED_INTERCEPTOR_INFO_FIELDS)
static const int kCanInterceptSymbolsBit = 0;
static const int kAllCanReadBit = 1;
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index 541d416e5c..4132aec04d 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -26,7 +26,8 @@ CAST_ACCESSOR(AliasedArgumentsEntry)
CAST_ACCESSOR(SloppyArgumentsElements)
CAST_ACCESSOR(JSArgumentsObject)
-SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
+SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot,
+ kAliasedContextSlotOffset)
Context SloppyArgumentsElements::context() {
return Context::cast(get(kContextIndex));
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 4d2e18ff53..242b89f7ad 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -30,14 +30,10 @@ class JSArgumentsObject : public JSObject {
// mode already. Only use the below layout with the specific initial maps.
class JSArgumentsObjectWithLength : public JSArgumentsObject {
public:
-// Layout description.
-#define JS_ARGUMENTS_OBJECT_WITH_LENGTH_FIELDS(V) \
- V(kLengthOffset, kTaggedSize) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_ARGUMENTS_OBJECT_WITH_LENGTH_FIELDS)
-#undef JS_ARGUMENTS_OBJECT_WITH_LENGTH_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSARGUMENTS_OBJECT_WITH_LENGTH_FIELDS)
// Indices of in-object properties.
static const int kLengthIndex = 0;
@@ -145,15 +141,8 @@ class AliasedArgumentsEntry : public Struct {
DECL_PRINTER(AliasedArgumentsEntry)
DECL_VERIFIER(AliasedArgumentsEntry)
-// Layout description.
-#define ALIASED_ARGUMENTS_FIELDS(V) \
- V(kAliasedContextSlot, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- ALIASED_ARGUMENTS_FIELDS)
-#undef ALIASED_ARGUMENTS_FIELDS
+ TORQUE_GENERATED_ALIASED_ARGUMENTS_ENTRY_FIELDS)
OBJECT_CONSTRUCTORS(AliasedArgumentsEntry, Struct);
};
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index ccf48f71b7..49a8728f15 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -19,8 +19,14 @@
#include "src/objects/bigint.h"
+#include "src/conversions.h"
#include "src/double.h"
+#include "src/heap/factory.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/isolate-inl.h"
+#include "src/objects-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/instance-type-inl.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -184,22 +190,22 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
// Internal field setters. Non-mutable BigInts don't have these.
#include "src/objects/object-macros.h"
inline void set_sign(bool new_sign) {
- int32_t bitfield = RELAXED_READ_INT32_FIELD(this, kBitfieldOffset);
+ int32_t bitfield = RELAXED_READ_INT32_FIELD(*this, kBitfieldOffset);
bitfield = SignBits::update(bitfield, new_sign);
- RELAXED_WRITE_INT32_FIELD(this, kBitfieldOffset, bitfield);
+ RELAXED_WRITE_INT32_FIELD(*this, kBitfieldOffset, bitfield);
}
inline void synchronized_set_length(int new_length) {
- int32_t bitfield = RELAXED_READ_INT32_FIELD(this, kBitfieldOffset);
+ int32_t bitfield = RELAXED_READ_INT32_FIELD(*this, kBitfieldOffset);
bitfield = LengthBits::update(bitfield, new_length);
- RELEASE_WRITE_INT32_FIELD(this, kBitfieldOffset, bitfield);
+ RELEASE_WRITE_INT32_FIELD(*this, kBitfieldOffset, bitfield);
}
inline void initialize_bitfield(bool sign, int length) {
int32_t bitfield = LengthBits::encode(length) | SignBits::encode(sign);
- WRITE_INT32_FIELD(this, kBitfieldOffset, bitfield);
+ WRITE_INT32_FIELD(*this, kBitfieldOffset, bitfield);
}
inline void set_digit(int n, digit_t value) {
SLOW_DCHECK(0 <= n && n < length());
- Address address = FIELD_ADDR(this, kDigitsOffset + n * kDigitSize);
+ Address address = FIELD_ADDR(*this, kDigitsOffset + n * kDigitSize);
(*reinterpret_cast<digit_t*>(address)) = value;
}
@@ -209,7 +215,7 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
NEVER_READ_ONLY_SPACE
- OBJECT_CONSTRUCTORS(MutableBigInt, FreshlyAllocatedBigInt)
+ OBJECT_CONSTRUCTORS(MutableBigInt, FreshlyAllocatedBigInt);
};
OBJECT_CONSTRUCTORS_IMPL(MutableBigInt, FreshlyAllocatedBigInt)
@@ -479,8 +485,23 @@ MaybeHandle<BigInt> BigInt::Multiply(Isolate* isolate, Handle<BigInt> x,
return MaybeHandle<BigInt>();
}
result->InitializeDigits(result_length);
+ uintptr_t work_estimate = 0;
for (int i = 0; i < x->length(); i++) {
MutableBigInt::MultiplyAccumulate(y, x->digit(i), result, i);
+
+ // Multiplication can take a long time. Check for interrupt requests
+ // every now and then (roughly every 10-20 milliseconds -- rarely
+ // enough not to create noticeable overhead, frequently enough not to
+ // appear frozen).
+ work_estimate += y->length();
+ if (work_estimate > 5000000) {
+ work_estimate = 0;
+ StackLimitCheck interrupt_check(isolate);
+ if (interrupt_check.InterruptRequested() &&
+ isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) {
+ return MaybeHandle<BigInt>();
+ }
+ }
}
result->set_sign(x->sign() != y->sign());
return MutableBigInt::MakeImmutable(result);
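
The same throttled interrupt check is added to all three BigInt hot loops in this file: multiplication here, long division, and decimal formatting further down (the latter with a smaller 500000 threshold). Distilled to its skeleton, with step_cost standing in for y->length(), n, or length:

  uintptr_t work_estimate = 0;
  for (/* each digit-sized step of the algorithm */;;) {
    // ... one unit of work ...
    work_estimate += step_cost;     // cheap per-iteration bookkeeping
    if (work_estimate > 5000000) {  // amortizes the expensive check
      work_estimate = 0;
      StackLimitCheck interrupt_check(isolate);
      if (interrupt_check.InterruptRequested() &&
          isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) {
        return {};  // terminated, or an exception was scheduled
      }
    }
  }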
@@ -1525,6 +1546,7 @@ bool MutableBigInt::AbsoluteDivLarge(Isolate* isolate,
// Iterate over the dividend's digits (like the "grad school" algorithm).
// {vn1} is the divisor's most significant digit.
digit_t vn1 = divisor->digit(n - 1);
+ uintptr_t work_estimate = 0;
for (int j = m; j >= 0; j--) {
// D3.
// Estimate the current iteration's quotient digit (see Knuth for details).
@@ -1568,6 +1590,20 @@ bool MutableBigInt::AbsoluteDivLarge(Isolate* isolate,
}
if (quotient != nullptr) q->set_digit(j, qhat);
+
+ // Division can take a long time. Check for interrupt requests every
+ // now and then (roughly every 10-20 milliseconds -- rarely enough
+ // not to create noticeable overhead, frequently enough not to appear
+ // frozen).
+ work_estimate += n;
+ if (work_estimate > 5000000) {
+ work_estimate = 0;
+ StackLimitCheck interrupt_check(isolate);
+ if (interrupt_check.InterruptRequested() &&
+ isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) {
+ return false;
+ }
+ }
}
if (quotient != nullptr) {
*quotient = q; // Caller will right-trim.
@@ -2080,6 +2116,7 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
// In the first round, divide the input, allocating a new BigInt for
// the result == rest; from then on divide the rest in-place.
Handle<BigIntBase>* dividend = &x;
+ uintptr_t work_estimate = 0;
do {
digit_t chunk;
AbsoluteDivSmall(isolate, *dividend, chunk_divisor, &rest, &chunk);
@@ -2096,6 +2133,32 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
// We can never clear more than one digit per iteration, because
// chunk_divisor is smaller than max digit value.
DCHECK_GT(rest->digit(nonzero_digit), 0);
+
+ // String formatting can take a long time. Check for interrupt requests
+ // every now and then (roughly every 10-20 milliseconds -- rarely
+ // enough not to create noticeable overhead, frequently enough not to
+ // appear frozen).
+ work_estimate += length;
+ if (work_estimate > 500000) {
+ work_estimate = 0;
+ StackLimitCheck interrupt_check(isolate);
+ if (interrupt_check.InterruptRequested()) {
+ {
+ AllowHeapAllocation might_throw;
+ if (isolate->stack_guard()->HandleInterrupts()->IsException(
+ isolate)) {
+ return MaybeHandle<String>();
+ }
+ }
+ // If there was an interrupt request but no termination, reload
+ // the raw characters pointer (as the string might have moved).
+ chars = result->GetChars(no_gc);
+ }
+ if (interrupt_check.InterruptRequested() &&
+ isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) {
+ return MaybeHandle<String>();
+ }
+ }
} while (nonzero_digit > 0);
last_digit = rest->digit(0);
}
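
The formatting loop's variant is more involved because HandleInterrupts may allocate, and an allocation can move the result string out from under the raw character pointer. The discipline, reduced to a sketch (result and no_gc as in the code above):

  chars = result->GetChars(no_gc);  // raw interior pointer, GC-unsafe
  {
    AllowHeapAllocation might_throw;
    // HandleInterrupts() runs here; it may allocate and move 'result'.
  }
  chars = result->GetChars(no_gc);  // re-fetch before any further writes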
@@ -2198,9 +2261,8 @@ MaybeHandle<BigInt> BigInt::AsUintN(Isolate* isolate, uint64_t n,
int needed_length = static_cast<int>((n + kDigitBits - 1) / kDigitBits);
if (x->length() < needed_length) return x;
int bits_in_top_digit = n % kDigitBits;
- if (bits_in_top_digit == 0) {
- if (x->length() == needed_length) return x;
- } else {
+ if (x->length() == needed_length) {
+ if (bits_in_top_digit == 0) return x;
digit_t top_digit = x->digit(needed_length - 1);
if ((top_digit >> bits_in_top_digit) == 0) return x;
}
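
The AsUintN restructuring ensures x is returned unchanged only when it already fits in n bits; the top-digit check is now guarded by the equal-length test instead of applying whenever n is not a digit multiple. Restated as a standalone predicate (same logic, sign handling from earlier in the function omitted):

  bool FitsUnchanged(Handle<BigInt> x, int needed_length, int bits_in_top_digit) {
    if (x->length() < needed_length) return true;   // fewer digits than n bits
    if (x->length() > needed_length) return false;  // must truncate
    if (bits_in_top_digit == 0) return true;        // digits end exactly at bit n
    // Equal digit counts: unchanged iff nothing is set at or above bit n.
    return (x->digit(needed_length - 1) >> bits_in_top_digit) == 0;
  }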
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 096c4d3f40..ae1ffe6866 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -25,13 +25,13 @@ class ValueSerializer;
class BigIntBase : public HeapObject {
public:
inline int length() const {
- int32_t bitfield = RELAXED_READ_INT32_FIELD(this, kBitfieldOffset);
+ int32_t bitfield = RELAXED_READ_INT32_FIELD(*this, kBitfieldOffset);
return LengthBits::decode(static_cast<uint32_t>(bitfield));
}
// For use by the GC.
inline int synchronized_length() const {
- int32_t bitfield = ACQUIRE_READ_INT32_FIELD(this, kBitfieldOffset);
+ int32_t bitfield = ACQUIRE_READ_INT32_FIELD(*this, kBitfieldOffset);
return LengthBits::decode(static_cast<uint32_t>(bitfield));
}
@@ -81,13 +81,13 @@ class BigIntBase : public HeapObject {
// sign() == true means negative.
inline bool sign() const {
- int32_t bitfield = RELAXED_READ_INT32_FIELD(this, kBitfieldOffset);
+ int32_t bitfield = RELAXED_READ_INT32_FIELD(*this, kBitfieldOffset);
return SignBits::decode(static_cast<uint32_t>(bitfield));
}
inline digit_t digit(int n) const {
SLOW_DCHECK(0 <= n && n < length());
- Address address = FIELD_ADDR(this, kDigitsOffset + n * kDigitSize);
+ Address address = FIELD_ADDR(*this, kDigitsOffset + n * kDigitSize);
return *reinterpret_cast<digit_t*>(address);
}
@@ -181,6 +181,8 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
return is_zero() ? 0 : ComputeLongHash(static_cast<uint64_t>(digit(0)));
}
+ bool IsNegative() const { return sign(); }
+
static bool EqualToString(Isolate* isolate, Handle<BigInt> x,
Handle<String> y);
static bool EqualToNumber(Handle<BigInt> x, Handle<Object> y);
diff --git a/deps/v8/src/objects/cell-inl.h b/deps/v8/src/objects/cell-inl.h
index 7e32059fee..c48a82fd31 100644
--- a/deps/v8/src/objects/cell-inl.h
+++ b/deps/v8/src/objects/cell-inl.h
@@ -7,7 +7,8 @@
#include "src/objects/cell.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h
index 0792bc5d12..cd76dee479 100644
--- a/deps/v8/src/objects/cell.h
+++ b/deps/v8/src/objects/cell.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_CELL_H_
#include "src/objects/heap-object.h"
+#include "torque-generated/class-definitions-from-dsl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,14 +29,8 @@ class Cell : public HeapObject {
DECL_PRINTER(Cell)
DECL_VERIFIER(Cell)
- // Layout description.
-#define CELL_FIELDS(V) \
- V(kValueOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CELL_FIELDS)
-#undef CELL_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_CELL_FIELDS)
typedef FixedBodyDescriptor<kValueOffset, kSize, kSize> BodyDescriptor;
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 476e7c5ce4..905fdd5376 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -7,12 +7,14 @@
#include "src/objects/code.h"
+#include "src/code-desc.h"
#include "src/interpreter/bytecode-register.h"
#include "src/isolate.h"
#include "src/objects/dictionary.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/maybe-object-inl.h"
+#include "src/objects/oddball.h"
#include "src/objects/smi-inl.h"
#include "src/v8memory.h"
@@ -194,12 +196,15 @@ OBJECT_CONSTRUCTORS_IMPL(Code, HeapObject)
NEVER_READ_ONLY_SPACE_IMPL(Code)
INT_ACCESSORS(Code, raw_instruction_size, kInstructionSizeOffset)
+INT_ACCESSORS(Code, safepoint_table_offset, kSafepointTableOffsetOffset)
INT_ACCESSORS(Code, handler_table_offset, kHandlerTableOffsetOffset)
-#define CODE_ACCESSORS(name, type, offset) \
- ACCESSORS_CHECKED2(Code, name, type, offset, true, !Heap::InNewSpace(value))
+INT_ACCESSORS(Code, code_comments_offset, kCodeCommentsOffsetOffset)
+#define CODE_ACCESSORS(name, type, offset) \
+ ACCESSORS_CHECKED2(Code, name, type, offset, true, \
+ !ObjectInYoungGeneration(value))
#define SYNCHRONIZED_CODE_ACCESSORS(name, type, offset) \
SYNCHRONIZED_ACCESSORS_CHECKED2(Code, name, type, offset, true, \
- !Heap::InNewSpace(value))
+ !ObjectInYoungGeneration(value))
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
@@ -211,15 +216,17 @@ SYNCHRONIZED_CODE_ACCESSORS(code_data_container, CodeDataContainer,
#undef SYNCHRONIZED_CODE_ACCESSORS
void Code::WipeOutHeader() {
- WRITE_FIELD(this, kRelocationInfoOffset, Smi::FromInt(0));
- WRITE_FIELD(this, kDeoptimizationDataOffset, Smi::FromInt(0));
- WRITE_FIELD(this, kSourcePositionTableOffset, Smi::FromInt(0));
- WRITE_FIELD(this, kCodeDataContainerOffset, Smi::FromInt(0));
+ WRITE_FIELD(*this, kRelocationInfoOffset, Smi::FromInt(0));
+ WRITE_FIELD(*this, kDeoptimizationDataOffset, Smi::FromInt(0));
+ WRITE_FIELD(*this, kSourcePositionTableOffset, Smi::FromInt(0));
+ WRITE_FIELD(*this, kCodeDataContainerOffset, Smi::FromInt(0));
}
void Code::clear_padding() {
- memset(reinterpret_cast<void*>(address() + kHeaderPaddingStart), 0,
- kHeaderSize - kHeaderPaddingStart);
+ if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
+ memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
+ FIELD_SIZE(kOptionalPaddingOffset));
+ }
Address data_end =
has_unwinding_info() ? unwinding_info_end() : raw_instruction_end();
memset(reinterpret_cast<void*>(data_end), 0,
@@ -251,7 +258,7 @@ int Code::InstructionSize() const {
}
Address Code::raw_instruction_start() const {
- return FIELD_ADDR(this, kHeaderSize);
+ return FIELD_ADDR(*this, kHeaderSize);
}
Address Code::InstructionStart() const {
@@ -282,17 +289,17 @@ int Code::GetUnwindingInfoSizeOffset() const {
int Code::unwinding_info_size() const {
DCHECK(has_unwinding_info());
return static_cast<int>(
- READ_UINT64_FIELD(this, GetUnwindingInfoSizeOffset()));
+ READ_UINT64_FIELD(*this, GetUnwindingInfoSizeOffset()));
}
void Code::set_unwinding_info_size(int value) {
DCHECK(has_unwinding_info());
- WRITE_UINT64_FIELD(this, GetUnwindingInfoSizeOffset(), value);
+ WRITE_UINT64_FIELD(*this, GetUnwindingInfoSizeOffset(), value);
}
Address Code::unwinding_info_start() const {
DCHECK(has_unwinding_info());
- return FIELD_ADDR(this, GetUnwindingInfoSizeOffset()) + kInt64Size;
+ return FIELD_ADDR(*this, GetUnwindingInfoSizeOffset()) + kInt64Size;
}
Address Code::unwinding_info_end() const {
@@ -316,7 +323,7 @@ int Code::SizeIncludingMetadata() const {
}
ByteArray Code::unchecked_relocation_info() const {
- return ByteArray::unchecked_cast(READ_FIELD(this, kRelocationInfoOffset));
+ return ByteArray::unchecked_cast(READ_FIELD(*this, kRelocationInfoOffset));
}
byte* Code::relocation_start() const {
@@ -362,7 +369,7 @@ void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
int Code::CodeSize() const { return SizeFor(body_size()); }
Code::Kind Code::kind() const {
- return KindField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
+ return KindField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
}
void Code::initialize_flags(Kind kind, bool has_unwinding_info,
@@ -375,7 +382,7 @@ void Code::initialize_flags(Kind kind, bool has_unwinding_info,
IsTurbofannedField::encode(is_turbofanned) |
StackSlotsField::encode(stack_slots) |
IsOffHeapTrampoline::encode(is_off_heap_trampoline);
- WRITE_UINT32_FIELD(this, kFlagsOffset, flags);
+ WRITE_UINT32_FIELD(*this, kFlagsOffset, flags);
DCHECK_IMPLIES(stack_slots != 0, has_safepoint_info());
}
@@ -401,11 +408,11 @@ inline bool Code::has_tagged_params() const {
}
inline bool Code::has_unwinding_info() const {
- return HasUnwindingInfoField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
+ return HasUnwindingInfoField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
}
inline bool Code::is_turbofanned() const {
- return IsTurbofannedField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
+ return IsTurbofannedField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
}
inline bool Code::can_have_weak_objects() const {
@@ -448,7 +455,7 @@ inline void Code::set_is_exception_caught(bool value) {
}
inline bool Code::is_off_heap_trampoline() const {
- return IsOffHeapTrampoline::decode(READ_UINT32_FIELD(this, kFlagsOffset));
+ return IsOffHeapTrampoline::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
}
inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
@@ -458,14 +465,14 @@ inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
}
int Code::builtin_index() const {
- int index = READ_INT_FIELD(this, kBuiltinIndexOffset);
+ int index = READ_INT_FIELD(*this, kBuiltinIndexOffset);
DCHECK(index == -1 || Builtins::IsBuiltinId(index));
return index;
}
void Code::set_builtin_index(int index) {
DCHECK(index == -1 || Builtins::IsBuiltinId(index));
- WRITE_INT_FIELD(this, kBuiltinIndexOffset, index);
+ WRITE_INT_FIELD(*this, kBuiltinIndexOffset, index);
}
bool Code::is_builtin() const { return builtin_index() != -1; }
@@ -476,19 +483,7 @@ bool Code::has_safepoint_info() const {
int Code::stack_slots() const {
DCHECK(has_safepoint_info());
- return StackSlotsField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
-}
-
-int Code::safepoint_table_offset() const {
- DCHECK(has_safepoint_info());
- return READ_INT32_FIELD(this, kSafepointTableOffsetOffset);
-}
-
-void Code::set_safepoint_table_offset(int offset) {
- CHECK_LE(0, offset);
- DCHECK(has_safepoint_info() || offset == 0); // Allow zero initialization.
- DCHECK(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_INT32_FIELD(this, kSafepointTableOffsetOffset, offset);
+ return StackSlotsField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
}
bool Code::marked_for_deoptimization() const {
@@ -538,48 +533,23 @@ bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; }
int Code::constant_pool_offset() const {
if (!FLAG_enable_embedded_constant_pool) return code_comments_offset();
- return READ_INT_FIELD(this, kConstantPoolOffset);
+ return READ_INT_FIELD(*this, kConstantPoolOffsetOffset);
}
void Code::set_constant_pool_offset(int value) {
if (!FLAG_enable_embedded_constant_pool) return;
DCHECK_LE(value, InstructionSize());
- WRITE_INT_FIELD(this, kConstantPoolOffset, value);
+ WRITE_INT_FIELD(*this, kConstantPoolOffsetOffset, value);
}
-int Code::constant_pool_size() const {
- if (!FLAG_enable_embedded_constant_pool) return 0;
- return code_comments_offset() - constant_pool_offset();
-}
Address Code::constant_pool() const {
- if (FLAG_enable_embedded_constant_pool) {
- int offset = constant_pool_offset();
- if (offset < code_comments_offset()) {
- return InstructionStart() + offset;
- }
- }
- return kNullAddress;
-}
-
-int Code::code_comments_offset() const {
- int offset = READ_INT_FIELD(this, kCodeCommentsOffset);
- DCHECK_LE(0, offset);
- DCHECK_LE(offset, InstructionSize());
- return offset;
-}
-
-void Code::set_code_comments_offset(int offset) {
- DCHECK_LE(0, offset);
- DCHECK_LE(offset, InstructionSize());
- WRITE_INT_FIELD(this, kCodeCommentsOffset, offset);
+ if (!has_constant_pool()) return kNullAddress;
+ return InstructionStart() + constant_pool_offset();
}
Address Code::code_comments() const {
- int offset = code_comments_offset();
- if (offset < InstructionSize()) {
- return InstructionStart() + offset;
- }
- return kNullAddress;
+ if (!has_code_comments()) return kNullAddress;
+ return InstructionStart() + code_comments_offset();
}
Code Code::GetCodeFromTargetAddress(Address address) {
@@ -635,24 +605,24 @@ void CodeDataContainer::clear_padding() {
kSize - kUnalignedSize);
}
-byte BytecodeArray::get(int index) {
+byte BytecodeArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+ return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize);
}
void BytecodeArray::set(int index, byte value) {
DCHECK(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
+ WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize, value);
}
void BytecodeArray::set_frame_size(int frame_size) {
DCHECK_GE(frame_size, 0);
DCHECK(IsAligned(frame_size, kSystemPointerSize));
- WRITE_INT_FIELD(this, kFrameSizeOffset, frame_size);
+ WRITE_INT_FIELD(*this, kFrameSizeOffset, frame_size);
}
int BytecodeArray::frame_size() const {
- return READ_INT_FIELD(this, kFrameSizeOffset);
+ return READ_INT_FIELD(*this, kFrameSizeOffset);
}
int BytecodeArray::register_count() const {
@@ -663,14 +633,14 @@ void BytecodeArray::set_parameter_count(int number_of_parameters) {
DCHECK_GE(number_of_parameters, 0);
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- WRITE_INT_FIELD(this, kParameterSizeOffset,
+ WRITE_INT_FIELD(*this, kParameterSizeOffset,
(number_of_parameters << kSystemPointerSizeLog2));
}
interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
const {
int register_operand =
- READ_INT_FIELD(this, kIncomingNewTargetOrGeneratorRegisterOffset);
+ READ_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset);
if (register_operand == 0) {
return interpreter::Register::invalid_value();
} else {
@@ -681,38 +651,38 @@ interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
void BytecodeArray::set_incoming_new_target_or_generator_register(
interpreter::Register incoming_new_target_or_generator_register) {
if (!incoming_new_target_or_generator_register.is_valid()) {
- WRITE_INT_FIELD(this, kIncomingNewTargetOrGeneratorRegisterOffset, 0);
+ WRITE_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset, 0);
} else {
DCHECK(incoming_new_target_or_generator_register.index() <
register_count());
DCHECK_NE(0, incoming_new_target_or_generator_register.ToOperand());
- WRITE_INT_FIELD(this, kIncomingNewTargetOrGeneratorRegisterOffset,
+ WRITE_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset,
incoming_new_target_or_generator_register.ToOperand());
}
}
int BytecodeArray::interrupt_budget() const {
- return READ_INT_FIELD(this, kInterruptBudgetOffset);
+ return READ_INT_FIELD(*this, kInterruptBudgetOffset);
}
void BytecodeArray::set_interrupt_budget(int interrupt_budget) {
DCHECK_GE(interrupt_budget, 0);
- WRITE_INT_FIELD(this, kInterruptBudgetOffset, interrupt_budget);
+ WRITE_INT_FIELD(*this, kInterruptBudgetOffset, interrupt_budget);
}
int BytecodeArray::osr_loop_nesting_level() const {
- return READ_INT8_FIELD(this, kOSRNestingLevelOffset);
+ return READ_INT8_FIELD(*this, kOSRNestingLevelOffset);
}
void BytecodeArray::set_osr_loop_nesting_level(int depth) {
DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
- WRITE_INT8_FIELD(this, kOSRNestingLevelOffset, depth);
+ WRITE_INT8_FIELD(*this, kOSRNestingLevelOffset, depth);
}
BytecodeArray::Age BytecodeArray::bytecode_age() const {
// Bytecode is aged by the concurrent marker.
- return static_cast<Age>(RELAXED_READ_INT8_FIELD(this, kBytecodeAgeOffset));
+ return static_cast<Age>(RELAXED_READ_INT8_FIELD(*this, kBytecodeAgeOffset));
}
void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
@@ -720,13 +690,13 @@ void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
DCHECK_LE(age, kLastBytecodeAge);
STATIC_ASSERT(kLastBytecodeAge <= kMaxInt8);
// Bytecode is aged by the concurrent marker.
- RELAXED_WRITE_INT8_FIELD(this, kBytecodeAgeOffset, static_cast<int8_t>(age));
+ RELAXED_WRITE_INT8_FIELD(*this, kBytecodeAgeOffset, static_cast<int8_t>(age));
}
int BytecodeArray::parameter_count() const {
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- return READ_INT_FIELD(this, kParameterSizeOffset) >> kSystemPointerSizeLog2;
+ return READ_INT_FIELD(*this, kParameterSizeOffset) >> kSystemPointerSizeLog2;
}
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
@@ -744,9 +714,17 @@ Address BytecodeArray::GetFirstBytecodeAddress() {
return ptr() - kHeapObjectTag + kHeaderSize;
}
+bool BytecodeArray::HasSourcePositionTable() {
+ Object maybe_table = source_position_table();
+ return !maybe_table->IsUndefined();
+}
+
ByteArray BytecodeArray::SourcePositionTable() {
Object maybe_table = source_position_table();
if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ if (maybe_table->IsUndefined(roots)) return roots.empty_byte_array();
+
DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
return SourcePositionTableWithFrameCache::cast(maybe_table)
->source_position_table();
@@ -754,7 +732,7 @@ ByteArray BytecodeArray::SourcePositionTable() {
void BytecodeArray::ClearFrameCacheFromSourcePositionTable() {
Object maybe_table = source_position_table();
- if (maybe_table->IsByteArray()) return;
+ if (maybe_table->IsUndefined() || maybe_table->IsByteArray()) return;
DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
set_source_position_table(SourcePositionTableWithFrameCache::cast(maybe_table)
->source_position_table());
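
Both hunks above accommodate a third state for the source_position_table slot, which previously was always a table. Summarizing the dispatch implemented here:

  Object t = source_position_table();
  // undefined                          -> positions not collected (yet)
  // ByteArray                          -> the encoded source position table
  // SourcePositionTableWithFrameCache  -> table wrapped with a frame cache
  DCHECK(t->IsUndefined() || t->IsByteArray() ||
         t->IsSourcePositionTableWithFrameCache());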
@@ -770,6 +748,18 @@ int BytecodeArray::SizeIncludingMetadata() {
return size;
}
+DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
+
+DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
+DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
+DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
+
BailoutId DeoptimizationData::BytecodeOffset(int i) {
return BailoutId(BytecodeOffsetRaw(i)->value());
}
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
new file mode 100644
index 0000000000..f874855fbe
--- /dev/null
+++ b/deps/v8/src/objects/code.cc
@@ -0,0 +1,1080 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iomanip>
+
+#include "src/objects/code.h"
+
+#include "src/assembler-inl.h"
+#include "src/cpu-features.h"
+#include "src/deoptimizer.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-decoder.h"
+#include "src/interpreter/interpreter.h"
+#include "src/objects/allocation-site-inl.h"
+#include "src/ostreams.h"
+#include "src/reloc-info.h"
+#include "src/roots-inl.h"
+#include "src/safepoint-table.h"
+#include "src/snapshot/embedded-data.h"
+
+#ifdef ENABLE_DISASSEMBLER
+#include "src/code-comments.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/eh-frame.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+int Code::safepoint_table_size() const {
+ DCHECK_GE(handler_table_offset() - safepoint_table_offset(), 0);
+ return handler_table_offset() - safepoint_table_offset();
+}
+
+bool Code::has_safepoint_table() const { return safepoint_table_size() > 0; }
+
+int Code::handler_table_size() const {
+ DCHECK_GE(constant_pool_offset() - handler_table_offset(), 0);
+ return constant_pool_offset() - handler_table_offset();
+}
+
+bool Code::has_handler_table() const { return handler_table_size() > 0; }
+
+int Code::constant_pool_size() const {
+ const int size = code_comments_offset() - constant_pool_offset();
+ DCHECK_IMPLIES(!FLAG_enable_embedded_constant_pool, size == 0);
+ DCHECK_GE(size, 0);
+ return size;
+}
+
+bool Code::has_constant_pool() const { return constant_pool_size() > 0; }
+
+int Code::code_comments_size() const {
+ DCHECK_GE(InstructionSize() - code_comments_offset(), 0);
+ return InstructionSize() - code_comments_offset();
+}
+
+bool Code::has_code_comments() const { return code_comments_size() > 0; }
+
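These helpers define each region's size as the distance to the next region's offset, which pins down the layout of a Code object's instruction area. Spelled out, with offsets relative to InstructionStart() and each region ending where the next begins:

  // [0 .......................... safepoint_table_offset())  machine code
  // [safepoint_table_offset() ... handler_table_offset())    safepoint table
  // [handler_table_offset() ..... constant_pool_offset())    handler table
  // [constant_pool_offset() ..... code_comments_offset())    constant pool
  // [code_comments_offset() ..... InstructionSize())         code comments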
+int Code::ExecutableInstructionSize() const { return safepoint_table_offset(); }
+
+void Code::ClearEmbeddedObjects(Heap* heap) {
+ HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER);
+ }
+ }
+ set_embedded_objects_cleared(true);
+}
+
+void Code::Relocate(intptr_t delta) {
+ for (RelocIterator it(*this, RelocInfo::kApplyMask); !it.done(); it.next()) {
+ it.rinfo()->apply(delta);
+ }
+ FlushICache();
+}
+
+void Code::FlushICache() const {
+ FlushInstructionCache(raw_instruction_start(), raw_instruction_size());
+}
+
+void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
+ // Copy code.
+ CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
+ static_cast<size_t>(desc.instr_size));
+
+ // Copy unwinding info, if any.
+ if (desc.unwinding_info) {
+ DCHECK_GT(desc.unwinding_info_size, 0);
+ set_unwinding_info_size(desc.unwinding_info_size);
+ CopyBytes(reinterpret_cast<byte*>(unwinding_info_start()),
+ desc.unwinding_info,
+ static_cast<size_t>(desc.unwinding_info_size));
+ }
+
+ // Copy reloc info.
+ CopyRelocInfoToByteArray(unchecked_relocation_info(), desc);
+
+ // Unbox handles and relocate.
+ Assembler* origin = desc.origin;
+ AllowDeferredHandleDereference embedding_raw_address;
+ const int mode_mask = RelocInfo::PostCodegenRelocationMask();
+ for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
+ it.rinfo()->set_target_object(heap, *p, UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+ } else if (RelocInfo::IsCodeTargetMode(mode)) {
+ // Rewrite code handles to direct pointers to the first instruction in the
+ // code object.
+ Handle<Object> p = it.rinfo()->target_object_handle(origin);
+ Code code = Code::cast(*p);
+ it.rinfo()->set_target_address(code->raw_instruction_start(),
+ UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ Address p = it.rinfo()->target_runtime_entry(origin);
+ it.rinfo()->set_target_runtime_entry(p, UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+ } else {
+ intptr_t delta =
+ raw_instruction_start() - reinterpret_cast<Address>(desc.buffer);
+ it.rinfo()->apply(delta);
+ }
+ }
+}
+
+SafepointEntry Code::GetSafepointEntry(Address pc) {
+ SafepointTable table(*this);
+ return table.FindEntry(pc);
+}
+
+int Code::OffHeapInstructionSize() const {
+ DCHECK(is_off_heap_trampoline());
+ if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_size();
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.InstructionSizeOfBuiltin(builtin_index());
+}
+
+Address Code::OffHeapInstructionStart() const {
+ DCHECK(is_off_heap_trampoline());
+ if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_start();
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.InstructionStartOfBuiltin(builtin_index());
+}
+
+Address Code::OffHeapInstructionEnd() const {
+ DCHECK(is_off_heap_trampoline());
+ if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_end();
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.InstructionStartOfBuiltin(builtin_index()) +
+ d.InstructionSizeOfBuiltin(builtin_index());
+}
+
+namespace {
+template <typename Code>
+void SetStackFrameCacheCommon(Isolate* isolate, Handle<Code> code,
+ Handle<SimpleNumberDictionary> cache) {
+ Handle<Object> maybe_table(code->source_position_table(), isolate);
+ if (maybe_table->IsSourcePositionTableWithFrameCache()) {
+ Handle<SourcePositionTableWithFrameCache>::cast(maybe_table)
+ ->set_stack_frame_cache(*cache);
+ return;
+ }
+ DCHECK(maybe_table->IsUndefined() || maybe_table->IsByteArray());
+ Handle<ByteArray> table(Handle<ByteArray>::cast(maybe_table));
+ Handle<SourcePositionTableWithFrameCache> table_with_cache =
+ isolate->factory()->NewSourcePositionTableWithFrameCache(table, cache);
+ code->set_source_position_table(*table_with_cache);
+}
+} // namespace
+
+// static
+void AbstractCode::SetStackFrameCache(Handle<AbstractCode> abstract_code,
+ Handle<SimpleNumberDictionary> cache) {
+ if (abstract_code->IsCode()) {
+ SetStackFrameCacheCommon(
+ abstract_code->GetIsolate(),
+ handle(abstract_code->GetCode(), abstract_code->GetIsolate()), cache);
+ } else {
+ SetStackFrameCacheCommon(
+ abstract_code->GetIsolate(),
+ handle(abstract_code->GetBytecodeArray(), abstract_code->GetIsolate()),
+ cache);
+ }
+}
+
+namespace {
+template <typename Code>
+void DropStackFrameCacheCommon(Code code) {
+ i::Object maybe_table = code->source_position_table();
+ if (maybe_table->IsUndefined() || maybe_table->IsByteArray()) return;
+ DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ code->set_source_position_table(
+ i::SourcePositionTableWithFrameCache::cast(maybe_table)
+ ->source_position_table());
+}
+} // namespace
+
+void AbstractCode::DropStackFrameCache() {
+ if (IsCode()) {
+ DropStackFrameCacheCommon(GetCode());
+ } else {
+ DropStackFrameCacheCommon(GetBytecodeArray());
+ }
+}
+
+int AbstractCode::SourcePosition(int offset) {
+ int position = 0;
+ // Subtract one because the current PC is one instruction after the call site.
+ if (IsCode()) offset--;
+ for (SourcePositionTableIterator iterator(source_position_table());
+ !iterator.done() && iterator.code_offset() <= offset;
+ iterator.Advance()) {
+ position = iterator.source_position().ScriptOffset();
+ }
+ return position;
+}
+
+int AbstractCode::SourceStatementPosition(int offset) {
+ // First find the closest position.
+ int position = SourcePosition(offset);
+ // Now find the closest statement position before the position.
+ int statement_position = 0;
+ for (SourcePositionTableIterator it(source_position_table()); !it.done();
+ it.Advance()) {
+ if (it.is_statement()) {
+ int p = it.source_position().ScriptOffset();
+ if (statement_position < p && p <= position) {
+ statement_position = p;
+ }
+ }
+ }
+ return statement_position;
+}
+
+void Code::PrintDeoptLocation(FILE* out, const char* str, Address pc) {
+ Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(*this, pc);
+ class SourcePosition pos = info.position;
+ if (info.deopt_reason != DeoptimizeReason::kUnknown || pos.IsKnown()) {
+ PrintF(out, "%s", str);
+ OFStream outstr(out);
+ pos.Print(outstr, *this);
+ PrintF(out, ", %s\n", DeoptimizeReasonToString(info.deopt_reason));
+ }
+}
+
+bool Code::CanDeoptAt(Address pc) {
+ DeoptimizationData deopt_data =
+ DeoptimizationData::cast(deoptimization_data());
+ Address code_start_address = InstructionStart();
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ Address address = code_start_address + deopt_data->Pc(i)->value();
+ if (address == pc && deopt_data->BytecodeOffset(i) != BailoutId::None()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Identify kind of code.
+const char* Code::Kind2String(Kind kind) {
+ switch (kind) {
+#define CASE(name) \
+ case name: \
+ return #name;
+ CODE_KIND_LIST(CASE)
+#undef CASE
+ case NUMBER_OF_KINDS:
+ break;
+ }
+ UNREACHABLE();
+}
+
+// Identify kind of code.
+const char* AbstractCode::Kind2String(Kind kind) {
+ if (kind < AbstractCode::INTERPRETED_FUNCTION)
+ return Code::Kind2String(static_cast<Code::Kind>(kind));
+ if (kind == AbstractCode::INTERPRETED_FUNCTION) return "INTERPRETED_FUNCTION";
+ UNREACHABLE();
+}
+
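+// Code is isolate-independent when none of its relocation entries refer to
+// isolate-specific addresses; on some architectures, code targets that
+// resolve to embedded builtins are exempt, as explained below.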
+bool Code::IsIsolateIndependent(Isolate* isolate) {
+ constexpr int all_real_modes_mask =
+ (1 << (RelocInfo::LAST_REAL_RELOC_MODE + 1)) - 1;
+ constexpr int mode_mask = all_real_modes_mask &
+ ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
+ ~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
+ ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
+ STATIC_ASSERT(RelocInfo::LAST_REAL_RELOC_MODE == RelocInfo::VENEER_POOL);
+ STATIC_ASSERT(mode_mask ==
+ (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
+
+ bool is_process_independent = true;
+ for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
+ defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
+ defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32)
+ // On these platforms we emit relative builtin-to-builtin
+ // jumps for isolate-independent builtins in the snapshot. They are later
+ // rewritten as pc-relative jumps to the off-heap instruction stream and are
+ // thus process-independent. See also: FinalizeEmbeddedCodeTargets.
+ if (RelocInfo::IsCodeTargetMode(it.rinfo()->rmode())) {
+ Address target_address = it.rinfo()->target_address();
+ if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue;
+
+ Code target = Code::GetCodeFromTargetAddress(target_address);
+ CHECK(target->IsCode());
+ if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
+ }
+#endif
+ is_process_independent = false;
+ }
+
+ return is_process_independent;
+}
+
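+// Returns true if |sfi| is the function this code was compiled for, or one
+// of the functions inlined into it according to the deoptimization data.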
+bool Code::Inlines(SharedFunctionInfo sfi) {
+ // We can only check for inlining for optimized code.
+ DCHECK(is_optimized_code());
+ DisallowHeapAllocation no_gc;
+ DeoptimizationData const data =
+ DeoptimizationData::cast(deoptimization_data());
+ if (data->length() == 0) return false;
+ if (data->SharedFunctionInfo() == sfi) return true;
+ FixedArray const literals = data->LiteralArray();
+ int const inlined_count = data->InlinedFunctionCount()->value();
+ for (int i = 0; i < inlined_count; ++i) {
+ if (SharedFunctionInfo::cast(literals->get(i)) == sfi) return true;
+ }
+ return false;
+}
+
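+// Iterates over all optimized Code objects by walking the isolate's list of
+// native contexts and, within each context, its optimized code list.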
+Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
+ isolate_ = isolate;
+ Object list = isolate->heap()->native_contexts_list();
+ next_context_ = list->IsUndefined(isolate_) ? Context() : Context::cast(list);
+}
+
+Code Code::OptimizedCodeIterator::Next() {
+ do {
+ Object next;
+ if (!current_code_.is_null()) {
+ // Get next code in the linked list.
+ next = current_code_->next_code_link();
+ } else if (!next_context_.is_null()) {
+ // Linked list of code exhausted. Move on to the list of the next context.
+ next = next_context_->OptimizedCodeListHead();
+ Object next_context = next_context_->next_context_link();
+ next_context_ = next_context->IsUndefined(isolate_)
+ ? Context()
+ : Context::cast(next_context);
+ } else {
+ // Exhausted contexts.
+ return Code();
+ }
+ current_code_ = next->IsUndefined(isolate_) ? Code() : Code::cast(next);
+ } while (current_code_.is_null());
+ DCHECK_EQ(Code::OPTIMIZED_FUNCTION, current_code_->kind());
+ return current_code_;
+}
+
+Handle<DeoptimizationData> DeoptimizationData::New(Isolate* isolate,
+ int deopt_entry_count,
+ PretenureFlag pretenure) {
+ return Handle<DeoptimizationData>::cast(isolate->factory()->NewFixedArray(
+ LengthFor(deopt_entry_count), pretenure));
+}
+
+Handle<DeoptimizationData> DeoptimizationData::Empty(Isolate* isolate) {
+ return Handle<DeoptimizationData>::cast(
+ isolate->factory()->empty_fixed_array());
+}
+
+SharedFunctionInfo DeoptimizationData::GetInlinedFunction(int index) {
+ if (index == -1) {
+ return SharedFunctionInfo::cast(SharedFunctionInfo());
+ } else {
+ return SharedFunctionInfo::cast(LiteralArray()->get(index));
+ }
+}
+
+#ifdef ENABLE_DISASSEMBLER
+
+const char* Code::GetName(Isolate* isolate) const {
+ if (kind() == BYTECODE_HANDLER) {
+ return isolate->interpreter()->LookupNameOfBytecodeHandler(*this);
+ } else {
+ // There are some handlers and ICs that we can also find names for with
+ // Builtins::Lookup.
+ return isolate->builtins()->Lookup(raw_instruction_start());
+ }
+}
+
+namespace {
+void print_pc(std::ostream& os, int pc) {
+ if (pc == -1) {
+ os << "NA";
+ } else {
+ os << std::hex << pc << std::dec;
+ }
+}
+} // anonymous namespace
+
+void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
+ if (length() == 0) {
+ os << "Deoptimization Input Data invalidated by lazy deoptimization\n";
+ return;
+ }
+
+ disasm::NameConverter converter;
+ int const inlined_function_count = InlinedFunctionCount()->value();
+ os << "Inlined functions (count = " << inlined_function_count << ")\n";
+ for (int id = 0; id < inlined_function_count; ++id) {
+ Object info = LiteralArray()->get(id);
+ os << " " << Brief(SharedFunctionInfo::cast(info)) << "\n";
+ }
+ os << "\n";
+ int deopt_count = DeoptCount();
+ os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n";
+ if (0 != deopt_count) {
+ os << " index bytecode-offset pc";
+ if (FLAG_print_code_verbose) os << " commands";
+ os << "\n";
+ }
+ for (int i = 0; i < deopt_count; i++) {
+ os << std::setw(6) << i << " " << std::setw(15)
+ << BytecodeOffset(i).ToInt() << " " << std::setw(4);
+ print_pc(os, Pc(i)->value());
+ os << std::setw(2);
+
+ if (!FLAG_print_code_verbose) {
+ os << "\n";
+ continue;
+ }
+
+ // Print details of the frame translation.
+ int translation_index = TranslationIndex(i)->value();
+ TranslationIterator iterator(TranslationByteArray(), translation_index);
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator.Next());
+ DCHECK(Translation::BEGIN == opcode);
+ int frame_count = iterator.Next();
+ int jsframe_count = iterator.Next();
+ int update_feedback_count = iterator.Next();
+ os << " " << Translation::StringFor(opcode)
+ << " {frame count=" << frame_count
+ << ", js frame count=" << jsframe_count
+ << ", update_feedback_count=" << update_feedback_count << "}\n";
+
+ while (iterator.HasNext() &&
+ Translation::BEGIN !=
+ (opcode = static_cast<Translation::Opcode>(iterator.Next()))) {
+ os << std::setw(31) << " " << Translation::StringFor(opcode) << " ";
+
+ switch (opcode) {
+ case Translation::BEGIN:
+ UNREACHABLE();
+ break;
+
+ case Translation::INTERPRETED_FRAME: {
+ int bytecode_offset = iterator.Next();
+ int shared_info_id = iterator.Next();
+ unsigned height = iterator.Next();
+ int return_value_offset = iterator.Next();
+ int return_value_count = iterator.Next();
+ Object shared_info = LiteralArray()->get(shared_info_id);
+ os << "{bytecode_offset=" << bytecode_offset << ", function="
+ << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << ", height=" << height << ", retval=@" << return_value_offset
+ << "(#" << return_value_count << ")}";
+ break;
+ }
+
+ case Translation::CONSTRUCT_STUB_FRAME: {
+ int bailout_id = iterator.Next();
+ int shared_info_id = iterator.Next();
+ Object shared_info = LiteralArray()->get(shared_info_id);
+ unsigned height = iterator.Next();
+ os << "{bailout_id=" << bailout_id << ", function="
+ << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << ", height=" << height << "}";
+ break;
+ }
+
+ case Translation::BUILTIN_CONTINUATION_FRAME:
+ case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
+ case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
+ int bailout_id = iterator.Next();
+ int shared_info_id = iterator.Next();
+ Object shared_info = LiteralArray()->get(shared_info_id);
+ unsigned height = iterator.Next();
+ os << "{bailout_id=" << bailout_id << ", function="
+ << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << ", height=" << height << "}";
+ break;
+ }
+
+ case Translation::ARGUMENTS_ADAPTOR_FRAME: {
+ int shared_info_id = iterator.Next();
+ Object shared_info = LiteralArray()->get(shared_info_id);
+ unsigned height = iterator.Next();
+ os << "{function="
+ << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << ", height=" << height << "}";
+ break;
+ }
+
+ case Translation::REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code) << "}";
+ break;
+ }
+
+ case Translation::INT32_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code)
+ << " (int32)}";
+ break;
+ }
+
+ case Translation::INT64_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code)
+ << " (int64)}";
+ break;
+ }
+
+ case Translation::UINT32_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code)
+ << " (uint32)}";
+ break;
+ }
+
+ case Translation::BOOL_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code)
+ << " (bool)}";
+ break;
+ }
+
+ case Translation::FLOAT_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << FloatRegister::from_code(reg_code) << "}";
+ break;
+ }
+
+ case Translation::DOUBLE_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << DoubleRegister::from_code(reg_code) << "}";
+ break;
+ }
+
+ case Translation::STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << "}";
+ break;
+ }
+
+ case Translation::INT32_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << " (int32)}";
+ break;
+ }
+
+ case Translation::INT64_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << " (int64)}";
+ break;
+ }
+
+ case Translation::UINT32_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << " (uint32)}";
+ break;
+ }
+
+ case Translation::BOOL_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << " (bool)}";
+ break;
+ }
+
+ case Translation::FLOAT_STACK_SLOT:
+ case Translation::DOUBLE_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << "}";
+ break;
+ }
+
+ case Translation::LITERAL: {
+ int literal_index = iterator.Next();
+ Object literal_value = LiteralArray()->get(literal_index);
+ os << "{literal_id=" << literal_index << " (" << Brief(literal_value)
+ << ")}";
+ break;
+ }
+
+ case Translation::DUPLICATED_OBJECT: {
+ int object_index = iterator.Next();
+ os << "{object_index=" << object_index << "}";
+ break;
+ }
+
+ case Translation::ARGUMENTS_ELEMENTS:
+ case Translation::ARGUMENTS_LENGTH: {
+ CreateArgumentsType arguments_type =
+ static_cast<CreateArgumentsType>(iterator.Next());
+ os << "{arguments_type=" << arguments_type << "}";
+ break;
+ }
+
+ case Translation::CAPTURED_OBJECT: {
+ int args_length = iterator.Next();
+ os << "{length=" << args_length << "}";
+ break;
+ }
+
+ case Translation::UPDATE_FEEDBACK: {
+ int literal_index = iterator.Next();
+ FeedbackSlot slot(iterator.Next());
+ os << "{feedback={vector_index=" << literal_index << ", slot=" << slot
+ << "}}";
+ break;
+ }
+ }
+ os << "\n";
+ }
+ }
+}
+
+namespace {
+
+inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os, Code code,
+ Address begin, size_t size,
+ Address current_pc) {
+ Address end = begin + size;
+ // TODO(mstarzinger): Refactor CodeReference to avoid the
+ // unhandlified->handlified transition.
+ AllowHandleAllocation allow_handles;
+ DisallowHeapAllocation no_gc;
+ HandleScope handle_scope(isolate);
+ Disassembler::Decode(isolate, &os, reinterpret_cast<byte*>(begin),
+ reinterpret_cast<byte*>(end),
+ CodeReference(handle(code, isolate)), current_pc);
+}
+
+} // namespace
+
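+// Prints a human-readable listing of this code object: header fields,
+// disassembled instructions, and, where present, the constant pool, source
+// positions, deoptimization data, safepoints, handler table, relocation
+// info, and unwinding info.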
+void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
+ Isolate* isolate = GetIsolate();
+ os << "kind = " << Kind2String(kind()) << "\n";
+ if (name == nullptr) {
+ name = GetName(isolate);
+ }
+ if ((name != nullptr) && (name[0] != '\0')) {
+ os << "name = " << name << "\n";
+ }
+ if (kind() == OPTIMIZED_FUNCTION) {
+ os << "stack_slots = " << stack_slots() << "\n";
+ }
+ os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
+ os << "address = " << static_cast<const void*>(this) << "\n\n";
+
+ if (is_off_heap_trampoline()) {
+ int trampoline_size = raw_instruction_size();
+ os << "Trampoline (size = " << trampoline_size << ")\n";
+ DisassembleCodeRange(isolate, os, *this, raw_instruction_start(),
+ trampoline_size, current_pc);
+ os << "\n";
+ }
+
+ {
+ // Stop before reaching any embedded tables.
+ int code_size = ExecutableInstructionSize();
+ os << "Instructions (size = " << code_size << ")\n";
+ DisassembleCodeRange(isolate, os, *this, InstructionStart(), code_size,
+ current_pc);
+
+ if (int pool_size = constant_pool_size()) {
+ DCHECK_EQ(pool_size & kPointerAlignmentMask, 0);
+ os << "\nConstant Pool (size = " << pool_size << ")\n";
+ Vector<char> buf = Vector<char>::New(50);
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(InstructionStart() +
+ constant_pool_offset());
+ for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) {
+ SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
+ os << static_cast<const void*>(ptr) << " " << buf.start() << "\n";
+ }
+ }
+ }
+ os << "\n";
+
+ {
+ SourcePositionTableIterator it(
+ SourcePositionTable(), SourcePositionTableIterator::kJavaScriptOnly);
+ if (!it.done()) {
+ os << "Source positions:\n pc offset position\n";
+ for (; !it.done(); it.Advance()) {
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ScriptOffset()
+ << (it.is_statement() ? " statement" : "") << "\n";
+ }
+ os << "\n";
+ }
+ }
+
+ {
+ SourcePositionTableIterator it(SourcePositionTable(),
+ SourcePositionTableIterator::kExternalOnly);
+ if (!it.done()) {
+ os << "External Source positions:\n pc offset fileid line\n";
+ for (; !it.done(); it.Advance()) {
+ DCHECK(it.source_position().IsExternal());
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ExternalFileId()
+ << std::setw(10) << it.source_position().ExternalLine() << "\n";
+ }
+ os << "\n";
+ }
+ }
+
+ if (kind() == OPTIMIZED_FUNCTION) {
+ DeoptimizationData data =
+ DeoptimizationData::cast(this->deoptimization_data());
+ data->DeoptimizationDataPrint(os);
+ }
+ os << "\n";
+
+ if (has_safepoint_info()) {
+ SafepointTable table(*this);
+ os << "Safepoints (size = " << table.size() << ")\n";
+ for (unsigned i = 0; i < table.length(); i++) {
+ unsigned pc_offset = table.GetPcOffset(i);
+ os << reinterpret_cast<const void*>(InstructionStart() + pc_offset)
+ << " ";
+ os << std::setw(6) << std::hex << pc_offset << " " << std::setw(4);
+ int trampoline_pc = table.GetTrampolinePcOffset(i);
+ print_pc(os, trampoline_pc);
+ os << std::dec << " ";
+ table.PrintEntry(i, os);
+ os << " (sp -> fp) ";
+ SafepointEntry entry = table.GetEntry(i);
+ if (entry.has_deoptimization_index()) {
+ os << std::setw(6) << entry.deoptimization_index();
+ } else {
+ os << "<none>";
+ }
+ os << "\n";
+ }
+ os << "\n";
+ }
+
+ if (has_handler_table()) {
+ HandlerTable table(*this);
+ os << "Handler Table (size = " << table.NumberOfReturnEntries() << ")\n";
+ if (kind() == OPTIMIZED_FUNCTION) {
+ table.HandlerTableReturnPrint(os);
+ }
+ os << "\n";
+ }
+
+ os << "RelocInfo (size = " << relocation_size() << ")\n";
+ for (RelocIterator it(*this); !it.done(); it.next()) {
+ it.rinfo()->Print(isolate, os);
+ }
+ os << "\n";
+
+ if (has_unwinding_info()) {
+ os << "UnwindingInfo (size = " << unwinding_info_size() << ")\n";
+ EhFrameDisassembler eh_frame_disassembler(
+ reinterpret_cast<byte*>(unwinding_info_start()),
+ reinterpret_cast<byte*>(unwinding_info_end()));
+ eh_frame_disassembler.DisassembleToStream(os);
+ os << "\n";
+ }
+
+ if (has_code_comments()) {
+ PrintCodeCommentsSection(os, code_comments());
+ }
+}
+#endif // ENABLE_DISASSEMBLER
+
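+// Prints each bytecode with its offset, decoded operands, any interleaved
+// source positions, and jump or switch targets, followed by the constant
+// pool and handler table.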
+void BytecodeArray::Disassemble(std::ostream& os) {
+ DisallowHeapAllocation no_gc;
+
+ os << "Parameter count " << parameter_count() << "\n";
+ os << "Register count " << register_count() << "\n";
+ os << "Frame size " << frame_size() << "\n";
+
+ Address base_address = GetFirstBytecodeAddress();
+ SourcePositionTableIterator source_positions(SourcePositionTable());
+
+ // Storage for backing the handle passed to the iterator. This handle won't be
+ // updated by the GC, but that's OK because we've disallowed GCs anyway.
+ BytecodeArray handle_storage = *this;
+ Handle<BytecodeArray> handle(reinterpret_cast<Address*>(&handle_storage));
+ interpreter::BytecodeArrayIterator iterator(handle);
+ while (!iterator.done()) {
+ if (!source_positions.done() &&
+ iterator.current_offset() == source_positions.code_offset()) {
+ os << std::setw(5) << source_positions.source_position().ScriptOffset();
+ os << (source_positions.is_statement() ? " S> " : " E> ");
+ source_positions.Advance();
+ } else {
+ os << " ";
+ }
+ Address current_address = base_address + iterator.current_offset();
+ os << reinterpret_cast<const void*>(current_address) << " @ "
+ << std::setw(4) << iterator.current_offset() << " : ";
+ interpreter::BytecodeDecoder::Decode(
+ os, reinterpret_cast<byte*>(current_address), parameter_count());
+ if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
+ Address jump_target = base_address + iterator.GetJumpTargetOffset();
+ os << " (" << reinterpret_cast<void*>(jump_target) << " @ "
+ << iterator.GetJumpTargetOffset() << ")";
+ }
+ if (interpreter::Bytecodes::IsSwitch(iterator.current_bytecode())) {
+ os << " {";
+ bool first_entry = true;
+ for (const auto& entry : iterator.GetJumpTableTargetOffsets()) {
+ if (first_entry) {
+ first_entry = false;
+ } else {
+ os << ",";
+ }
+ os << " " << entry.case_value << ": @" << entry.target_offset;
+ }
+ os << " }";
+ }
+ os << std::endl;
+ iterator.Advance();
+ }
+
+ os << "Constant pool (size = " << constant_pool()->length() << ")\n";
+#ifdef OBJECT_PRINT
+ if (constant_pool()->length() > 0) {
+ constant_pool()->Print();
+ }
+#endif
+
+ os << "Handler Table (size = " << handler_table()->length() << ")\n";
+#ifdef ENABLE_DISASSEMBLER
+ if (handler_table()->length() > 0) {
+ HandlerTable table(*this);
+ table.HandlerTableRangePrint(os);
+ }
+#endif
+}
+
+void BytecodeArray::CopyBytecodesTo(BytecodeArray to) {
+ BytecodeArray from = *this;
+ DCHECK_EQ(from->length(), to->length());
+ CopyBytes(reinterpret_cast<byte*>(to->GetFirstBytecodeAddress()),
+ reinterpret_cast<byte*>(from->GetFirstBytecodeAddress()),
+ from->length());
+}
+
+void BytecodeArray::MakeOlder() {
+ // BytecodeArray is aged in the concurrent marker.
+ // The age word must be completely within the bytecode array.
+ Address age_addr = address() + kBytecodeAgeOffset;
+ DCHECK_LE(RoundDown(age_addr, kSystemPointerSize) + kSystemPointerSize,
+ address() + Size());
+ Age age = bytecode_age();
+ if (age < kLastBytecodeAge) {
+ base::AsAtomic8::Release_CompareAndSwap(reinterpret_cast<byte*>(age_addr),
+ age, age + 1);
+ }
+
+ DCHECK_GE(bytecode_age(), kFirstBytecodeAge);
+ DCHECK_LE(bytecode_age(), kLastBytecodeAge);
+}
+
+bool BytecodeArray::IsOld() const {
+ return bytecode_age() >= kIsOldBytecodeAge;
+}
+
+DependentCode DependentCode::GetDependentCode(Handle<HeapObject> object) {
+ if (object->IsMap()) {
+ return Handle<Map>::cast(object)->dependent_code();
+ } else if (object->IsPropertyCell()) {
+ return Handle<PropertyCell>::cast(object)->dependent_code();
+ } else if (object->IsAllocationSite()) {
+ return Handle<AllocationSite>::cast(object)->dependent_code();
+ }
+ UNREACHABLE();
+}
+
+void DependentCode::SetDependentCode(Handle<HeapObject> object,
+ Handle<DependentCode> dep) {
+ if (object->IsMap()) {
+ Handle<Map>::cast(object)->set_dependent_code(*dep);
+ } else if (object->IsPropertyCell()) {
+ Handle<PropertyCell>::cast(object)->set_dependent_code(*dep);
+ } else if (object->IsAllocationSite()) {
+ Handle<AllocationSite>::cast(object)->set_dependent_code(*dep);
+ } else {
+ UNREACHABLE();
+ }
+}
+
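+// Records that |code| depends on |object| for the given group, updating the
+// object's dependent-code list head when the insertion allocates a new
+// group entry.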
+void DependentCode::InstallDependency(Isolate* isolate,
+ const MaybeObjectHandle& code,
+ Handle<HeapObject> object,
+ DependencyGroup group) {
+ Handle<DependentCode> old_deps(DependentCode::GetDependentCode(object),
+ isolate);
+ Handle<DependentCode> new_deps =
+ InsertWeakCode(isolate, old_deps, group, code);
+ // Update the list head if necessary.
+ if (!new_deps.is_identical_to(old_deps))
+ DependentCode::SetDependentCode(object, new_deps);
+}
+
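+// The entries form a linked list sorted by dependency group; recurse to the
+// node for |group| (allocating one in order if needed) and append |code|
+// unless it is already present.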
+Handle<DependentCode> DependentCode::InsertWeakCode(
+ Isolate* isolate, Handle<DependentCode> entries, DependencyGroup group,
+ const MaybeObjectHandle& code) {
+ if (entries->length() == 0 || entries->group() > group) {
+ // There is no such group.
+ return DependentCode::New(isolate, group, code, entries);
+ }
+ if (entries->group() < group) {
+ // The group comes later in the list.
+ Handle<DependentCode> old_next(entries->next_link(), isolate);
+ Handle<DependentCode> new_next =
+ InsertWeakCode(isolate, old_next, group, code);
+ if (!old_next.is_identical_to(new_next)) {
+ entries->set_next_link(*new_next);
+ }
+ return entries;
+ }
+ DCHECK_EQ(group, entries->group());
+ int count = entries->count();
+ // Check for existing entry to avoid duplicates.
+ for (int i = 0; i < count; i++) {
+ if (entries->object_at(i) == *code) return entries;
+ }
+ if (entries->length() < kCodesStartIndex + count + 1) {
+ entries = EnsureSpace(isolate, entries);
+ // Count could have changed; reload it.
+ count = entries->count();
+ }
+ entries->set_object_at(count, *code);
+ entries->set_count(count + 1);
+ return entries;
+}
+
+Handle<DependentCode> DependentCode::New(Isolate* isolate,
+ DependencyGroup group,
+ const MaybeObjectHandle& object,
+ Handle<DependentCode> next) {
+ Handle<DependentCode> result = Handle<DependentCode>::cast(
+ isolate->factory()->NewWeakFixedArray(kCodesStartIndex + 1, TENURED));
+ result->set_next_link(*next);
+ result->set_flags(GroupField::encode(group) | CountField::encode(1));
+ result->set_object_at(0, *object);
+ return result;
+}
+
+Handle<DependentCode> DependentCode::EnsureSpace(
+ Isolate* isolate, Handle<DependentCode> entries) {
+ if (entries->Compact()) return entries;
+ int capacity = kCodesStartIndex + DependentCode::Grow(entries->count());
+ int grow_by = capacity - entries->length();
+ return Handle<DependentCode>::cast(
+ isolate->factory()->CopyWeakFixedArrayAndGrow(entries, grow_by, TENURED));
+}
+
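+// Compacts the entries in place by dropping cleared weak references.
+// Returns true if any entry was removed.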
+bool DependentCode::Compact() {
+ int old_count = count();
+ int new_count = 0;
+ for (int i = 0; i < old_count; i++) {
+ MaybeObject obj = object_at(i);
+ if (!obj->IsCleared()) {
+ if (i != new_count) {
+ copy(i, new_count);
+ }
+ new_count++;
+ }
+ }
+ set_count(new_count);
+ for (int i = new_count; i < old_count; i++) {
+ clear_at(i);
+ }
+ return new_count < old_count;
+}
+
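+// Finds the entry for |group| in the list, marks every live code object in
+// it for deoptimization, and clears the group. Returns true if any code was
+// newly marked.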
+bool DependentCode::MarkCodeForDeoptimization(
+ Isolate* isolate, DependentCode::DependencyGroup group) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return false;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ return next_link()->MarkCodeForDeoptimization(isolate, group);
+ }
+ DCHECK_EQ(group, this->group());
+ DisallowHeapAllocation no_allocation_scope;
+ // Mark all the code that needs to be deoptimized.
+ bool marked = false;
+ int count = this->count();
+ for (int i = 0; i < count; i++) {
+ MaybeObject obj = object_at(i);
+ if (obj->IsCleared()) continue;
+ Code code = Code::cast(obj->GetHeapObjectAssumeWeak());
+ if (!code->marked_for_deoptimization()) {
+ code->SetMarkedForDeoptimization(DependencyGroupName(group));
+ marked = true;
+ }
+ }
+ for (int i = 0; i < count; i++) {
+ clear_at(i);
+ }
+ set_count(0);
+ return marked;
+}
+
+void DependentCode::DeoptimizeDependentCodeGroup(
+ Isolate* isolate, DependentCode::DependencyGroup group) {
+ DisallowHeapAllocation no_allocation_scope;
+ bool marked = MarkCodeForDeoptimization(isolate, group);
+ if (marked) {
+ DCHECK(AllowCodeDependencyChange::IsAllowed());
+ Deoptimizer::DeoptimizeMarkedCode(isolate);
+ }
+}
+
+void Code::SetMarkedForDeoptimization(const char* reason) {
+ set_marked_for_deoptimization(true);
+ if (FLAG_trace_deopt &&
+ (deoptimization_data() != GetReadOnlyRoots().empty_fixed_array())) {
+ DeoptimizationData deopt_data =
+ DeoptimizationData::cast(deoptimization_data());
+ CodeTracer::Scope scope(GetIsolate()->GetCodeTracer());
+ PrintF(scope.file(),
+ "[marking dependent code " V8PRIxPTR_FMT
+ " (opt #%d) for deoptimization, reason: %s]\n",
+ ptr(), deopt_data->OptimizationId()->value(), reason);
+ }
+}
+
+const char* DependentCode::DependencyGroupName(DependencyGroup group) {
+ switch (group) {
+ case kTransitionGroup:
+ return "transition";
+ case kPrototypeCheckGroup:
+ return "prototype-check";
+ case kPropertyCellChangedGroup:
+ return "property-cell-changed";
+ case kFieldOwnerGroup:
+ return "field-owner";
+ case kInitialMapChangedGroup:
+ return "initial-map-changed";
+ case kAllocationSiteTenuringChangedGroup:
+ return "allocation-site-tenuring-changed";
+ case kAllocationSiteTransitionChangedGroup:
+ return "allocation-site-transition-changed";
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 6239ef9a0b..aa4c820b95 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -21,6 +21,7 @@ namespace internal {
class ByteArray;
class BytecodeArray;
class CodeDataContainer;
+class CodeDesc;
class MaybeObject;
namespace interpreter {
@@ -69,8 +70,8 @@ class Code : public HeapObject {
// Returns the size of the native instructions, including embedded
// data such as the safepoints table. For off-heap code objects
- // this may from instruction_size in that this will return the size of the
- // off-heap instruction stream rather than the on-heap trampoline located
+ // this may differ from instruction_size in that this will return the size of
+ // the off-heap instruction stream rather than the on-heap trampoline located
// at instruction_start.
inline int InstructionSize() const;
int OffHeapInstructionSize() const;
@@ -97,17 +98,6 @@ class Code : public HeapObject {
inline Object next_code_link() const;
inline void set_next_code_link(Object value);
- // [constant_pool offset]: Offset of the constant pool.
- // Valid for FLAG_enable_embedded_constant_pool only
- inline int constant_pool_offset() const;
- inline void set_constant_pool_offset(int offset);
- inline int constant_pool_size() const;
-
- // [code_comments_offset]: Offset of the code comment section.
- inline int code_comments_offset() const;
- inline void set_code_comments_offset(int offset);
- inline Address code_comments() const;
-
// Unchecked accessors to be used during GC.
inline ByteArray unchecked_relocation_info() const;
@@ -155,11 +145,32 @@ class Code : public HeapObject {
// instruction stream where the safepoint table starts.
inline int safepoint_table_offset() const;
inline void set_safepoint_table_offset(int offset);
+ int safepoint_table_size() const;
+ bool has_safepoint_table() const;
// [handler_table_offset]: The offset in the instruction stream where the
// exception handler table starts.
inline int handler_table_offset() const;
inline void set_handler_table_offset(int offset);
+ int handler_table_size() const;
+ bool has_handler_table() const;
+
+ // [constant_pool offset]: Offset of the constant pool.
+ // Valid for FLAG_enable_embedded_constant_pool only
+ inline int constant_pool_offset() const;
+ inline void set_constant_pool_offset(int offset);
+ int constant_pool_size() const;
+ bool has_constant_pool() const;
+
+ // [code_comments_offset]: Offset of the code comment section.
+ inline int code_comments_offset() const;
+ inline void set_code_comments_offset(int offset);
+ inline Address code_comments() const;
+ int code_comments_size() const;
+ bool has_code_comments() const;
+
+ // The size of the executable instruction area, without embedded metadata.
+ int ExecutableInstructionSize() const;
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
// the code is going to be deoptimized.
@@ -209,7 +220,7 @@ class Code : public HeapObject {
inline void WipeOutHeader();
// Clear uninitialized padding space. This ensures that the snapshot content
- // is deterministic.
+ // is deterministic. Depending on the V8 build mode there could be no padding.
inline void clear_padding();
// Initialize the flags field. Similar to clear_padding above, this ensures that
// the snapshot content is deterministic.
@@ -264,8 +275,11 @@ class Code : public HeapObject {
// | instructions |
// | ... |
// +--------------------------+
- // | relocation info |
- // | ... |
+ // | embedded metadata | <-- safepoint_table_offset()
+ // | ... | <-- handler_table_offset()
+ // | | <-- constant_pool_offset()
+ // | | <-- code_comments_offset()
+ // | |
// +--------------------------+ <-- raw_instruction_end()
//
// If has_unwinding_info() is false, raw_instruction_end() points to the first
@@ -359,25 +373,27 @@ class Code : public HeapObject {
class OptimizedCodeIterator;
// Layout description.
-#define CODE_FIELDS(V) \
- V(kRelocationInfoOffset, kTaggedSize) \
- V(kDeoptimizationDataOffset, kTaggedSize) \
- V(kSourcePositionTableOffset, kTaggedSize) \
- V(kCodeDataContainerOffset, kTaggedSize) \
- /* Data or code not directly visited by GC directly starts here. */ \
- /* The serializer needs to copy bytes starting from here verbatim. */ \
- /* Objects embedded into code is visited via reloc info. */ \
- V(kDataStart, 0) \
- V(kInstructionSizeOffset, kIntSize) \
- V(kFlagsOffset, kIntSize) \
- V(kSafepointTableOffsetOffset, kIntSize) \
- V(kHandlerTableOffsetOffset, kIntSize) \
- V(kConstantPoolOffset, FLAG_enable_embedded_constant_pool ? kIntSize : 0) \
- V(kBuiltinIndexOffset, kIntSize) \
- V(kCodeCommentsOffset, kIntSize) \
- /* Add padding to align the instruction start following right after */ \
- /* the Code object header. */ \
- V(kHeaderPaddingStart, CODE_POINTER_PADDING(kHeaderPaddingStart)) \
+#define CODE_FIELDS(V) \
+ V(kRelocationInfoOffset, kTaggedSize) \
+ V(kDeoptimizationDataOffset, kTaggedSize) \
+ V(kSourcePositionTableOffset, kTaggedSize) \
+ V(kCodeDataContainerOffset, kTaggedSize) \
+ /* Data or code not directly visited by GC starts here. */ \
+ /* The serializer needs to copy bytes starting from here verbatim. */ \
+ /* Objects embedded into code are visited via reloc info. */ \
+ V(kDataStart, 0) \
+ V(kInstructionSizeOffset, kIntSize) \
+ V(kFlagsOffset, kIntSize) \
+ V(kSafepointTableOffsetOffset, kIntSize) \
+ V(kHandlerTableOffsetOffset, kIntSize) \
+ V(kConstantPoolOffsetOffset, \
+ FLAG_enable_embedded_constant_pool ? kIntSize : 0) \
+ V(kCodeCommentsOffsetOffset, kIntSize) \
+ V(kBuiltinIndexOffset, kIntSize) \
+ V(kUnalignedHeaderSize, 0) \
+ /* Add padding to align the instruction start following right after */ \
+ /* the Code object header. */ \
+ V(kOptionalPaddingOffset, CODE_POINTER_PADDING(kOptionalPaddingOffset)) \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_FIELDS)
@@ -387,31 +403,25 @@ class Code : public HeapObject {
// due to padding for code alignment.
#if V8_TARGET_ARCH_ARM64
static constexpr int kHeaderPaddingSize = 0;
- STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kHeaderPaddingSize = 0;
- STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_X64
static constexpr int kHeaderPaddingSize = 0;
- STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_ARM
static constexpr int kHeaderPaddingSize = 20;
- STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_IA32
static constexpr int kHeaderPaddingSize = 20;
- STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_MIPS
static constexpr int kHeaderPaddingSize = 20;
- STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_PPC64
- // No static assert possible since padding size depends on the
- // FLAG_enable_embedded_constant_pool runtime flag.
+ static constexpr int kHeaderPaddingSize =
+ FLAG_enable_embedded_constant_pool ? 28 : 0;
#elif V8_TARGET_ARCH_S390X
static constexpr int kHeaderPaddingSize = 0;
- STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#else
#error Unknown architecture.
#endif
+ STATIC_ASSERT(FIELD_SIZE(kOptionalPaddingOffset) == kHeaderPaddingSize);
inline int GetUnwindingInfoSizeOffset() const;
@@ -469,7 +479,7 @@ class Code::OptimizedCodeIterator {
Code current_code_;
Isolate* isolate_;
- DISALLOW_HEAP_ALLOCATION(no_gc);
+ DISALLOW_HEAP_ALLOCATION(no_gc)
DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator);
};
@@ -552,8 +562,8 @@ class AbstractCode : public HeapObject {
// Returns the size of the native instructions, including embedded
// data such as the safepoints table. For off-heap code objects
- // this may from instruction_size in that this will return the size of the
- // off-heap instruction stream rather than the on-heap trampoline located
+ // this may differ from instruction_size in that this will return the size of
+ // the off-heap instruction stream rather than the on-heap trampoline located
// at instruction_start.
inline int InstructionSize();
@@ -586,7 +596,7 @@ class AbstractCode : public HeapObject {
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
- OBJECT_CONSTRUCTORS(AbstractCode, HeapObject)
+ OBJECT_CONSTRUCTORS(AbstractCode, HeapObject);
};
// Dependent code is a singly linked list of weak fixed arrays. Each array
@@ -697,7 +707,7 @@ class DependentCode : public WeakFixedArray {
class CountField : public BitField<int, 3, 27> {};
STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
- OBJECT_CONSTRUCTORS(DependentCode, WeakFixedArray)
+ OBJECT_CONSTRUCTORS(DependentCode, WeakFixedArray);
};
// BytecodeArray represents a sequence of interpreter bytecodes.
@@ -722,7 +732,7 @@ class BytecodeArray : public FixedArrayBase {
}
// Setter and getter
- inline byte get(int index);
+ inline byte get(int index) const;
inline void set(int index, byte value);
// Returns data start address.
@@ -769,6 +779,7 @@ class BytecodeArray : public FixedArrayBase {
DECL_ACCESSORS(source_position_table, Object)
inline ByteArray SourcePositionTable();
+ inline bool HasSourcePositionTable();
inline void ClearFrameCacheFromSourcePositionTable();
DECL_CAST(BytecodeArray)
@@ -782,9 +793,6 @@ class BytecodeArray : public FixedArrayBase {
// bytecode, constant pool, source position table, and handler table.
inline int SizeIncludingMetadata();
- int SourcePosition(int offset);
- int SourceStatementPosition(int offset);
-
DECL_PRINTER(BytecodeArray)
DECL_VERIFIER(BytecodeArray)
@@ -800,6 +808,9 @@ class BytecodeArray : public FixedArrayBase {
// is deterministic.
inline void clear_padding();
+ // Compares only the bytecode array, not any of the header fields.
+ bool IsBytecodeEqual(const BytecodeArray other) const;
+
// Layout description.
#define BYTECODE_ARRAY_FIELDS(V) \
/* Pointer fields. */ \
@@ -913,7 +924,7 @@ class DeoptimizationData : public FixedArray {
static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
- OBJECT_CONSTRUCTORS(DeoptimizationData, FixedArray)
+ OBJECT_CONSTRUCTORS(DeoptimizationData, FixedArray);
};
class SourcePositionTableWithFrameCache : public Tuple2 {
diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-inl.h
index 07af9a6029..18491118ad 100644
--- a/deps/v8/src/objects/compilation-cache-inl.h
+++ b/deps/v8/src/objects/compilation-cache-inl.h
@@ -51,11 +51,12 @@ uint32_t CompilationCacheShape::StringSharedHash(String source,
return hash;
}
-uint32_t CompilationCacheShape::HashForObject(Isolate* isolate, Object object) {
+uint32_t CompilationCacheShape::HashForObject(ReadOnlyRoots roots,
+ Object object) {
if (object->IsNumber()) return static_cast<uint32_t>(object->Number());
FixedArray val = FixedArray::cast(object);
- if (val->map() == val->GetReadOnlyRoots().fixed_cow_array_map()) {
+ if (val->map() == roots.fixed_cow_array_map()) {
DCHECK_EQ(4, val->length());
SharedFunctionInfo shared = SharedFunctionInfo::cast(val->get(0));
String source = String::cast(val->get(1));
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index d5dd8ddddf..d3feb1b233 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -9,6 +9,7 @@
#include "src/objects/hash-table.h"
#include "src/objects/js-regexp.h"
#include "src/objects/shared-function-info.h"
+#include "src/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -33,7 +34,7 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
LanguageMode language_mode,
int position);
- static inline uint32_t HashForObject(Isolate* isolate, Object object);
+ static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static const int kPrefixSize = 0;
static const int kEntrySize = 3;
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index dd4c5d8b12..667b19b3d4 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -35,20 +35,13 @@ class DataHandler : public Struct {
DECL_ACCESSORS(data2, MaybeObject)
DECL_ACCESSORS(data3, MaybeObject)
-// Layout description.
-#define DATA_HANDLER_FIELDS(V) \
- V(kSmiHandlerOffset, kTaggedSize) \
- V(kValidityCellOffset, kTaggedSize) \
- V(kSizeWithData0, 0) \
- V(kData1Offset, kTaggedSize) \
- V(kSizeWithData1, 0) \
- V(kData2Offset, kTaggedSize) \
- V(kSizeWithData2, 0) \
- V(kData3Offset, kTaggedSize) \
- V(kSizeWithData3, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, DATA_HANDLER_FIELDS)
-#undef DATA_HANDLER_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_DATA_HANDLER_FIELDS)
+
+ static const int kSizeWithData0 = kData1Offset;
+ static const int kSizeWithData1 = kData2Offset;
+ static const int kSizeWithData2 = kData3Offset;
+ static const int kSizeWithData3 = kSize;
DECL_CAST(DataHandler)
@@ -56,7 +49,7 @@ class DataHandler : public Struct {
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(DataHandler, Struct)
+ OBJECT_CONSTRUCTORS(DataHandler, Struct);
};
} // namespace internal
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index 06709f037a..d445174cbc 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -7,9 +7,9 @@
#include "src/objects/debug-objects.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap-write-barrier.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/code-inl.h"
#include "src/objects/shared-function-info.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 7901c995d5..cffe280097 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -139,7 +139,7 @@ class DebugInfo : public Struct {
// Id assigned to the function for debugging.
// This could also be implemented as a weak hash table.
- DECL_INT_ACCESSORS(debugging_id);
+ DECL_INT_ACCESSORS(debugging_id)
// Bit positions in |debugger_hints|.
#define DEBUGGER_HINTS_BIT_FIELDS(V, _) \
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index 4cc54ee050..bd96819597 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -11,7 +11,8 @@
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
-#include "src/lookup-cache.h"
+#include "src/lookup-cache-inl.h"
+#include "src/maybe-handles-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/struct-inl.h"
@@ -29,6 +30,9 @@ OBJECT_CONSTRUCTORS_IMPL(EnumCache, Tuple2)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(EnumCache)
+ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
+ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
+
ACCESSORS(DescriptorArray, enum_cache, EnumCache, kEnumCacheOffset)
RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_all_descriptors,
kNumberOfAllDescriptorsOffset)
@@ -50,7 +54,7 @@ inline int16_t DescriptorArray::CompareAndSwapRawNumberOfMarkedDescriptors(
int16_t expected, int16_t value) {
return base::Relaxed_CompareAndSwap(
reinterpret_cast<base::Atomic16*>(
- FIELD_ADDR(this, kRawNumberOfMarkedDescriptorsOffset)),
+ FIELD_ADDR(*this, kRawNumberOfMarkedDescriptorsOffset)),
expected, value);
}
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 3e67e94bf1..4a6a240ba8 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -125,7 +125,7 @@ class DescriptorArray : public HeapObject {
// empty descriptor array object if number_of_descriptors is 0.
static Handle<DescriptorArray> Allocate(
Isolate* isolate, int nof_descriptors, int slack,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType type = AllocationType::kYoung);
void Initialize(EnumCache enum_cache, HeapObject undefined_value,
int nof_descriptors, int slack);
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index 39cc1c61b8..caacde21fa 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -7,6 +7,8 @@
#include "src/objects/dictionary.h"
+#include "src/hash-seed-inl.h"
+#include "src/objects/hash-table-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell-inl.h"
@@ -136,14 +138,14 @@ bool NumberDictionaryBaseShape::IsMatch(uint32_t key, Object other) {
}
uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) {
- return ComputeSeededHash(key, isolate->heap()->HashSeed());
+ return ComputeSeededHash(key, HashSeed(isolate));
}
-uint32_t NumberDictionaryBaseShape::HashForObject(Isolate* isolate,
+uint32_t NumberDictionaryBaseShape::HashForObject(ReadOnlyRoots roots,
Object other) {
DCHECK(other->IsNumber());
return ComputeSeededHash(static_cast<uint32_t>(other->Number()),
- isolate->heap()->HashSeed());
+ HashSeed(roots));
}
Handle<Object> NumberDictionaryBaseShape::AsHandle(Isolate* isolate,
@@ -169,7 +171,7 @@ uint32_t NameDictionaryShape::Hash(Isolate* isolate, Handle<Name> key) {
return key->Hash();
}
-uint32_t NameDictionaryShape::HashForObject(Isolate* isolate, Object other) {
+uint32_t NameDictionaryShape::HashForObject(ReadOnlyRoots roots, Object other) {
return Name::cast(other)->Hash();
}
@@ -178,7 +180,8 @@ bool GlobalDictionaryShape::IsMatch(Handle<Name> key, Object other) {
return *key == PropertyCell::cast(other)->name();
}
-uint32_t GlobalDictionaryShape::HashForObject(Isolate* isolate, Object other) {
+uint32_t GlobalDictionaryShape::HashForObject(ReadOnlyRoots roots,
+ Object other) {
return PropertyCell::cast(other)->name()->Hash();
}
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 1b0c57ec7d..7670dff131 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -10,6 +10,7 @@
#include "src/objects/hash-table.h"
#include "src/objects/property-array.h"
#include "src/objects/smi.h"
+#include "src/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -86,7 +87,7 @@ class Dictionary : public HashTable<Derived, Shape> {
Handle<Object> value,
PropertyDetails details);
- OBJECT_CONSTRUCTORS(Dictionary, HashTable<Derived, Shape>)
+ OBJECT_CONSTRUCTORS(Dictionary, HashTable<Derived, Shape>);
};
template <typename Key>
@@ -114,7 +115,7 @@ class NameDictionaryShape : public BaseDictionaryShape<Handle<Name>> {
public:
static inline bool IsMatch(Handle<Name> key, Object other);
static inline uint32_t Hash(Isolate* isolate, Handle<Name> key);
- static inline uint32_t HashForObject(Isolate* isolate, Object object);
+ static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key);
static inline RootIndex GetMapRootIndex();
static const int kPrefixSize = 2;
@@ -188,7 +189,7 @@ class BaseNameDictionary : public Dictionary<Derived, Shape> {
Isolate* isolate, Handle<Derived> dictionary, Key key,
Handle<Object> value, PropertyDetails details, int* entry_out = nullptr);
- OBJECT_CONSTRUCTORS(BaseNameDictionary, Dictionary<Derived, Shape>)
+ OBJECT_CONSTRUCTORS(BaseNameDictionary, Dictionary<Derived, Shape>);
};
class NameDictionary
@@ -204,13 +205,13 @@ class NameDictionary
inline int hash() const;
OBJECT_CONSTRUCTORS(NameDictionary,
- BaseNameDictionary<NameDictionary, NameDictionaryShape>)
+ BaseNameDictionary<NameDictionary, NameDictionaryShape>);
};
class GlobalDictionaryShape : public NameDictionaryShape {
public:
static inline bool IsMatch(Handle<Name> key, Object other);
- static inline uint32_t HashForObject(Isolate* isolate, Object object);
+ static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static const int kEntrySize = 1; // Overrides NameDictionaryShape::kEntrySize
@@ -241,7 +242,7 @@ class GlobalDictionary
OBJECT_CONSTRUCTORS(
GlobalDictionary,
- BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>)
+ BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>);
};
class NumberDictionaryBaseShape : public BaseDictionaryShape<uint32_t> {
@@ -250,7 +251,7 @@ class NumberDictionaryBaseShape : public BaseDictionaryShape<uint32_t> {
static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
static inline uint32_t Hash(Isolate* isolate, uint32_t key);
- static inline uint32_t HashForObject(Isolate* isolate, Object object);
+ static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
};
class NumberDictionaryShape : public NumberDictionaryBaseShape {
@@ -301,7 +302,7 @@ class SimpleNumberDictionary
OBJECT_CONSTRUCTORS(
SimpleNumberDictionary,
- Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>)
+ Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>);
};
extern template class EXPORT_TEMPLATE_DECLARE(
@@ -361,7 +362,7 @@ class NumberDictionary
static const uint32_t kPreferFastElementsSizeFactor = 3;
OBJECT_CONSTRUCTORS(NumberDictionary,
- Dictionary<NumberDictionary, NumberDictionaryShape>)
+ Dictionary<NumberDictionary, NumberDictionaryShape>);
};
} // namespace internal
diff --git a/deps/v8/src/objects/embedder-data-array-inl.h b/deps/v8/src/objects/embedder-data-array-inl.h
index 475945f1f1..6519427b7a 100644
--- a/deps/v8/src/objects/embedder-data-array-inl.h
+++ b/deps/v8/src/objects/embedder-data-array-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/embedder-data-array.h"
-//#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/instance-type-inl.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/slots.h"
@@ -24,11 +24,11 @@ SMI_ACCESSORS(EmbedderDataArray, length, kLengthOffset)
OBJECT_CONSTRUCTORS_IMPL(EmbedderDataArray, HeapObject)
Address EmbedderDataArray::slots_start() {
- return FIELD_ADDR(this, OffsetOfElementAt(0));
+ return FIELD_ADDR(*this, OffsetOfElementAt(0));
}
Address EmbedderDataArray::slots_end() {
- return FIELD_ADDR(this, OffsetOfElementAt(length()));
+ return FIELD_ADDR(*this, OffsetOfElementAt(length()));
}
} // namespace internal
diff --git a/deps/v8/src/objects/embedder-data-array.cc b/deps/v8/src/objects/embedder-data-array.cc
index 665a1fa2f4..c85e0b9f31 100644
--- a/deps/v8/src/objects/embedder-data-array.cc
+++ b/deps/v8/src/objects/embedder-data-array.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "src/objects/embedder-data-array.h"
+
+#include "src/isolate.h"
#include "src/objects/embedder-data-array-inl.h"
namespace v8 {
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index b136fd288f..1a4d85d778 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -5,10 +5,12 @@
#ifndef V8_OBJECTS_EMBEDDER_DATA_SLOT_INL_H_
#define V8_OBJECTS_EMBEDDER_DATA_SLOT_INL_H_
+#include "src/objects/embedder-data-slot.h"
+
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
#include "src/objects/embedder-data-array.h"
-#include "src/objects/embedder-data-slot.h"
-#include "src/objects/js-objects.h"
+#include "src/objects/js-objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h
index e10c9dc9fb..371452253c 100644
--- a/deps/v8/src/objects/embedder-data-slot.h
+++ b/deps/v8/src/objects/embedder-data-slot.h
@@ -37,10 +37,12 @@ class EmbedderDataSlot
V8_INLINE EmbedderDataSlot(JSObject object, int embedder_field_index);
// TODO(ishell): these offsets are currently little-endian specific.
+ // The less significant part contains the tagged value and the other part
+ // contains the raw value.
+ static constexpr int kTaggedPayloadOffset = 0;
#ifdef V8_COMPRESS_POINTERS
static constexpr int kRawPayloadOffset = kTaggedSize;
#endif
- static constexpr int kTaggedPayloadOffset = 0;
static constexpr int kRequiredPtrAlignment = kSmiTagSize;
// Opaque type used for storing raw embedder data.
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index 5d8a5a5780..b3d7d196fc 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -8,6 +8,8 @@
#include "src/objects/feedback-cell.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/struct-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 3bd36afd03..ad0684de1b 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -7,15 +7,18 @@
#include "src/objects/fixed-array.h"
+#include "src/base/tsan.h"
#include "src/conversions.h"
#include "src/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
#include "src/objects/bigint.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/slots.h"
+#include "src/roots-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -69,7 +72,7 @@ SYNCHRONIZED_SMI_ACCESSORS(WeakArrayList, capacity, kCapacityOffset)
SMI_ACCESSORS(WeakArrayList, length, kLengthOffset)
Object FixedArrayBase::unchecked_synchronized_length() const {
- return ACQUIRE_READ_FIELD(this, kLengthOffset);
+ return ACQUIRE_READ_FIELD(*this, kLengthOffset);
}
ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
@@ -90,7 +93,7 @@ bool FixedArray::ContainsOnlySmisOrHoles() {
Object FixedArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_FIELD(this, kHeaderSize + index * kTaggedSize);
+ return RELAXED_READ_FIELD(*this, kHeaderSize + index * kTaggedSize);
}
Handle<Object> FixedArray::get(FixedArray array, int index, Isolate* isolate) {
@@ -120,7 +123,7 @@ void FixedArray::set(int index, Smi value) {
DCHECK_LT(index, this->length());
DCHECK(Object(value).IsSmi());
int offset = kHeaderSize + index * kTaggedSize;
- RELAXED_WRITE_FIELD(this, offset, value);
+ RELAXED_WRITE_FIELD(*this, offset, value);
}
void FixedArray::set(int index, Object value) {
@@ -146,7 +149,7 @@ void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) {
DCHECK_NE(array->map(), array->GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_GE(index, 0);
DCHECK_LT(index, array->length());
- DCHECK(!Heap::InNewSpace(value));
+ DCHECK(!ObjectInYoungGeneration(value));
RELAXED_WRITE_FIELD(array, kHeaderSize + index * kTaggedSize, value);
}
@@ -310,7 +313,7 @@ double FixedDoubleArray::get_scalar(int index) {
map() != GetReadOnlyRoots().fixed_array_map());
DCHECK(index >= 0 && index < this->length());
DCHECK(!is_the_hole(index));
- return READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
+ return READ_DOUBLE_FIELD(*this, kHeaderSize + index * kDoubleSize);
}
uint64_t FixedDoubleArray::get_representation(int index) {
@@ -318,13 +321,13 @@ uint64_t FixedDoubleArray::get_representation(int index) {
map() != GetReadOnlyRoots().fixed_array_map());
DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kDoubleSize;
- return READ_UINT64_FIELD(this, offset);
+ return READ_UINT64_FIELD(*this, offset);
}
Handle<Object> FixedDoubleArray::get(FixedDoubleArray array, int index,
Isolate* isolate) {
if (array->is_the_hole(index)) {
- return isolate->factory()->the_hole_value();
+ return ReadOnlyRoots(isolate).the_hole_value_handle();
} else {
return isolate->factory()->NewNumber(array->get_scalar(index));
}
@@ -335,9 +338,9 @@ void FixedDoubleArray::set(int index, double value) {
map() != GetReadOnlyRoots().fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
if (std::isnan(value)) {
- WRITE_DOUBLE_FIELD(this, offset, std::numeric_limits<double>::quiet_NaN());
+ WRITE_DOUBLE_FIELD(*this, offset, std::numeric_limits<double>::quiet_NaN());
} else {
- WRITE_DOUBLE_FIELD(this, offset, value);
+ WRITE_DOUBLE_FIELD(*this, offset, value);
}
DCHECK(!is_the_hole(index));
}
@@ -350,7 +353,7 @@ void FixedDoubleArray::set_the_hole(int index) {
DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() &&
map() != GetReadOnlyRoots().fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
- WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
+ WRITE_UINT64_FIELD(*this, offset, kHoleNanInt64);
}
bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
@@ -464,46 +467,46 @@ int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kTaggedSize); }
byte ByteArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+ return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize);
}
void ByteArray::set(int index, byte value) {
DCHECK(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
+ WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize, value);
}
void ByteArray::copy_in(int index, const byte* buffer, int length) {
DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->length());
- Address dst_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
+ Address dst_addr = FIELD_ADDR(*this, kHeaderSize + index * kCharSize);
memcpy(reinterpret_cast<void*>(dst_addr), buffer, length);
}
void ByteArray::copy_out(int index, byte* buffer, int length) {
DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->length());
- Address src_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
+ Address src_addr = FIELD_ADDR(*this, kHeaderSize + index * kCharSize);
memcpy(buffer, reinterpret_cast<void*>(src_addr), length);
}
int ByteArray::get_int(int index) const {
DCHECK(index >= 0 && index < this->length() / kIntSize);
- return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
+ return READ_INT_FIELD(*this, kHeaderSize + index * kIntSize);
}
void ByteArray::set_int(int index, int value) {
DCHECK(index >= 0 && index < this->length() / kIntSize);
- WRITE_INT_FIELD(this, kHeaderSize + index * kIntSize, value);
+ WRITE_INT_FIELD(*this, kHeaderSize + index * kIntSize, value);
}
uint32_t ByteArray::get_uint32(int index) const {
DCHECK(index >= 0 && index < this->length() / kUInt32Size);
- return READ_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size);
+ return READ_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size);
}
void ByteArray::set_uint32(int index, uint32_t value) {
DCHECK(index >= 0 && index < this->length() / kUInt32Size);
- WRITE_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size, value);
+ WRITE_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size, value);
}
void ByteArray::clear_padding() {
@@ -550,14 +553,13 @@ int PodArray<T>::length() const {
}
void* FixedTypedArrayBase::external_pointer() const {
- intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
+ intptr_t ptr = READ_INTPTR_FIELD(*this, kExternalPointerOffset);
return reinterpret_cast<void*>(ptr);
}
-void FixedTypedArrayBase::set_external_pointer(void* value,
- WriteBarrierMode mode) {
+void FixedTypedArrayBase::set_external_pointer(void* value) {
intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
+ WRITE_INTPTR_FIELD(*this, kExternalPointerOffset, ptr);
}
void* FixedTypedArrayBase::DataPtr() {
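The recurring this-to-*this conversion in these hunks follows from HeapObject and its subclasses becoming value types that wrap a tagged Address: the READ_/WRITE_ field macros now take the object by value rather than a C++ this pointer. A minimal sketch of that pattern, assuming a low-bit heap tag; every name below is illustrative, not V8's actual definition:

#include <cstdint>
#include <cstring>

using Address = uintptr_t;
constexpr Address kHeapObjectTag = 1;  // assumed low-bit tag for the sketch

struct ObjectValue {
  Address ptr_;                        // tagged pointer, carried by value
  Address ptr() const { return ptr_; }
};

// FIELD_ADDR-style helper: strip the tag, add the field offset.
inline Address FieldAddr(ObjectValue obj, int offset) {
  return obj.ptr() + offset - kHeapObjectTag;
}

// READ_DOUBLE_FIELD-style helper operating on the object value.
inline double ReadDoubleField(ObjectValue obj, int offset) {
  double result;
  std::memcpy(&result, reinterpret_cast<const void*>(FieldAddr(obj, offset)),
              sizeof(result));
  return result;
}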
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index efb80a8ce4..a4c9d50e2c 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -9,6 +9,7 @@
#include "src/objects/instance-type.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
+#include "torque-generated/class-definitions-from-dsl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -97,21 +98,17 @@ class FixedArrayBase : public HeapObject {
#endif // V8_HOST_ARCH_32_BIT
// Layout description.
-#define FIXED_ARRAY_BASE_FIELDS(V) \
- V(kLengthOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- FIXED_ARRAY_BASE_FIELDS)
-#undef FIXED_ARRAY_BASE_FIELDS
+ TORQUE_GENERATED_FIXED_ARRAY_BASE_FIELDS)
+
+ static const int kHeaderSize = kSize;
protected:
// Special-purpose constructor for subclasses that have fast paths where
// their ptr() is a Smi.
inline FixedArrayBase(Address ptr, AllowInlineSmiStorage allow_smi);
- OBJECT_CONSTRUCTORS(FixedArrayBase, HeapObject)
+ OBJECT_CONSTRUCTORS(FixedArrayBase, HeapObject);
};
// FixedArray describes fixed-sized arrays with element type Object.
@@ -583,27 +580,29 @@ class PodArray : public ByteArray {
class FixedTypedArrayBase : public FixedArrayBase {
public:
// [base_pointer]: Either points to the FixedTypedArrayBase itself or nullptr.
- DECL_ACCESSORS(base_pointer, Object);
+ DECL_ACCESSORS(base_pointer, Object)
// [external_pointer]: Contains the offset between base_pointer and the start
// of the data. If the base_pointer is a nullptr, the external_pointer
// therefore points to the actual backing store.
- DECL_ACCESSORS(external_pointer, void*)
+ DECL_PRIMITIVE_ACCESSORS(external_pointer, void*)
// Dispatched behavior.
DECL_CAST(FixedTypedArrayBase)
-#define FIXED_TYPED_ARRAY_BASE_FIELDS(V) \
- V(kBasePointerOffset, kTaggedSize) \
- V(kExternalPointerOffset, kSystemPointerSize) \
- /* Header size. */ \
- V(kHeaderSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
- FIXED_TYPED_ARRAY_BASE_FIELDS)
-#undef FIXED_TYPED_ARRAY_BASE_FIELDS
-
+ TORQUE_GENERATED_FIXED_TYPED_ARRAY_BASE_FIELDS)
+ static const int kHeaderSize = kSize;
+
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
+ // is only kTaggedSize aligned but we can keep using unaligned access since
+ // both x64 and arm64 architectures (where pointer compression is supported)
+ // allow unaligned access to doubles.
+ STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
+#else
STATIC_ASSERT(IsAligned(kHeaderSize, kDoubleAlignment));
+#endif
static const int kDataOffset = kHeaderSize;
@@ -632,6 +631,14 @@ class FixedTypedArrayBase : public FixedArrayBase {
inline size_t ByteLength() const;
+ static inline intptr_t ExternalPointerValueForOnHeapArray() {
+ return FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
+ }
+
+ static inline void* ExternalPointerPtrForOnHeapArray() {
+ return reinterpret_cast<void*>(ExternalPointerValueForOnHeapArray());
+ }
+
private:
static inline int ElementSize(InstanceType type);
@@ -680,7 +687,7 @@ class FixedTypedArray : public FixedTypedArrayBase {
public: /* NOLINT */ \
typedef elementType ElementType; \
static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
- static const char* Designator() { return #type " array"; } \
+ static const char* ArrayTypeName() { return "Fixed" #Type "Array"; } \
static inline Handle<Object> ToHandle(Isolate* isolate, \
elementType scalar); \
static inline elementType defaultValue(); \
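The new ExternalPointerValueForOnHeapArray helper above encodes the invariant that, for an on-heap typed array, base_pointer (the tagged pointer to the array itself) plus external_pointer must land on the start of the data. A small worked check of that arithmetic, with kDataOffset and the tag value assumed for illustration:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kHeapObjectTag = 1;  // assumed tag value
  const uintptr_t kDataOffset = 24;    // assumed header size for this sketch
  uintptr_t object_address = 0x1000;   // where the array lives on the heap
  uintptr_t base_pointer = object_address + kHeapObjectTag;  // tagged self-ref
  intptr_t external_pointer = kDataOffset - kHeapObjectTag;
  // The tag cancels out: data starts exactly at object start + header size.
  assert(base_pointer + external_pointer == object_address + kDataOffset);
  return 0;
}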
diff --git a/deps/v8/src/objects/foreign-inl.h b/deps/v8/src/objects/foreign-inl.h
index f8a0488988..0ac9f652bb 100644
--- a/deps/v8/src/objects/foreign-inl.h
+++ b/deps/v8/src/objects/foreign-inl.h
@@ -8,6 +8,7 @@
#include "src/objects/foreign.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -26,11 +27,11 @@ bool Foreign::IsNormalized(Object value) {
}
Address Foreign::foreign_address() {
- return READ_UINTPTR_FIELD(this, kForeignAddressOffset);
+ return READ_UINTPTR_FIELD(*this, kForeignAddressOffset);
}
void Foreign::set_foreign_address(Address value) {
- WRITE_UINTPTR_FIELD(this, kForeignAddressOffset, value);
+ WRITE_UINTPTR_FIELD(*this, kForeignAddressOffset, value);
}
} // namespace internal
diff --git a/deps/v8/src/objects/foreign.h b/deps/v8/src/objects/foreign.h
index c2b96c41ca..629d549b6d 100644
--- a/deps/v8/src/objects/foreign.h
+++ b/deps/v8/src/objects/foreign.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_FOREIGN_H_
#include "src/objects/heap-object.h"
+#include "torque-generated/class-definitions-from-dsl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -27,16 +28,19 @@ class Foreign : public HeapObject {
DECL_PRINTER(Foreign)
DECL_VERIFIER(Foreign)
- // Layout description.
-#define FOREIGN_FIELDS(V) \
- V(kForeignAddressOffset, kSystemPointerSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FOREIGN_FIELDS)
-#undef FOREIGN_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_FOREIGN_FIELDS)
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): When pointer compression is enabled the
+ // kForeignAddressOffset is only kTaggedSize aligned but we can keep using
+ // unaligned access since both x64 and arm64 architectures (where pointer
+ // compression is supported) allow unaligned access to full words.
+ STATIC_ASSERT(IsAligned(kForeignAddressOffset, kTaggedSize));
+#else
STATIC_ASSERT(IsAligned(kForeignAddressOffset, kSystemPointerSize));
+#endif
+
STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset);
class BodyDescriptor;
diff --git a/deps/v8/src/objects/frame-array-inl.h b/deps/v8/src/objects/frame-array-inl.h
index bd76214464..78d08da00f 100644
--- a/deps/v8/src/objects/frame-array-inl.h
+++ b/deps/v8/src/objects/frame-array-inl.h
@@ -47,6 +47,11 @@ bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const {
return (flags & kIsAsmJsWasmFrame) != 0;
}
+bool FrameArray::IsAnyWasmFrame(int frame_ix) const {
+ return IsWasmFrame(frame_ix) || IsWasmInterpretedFrame(frame_ix) ||
+ IsAsmJsWasmFrame(frame_ix);
+}
+
int FrameArray::FrameCount() const {
const int frame_count = Smi::ToInt(get(kFrameCountIndex));
DCHECK_LE(0, frame_count);
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index e25ccd6542..60d3b6e20a 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -25,7 +25,8 @@ class Handle;
V(Function, JSFunction) \
V(Code, AbstractCode) \
V(Offset, Smi) \
- V(Flags, Smi)
+ V(Flags, Smi) \
+ V(Parameters, FixedArray)
// Container object for data collected during simple stack trace captures.
class FrameArray : public FixedArray {
@@ -39,6 +40,7 @@ class FrameArray : public FixedArray {
inline bool IsWasmFrame(int frame_ix) const;
inline bool IsWasmInterpretedFrame(int frame_ix) const;
inline bool IsAsmJsWasmFrame(int frame_ix) const;
+ inline bool IsAnyWasmFrame(int frame_ix) const;
inline int FrameCount() const;
void ShrinkToFit(Isolate* isolate);
@@ -59,7 +61,8 @@ class FrameArray : public FixedArray {
Handle<Object> receiver,
Handle<JSFunction> function,
Handle<AbstractCode> code, int offset,
- int flags);
+ int flags,
+ Handle<FixedArray> parameters);
static Handle<FrameArray> AppendWasmFrame(
Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
int wasm_function_index, wasm::WasmCode* code, int offset, int flags);
@@ -86,7 +89,9 @@ class FrameArray : public FixedArray {
static const int kFlagsOffset = 4;
- static const int kElementsPerFrame = 5;
+ static const int kParametersOffset = 5;
+
+ static const int kElementsPerFrame = 6;
// Array layout indices.
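With the Parameters column added, each frame record in the backing FixedArray grows from 5 to 6 slots. A hedged sketch of the flat indexing this implies; first_index and ParametersIndex are illustrative names, not FrameArray's API:

constexpr int kElementsPerFrame = 6;   // was 5 before the Parameters column
constexpr int kParametersOffset = 5;

// FixedArray slot holding frame frame_ix's parameters, given that frame
// records start right after the header slot(s) at first_index.
constexpr int ParametersIndex(int first_index, int frame_ix) {
  return first_index + frame_ix * kElementsPerFrame + kParametersOffset;
}
static_assert(ParametersIndex(1, 0) == 6, "frame 0, assuming one header slot");
static_assert(ParametersIndex(1, 2) == 18, "frame 2");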
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
index b71a469505..b36c4e154f 100644
--- a/deps/v8/src/objects/free-space-inl.h
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -7,7 +7,10 @@
#include "src/objects/free-space.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -24,30 +27,32 @@ int FreeSpace::Size() { return size(); }
FreeSpace FreeSpace::next() {
#ifdef DEBUG
- Heap* heap = Heap::FromWritableHeapObject(*this);
- Object free_space_map = heap->isolate()->root(RootIndex::kFreeSpaceMap);
+ Heap* heap = GetHeapFromWritableObject(*this);
+ Object free_space_map =
+ Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
DCHECK_IMPLIES(!map_slot().contains_value(free_space_map->ptr()),
!heap->deserialization_complete() &&
map_slot().contains_value(kNullAddress));
#endif
- DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
+ DCHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
return FreeSpace::unchecked_cast(*ObjectSlot(address() + kNextOffset));
}
void FreeSpace::set_next(FreeSpace next) {
#ifdef DEBUG
- Heap* heap = Heap::FromWritableHeapObject(*this);
- Object free_space_map = heap->isolate()->root(RootIndex::kFreeSpaceMap);
+ Heap* heap = GetHeapFromWritableObject(*this);
+ Object free_space_map =
+ Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
DCHECK_IMPLIES(!map_slot().contains_value(free_space_map->ptr()),
!heap->deserialization_complete() &&
map_slot().contains_value(kNullAddress));
#endif
- DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
+ DCHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
ObjectSlot(address() + kNextOffset).Relaxed_Store(next);
}
FreeSpace FreeSpace::cast(HeapObject o) {
- SLOW_DCHECK(!Heap::FromWritableHeapObject(o)->deserialization_complete() ||
+ SLOW_DCHECK(!GetHeapFromWritableObject(o)->deserialization_complete() ||
o->IsFreeSpace());
return bit_cast<FreeSpace>(o);
}
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 24eb22e018..18786d780b 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -8,7 +8,9 @@
#include "src/objects/hash-table.h"
#include "src/heap/heap.h"
+#include "src/objects-inl.h"
#include "src/objects/fixed-array-inl.h"
+#include "src/objects/heap-object-inl.h"
#include "src/roots-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -17,6 +19,36 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(HashTableBase, FixedArray)
+
+template <typename Derived, typename Shape>
+HashTable<Derived, Shape>::HashTable(Address ptr) : HashTableBase(ptr) {
+ SLOW_DCHECK(IsHashTable());
+}
+
+template <typename Derived, typename Shape>
+ObjectHashTableBase<Derived, Shape>::ObjectHashTableBase(Address ptr)
+ : HashTable<Derived, Shape>(ptr) {}
+
+ObjectHashTable::ObjectHashTable(Address ptr)
+ : ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape>(ptr) {
+ SLOW_DCHECK(IsObjectHashTable());
+}
+
+EphemeronHashTable::EphemeronHashTable(Address ptr)
+ : ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape>(ptr) {
+ SLOW_DCHECK(IsEphemeronHashTable());
+}
+
+ObjectHashSet::ObjectHashSet(Address ptr)
+ : HashTable<ObjectHashSet, ObjectHashSetShape>(ptr) {
+ SLOW_DCHECK(IsObjectHashSet());
+}
+
+CAST_ACCESSOR(ObjectHashTable)
+CAST_ACCESSOR(EphemeronHashTable)
+CAST_ACCESSOR(ObjectHashSet)
+
int HashTableBase::NumberOfElements() const {
return Smi::ToInt(get(kNumberOfElementsIndex));
}
@@ -139,7 +171,8 @@ uint32_t ObjectHashTableShape::Hash(Isolate* isolate, Handle<Object> key) {
return Smi::ToInt(key->GetHash());
}
-uint32_t ObjectHashTableShape::HashForObject(Isolate* isolate, Object other) {
+uint32_t ObjectHashTableShape::HashForObject(ReadOnlyRoots roots,
+ Object other) {
return Smi::ToInt(other->GetHash());
}
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 5bb529a121..5d5f4e0dd1 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -37,7 +37,7 @@ namespace internal {
// // Returns the hash value for key.
// static uint32_t Hash(Isolate* isolate, Key key);
// // Returns the hash value for object.
-// static uint32_t HashForObject(Isolate* isolate, Object object);
+// static uint32_t HashForObject(ReadOnlyRoots roots, Object object);
// // Convert key to an object.
// static inline Handle<Object> AsHandle(Isolate* isolate, Key key);
// // The prefix size indicates number of elements in the beginning
@@ -126,7 +126,7 @@ class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
return (last + number) & (size - 1);
}
- OBJECT_CONSTRUCTORS(HashTableBase, FixedArray)
+ OBJECT_CONSTRUCTORS(HashTableBase, FixedArray);
};
template <typename Derived, typename Shape>
@@ -150,7 +150,7 @@ class HashTable : public HashTableBase {
int FindEntry(Isolate* isolate, Key key);
// Rehashes the table in-place.
- void Rehash(Isolate* isolate);
+ void Rehash(ReadOnlyRoots roots);
// Tells whether k is a real key. The hole and undefined are not allowed
// as keys and can be used to indicate missing or deleted elements.
@@ -176,13 +176,7 @@ class HashTable : public HashTableBase {
// Don't shrink a HashTable below this capacity.
static const int kMinShrinkCapacity = 16;
- // Maximum length to create a regular HashTable (aka. non large object).
-#if V8_HOST_ARCH_PPC
- // Reduced kMaxRegularCapacity due to reduced kMaxRegularHeapObjectSize
- static const int kMaxRegularCapacity = 16384 / 2;
-#else
- static const int kMaxRegularCapacity = 16384;
-#endif
+ static const int kMaxRegularCapacity = kMaxRegularHeapObjectSize / 32;
// Returns the index for an entry (of the key)
static constexpr inline int EntryToIndex(int entry) {
@@ -233,15 +227,15 @@ class HashTable : public HashTableBase {
// Returns _expected_ if one of entries given by the first _probe_ probes is
// equal to _expected_. Otherwise, returns the entry given by the probe
// number _probe_.
- uint32_t EntryForProbe(Isolate* isolate, Object k, int probe,
+ uint32_t EntryForProbe(ReadOnlyRoots roots, Object k, int probe,
uint32_t expected);
void Swap(uint32_t entry1, uint32_t entry2, WriteBarrierMode mode);
// Rehashes this hash-table into the new table.
- void Rehash(Isolate* isolate, Derived new_table);
+ void Rehash(ReadOnlyRoots roots, Derived new_table);
- OBJECT_CONSTRUCTORS(HashTable, HashTableBase)
+ OBJECT_CONSTRUCTORS(HashTable, HashTableBase);
};
// HashTableKey is an abstract superclass for virtual key behavior.
@@ -271,7 +265,7 @@ class ObjectHashTableShape : public BaseShape<Handle<Object>> {
public:
static inline bool IsMatch(Handle<Object> key, Object other);
static inline uint32_t Hash(Isolate* isolate, Handle<Object> key);
- static inline uint32_t HashForObject(Isolate* isolate, Object object);
+ static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static inline Handle<Object> AsHandle(Handle<Object> key);
static const int kPrefixSize = 0;
static const int kEntryValueIndex = 1;
@@ -318,7 +312,7 @@ class ObjectHashTableBase : public HashTable<Derived, Shape> {
void AddEntry(int entry, Object key, Object value);
void RemoveEntry(int entry);
- OBJECT_CONSTRUCTORS(ObjectHashTableBase, HashTable<Derived, Shape>)
+ OBJECT_CONSTRUCTORS(ObjectHashTableBase, HashTable<Derived, Shape>);
};
// ObjectHashTable maps keys that are arbitrary objects to object values by
@@ -331,7 +325,7 @@ class ObjectHashTable
OBJECT_CONSTRUCTORS(
ObjectHashTable,
- ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape>)
+ ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape>);
};
class EphemeronHashTableShape : public ObjectHashTableShape {
@@ -354,7 +348,7 @@ class EphemeronHashTable
OBJECT_CONSTRUCTORS(
EphemeronHashTable,
- ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape>)
+ ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape>);
};
class ObjectHashSetShape : public ObjectHashTableShape {
@@ -374,7 +368,7 @@ class ObjectHashSet : public HashTable<ObjectHashSet, ObjectHashSetShape> {
DECL_CAST(ObjectHashSet)
OBJECT_CONSTRUCTORS(ObjectHashSet,
- HashTable<ObjectHashSet, ObjectHashSetShape>)
+ HashTable<ObjectHashSet, ObjectHashSetShape>);
};
} // namespace internal
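The new kMaxRegularCapacity formula replaces the PPC special case by deriving the limit from kMaxRegularHeapObjectSize directly. Assuming the usual 512 KB regular-object limit on non-PPC platforms (an assumption, not stated in this patch), the division reproduces the old default:

constexpr int kMaxRegularHeapObjectSize = 512 * 1024;  // assumed value
constexpr int kMaxRegularCapacity = kMaxRegularHeapObjectSize / 32;
static_assert(kMaxRegularCapacity == 16384, "matches the old constant");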
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
index 80a49d0e1d..ad82296bce 100644
--- a/deps/v8/src/objects/heap-number-inl.h
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -7,7 +7,8 @@
#include "src/objects/heap-number.h"
-#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/heap-object-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,29 +24,29 @@ CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(MutableHeapNumber)
double HeapNumberBase::value() const {
- return READ_DOUBLE_FIELD(this, kValueOffset);
+ return READ_DOUBLE_FIELD(*this, kValueOffset);
}
void HeapNumberBase::set_value(double value) {
- WRITE_DOUBLE_FIELD(this, kValueOffset, value);
+ WRITE_DOUBLE_FIELD(*this, kValueOffset, value);
}
uint64_t HeapNumberBase::value_as_bits() const {
- return READ_UINT64_FIELD(this, kValueOffset);
+ return READ_UINT64_FIELD(*this, kValueOffset);
}
void HeapNumberBase::set_value_as_bits(uint64_t bits) {
- WRITE_UINT64_FIELD(this, kValueOffset, bits);
+ WRITE_UINT64_FIELD(*this, kValueOffset, bits);
}
int HeapNumberBase::get_exponent() {
- return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
+ return ((READ_INT_FIELD(*this, kExponentOffset) & kExponentMask) >>
kExponentShift) -
kExponentBias;
}
int HeapNumberBase::get_sign() {
- return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
+ return READ_INT_FIELD(*this, kExponentOffset) & kSignMask;
}
} // namespace internal
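get_exponent() above reads the biased IEEE-754 exponent and subtracts the bias. The same computation in portable C++, for illustration only (V8 reads a 32-bit field at kExponentOffset; this sketch uses the full 64-bit pattern):

#include <cstdint>
#include <cstring>

int UnbiasedExponent(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  const int kExponentBias = 1023;                       // IEEE-754 double bias
  int biased = static_cast<int>((bits >> 52) & 0x7ff);  // 11 exponent bits
  return biased - kExponentBias;
}
// UnbiasedExponent(8.0) == 3, since 8.0 == 1.0 * 2^3.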
diff --git a/deps/v8/src/objects/heap-object-inl.h b/deps/v8/src/objects/heap-object-inl.h
index 169b1acd87..fbdcb0f6ec 100644
--- a/deps/v8/src/objects/heap-object-inl.h
+++ b/deps/v8/src/objects/heap-object-inl.h
@@ -8,6 +8,8 @@
#include "src/objects/heap-object.h"
#include "src/heap/heap-write-barrier-inl.h"
+// TODO(jkummerow): Get rid of this by moving NROSO::GetIsolate elsewhere.
+#include "src/isolate.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,6 +17,9 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object)
+CAST_ACCESSOR(HeapObject)
+
HeapObject::HeapObject(Address ptr, AllowInlineSmiStorage allow_smi)
: Object(ptr) {
SLOW_DCHECK(
@@ -32,7 +37,7 @@ Heap* NeverReadOnlySpaceObject::GetHeap(const HeapObject object) {
}
Isolate* NeverReadOnlySpaceObject::GetIsolate(const HeapObject object) {
- return GetHeap(object)->isolate();
+ return Isolate::FromHeap(GetHeap(object));
}
} // namespace internal
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index 61817c2d61..69a8463943 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_HEAP_OBJECT_H_
#include "src/globals.h"
+#include "src/roots.h"
#include "src/objects.h"
@@ -62,7 +63,7 @@ class HeapObject : public Object {
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
- V8_INLINE bool IsExternal(Isolate* isolate) const;
+ bool IsExternal(Isolate* isolate) const;
// Oddball checks are faster when they are raw pointer comparisons, so the
// isolate/read-only roots overloads should be preferred where possible.
@@ -121,7 +122,7 @@ class HeapObject : public Object {
// Given a heap object's map pointer, returns the heap size in bytes
// Useful when the map pointer field is used for other purposes.
// GC internal.
- inline int SizeFromMap(Map map) const;
+ V8_EXPORT_PRIVATE int SizeFromMap(Map map) const;
// Returns the field at offset in obj, as a read/write Object reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
@@ -164,7 +165,7 @@ class HeapObject : public Object {
// Whether the object needs rehashing. That is the case if the object's
// content depends on FLAG_hash_seed. When the object is deserialized into
// a heap with a different hash seed, these objects need to adapt.
- inline bool NeedsRehashing() const;
+ bool NeedsRehashing() const;
// Rehashing support is not implemented for all objects that need rehashing.
// With objects that need rehashing but cannot be rehashed, rehashing has to
@@ -172,7 +173,7 @@ class HeapObject : public Object {
bool CanBeRehashed() const;
// Rehash the object based on the layout inferred from its map.
- void RehashBasedOnMap(Isolate* isolate);
+ void RehashBasedOnMap(ReadOnlyRoots roots);
// Layout description.
#define HEAP_OBJECT_FIELDS(V) \
diff --git a/deps/v8/src/objects/instance-type-inl.h b/deps/v8/src/objects/instance-type-inl.h
index 3fb21ed841..5925c6aa92 100644
--- a/deps/v8/src/objects/instance-type-inl.h
+++ b/deps/v8/src/objects/instance-type-inl.h
@@ -17,7 +17,7 @@ namespace internal {
namespace InstanceTypeChecker {
// Define type checkers for classes with single instance type.
-INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER);
+INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER)
#define TYPED_ARRAY_INSTANCE_TYPE_CHECKER(Type, type, TYPE, ctype) \
INSTANCE_TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
@@ -35,7 +35,7 @@ STRUCT_LIST(STRUCT_INSTANCE_TYPE_CHECKER)
V8_INLINE bool Is##type(InstanceType instance_type) { \
return IsInRange(instance_type, first_instance_type, last_instance_type); \
}
-INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE);
+INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE)
#undef INSTANCE_TYPE_CHECKER_RANGE
V8_INLINE bool IsFixedArrayBase(InstanceType instance_type) {
@@ -67,7 +67,7 @@ V8_INLINE bool IsJSReceiver(InstanceType instance_type) {
// TODO(v8:7786): For instance types that have a single map instance on the
// roots, and when that map is embedded in the binary, compare against the map
// pointer rather than looking up the instance type.
-INSTANCE_TYPE_CHECKERS(TYPE_CHECKER);
+INSTANCE_TYPE_CHECKERS(TYPE_CHECKER)
#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype) \
TYPE_CHECKER(Fixed##Type##Array)
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index 3aebb934ee..b121e9be2c 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -15,15 +15,15 @@ namespace v8 {
namespace internal {
// We use the full 16 bits of the instance_type field to encode heap object
-// instance types. All the high-order bits (bit 7-15) are cleared if the object
+// instance types. All the high-order bits (bit 6-15) are cleared if the object
// is a string, and contain set bits if it is not a string.
-const uint32_t kIsNotStringMask = 0xff80;
+const uint32_t kIsNotStringMask = 0xffc0;
const uint32_t kStringTag = 0x0;
-// Bit 6 indicates that the object is an internalized string (if set) or not.
-// Bit 7 has to be clear as well.
-const uint32_t kIsNotInternalizedMask = 0x40;
-const uint32_t kNotInternalizedTag = 0x40;
+// Bit 5 indicates that the object is an internalized string (if not set) or
+// not (if set). Bit 6 has to be clear as well.
+const uint32_t kIsNotInternalizedMask = 0x20;
+const uint32_t kNotInternalizedTag = 0x20;
const uint32_t kInternalizedTag = 0x0;
// If bit 6 is clear then bit 3 indicates whether the string consists of
@@ -52,15 +52,10 @@ STATIC_ASSERT((kSlicedStringTag & kIsIndirectStringMask) ==
kIsIndirectStringTag); // NOLINT
STATIC_ASSERT((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
-// If bit 7 is clear, then bit 4 indicates whether this two-byte
-// string actually contains one byte data.
-const uint32_t kOneByteDataHintMask = 0x10;
-const uint32_t kOneByteDataHintTag = 0x10;
-
-// If bit 7 is clear and string representation indicates an external string,
+// If bit 6 is clear and string representation indicates an external string,
// then bit 4 indicates whether the data pointer is cached.
-const uint32_t kUncachedExternalStringMask = 0x20;
-const uint32_t kUncachedExternalStringTag = 0x20;
+const uint32_t kUncachedExternalStringMask = 0x10;
+const uint32_t kUncachedExternalStringTag = 0x10;
// A ConsString with an empty string as the right side is a candidate
// for being shortcut by the garbage collector. We don't allocate any
@@ -86,18 +81,12 @@ enum InstanceType : uint16_t {
kTwoByteStringTag | kExternalStringTag | kInternalizedTag,
EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
kOneByteStringTag | kExternalStringTag | kInternalizedTag,
- EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag |
- kInternalizedTag,
UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE =
EXTERNAL_INTERNALIZED_STRING_TYPE | kUncachedExternalStringTag |
kInternalizedTag,
UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kUncachedExternalStringTag |
kInternalizedTag,
- UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
- kUncachedExternalStringTag | kInternalizedTag,
STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
ONE_BYTE_STRING_TYPE =
ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
@@ -112,16 +101,10 @@ enum InstanceType : uint16_t {
EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
EXTERNAL_ONE_BYTE_STRING_TYPE =
EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
- kNotInternalizedTag,
UNCACHED_EXTERNAL_STRING_TYPE =
UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE =
UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
- UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
- kNotInternalizedTag,
THIN_STRING_TYPE = kTwoByteStringTag | kThinStringTag | kNotInternalizedTag,
THIN_ONE_BYTE_STRING_TYPE =
kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
@@ -129,7 +112,7 @@ enum InstanceType : uint16_t {
// Non-string names
SYMBOL_TYPE =
1 + (kIsNotInternalizedMask | kUncachedExternalStringMask |
- kOneByteDataHintMask | kStringEncodingMask |
+ kStringEncodingMask |
kStringRepresentationMask), // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
// Other primitives (cannot contain non-map-word pointers to heap objects).
@@ -171,6 +154,7 @@ enum InstanceType : uint16_t {
ALLOCATION_MEMENTO_TYPE,
ASM_WASM_DATA_TYPE,
ASYNC_GENERATOR_REQUEST_TYPE,
+ CLASS_POSITIONS_TYPE,
DEBUG_INFO_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
FUNCTION_TEMPLATE_RARE_DATA_TYPE,
@@ -184,6 +168,7 @@ enum InstanceType : uint16_t {
PROTOTYPE_INFO_TYPE,
SCRIPT_TYPE,
STACK_FRAME_INFO_TYPE,
+ STACK_TRACE_FRAME_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
@@ -196,7 +181,7 @@ enum InstanceType : uint16_t {
PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
- WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
+ FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
ALLOCATION_SITE_TYPE,
EMBEDDER_DATA_ARRAY_TYPE,
@@ -250,6 +235,7 @@ enum InstanceType : uint16_t {
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
WEAK_ARRAY_LIST_TYPE,
+ WEAK_CELL_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
@@ -292,10 +278,9 @@ enum InstanceType : uint16_t {
JS_SET_KEY_VALUE_ITERATOR_TYPE,
JS_SET_VALUE_ITERATOR_TYPE,
JS_STRING_ITERATOR_TYPE,
- JS_WEAK_CELL_TYPE,
JS_WEAK_REF_TYPE,
- JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE,
- JS_WEAK_FACTORY_TYPE,
+ JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE,
+ JS_FINALIZATION_GROUP_TYPE,
JS_WEAK_MAP_TYPE,
JS_WEAK_SET_TYPE,
@@ -354,7 +339,7 @@ enum InstanceType : uint16_t {
LAST_CONTEXT_TYPE = WITH_CONTEXT_TYPE,
// Boundaries for testing if given HeapObject is a subclass of Microtask.
FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
- LAST_MICROTASK_TYPE = WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE,
+ LAST_MICROTASK_TYPE = FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE,
// Boundaries for testing for a fixed typed array.
FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_BIGUINT64_ARRAY_TYPE,
@@ -413,6 +398,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(CallHandlerInfo, CALL_HANDLER_INFO_TYPE) \
V(Cell, CELL_TYPE) \
V(Code, CODE_TYPE) \
+ V(CachedTemplateObject, TUPLE3_TYPE) \
V(CodeDataContainer, CODE_DATA_CONTAINER_TYPE) \
V(CoverageInfo, FIXED_ARRAY_TYPE) \
V(DescriptorArray, DESCRIPTOR_ARRAY_TYPE) \
@@ -455,9 +441,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(JSStringIterator, JS_STRING_ITERATOR_TYPE) \
V(JSTypedArray, JS_TYPED_ARRAY_TYPE) \
V(JSValue, JS_VALUE_TYPE) \
- V(JSWeakFactory, JS_WEAK_FACTORY_TYPE) \
- V(JSWeakFactoryCleanupIterator, JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE) \
- V(JSWeakCell, JS_WEAK_CELL_TYPE) \
+ V(JSFinalizationGroup, JS_FINALIZATION_GROUP_TYPE) \
+ V(JSFinalizationGroupCleanupIterator, \
+ JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE) \
V(JSWeakMap, JS_WEAK_MAP_TYPE) \
V(JSWeakRef, JS_WEAK_REF_TYPE) \
V(JSWeakSet, JS_WEAK_SET_TYPE) \
@@ -497,7 +483,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(WasmMemoryObject, WASM_MEMORY_TYPE) \
V(WasmModuleObject, WASM_MODULE_TYPE) \
V(WasmTableObject, WASM_TABLE_TYPE) \
- V(WeakArrayList, WEAK_ARRAY_LIST_TYPE)
+ V(WeakArrayList, WEAK_ARRAY_LIST_TYPE) \
+ V(WeakCell, WEAK_CELL_TYPE)
#ifdef V8_INTL_SUPPORT
#define INSTANCE_TYPE_CHECKERS_SINGLE(V) \
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index dfabb29af9..b990a85bed 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -29,6 +29,7 @@
#include "unicode/brkiter.h"
#include "unicode/calendar.h"
#include "unicode/coll.h"
+#include "unicode/datefmt.h"
#include "unicode/decimfmt.h"
#include "unicode/locid.h"
#include "unicode/normalizer2.h"
@@ -254,7 +255,7 @@ MaybeHandle<String> LocaleConvertCase(Isolate* isolate, Handle<String> s,
// Called from TF builtins.
String Intl::ConvertOneByteToLower(String src, String dst) {
DCHECK_EQ(src->length(), dst->length());
- DCHECK(src->HasOnlyOneByteChars());
+ DCHECK(src->IsOneByteRepresentation());
DCHECK(src->IsFlat());
DCHECK(dst->IsSeqOneByteString());
@@ -298,7 +299,7 @@ String Intl::ConvertOneByteToLower(String src, String dst) {
}
MaybeHandle<String> Intl::ConvertToLower(Isolate* isolate, Handle<String> s) {
- if (!s->HasOnlyOneByteChars()) {
+ if (!s->IsOneByteRepresentation()) {
// Use a slower implementation for strings with characters beyond U+00FF.
return LocaleConvertCase(isolate, s, false, "");
}
@@ -330,7 +331,7 @@ MaybeHandle<String> Intl::ConvertToLower(Isolate* isolate, Handle<String> s) {
MaybeHandle<String> Intl::ConvertToUpper(Isolate* isolate, Handle<String> s) {
int32_t length = s->length();
- if (s->HasOnlyOneByteChars() && length > 0) {
+ if (s->IsOneByteRepresentation() && length > 0) {
Handle<SeqOneByteString> result =
isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
@@ -552,7 +553,8 @@ std::string DefaultLocale(Isolate* isolate) {
if (isolate->default_locale().empty()) {
icu::Locale default_locale;
// Translate ICU's fallback locale to a well-known locale.
- if (strcmp(default_locale.getName(), "en_US_POSIX") == 0) {
+ if (strcmp(default_locale.getName(), "en_US_POSIX") == 0 ||
+ strcmp(default_locale.getName(), "c") == 0) {
isolate->set_default_locale("en-US");
} else {
// Set the locale
@@ -729,11 +731,20 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
}
std::string locale(locale_str->ToCString().get());
+ return Intl::CanonicalizeLanguageTag(isolate, locale);
+}
+
+Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
+ const std::string& locale_in) {
+ std::string locale = locale_in;
+
if (locale.length() == 0 ||
!String::IsAscii(locale.data(), static_cast<int>(locale.length()))) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
- NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
+ NewRangeError(
+ MessageTemplate::kInvalidLanguageTag,
+ isolate->factory()->NewStringFromAsciiChecked(locale.c_str())),
Nothing<std::string>());
}
@@ -774,18 +785,22 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
if (U_FAILURE(error) || icu_locale.isBogus()) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
- NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
+ NewRangeError(
+ MessageTemplate::kInvalidLanguageTag,
+ isolate->factory()->NewStringFromAsciiChecked(locale.c_str())),
Nothing<std::string>());
}
Maybe<std::string> maybe_to_language_tag = Intl::ToLanguageTag(icu_locale);
if (maybe_to_language_tag.IsNothing()) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
- NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
+ NewRangeError(
+ MessageTemplate::kInvalidLanguageTag,
+ isolate->factory()->NewStringFromAsciiChecked(locale.c_str())),
Nothing<std::string>());
}
- return Intl::ToLanguageTag(icu_locale);
+ return maybe_to_language_tag;
}
Maybe<std::vector<std::string>> Intl::CanonicalizeLocaleList(
@@ -994,11 +1009,14 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
Handle<Object> num,
Handle<Object> locales,
Handle<Object> options) {
- Handle<Object> number_obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, number_obj,
- Object::ToNumber(isolate, num), String);
-
- double number = number_obj->Number();
+ Handle<Object> numeric_obj;
+ if (FLAG_harmony_intl_bigint) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, numeric_obj,
+ Object::ToNumeric(isolate, num), String);
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, numeric_obj,
+ Object::ToNumber(isolate, num), String);
+ }
// We only cache the instance when both locales and options are undefined,
// as that is the only case when the specified side-effects of examining
@@ -1011,8 +1029,8 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
Isolate::ICUObjectCacheType::kDefaultNumberFormat));
// We may use the cached icu::NumberFormat for a fast path.
if (cached_number_format != nullptr) {
- return JSNumberFormat::FormatNumber(isolate, *cached_number_format,
- number);
+ return JSNumberFormat::FormatNumeric(isolate, *cached_number_format,
+ numeric_obj);
}
}
@@ -1036,7 +1054,8 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
// Return FormatNumber(numberFormat, x).
icu::NumberFormat* icu_number_format =
number_format->icu_number_format()->raw();
- return JSNumberFormat::FormatNumber(isolate, *icu_number_format, number);
+ return JSNumberFormat::FormatNumeric(isolate, *icu_number_format,
+ numeric_obj);
}
namespace {
@@ -1710,9 +1729,9 @@ MaybeHandle<String> Intl::Normalize(Isolate* isolate, Handle<String> string,
// functionality in a straightforward way.
class ICUTimezoneCache : public base::TimezoneCache {
public:
- ICUTimezoneCache() : timezone_(nullptr) { Clear(); }
+ ICUTimezoneCache() : timezone_(nullptr) { Clear(TimeZoneDetection::kSkip); }
- ~ICUTimezoneCache() override { Clear(); };
+ ~ICUTimezoneCache() override { Clear(TimeZoneDetection::kSkip); }
const char* LocalTimezone(double time_ms) override;
@@ -1720,7 +1739,7 @@ class ICUTimezoneCache : public base::TimezoneCache {
double LocalTimeOffset(double time_ms, bool is_utc) override;
- void Clear() override;
+ void Clear(TimeZoneDetection time_zone_detection) override;
private:
icu::TimeZone* GetTimeZone();
@@ -1793,11 +1812,14 @@ double ICUTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
return raw_offset + dst_offset;
}
-void ICUTimezoneCache::Clear() {
+void ICUTimezoneCache::Clear(TimeZoneDetection time_zone_detection) {
delete timezone_;
timezone_ = nullptr;
timezone_name_.clear();
dst_timezone_name_.clear();
+ if (time_zone_detection == TimeZoneDetection::kRedetect) {
+ icu::TimeZone::adoptDefault(icu::TimeZone::detectHostTimeZone());
+ }
}
base::TimezoneCache* Intl::CreateTimeZoneCache() {
@@ -1842,5 +1864,17 @@ Intl::HourCycle Intl::ToHourCycle(const std::string& hc) {
return Intl::HourCycle::kUndefined;
}
+const std::set<std::string>& Intl::GetAvailableLocalesForLocale() {
+ static base::LazyInstance<Intl::AvailableLocales<icu::Locale>>::type
+ available_locales = LAZY_INSTANCE_INITIALIZER;
+ return available_locales.Pointer()->Get();
+}
+
+const std::set<std::string>& Intl::GetAvailableLocalesForDateFormat() {
+ static base::LazyInstance<Intl::AvailableLocales<icu::DateFormat>>::type
+ available_locales = LAZY_INSTANCE_INITIALIZER;
+ return available_locales.Pointer()->Get();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 4165ec4a77..4dcafab793 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -132,6 +132,9 @@ class Intl {
static Maybe<std::string> CanonicalizeLanguageTag(Isolate* isolate,
Handle<Object> locale_in);
+ static Maybe<std::string> CanonicalizeLanguageTag(Isolate* isolate,
+ const std::string& locale);
+
// https://tc39.github.io/ecma402/#sec-canonicalizelocalelist
// {only_return_one_result} is an optimization for callers that only
// care about the first result.
@@ -247,6 +250,30 @@ class Intl {
const std::vector<std::string>& requested_locales, MatcherOption options,
const std::set<std::string>& relevant_extension_keys);
+ // A helper template to implement the GetAvailableLocales() functions.
+ // Usage in src/objects/js-XXX.cc:
+ //
+ // const std::set<std::string>& JSXxx::GetAvailableLocales() {
+ // static base::LazyInstance<Intl::AvailableLocales<icu::YYY>>::type
+ // available_locales = LAZY_INSTANCE_INITIALIZER;
+ // return available_locales.Pointer()->Get();
+ // }
+ template <typename T>
+ class AvailableLocales {
+ public:
+ AvailableLocales() {
+ int32_t num_locales = 0;
+ const icu::Locale* icu_available_locales =
+ T::getAvailableLocales(num_locales);
+ set = Intl::BuildLocaleSet(icu_available_locales, num_locales);
+ }
+ virtual ~AvailableLocales() {}
+ const std::set<std::string>& Get() const { return set; }
+
+ private:
+ std::set<std::string> set;
+ };
+
// Utility function to set text to BreakIterator.
static Managed<icu::UnicodeString> SetTextToBreakIterator(
Isolate* isolate, Handle<String> text,
@@ -264,6 +291,10 @@ class Intl {
static const uint8_t* ToLatin1LowerTable();
static String ConvertOneByteToLower(String src, String dst);
+
+ static const std::set<std::string>& GetAvailableLocalesForLocale();
+
+ static const std::set<std::string>& GetAvailableLocalesForDateFormat();
};
} // namespace internal
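The AvailableLocales template above memoizes one locale set per ICU class via base::LazyInstance. A self-contained sketch of the same idea using a C++11 function-local static, which is likewise initialized once and thread-safely; the placeholder body stands in for the ICU getAvailableLocales() call:

#include <set>
#include <string>

template <typename T>
const std::set<std::string>& GetAvailableLocalesFor() {
  static const std::set<std::string> locales = [] {
    std::set<std::string> s;
    // V8 loops over T::getAvailableLocales(num_locales) here; this sketch
    // just seeds a placeholder entry.
    s.insert("en-US");
    return s;
  }();
  return locales;
}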
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 9ac72ae111..b1f3ed4ce2 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -7,7 +7,9 @@
#include "src/objects/js-array-buffer.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/js-objects-inl.h"
#include "src/wasm/wasm-engine.h"
// Has to be the last include (doesn't have include guards):
@@ -27,21 +29,21 @@ CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSDataView)
size_t JSArrayBuffer::byte_length() const {
- return READ_UINTPTR_FIELD(this, kByteLengthOffset);
+ return READ_UINTPTR_FIELD(*this, kByteLengthOffset);
}
void JSArrayBuffer::set_byte_length(size_t value) {
- WRITE_UINTPTR_FIELD(this, kByteLengthOffset, value);
+ WRITE_UINTPTR_FIELD(*this, kByteLengthOffset, value);
}
void* JSArrayBuffer::backing_store() const {
- intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset);
+ intptr_t ptr = READ_INTPTR_FIELD(*this, kBackingStoreOffset);
return reinterpret_cast<void*>(ptr);
}
void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kBackingStoreOffset, ptr);
+ WRITE_INTPTR_FIELD(*this, kBackingStoreOffset, ptr);
}
size_t JSArrayBuffer::allocation_length() const {
@@ -95,11 +97,11 @@ void JSArrayBuffer::clear_padding() {
}
void JSArrayBuffer::set_bit_field(uint32_t bits) {
- WRITE_UINT32_FIELD(this, kBitFieldOffset, bits);
+ WRITE_UINT32_FIELD(*this, kBitFieldOffset, bits);
}
uint32_t JSArrayBuffer::bit_field() const {
- return READ_UINT32_FIELD(this, kBitFieldOffset);
+ return READ_UINT32_FIELD(*this, kBitFieldOffset);
}
// |bit_field| fields.
@@ -115,19 +117,19 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_growable,
JSArrayBuffer::IsGrowableBit)
size_t JSArrayBufferView::byte_offset() const {
- return READ_UINTPTR_FIELD(this, kByteOffsetOffset);
+ return READ_UINTPTR_FIELD(*this, kByteOffsetOffset);
}
void JSArrayBufferView::set_byte_offset(size_t value) {
- WRITE_UINTPTR_FIELD(this, kByteOffsetOffset, value);
+ WRITE_UINTPTR_FIELD(*this, kByteOffsetOffset, value);
}
size_t JSArrayBufferView::byte_length() const {
- return READ_UINTPTR_FIELD(this, kByteLengthOffset);
+ return READ_UINTPTR_FIELD(*this, kByteLengthOffset);
}
void JSArrayBufferView::set_byte_length(size_t value) {
- WRITE_UINTPTR_FIELD(this, kByteLengthOffset, value);
+ WRITE_UINTPTR_FIELD(*this, kByteLengthOffset, value);
}
ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
@@ -136,7 +138,7 @@ bool JSArrayBufferView::WasDetached() const {
return JSArrayBuffer::cast(buffer())->was_detached();
}
-Object JSTypedArray::length() const { return READ_FIELD(this, kLengthOffset); }
+Object JSTypedArray::length() const { return READ_FIELD(*this, kLengthOffset); }
size_t JSTypedArray::length_value() const {
double val = length()->Number();
@@ -148,7 +150,7 @@ size_t JSTypedArray::length_value() const {
}
void JSTypedArray::set_length(Object value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kLengthOffset, value);
+ WRITE_FIELD(*this, kLengthOffset, value);
CONDITIONAL_WRITE_BARRIER(*this, kLengthOffset, value, mode);
}
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index e8c0c33a64..b5a8ab79a0 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -210,7 +210,7 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
Handle<JSTypedArray> o,
Handle<Object> key,
PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+ Maybe<ShouldThrow> should_throw) {
// 1. Assert: IsPropertyKey(P) is true.
DCHECK(key->IsName() || key->IsNumber());
// 2. Assert: O is an Object that has a [[ViewedArrayBuffer]] internal slot.
@@ -226,19 +226,19 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
// FIXME: the standard allows up to 2^53 elements.
uint32_t index;
if (numeric_index->IsMinusZero() || !numeric_index->ToUint32(&index)) {
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
}
// 3b iv. Let length be O.[[ArrayLength]].
size_t length = o->length_value();
// 3b v. If numericIndex ≄ length, return false.
if (o->WasDetached() || index >= length) {
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
}
// 3b vi. If IsAccessorDescriptor(Desc) is true, return false.
if (PropertyDescriptor::IsAccessorDescriptor(desc)) {
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed, key));
}
// 3b vii. If Desc has a [[Configurable]] field and if
@@ -250,7 +250,7 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
if ((desc->has_configurable() && desc->configurable()) ||
(desc->has_enumerable() && !desc->enumerable()) ||
(desc->has_writable() && !desc->writable())) {
- RETURN_FAILURE(isolate, should_throw,
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed, key));
}
// 3b x. If Desc has a [[Value]] field, then
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 5628b6acba..3cc42e61f9 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -189,7 +189,7 @@ class JSTypedArray : public JSArrayBufferView {
// ES6 9.4.5.3
V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
- PropertyDescriptor* desc, ShouldThrow should_throw);
+ PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw);
DECL_CAST(JSTypedArray)
@@ -210,10 +210,10 @@ class JSTypedArray : public JSArrayBufferView {
DECL_VERIFIER(JSTypedArray)
// Layout description.
-#define JS_TYPED_ARRAY_FIELDS(V) \
- /* Raw data fields. */ \
- V(kLengthOffset, kSystemPointerSize) \
- /* Header size. */ \
+#define JS_TYPED_ARRAY_FIELDS(V) \
+ /* Raw data fields. */ \
+ V(kLengthOffset, kTaggedSize) \
+ /* Header size. */ \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSArrayBufferView::kHeaderSize,
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 7f9710915d..31c8735f62 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -63,11 +63,11 @@ ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset)
IterationKind JSArrayIterator::kind() const {
return static_cast<IterationKind>(
- Smi::cast(READ_FIELD(this, kKindOffset))->value());
+ Smi::cast(READ_FIELD(*this, kKindOffset))->value());
}
void JSArrayIterator::set_kind(IterationKind kind) {
- WRITE_FIELD(this, kKindOffset, Smi::FromInt(static_cast<int>(kind)));
+ WRITE_FIELD(*this, kKindOffset, Smi::FromInt(static_cast<int>(kind)));
}
} // namespace internal
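kind() and set_kind() above round-trip an enum through a Smi stored in a tagged field. A rough sketch of that round trip using the conventional 32-bit Smi encoding (value shifted left by one, low bit zero); the real Smi class also handles 64-bit and pointer-compressed layouts:

#include <cassert>
#include <cstdint>

constexpr intptr_t SmiFromInt(int value) {
  return static_cast<intptr_t>(value) << 1;  // low tag bit stays 0
}
constexpr int SmiToInt(intptr_t smi) { return static_cast<int>(smi >> 1); }

int main() {
  enum class IterationKind { kKeys, kValues, kEntries };
  intptr_t stored = SmiFromInt(static_cast<int>(IterationKind::kValues));
  assert(static_cast<IterationKind>(SmiToInt(stored)) == IterationKind::kValues);
  return 0;
}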
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 9554253bad..a85af97e4a 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -55,14 +55,14 @@ class JSArray : public JSObject {
// ES6 9.4.2.1
V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
Isolate* isolate, Handle<JSArray> o, Handle<Object> name,
- PropertyDescriptor* desc, ShouldThrow should_throw);
+ PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw);
static bool AnythingToArrayLength(Isolate* isolate,
Handle<Object> length_object,
uint32_t* output);
V8_WARN_UNUSED_RESULT static Maybe<bool> ArraySetLength(
Isolate* isolate, Handle<JSArray> a, PropertyDescriptor* desc,
- ShouldThrow should_throw);
+ Maybe<ShouldThrow> should_throw);
// Support for Array.prototype.join().
// Writes a fixed array of strings and separators to a single destination
@@ -105,8 +105,8 @@ class JSArray : public JSObject {
// Number of element slots to pre-allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JSARRAY_FIELDS)
-#undef JS_ARRAY_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSARRAY_FIELDS)
static const int kLengthDescriptorIndex = 0;
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
index 294c893065..7c22be25f6 100644
--- a/deps/v8/src/objects/js-break-iterator-inl.h
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -22,11 +22,11 @@ OBJECT_CONSTRUCTORS_IMPL(JSV8BreakIterator, JSObject)
inline void JSV8BreakIterator::set_type(Type type) {
DCHECK_GT(JSV8BreakIterator::Type::COUNT, type);
- WRITE_FIELD(this, kTypeOffset, Smi::FromInt(static_cast<int>(type)));
+ WRITE_FIELD(*this, kTypeOffset, Smi::FromInt(static_cast<int>(type)));
}
inline JSV8BreakIterator::Type JSV8BreakIterator::type() const {
- Object value = READ_FIELD(this, kTypeOffset);
+ Object value = READ_FIELD(*this, kTypeOffset);
return static_cast<JSV8BreakIterator::Type>(Smi::ToInt(value));
}
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
index 505934f8e9..4516b34aac 100644
--- a/deps/v8/src/objects/js-break-iterator.cc
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -186,11 +186,10 @@ String JSV8BreakIterator::BreakType(Isolate* isolate,
return ReadOnlyRoots(isolate).unknown_string();
}
-std::set<std::string> JSV8BreakIterator::GetAvailableLocales() {
- int32_t num_locales = 0;
- const icu::Locale* icu_available_locales =
- icu::BreakIterator::getAvailableLocales(num_locales);
- return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+const std::set<std::string>& JSV8BreakIterator::GetAvailableLocales() {
+ static base::LazyInstance<Intl::AvailableLocales<icu::BreakIterator>>::type
+ available_locales = LAZY_INSTANCE_INITIALIZER;
+ return available_locales.Pointer()->Get();
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index edba90df7a..03d036c957 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -35,7 +35,7 @@ class JSV8BreakIterator : public JSObject {
static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator);
- static std::set<std::string> GetAvailableLocales();
+ static const std::set<std::string>& GetAvailableLocales();
static void AdoptText(Isolate* isolate,
Handle<JSV8BreakIterator> break_iterator,
@@ -87,7 +87,7 @@ class JSV8BreakIterator : public JSObject {
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, BREAK_ITERATOR_FIELDS)
#undef BREAK_ITERATOR_FIELDS
- OBJECT_CONSTRUCTORS(JSV8BreakIterator, JSObject)
+ OBJECT_CONSTRUCTORS(JSV8BreakIterator, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
index 693021e26a..b6fa239c31 100644
--- a/deps/v8/src/objects/js-collator-inl.h
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -21,9 +21,9 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSCollator, JSObject)
ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kICUCollatorOffset)
-ACCESSORS(JSCollator, bound_compare, Object, kBoundCompareOffset);
+ACCESSORS(JSCollator, bound_compare, Object, kBoundCompareOffset)
-CAST_ACCESSOR(JSCollator);
+CAST_ACCESSOR(JSCollator)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index 3c2efa93db..dd927f02dc 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -45,7 +45,7 @@ void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
// This is a brand new JSObject that shouldn't already have the same
// key so this shouldn't fail.
CHECK(JSReceiver::CreateDataProperty(isolate, options, key, value_str,
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
}
@@ -56,7 +56,7 @@ void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
// This is a brand new JSObject that shouldn't already have the same
// key so this shouldn't fail.
CHECK(JSReceiver::CreateDataProperty(isolate, options, key, value_obj,
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
}
@@ -484,11 +484,10 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
return collator;
}
-std::set<std::string> JSCollator::GetAvailableLocales() {
- int32_t num_locales = 0;
- const icu::Locale* icu_available_locales =
- icu::Collator::getAvailableLocales(num_locales);
- return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+const std::set<std::string>& JSCollator::GetAvailableLocales() {
+ static base::LazyInstance<Intl::AvailableLocales<icu::Collator>>::type
+ available_locales = LAZY_INSTANCE_INITIALIZER;
+ return available_locales.Pointer()->Get();
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index 0e8cec5f7b..f338a5cfb5 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -40,7 +40,7 @@ class JSCollator : public JSObject {
static Handle<JSObject> ResolvedOptions(Isolate* isolate,
Handle<JSCollator> collator);
- static std::set<std::string> GetAvailableLocales();
+ static const std::set<std::string>& GetAvailableLocales();
DECL_CAST(JSCollator)
DECL_PRINTER(JSCollator)
@@ -57,7 +57,7 @@ class JSCollator : public JSObject {
#undef JS_COLLATOR_FIELDS
DECL_ACCESSORS(icu_collator, Managed<icu::Collator>)
- DECL_ACCESSORS(bound_compare, Object);
+ DECL_ACCESSORS(bound_compare, Object)
OBJECT_CONSTRUCTORS(JSCollator, JSObject);
};
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
index c216b3daff..79b55fda4a 100644
--- a/deps/v8/src/objects/js-collection-inl.h
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -7,7 +7,11 @@
#include "src/objects/js-collection.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/heap-object-inl.h"
+#include "src/objects/ordered-hash-table-inl.h"
+#include "src/roots-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index 7c2f265ae7..5a685a8c78 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -19,14 +19,9 @@ class JSCollection : public JSObject {
// [table]: the backing hash table
DECL_ACCESSORS(table, Object)
-// Layout description.
-#define JS_COLLECTION_FIELDS(V) \
- V(kTableOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_COLLECTION_FIELDS)
-#undef JS_COLLECTION_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSCOLLECTION_FIELDS)
static const int kAddFunctionDescriptorIndex = 3;
diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h
index a2ae92b112..6bcec9e301 100644
--- a/deps/v8/src/objects/js-date-time-format-inl.h
+++ b/deps/v8/src/objects/js-date-time-format-inl.h
@@ -20,10 +20,10 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSDateTimeFormat, JSObject)
-ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kICULocaleOffset);
+ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kICULocaleOffset)
ACCESSORS(JSDateTimeFormat, icu_simple_date_format,
Managed<icu::SimpleDateFormat>, kICUSimpleDateFormatOffset)
-ACCESSORS(JSDateTimeFormat, bound_format, Object, kBoundFormatOffset);
+ACCESSORS(JSDateTimeFormat, bound_format, Object, kBoundFormatOffset)
SMI_ACCESSORS(JSDateTimeFormat, flags, kFlagsOffset)
inline void JSDateTimeFormat::set_hour_cycle(Intl::HourCycle hour_cycle) {
@@ -36,7 +36,29 @@ inline Intl::HourCycle JSDateTimeFormat::hour_cycle() const {
return HourCycleBits::decode(flags());
}
-CAST_ACCESSOR(JSDateTimeFormat);
+inline void JSDateTimeFormat::set_date_style(
+ JSDateTimeFormat::DateTimeStyle date_style) {
+ int hints = flags();
+ hints = DateStyleBits::update(hints, date_style);
+ set_flags(hints);
+}
+
+inline JSDateTimeFormat::DateTimeStyle JSDateTimeFormat::date_style() const {
+ return DateStyleBits::decode(flags());
+}
+
+inline void JSDateTimeFormat::set_time_style(
+ JSDateTimeFormat::DateTimeStyle time_style) {
+ int hints = flags();
+ hints = TimeStyleBits::update(hints, time_style);
+ set_flags(hints);
+}
+
+inline JSDateTimeFormat::DateTimeStyle JSDateTimeFormat::time_style() const {
+ return TimeStyleBits::decode(flags());
+}
+
+CAST_ACCESSOR(JSDateTimeFormat)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index b3b1d11253..3c1405f563 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -54,7 +54,7 @@ class PatternItem {
std::vector<const char*> allowed_values;
};
-const std::vector<PatternItem> GetPatternItems() {
+static const std::vector<PatternItem> BuildPatternItems() {
const std::vector<const char*> kLongShort = {"long", "short"};
const std::vector<const char*> kNarrowLongShort = {"narrow", "long", "short"};
const std::vector<const char*> k2DigitNumeric = {"2-digit", "numeric"};
@@ -107,6 +107,22 @@ const std::vector<PatternItem> GetPatternItems() {
return kPatternItems;
}
+class PatternItems {
+ public:
+ PatternItems() : data(BuildPatternItems()) {}
+ virtual ~PatternItems() {}
+ const std::vector<PatternItem>& Get() const { return data; }
+
+ private:
+ const std::vector<PatternItem> data;
+};
+
+static const std::vector<PatternItem>& GetPatternItems() {
+ static base::LazyInstance<PatternItems>::type items =
+ LAZY_INSTANCE_INITIALIZER;
+ return items.Pointer()->Get();
+}
+
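
The hunk above converts GetPatternItems() from rebuilding its vector on every call into a lazily initialized, process-lifetime singleton. A minimal sketch of the same memoization pattern in portable C++ (a function-local static stands in for base::LazyInstance; both give thread-safe one-time construction):

    #include <string>
    #include <vector>

    struct Item {
      std::string property;                     // e.g. "weekday"
      std::vector<const char*> allowed_values;  // e.g. {"narrow", "long", "short"}
    };

    static std::vector<Item> BuildItems() {
      // Runs exactly once, no matter how many callers race to get here.
      return {{"weekday", {"narrow", "long", "short"}},
              {"hour", {"2-digit", "numeric"}}};
    }

    const std::vector<Item>& GetItems() {
      static const std::vector<Item> items = BuildItems();  // C++11 magic static
      return items;
    }
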
class PatternData {
public:
PatternData(const std::string property, std::vector<PatternMap> pairs,
@@ -154,23 +170,57 @@ const std::vector<PatternData> CreateData(const char* digit2,
// kk 24
// K hour in am/pm (0~11) K 0
// KK 00
-const std::vector<PatternData> GetPatternData(Intl::HourCycle hour_cycle) {
- const std::vector<PatternData> data = CreateData("jj", "j");
- const std::vector<PatternData> data_h11 = CreateData("KK", "K");
- const std::vector<PatternData> data_h12 = CreateData("hh", "h");
- const std::vector<PatternData> data_h23 = CreateData("HH", "H");
- const std::vector<PatternData> data_h24 = CreateData("kk", "k");
+
+class Pattern {
+ public:
+ Pattern(const char* d1, const char* d2) : data(CreateData(d1, d2)) {}
+ virtual ~Pattern() {}
+ virtual const std::vector<PatternData>& Get() const { return data; }
+
+ private:
+ std::vector<PatternData> data;
+};
+
+#define DEFINE_TRAIT(name, d1, d2) \
+ struct name { \
+ static void Construct(void* allocated_ptr) { \
+ new (allocated_ptr) Pattern(d1, d2); \
+ } \
+ };
+DEFINE_TRAIT(H11Trait, "KK", "K")
+DEFINE_TRAIT(H12Trait, "hh", "h")
+DEFINE_TRAIT(H23Trait, "HH", "H")
+DEFINE_TRAIT(H24Trait, "kk", "k")
+DEFINE_TRAIT(HDefaultTrait, "jj", "j")
+#undef DEFINE_TRAIT
+
+const std::vector<PatternData>& GetPatternData(Intl::HourCycle hour_cycle) {
switch (hour_cycle) {
- case Intl::HourCycle::kH11:
- return data_h11;
- case Intl::HourCycle::kH12:
- return data_h12;
- case Intl::HourCycle::kH23:
- return data_h23;
- case Intl::HourCycle::kH24:
- return data_h24;
- case Intl::HourCycle::kUndefined:
- return data;
+ case Intl::HourCycle::kH11: {
+ static base::LazyInstance<Pattern, H11Trait>::type h11 =
+ LAZY_INSTANCE_INITIALIZER;
+ return h11.Pointer()->Get();
+ }
+ case Intl::HourCycle::kH12: {
+ static base::LazyInstance<Pattern, H12Trait>::type h12 =
+ LAZY_INSTANCE_INITIALIZER;
+ return h12.Pointer()->Get();
+ }
+ case Intl::HourCycle::kH23: {
+ static base::LazyInstance<Pattern, H23Trait>::type h23 =
+ LAZY_INSTANCE_INITIALIZER;
+ return h23.Pointer()->Get();
+ }
+ case Intl::HourCycle::kH24: {
+ static base::LazyInstance<Pattern, H24Trait>::type h24 =
+ LAZY_INSTANCE_INITIALIZER;
+ return h24.Pointer()->Get();
+ }
+ case Intl::HourCycle::kUndefined: {
+ static base::LazyInstance<Pattern, HDefaultTrait>::type hDefault =
+ LAZY_INSTANCE_INITIALIZER;
+ return hDefault.Pointer()->Get();
+ }
default:
UNREACHABLE();
}
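
base::LazyInstance default-constructs its type, so the trait macro above generates one small struct per hour cycle whose Construct() placement-news the Pattern with the right digit pair. A simplified single-threaded sketch of that trait mechanism (V8's real LazyInstance adds atomic once-only initialization):

    #include <new>

    struct Pattern {
      const char* digit2;
      const char* digit1;
      Pattern(const char* d2, const char* d1) : digit2(d2), digit1(d1) {}
    };

    template <typename T, typename ConstructTrait>
    class LazySketch {
     public:
      T* Pointer() {
        if (!constructed_) {  // no locking in this sketch
          ConstructTrait::Construct(&storage_);
          constructed_ = true;
        }
        return reinterpret_cast<T*>(&storage_);
      }

     private:
      alignas(T) unsigned char storage_[sizeof(T)];
      bool constructed_ = false;
    };

    struct H12Trait {
      static void Construct(void* p) { new (p) Pattern("hh", "h"); }
    };

    // Usage: static LazySketch<Pattern, H12Trait> h12; h12.Pointer()->digit1;
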
@@ -273,6 +323,26 @@ std::string JSDateTimeFormat::CanonicalizeTimeZoneID(Isolate* isolate,
return ToTitleCaseTimezoneLocation(isolate, input);
}
+namespace {
+
+Handle<String> DateTimeStyleAsString(Isolate* isolate,
+ JSDateTimeFormat::DateTimeStyle style) {
+ switch (style) {
+ case JSDateTimeFormat::DateTimeStyle::kFull:
+ return ReadOnlyRoots(isolate).full_string_handle();
+ case JSDateTimeFormat::DateTimeStyle::kLong:
+ return ReadOnlyRoots(isolate).long_string_handle();
+ case JSDateTimeFormat::DateTimeStyle::kMedium:
+ return ReadOnlyRoots(isolate).medium_string_handle();
+ case JSDateTimeFormat::DateTimeStyle::kShort:
+ return ReadOnlyRoots(isolate).short_string_handle();
+ case JSDateTimeFormat::DateTimeStyle::kUndefined:
+ UNREACHABLE();
+ }
+}
+
+} // namespace
+
// ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions
MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format) {
@@ -366,24 +436,25 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
// [[Minute]] "minute"
// [[Second]] "second"
// [[TimeZoneName]] "timeZoneName"
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->locale_string(), locale, kDontThrow)
+ CHECK(JSReceiver::CreateDataProperty(isolate, options,
+ factory->locale_string(), locale,
+ Just(kDontThrow))
.FromJust());
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->calendar_string(),
factory->NewStringFromAsciiChecked(calendar_str.c_str()),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
if (!numbering_system.empty()) {
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->numberingSystem_string(),
factory->NewStringFromAsciiChecked(numbering_system.c_str()),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
}
CHECK(JSReceiver::CreateDataProperty(isolate, options,
factory->timeZone_string(),
- timezone_value, kDontThrow)
+ timezone_value, Just(kDontThrow))
.FromJust());
// 5.b.i. Let hc be dtf.[[HourCycle]].
@@ -392,23 +463,23 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
if (hc != Intl::HourCycle::kUndefined) {
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->hourCycle_string(),
- date_time_format->HourCycleAsString(), kDontThrow)
+ date_time_format->HourCycleAsString(), Just(kDontThrow))
.FromJust());
switch (hc) {
// ii. If hc is "h11" or "h12", let v be true.
case Intl::HourCycle::kH11:
case Intl::HourCycle::kH12:
- CHECK(JSReceiver::CreateDataProperty(isolate, options,
- factory->hour12_string(),
- factory->true_value(), kDontThrow)
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->hour12_string(),
+ factory->true_value(), Just(kDontThrow))
.FromJust());
break;
// iii. Else if, hc is "h23" or "h24", let v be false.
case Intl::HourCycle::kH23:
case Intl::HourCycle::kH24:
- CHECK(JSReceiver::CreateDataProperty(isolate, options,
- factory->hour12_string(),
- factory->false_value(), kDontThrow)
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->hour12_string(),
+ factory->false_value(), Just(kDontThrow))
.FromJust());
break;
// iv. Else, let v be undefined.
@@ -424,13 +495,31 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
isolate, options,
factory->NewStringFromAsciiChecked(item.property.c_str()),
factory->NewStringFromAsciiChecked(pair.value.c_str()),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
break;
}
}
}
+ // dateStyle
+ if (date_time_format->date_style() != DateTimeStyle::kUndefined) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->dateStyle_string(),
+ DateTimeStyleAsString(isolate, date_time_format->date_style()),
+ Just(kDontThrow))
+ .FromJust());
+ }
+
+ // timeStyle
+ if (date_time_format->time_style() != DateTimeStyle::kUndefined) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->timeStyle_string(),
+ DateTimeStyleAsString(isolate, date_time_format->time_style()),
+ Just(kDontThrow))
+ .FromJust());
+ }
+
return options;
}
@@ -606,7 +695,7 @@ Maybe<bool> CreateDefault(Isolate* isolate, Handle<JSObject> options,
MAYBE_RETURN(
JSReceiver::CreateDataProperty(
isolate, options, factory->NewStringFromAsciiChecked(prop.c_str()),
- factory->numeric_string(), kThrowOnError),
+ factory->numeric_string(), Just(kThrowOnError)),
Nothing<bool>());
}
return Just(true);
@@ -734,35 +823,62 @@ std::unique_ptr<icu::TimeZone> CreateTimeZone(Isolate* isolate,
return tz;
}
-std::unique_ptr<icu::Calendar> CreateCalendar(Isolate* isolate,
- const icu::Locale& icu_locale,
- const char* timezone) {
- std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(isolate, timezone);
- if (tz.get() == nullptr) return std::unique_ptr<icu::Calendar>();
-
- // Create a calendar using locale, and apply time zone to it.
- UErrorCode status = U_ZERO_ERROR;
- std::unique_ptr<icu::Calendar> calendar(
- icu::Calendar::createInstance(tz.release(), icu_locale, status));
- CHECK(U_SUCCESS(status));
- CHECK_NOT_NULL(calendar.get());
-
- if (calendar->getDynamicClassID() ==
- icu::GregorianCalendar::getStaticClassID()) {
- icu::GregorianCalendar* gc =
- static_cast<icu::GregorianCalendar*>(calendar.get());
+class CalendarCache {
+ public:
+ icu::Calendar* CreateCalendar(const icu::Locale& locale, icu::TimeZone* tz) {
+ icu::UnicodeString tz_id;
+ tz->getID(tz_id);
+ std::string key;
+ tz_id.toUTF8String<std::string>(key);
+ key += ":";
+ key += locale.getName();
+
+ base::MutexGuard guard(&mutex_);
+ auto it = map_.find(key);
+ if (it != map_.end()) {
+ delete tz;
+ return it->second->clone();
+ }
+ // Create a calendar using locale, and apply time zone to it.
UErrorCode status = U_ZERO_ERROR;
- // The beginning of ECMAScript time, namely -(2**53)
- const double start_of_time = -9007199254740992;
- gc->setGregorianChange(start_of_time, status);
- DCHECK(U_SUCCESS(status));
+ std::unique_ptr<icu::Calendar> calendar(
+ icu::Calendar::createInstance(tz, locale, status));
+ CHECK(U_SUCCESS(status));
+ CHECK_NOT_NULL(calendar.get());
+
+ if (calendar->getDynamicClassID() ==
+ icu::GregorianCalendar::getStaticClassID()) {
+ icu::GregorianCalendar* gc =
+ static_cast<icu::GregorianCalendar*>(calendar.get());
+ UErrorCode status = U_ZERO_ERROR;
+ // The beginning of ECMAScript time, namely -(2**53)
+ const double start_of_time = -9007199254740992;
+ gc->setGregorianChange(start_of_time, status);
+ DCHECK(U_SUCCESS(status));
+ }
+
+ if (map_.size() > 8) { // Cache at most 8 calendars.
+ map_.clear();
+ }
+ map_[key].reset(calendar.release());
+ return map_[key]->clone();
}
- return calendar;
+
+ private:
+ std::map<std::string, std::unique_ptr<icu::Calendar>> map_;
+ base::Mutex mutex_;
+};
+
+icu::Calendar* CreateCalendar(Isolate* isolate, const icu::Locale& icu_locale,
+ icu::TimeZone* tz) {
+ static base::LazyInstance<CalendarCache>::type calendar_cache =
+ LAZY_INSTANCE_INITIALIZER;
+ return calendar_cache.Pointer()->CreateCalendar(icu_locale, tz);
}
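
CreateCalendar now goes through a process-wide CalendarCache: entries are keyed by "timezone-id:locale-name", lookups are mutex-guarded, the map is capped at eight entries, and every caller receives a clone() because ICU objects are not safe to share across threads (on a hit, the freshly created TimeZone argument is simply deleted, since ownership had been passed in). The DateFormatCache and DateTimePatternGeneratorCache below follow the same shape. A runnable sketch of this bounded clone-on-read cache with a stand-in Calendar type:

    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    struct Calendar {  // stand-in for icu::Calendar
      std::string id;
      Calendar* clone() const { return new Calendar(*this); }
    };

    class CalendarCache {
     public:
      Calendar* Get(const std::string& key) {
        std::lock_guard<std::mutex> guard(mutex_);
        auto it = map_.find(key);
        if (it == map_.end()) {
          if (map_.size() > 8) map_.clear();  // crude size cap, as in the patch
          it = map_.emplace(key, std::unique_ptr<Calendar>(new Calendar{key}))
                   .first;
        }
        return it->second->clone();  // caller owns the copy; master stays cached
      }

     private:
      std::map<std::string, std::unique_ptr<Calendar>> map_;
      std::mutex mutex_;
    };
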
std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
- Isolate* isolate, const icu::Locale& icu_locale,
- const std::string& skeleton) {
+ const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
+ icu::DateTimePatternGenerator& generator) {
// See https://github.com/tc39/ecma402/issues/225. The best pattern
// generation needs to be done in the base locale according to the
// current spec, however odd it may be. See also crbug.com/826549.
@@ -773,16 +889,10 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
// locale for the pattern match is not quite right. Moreover, what to
// do with 'related year' part when 'chinese/dangi' calendar is specified
// has to be discussed. Revisit once the spec is clarified/revised.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- UErrorCode status = U_ZERO_ERROR;
- std::unique_ptr<icu::DateTimePatternGenerator> generator(
- icu::DateTimePatternGenerator::createInstance(no_extension_locale,
- status));
icu::UnicodeString pattern;
- if (U_SUCCESS(status)) {
- pattern =
- generator->getBestPattern(icu::UnicodeString(skeleton.c_str()), status);
- }
+ UErrorCode status = U_ZERO_ERROR;
+ pattern = generator.getBestPattern(skeleton, status);
+ CHECK(U_SUCCESS(status));
// Make formatter from skeleton. Calendar and numbering system are added
// to the locale as Unicode extension (if they were specified at all).
@@ -795,9 +905,47 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
return date_format;
}
-Intl::HourCycle HourCycleDefault(icu::SimpleDateFormat* date_format) {
- icu::UnicodeString pattern;
- date_format->toPattern(pattern);
+class DateFormatCache {
+ public:
+ icu::SimpleDateFormat* Create(const icu::Locale& icu_locale,
+ const icu::UnicodeString& skeleton,
+ icu::DateTimePatternGenerator& generator) {
+ std::string key;
+ skeleton.toUTF8String<std::string>(key);
+ key += ":";
+ key += icu_locale.getName();
+
+ base::MutexGuard guard(&mutex_);
+ auto it = map_.find(key);
+ if (it != map_.end()) {
+ return static_cast<icu::SimpleDateFormat*>(it->second->clone());
+ }
+
+ if (map_.size() > 8) { // Cache at most 8 DateFormats.
+ map_.clear();
+ }
+ std::unique_ptr<icu::SimpleDateFormat> instance(
+ CreateICUDateFormat(icu_locale, skeleton, generator));
+ if (instance.get() == nullptr) return nullptr;
+ map_[key] = std::move(instance);
+ return static_cast<icu::SimpleDateFormat*>(map_[key]->clone());
+ }
+
+ private:
+ std::map<std::string, std::unique_ptr<icu::SimpleDateFormat>> map_;
+ base::Mutex mutex_;
+};
+
+std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormatFromCache(
+ const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
+ icu::DateTimePatternGenerator& generator) {
+ static base::LazyInstance<DateFormatCache>::type cache =
+ LAZY_INSTANCE_INITIALIZER;
+ return std::unique_ptr<icu::SimpleDateFormat>(
+ cache.Pointer()->Create(icu_locale, skeleton, generator));
+}
+
+Intl::HourCycle HourCycleFromPattern(const icu::UnicodeString pattern) {
bool in_quote = false;
for (int32_t i = 0; i < pattern.length(); i++) {
char16_t ch = pattern[i];
@@ -822,6 +970,137 @@ Intl::HourCycle HourCycleDefault(icu::SimpleDateFormat* date_format) {
return Intl::HourCycle::kUndefined;
}
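
For context: HourCycleFromPattern (generalized from the old HourCycleDefault) derives the hour cycle from the first unquoted hour character in an ICU date pattern: 'K' means h11, 'h' means h12, 'H' means h23 and 'k' means h24; anything between ASCII apostrophes is literal text and must be skipped. A standalone sketch of that scan:

    #include <string>

    enum class HourCycle { kH11, kH12, kH23, kH24, kUndefined };

    HourCycle HourCycleFromPattern(const std::u16string& pattern) {
      bool in_quote = false;
      for (char16_t ch : pattern) {
        switch (ch) {
          case u'\'': in_quote = !in_quote; break;  // toggle literal section
          case u'K': if (!in_quote) return HourCycle::kH11; break;
          case u'h': if (!in_quote) return HourCycle::kH12; break;
          case u'H': if (!in_quote) return HourCycle::kH23; break;
          case u'k': if (!in_quote) return HourCycle::kH24; break;
        }
      }
      return HourCycle::kUndefined;
    }

    // "h:mm a" -> kH12;  "HH:mm" -> kH23;  "'hour' H" -> kH23 (quoted h skipped)
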
+icu::DateFormat::EStyle DateTimeStyleToEStyle(
+ JSDateTimeFormat::DateTimeStyle style) {
+ switch (style) {
+ case JSDateTimeFormat::DateTimeStyle::kFull:
+ return icu::DateFormat::EStyle::kFull;
+ case JSDateTimeFormat::DateTimeStyle::kLong:
+ return icu::DateFormat::EStyle::kLong;
+ case JSDateTimeFormat::DateTimeStyle::kMedium:
+ return icu::DateFormat::EStyle::kMedium;
+ case JSDateTimeFormat::DateTimeStyle::kShort:
+ return icu::DateFormat::EStyle::kShort;
+ case JSDateTimeFormat::DateTimeStyle::kUndefined:
+ UNREACHABLE();
+ }
+}
+
+icu::UnicodeString ReplaceSkeleton(const icu::UnicodeString input,
+ Intl::HourCycle hc) {
+ icu::UnicodeString result;
+ char16_t to;
+ switch (hc) {
+ case Intl::HourCycle::kH11:
+ to = 'K';
+ break;
+ case Intl::HourCycle::kH12:
+ to = 'h';
+ break;
+ case Intl::HourCycle::kH23:
+ to = 'H';
+ break;
+ case Intl::HourCycle::kH24:
+ to = 'k';
+ break;
+ case Intl::HourCycle::kUndefined:
+ UNREACHABLE();
+ }
+ for (int32_t i = 0; i < input.length(); i++) {
+ switch (input[i]) {
+ // We need to skip 'a', 'b', 'B' here due to
+ // https://unicode-org.atlassian.net/browse/ICU-20437
+ case 'a':
+ V8_FALLTHROUGH;
+ case 'b':
+ V8_FALLTHROUGH;
+ case 'B':
+ // ignore
+ break;
+ case 'h':
+ V8_FALLTHROUGH;
+ case 'H':
+ V8_FALLTHROUGH;
+ case 'K':
+ V8_FALLTHROUGH;
+ case 'k':
+ result += to;
+ break;
+ default:
+ result += input[i];
+ break;
+ }
+ }
+ return result;
+}
+
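
Concretely, with hc = kH23 the skeleton "hhmma" comes back as "HHmm": every hour character is rewritten to 'H', and the day-period markers 'a'/'b'/'B' are dropped entirely as a workaround for ICU-20437, per the comment in the loop above.
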
+std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
+ JSDateTimeFormat::DateTimeStyle date_style,
+ JSDateTimeFormat::DateTimeStyle time_style, const icu::Locale& icu_locale,
+ Intl::HourCycle hc, icu::DateTimePatternGenerator& generator) {
+ std::unique_ptr<icu::SimpleDateFormat> result;
+ if (date_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
+ if (time_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
+ result.reset(reinterpret_cast<icu::SimpleDateFormat*>(
+ icu::DateFormat::createDateTimeInstance(
+ DateTimeStyleToEStyle(date_style),
+ DateTimeStyleToEStyle(time_style), icu_locale)));
+ } else {
+ result.reset(reinterpret_cast<icu::SimpleDateFormat*>(
+ icu::DateFormat::createDateInstance(DateTimeStyleToEStyle(date_style),
+ icu_locale)));
+ // For instances without a time component, the hour cycle has no
+ // impact, so we can return directly.
+ return result;
+ }
+ } else {
+ if (time_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
+ result.reset(reinterpret_cast<icu::SimpleDateFormat*>(
+ icu::DateFormat::createTimeInstance(DateTimeStyleToEStyle(time_style),
+ icu_locale)));
+ } else {
+ UNREACHABLE();
+ }
+ }
+ icu::UnicodeString pattern;
+ pattern = result->toPattern(pattern);
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString skeleton =
+ icu::DateTimePatternGenerator::staticGetSkeleton(pattern, status);
+ CHECK(U_SUCCESS(status));
+
+ // If the skeleton matches the hour cycle, we just return it.
+ if (hc == HourCycleFromPattern(pattern)) {
+ return result;
+ }
+
+ return CreateICUDateFormatFromCache(icu_locale, ReplaceSkeleton(skeleton, hc),
+ generator);
+}
+
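
One detail worth noting in DateTimeStylePattern: the icu::DateFormat factory methods return a base-class pointer, and the patch reinterpret_casts it to icu::SimpleDateFormat*, relying on ICU handing back that concrete type. A more defensive variant of the same step would use a checked downcast; this is a hypothetical helper, not part of the patch, and it assumes an RTTI-enabled ICU build:

    #include <memory>
    #include <unicode/datefmt.h>   // icu::DateFormat
    #include <unicode/smpdtfmt.h>  // icu::SimpleDateFormat

    std::unique_ptr<icu::SimpleDateFormat> AsSimpleDateFormat(
        icu::DateFormat* raw) {
      auto* simple = dynamic_cast<icu::SimpleDateFormat*>(raw);
      if (simple == nullptr) delete raw;  // unexpected concrete type
      return std::unique_ptr<icu::SimpleDateFormat>(simple);
    }
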
+class DateTimePatternGeneratorCache {
+ public:
+ // Return a clone that the caller has to free.
+ icu::DateTimePatternGenerator* CreateGenerator(const icu::Locale& locale) {
+ std::string key(locale.getBaseName());
+ base::MutexGuard guard(&mutex_);
+ auto it = map_.find(key);
+ if (it != map_.end()) {
+ return it->second->clone();
+ }
+ UErrorCode status = U_ZERO_ERROR;
+ map_[key].reset(icu::DateTimePatternGenerator::createInstance(
+ icu::Locale(key.c_str()), status));
+ CHECK(U_SUCCESS(status));
+ return map_[key]->clone();
+ }
+
+ private:
+ std::map<std::string, std::unique_ptr<icu::DateTimePatternGenerator>> map_;
+ base::Mutex mutex_;
+};
+
} // namespace
enum FormatMatcherOption { kBestFit, kBasic };
@@ -891,14 +1170,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
icu::Locale icu_locale = r.icu_locale;
DCHECK(!icu_locale.isBogus());
- if (!maybe_get_hour12.FromJust() &&
- hour_cycle == Intl::HourCycle::kUndefined) {
- auto hc_extension_it = r.extensions.find("hc");
- if (hc_extension_it != r.extensions.end()) {
- hour_cycle = Intl::ToHourCycle(hc_extension_it->second.c_str());
- }
- }
-
// 17. Let timeZone be ? Get(options, "timeZone").
const std::vector<const char*> empty_values;
std::unique_ptr<char[]> timezone = nullptr;
@@ -907,8 +1178,17 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
"Intl.DateTimeFormat", &timezone);
MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>());
+ std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(isolate, timezone.get());
+ if (tz.get() == nullptr) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kInvalidTimeZone,
+ isolate->factory()->NewStringFromAsciiChecked(
+ timezone.get())),
+ JSDateTimeFormat);
+ }
+
std::unique_ptr<icu::Calendar> calendar(
- CreateCalendar(isolate, icu_locale, timezone.get()));
+ CreateCalendar(isolate, icu_locale, tz.release()));
// 18.b If the result of IsValidTimeZoneName(timeZone) is false, then
// i. Throw a RangeError exception.
@@ -920,97 +1200,209 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
JSDateTimeFormat);
}
- // 29. If dateTimeFormat.[[Hour]] is not undefined, then
+ static base::LazyInstance<DateTimePatternGeneratorCache>::type
+ generator_cache = LAZY_INSTANCE_INITIALIZER;
+
+ std::unique_ptr<icu::DateTimePatternGenerator> generator(
+ generator_cache.Pointer()->CreateGenerator(icu_locale));
+
+ // 15. Let hcDefault be dataLocaleData.[[hourCycle]].
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString hour_pattern = generator->getBestPattern("jjmm", status);
+ CHECK(U_SUCCESS(status));
+ Intl::HourCycle hc_default = HourCycleFromPattern(hour_pattern);
+
+ // 16. Let hc be r.[[hc]].
+ Intl::HourCycle hc = Intl::HourCycle::kUndefined;
if (hour_cycle == Intl::HourCycle::kUndefined) {
- // d. If hour12 is not undefined, then
- if (maybe_get_hour12.FromJust()) {
- // i. If hour12 is true, then
- if (hour12) {
- hour_cycle = Intl::HourCycle::kH12;
- } else { // ii. Else,
- hour_cycle = Intl::HourCycle::kH23;
- }
+ auto hc_extension_it = r.extensions.find("hc");
+ if (hc_extension_it != r.extensions.end()) {
+ hc = Intl::ToHourCycle(hc_extension_it->second.c_str());
}
+ } else {
+ hc = hour_cycle;
+ }
+ // 17. If hc is null, then
+ if (hc == Intl::HourCycle::kUndefined) {
+ // a. Set hc to hcDefault.
+ hc = hc_default;
}
- bool has_hour_option = false;
- // 22. For each row of Table 5, except the header row, do
- std::string skeleton;
- for (const PatternData& item : GetPatternData(hour_cycle)) {
- std::unique_ptr<char[]> input;
- // a. Let prop be the name given in the Property column of the row.
- // b. Let value be ? GetOption(options, prop, "string", Ā« the strings given
- // in the Values column of the row Ā», undefined).
- Maybe<bool> maybe_get_option = Intl::GetStringOption(
- isolate, options, item.property.c_str(), item.allowed_values,
- "Intl.DateTimeFormat", &input);
- MAYBE_RETURN(maybe_get_option, Handle<JSDateTimeFormat>());
- if (maybe_get_option.FromJust()) {
- if (item.property == "hour") {
- has_hour_option = true;
+ // 18. If hour12 is not undefined, then
+ if (maybe_get_hour12.FromJust()) {
+ // a. If hour12 is true, then
+ if (hour12) {
+ // i. If hcDefault is "h11" or "h23", then
+ if (hc_default == Intl::HourCycle::kH11 ||
+ hc_default == Intl::HourCycle::kH23) {
+ // 1. Set hc to "h11".
+ hc = Intl::HourCycle::kH11;
+ // ii. Else,
+ } else {
+ // 1. Set hc to "h12".
+ hc = Intl::HourCycle::kH12;
}
- DCHECK_NOT_NULL(input.get());
- // c. Set opt.[[<prop>]] to value.
- skeleton += item.map.find(input.get())->second;
+ // b. Else,
+ } else {
+ // ii. If hcDefault is "h11" or "h23", then
+ if (hc_default == Intl::HourCycle::kH11 ||
+ hc_default == Intl::HourCycle::kH23) {
+ // 1. Set hc to "h23".
+ hc = Intl::HourCycle::kH23;
+ // iii. Else,
+ } else {
+ // 1. Set hc to "h24".
+ hc = Intl::HourCycle::kH24;
+ }
+ }
+ }
+ date_time_format->set_hour_cycle(hc);
+
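
The block above is the spec's hour12 override spelled out step by step; distilled, the resolution is: with no hour12 option, keep the hc option, extension or locale default; with hour12: true, pick h11 when the default is h11 or h23 and h12 otherwise; with hour12: false, pick h23 when the default is h11 or h23 and h24 otherwise. A sketch of just that decision (assuming hc was not already forced by an option or extension):

    enum class HourCycle { kH11, kH12, kH23, kH24, kUndefined };

    HourCycle ResolveHourCycle(bool has_hour12, bool hour12,
                               HourCycle hc_default) {
      if (!has_hour12) return hc_default;
      bool default_is_h11_or_h23 =
          hc_default == HourCycle::kH11 || hc_default == HourCycle::kH23;
      if (hour12) {
        return default_is_h11_or_h23 ? HourCycle::kH11 : HourCycle::kH12;
      }
      return default_is_h11_or_h23 ? HourCycle::kH23 : HourCycle::kH24;
    }

    // e.g. a locale whose default is h23 resolves to h11 under { hour12: true }.
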
+ DateTimeStyle date_style = DateTimeStyle::kUndefined;
+ DateTimeStyle time_style = DateTimeStyle::kUndefined;
+ std::unique_ptr<icu::SimpleDateFormat> icu_date_format;
+
+ if (FLAG_harmony_intl_datetime_style) {
+ // 28. Let dateStyle be ? GetOption(options, "dateStyle", "string", Ā«
+ // "full", "long", "medium", "short" Ā», undefined).
+ Maybe<DateTimeStyle> maybe_date_style =
+ Intl::GetStringOption<DateTimeStyle>(
+ isolate, options, "dateStyle", "Intl.DateTimeFormat",
+ {"full", "long", "medium", "short"},
+ {DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium,
+ DateTimeStyle::kShort},
+ DateTimeStyle::kUndefined);
+ MAYBE_RETURN(maybe_date_style, MaybeHandle<JSDateTimeFormat>());
+ // 29. If dateStyle is not undefined, set dateTimeFormat.[[DateStyle]] to
+ // dateStyle.
+ date_style = maybe_date_style.FromJust();
+ if (date_style != DateTimeStyle::kUndefined) {
+ date_time_format->set_date_style(date_style);
+ }
+
+ // 30. Let timeStyle be ? GetOption(options, "timeStyle", "string", Ā«
+ // "full", "long", "medium", "short" Ā»).
+ Maybe<DateTimeStyle> maybe_time_style =
+ Intl::GetStringOption<DateTimeStyle>(
+ isolate, options, "timeStyle", "Intl.DateTimeFormat",
+ {"full", "long", "medium", "short"},
+ {DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium,
+ DateTimeStyle::kShort},
+ DateTimeStyle::kUndefined);
+ MAYBE_RETURN(maybe_time_style, MaybeHandle<JSDateTimeFormat>());
+
+ // 31. If timeStyle is not undefined, set dateTimeFormat.[[TimeStyle]] to
+ // timeStyle.
+ time_style = maybe_time_style.FromJust();
+ if (time_style != DateTimeStyle::kUndefined) {
+ date_time_format->set_time_style(time_style);
+ }
+
+ // 32. If dateStyle or timeStyle are not undefined, then
+ if (date_style != DateTimeStyle::kUndefined ||
+ time_style != DateTimeStyle::kUndefined) {
+ icu_date_format = DateTimeStylePattern(date_style, time_style, icu_locale,
+ hc, *generator);
}
}
+ // 33. Else,
+ if (icu_date_format.get() == nullptr) {
+ bool has_hour_option = false;
+ // b. For each row of Table 5, except the header row, do
+ std::string skeleton;
+ for (const PatternData& item : GetPatternData(hc)) {
+ std::unique_ptr<char[]> input;
+ // i. Let prop be the name given in the Property column of the row.
+ // ii. Let value be ? GetOption(options, prop, "string", Ā« the strings
+ // given in the Values column of the row Ā», undefined).
+ Maybe<bool> maybe_get_option = Intl::GetStringOption(
+ isolate, options, item.property.c_str(), item.allowed_values,
+ "Intl.DateTimeFormat", &input);
+ MAYBE_RETURN(maybe_get_option, Handle<JSDateTimeFormat>());
+ if (maybe_get_option.FromJust()) {
+ if (item.property == "hour") {
+ has_hour_option = true;
+ }
+ DCHECK_NOT_NULL(input.get());
+ // iii. Set opt.[[<prop>]] to value.
+ skeleton += item.map.find(input.get())->second;
+ }
+ }
- enum FormatMatcherOption { kBestFit, kBasic };
- // We implement only best fit algorithm, but still need to check
- // if the formatMatcher values are in range.
- // 25. Let matcher be ? GetOption(options, "formatMatcher", "string",
- // Ā« "basic", "best fit" Ā», "best fit").
- Maybe<FormatMatcherOption> maybe_format_matcher =
- Intl::GetStringOption<FormatMatcherOption>(
- isolate, options, "formatMatcher", "Intl.DateTimeFormat",
- {"best fit", "basic"},
- {FormatMatcherOption::kBestFit, FormatMatcherOption::kBasic},
- FormatMatcherOption::kBestFit);
- MAYBE_RETURN(maybe_format_matcher, MaybeHandle<JSDateTimeFormat>());
- // TODO(ftang): uncomment the following line and handle format_matcher.
- // FormatMatcherOption format_matcher = maybe_format_matcher.FromJust();
+ enum FormatMatcherOption { kBestFit, kBasic };
+ // We implement only best fit algorithm, but still need to check
+ // if the formatMatcher values are in range.
+ // c. Let matcher be ? GetOption(options, "formatMatcher", "string",
+ // Ā« "basic", "best fit" Ā», "best fit").
+ Maybe<FormatMatcherOption> maybe_format_matcher =
+ Intl::GetStringOption<FormatMatcherOption>(
+ isolate, options, "formatMatcher", "Intl.DateTimeFormat",
+ {"best fit", "basic"},
+ {FormatMatcherOption::kBestFit, FormatMatcherOption::kBasic},
+ FormatMatcherOption::kBestFit);
+ MAYBE_RETURN(maybe_format_matcher, MaybeHandle<JSDateTimeFormat>());
+ // TODO(ftang): uncomment the following line and handle format_matcher.
+ // FormatMatcherOption format_matcher = maybe_format_matcher.FromJust();
+
+ icu::UnicodeString skeleton_ustr(skeleton.c_str());
+ icu_date_format =
+ CreateICUDateFormatFromCache(icu_locale, skeleton_ustr, *generator);
+ if (icu_date_format.get() == nullptr) {
+ // Remove extensions and try again.
+ icu_locale = icu::Locale(icu_locale.getBaseName());
+ icu_date_format =
+ CreateICUDateFormatFromCache(icu_locale, skeleton_ustr, *generator);
+ if (icu_date_format.get() == nullptr) {
+ FATAL("Failed to create ICU date format, are ICU data files missing?");
+ }
+ }
- std::unique_ptr<icu::SimpleDateFormat> date_format(
- CreateICUDateFormat(isolate, icu_locale, skeleton));
- if (date_format.get() == nullptr) {
- // Remove extensions and try again.
- icu_locale = icu::Locale(icu_locale.getBaseName());
- date_format = CreateICUDateFormat(isolate, icu_locale, skeleton);
- if (date_format.get() == nullptr) {
- FATAL("Failed to create ICU date format, are ICU data files missing?");
+ // g. If dateTimeFormat.[[Hour]] is not undefined, then
+ if (!has_hour_option) {
+ // h. Else, i. Set dateTimeFormat.[[HourCycle]] to undefined.
+ date_time_format->set_hour_cycle(Intl::HourCycle::kUndefined);
}
}
// The creation of Calendar depends on timeZone so we have to put 13 after 17.
- // Also date_format is not created until here.
+ // Also icu_date_format is not created until here.
// 13. Set dateTimeFormat.[[Calendar]] to r.[[ca]].
- date_format->adoptCalendar(calendar.release());
-
- // 29. If dateTimeFormat.[[Hour]] is not undefined, then
- if (has_hour_option) {
- // a. Let hcDefault be dataLocaleData.[[hourCycle]].
- Intl::HourCycle hc_default = HourCycleDefault(date_format.get());
- // b. Let hc be dateTimeFormat.[[HourCycle]].
- Intl::HourCycle hc = hour_cycle;
- // c. If hc is null, then
- if (hc == Intl::HourCycle::kUndefined) {
- // i. Set hc to hcDefault.
- hc = hc_default;
+ icu_date_format->adoptCalendar(calendar.release());
+
+ // 12.1.1 InitializeDateTimeFormat ( dateTimeFormat, locales, options )
+ //
+ // Steps 8-9 set opt.[[hc]] to a value *other than undefined*
+ // if "hour12" or "hourCycle" is set in the options.
+ //
+ // 9.2.6 ResolveLocale (... )
+ // Step 8.h / 8.i and 8.k
+ //
+ // An hour12 option always overrides an hourCycle option.
+ // Additionally hour12 and hourCycle both clear out any existing Unicode
+ // extension key in the input locale.
+ //
+ // See details in https://github.com/tc39/test262/pull/2035
+ if (maybe_get_hour12.FromJust() ||
+ maybe_hour_cycle.FromJust() != Intl::HourCycle::kUndefined) {
+ auto hc_extension_it = r.extensions.find("hc");
+ if (hc_extension_it != r.extensions.end()) {
+ if (date_time_format->hour_cycle() !=
+ Intl::ToHourCycle(hc_extension_it->second.c_str())) {
+ // Remove -hc- if it does not agree with what we used.
+ UErrorCode status = U_ZERO_ERROR;
+ icu_locale.setKeywordValue(uloc_toLegacyKey("hc"), nullptr, status);
+ CHECK(U_SUCCESS(status));
+ }
}
- // e. Set dateTimeFormat.[[HourCycle]] to hc.
- date_time_format->set_hour_cycle(hc);
- // 30. Else
- } else {
- // a. Set dateTimeFormat.[[HourCycle]] to undefined.
- date_time_format->set_hour_cycle(Intl::HourCycle::kUndefined);
}
+
Handle<Managed<icu::Locale>> managed_locale =
Managed<icu::Locale>::FromRawPtr(isolate, 0, icu_locale.clone());
date_time_format->set_icu_locale(*managed_locale);
Handle<Managed<icu::SimpleDateFormat>> managed_format =
Managed<icu::SimpleDateFormat>::FromUniquePtr(isolate, 0,
- std::move(date_format));
+ std::move(icu_date_format));
date_time_format->set_icu_simple_date_format(*managed_format);
return date_time_format;
@@ -1127,11 +1519,8 @@ MaybeHandle<Object> JSDateTimeFormat::FormatToParts(
return result;
}
-std::set<std::string> JSDateTimeFormat::GetAvailableLocales() {
- int32_t num_locales = 0;
- const icu::Locale* icu_available_locales =
- icu::DateFormat::getAvailableLocales(num_locales);
- return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+const std::set<std::string>& JSDateTimeFormat::GetAvailableLocales() {
+ return Intl::GetAvailableLocalesForDateFormat();
}
Handle<String> JSDateTimeFormat::HourCycleAsString() const {
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index 1e28b38add..5909258d84 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -71,11 +71,14 @@ class JSDateTimeFormat : public JSObject {
Isolate* isolate, Handle<Object> date, Handle<Object> locales,
Handle<Object> options, RequiredOption required, DefaultsOption defaults);
- static std::set<std::string> GetAvailableLocales();
+ static const std::set<std::string>& GetAvailableLocales();
Handle<String> HourCycleAsString() const;
DECL_CAST(JSDateTimeFormat)
+ // ecma-402/#sec-properties-of-intl-datetimeformat-instances
+ enum class DateTimeStyle { kUndefined, kFull, kLong, kMedium, kShort };
+
// Layout description.
#define JS_DATE_TIME_FORMAT_FIELDS(V) \
V(kICULocaleOffset, kTaggedSize) \
@@ -92,8 +95,17 @@ class JSDateTimeFormat : public JSObject {
inline void set_hour_cycle(Intl::HourCycle hour_cycle);
inline Intl::HourCycle hour_cycle() const;
+ inline void set_date_style(DateTimeStyle date_style);
+ inline DateTimeStyle date_style() const;
+
+ inline void set_time_style(DateTimeStyle time_style);
+ inline DateTimeStyle time_style() const;
+
// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) V(HourCycleBits, Intl::HourCycle, 3, _)
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(HourCycleBits, Intl::HourCycle, 3, _) \
+ V(DateStyleBits, DateTimeStyle, 3, _) \
+ V(TimeStyleBits, DateTimeStyle, 3, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
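
All three settings are packed into the Smi-sized flags word as 3-bit fields, and the STATIC_ASSERTs below verify that every enumerator fits. A simplified sketch of the encode/decode helpers these macros generate, modeled on V8's BitField template (bit positions are illustrative):

    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static constexpr T kMax = static_cast<T>((1u << kSize) - 1);
      static uint32_t update(uint32_t flags, T value) {
        return (flags & ~kMask) | (static_cast<uint32_t>(value) << kShift);
      }
      static T decode(uint32_t flags) {
        return static_cast<T>((flags & kMask) >> kShift);
      }
    };

    enum class DateTimeStyle { kUndefined, kFull, kLong, kMedium, kShort };
    using DateStyleBits = BitField<DateTimeStyle, 3, 3>;  // bits 3..5
    using TimeStyleBits = BitField<DateTimeStyle, 6, 3>;  // bits 6..8

    // uint32_t flags = 0;
    // flags = DateStyleBits::update(flags, DateTimeStyle::kLong);
    // DateStyleBits::decode(flags) == DateTimeStyle::kLong
    // static_assert(DateTimeStyle::kShort <= DateStyleBits::kMax, "fits");
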
@@ -104,6 +116,18 @@ class JSDateTimeFormat : public JSObject {
STATIC_ASSERT(Intl::HourCycle::kH23 <= HourCycleBits::kMax);
STATIC_ASSERT(Intl::HourCycle::kH24 <= HourCycleBits::kMax);
+ STATIC_ASSERT(DateTimeStyle::kUndefined <= DateStyleBits::kMax);
+ STATIC_ASSERT(DateTimeStyle::kFull <= DateStyleBits::kMax);
+ STATIC_ASSERT(DateTimeStyle::kLong <= DateStyleBits::kMax);
+ STATIC_ASSERT(DateTimeStyle::kMedium <= DateStyleBits::kMax);
+ STATIC_ASSERT(DateTimeStyle::kShort <= DateStyleBits::kMax);
+
+ STATIC_ASSERT(DateTimeStyle::kUndefined <= TimeStyleBits::kMax);
+ STATIC_ASSERT(DateTimeStyle::kFull <= TimeStyleBits::kMax);
+ STATIC_ASSERT(DateTimeStyle::kLong <= TimeStyleBits::kMax);
+ STATIC_ASSERT(DateTimeStyle::kMedium <= TimeStyleBits::kMax);
+ STATIC_ASSERT(DateTimeStyle::kShort <= TimeStyleBits::kMax);
+
DECL_ACCESSORS(icu_locale, Managed<icu::Locale>)
DECL_ACCESSORS(icu_simple_date_format, Managed<icu::SimpleDateFormat>)
DECL_ACCESSORS(bound_format, Object)
diff --git a/deps/v8/src/objects/js-generator.h b/deps/v8/src/objects/js-generator.h
index 53541fc997..e2a48810a9 100644
--- a/deps/v8/src/objects/js-generator.h
+++ b/deps/v8/src/objects/js-generator.h
@@ -146,18 +146,8 @@ class AsyncGeneratorRequest : public Struct {
DECL_ACCESSORS(value, Object)
DECL_ACCESSORS(promise, Object)
-// Layout description.
-#define ASYNC_GENERATOR_REQUEST_FIELDS(V) \
- V(kNextOffset, kTaggedSize) \
- V(kResumeModeOffset, kTaggedSize) \
- V(kValueOffset, kTaggedSize) \
- V(kPromiseOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- ASYNC_GENERATOR_REQUEST_FIELDS)
-#undef ASYNC_GENERATOR_REQUEST_FIELDS
+ TORQUE_GENERATED_ASYNC_GENERATOR_REQUEST_FIELDS)
DECL_CAST(AsyncGeneratorRequest)
DECL_PRINTER(AsyncGeneratorRequest)
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
index fd7417959a..dd3f4dceb9 100644
--- a/deps/v8/src/objects/js-list-format-inl.h
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -48,7 +48,7 @@ inline JSListFormat::Type JSListFormat::type() const {
return TypeBits::decode(flags());
}
-CAST_ACCESSOR(JSListFormat);
+CAST_ACCESSOR(JSListFormat)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index e6f9c76a61..dd7ab172af 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -411,15 +411,11 @@ MaybeHandle<String> JSListFormat::FormatList(Isolate* isolate,
return Intl::ToString(isolate, formatted);
}
-std::set<std::string> JSListFormat::GetAvailableLocales() {
- int32_t num_locales = 0;
- // TODO(ftang): for now just use
- // icu::Locale::getAvailableLocales(count) until we migrate to
- // Intl::GetAvailableLocales().
+const std::set<std::string>& JSListFormat::GetAvailableLocales() {
+ // Since ListFormatter does not have a method to list all supported
+ // locales, use the list from icu::Locale, as suggested in
// ICU FR at https://unicode-org.atlassian.net/browse/ICU-20015
- const icu::Locale* icu_available_locales =
- icu::Locale::getAvailableLocales(num_locales);
- return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+ return Intl::GetAvailableLocalesForLocale();
}
// ecma402 #sec-formatlisttoparts
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index 1ae6fcdb84..3880f6faeb 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -49,7 +49,7 @@ class JSListFormat : public JSObject {
Isolate* isolate, Handle<JSListFormat> format_holder,
Handle<JSArray> list);
- static std::set<std::string> GetAvailableLocales();
+ static const std::set<std::string>& GetAvailableLocales();
Handle<String> StyleAsString() const;
Handle<String> TypeAsString() const;
diff --git a/deps/v8/src/objects/js-locale-inl.h b/deps/v8/src/objects/js-locale-inl.h
index 15a2082a0a..44e223ef06 100644
--- a/deps/v8/src/objects/js-locale-inl.h
+++ b/deps/v8/src/objects/js-locale-inl.h
@@ -21,9 +21,9 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSLocale, JSObject)
-ACCESSORS(JSLocale, icu_locale, Managed<icu::Locale>, kICULocaleOffset);
+ACCESSORS(JSLocale, icu_locale, Managed<icu::Locale>, kICULocaleOffset)
-CAST_ACCESSOR(JSLocale);
+CAST_ACCESSOR(JSLocale)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index be438a2508..94b4cb2aba 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -134,12 +134,16 @@ Handle<Object> UnicodeKeywordValue(Isolate* isolate, Handle<JSLocale> locale,
if (status == U_ILLEGAL_ARGUMENT_ERROR || value == "") {
return isolate->factory()->undefined_value();
}
+ if (value == "yes") {
+ value = "true";
+ }
return isolate->factory()->NewStringFromAsciiChecked(value.c_str());
}
bool InRange(size_t value, size_t start, size_t end) {
return (start <= value) && (value <= end);
}
+
bool InRange(char value, char start, char end) {
return (start <= value) && (value <= end);
}
@@ -163,37 +167,86 @@ bool IsDigit(const std::string& str, size_t min, size_t max) {
[](char c) -> bool { return InRange(c, '0', '9'); });
}
-bool ValidateLanguageProduction(const std::string& value) {
- // language = 2*3ALPHA ; shortest ISO 639 code
- // ["-" extlang] ; sometimes followed by
- // ; extended language subtags
- // / 4ALPHA ; or reserved for future use
- // / 5*8ALPHA ; or registered language subtag
- //
- // extlang = 3ALPHA ; selected ISO 639 codes
- // *2("-" 3ALPHA) ; permanently reserved
- // TODO(ftang) not handling the [extlang] yet
- return IsAlpha(value, 2, 8);
+bool IsAlphanum(const std::string& str, size_t min, size_t max) {
+ return IsCheckRange(str, min, max, [](char c) -> bool {
+ return InRange(c, 'a', 'z') || InRange(c, 'A', 'Z') || InRange(c, '0', '9');
+ });
+}
+
+bool IsUnicodeLanguageSubtag(const std::string& value) {
+ // unicode_language_subtag = alpha{2,3} | alpha{5,8};
+ return IsAlpha(value, 2, 3) || IsAlpha(value, 5, 8);
}
-bool ValidateScriptProduction(const std::string& value) {
- // script = 4ALPHA ; ISO 15924 code
+bool IsUnicodeScriptSubtag(const std::string& value) {
+ // unicode_script_subtag = alpha{4} ;
return IsAlpha(value, 4, 4);
}
-bool ValidateRegionProduction(const std::string& value) {
- // region = 2ALPHA ; ISO 3166-1 code
- // / 3DIGIT ; UN M.49 code
+bool IsUnicodeRegionSubtag(const std::string& value) {
+ // unicode_region_subtag = (alpha{2} | digit{3});
return IsAlpha(value, 2, 2) || IsDigit(value, 3, 3);
}
-Maybe<icu::Locale> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
+bool IsDigitAlphanum3(const std::string& value) {
+ return value.length() == 4 && InRange(value[0], '0', '9') &&
+ IsAlphanum(value.substr(1), 3, 3);
+}
+
+bool IsUnicodeVariantSubtag(const std::string& value) {
+ // unicode_variant_subtag = (alphanum{5,8} | digit alphanum{3}) ;
+ return IsAlphanum(value, 5, 8) || IsDigitAlphanum3(value);
+}
+
+bool IsExtensionSingleton(const std::string& value) {
+ return IsAlphanum(value, 1, 1);
+}
+
+// TODO(ftang) Replace the following check w/ icu::LocaleBuilder
+// once ICU64 lands in March 2019.
+bool StartsWithUnicodeLanguageId(const std::string& value) {
+ // unicode_language_id =
+ // unicode_language_subtag (sep unicode_script_subtag)?
+ // (sep unicode_region_subtag)? (sep unicode_variant_subtag)* ;
+ std::vector<std::string> tokens;
+ std::string token;
+ std::istringstream token_stream(value);
+ while (std::getline(token_stream, token, '-')) {
+ tokens.push_back(token);
+ }
+ if (tokens.size() == 0) return false;
+
+ // length >= 1
+ if (!IsUnicodeLanguageSubtag(tokens[0])) return false;
+
+ if (tokens.size() == 1) return true;
+
+ // length >= 2
+ if (IsExtensionSingleton(tokens[1])) return true;
+
+ size_t index = 1;
+ if (IsUnicodeScriptSubtag(tokens[index])) {
+ index++;
+ if (index == tokens.size()) return true;
+ }
+ if (IsUnicodeRegionSubtag(tokens[index])) {
+ index++;
+ }
+ while (index < tokens.size()) {
+ if (IsExtensionSingleton(tokens[index])) return true;
+ if (!IsUnicodeVariantSubtag(tokens[index])) return false;
+ index++;
+ }
+ return true;
+}
+
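
Worked examples for the validator above: "en", "en-Latn-US" and "zh-u-nu-hanidec" are accepted (a single-character singleton such as "u" ends subtag checking early), while "abcd" is rejected because a four-letter first subtag matches neither alpha{2,3} nor alpha{5,8}, and "en-!!" is rejected because "!!" is neither a variant subtag nor an extension singleton.
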
+Maybe<std::string> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Handle<JSReceiver> options) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
if (tag->length() == 0) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate, NewRangeError(MessageTemplate::kLocaleNotEmpty),
- Nothing<icu::Locale>());
+ Nothing<std::string>());
}
v8::String::Utf8Value bcp47_tag(v8_isolate, v8::Utils::ToLocal(tag));
@@ -201,13 +254,18 @@ Maybe<icu::Locale> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
CHECK_NOT_NULL(*bcp47_tag);
// 2. If IsStructurallyValidLanguageTag(tag) is false, throw a RangeError
// exception.
+ if (!StartsWithUnicodeLanguageId(*bcp47_tag)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
+ Nothing<std::string>());
+ }
UErrorCode status = U_ZERO_ERROR;
icu::Locale icu_locale =
icu::Locale::forLanguageTag({*bcp47_tag, bcp47_tag.length()}, status);
if (U_FAILURE(status)) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<icu::Locale>());
+ Nothing<std::string>());
}
// 3. Let language be ? GetOption(options, "language", "string", undefined,
@@ -217,20 +275,15 @@ Maybe<icu::Locale> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Maybe<bool> maybe_language =
Intl::GetStringOption(isolate, options, "language", empty_values,
"ApplyOptionsToTag", &language_str);
- MAYBE_RETURN(maybe_language, Nothing<icu::Locale>());
+ MAYBE_RETURN(maybe_language, Nothing<std::string>());
// 4. If language is not undefined, then
if (maybe_language.FromJust()) {
- // a. If language does not match the language production, throw a RangeError
- // exception.
- // b. If language matches the grandfathered production, throw a RangeError
- // exception.
- // Currently ValidateLanguageProduction only take 2*3ALPHA / 4ALPHA /
- // 5*8ALPHA and won't take 2*3ALPHA "-" extlang so none of the grandfathered
- // will be matched.
- if (!ValidateLanguageProduction(language_str.get())) {
+ // a. If language does not match the unicode_language_subtag production,
+ // throw a RangeError exception.
+ if (!IsUnicodeLanguageSubtag(language_str.get())) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<icu::Locale>());
+ Nothing<std::string>());
}
}
// 5. Let script be ? GetOption(options, "script", "string", undefined,
@@ -239,15 +292,15 @@ Maybe<icu::Locale> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Maybe<bool> maybe_script =
Intl::GetStringOption(isolate, options, "script", empty_values,
"ApplyOptionsToTag", &script_str);
- MAYBE_RETURN(maybe_script, Nothing<icu::Locale>());
+ MAYBE_RETURN(maybe_script, Nothing<std::string>());
// 6. If script is not undefined, then
if (maybe_script.FromJust()) {
- // a. If script does not match the script production, throw a RangeError
- // exception.
- if (!ValidateScriptProduction(script_str.get())) {
+ // a. If script does not match the unicode_script_subtag production, throw
+ // a RangeError exception.
+ if (!IsUnicodeScriptSubtag(script_str.get())) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<icu::Locale>());
+ Nothing<std::string>());
}
}
// 7. Let region be ? GetOption(options, "region", "string", undefined,
@@ -256,79 +309,85 @@ Maybe<icu::Locale> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Maybe<bool> maybe_region =
Intl::GetStringOption(isolate, options, "region", empty_values,
"ApplyOptionsToTag", &region_str);
- MAYBE_RETURN(maybe_region, Nothing<icu::Locale>());
+ MAYBE_RETURN(maybe_region, Nothing<std::string>());
// 8. If region is not undefined, then
if (maybe_region.FromJust()) {
// a. If region does not match the region production, throw a RangeError
// exception.
- if (!ValidateRegionProduction(region_str.get())) {
+ if (!IsUnicodeRegionSubtag(region_str.get())) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<icu::Locale>());
+ Nothing<std::string>());
}
}
// 9. Set tag to CanonicalizeLanguageTag(tag).
+ Maybe<std::string> maybe_canonicalized =
+ Intl::CanonicalizeLanguageTag(isolate, tag);
+ MAYBE_RETURN(maybe_canonicalized, Nothing<std::string>());
+
+ std::vector<std::string> tokens;
+ std::string token;
+ std::istringstream token_stream(maybe_canonicalized.FromJust());
+ while (std::getline(token_stream, token, '-')) {
+ tokens.push_back(token);
+ }
// 10. If language is not undefined,
std::string locale_str;
if (maybe_language.FromJust()) {
- // a. Assert: tag matches the langtag production.
- // b. Set tag to tag with the substring corresponding to the language
- // production replaced by the string language.
- locale_str = language_str.get();
- } else {
- locale_str = icu_locale.getLanguage();
+ // a. Assert: tag matches the unicode_locale_id production.
+ // b. Set tag to tag with the substring corresponding to the
+ // unicode_language_subtag production replaced by the string language.
+ tokens[0] = language_str.get();
}
+
// 11. If script is not undefined, then
- const char* script_ptr = nullptr;
if (maybe_script.FromJust()) {
- // a. If tag does not contain a script production, then
- // i. Set tag to the concatenation of the language production of tag, "-",
- // script, and the rest of tag.
- // i. Set tag to tag with the substring corresponding to the script
- // production replaced by the string script.
- script_ptr = script_str.get();
- } else {
- script_ptr = icu_locale.getScript();
- }
- if (script_ptr != nullptr && strlen(script_ptr) > 0) {
- locale_str.append("-");
- locale_str.append(script_ptr);
+ // a. If tag does not contain a unicode_script_subtag production, then
+ if (tokens.size() < 2 || !IsUnicodeScriptSubtag(tokens[1])) {
+ // i. Set tag to the concatenation of the unicode_language_subtag
+ // production of tag, "-", script, and the rest of tag.
+ tokens.insert(tokens.begin() + 1, script_str.get());
+ // b. Else,
+ } else {
+ // i. Set tag to tag with the substring corresponding to the
+ // unicode_script_subtag production replaced by the string script.
+ tokens[1] = script_str.get();
+ }
}
// 12. If region is not undefined, then
- const char* region_ptr = nullptr;
if (maybe_region.FromJust()) {
- // a. If tag does not contain a region production, then
- //
- // i. Set tag to the concatenation of the language production of tag, the
- // substring corresponding to the "-" script production if present, "-",
- // region, and the rest of tag.
- //
+ // a. If tag does not contain a unicode_region_subtag production, then
+ // i. Set tag to the concatenation of the unicode_language_subtag
+ // production of tag, the substring corresponding to the "-"
+ // unicode_script_subtag production if present, "-", region, and
+ // the rest of tag.
// b. Else,
- //
- // i. Set tag to tag with the substring corresponding to the region
- // production replaced by the string region.
- region_ptr = region_str.get();
- } else {
- region_ptr = icu_locale.getCountry();
+ // i. Set tag to tag with the substring corresponding to the
+ // unicode_region_subtag production replaced by the string region.
+ if (tokens.size() > 1 && IsUnicodeRegionSubtag(tokens[1])) {
+ tokens[1] = region_str.get();
+ } else if (tokens.size() > 1 && IsUnicodeScriptSubtag(tokens[1])) {
+ if (tokens.size() > 2 && IsUnicodeRegionSubtag(tokens[2])) {
+ tokens[2] = region_str.get();
+ } else {
+ tokens.insert(tokens.begin() + 2, region_str.get());
+ }
+ } else {
+ tokens.insert(tokens.begin() + 1, region_str.get());
+ }
}
- std::string without_options(icu_locale.getName());
-
- // replace with values from options
- icu_locale =
- icu::Locale(locale_str.c_str(), region_ptr, icu_locale.getVariant());
- locale_str = icu_locale.getName();
-
- // Append extensions from tag
- size_t others = without_options.find("@");
- if (others != std::string::npos) {
- locale_str += without_options.substr(others);
+ std::string replaced;
+ for (auto it = tokens.begin(); it != tokens.end(); it++) {
+ replaced += *it;
+ if (it + 1 != tokens.end()) {
+ replaced += '-';
+ }
}
// 13. Return CanonicalizeLanguageTag(tag).
- icu_locale = icu::Locale::createCanonical(locale_str.c_str());
- return Just(icu_locale);
+ return Intl::CanonicalizeLanguageTag(isolate, replaced);
}
} // namespace
@@ -337,10 +396,17 @@ MaybeHandle<JSLocale> JSLocale::Initialize(Isolate* isolate,
Handle<JSLocale> locale,
Handle<String> locale_str,
Handle<JSReceiver> options) {
- Maybe<icu::Locale> maybe_locale =
+ Maybe<std::string> maybe_locale =
ApplyOptionsToTag(isolate, locale_str, options);
MAYBE_RETURN(maybe_locale, MaybeHandle<JSLocale>());
- icu::Locale icu_locale = maybe_locale.FromJust();
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale =
+ icu::Locale::forLanguageTag(maybe_locale.FromJust().c_str(), status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters),
+ JSLocale);
+ }
Maybe<bool> error = InsertOptionsIntoLocale(isolate, options, &icu_locale);
MAYBE_RETURN(error, MaybeHandle<JSLocale>());
@@ -364,6 +430,10 @@ Handle<String> MorphLocale(Isolate* isolate, String locale,
UErrorCode status = U_ZERO_ERROR;
icu::Locale icu_locale =
icu::Locale::forLanguageTag(locale.ToCString().get(), status);
+ // TODO(ftang): Remove the following lines after ICU-8420 is fixed.
+ // Due to ICU-8420, "und" is turned into "" by forLanguageTag, so we
+ // work around it by using icu::Locale("und") directly.
+ if (icu_locale.getName()[0] == '\0') icu_locale = icu::Locale("und");
CHECK(U_SUCCESS(status));
CHECK(!icu_locale.isBogus());
(*morph_func)(&icu_locale, &status);
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index 077abda0c9..3edf6f1ea3 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -50,7 +50,7 @@ inline JSNumberFormat::CurrencyDisplay JSNumberFormat::currency_display()
return CurrencyDisplayBits::decode(flags());
}
-CAST_ACCESSOR(JSNumberFormat);
+CAST_ACCESSOR(JSNumberFormat)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index 646cbed8e7..fda9a940d6 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -109,19 +109,20 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
// [[MinimumSignificantDigits]] "minimumSignificantDigits"
// [[MaximumSignificantDigits]] "maximumSignificantDigits"
// [[UseGrouping]] "useGrouping"
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->locale_string(), locale, kDontThrow)
+ CHECK(JSReceiver::CreateDataProperty(isolate, options,
+ factory->locale_string(), locale,
+ Just(kDontThrow))
.FromJust());
if (!numbering_system.empty()) {
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->numberingSystem_string(),
factory->NewStringFromAsciiChecked(numbering_system.c_str()),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
}
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->style_string(),
- number_format_holder->StyleAsString(), kDontThrow)
+ number_format_holder->StyleAsString(), Just(kDontThrow))
.FromJust());
if (number_format_holder->style() == Style::CURRENCY) {
icu::UnicodeString currency(number_format->getCurrency());
@@ -133,49 +134,49 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
reinterpret_cast<const uint16_t*>(currency.getBuffer()),
currency.length()))
.ToHandleChecked(),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->currencyDisplay_string(),
- number_format_holder->CurrencyDisplayAsString(), kDontThrow)
+ number_format_holder->CurrencyDisplayAsString(), Just(kDontThrow))
.FromJust());
}
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->minimumIntegerDigits_string(),
factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
CHECK(
JSReceiver::CreateDataProperty(
isolate, options, factory->minimumFractionDigits_string(),
factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
CHECK(
JSReceiver::CreateDataProperty(
isolate, options, factory->maximumFractionDigits_string(),
factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
if (decimal_format->areSignificantDigitsUsed()) {
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->minimumSignificantDigits_string(),
factory->NewNumberFromInt(
decimal_format->getMinimumSignificantDigits()),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->maximumSignificantDigits_string(),
factory->NewNumberFromInt(
decimal_format->getMaximumSignificantDigits()),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
}
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->useGrouping_string(),
factory->ToBoolean((number_format->isGroupingUsed() == TRUE)),
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
return options;
}
@@ -461,10 +462,45 @@ Handle<String> JSNumberFormat::CurrencyDisplayAsString() const {
}
}
-MaybeHandle<String> JSNumberFormat::FormatNumber(
- Isolate* isolate, const icu::NumberFormat& number_format, double number) {
+namespace {
+Maybe<icu::UnicodeString> IcuFormatNumber(
+ Isolate* isolate, const icu::NumberFormat& number_format,
+ Handle<Object> numeric_obj, icu::FieldPositionIterator* fp_iter) {
icu::UnicodeString result;
- number_format.format(number, result);
+ // If it is BigInt, handle it differently.
+ UErrorCode status = U_ZERO_ERROR;
+ if (numeric_obj->IsBigInt()) {
+ Handle<BigInt> big_int = Handle<BigInt>::cast(numeric_obj);
+ Handle<String> big_int_string;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, big_int_string,
+ BigInt::ToString(isolate, big_int),
+ Nothing<icu::UnicodeString>());
+ number_format.format(
+ {big_int_string->ToCString().get(), big_int_string->length()}, result,
+ fp_iter, status);
+ } else {
+ double number = numeric_obj->Number();
+ number_format.format(number, result, fp_iter, status);
+ }
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate,
+ NewTypeError(MessageTemplate::kIcuError),
+ Nothing<icu::UnicodeString>());
+ }
+ return Just(result);
+}
+
+} // namespace
+
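
The BigInt branch above exists because a JavaScript BigInt can exceed the 53-bit integer precision of double, so it is rendered to its exact decimal string and passed to the icu::NumberFormat overload that formats decimal-number strings. A small runnable illustration of the precision loss the string path avoids:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t big = (1ull << 53) + 1;          // 9007199254740993
      double lossy = static_cast<double>(big);  // nearest double is 2^53
      std::printf("exact:  %" PRIu64 "\n", big);  // prints 9007199254740993
      std::printf("double: %.0f\n", lossy);       // prints 9007199254740992
      return 0;
    }
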
+MaybeHandle<String> JSNumberFormat::FormatNumeric(
+ Isolate* isolate, const icu::NumberFormat& number_format,
+ Handle<Object> numeric_obj) {
+ DCHECK(numeric_obj->IsNumeric());
+
+ Maybe<icu::UnicodeString> maybe_format =
+ IcuFormatNumber(isolate, number_format, numeric_obj, nullptr);
+ MAYBE_RETURN(maybe_format, Handle<String>());
+ icu::UnicodeString result = maybe_format.FromJust();
return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
@@ -490,13 +526,22 @@ bool cmp_NumberFormatSpan(const NumberFormatSpan& a,
// The list comes from third_party/icu/source/i18n/unicode/unum.h.
// They're mapped to NumberFormat part types mentioned throughout
// https://tc39.github.io/ecma402/#sec-partitionnumberpattern .
-Handle<String> IcuNumberFieldIdToNumberType(int32_t field_id, double number,
+Handle<String> IcuNumberFieldIdToNumberType(int32_t field_id,
+ Handle<Object> numeric_obj,
Isolate* isolate) {
+ DCHECK(numeric_obj->IsNumeric());
switch (static_cast<UNumberFormatFields>(field_id)) {
case UNUM_INTEGER_FIELD:
- if (std::isfinite(number)) return isolate->factory()->integer_string();
- if (std::isnan(number)) return isolate->factory()->nan_string();
- return isolate->factory()->infinity_string();
+ if (numeric_obj->IsBigInt()) {
+ // Neither NaN nor Infinity can be stored in a BigInt,
+ // so just return integer.
+ return isolate->factory()->integer_string();
+ } else {
+ double number = numeric_obj->Number();
+ if (std::isfinite(number)) return isolate->factory()->integer_string();
+ if (std::isnan(number)) return isolate->factory()->nan_string();
+ return isolate->factory()->infinity_string();
+ }
case UNUM_FRACTION_FIELD:
return isolate->factory()->fraction_string();
case UNUM_DECIMAL_SEPARATOR_FIELD:
@@ -508,9 +553,15 @@ Handle<String> IcuNumberFieldIdToNumberType(int32_t field_id, double number,
case UNUM_PERCENT_FIELD:
return isolate->factory()->percentSign_string();
case UNUM_SIGN_FIELD:
- return number < 0 ? isolate->factory()->minusSign_string()
- : isolate->factory()->plusSign_string();
-
+ if (numeric_obj->IsBigInt()) {
+ Handle<BigInt> big_int = Handle<BigInt>::cast(numeric_obj);
+ return big_int->IsNegative() ? isolate->factory()->minusSign_string()
+ : isolate->factory()->plusSign_string();
+ } else {
+ double number = numeric_obj->Number();
+ return number < 0 ? isolate->factory()->minusSign_string()
+ : isolate->factory()->plusSign_string();
+ }
case UNUM_EXPONENT_SYMBOL_FIELD:
case UNUM_EXPONENT_SIGN_FIELD:
case UNUM_EXPONENT_FIELD:
@@ -625,16 +676,15 @@ std::vector<NumberFormatSpan> FlattenRegionsToParts(
Maybe<int> JSNumberFormat::FormatToParts(Isolate* isolate,
Handle<JSArray> result,
int start_index,
- const icu::NumberFormat& fmt,
- double number, Handle<String> unit) {
- icu::UnicodeString formatted;
+ const icu::NumberFormat& number_format,
+ Handle<Object> numeric_obj,
+ Handle<String> unit) {
+ DCHECK(numeric_obj->IsNumeric());
icu::FieldPositionIterator fp_iter;
- UErrorCode status = U_ZERO_ERROR;
- fmt.format(number, formatted, &fp_iter, status);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewTypeError(MessageTemplate::kIcuError), Nothing<int>());
- }
+ Maybe<icu::UnicodeString> maybe_format =
+ IcuFormatNumber(isolate, number_format, numeric_obj, &fp_iter);
+ MAYBE_RETURN(maybe_format, Nothing<int>());
+ icu::UnicodeString formatted = maybe_format.FromJust();
int32_t length = formatted.length();
int index = start_index;
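
Each span the FieldPositionIterator reports — a (field id, begin, end) triple — is what FlattenRegionsToParts flattens and IcuNumberFieldIdToNumberType names. A hedged standalone sketch of that first step, using only public ICU API (values illustrative):

#include <unicode/fieldpos.h>
#include <unicode/fpositer.h>
#include <unicode/locid.h>
#include <unicode/numfmt.h>
#include <unicode/unistr.h>
#include <iostream>
#include <memory>

int main() {
  UErrorCode status = U_ZERO_ERROR;
  std::unique_ptr<icu::NumberFormat> fmt(
      icu::NumberFormat::createInstance(icu::Locale::getUS(), status));
  if (U_FAILURE(status)) return 1;

  icu::UnicodeString formatted;
  icu::FieldPositionIterator fp_iter;
  fmt->format(-12345.6, formatted, &fp_iter, status);
  if (U_FAILURE(status)) return 1;

  // Walk the raw field regions; FormatToParts turns each of these into a
  // NumberFormatSpan before flattening.
  icu::FieldPosition fp;
  while (fp_iter.next(fp)) {
    std::cout << "field " << fp.getField() << " covers ["
              << fp.getBeginIndex() << ", " << fp.getEndIndex() << ")\n";
  }
}
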
@@ -662,7 +712,7 @@ Maybe<int> JSNumberFormat::FormatToParts(Isolate* isolate,
Handle<String> field_type_string =
part.field_id == -1
? isolate->factory()->literal_string()
- : IcuNumberFieldIdToNumberType(part.field_id, number, isolate);
+ : IcuNumberFieldIdToNumberType(part.field_id, numeric_obj, isolate);
Handle<String> substring;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, substring,
@@ -681,7 +731,9 @@ Maybe<int> JSNumberFormat::FormatToParts(Isolate* isolate,
}
MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
- Isolate* isolate, Handle<JSNumberFormat> number_format, double number) {
+ Isolate* isolate, Handle<JSNumberFormat> number_format,
+ Handle<Object> numeric_obj) {
+ CHECK(numeric_obj->IsNumeric());
Factory* factory = isolate->factory();
icu::NumberFormat* fmt = number_format->icu_number_format()->raw();
CHECK_NOT_NULL(fmt);
@@ -689,17 +741,16 @@ MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
Handle<JSArray> result = factory->NewJSArray(0);
Maybe<int> maybe_format_to_parts = JSNumberFormat::FormatToParts(
- isolate, result, 0, *fmt, number, Handle<String>());
+ isolate, result, 0, *fmt, numeric_obj, Handle<String>());
MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
return result;
}
-std::set<std::string> JSNumberFormat::GetAvailableLocales() {
- int32_t num_locales = 0;
- const icu::Locale* icu_available_locales =
- icu::NumberFormat::getAvailableLocales(num_locales);
- return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+const std::set<std::string>& JSNumberFormat::GetAvailableLocales() {
+ static base::LazyInstance<Intl::AvailableLocales<icu::NumberFormat>>::type
+ available_locales = LAZY_INSTANCE_INITIALIZER;
+ return available_locales.Pointer()->Get();
}
} // namespace internal
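
The old GetAvailableLocales rebuilt its std::set on every call; the replacement computes the set once behind base::LazyInstance and returns a reference. A portable sketch of the same caching without V8's base library (a C++11 function-local static gives the same thread-safe, once-only initialization):

#include <unicode/locid.h>
#include <unicode/numfmt.h>
#include <set>
#include <string>

const std::set<std::string>& AvailableNumberFormatLocales() {
  // Initialized exactly once, on first call; later calls only pay for the
  // reference return.
  static const std::set<std::string> locales = []() -> std::set<std::string> {
    std::set<std::string> s;
    int32_t count = 0;
    const icu::Locale* list = icu::NumberFormat::getAvailableLocales(count);
    for (int32_t i = 0; i < count; ++i) s.insert(list[i].getName());
    return s;
  }();
  return locales;
}
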
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 03071a25e4..0f0f6342ac 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -44,7 +44,8 @@ class JSNumberFormat : public JSObject {
Handle<JSNumberFormat> number_format);
V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> FormatToParts(
- Isolate* isolate, Handle<JSNumberFormat> number_format, double number);
+ Isolate* isolate, Handle<JSNumberFormat> number_format,
+ Handle<Object> numeric_obj);
// A utility function used by the above JSNumberFormat::FormatToParts()
// and JSRelativeTimeFormat::FormatToParts().
@@ -56,12 +57,14 @@ class JSNumberFormat : public JSObject {
// unit as "unit" to each added object.
V8_WARN_UNUSED_RESULT static Maybe<int> FormatToParts(
Isolate* isolate, Handle<JSArray> result, int start_index,
- const icu::NumberFormat& fmt, double number, Handle<String> unit);
+ const icu::NumberFormat& fmt, Handle<Object> numeric_obj,
+ Handle<String> unit);
- V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatNumber(
- Isolate* isolate, const icu::NumberFormat& number_format, double number);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatNumeric(
+ Isolate* isolate, const icu::NumberFormat& number_format,
+ Handle<Object> numeric_obj);
- static std::set<std::string> GetAvailableLocales();
+ static const std::set<std::string>& GetAvailableLocales();
Handle<String> StyleAsString() const;
Handle<String> CurrencyDisplayAsString() const;
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 1de9a10c2a..3b4313b54a 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -8,17 +8,19 @@
#include "src/objects/js-objects.h"
#include "src/feedback-vector.h"
+#include "src/field-index-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/keys.h"
#include "src/lookup-inl.h"
#include "src/objects/embedder-data-slot-inl.h"
#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/property-array-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots.h"
#include "src/objects/smi-inl.h"
-#include "src/prototype.h"
+#include "src/prototype-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -129,7 +131,7 @@ bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject object) {
ACCESSORS(JSReceiver, raw_properties_or_hash, Object, kPropertiesOrHashOffset)
FixedArrayBase JSObject::elements() const {
- Object array = READ_FIELD(this, kElementsOffset);
+ Object array = READ_FIELD(*this, kElementsOffset);
return FixedArrayBase::cast(array);
}
@@ -267,16 +269,8 @@ int JSObject::GetHeaderSize(const Map map) {
// static
int JSObject::GetEmbedderFieldsStartOffset(const Map map) {
- // Embedder fields are located after the header size rounded up to the
- // kSystemPointerSize, whereas in-object properties are at the end of the
- // object.
- int header_size = GetHeaderSize(map);
- if (kTaggedSize == kSystemPointerSize) {
- DCHECK(IsAligned(header_size, kSystemPointerSize));
- return header_size;
- } else {
- return RoundUp(header_size, kSystemPointerSize);
- }
+ // Embedder fields are located after the object header.
+ return GetHeaderSize(map);
}
int JSObject::GetEmbedderFieldsStartOffset() {
@@ -287,12 +281,13 @@ int JSObject::GetEmbedderFieldsStartOffset() {
int JSObject::GetEmbedderFieldCount(const Map map) {
int instance_size = map->instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
- // Embedder fields are located after the header size rounded up to the
- // kSystemPointerSize, whereas in-object properties are at the end of the
- // object. We don't have to round up the header size here because division by
- // kEmbedderDataSlotSizeInTaggedSlots will swallow potential padding in case
- // of (kTaggedSize != kSystemPointerSize) anyway.
- return (((instance_size - GetHeaderSize(map)) >> kTaggedSizeLog2) -
+ // Embedder fields are located after the object header, whereas in-object
+ // properties are located at the end of the object. We don't have to round up
+ // the header size here because division by kEmbedderDataSlotSizeInTaggedSlots
+ // will swallow potential padding in case of (kTaggedSize !=
+ // kSystemPointerSize) anyway.
+ return (((instance_size - GetEmbedderFieldsStartOffset(map)) >>
+ kTaggedSizeLog2) -
map->GetInObjectProperties()) /
kEmbedderDataSlotSizeInTaggedSlots;
}
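
With the rounding removed, GetEmbedderFieldCount is pure slot arithmetic: take the tagged slots between the header and the in-object properties, then divide by the width of one embedder slot. A worked example with hypothetical sizes (illustrative stand-ins, not V8's actual constants):

#include <cassert>

int main() {
  const int kTaggedSizeLog2 = 2;                     // 4-byte tagged slots
  const int kEmbedderDataSlotSizeInTaggedSlots = 2;  // assumed slot width
  const int instance_size = 64;                      // map->instance_size()
  const int start_offset = 24;                       // header size, unrounded
  const int in_object_properties = 4;

  int count = (((instance_size - start_offset) >> kTaggedSizeLog2) -
               in_object_properties) /
              kEmbedderDataSlotSizeInTaggedSlots;
  // (64 - 24) / 4 = 10 tagged slots past the header; 4 hold in-object
  // properties, leaving 6 slots, i.e. 3 two-slot embedder fields.
  assert(count == 3);
}
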
@@ -330,7 +325,7 @@ bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
Object JSObject::RawFastPropertyAt(FieldIndex index) {
DCHECK(!IsUnboxedDoubleField(index));
if (index.is_inobject()) {
- return READ_FIELD(this, index.offset());
+ return READ_FIELD(*this, index.offset());
} else {
return property_array()->get(index.outobject_array_index());
}
@@ -338,12 +333,12 @@ Object JSObject::RawFastPropertyAt(FieldIndex index) {
double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
DCHECK(IsUnboxedDoubleField(index));
- return READ_DOUBLE_FIELD(this, index.offset());
+ return READ_DOUBLE_FIELD(*this, index.offset());
}
uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) {
DCHECK(IsUnboxedDoubleField(index));
- return READ_UINT64_FIELD(this, index.offset());
+ return READ_UINT64_FIELD(*this, index.offset());
}
void JSObject::RawFastPropertyAtPut(FieldIndex index, Object value) {
@@ -430,9 +425,10 @@ Object JSObject::InObjectPropertyAtPut(int index, Object value,
void JSObject::InitializeBody(Map map, int start_offset,
Object pre_allocated_value, Object filler_value) {
- DCHECK(!filler_value->IsHeapObject() || !Heap::InNewSpace(filler_value));
- DCHECK(!pre_allocated_value->IsHeapObject() ||
- !Heap::InNewSpace(pre_allocated_value));
+ DCHECK_IMPLIES(filler_value->IsHeapObject(),
+ !ObjectInYoungGeneration(filler_value));
+ DCHECK_IMPLIES(pre_allocated_value->IsHeapObject(),
+ !ObjectInYoungGeneration(pre_allocated_value));
int size = map->instance_size();
int offset = start_offset;
if (filler_value != pre_allocated_value) {
@@ -451,7 +447,7 @@ void JSObject::InitializeBody(Map map, int start_offset,
}
Object JSBoundFunction::raw_bound_target_function() const {
- return READ_FIELD(this, kBoundTargetFunctionOffset);
+ return READ_FIELD(*this, kBoundTargetFunctionOffset);
}
ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
@@ -461,7 +457,7 @@ ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
-ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
+ACCESSORS(JSGlobalObject, native_context, NativeContext, kNativeContextOffset)
ACCESSORS(JSGlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
@@ -545,13 +541,13 @@ Code JSFunction::code() const {
}
void JSFunction::set_code(Code value) {
- DCHECK(!Heap::InNewSpace(value));
+ DCHECK(!ObjectInYoungGeneration(value));
RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
MarkingBarrier(*this, RawField(kCodeOffset), value);
}
void JSFunction::set_code_no_write_barrier(Code value) {
- DCHECK(!Heap::InNewSpace(value));
+ DCHECK(!ObjectInYoungGeneration(value));
RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
}
@@ -601,7 +597,9 @@ bool JSFunction::has_context() const {
JSGlobalProxy JSFunction::global_proxy() { return context()->global_proxy(); }
-Context JSFunction::native_context() { return context()->native_context(); }
+NativeContext JSFunction::native_context() {
+ return context()->native_context();
+}
void JSFunction::set_context(Object value) {
DCHECK(value->IsUndefined() || value->IsContext());
@@ -711,11 +709,11 @@ ACCESSORS(JSDate, min, Object, kMinOffset)
ACCESSORS(JSDate, sec, Object, kSecOffset)
MessageTemplate JSMessageObject::type() const {
- Object value = READ_FIELD(this, kTypeOffset);
+ Object value = READ_FIELD(*this, kTypeOffset);
return MessageTemplateFromInt(Smi::ToInt(value));
}
void JSMessageObject::set_type(MessageTemplate value) {
- WRITE_FIELD(this, kTypeOffset, Smi::FromInt(static_cast<int>(value)));
+ WRITE_FIELD(*this, kTypeOffset, Smi::FromInt(static_cast<int>(value)));
}
ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Script, kScriptOffset)
@@ -728,7 +726,7 @@ ElementsKind JSObject::GetElementsKind() const {
ElementsKind kind = map()->elements_kind();
#if VERIFY_HEAP && DEBUG
FixedArrayBase fixed_array =
- FixedArrayBase::unchecked_cast(READ_FIELD(this, kElementsOffset));
+ FixedArrayBase::unchecked_cast(READ_FIELD(*this, kElementsOffset));
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
@@ -783,6 +781,10 @@ bool JSObject::HasDictionaryElements() {
return GetElementsKind() == DICTIONARY_ELEMENTS;
}
+bool JSObject::HasPackedElements() {
+ return GetElementsKind() == PACKED_ELEMENTS;
+}
+
bool JSObject::HasFastArgumentsElements() {
return GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
}
@@ -846,13 +848,13 @@ NumberDictionary JSObject::element_dictionary() {
void JSReceiver::initialize_properties() {
ReadOnlyRoots roots = GetReadOnlyRoots();
- DCHECK(!Heap::InNewSpace(roots.empty_fixed_array()));
- DCHECK(!Heap::InNewSpace(roots.empty_property_dictionary()));
+ DCHECK(!ObjectInYoungGeneration(roots.empty_fixed_array()));
+ DCHECK(!ObjectInYoungGeneration(roots.empty_property_dictionary()));
if (map()->is_dictionary_map()) {
- WRITE_FIELD(this, kPropertiesOrHashOffset,
+ WRITE_FIELD(*this, kPropertiesOrHashOffset,
roots.empty_property_dictionary());
} else {
- WRITE_FIELD(this, kPropertiesOrHashOffset, roots.empty_fixed_array());
+ WRITE_FIELD(*this, kPropertiesOrHashOffset, roots.empty_fixed_array());
}
}
@@ -975,6 +977,34 @@ ACCESSORS(JSAsyncFromSyncIterator, next, Object, kNextOffset)
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
+static inline bool ShouldConvertToSlowElements(JSObject object,
+ uint32_t capacity,
+ uint32_t index,
+ uint32_t* new_capacity) {
+ STATIC_ASSERT(JSObject::kMaxUncheckedOldFastElementsLength <=
+ JSObject::kMaxUncheckedFastElementsLength);
+ if (index < capacity) {
+ *new_capacity = capacity;
+ return false;
+ }
+ if (index - capacity >= JSObject::kMaxGap) return true;
+ *new_capacity = JSObject::NewElementsCapacity(index + 1);
+ DCHECK_LT(index, *new_capacity);
+ // TODO(ulan): Check if it works with young large objects.
+ if (*new_capacity <= JSObject::kMaxUncheckedOldFastElementsLength ||
+ (*new_capacity <= JSObject::kMaxUncheckedFastElementsLength &&
+ ObjectInYoungGeneration(object))) {
+ return false;
+ }
+ // If the fast-case backing storage takes up much more memory than a
+ // dictionary backing storage would, the object should have slow elements.
+ int used_elements = object->GetFastElementsUsage();
+ uint32_t size_threshold = NumberDictionary::kPreferFastElementsSizeFactor *
+ NumberDictionary::ComputeCapacity(used_elements) *
+ NumberDictionary::kEntrySize;
+ return size_threshold <= *new_capacity;
+}
+
} // namespace internal
} // namespace v8
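
The newly inlined ShouldConvertToSlowElements reduces to one comparison: go slow once a fast backing store of the requested capacity would cost at least kPreferFastElementsSizeFactor times a dictionary sized for the elements actually in use. A standalone sketch with stand-in constants (the real NumberDictionary parameters and ComputeCapacity differ):

#include <cstdint>
#include <iostream>

constexpr uint32_t kPreferFastSizeFactor = 3;  // hypothetical
constexpr uint32_t kEntrySize = 3;             // hypothetical

// Stand-in for NumberDictionary::ComputeCapacity: next power of two with
// roughly 50% slack.
uint32_t DictionaryCapacityFor(uint32_t used) {
  uint32_t need = used + used / 2;
  uint32_t cap = 16;
  while (cap < need) cap <<= 1;
  return cap;
}

bool PreferSlowElements(uint32_t new_capacity, uint32_t used_elements) {
  uint32_t size_threshold =
      kPreferFastSizeFactor * DictionaryCapacityFor(used_elements) * kEntrySize;
  return size_threshold <= new_capacity;
}

int main() {
  // Sparse: 10 live elements but an index forcing capacity 4096.
  std::cout << PreferSlowElements(4096, 10) << '\n';    // 1: use a dictionary
  // Dense-ish: 1000 live elements in capacity 2048.
  std::cout << PreferSlowElements(2048, 1000) << '\n';  // 0: stay fast
}
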
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
new file mode 100644
index 0000000000..f515a84599
--- /dev/null
+++ b/deps/v8/src/objects/js-objects.cc
@@ -0,0 +1,5804 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/js-objects.h"
+
+#include "src/api-arguments-inl.h"
+#include "src/arguments.h"
+#include "src/bootstrapper.h"
+#include "src/compiler.h"
+#include "src/counters.h"
+#include "src/date.h"
+#include "src/elements.h"
+#include "src/field-type.h"
+#include "src/handles-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/layout-descriptor.h"
+#include "src/log.h"
+#include "src/lookup.h"
+#include "src/maybe-handles.h"
+#include "src/objects-inl.h"
+#include "src/objects/allocation-site-inl.h"
+#include "src/objects/api-callbacks.h"
+#include "src/objects/arguments-inl.h"
+#include "src/objects/dictionary.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/js-array-buffer.h"
+#include "src/objects/js-array-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-break-iterator.h"
+#include "src/objects/js-collator.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/js-collection.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-date-time-format.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/js-generator-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-list-format.h"
+#include "src/objects/js-locale.h"
+#include "src/objects/js-number-format.h"
+#include "src/objects/js-plural-rules.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/js-promise.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-regexp-string-iterator.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-relative-time-format.h"
+#include "src/objects/js-segment-iterator.h"
+#include "src/objects/js-segmenter.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/js-weak-refs.h"
+#include "src/objects/map-inl.h"
+#include "src/objects/module.h"
+#include "src/objects/oddball.h"
+#include "src/objects/property-cell.h"
+#include "src/objects/prototype-info.h"
+#include "src/objects/shared-function-info.h"
+#include "src/ostreams.h"
+#include "src/property-descriptor.h"
+#include "src/property.h"
+#include "src/prototype.h"
+#include "src/string-builder-inl.h"
+#include "src/string-stream.h"
+#include "src/transitions.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY:
+ return JSProxy::HasProperty(it->isolate(), it->GetHolder<JSProxy>(),
+ it->GetName());
+ case LookupIterator::INTERCEPTOR: {
+ Maybe<PropertyAttributes> result =
+ JSObject::GetPropertyAttributesWithInterceptor(it);
+ if (result.IsNothing()) return Nothing<bool>();
+ if (result.FromJust() != ABSENT) return Just(true);
+ break;
+ }
+ case LookupIterator::ACCESS_CHECK: {
+ if (it->HasAccess()) break;
+ Maybe<PropertyAttributes> result =
+ JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
+ if (result.IsNothing()) return Nothing<bool>();
+ return Just(result.FromJust() != ABSENT);
+ }
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ // TypedArray out-of-bounds access.
+ return Just(false);
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::DATA:
+ return Just(true);
+ }
+ }
+ return Just(false);
+}
+
+// static
+Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ if (object->IsJSModuleNamespace()) {
+ PropertyDescriptor desc;
+ return JSReceiver::GetOwnPropertyDescriptor(object->GetIsolate(), object,
+ name, &desc);
+ }
+
+ if (object->IsJSObject()) { // Shortcut.
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ object->GetIsolate(), object, name, object, LookupIterator::OWN);
+ return HasProperty(&it);
+ }
+
+ Maybe<PropertyAttributes> attributes =
+ JSReceiver::GetOwnPropertyAttributes(object, name);
+ MAYBE_RETURN(attributes, Nothing<bool>());
+ return Just(attributes.FromJust() != ABSENT);
+}
+
+Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::ACCESS_CHECK:
+ // Support calling this method without an active context, but refuse
+ // access to access-checked objects in that case.
+ if (!it->isolate()->context().is_null() && it->HasAccess()) continue;
+ V8_FALLTHROUGH;
+ case LookupIterator::JSPROXY:
+ it->NotFound();
+ return it->isolate()->factory()->undefined_value();
+ case LookupIterator::ACCESSOR:
+ // TODO(verwaest): For now this doesn't call into AccessorInfo, since
+ // clients don't need it. Update once relevant.
+ it->NotFound();
+ return it->isolate()->factory()->undefined_value();
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return it->isolate()->factory()->undefined_value();
+ case LookupIterator::DATA:
+ return it->GetDataValue();
+ }
+ }
+ return it->isolate()->factory()->undefined_value();
+}
+
+// static
+Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> proto) {
+ PrototypeIterator iter(isolate, object, kStartAtReceiver);
+ while (true) {
+ if (!iter.AdvanceFollowingProxies()) return Nothing<bool>();
+ if (iter.IsAtEnd()) return Just(false);
+ if (PrototypeIterator::GetCurrent(iter).is_identical_to(proto)) {
+ return Just(true);
+ }
+ }
+}
+
+namespace {
+
+bool HasExcludedProperty(
+ const ScopedVector<Handle<Object>>* excluded_properties,
+ Handle<Object> search_element) {
+ // TODO(gsathya): Change this to be a hashtable.
+ for (int i = 0; i < excluded_properties->length(); i++) {
+ if (search_element->SameValue(*excluded_properties->at(i))) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
+ Handle<JSReceiver> target, Handle<Object> source,
+ const ScopedVector<Handle<Object>>* excluded_properties, bool use_set) {
+ // Non-empty strings are the only non-JSReceivers that need to be handled
+ // explicitly by Object.assign.
+ if (!source->IsJSReceiver()) {
+ return Just(!source->IsString() || String::cast(*source)->length() == 0);
+ }
+
+ // If the target is deprecated, the object will be updated on first store. If
+ // the source for that store equals the target, this will invalidate the
+ // cached representation of the source. Preventively upgrade the target.
+ // Do this on each iteration since any property load could cause deprecation.
+ if (target->map()->is_deprecated()) {
+ JSObject::MigrateInstance(Handle<JSObject>::cast(target));
+ }
+
+ Isolate* isolate = target->GetIsolate();
+ Handle<Map> map(JSReceiver::cast(*source)->map(), isolate);
+
+ if (!map->IsJSObjectMap()) return Just(false);
+ if (!map->OnlyHasSimpleProperties()) return Just(false);
+
+ Handle<JSObject> from = Handle<JSObject>::cast(source);
+ if (from->elements() != ReadOnlyRoots(isolate).empty_fixed_array()) {
+ return Just(false);
+ }
+
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ int length = map->NumberOfOwnDescriptors();
+
+ bool stable = true;
+
+ for (int i = 0; i < length; i++) {
+ Handle<Name> next_key(descriptors->GetKey(i), isolate);
+ Handle<Object> prop_value;
+ // Directly decode from the descriptor array if |from| did not change shape.
+ if (stable) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (!details.IsEnumerable()) continue;
+ if (details.kind() == kData) {
+ if (details.location() == kDescriptor) {
+ prop_value = handle(descriptors->GetStrongValue(i), isolate);
+ } else {
+ Representation representation = details.representation();
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ prop_value = JSObject::FastPropertyAt(from, representation, index);
+ }
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value,
+ JSReceiver::GetProperty(isolate, from, next_key), Nothing<bool>());
+ stable = from->map() == *map;
+ }
+ } else {
+ // If the map did change, do a slower lookup. We are still guaranteed that
+ // the object has a simple shape, and that the key is a name.
+ LookupIterator it(from, next_key, from,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (!it.IsFound()) continue;
+ DCHECK(it.state() == LookupIterator::DATA ||
+ it.state() == LookupIterator::ACCESSOR);
+ if (!it.IsEnumerable()) continue;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
+ }
+
+ if (use_set) {
+ LookupIterator it(target, next_key, target);
+ Maybe<bool> result =
+ Object::SetProperty(&it, prop_value, StoreOrigin::kNamed,
+ Just(ShouldThrow::kThrowOnError));
+ if (result.IsNothing()) return result;
+ if (stable) stable = from->map() == *map;
+ } else {
+ if (excluded_properties != nullptr &&
+ HasExcludedProperty(excluded_properties, next_key)) {
+ continue;
+ }
+
+ // 4a ii 2. Perform ? CreateDataProperty(target, nextKey, propValue).
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, target, next_key, &success, LookupIterator::OWN);
+ CHECK(success);
+ CHECK(JSObject::CreateDataProperty(&it, prop_value, Just(kThrowOnError))
+ .FromJust());
+ }
+ }
+
+ return Just(true);
+}
+} // namespace
+
+// static
+Maybe<bool> JSReceiver::SetOrCopyDataProperties(
+ Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
+ const ScopedVector<Handle<Object>>* excluded_properties, bool use_set) {
+ Maybe<bool> fast_assign =
+ FastAssign(target, source, excluded_properties, use_set);
+ if (fast_assign.IsNothing()) return Nothing<bool>();
+ if (fast_assign.FromJust()) return Just(true);
+
+ Handle<JSReceiver> from = Object::ToObject(isolate, source).ToHandleChecked();
+ // 3b. Let keys be ? from.[[OwnPropertyKeys]]().
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys,
+ KeyAccumulator::GetKeys(from, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+ GetKeysConversion::kKeepNumbers),
+ Nothing<bool>());
+
+ // 4. Repeat for each element nextKey of keys in List order,
+ for (int j = 0; j < keys->length(); ++j) {
+ Handle<Object> next_key(keys->get(j), isolate);
+ // 4a i. Let desc be ? from.[[GetOwnProperty]](nextKey).
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
+ if (found.IsNothing()) return Nothing<bool>();
+ // 4a ii. If desc is not undefined and desc.[[Enumerable]] is true, then
+ if (found.FromJust() && desc.enumerable()) {
+ // 4a ii 1. Let propValue be ? Get(from, nextKey).
+ Handle<Object> prop_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value,
+ Runtime::GetObjectProperty(isolate, from, next_key), Nothing<bool>());
+
+ if (use_set) {
+ // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
+ Handle<Object> status;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, status,
+ Runtime::SetObjectProperty(isolate, target, next_key, prop_value,
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError)),
+ Nothing<bool>());
+ } else {
+ if (excluded_properties != nullptr &&
+ HasExcludedProperty(excluded_properties, next_key)) {
+ continue;
+ }
+
+ // 4a ii 2. Perform ! CreateDataProperty(target, nextKey, propValue).
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, target, next_key, &success, LookupIterator::OWN);
+ CHECK(success);
+ CHECK(JSObject::CreateDataProperty(&it, prop_value, Just(kThrowOnError))
+ .FromJust());
+ }
+ }
+ }
+
+ return Just(true);
+}
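
FastAssign's |stable| flag is the load-bearing part: properties are read straight out of the descriptor-array snapshot only while the source's map is unchanged, and anything that can run user code forces a re-check. A much-simplified, hypothetical model (std::map and a key list standing in for Map and DescriptorArray):

#include <functional>
#include <map>
#include <string>
#include <vector>

struct Obj {
  std::vector<std::string> shape;                           // ordered keys
  std::map<std::string, int> data;                          // plain values
  std::map<std::string, std::function<int(Obj&)>> getters;  // may mutate
};

void AssignSketch(Obj& target, Obj& from) {
  const std::vector<std::string> snapshot = from.shape;
  bool stable = true;
  for (const std::string& key : snapshot) {
    int value = 0;
    auto g = from.getters.find(key);
    if (stable && g == from.getters.end()) {
      value = from.data[key];  // fast path: trust the snapshot
    } else if (g != from.getters.end()) {
      value = g->second(from);  // can reshape |from|
      stable = stable && from.shape == snapshot;
    } else {
      auto it = from.data.find(key);        // slow path: re-checked lookup
      if (it == from.data.end()) continue;  // property vanished; skip it
      value = it->second;
    }
    target.data[key] = value;
  }
}
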
+
+String JSReceiver::class_name() {
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ if (IsFunction()) return roots.Function_string();
+ if (IsJSArgumentsObject()) return roots.Arguments_string();
+ if (IsJSArray()) return roots.Array_string();
+ if (IsJSArrayBuffer()) {
+ if (JSArrayBuffer::cast(*this)->is_shared()) {
+ return roots.SharedArrayBuffer_string();
+ }
+ return roots.ArrayBuffer_string();
+ }
+ if (IsJSArrayIterator()) return roots.ArrayIterator_string();
+ if (IsJSDate()) return roots.Date_string();
+ if (IsJSError()) return roots.Error_string();
+ if (IsJSGeneratorObject()) return roots.Generator_string();
+ if (IsJSMap()) return roots.Map_string();
+ if (IsJSMapIterator()) return roots.MapIterator_string();
+ if (IsJSProxy()) {
+ return map()->is_callable() ? roots.Function_string()
+ : roots.Object_string();
+ }
+ if (IsJSRegExp()) return roots.RegExp_string();
+ if (IsJSSet()) return roots.Set_string();
+ if (IsJSSetIterator()) return roots.SetIterator_string();
+ if (IsJSTypedArray()) {
+#define SWITCH_KIND(Type, type, TYPE, ctype) \
+ if (map()->elements_kind() == TYPE##_ELEMENTS) { \
+ return roots.Type##Array_string(); \
+ }
+ TYPED_ARRAYS(SWITCH_KIND)
+#undef SWITCH_KIND
+ }
+ if (IsJSValue()) {
+ Object value = JSValue::cast(*this)->value();
+ if (value->IsBoolean()) return roots.Boolean_string();
+ if (value->IsString()) return roots.String_string();
+ if (value->IsNumber()) return roots.Number_string();
+ if (value->IsBigInt()) return roots.BigInt_string();
+ if (value->IsSymbol()) return roots.Symbol_string();
+ if (value->IsScript()) return roots.Script_string();
+ UNREACHABLE();
+ }
+ if (IsJSWeakMap()) return roots.WeakMap_string();
+ if (IsJSWeakSet()) return roots.WeakSet_string();
+ if (IsJSGlobalProxy()) return roots.global_string();
+
+ Object maybe_constructor = map()->GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
+ if (constructor->shared()->IsApiFunction()) {
+ maybe_constructor = constructor->shared()->get_api_func_data();
+ }
+ }
+
+ if (maybe_constructor->IsFunctionTemplateInfo()) {
+ FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
+ if (info->class_name()->IsString()) return String::cast(info->class_name());
+ }
+
+ return roots.Object_string();
+}
+
+namespace {
+std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
+ Handle<JSReceiver> receiver) {
+ Isolate* isolate = receiver->GetIsolate();
+
+ // If the object was instantiated simply with base == new.target, the
+ // constructor on the map provides the most accurate name.
+ // Don't provide the info for prototypes, since their constructors are
+ // reclaimed and replaced by Object in OptimizeAsPrototype.
+ if (!receiver->IsJSProxy() && receiver->map()->new_target_is_base() &&
+ !receiver->map()->is_prototype_map()) {
+ Object maybe_constructor = receiver->map()->GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
+ String name = constructor->shared()->DebugName();
+ if (name->length() != 0 &&
+ !name->Equals(ReadOnlyRoots(isolate).Object_string())) {
+ return std::make_pair(handle(constructor, isolate),
+ handle(name, isolate));
+ }
+ } else if (maybe_constructor->IsFunctionTemplateInfo()) {
+ FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
+ if (info->class_name()->IsString()) {
+ return std::make_pair(
+ MaybeHandle<JSFunction>(),
+ handle(String::cast(info->class_name()), isolate));
+ }
+ }
+ }
+
+ Handle<Object> maybe_tag = JSReceiver::GetDataProperty(
+ receiver, isolate->factory()->to_string_tag_symbol());
+ if (maybe_tag->IsString())
+ return std::make_pair(MaybeHandle<JSFunction>(),
+ Handle<String>::cast(maybe_tag));
+
+ PrototypeIterator iter(isolate, receiver);
+ if (iter.IsAtEnd()) {
+ return std::make_pair(MaybeHandle<JSFunction>(),
+ handle(receiver->class_name(), isolate));
+ }
+
+ Handle<JSReceiver> start = PrototypeIterator::GetCurrent<JSReceiver>(iter);
+ LookupIterator it(receiver, isolate->factory()->constructor_string(), start,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ Handle<Object> maybe_constructor = JSReceiver::GetDataProperty(&it);
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction constructor = JSFunction::cast(*maybe_constructor);
+ String name = constructor->shared()->DebugName();
+
+ if (name->length() != 0 &&
+ !name->Equals(ReadOnlyRoots(isolate).Object_string())) {
+ return std::make_pair(handle(constructor, isolate),
+ handle(name, isolate));
+ }
+ }
+
+ return std::make_pair(MaybeHandle<JSFunction>(),
+ handle(receiver->class_name(), isolate));
+}
+} // anonymous namespace
+
+// static
+MaybeHandle<JSFunction> JSReceiver::GetConstructor(
+ Handle<JSReceiver> receiver) {
+ return GetConstructorHelper(receiver).first;
+}
+
+// static
+Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
+ return GetConstructorHelper(receiver).second;
+}
+
+Handle<NativeContext> JSReceiver::GetCreationContext() {
+ JSReceiver receiver = *this;
+ // Externals are JSObjects with null as a constructor.
+ DCHECK(!receiver->IsExternal(GetIsolate()));
+ Object constructor = receiver->map()->GetConstructor();
+ JSFunction function;
+ if (constructor->IsJSFunction()) {
+ function = JSFunction::cast(constructor);
+ } else if (constructor->IsFunctionTemplateInfo()) {
+ // Remote objects don't have a creation context.
+ return Handle<NativeContext>::null();
+ } else if (receiver->IsJSGeneratorObject()) {
+ function = JSGeneratorObject::cast(receiver)->function();
+ } else {
+ // Functions have null as a constructor,
+ // but any JSFunction knows its context immediately.
+ CHECK(receiver->IsJSFunction());
+ function = JSFunction::cast(receiver);
+ }
+
+ return function->has_context()
+ ? Handle<NativeContext>(function->context()->native_context(),
+ receiver->GetIsolate())
+ : Handle<NativeContext>::null();
+}
+
+// static
+MaybeHandle<NativeContext> JSReceiver::GetFunctionRealm(
+ Handle<JSReceiver> receiver) {
+ if (receiver->IsJSProxy()) {
+ return JSProxy::GetFunctionRealm(Handle<JSProxy>::cast(receiver));
+ }
+
+ if (receiver->IsJSFunction()) {
+ return JSFunction::GetFunctionRealm(Handle<JSFunction>::cast(receiver));
+ }
+
+ if (receiver->IsJSBoundFunction()) {
+ return JSBoundFunction::GetFunctionRealm(
+ Handle<JSBoundFunction>::cast(receiver));
+ }
+
+ return JSObject::GetFunctionRealm(Handle<JSObject>::cast(receiver));
+}
+
+// static
+MaybeHandle<NativeContext> JSReceiver::GetContextForMicrotask(
+ Handle<JSReceiver> receiver) {
+ Isolate* isolate = receiver->GetIsolate();
+ while (receiver->IsJSBoundFunction() || receiver->IsJSProxy()) {
+ if (receiver->IsJSBoundFunction()) {
+ receiver = handle(
+ Handle<JSBoundFunction>::cast(receiver)->bound_target_function(),
+ isolate);
+ } else {
+ DCHECK(receiver->IsJSProxy());
+ Handle<Object> target(Handle<JSProxy>::cast(receiver)->target(), isolate);
+ if (!target->IsJSReceiver()) return MaybeHandle<NativeContext>();
+ receiver = Handle<JSReceiver>::cast(target);
+ }
+ }
+
+ if (!receiver->IsJSFunction()) return MaybeHandle<NativeContext>();
+ return handle(Handle<JSFunction>::cast(receiver)->native_context(), isolate);
+}
+
+Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
+ LookupIterator* it) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY:
+ return JSProxy::GetPropertyAttributes(it);
+ case LookupIterator::INTERCEPTOR: {
+ Maybe<PropertyAttributes> result =
+ JSObject::GetPropertyAttributesWithInterceptor(it);
+ if (result.IsNothing()) return result;
+ if (result.FromJust() != ABSENT) return result;
+ break;
+ }
+ case LookupIterator::ACCESS_CHECK:
+ if (it->HasAccess()) break;
+ return JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return Just(ABSENT);
+ case LookupIterator::ACCESSOR:
+ if (it->GetHolder<Object>()->IsJSModuleNamespace()) {
+ return JSModuleNamespace::GetPropertyAttributes(it);
+ } else {
+ return Just(it->property_attributes());
+ }
+ case LookupIterator::DATA:
+ return Just(it->property_attributes());
+ }
+ }
+ return Just(ABSENT);
+}
+
+namespace {
+
+Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
+ DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
+ DCHECK(PropertyArray::HashField::is_valid(hash));
+
+ ReadOnlyRoots roots = properties->GetReadOnlyRoots();
+ if (properties == roots.empty_fixed_array() ||
+ properties == roots.empty_property_array() ||
+ properties == roots.empty_property_dictionary()) {
+ return Smi::FromInt(hash);
+ }
+
+ if (properties->IsPropertyArray()) {
+ PropertyArray::cast(properties)->SetHash(hash);
+ DCHECK_LT(0, PropertyArray::cast(properties)->length());
+ return properties;
+ }
+
+ if (properties->IsGlobalDictionary()) {
+ GlobalDictionary::cast(properties)->SetHash(hash);
+ return properties;
+ }
+
+ DCHECK(properties->IsNameDictionary());
+ NameDictionary::cast(properties)->SetHash(hash);
+ return properties;
+}
+
+int GetIdentityHashHelper(JSReceiver object) {
+ DisallowHeapAllocation no_gc;
+ Object properties = object->raw_properties_or_hash();
+ if (properties->IsSmi()) {
+ return Smi::ToInt(properties);
+ }
+
+ if (properties->IsPropertyArray()) {
+ return PropertyArray::cast(properties)->Hash();
+ }
+
+ if (properties->IsNameDictionary()) {
+ return NameDictionary::cast(properties)->Hash();
+ }
+
+ if (properties->IsGlobalDictionary()) {
+ return GlobalDictionary::cast(properties)->Hash();
+ }
+
+#ifdef DEBUG
+ ReadOnlyRoots roots = object->GetReadOnlyRoots();
+ DCHECK(properties == roots.empty_fixed_array() ||
+ properties == roots.empty_property_dictionary());
+#endif
+
+ return PropertyArray::kNoHashSentinel;
+}
+} // namespace
+
+void JSReceiver::SetIdentityHash(int hash) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
+ DCHECK(PropertyArray::HashField::is_valid(hash));
+
+ HeapObject existing_properties = HeapObject::cast(raw_properties_or_hash());
+ Object new_properties = SetHashAndUpdateProperties(existing_properties, hash);
+ set_raw_properties_or_hash(new_properties);
+}
+
+void JSReceiver::SetProperties(HeapObject properties) {
+ DCHECK_IMPLIES(properties->IsPropertyArray() &&
+ PropertyArray::cast(properties)->length() == 0,
+ properties == GetReadOnlyRoots().empty_property_array());
+ DisallowHeapAllocation no_gc;
+ int hash = GetIdentityHashHelper(*this);
+ Object new_properties = properties;
+
+ // TODO(cbruni): Make GetIdentityHashHelper return a bool so that we
+ // don't have to manually compare against kNoHashSentinel.
+ if (hash != PropertyArray::kNoHashSentinel) {
+ new_properties = SetHashAndUpdateProperties(properties, hash);
+ }
+
+ set_raw_properties_or_hash(new_properties);
+}
+
+Object JSReceiver::GetIdentityHash() {
+ DisallowHeapAllocation no_gc;
+
+ int hash = GetIdentityHashHelper(*this);
+ if (hash == PropertyArray::kNoHashSentinel) {
+ return GetReadOnlyRoots().undefined_value();
+ }
+
+ return Smi::FromInt(hash);
+}
+
+// static
+Smi JSReceiver::CreateIdentityHash(Isolate* isolate, JSReceiver key) {
+ DisallowHeapAllocation no_gc;
+ int hash = isolate->GenerateIdentityHash(PropertyArray::HashField::kMax);
+ DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
+
+ key->SetIdentityHash(hash);
+ return Smi::FromInt(hash);
+}
+
+Smi JSReceiver::GetOrCreateIdentityHash(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+
+ int hash = GetIdentityHashHelper(*this);
+ if (hash != PropertyArray::kNoHashSentinel) {
+ return Smi::FromInt(hash);
+ }
+
+ return JSReceiver::CreateIdentityHash(isolate, *this);
+}
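
These helpers all hinge on raw_properties_or_hash being a union slot: a bare Smi hash while the object has no out-of-line properties, or a properties store whose header carries the hash. A hypothetical miniature of that layout (std::variant standing in for the tagged slot, -1 for the kNoHashSentinel):

#include <variant>
#include <vector>

struct PropertyArray {
  int hash = -1;  // sentinel stand-in: no identity hash yet
  std::vector<int> values;
};

using PropsOrHash = std::variant<int, PropertyArray*>;

int GetIdentityHash(const PropsOrHash& slot) {
  if (const int* smi = std::get_if<int>(&slot)) return *smi;
  return std::get<PropertyArray*>(slot)->hash;
}

// Mirrors JSReceiver::SetProperties: migrating to a real backing store must
// carry any previously assigned hash along.
void SetProperties(PropsOrHash& slot, PropertyArray* props) {
  int hash = GetIdentityHash(slot);
  if (hash != -1) props->hash = hash;
  slot = props;
}
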
+
+void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
+ int entry) {
+ DCHECK(!object->HasFastProperties());
+ Isolate* isolate = object->GetIsolate();
+
+ if (object->IsJSGlobalObject()) {
+ // If we have a global object, invalidate the cell and swap in a new one.
+ Handle<GlobalDictionary> dictionary(
+ JSGlobalObject::cast(*object)->global_dictionary(), isolate);
+ DCHECK_NE(GlobalDictionary::kNotFound, entry);
+
+ auto cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
+ cell->set_value(ReadOnlyRoots(isolate).the_hole_value());
+ cell->set_property_details(
+ PropertyDetails::Empty(PropertyCellType::kUninitialized));
+ } else {
+ Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+ DCHECK_NE(NameDictionary::kNotFound, entry);
+
+ dictionary = NameDictionary::DeleteEntry(isolate, dictionary, entry);
+ object->SetProperties(*dictionary);
+ }
+ if (object->map()->is_prototype_map()) {
+ // Invalidate prototype validity cell as this may invalidate transitioning
+ // store IC handlers.
+ JSObject::InvalidatePrototypeChains(object->map());
+ }
+}
+
+Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
+ LanguageMode language_mode) {
+ it->UpdateProtector();
+
+ Isolate* isolate = it->isolate();
+
+ if (it->state() == LookupIterator::JSPROXY) {
+ return JSProxy::DeletePropertyOrElement(it->GetHolder<JSProxy>(),
+ it->GetName(), language_mode);
+ }
+
+ if (it->GetReceiver()->IsJSProxy()) {
+ if (it->state() != LookupIterator::NOT_FOUND) {
+ DCHECK_EQ(LookupIterator::DATA, it->state());
+ DCHECK(it->name()->IsPrivate());
+ it->Delete();
+ }
+ return Just(true);
+ }
+ Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
+
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::JSPROXY:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::ACCESS_CHECK:
+ if (it->HasAccess()) break;
+ isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ return Just(false);
+ case LookupIterator::INTERCEPTOR: {
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
+ Maybe<bool> result =
+ JSObject::DeletePropertyWithInterceptor(it, should_throw);
+ // An exception was thrown in the interceptor. Propagate.
+ if (isolate->has_pending_exception()) return Nothing<bool>();
+ // Delete with interceptor succeeded. Return result.
+ // TODO(neis): In strict mode, we should probably throw if the
+ // interceptor returns false.
+ if (result.IsJust()) return result;
+ break;
+ }
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return Just(true);
+ case LookupIterator::DATA:
+ case LookupIterator::ACCESSOR: {
+ if (!it->IsConfigurable()) {
+ // Fail if the property is not configurable.
+ if (is_strict(language_mode)) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kStrictDeleteProperty, it->GetName(),
+ receiver));
+ return Nothing<bool>();
+ }
+ return Just(false);
+ }
+
+ it->Delete();
+
+ return Just(true);
+ }
+ }
+ }
+
+ return Just(true);
+}
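
The DATA/ACCESSOR arm above encodes the language-mode split for delete: a non-configurable property throws in strict mode and quietly returns false in sloppy mode. A condensed hypothetical model of just that arm:

#include <map>
#include <stdexcept>
#include <string>

struct Property {
  int value = 0;
  bool configurable = true;
};

enum class LanguageMode { kSloppy, kStrict };

bool DeleteOwn(std::map<std::string, Property>& props, const std::string& name,
               LanguageMode mode) {
  auto it = props.find(name);
  if (it == props.end()) return true;  // absent: trivially "deleted"
  if (!it->second.configurable) {
    if (mode == LanguageMode::kStrict)
      throw std::runtime_error("strict delete of non-configurable property");
    return false;  // sloppy mode fails silently
  }
  props.erase(it);
  return true;
}
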
+
+Maybe<bool> JSReceiver::DeleteElement(Handle<JSReceiver> object, uint32_t index,
+ LanguageMode language_mode) {
+ LookupIterator it(object->GetIsolate(), object, index, object,
+ LookupIterator::OWN);
+ return DeleteProperty(&it, language_mode);
+}
+
+Maybe<bool> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
+ Handle<Name> name,
+ LanguageMode language_mode) {
+ LookupIterator it(object, name, object, LookupIterator::OWN);
+ return DeleteProperty(&it, language_mode);
+}
+
+Maybe<bool> JSReceiver::DeletePropertyOrElement(Handle<JSReceiver> object,
+ Handle<Name> name,
+ LanguageMode language_mode) {
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ object->GetIsolate(), object, name, object, LookupIterator::OWN);
+ return DeleteProperty(&it, language_mode);
+}
+
+// ES6 19.1.2.4
+// static
+Object JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> attributes) {
+ // 1. If Type(O) is not Object, throw a TypeError exception.
+ if (!object->IsJSReceiver()) {
+ Handle<String> fun_name =
+ isolate->factory()->InternalizeUtf8String("Object.defineProperty");
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, fun_name));
+ }
+ // 2. Let key be ToPropertyKey(P).
+ // 3. ReturnIfAbrupt(key).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+ Object::ToPropertyKey(isolate, key));
+ // 4. Let desc be ToPropertyDescriptor(Attributes).
+ // 5. ReturnIfAbrupt(desc).
+ PropertyDescriptor desc;
+ if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
+ return ReadOnlyRoots(isolate).exception();
+ }
+ // 6. Let success be DefinePropertyOrThrow(O,key, desc).
+ Maybe<bool> success =
+ DefineOwnProperty(isolate, Handle<JSReceiver>::cast(object), key, &desc,
+ Just(kThrowOnError));
+ // 7. ReturnIfAbrupt(success).
+ MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception());
+ CHECK(success.FromJust());
+ // 8. Return O.
+ return *object;
+}
+
+// ES6 19.1.2.3.1
+// static
+MaybeHandle<Object> JSReceiver::DefineProperties(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> properties) {
+ // 1. If Type(O) is not Object, throw a TypeError exception.
+ if (!object->IsJSReceiver()) {
+ Handle<String> fun_name =
+ isolate->factory()->InternalizeUtf8String("Object.defineProperties");
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCalledOnNonObject, fun_name),
+ Object);
+ }
+ // 2. Let props be ToObject(Properties).
+ // 3. ReturnIfAbrupt(props).
+ Handle<JSReceiver> props;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, props,
+ Object::ToObject(isolate, properties), Object);
+
+ // 4. Let keys be props.[[OwnPropertyKeys]]().
+ // 5. ReturnIfAbrupt(keys).
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, keys,
+ KeyAccumulator::GetKeys(props, KeyCollectionMode::kOwnOnly,
+ ALL_PROPERTIES),
+ Object);
+ // 6. Let descriptors be an empty List.
+ int capacity = keys->length();
+ std::vector<PropertyDescriptor> descriptors(capacity);
+ size_t descriptors_index = 0;
+ // 7. Repeat for each element nextKey of keys in List order,
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> next_key(keys->get(i), isolate);
+ // 7a. Let propDesc be props.[[GetOwnProperty]](nextKey).
+ // 7b. ReturnIfAbrupt(propDesc).
+ bool success = false;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, props, next_key, &success, LookupIterator::OWN);
+ DCHECK(success);
+ Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+ if (maybe.IsNothing()) return MaybeHandle<Object>();
+ PropertyAttributes attrs = maybe.FromJust();
+ // 7c. If propDesc is not undefined and propDesc.[[Enumerable]] is true:
+ if (attrs == ABSENT) continue;
+ if (attrs & DONT_ENUM) continue;
+ // 7c i. Let descObj be Get(props, nextKey).
+ // 7c ii. ReturnIfAbrupt(descObj).
+ Handle<Object> desc_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, desc_obj, Object::GetProperty(&it),
+ Object);
+ // 7c iii. Let desc be ToPropertyDescriptor(descObj).
+ success = PropertyDescriptor::ToPropertyDescriptor(
+ isolate, desc_obj, &descriptors[descriptors_index]);
+ // 7c iv. ReturnIfAbrupt(desc).
+ if (!success) return MaybeHandle<Object>();
+ // 7c v. Append the pair (a two element List) consisting of nextKey and
+ // desc to the end of descriptors.
+ descriptors[descriptors_index].set_name(next_key);
+ descriptors_index++;
+ }
+ // 8. For each pair from descriptors in list order,
+ for (size_t i = 0; i < descriptors_index; ++i) {
+ PropertyDescriptor* desc = &descriptors[i];
+ // 8a. Let P be the first element of pair.
+ // 8b. Let desc be the second element of pair.
+ // 8c. Let status be DefinePropertyOrThrow(O, P, desc).
+ Maybe<bool> status =
+ DefineOwnProperty(isolate, Handle<JSReceiver>::cast(object),
+ desc->name(), desc, Just(kThrowOnError));
+ // 8d. ReturnIfAbrupt(status).
+ if (status.IsNothing()) return MaybeHandle<Object>();
+ CHECK(status.FromJust());
+ }
+ // 9. Return O.
+ return object;
+}
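
Note the deliberately two-phase shape: step 7 converts every descriptor before step 8 defines any property, so a failure during conversion leaves O untouched. A hypothetical skeleton of the same pattern:

#include <map>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

struct Desc {
  std::string name;
  int value;
};

void DefineAll(std::map<std::string, int>& object,
               const std::vector<std::pair<std::string, int>>& inputs) {
  std::vector<Desc> descriptors;
  for (const auto& in : inputs) {  // phase 1: validate and convert all
    if (in.first.empty()) throw std::invalid_argument("bad property key");
    descriptors.push_back({in.first, in.second});
  }
  for (const Desc& d : descriptors) {  // phase 2: apply; never reached if
    object[d.name] = d.value;          // phase 1 threw
  }
}
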
+
+// static
+Maybe<bool> JSReceiver::DefineOwnProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ Maybe<ShouldThrow> should_throw) {
+ if (object->IsJSArray()) {
+ return JSArray::DefineOwnProperty(isolate, Handle<JSArray>::cast(object),
+ key, desc, should_throw);
+ }
+ if (object->IsJSProxy()) {
+ return JSProxy::DefineOwnProperty(isolate, Handle<JSProxy>::cast(object),
+ key, desc, should_throw);
+ }
+ if (object->IsJSTypedArray()) {
+ return JSTypedArray::DefineOwnProperty(
+ isolate, Handle<JSTypedArray>::cast(object), key, desc, should_throw);
+ }
+
+ // OrdinaryDefineOwnProperty, by virtue of calling
+ // DefineOwnPropertyIgnoreAttributes, can handle arguments
+ // (ES#sec-arguments-exotic-objects-defineownproperty-p-desc).
+ return OrdinaryDefineOwnProperty(isolate, Handle<JSObject>::cast(object), key,
+ desc, should_throw);
+}
+
+// static
+Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(
+ Isolate* isolate, Handle<JSObject> object, Handle<Object> key,
+ PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw) {
+ bool success = false;
+ DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, key, &success, LookupIterator::OWN);
+ DCHECK(success); // ...so creating a LookupIterator can't fail.
+
+ // Deal with access checks first.
+ if (it.state() == LookupIterator::ACCESS_CHECK) {
+ if (!it.HasAccess()) {
+ isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ return Just(true);
+ }
+ it.Next();
+ }
+
+ return OrdinaryDefineOwnProperty(&it, desc, should_throw);
+}
+
+namespace {
+
+MaybeHandle<Object> GetPropertyWithInterceptorInternal(
+ LookupIterator* it, Handle<InterceptorInfo> interceptor, bool* done) {
+ *done = false;
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
+ if (interceptor->getter()->IsUndefined(isolate)) {
+ return isolate->factory()->undefined_value();
+ }
+
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ Handle<Object> result;
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, receiver, Object::ConvertReceiver(isolate, receiver), Object);
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, Just(kDontThrow));
+
+ if (it->IsElement()) {
+ result = args.CallIndexedGetter(interceptor, it->index());
+ } else {
+ result = args.CallNamedGetter(interceptor, it->name());
+ }
+
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (result.is_null()) return isolate->factory()->undefined_value();
+ *done = true;
+ // Rebox handle before return
+ return handle(*result, isolate);
+}
+
+Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
+ LookupIterator* it, Handle<InterceptorInfo> interceptor) {
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing
+ // callbacks or interceptor calls.
+ AssertNoContextChange ncc(isolate);
+ HandleScope scope(isolate);
+
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ DCHECK_IMPLIES(!it->IsElement() && it->name()->IsSymbol(),
+ interceptor->can_intercept_symbols());
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<PropertyAttributes>());
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, Just(kDontThrow));
+ if (!interceptor->query()->IsUndefined(isolate)) {
+ Handle<Object> result;
+ if (it->IsElement()) {
+ result = args.CallIndexedQuery(interceptor, it->index());
+ } else {
+ result = args.CallNamedQuery(interceptor, it->name());
+ }
+ if (!result.is_null()) {
+ int32_t value;
+ CHECK(result->ToInt32(&value));
+ return Just(static_cast<PropertyAttributes>(value));
+ }
+ } else if (!interceptor->getter()->IsUndefined(isolate)) {
+ // TODO(verwaest): Use GetPropertyWithInterceptor?
+ Handle<Object> result;
+ if (it->IsElement()) {
+ result = args.CallIndexedGetter(interceptor, it->index());
+ } else {
+ result = args.CallNamedGetter(interceptor, it->name());
+ }
+ if (!result.is_null()) return Just(DONT_ENUM);
+ }
+
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+ return Just(ABSENT);
+}
+
+Maybe<bool> SetPropertyWithInterceptorInternal(
+ LookupIterator* it, Handle<InterceptorInfo> interceptor,
+ Maybe<ShouldThrow> should_throw, Handle<Object> value) {
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
+ if (interceptor->setter()->IsUndefined(isolate)) return Just(false);
+
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ bool result;
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, should_throw);
+
+ if (it->IsElement()) {
+ // TODO(neis): In the future, we may want to actually return the
+ // interceptor's result, which then should be a boolean.
+ result = !args.CallIndexedSetter(interceptor, it->index(), value).is_null();
+ } else {
+ result = !args.CallNamedSetter(interceptor, it->name(), value).is_null();
+ }
+
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ return Just(result);
+}
+
+Maybe<bool> DefinePropertyWithInterceptorInternal(
+ LookupIterator* it, Handle<InterceptorInfo> interceptor,
+ Maybe<ShouldThrow> should_throw, PropertyDescriptor& desc) {
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
+ if (interceptor->definer()->IsUndefined(isolate)) return Just(false);
+
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ bool result;
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, should_throw);
+
+ std::unique_ptr<v8::PropertyDescriptor> descriptor(
+ new v8::PropertyDescriptor());
+ if (PropertyDescriptor::IsAccessorDescriptor(&desc)) {
+ descriptor.reset(new v8::PropertyDescriptor(
+ v8::Utils::ToLocal(desc.get()), v8::Utils::ToLocal(desc.set())));
+ } else if (PropertyDescriptor::IsDataDescriptor(&desc)) {
+ if (desc.has_writable()) {
+ descriptor.reset(new v8::PropertyDescriptor(
+ v8::Utils::ToLocal(desc.value()), desc.writable()));
+ } else {
+ descriptor.reset(
+ new v8::PropertyDescriptor(v8::Utils::ToLocal(desc.value())));
+ }
+ }
+ if (desc.has_enumerable()) {
+ descriptor->set_enumerable(desc.enumerable());
+ }
+ if (desc.has_configurable()) {
+ descriptor->set_configurable(desc.configurable());
+ }
+
+ if (it->IsElement()) {
+ result = !args.CallIndexedDefiner(interceptor, it->index(), *descriptor)
+ .is_null();
+ } else {
+ result =
+ !args.CallNamedDefiner(interceptor, it->name(), *descriptor).is_null();
+ }
+
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ return Just(result);
+}
+
+} // namespace
+
+// ES6 9.1.6.1
+// static
+Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(
+ LookupIterator* it, PropertyDescriptor* desc,
+ Maybe<ShouldThrow> should_throw) {
+ Isolate* isolate = it->isolate();
+ // 1. Let current be O.[[GetOwnProperty]](P).
+ // 2. ReturnIfAbrupt(current).
+ PropertyDescriptor current;
+ MAYBE_RETURN(GetOwnPropertyDescriptor(it, &current), Nothing<bool>());
+
+ it->Restart();
+ // Handle interceptor
+ for (; it->IsFound(); it->Next()) {
+ if (it->state() == LookupIterator::INTERCEPTOR) {
+ if (it->HolderIsReceiverOrHiddenPrototype()) {
+ Maybe<bool> result = DefinePropertyWithInterceptorInternal(
+ it, it->GetInterceptor(), should_throw, *desc);
+ if (result.IsNothing() || result.FromJust()) {
+ return result;
+ }
+ }
+ }
+ }
+
+ // TODO(jkummerow/verwaest): It would be nice if we didn't have to reset
+ // the iterator every time. Currently, the reasons why we need it are:
+ // - handle interceptors correctly
+ // - handle accessors correctly (which might change the holder's map)
+ it->Restart();
+ // 3. Let extensible be the value of the [[Extensible]] internal slot of O.
+ Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
+ bool extensible = JSObject::IsExtensible(object);
+
+ return ValidateAndApplyPropertyDescriptor(
+ isolate, it, extensible, desc, &current, should_throw, Handle<Name>());
+}
+
+// ES6 9.1.6.2
+// static
+Maybe<bool> JSReceiver::IsCompatiblePropertyDescriptor(
+ Isolate* isolate, bool extensible, PropertyDescriptor* desc,
+ PropertyDescriptor* current, Handle<Name> property_name,
+ Maybe<ShouldThrow> should_throw) {
+ // 1. Return ValidateAndApplyPropertyDescriptor(undefined, undefined,
+ // Extensible, Desc, Current).
+ return ValidateAndApplyPropertyDescriptor(
+ isolate, nullptr, extensible, desc, current, should_throw, property_name);
+}
+
+// ES6 9.1.6.3
+// static
+Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
+ Isolate* isolate, LookupIterator* it, bool extensible,
+ PropertyDescriptor* desc, PropertyDescriptor* current,
+ Maybe<ShouldThrow> should_throw, Handle<Name> property_name) {
+ // We either need a LookupIterator, or a property name.
+ DCHECK((it == nullptr) != property_name.is_null());
+ Handle<JSObject> object;
+ if (it != nullptr) object = Handle<JSObject>::cast(it->GetReceiver());
+ bool desc_is_data_descriptor = PropertyDescriptor::IsDataDescriptor(desc);
+ bool desc_is_accessor_descriptor =
+ PropertyDescriptor::IsAccessorDescriptor(desc);
+ bool desc_is_generic_descriptor =
+ PropertyDescriptor::IsGenericDescriptor(desc);
+ // 1. (Assert)
+ // 2. If current is undefined, then
+ if (current->is_empty()) {
+ // 2a. If extensible is false, return false.
+ if (!extensible) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kDefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
+ }
+ // 2c. If IsGenericDescriptor(Desc) or IsDataDescriptor(Desc) is true, then:
+ // (This is equivalent to !IsAccessorDescriptor(desc).)
+ DCHECK((desc_is_generic_descriptor || desc_is_data_descriptor) ==
+ !desc_is_accessor_descriptor);
+ if (!desc_is_accessor_descriptor) {
+ // 2c i. If O is not undefined, create an own data property named P of
+ // object O whose [[Value]], [[Writable]], [[Enumerable]] and
+ // [[Configurable]] attribute values are described by Desc. If the value
+ // of an attribute field of Desc is absent, the attribute of the newly
+ // created property is set to its default value.
+ if (it != nullptr) {
+ if (!desc->has_writable()) desc->set_writable(false);
+ if (!desc->has_enumerable()) desc->set_enumerable(false);
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ Handle<Object> value(
+ desc->has_value()
+ ? desc->value()
+ : Handle<Object>::cast(isolate->factory()->undefined_value()));
+ MaybeHandle<Object> result =
+ JSObject::DefineOwnPropertyIgnoreAttributes(it, value,
+ desc->ToAttributes());
+ if (result.is_null()) return Nothing<bool>();
+ }
+ } else {
+ // 2d. Else Desc must be an accessor Property Descriptor,
+ DCHECK(desc_is_accessor_descriptor);
+ // 2d i. If O is not undefined, create an own accessor property named P
+ // of object O whose [[Get]], [[Set]], [[Enumerable]] and
+ // [[Configurable]] attribute values are described by Desc. If the value
+ // of an attribute field of Desc is absent, the attribute of the newly
+ // created property is set to its default value.
+ if (it != nullptr) {
+ if (!desc->has_enumerable()) desc->set_enumerable(false);
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ Handle<Object> getter(
+ desc->has_get()
+ ? desc->get()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ Handle<Object> setter(
+ desc->has_set()
+ ? desc->set()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ MaybeHandle<Object> result =
+ JSObject::DefineAccessor(it, getter, setter, desc->ToAttributes());
+ if (result.is_null()) return Nothing<bool>();
+ }
+ }
+ // 2e. Return true.
+ return Just(true);
+ }
+ // 3. Return true, if every field in Desc is absent.
+ // 4. Return true, if every field in Desc also occurs in current and the
+ // value of every field in Desc is the same value as the corresponding field
+ // in current when compared using the SameValue algorithm.
+ if ((!desc->has_enumerable() ||
+ desc->enumerable() == current->enumerable()) &&
+ (!desc->has_configurable() ||
+ desc->configurable() == current->configurable()) &&
+ (!desc->has_value() ||
+ (current->has_value() && current->value()->SameValue(*desc->value()))) &&
+ (!desc->has_writable() ||
+ (current->has_writable() && current->writable() == desc->writable())) &&
+ (!desc->has_get() ||
+ (current->has_get() && current->get()->SameValue(*desc->get()))) &&
+ (!desc->has_set() ||
+ (current->has_set() && current->set()->SameValue(*desc->set())))) {
+ return Just(true);
+ }
+ // 5. If the [[Configurable]] field of current is false, then
+ if (!current->configurable()) {
+ // 5a. Return false, if the [[Configurable]] field of Desc is true.
+ if (desc->has_configurable() && desc->configurable()) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
+ }
+ // 5b. Return false, if the [[Enumerable]] field of Desc is present and the
+ // [[Enumerable]] fields of current and Desc are the Boolean negation of
+ // each other.
+ if (desc->has_enumerable() && desc->enumerable() != current->enumerable()) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
+ }
+ }
+
+ bool current_is_data_descriptor =
+ PropertyDescriptor::IsDataDescriptor(current);
+ // 6. If IsGenericDescriptor(Desc) is true, no further validation is required.
+ if (desc_is_generic_descriptor) {
+ // Nothing to see here.
+
+ // 7. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) have
+ // different results, then:
+ } else if (current_is_data_descriptor != desc_is_data_descriptor) {
+ // 7a. Return false, if the [[Configurable]] field of current is false.
+ if (!current->configurable()) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
+ }
+ // 7b. If IsDataDescriptor(current) is true, then:
+ if (current_is_data_descriptor) {
+ // 7b i. If O is not undefined, convert the property named P of object O
+ // from a data property to an accessor property. Preserve the existing
+ // values of the converted property's [[Configurable]] and [[Enumerable]]
+ // attributes and set the rest of the property's attributes to their
+ // default values.
+ // --> Folded into step 10.
+ } else {
+ // 7c i. If O is not undefined, convert the property named P of object O
+ // from an accessor property to a data property. Preserve the existing
+      // values of the converted property's [[Configurable]] and [[Enumerable]]
+      // attributes and set the rest of the property's attributes to their
+ // default values.
+ // --> Folded into step 10.
+ }
+
+ // 8. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) are both
+ // true, then:
+ } else if (current_is_data_descriptor && desc_is_data_descriptor) {
+ // 8a. If the [[Configurable]] field of current is false, then:
+ if (!current->configurable()) {
+ // 8a i. Return false, if the [[Writable]] field of current is false and
+ // the [[Writable]] field of Desc is true.
+ if (!current->writable() && desc->has_writable() && desc->writable()) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
+ }
+ // 8a ii. If the [[Writable]] field of current is false, then:
+ if (!current->writable()) {
+ // 8a ii 1. Return false, if the [[Value]] field of Desc is present and
+ // SameValue(Desc.[[Value]], current.[[Value]]) is false.
+ if (desc->has_value() && !desc->value()->SameValue(*current->value())) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
+ }
+ }
+ }
+ } else {
+ // 9. Else IsAccessorDescriptor(current) and IsAccessorDescriptor(Desc)
+ // are both true,
+ DCHECK(PropertyDescriptor::IsAccessorDescriptor(current) &&
+ desc_is_accessor_descriptor);
+ // 9a. If the [[Configurable]] field of current is false, then:
+ if (!current->configurable()) {
+ // 9a i. Return false, if the [[Set]] field of Desc is present and
+ // SameValue(Desc.[[Set]], current.[[Set]]) is false.
+ if (desc->has_set() && !desc->set()->SameValue(*current->set())) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
+ }
+ // 9a ii. Return false, if the [[Get]] field of Desc is present and
+ // SameValue(Desc.[[Get]], current.[[Get]]) is false.
+ if (desc->has_get() && !desc->get()->SameValue(*current->get())) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
+ }
+ }
+ }
+
+ // 10. If O is not undefined, then:
+ if (it != nullptr) {
+ // 10a. For each field of Desc that is present, set the corresponding
+ // attribute of the property named P of object O to the value of the field.
+ PropertyAttributes attrs = NONE;
+
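+    // Each attribute bit is taken from |desc| when the corresponding field
+    // is present and carried over from |current| otherwise.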
+ if (desc->has_enumerable()) {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (desc->enumerable() ? NONE : DONT_ENUM));
+ } else {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (current->enumerable() ? NONE : DONT_ENUM));
+ }
+ if (desc->has_configurable()) {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (desc->configurable() ? NONE : DONT_DELETE));
+ } else {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (current->configurable() ? NONE : DONT_DELETE));
+ }
+ if (desc_is_data_descriptor ||
+ (desc_is_generic_descriptor && current_is_data_descriptor)) {
+ if (desc->has_writable()) {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (desc->writable() ? NONE : READ_ONLY));
+ } else {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (current->writable() ? NONE : READ_ONLY));
+ }
+ Handle<Object> value(
+ desc->has_value() ? desc->value()
+ : current->has_value()
+ ? current->value()
+ : Handle<Object>::cast(
+ isolate->factory()->undefined_value()));
+ return JSObject::DefineOwnPropertyIgnoreAttributes(it, value, attrs,
+ should_throw);
+ } else {
+ DCHECK(desc_is_accessor_descriptor ||
+ (desc_is_generic_descriptor &&
+ PropertyDescriptor::IsAccessorDescriptor(current)));
+ Handle<Object> getter(
+ desc->has_get()
+ ? desc->get()
+ : current->has_get()
+ ? current->get()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ Handle<Object> setter(
+ desc->has_set()
+ ? desc->set()
+ : current->has_set()
+ ? current->set()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ MaybeHandle<Object> result =
+ JSObject::DefineAccessor(it, getter, setter, attrs);
+ if (result.is_null()) return Nothing<bool>();
+ }
+ }
+
+ // 11. Return true.
+ return Just(true);
+}
+
+// static
+Maybe<bool> JSReceiver::CreateDataProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Name> key,
+ Handle<Object> value,
+ Maybe<ShouldThrow> should_throw) {
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, key,
+ LookupIterator::OWN);
+ return CreateDataProperty(&it, value, should_throw);
+}
+
+// static
+Maybe<bool> JSReceiver::CreateDataProperty(LookupIterator* it,
+ Handle<Object> value,
+ Maybe<ShouldThrow> should_throw) {
+ DCHECK(!it->check_prototype_chain());
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
+ Isolate* isolate = receiver->GetIsolate();
+
+ if (receiver->IsJSObject()) {
+ return JSObject::CreateDataProperty(it, value, should_throw); // Shortcut.
+ }
+
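+  // Non-JSObject receivers (e.g. proxies) take the generic path: define a
+  // {value, writable: true, enumerable: true, configurable: true} property,
+  // as prescribed for CreateDataProperty.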
+ PropertyDescriptor new_desc;
+ new_desc.set_value(value);
+ new_desc.set_writable(true);
+ new_desc.set_enumerable(true);
+ new_desc.set_configurable(true);
+
+ return JSReceiver::DefineOwnProperty(isolate, receiver, it->GetName(),
+ &new_desc, should_throw);
+}
+
+// static
+Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc) {
+ bool success = false;
+ DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, key, &success, LookupIterator::OWN);
+ DCHECK(success); // ...so creating a LookupIterator can't fail.
+ return GetOwnPropertyDescriptor(&it, desc);
+}
+
+namespace {
+
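+// Returns Just(true) when an interceptor produced a descriptor for the
+// property and Just(false) when the request was not intercepted.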
+Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
+ PropertyDescriptor* desc) {
+ if (it->state() == LookupIterator::ACCESS_CHECK) {
+ if (it->HasAccess()) {
+ it->Next();
+ } else if (!JSObject::AllCanRead(it) ||
+ it->state() != LookupIterator::INTERCEPTOR) {
+ it->Restart();
+ return Just(false);
+ }
+ }
+
+ if (it->state() != LookupIterator::INTERCEPTOR) return Just(false);
+
+ Isolate* isolate = it->isolate();
+ Handle<InterceptorInfo> interceptor = it->GetInterceptor();
+ if (interceptor->descriptor()->IsUndefined(isolate)) return Just(false);
+
+ Handle<Object> result;
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
+
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, Just(kDontThrow));
+ if (it->IsElement()) {
+ result = args.CallIndexedDescriptor(interceptor, it->index());
+ } else {
+ result = args.CallNamedDescriptor(interceptor, it->name());
+ }
+ if (!result.is_null()) {
+    // Request successfully intercepted; try to interpret the result as a
+    // property descriptor.
+ Utils::ApiCheck(
+ PropertyDescriptor::ToPropertyDescriptor(isolate, result, desc),
+ it->IsElement() ? "v8::IndexedPropertyDescriptorCallback"
+ : "v8::NamedPropertyDescriptorCallback",
+ "Invalid property descriptor.");
+
+ return Just(true);
+ }
+
+ it->Next();
+ return Just(false);
+}
+} // namespace
+
+// ES6 9.1.5.1
+// Returns Just(true) on success, Just(false) if the property didn't exist,
+// and Nothing<bool>() if an exception was thrown.
+// static
+Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
+ PropertyDescriptor* desc) {
+ Isolate* isolate = it->isolate();
+ // "Virtual" dispatch.
+ if (it->IsFound() && it->GetHolder<JSReceiver>()->IsJSProxy()) {
+ return JSProxy::GetOwnPropertyDescriptor(isolate, it->GetHolder<JSProxy>(),
+ it->GetName(), desc);
+ }
+
+ Maybe<bool> intercepted = GetPropertyDescriptorWithInterceptor(it, desc);
+ MAYBE_RETURN(intercepted, Nothing<bool>());
+ if (intercepted.FromJust()) {
+ return Just(true);
+ }
+
+  // The request was not intercepted; continue as normal.
+ // 1. (Assert)
+ // 2. If O does not have an own property with key P, return undefined.
+ Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(it);
+ MAYBE_RETURN(maybe, Nothing<bool>());
+ PropertyAttributes attrs = maybe.FromJust();
+ if (attrs == ABSENT) return Just(false);
+ DCHECK(!isolate->has_pending_exception());
+
+ // 3. Let D be a newly created Property Descriptor with no fields.
+ DCHECK(desc->is_empty());
+ // 4. Let X be O's own property whose key is P.
+ // 5. If X is a data property, then
+ bool is_accessor_pair = it->state() == LookupIterator::ACCESSOR &&
+ it->GetAccessors()->IsAccessorPair();
+ if (!is_accessor_pair) {
+ // 5a. Set D.[[Value]] to the value of X's [[Value]] attribute.
+ Handle<Object> value;
+ if (!Object::GetProperty(it).ToHandle(&value)) {
+ DCHECK(isolate->has_pending_exception());
+ return Nothing<bool>();
+ }
+ desc->set_value(value);
+ // 5b. Set D.[[Writable]] to the value of X's [[Writable]] attribute
+ desc->set_writable((attrs & READ_ONLY) == 0);
+ } else {
+ // 6. Else X is an accessor property, so
+ Handle<AccessorPair> accessors =
+ Handle<AccessorPair>::cast(it->GetAccessors());
+ // 6a. Set D.[[Get]] to the value of X's [[Get]] attribute.
+ desc->set_get(
+ AccessorPair::GetComponent(isolate, accessors, ACCESSOR_GETTER));
+ // 6b. Set D.[[Set]] to the value of X's [[Set]] attribute.
+ desc->set_set(
+ AccessorPair::GetComponent(isolate, accessors, ACCESSOR_SETTER));
+ }
+
+ // 7. Set D.[[Enumerable]] to the value of X's [[Enumerable]] attribute.
+ desc->set_enumerable((attrs & DONT_ENUM) == 0);
+ // 8. Set D.[[Configurable]] to the value of X's [[Configurable]] attribute.
+ desc->set_configurable((attrs & DONT_DELETE) == 0);
+ // 9. Return D.
+ DCHECK(PropertyDescriptor::IsAccessorDescriptor(desc) !=
+ PropertyDescriptor::IsDataDescriptor(desc));
+ return Just(true);
+}
+
+Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
+ IntegrityLevel level,
+ ShouldThrow should_throw) {
+ DCHECK(level == SEALED || level == FROZEN);
+
+ if (receiver->IsJSObject()) {
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver);
+
+ if (!object->HasSloppyArgumentsElements() &&
+ !object->IsJSModuleNamespace()) { // Fast path.
+ // Prevent memory leaks by not adding unnecessary transitions.
+ Maybe<bool> test = JSObject::TestIntegrityLevel(object, level);
+ MAYBE_RETURN(test, Nothing<bool>());
+ if (test.FromJust()) return test;
+
+ if (level == SEALED) {
+ return JSObject::PreventExtensionsWithTransition<SEALED>(object,
+ should_throw);
+ } else {
+ return JSObject::PreventExtensionsWithTransition<FROZEN>(object,
+ should_throw);
+ }
+ }
+ }
+
+ Isolate* isolate = receiver->GetIsolate();
+
+ MAYBE_RETURN(JSReceiver::PreventExtensions(receiver, should_throw),
+ Nothing<bool>());
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys, JSReceiver::OwnPropertyKeys(receiver), Nothing<bool>());
+
+ PropertyDescriptor no_conf;
+ no_conf.set_configurable(false);
+
+ PropertyDescriptor no_conf_no_write;
+ no_conf_no_write.set_configurable(false);
+ no_conf_no_write.set_writable(false);
+
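+  // Sealing makes every own property non-configurable; freezing additionally
+  // makes data properties non-writable, which is why accessor properties
+  // keep |no_conf| in the FROZEN loop below.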
+ if (level == SEALED) {
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ MAYBE_RETURN(DefineOwnProperty(isolate, receiver, key, &no_conf,
+ Just(kThrowOnError)),
+ Nothing<bool>());
+ }
+ return Just(true);
+ }
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ PropertyDescriptor current_desc;
+ Maybe<bool> owned = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, receiver, key, &current_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust()) {
+ PropertyDescriptor desc =
+ PropertyDescriptor::IsAccessorDescriptor(&current_desc)
+ ? no_conf
+ : no_conf_no_write;
+ MAYBE_RETURN(
+ DefineOwnProperty(isolate, receiver, key, &desc, Just(kThrowOnError)),
+ Nothing<bool>());
+ }
+ }
+ return Just(true);
+}
+
+namespace {
+Maybe<bool> GenericTestIntegrityLevel(Handle<JSReceiver> receiver,
+ PropertyAttributes level) {
+ DCHECK(level == SEALED || level == FROZEN);
+
+ Maybe<bool> extensible = JSReceiver::IsExtensible(receiver);
+ MAYBE_RETURN(extensible, Nothing<bool>());
+ if (extensible.FromJust()) return Just(false);
+
+ Isolate* isolate = receiver->GetIsolate();
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys, JSReceiver::OwnPropertyKeys(receiver), Nothing<bool>());
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ PropertyDescriptor current_desc;
+ Maybe<bool> owned = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, receiver, key, &current_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust()) {
+ if (current_desc.configurable()) return Just(false);
+ if (level == FROZEN &&
+ PropertyDescriptor::IsDataDescriptor(&current_desc) &&
+ current_desc.writable()) {
+ return Just(false);
+ }
+ }
+ }
+ return Just(true);
+}
+
+} // namespace
+
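+// Plain JSObjects get a fast check; receivers with custom behavior (e.g.
+// proxies) take the generic, spec-driven walk.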
+Maybe<bool> JSReceiver::TestIntegrityLevel(Handle<JSReceiver> receiver,
+ IntegrityLevel level) {
+ if (!receiver->map()->IsCustomElementsReceiverMap()) {
+ return JSObject::TestIntegrityLevel(Handle<JSObject>::cast(receiver),
+ level);
+ }
+ return GenericTestIntegrityLevel(receiver, level);
+}
+
+Maybe<bool> JSReceiver::PreventExtensions(Handle<JSReceiver> object,
+ ShouldThrow should_throw) {
+ if (object->IsJSProxy()) {
+ return JSProxy::PreventExtensions(Handle<JSProxy>::cast(object),
+ should_throw);
+ }
+ DCHECK(object->IsJSObject());
+ return JSObject::PreventExtensions(Handle<JSObject>::cast(object),
+ should_throw);
+}
+
+Maybe<bool> JSReceiver::IsExtensible(Handle<JSReceiver> object) {
+ if (object->IsJSProxy()) {
+ return JSProxy::IsExtensible(Handle<JSProxy>::cast(object));
+ }
+ return Just(JSObject::IsExtensible(Handle<JSObject>::cast(object)));
+}
+
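+// ES6 7.1.1 ToPrimitive: prefer an @@toPrimitive method when the receiver
+// provides one, then fall back to OrdinaryToPrimitive.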
+// static
+MaybeHandle<Object> JSReceiver::ToPrimitive(Handle<JSReceiver> receiver,
+ ToPrimitiveHint hint) {
+ Isolate* const isolate = receiver->GetIsolate();
+ Handle<Object> exotic_to_prim;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, exotic_to_prim,
+ Object::GetMethod(receiver, isolate->factory()->to_primitive_symbol()),
+ Object);
+ if (!exotic_to_prim->IsUndefined(isolate)) {
+ Handle<Object> hint_string =
+ isolate->factory()->ToPrimitiveHintString(hint);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, exotic_to_prim, receiver, 1, &hint_string),
+ Object);
+ if (result->IsPrimitive()) return result;
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCannotConvertToPrimitive),
+ Object);
+ }
+ return OrdinaryToPrimitive(receiver, (hint == ToPrimitiveHint::kString)
+ ? OrdinaryToPrimitiveHint::kString
+ : OrdinaryToPrimitiveHint::kNumber);
+}
+
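+// ES6 7.1.1.1 OrdinaryToPrimitive: try "valueOf" and "toString" in the order
+// determined by the hint.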
+// static
+MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
+ Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint) {
+ Isolate* const isolate = receiver->GetIsolate();
+ Handle<String> method_names[2];
+ switch (hint) {
+ case OrdinaryToPrimitiveHint::kNumber:
+ method_names[0] = isolate->factory()->valueOf_string();
+ method_names[1] = isolate->factory()->toString_string();
+ break;
+ case OrdinaryToPrimitiveHint::kString:
+ method_names[0] = isolate->factory()->toString_string();
+ method_names[1] = isolate->factory()->valueOf_string();
+ break;
+ }
+ for (Handle<String> name : method_names) {
+ Handle<Object> method;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, method,
+ JSReceiver::GetProperty(isolate, receiver, name),
+ Object);
+ if (method->IsCallable()) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, method, receiver, 0, nullptr), Object);
+ if (result->IsPrimitive()) return result;
+ }
+ }
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCannotConvertToPrimitive),
+ Object);
+}
+
+V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
+ Isolate* isolate, Handle<JSReceiver> receiver, bool get_entries,
+ Handle<FixedArray>* result) {
+ Handle<Map> map(JSReceiver::cast(*receiver)->map(), isolate);
+
+ if (!map->IsJSObjectMap()) return Just(false);
+ if (!map->OnlyHasSimpleProperties()) return Just(false);
+
+ Handle<JSObject> object(JSObject::cast(*receiver), isolate);
+
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ int number_of_own_elements =
+ object->GetElementsAccessor()->GetCapacity(*object, object->elements());
+ Handle<FixedArray> values_or_entries = isolate->factory()->NewFixedArray(
+ number_of_own_descriptors + number_of_own_elements);
+ int count = 0;
+
+ if (object->elements() != ReadOnlyRoots(isolate).empty_fixed_array()) {
+ MAYBE_RETURN(object->GetElementsAccessor()->CollectValuesOrEntries(
+ isolate, object, values_or_entries, get_entries, &count,
+ ENUMERABLE_STRINGS),
+ Nothing<bool>());
+ }
+
+ bool stable = object->map() == *map;
+
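+  // Accessor getters below can run arbitrary JS and change the object's map;
+  // |stable| is recomputed after such calls so that later iterations fall
+  // back to the slow lookup path.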
+ for (int index = 0; index < number_of_own_descriptors; index++) {
+ Handle<Name> next_key(descriptors->GetKey(index), isolate);
+ if (!next_key->IsString()) continue;
+ Handle<Object> prop_value;
+
+    // Directly decode from the descriptor array if |object| did not change
+    // shape.
+ if (stable) {
+ PropertyDetails details = descriptors->GetDetails(index);
+ if (!details.IsEnumerable()) continue;
+ if (details.kind() == kData) {
+ if (details.location() == kDescriptor) {
+ prop_value = handle(descriptors->GetStrongValue(index), isolate);
+ } else {
+ Representation representation = details.representation();
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, index);
+ prop_value =
+ JSObject::FastPropertyAt(object, representation, field_index);
+ }
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value,
+ JSReceiver::GetProperty(isolate, object, next_key),
+ Nothing<bool>());
+ stable = object->map() == *map;
+ }
+ } else {
+ // If the map did change, do a slower lookup. We are still guaranteed that
+ // the object has a simple shape, and that the key is a name.
+ LookupIterator it(isolate, object, next_key,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (!it.IsFound()) continue;
+ DCHECK(it.state() == LookupIterator::DATA ||
+ it.state() == LookupIterator::ACCESSOR);
+ if (!it.IsEnumerable()) continue;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
+ }
+
+ if (get_entries) {
+ prop_value = MakeEntryPair(isolate, next_key, prop_value);
+ }
+
+ values_or_entries->set(count, *prop_value);
+ count++;
+ }
+
+ DCHECK_LE(count, values_or_entries->length());
+ *result = FixedArray::ShrinkOrEmpty(isolate, values_or_entries, count);
+ return Just(true);
+}
+
+MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
+ Handle<JSReceiver> object,
+ PropertyFilter filter,
+ bool try_fast_path,
+ bool get_entries) {
+ Handle<FixedArray> values_or_entries;
+ if (try_fast_path && filter == ENUMERABLE_STRINGS) {
+ Maybe<bool> fast_values_or_entries = FastGetOwnValuesOrEntries(
+ isolate, object, get_entries, &values_or_entries);
+ if (fast_values_or_entries.IsNothing()) return MaybeHandle<FixedArray>();
+ if (fast_values_or_entries.FromJust()) return values_or_entries;
+ }
+
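+  // Collect keys without the enumerability filter; when ONLY_ENUMERABLE is
+  // requested, it is enforced per key below via GetOwnPropertyDescriptor, so
+  // enumerability is observed through [[GetOwnProperty]] as the spec requires.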
+ PropertyFilter key_filter =
+ static_cast<PropertyFilter>(filter & ~ONLY_ENUMERABLE);
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys,
+ KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly, key_filter,
+ GetKeysConversion::kConvertToString),
+ MaybeHandle<FixedArray>());
+
+ values_or_entries = isolate->factory()->NewFixedArray(keys->length());
+ int length = 0;
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Name> key = Handle<Name>::cast(handle(keys->get(i), isolate));
+
+ if (filter & ONLY_ENUMERABLE) {
+ PropertyDescriptor descriptor;
+ Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, object, key, &descriptor);
+ MAYBE_RETURN(did_get_descriptor, MaybeHandle<FixedArray>());
+ if (!did_get_descriptor.FromJust() || !descriptor.enumerable()) continue;
+ }
+
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, Object::GetPropertyOrElement(isolate, object, key),
+ MaybeHandle<FixedArray>());
+
+ if (get_entries) {
+ Handle<FixedArray> entry_storage =
+ isolate->factory()->NewUninitializedFixedArray(2);
+ entry_storage->set(0, *key);
+ entry_storage->set(1, *value);
+ value = isolate->factory()->NewJSArrayWithElements(entry_storage,
+ PACKED_ELEMENTS, 2);
+ }
+
+ values_or_entries->set(length, *value);
+ length++;
+ }
+ DCHECK_LE(length, values_or_entries->length());
+ return FixedArray::ShrinkOrEmpty(isolate, values_or_entries, length);
+}
+
+MaybeHandle<FixedArray> JSReceiver::GetOwnValues(Handle<JSReceiver> object,
+ PropertyFilter filter,
+ bool try_fast_path) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
+ try_fast_path, false);
+}
+
+MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
+ PropertyFilter filter,
+ bool try_fast_path) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
+ try_fast_path, true);
+}
+
+Handle<FixedArray> JSReceiver::GetOwnElementIndices(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSObject> object) {
+ KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
+ ALL_PROPERTIES);
+ accumulator.CollectOwnElementIndices(receiver, object);
+ Handle<FixedArray> keys =
+ accumulator.GetKeys(GetKeysConversion::kKeepNumbers);
+ DCHECK(keys->ContainsSortedNumbers());
+ return keys;
+}
+
+Maybe<bool> JSReceiver::SetPrototype(Handle<JSReceiver> object,
+ Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw) {
+ if (object->IsJSProxy()) {
+ return JSProxy::SetPrototype(Handle<JSProxy>::cast(object), value,
+ from_javascript, should_throw);
+ }
+ return JSObject::SetPrototype(Handle<JSObject>::cast(object), value,
+ from_javascript, should_throw);
+}
+
+bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
+ for (PrototypeIterator iter(isolate, *this, kStartAtReceiver,
+ PrototypeIterator::END_AT_NULL);
+ !iter.IsAtEnd(); iter.AdvanceIgnoringProxies()) {
+ if (iter.GetCurrent()->IsJSProxy()) return true;
+ }
+ return false;
+}
+
+bool JSReceiver::HasComplexElements() {
+ if (IsJSProxy()) return true;
+ JSObject this_object = JSObject::cast(*this);
+ if (this_object->HasIndexedInterceptor()) {
+ return true;
+ }
+ if (!this_object->HasDictionaryElements()) return false;
+ return this_object->element_dictionary()->HasComplexElements();
+}
+
+// static
+MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target,
+ Handle<AllocationSite> site) {
+ // If called through new, new.target can be:
+ // - a subclass of constructor,
+ // - a proxy wrapper around constructor, or
+ // - the constructor itself.
+ // If called through Reflect.construct, it's guaranteed to be a constructor.
+ Isolate* const isolate = constructor->GetIsolate();
+ DCHECK(constructor->IsConstructor());
+ DCHECK(new_target->IsConstructor());
+ DCHECK(!constructor->has_initial_map() ||
+ constructor->initial_map()->instance_type() != JS_FUNCTION_TYPE);
+
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedMap(isolate, constructor, new_target), JSObject);
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObjectFromMap(initial_map, NOT_TENURED, site);
+ if (initial_map->is_dictionary_map()) {
+ Handle<NameDictionary> dictionary =
+ NameDictionary::New(isolate, NameDictionary::kInitialCapacity);
+ result->SetProperties(*dictionary);
+ }
+ isolate->counters()->constructed_objects()->Increment();
+ isolate->counters()->constructed_objects_runtime()->Increment();
+ return result;
+}
+
+// 9.1.12 ObjectCreate ( proto [ , internalSlotsList ] )
+// Notice: This is NOT 19.1.2.2 Object.create ( O, Properties )
+MaybeHandle<JSObject> JSObject::ObjectCreate(Isolate* isolate,
+ Handle<Object> prototype) {
+ // Generate the map with the specified {prototype} based on the Object
+ // function's initial map from the current native context.
+ // TODO(bmeurer): Use a dedicated cache for Object.create; think about
+ // slack tracking for Object.create.
+ Handle<Map> map =
+ Map::GetObjectCreateMap(isolate, Handle<HeapObject>::cast(prototype));
+
+ // Actually allocate the object.
+ Handle<JSObject> object;
+ if (map->is_dictionary_map()) {
+ object = isolate->factory()->NewSlowJSObjectFromMap(map);
+ } else {
+ object = isolate->factory()->NewJSObjectFromMap(map);
+ }
+ return object;
+}
+
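+// Copy-on-write element backing stores are shared between arrays; swap in a
+// writable copy before the caller mutates the elements.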
+void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
+ DCHECK(object->HasSmiOrObjectElements() ||
+ object->HasFastStringWrapperElements());
+ FixedArray raw_elems = FixedArray::cast(object->elements());
+ Isolate* isolate = object->GetIsolate();
+ if (raw_elems->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) return;
+ Handle<FixedArray> elems(raw_elems, isolate);
+ Handle<FixedArray> writable_elems = isolate->factory()->CopyFixedArrayWithMap(
+ elems, isolate->factory()->fixed_array_map());
+ object->set_elements(*writable_elems);
+ isolate->counters()->cow_arrays_converted()->Increment();
+}
+
+int JSObject::GetHeaderSize(InstanceType type,
+ bool function_has_prototype_slot) {
+ switch (type) {
+ case JS_OBJECT_TYPE:
+ case JS_API_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ return JSObject::kHeaderSize;
+ case JS_GENERATOR_OBJECT_TYPE:
+ return JSGeneratorObject::kSize;
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
+ return JSAsyncFunctionObject::kSize;
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE:
+ return JSAsyncGeneratorObject::kSize;
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+ return JSAsyncFromSyncIterator::kSize;
+ case JS_GLOBAL_PROXY_TYPE:
+ return JSGlobalProxy::kSize;
+ case JS_GLOBAL_OBJECT_TYPE:
+ return JSGlobalObject::kSize;
+ case JS_BOUND_FUNCTION_TYPE:
+ return JSBoundFunction::kSize;
+ case JS_FUNCTION_TYPE:
+ return JSFunction::GetHeaderSize(function_has_prototype_slot);
+ case JS_VALUE_TYPE:
+ return JSValue::kSize;
+ case JS_DATE_TYPE:
+ return JSDate::kSize;
+ case JS_ARRAY_TYPE:
+ return JSArray::kSize;
+ case JS_ARRAY_BUFFER_TYPE:
+ return JSArrayBuffer::kHeaderSize;
+ case JS_ARRAY_ITERATOR_TYPE:
+ return JSArrayIterator::kSize;
+ case JS_TYPED_ARRAY_TYPE:
+ return JSTypedArray::kHeaderSize;
+ case JS_DATA_VIEW_TYPE:
+ return JSDataView::kHeaderSize;
+ case JS_SET_TYPE:
+ return JSSet::kSize;
+ case JS_MAP_TYPE:
+ return JSMap::kSize;
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ return JSSetIterator::kSize;
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE:
+ return JSMapIterator::kSize;
+ case WEAK_CELL_TYPE:
+ return WeakCell::kSize;
+ case JS_WEAK_REF_TYPE:
+ return JSWeakRef::kSize;
+ case JS_FINALIZATION_GROUP_TYPE:
+ return JSFinalizationGroup::kSize;
+ case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
+ return JSFinalizationGroupCleanupIterator::kSize;
+ case JS_WEAK_MAP_TYPE:
+ return JSWeakMap::kSize;
+ case JS_WEAK_SET_TYPE:
+ return JSWeakSet::kSize;
+ case JS_PROMISE_TYPE:
+ return JSPromise::kSize;
+ case JS_REGEXP_TYPE:
+ return JSRegExp::kSize;
+ case JS_REGEXP_STRING_ITERATOR_TYPE:
+ return JSRegExpStringIterator::kSize;
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ return JSObject::kHeaderSize;
+ case JS_MESSAGE_OBJECT_TYPE:
+ return JSMessageObject::kSize;
+ case JS_ARGUMENTS_TYPE:
+ return JSObject::kHeaderSize;
+ case JS_ERROR_TYPE:
+ return JSObject::kHeaderSize;
+ case JS_STRING_ITERATOR_TYPE:
+ return JSStringIterator::kSize;
+ case JS_MODULE_NAMESPACE_TYPE:
+ return JSModuleNamespace::kHeaderSize;
+#ifdef V8_INTL_SUPPORT
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+ return JSV8BreakIterator::kSize;
+ case JS_INTL_COLLATOR_TYPE:
+ return JSCollator::kSize;
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
+ return JSDateTimeFormat::kSize;
+ case JS_INTL_LIST_FORMAT_TYPE:
+ return JSListFormat::kSize;
+ case JS_INTL_LOCALE_TYPE:
+ return JSLocale::kSize;
+ case JS_INTL_NUMBER_FORMAT_TYPE:
+ return JSNumberFormat::kSize;
+ case JS_INTL_PLURAL_RULES_TYPE:
+ return JSPluralRules::kSize;
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ return JSRelativeTimeFormat::kSize;
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
+ return JSSegmentIterator::kSize;
+ case JS_INTL_SEGMENTER_TYPE:
+ return JSSegmenter::kSize;
+#endif // V8_INTL_SUPPORT
+ case WASM_GLOBAL_TYPE:
+ return WasmGlobalObject::kSize;
+ case WASM_INSTANCE_TYPE:
+ return WasmInstanceObject::kSize;
+ case WASM_MEMORY_TYPE:
+ return WasmMemoryObject::kSize;
+ case WASM_MODULE_TYPE:
+ return WasmModuleObject::kSize;
+ case WASM_TABLE_TYPE:
+ return WasmTableObject::kSize;
+ case WASM_EXCEPTION_TYPE:
+ return WasmExceptionObject::kSize;
+ default:
+ UNREACHABLE();
+ }
+}
+
+// static
+bool JSObject::AllCanRead(LookupIterator* it) {
+  // Skip the current iteration; it's in state ACCESS_CHECK or INTERCEPTOR,
+  // both of which have already been checked.
+ DCHECK(it->state() == LookupIterator::ACCESS_CHECK ||
+ it->state() == LookupIterator::INTERCEPTOR);
+ for (it->Next(); it->IsFound(); it->Next()) {
+ if (it->state() == LookupIterator::ACCESSOR) {
+ auto accessors = it->GetAccessors();
+ if (accessors->IsAccessorInfo()) {
+ if (AccessorInfo::cast(*accessors)->all_can_read()) return true;
+ }
+ } else if (it->state() == LookupIterator::INTERCEPTOR) {
+ if (it->GetInterceptor()->all_can_read()) return true;
+ } else if (it->state() == LookupIterator::JSPROXY) {
+      // A JSProxy terminates the lookup, and it does not grant all-can-read.
+ return false;
+ }
+ }
+ return false;
+}
+
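+// A failed access check may still be satisfied by an all-can-read accessor or
+// an interceptor; otherwise the failure is reported to the embedder, which
+// may throw.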
+MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
+ LookupIterator* it) {
+ Isolate* isolate = it->isolate();
+ Handle<JSObject> checked = it->GetHolder<JSObject>();
+ Handle<InterceptorInfo> interceptor =
+ it->GetInterceptorForFailedAccessCheck();
+ if (interceptor.is_null()) {
+ while (AllCanRead(it)) {
+ if (it->state() == LookupIterator::ACCESSOR) {
+ return Object::GetPropertyWithAccessor(it);
+ }
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ bool done;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ GetPropertyWithInterceptor(it, &done), Object);
+ if (done) return result;
+ }
+
+ } else {
+ Handle<Object> result;
+ bool done;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ GetPropertyWithInterceptorInternal(it, interceptor, &done), Object);
+ if (done) return result;
+ }
+
+ // Cross-Origin [[Get]] of Well-Known Symbols does not throw, and returns
+ // undefined.
+ Handle<Name> name = it->GetName();
+ if (name->IsSymbol() && Symbol::cast(*name)->is_well_known_symbol()) {
+ return it->factory()->undefined_value();
+ }
+
+ isolate->ReportFailedAccessCheck(checked);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return it->factory()->undefined_value();
+}
+
+Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
+ LookupIterator* it) {
+ Isolate* isolate = it->isolate();
+ Handle<JSObject> checked = it->GetHolder<JSObject>();
+ Handle<InterceptorInfo> interceptor =
+ it->GetInterceptorForFailedAccessCheck();
+ if (interceptor.is_null()) {
+ while (AllCanRead(it)) {
+ if (it->state() == LookupIterator::ACCESSOR) {
+ return Just(it->property_attributes());
+ }
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ auto result = GetPropertyAttributesWithInterceptor(it);
+ if (isolate->has_scheduled_exception()) break;
+ if (result.IsJust() && result.FromJust() != ABSENT) return result;
+ }
+ } else {
+ Maybe<PropertyAttributes> result =
+ GetPropertyAttributesWithInterceptorInternal(it, interceptor);
+ if (isolate->has_pending_exception()) return Nothing<PropertyAttributes>();
+ if (result.FromMaybe(ABSENT) != ABSENT) return result;
+ }
+ isolate->ReportFailedAccessCheck(checked);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+ return Just(ABSENT);
+}
+
+// static
+bool JSObject::AllCanWrite(LookupIterator* it) {
+ for (; it->IsFound() && it->state() != LookupIterator::JSPROXY; it->Next()) {
+ if (it->state() == LookupIterator::ACCESSOR) {
+ Handle<Object> accessors = it->GetAccessors();
+ if (accessors->IsAccessorInfo()) {
+ if (AccessorInfo::cast(*accessors)->all_can_write()) return true;
+ }
+ }
+ }
+ return false;
+}
+
+Maybe<bool> JSObject::SetPropertyWithFailedAccessCheck(
+ LookupIterator* it, Handle<Object> value, Maybe<ShouldThrow> should_throw) {
+ Isolate* isolate = it->isolate();
+ Handle<JSObject> checked = it->GetHolder<JSObject>();
+ Handle<InterceptorInfo> interceptor =
+ it->GetInterceptorForFailedAccessCheck();
+ if (interceptor.is_null()) {
+ if (AllCanWrite(it)) {
+ return Object::SetPropertyWithAccessor(it, value, should_throw);
+ }
+ } else {
+ Maybe<bool> result = SetPropertyWithInterceptorInternal(
+ it, interceptor, should_throw, value);
+ if (isolate->has_pending_exception()) return Nothing<bool>();
+ if (result.IsJust()) return result;
+ }
+ isolate->ReportFailedAccessCheck(checked);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ return Just(true);
+}
+
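+// Slow-mode (dictionary) property store. Global objects keep their values in
+// PropertyCells inside a GlobalDictionary; all other objects write straight
+// into their NameDictionary.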
+void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ DCHECK(!object->HasFastProperties());
+ DCHECK(name->IsUniqueName());
+ Isolate* isolate = object->GetIsolate();
+
+ uint32_t hash = name->Hash();
+
+ if (object->IsJSGlobalObject()) {
+ Handle<JSGlobalObject> global_obj = Handle<JSGlobalObject>::cast(object);
+ Handle<GlobalDictionary> dictionary(global_obj->global_dictionary(),
+ isolate);
+ int entry = dictionary->FindEntry(ReadOnlyRoots(isolate), name, hash);
+
+ if (entry == GlobalDictionary::kNotFound) {
+ DCHECK_IMPLIES(global_obj->map()->is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(global_obj->map()));
+ auto cell = isolate->factory()->NewPropertyCell(name);
+ cell->set_value(*value);
+ auto cell_type = value->IsUndefined(isolate)
+ ? PropertyCellType::kUndefined
+ : PropertyCellType::kConstant;
+ details = details.set_cell_type(cell_type);
+ value = cell;
+ dictionary =
+ GlobalDictionary::Add(isolate, dictionary, name, value, details);
+ global_obj->set_global_dictionary(*dictionary);
+ } else {
+ Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
+ isolate, dictionary, entry, value, details);
+ cell->set_value(*value);
+ }
+ } else {
+ Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+
+ int entry = dictionary->FindEntry(isolate, name);
+ if (entry == NameDictionary::kNotFound) {
+ DCHECK_IMPLIES(object->map()->is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(object->map()));
+ dictionary =
+ NameDictionary::Add(isolate, dictionary, name, value, details);
+ object->SetProperties(*dictionary);
+ } else {
+ PropertyDetails original_details = dictionary->DetailsAt(entry);
+ int enumeration_index = original_details.dictionary_index();
+ DCHECK_GT(enumeration_index, 0);
+ details = details.set_index(enumeration_index);
+ dictionary->SetEntry(isolate, entry, *name, *value, details);
+ }
+ }
+}
+
+void JSObject::JSObjectShortPrint(StringStream* accumulator) {
+ switch (map()->instance_type()) {
+ case JS_ARRAY_TYPE: {
+ double length = JSArray::cast(*this)->length()->IsUndefined()
+ ? 0
+ : JSArray::cast(*this)->length()->Number();
+ accumulator->Add("<JSArray[%u]>", static_cast<uint32_t>(length));
+ break;
+ }
+ case JS_BOUND_FUNCTION_TYPE: {
+ JSBoundFunction bound_function = JSBoundFunction::cast(*this);
+ accumulator->Add("<JSBoundFunction");
+ accumulator->Add(" (BoundTargetFunction %p)>",
+ reinterpret_cast<void*>(
+ bound_function->bound_target_function().ptr()));
+ break;
+ }
+ case JS_WEAK_MAP_TYPE: {
+ accumulator->Add("<JSWeakMap>");
+ break;
+ }
+ case JS_WEAK_SET_TYPE: {
+ accumulator->Add("<JSWeakSet>");
+ break;
+ }
+ case JS_REGEXP_TYPE: {
+ accumulator->Add("<JSRegExp");
+ JSRegExp regexp = JSRegExp::cast(*this);
+ if (regexp->source()->IsString()) {
+ accumulator->Add(" ");
+ String::cast(regexp->source())->StringShortPrint(accumulator);
+ }
+ accumulator->Add(">");
+
+ break;
+ }
+ case JS_FUNCTION_TYPE: {
+ JSFunction function = JSFunction::cast(*this);
+ Object fun_name = function->shared()->DebugName();
+ bool printed = false;
+ if (fun_name->IsString()) {
+ String str = String::cast(fun_name);
+ if (str->length() > 0) {
+ accumulator->Add("<JSFunction ");
+ accumulator->Put(str);
+ printed = true;
+ }
+ }
+ if (!printed) {
+ accumulator->Add("<JSFunction");
+ }
+ if (FLAG_trace_file_names) {
+ Object source_name = Script::cast(function->shared()->script())->name();
+ if (source_name->IsString()) {
+ String str = String::cast(source_name);
+ if (str->length() > 0) {
+ accumulator->Add(" <");
+ accumulator->Put(str);
+ accumulator->Add(">");
+ }
+ }
+ }
+ accumulator->Add(" (sfi = %p)",
+ reinterpret_cast<void*>(function->shared().ptr()));
+ accumulator->Put('>');
+ break;
+ }
+ case JS_GENERATOR_OBJECT_TYPE: {
+ accumulator->Add("<JSGenerator>");
+ break;
+ }
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE: {
+ accumulator->Add("<JSAsyncFunctionObject>");
+ break;
+ }
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE: {
+ accumulator->Add("<JS AsyncGenerator>");
+ break;
+ }
+
+ // All other JSObjects are rather similar to each other (JSObject,
+ // JSGlobalProxy, JSGlobalObject, JSUndetectable, JSValue).
+ default: {
+ Map map_of_this = map();
+ Heap* heap = GetHeap();
+ Object constructor = map_of_this->GetConstructor();
+ bool printed = false;
+ if (constructor->IsHeapObject() &&
+ !heap->Contains(HeapObject::cast(constructor))) {
+ accumulator->Add("!!!INVALID CONSTRUCTOR!!!");
+ } else {
+ bool global_object = IsJSGlobalProxy();
+ if (constructor->IsJSFunction()) {
+ if (!heap->Contains(JSFunction::cast(constructor)->shared())) {
+ accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
+ } else {
+ String constructor_name =
+ JSFunction::cast(constructor)->shared()->Name();
+ if (constructor_name->length() > 0) {
+ accumulator->Add(global_object ? "<GlobalObject " : "<");
+ accumulator->Put(constructor_name);
+ accumulator->Add(
+ " %smap = %p",
+ map_of_this->is_deprecated() ? "deprecated-" : "",
+ map_of_this);
+ printed = true;
+ }
+ }
+ } else if (constructor->IsFunctionTemplateInfo()) {
+        accumulator->Add("<RemoteObject>");
+ printed = true;
+ }
+ if (!printed) {
+ accumulator->Add("<JS%sObject", global_object ? "Global " : "");
+ }
+ }
+ if (IsJSValue()) {
+ accumulator->Add(" value = ");
+ JSValue::cast(*this)->value()->ShortPrint(accumulator);
+ }
+ accumulator->Put('>');
+ break;
+ }
+ }
+}
+
+void JSObject::PrintElementsTransition(FILE* file, Handle<JSObject> object,
+ ElementsKind from_kind,
+ Handle<FixedArrayBase> from_elements,
+ ElementsKind to_kind,
+ Handle<FixedArrayBase> to_elements) {
+ if (from_kind != to_kind) {
+ OFStream os(file);
+ os << "elements transition [" << ElementsKindToString(from_kind) << " -> "
+ << ElementsKindToString(to_kind) << "] in ";
+ JavaScriptFrame::PrintTop(object->GetIsolate(), file, false, true);
+ PrintF(file, " for ");
+ object->ShortPrint(file);
+ PrintF(file, " from ");
+ from_elements->ShortPrint(file);
+ PrintF(file, " to ");
+ to_elements->ShortPrint(file);
+ PrintF(file, "\n");
+ }
+}
+
+void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
+ Map new_map) {
+ if (new_map->is_dictionary_map()) {
+ PrintF(file, "[migrating to slow]\n");
+ return;
+ }
+ PrintF(file, "[migrating]");
+ DescriptorArray o = original_map->instance_descriptors();
+ DescriptorArray n = new_map->instance_descriptors();
+ for (int i = 0; i < original_map->NumberOfOwnDescriptors(); i++) {
+ Representation o_r = o->GetDetails(i).representation();
+ Representation n_r = n->GetDetails(i).representation();
+ if (!o_r.Equals(n_r)) {
+ String::cast(o->GetKey(i))->PrintOn(file);
+ PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
+ } else if (o->GetDetails(i).location() == kDescriptor &&
+ n->GetDetails(i).location() == kField) {
+ Name name = o->GetKey(i);
+ if (name->IsString()) {
+ String::cast(name)->PrintOn(file);
+ } else {
+ PrintF(file, "{symbol %p}", reinterpret_cast<void*>(name.ptr()));
+ }
+ PrintF(file, " ");
+ }
+ }
+ if (original_map->elements_kind() != new_map->elements_kind()) {
+ PrintF(file, "elements_kind[%i->%i]", original_map->elements_kind(),
+ new_map->elements_kind());
+ }
+ PrintF(file, "\n");
+}
+
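+// An API wrapper counts as unmodified when it is droppable, has no elements,
+// and still uses its constructor's initial map.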
+bool JSObject::IsUnmodifiedApiObject(FullObjectSlot o) {
+ Object object = *o;
+ if (object->IsSmi()) return false;
+ HeapObject heap_object = HeapObject::cast(object);
+ if (!object->IsJSObject()) return false;
+ JSObject js_object = JSObject::cast(object);
+ if (!js_object->IsDroppableApiWrapper()) return false;
+ Object maybe_constructor = js_object->map()->GetConstructor();
+ if (!maybe_constructor->IsJSFunction()) return false;
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
+ if (js_object->elements()->length() != 0) return false;
+
+ return constructor->initial_map() == heap_object->map();
+}
+
+// static
+void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
+ Handle<Map> new_map,
+ Isolate* isolate) {
+ DCHECK(old_map->is_prototype_map());
+ DCHECK(new_map->is_prototype_map());
+ bool was_registered = JSObject::UnregisterPrototypeUser(old_map, isolate);
+ new_map->set_prototype_info(old_map->prototype_info());
+ old_map->set_prototype_info(Smi::kZero);
+ if (FLAG_trace_prototype_users) {
+ PrintF("Moving prototype_info %p from map %p to map %p.\n",
+ reinterpret_cast<void*>(new_map->prototype_info()->ptr()),
+ reinterpret_cast<void*>(old_map->ptr()),
+ reinterpret_cast<void*>(new_map->ptr()));
+ }
+ if (was_registered) {
+ if (new_map->prototype_info()->IsPrototypeInfo()) {
+ // The new map isn't registered with its prototype yet; reflect this fact
+ // in the PrototypeInfo it just inherited from the old map.
+ PrototypeInfo::cast(new_map->prototype_info())
+ ->set_registry_slot(PrototypeInfo::UNREGISTERED);
+ }
+ JSObject::LazyRegisterPrototypeUser(new_map, isolate);
+ }
+}
+
+// static
+void JSObject::NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
+ Isolate* isolate) {
+ if (!old_map->is_prototype_map()) return;
+
+ InvalidatePrototypeChains(*old_map);
+
+ // If the map was registered with its prototype before, ensure that it
+ // registers with its new prototype now. This preserves the invariant that
+ // when a map on a prototype chain is registered with its prototype, then
+ // all prototypes further up the chain are also registered with their
+ // respective prototypes.
+ UpdatePrototypeUserRegistration(old_map, new_map, isolate);
+}
+
+namespace {
+// To migrate a fast instance to a fast map:
+// - First check whether the instance needs to be rewritten. If not, simply
+// change the map.
+// - Otherwise, allocate a fixed array large enough to hold all fields, in
+// addition to unused space.
+// - Copy all existing properties into it, in the following order: backing
+//   store properties, unused fields, inobject properties.
+// - If all allocation succeeded, commit the state atomically:
+// * Copy inobject properties from the backing store back into the object.
+// * Trim the difference in instance size of the object. This also cleanly
+// frees inobject properties that moved to the backing store.
+// * If there are properties left in the backing store, trim off the space used
+// to temporarily store the inobject properties.
+// * If there are properties left in the backing store, install the backing
+// store.
+void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<Map> old_map(object->map(), isolate);
+ // In case of a regular transition.
+ if (new_map->GetBackPointer() == *old_map) {
+ // If the map does not add named properties, simply set the map.
+ if (old_map->NumberOfOwnDescriptors() ==
+ new_map->NumberOfOwnDescriptors()) {
+ object->synchronized_set_map(*new_map);
+ return;
+ }
+
+ PropertyDetails details = new_map->GetLastDescriptorDetails();
+ int target_index = details.field_index() - new_map->GetInObjectProperties();
+ int property_array_length = object->property_array()->length();
+ bool have_space = old_map->UnusedPropertyFields() > 0 ||
+ (details.location() == kField && target_index >= 0 &&
+ property_array_length > target_index);
+    // Either new_map adds a kDescriptor property, or a kField property for
+ // which there is still space, and which does not require a mutable double
+ // box (an out-of-object double).
+ if (details.location() == kDescriptor ||
+ (have_space && ((FLAG_unbox_double_fields && target_index < 0) ||
+ !details.representation().IsDouble()))) {
+ object->synchronized_set_map(*new_map);
+ return;
+ }
+
+ // If there is still space in the object, we need to allocate a mutable
+ // double box.
+ if (have_space) {
+ FieldIndex index =
+ FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
+ DCHECK(details.representation().IsDouble());
+ DCHECK(!new_map->IsUnboxedDoubleField(index));
+ auto value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+ object->RawFastPropertyAtPut(index, *value);
+ object->synchronized_set_map(*new_map);
+ return;
+ }
+
+ // This migration is a transition from a map that has run out of property
+ // space. Extend the backing store.
+ int grow_by = new_map->UnusedPropertyFields() + 1;
+ Handle<PropertyArray> old_storage(object->property_array(), isolate);
+ Handle<PropertyArray> new_storage =
+ isolate->factory()->CopyPropertyArrayAndGrow(old_storage, grow_by);
+
+ // Properly initialize newly added property.
+ Handle<Object> value;
+ if (details.representation().IsDouble()) {
+ value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+ } else {
+ value = isolate->factory()->uninitialized_value();
+ }
+ DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(kData, details.kind());
+ DCHECK_GE(target_index, 0); // Must be a backing store index.
+ new_storage->set(target_index, *value);
+
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
+
+ // Set the new property value and do the map transition.
+ object->SetProperties(*new_storage);
+ object->synchronized_set_map(*new_map);
+ return;
+ }
+
+ int old_number_of_fields;
+ int number_of_fields = new_map->NumberOfFields();
+ int inobject = new_map->GetInObjectProperties();
+ int unused = new_map->UnusedPropertyFields();
+
+ // Nothing to do if no functions were converted to fields and no smis were
+ // converted to doubles.
+ if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject,
+ unused, &old_number_of_fields)) {
+ object->synchronized_set_map(*new_map);
+ return;
+ }
+
+ int total_size = number_of_fields + unused;
+ int external = total_size - inobject;
+ Handle<PropertyArray> array = isolate->factory()->NewPropertyArray(external);
+
+ // We use this array to temporarily store the inobject properties.
+ Handle<FixedArray> inobject_props =
+ isolate->factory()->NewFixedArray(inobject);
+
+ Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors(),
+ isolate);
+ Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors(),
+ isolate);
+ int old_nof = old_map->NumberOfOwnDescriptors();
+ int new_nof = new_map->NumberOfOwnDescriptors();
+
+ // This method only supports generalizing instances to at least the same
+ // number of properties.
+ DCHECK(old_nof <= new_nof);
+
+ for (int i = 0; i < old_nof; i++) {
+ PropertyDetails details = new_descriptors->GetDetails(i);
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ Representation old_representation = old_details.representation();
+ Representation representation = details.representation();
+ Handle<Object> value;
+ if (old_details.location() == kDescriptor) {
+ if (old_details.kind() == kAccessor) {
+ // In case of kAccessor -> kData property reconfiguration, the property
+ // must already be prepared for data of certain type.
+ DCHECK(!details.representation().IsNone());
+ if (details.representation().IsDouble()) {
+ value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+ } else {
+ value = isolate->factory()->uninitialized_value();
+ }
+ } else {
+ DCHECK_EQ(kData, old_details.kind());
+ value = handle(old_descriptors->GetStrongValue(i), isolate);
+ DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
+ }
+ } else {
+ DCHECK_EQ(kField, old_details.location());
+ FieldIndex index = FieldIndex::ForDescriptor(*old_map, i);
+ if (object->IsUnboxedDoubleField(index)) {
+ uint64_t old_bits = object->RawFastDoublePropertyAsBitsAt(index);
+ if (representation.IsDouble()) {
+ value = isolate->factory()->NewMutableHeapNumberFromBits(old_bits);
+ } else {
+ value = isolate->factory()->NewHeapNumberFromBits(old_bits);
+ }
+ } else {
+ value = handle(object->RawFastPropertyAt(index), isolate);
+ if (!old_representation.IsDouble() && representation.IsDouble()) {
+ DCHECK_IMPLIES(old_representation.IsNone(),
+ value->IsUninitialized(isolate));
+ value = Object::NewStorageFor(isolate, value, representation);
+ } else if (old_representation.IsDouble() &&
+ !representation.IsDouble()) {
+ value = Object::WrapForRead(isolate, value, old_representation);
+ }
+ }
+ }
+ DCHECK(!(representation.IsDouble() && value->IsSmi()));
+ int target_index = new_descriptors->GetFieldIndex(i);
+ if (target_index < inobject) {
+ inobject_props->set(target_index, *value);
+ } else {
+ array->set(target_index - inobject, *value);
+ }
+ }
+
+ for (int i = old_nof; i < new_nof; i++) {
+ PropertyDetails details = new_descriptors->GetDetails(i);
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
+ Handle<Object> value;
+ if (details.representation().IsDouble()) {
+ value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+ } else {
+ value = isolate->factory()->uninitialized_value();
+ }
+ int target_index = new_descriptors->GetFieldIndex(i);
+ if (target_index < inobject) {
+ inobject_props->set(target_index, *value);
+ } else {
+ array->set(target_index - inobject, *value);
+ }
+ }
+
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
+
+ Heap* heap = isolate->heap();
+
+ int old_instance_size = old_map->instance_size();
+
+ heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+
+ // Copy (real) inobject properties. If necessary, stop at number_of_fields to
+ // avoid overwriting |one_pointer_filler_map|.
+ int limit = Min(inobject, number_of_fields);
+ for (int i = 0; i < limit; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
+ Object value = inobject_props->get(i);
+    // Can't use JSObject::FastPropertyAtPut() because the proper map is not
+    // set yet.
+ if (new_map->IsUnboxedDoubleField(index)) {
+ DCHECK(value->IsMutableHeapNumber());
+ // Ensure that all bits of the double value are preserved.
+ object->RawFastDoublePropertyAsBitsAtPut(
+ index, MutableHeapNumber::cast(value)->value_as_bits());
+ if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
+ // Transition from tagged to untagged slot.
+ heap->ClearRecordedSlot(*object,
+ HeapObject::RawField(*object, index.offset()));
+ } else {
+#ifdef DEBUG
+ heap->VerifyClearedSlot(*object,
+ HeapObject::RawField(*object, index.offset()));
+#endif
+ }
+ } else {
+ object->RawFastPropertyAtPut(index, value);
+ }
+ }
+
+ object->SetProperties(*array);
+
+ // Create filler object past the new instance size.
+ int new_instance_size = new_map->instance_size();
+ int instance_size_delta = old_instance_size - new_instance_size;
+ DCHECK_GE(instance_size_delta, 0);
+
+ if (instance_size_delta > 0) {
+ Address address = object->address();
+ heap->CreateFillerObjectAt(address + new_instance_size, instance_size_delta,
+ ClearRecordedSlots::kYes);
+ }
+
+ // We are storing the new map using release store after creating a filler for
+ // the left-over space to avoid races with the sweeper thread.
+ object->synchronized_set_map(*new_map);
+}
+
+void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
+ int expected_additional_properties) {
+ // The global object is always normalized.
+ DCHECK(!object->IsJSGlobalObject());
+  // JSGlobalProxy must never be normalized.
+ DCHECK(!object->IsJSGlobalProxy());
+
+ DCHECK_IMPLIES(new_map->is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(*new_map));
+
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Map> map(object->map(), isolate);
+
+ // Allocate new content.
+ int real_size = map->NumberOfOwnDescriptors();
+ int property_count = real_size;
+ if (expected_additional_properties > 0) {
+ property_count += expected_additional_properties;
+ } else {
+ // Make space for two more properties.
+ property_count += NameDictionary::kInitialCapacity;
+ }
+ Handle<NameDictionary> dictionary =
+ NameDictionary::New(isolate, property_count);
+
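+  // Copy all own properties into the dictionary, boxing any unboxed or
+  // mutable double field values into fresh immutable HeapNumbers.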
+ Handle<DescriptorArray> descs(map->instance_descriptors(), isolate);
+ for (int i = 0; i < real_size; i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ Handle<Name> key(descs->GetKey(i), isolate);
+ Handle<Object> value;
+ if (details.location() == kField) {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ if (details.kind() == kData) {
+ if (object->IsUnboxedDoubleField(index)) {
+ double old_value = object->RawFastDoublePropertyAt(index);
+ value = isolate->factory()->NewHeapNumber(old_value);
+ } else {
+ value = handle(object->RawFastPropertyAt(index), isolate);
+ if (details.representation().IsDouble()) {
+ DCHECK(value->IsMutableHeapNumber());
+ double old_value = Handle<MutableHeapNumber>::cast(value)->value();
+ value = isolate->factory()->NewHeapNumber(old_value);
+ }
+ }
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ value = handle(object->RawFastPropertyAt(index), isolate);
+ }
+
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ value = handle(descs->GetStrongValue(i), isolate);
+ }
+ DCHECK(!value.is_null());
+ PropertyDetails d(details.kind(), details.attributes(),
+ PropertyCellType::kNoCell);
+ dictionary = NameDictionary::Add(isolate, dictionary, key, value, d);
+ }
+
+  // Copy the next enumeration index from the instance descriptors.
+ dictionary->SetNextEnumerationIndex(real_size + 1);
+
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
+
+ Heap* heap = isolate->heap();
+ int old_instance_size = map->instance_size();
+ heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+
+ // Resize the object in the heap if necessary.
+ int new_instance_size = new_map->instance_size();
+ int instance_size_delta = old_instance_size - new_instance_size;
+ DCHECK_GE(instance_size_delta, 0);
+
+ if (instance_size_delta > 0) {
+ heap->CreateFillerObjectAt(object->address() + new_instance_size,
+ instance_size_delta, ClearRecordedSlots::kYes);
+ }
+
+ // We are storing the new map using release store after creating a filler for
+ // the left-over space to avoid races with the sweeper thread.
+ object->synchronized_set_map(*new_map);
+
+ object->SetProperties(*dictionary);
+
+  // Ensure that the in-object space of the slow-mode object does not contain
+  // random garbage.
+ int inobject_properties = new_map->GetInObjectProperties();
+ if (inobject_properties) {
+ Heap* heap = isolate->heap();
+ heap->ClearRecordedSlotRange(
+ object->address() + map->GetInObjectPropertyOffset(0),
+ object->address() + new_instance_size);
+
+ for (int i = 0; i < inobject_properties; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
+ object->RawFastPropertyAtPut(index, Smi::kZero);
+ }
+ }
+
+ isolate->counters()->props_to_dictionary()->Increment();
+
+#ifdef DEBUG
+ if (FLAG_trace_normalization) {
+ StdoutStream os;
+ os << "Object properties have been normalized:\n";
+ object->Print(os);
+ }
+#endif
+}
+
+} // namespace
+
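+// Migrates |object| to |new_map|, choosing one of three paths: slow-to-slow
+// (just swap the map), fast-to-fast, or fast-to-slow (normalization);
+// slow-to-fast migrations must go through MigrateSlowToFast() instead.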
+void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
+ int expected_additional_properties) {
+ if (object->map() == *new_map) return;
+ Handle<Map> old_map(object->map(), object->GetIsolate());
+ NotifyMapChange(old_map, new_map, object->GetIsolate());
+
+ if (old_map->is_dictionary_map()) {
+ // For slow-to-fast migrations JSObject::MigrateSlowToFast()
+ // must be used instead.
+ CHECK(new_map->is_dictionary_map());
+
+ // Slow-to-slow migration is trivial.
+ object->synchronized_set_map(*new_map);
+ } else if (!new_map->is_dictionary_map()) {
+ MigrateFastToFast(object, new_map);
+ if (old_map->is_prototype_map()) {
+ DCHECK(!old_map->is_stable());
+ DCHECK(new_map->is_stable());
+ DCHECK(new_map->owns_descriptors());
+ DCHECK(old_map->owns_descriptors());
+ // Transfer ownership to the new map. Keep the descriptor pointer of the
+ // old map intact because the concurrent marker might be iterating the
+ // object with the old map.
+ old_map->set_owns_descriptors(false);
+ DCHECK(old_map->is_abandoned_prototype_map());
+ // Ensure that no transition was inserted for prototype migrations.
+ DCHECK_EQ(0, TransitionsAccessor(object->GetIsolate(), old_map)
+ .NumberOfTransitions());
+ DCHECK(new_map->GetBackPointer()->IsUndefined());
+ DCHECK(object->map() != *old_map);
+ }
+ } else {
+ MigrateFastToSlow(object, new_map, expected_additional_properties);
+ }
+
+ // Careful: Don't allocate here!
+ // For some callers of this method, |object| might be in an inconsistent
+ // state now: the new map might have a new elements_kind, but the object's
+ // elements pointer hasn't been updated yet. Callers will fix this, but in
+ // the meantime, (indirectly) calling JSObjectVerify() must be avoided.
+ // When adding code here, add a DisallowHeapAllocation too.
+}
+
+void JSObject::ForceSetPrototype(Handle<JSObject> object,
+ Handle<Object> proto) {
+ // object.__proto__ = proto;
+ Handle<Map> old_map = Handle<Map>(object->map(), object->GetIsolate());
+ Handle<Map> new_map =
+ Map::Copy(object->GetIsolate(), old_map, "ForceSetPrototype");
+ Map::SetPrototype(object->GetIsolate(), new_map, proto);
+ JSObject::MigrateToMap(object, new_map);
+}
+
+Maybe<bool> JSObject::SetPropertyWithInterceptor(
+ LookupIterator* it, Maybe<ShouldThrow> should_throw, Handle<Object> value) {
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ return SetPropertyWithInterceptorInternal(it, it->GetInterceptor(),
+ should_throw, value);
+}
+
+Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ Handle<Map> map(object->map(), object->GetIsolate());
+ return Map::TransitionElementsTo(object->GetIsolate(), map, to_kind);
+}
+
+// static
+MaybeHandle<NativeContext> JSObject::GetFunctionRealm(Handle<JSObject> object) {
+ DCHECK(object->map()->is_constructor());
+ DCHECK(!object->IsJSFunction());
+ return object->GetCreationContext();
+}
+
+void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
+  DCHECK_EQ(object->map()->GetInObjectProperties(),
+            map->GetInObjectProperties());
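+  // If the elements kinds differ, transition to the more general of the two;
+  // dictionary elements on the object always win.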
+ ElementsKind obj_kind = object->map()->elements_kind();
+ ElementsKind map_kind = map->elements_kind();
+ if (map_kind != obj_kind) {
+ ElementsKind to_kind = GetMoreGeneralElementsKind(map_kind, obj_kind);
+ if (IsDictionaryElementsKind(obj_kind)) {
+ to_kind = obj_kind;
+ }
+ if (IsDictionaryElementsKind(to_kind)) {
+ NormalizeElements(object);
+ } else {
+ TransitionElementsKind(object, to_kind);
+ }
+ map = Map::ReconfigureElementsKind(object->GetIsolate(), map, to_kind);
+ }
+ int number_of_fields = map->NumberOfFields();
+ int inobject = map->GetInObjectProperties();
+ int unused = map->UnusedPropertyFields();
+ int total_size = number_of_fields + unused;
+ int external = total_size - inobject;
+ // Allocate mutable double boxes if necessary. It is always necessary if we
+ // have external properties, but is also necessary if we only have inobject
+ // properties but don't unbox double fields.
+ if (!FLAG_unbox_double_fields || external > 0) {
+ Isolate* isolate = object->GetIsolate();
+
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<FixedArray> storage;
+ if (!FLAG_unbox_double_fields) {
+ storage = isolate->factory()->NewFixedArray(inobject);
+ }
+
+ Handle<PropertyArray> array =
+ isolate->factory()->NewPropertyArray(external);
+
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ Representation representation = details.representation();
+ if (!representation.IsDouble()) continue;
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ if (map->IsUnboxedDoubleField(index)) continue;
+ auto box = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+ if (index.is_inobject()) {
+ storage->set(index.property_index(), *box);
+ } else {
+ array->set(index.outobject_array_index(), *box);
+ }
+ }
+
+ object->SetProperties(*array);
+
+ if (!FLAG_unbox_double_fields) {
+ for (int i = 0; i < inobject; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
+ Object value = storage->get(i);
+ object->RawFastPropertyAtPut(index, value);
+ }
+ }
+ }
+ object->synchronized_set_map(*map);
+}
+
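+// Migrates the object to the most up-to-date version of its map and marks
+// that map as a migration target.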
+void JSObject::MigrateInstance(Handle<JSObject> object) {
+ Handle<Map> original_map(object->map(), object->GetIsolate());
+ Handle<Map> map = Map::Update(object->GetIsolate(), original_map);
+ map->set_is_migration_target(true);
+ MigrateToMap(object, map);
+ if (FLAG_trace_migration) {
+ object->PrintInstanceMigration(stdout, *original_map, *map);
+ }
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ object->JSObjectVerify(object->GetIsolate());
+ }
+#endif
+}
+
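+// As MigrateInstance(), but bails out (returning false) when the map cannot
+// be brought up to date; deoptimization is disallowed while migrating.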
+// static
+bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
+ DisallowDeoptimization no_deoptimization(isolate);
+ Handle<Map> original_map(object->map(), isolate);
+ Handle<Map> new_map;
+ if (!Map::TryUpdate(isolate, original_map).ToHandle(&new_map)) {
+ return false;
+ }
+ JSObject::MigrateToMap(object, new_map);
+ if (FLAG_trace_migration && *original_map != object->map()) {
+ object->PrintInstanceMigration(stdout, *original_map, object->map());
+ }
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ object->JSObjectVerify(isolate);
+ }
+#endif
+ return true;
+}
+
+void JSObject::AddProperty(Isolate* isolate, Handle<JSObject> object,
+ Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attributes) {
+ LookupIterator it(isolate, object, name, object,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+#ifdef DEBUG
+ uint32_t index;
+ DCHECK(!object->IsJSProxy());
+ DCHECK(!name->AsArrayIndex(&index));
+ Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
+ DCHECK(maybe.IsJust());
+ DCHECK(!it.IsFound());
+ DCHECK(object->map()->is_extensible() || name->IsPrivate());
+#endif
+ CHECK(Object::AddDataProperty(&it, value, attributes,
+ Just(ShouldThrow::kThrowOnError),
+ StoreOrigin::kNamed)
+ .IsJust());
+}
+
+void JSObject::AddProperty(Isolate* isolate, Handle<JSObject> object,
+ const char* name, Handle<Object> value,
+ PropertyAttributes attributes) {
+ JSObject::AddProperty(isolate, object,
+ isolate->factory()->InternalizeUtf8String(name), value,
+ attributes);
+}
+
+// Reconfigures a property to a data property with attributes, even if it is not
+// reconfigurable.
+// Requires a LookupIterator that does not look at the prototype chain beyond
+// hidden prototypes.
+MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ AccessorInfoHandling handling) {
+ MAYBE_RETURN_NULL(DefineOwnPropertyIgnoreAttributes(
+ it, value, attributes, Just(ShouldThrow::kThrowOnError), handling));
+ return value;
+}
+
+Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ Maybe<ShouldThrow> should_throw, AccessorInfoHandling handling) {
+ it->UpdateProtector();
+ Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
+
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::JSPROXY:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+
+ case LookupIterator::ACCESS_CHECK:
+ if (!it->HasAccess()) {
+ it->isolate()->ReportFailedAccessCheck(it->GetHolder<JSObject>());
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ return Just(true);
+ }
+ break;
+
+ // If there's an interceptor, try to store the property with the
+ // interceptor.
+ // In case of success, the attributes will have been reset to the default
+ // attributes of the interceptor, rather than the incoming attributes.
+ //
+      // TODO(verwaest): JSProxy afterwards verifies the attributes that it
+      // claims to have and checks that they are compatible. If not, it
+      // throws. Here we should do the same.
+ case LookupIterator::INTERCEPTOR:
+ if (handling == DONT_FORCE_FIELD) {
+ Maybe<bool> result =
+ JSObject::SetPropertyWithInterceptor(it, should_throw, value);
+ if (result.IsNothing() || result.FromJust()) return result;
+ }
+ break;
+
+ case LookupIterator::ACCESSOR: {
+ Handle<Object> accessors = it->GetAccessors();
+
+ // Special handling for AccessorInfo, which behaves like a data
+ // property.
+ if (accessors->IsAccessorInfo() && handling == DONT_FORCE_FIELD) {
+ PropertyAttributes current_attributes = it->property_attributes();
+ // Ensure the context isn't changed after calling into accessors.
+ AssertNoContextChange ncc(it->isolate());
+
+ // Update the attributes before calling the setter. The setter may
+ // later change the shape of the property.
+ if (current_attributes != attributes) {
+ it->TransitionToAccessorPair(accessors, attributes);
+ }
+
+ return Object::SetPropertyWithAccessor(it, value, should_throw);
+ }
+
+ it->ReconfigureDataProperty(value, attributes);
+ return Just(true);
+ }
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return Object::RedefineIncompatibleProperty(
+ it->isolate(), it->GetName(), value, should_throw);
+
+ case LookupIterator::DATA: {
+ // Regular property update if the attributes match.
+ if (it->property_attributes() == attributes) {
+ return Object::SetDataProperty(it, value);
+ }
+
+        // Special case: properties of typed arrays cannot be reconfigured to
+        // be non-writable or non-enumerable.
+ if (it->IsElement() && object->HasFixedTypedArrayElements()) {
+ return Object::RedefineIncompatibleProperty(
+ it->isolate(), it->GetName(), value, should_throw);
+ }
+
+ // Reconfigure the data property if the attributes mismatch.
+ it->ReconfigureDataProperty(value, attributes);
+
+ return Just(true);
+ }
+ }
+ }
+
+ return Object::AddDataProperty(it, value, attributes, should_throw,
+ StoreOrigin::kNamed);
+}
+
+MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
+ Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attributes) {
+ DCHECK(!value->IsTheHole());
+ LookupIterator it(object, name, object, LookupIterator::OWN);
+ return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
+}
+
+MaybeHandle<Object> JSObject::SetOwnElementIgnoreAttributes(
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes) {
+ Isolate* isolate = object->GetIsolate();
+ LookupIterator it(isolate, object, index, object, LookupIterator::OWN);
+ return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
+}
+
+MaybeHandle<Object> JSObject::DefinePropertyOrElementIgnoreAttributes(
+ Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attributes) {
+ Isolate* isolate = object->GetIsolate();
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, name, object, LookupIterator::OWN);
+ return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
+}
+
+Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
+ LookupIterator* it) {
+ return GetPropertyAttributesWithInterceptorInternal(it, it->GetInterceptor());
+}
+
+void JSObject::NormalizeProperties(Handle<JSObject> object,
+ PropertyNormalizationMode mode,
+ int expected_additional_properties,
+ const char* reason) {
+ if (!object->HasFastProperties()) return;
+
+ Handle<Map> map(object->map(), object->GetIsolate());
+ Handle<Map> new_map = Map::Normalize(object->GetIsolate(), map, mode, reason);
+
+ MigrateToMap(object, new_map, expected_additional_properties);
+}
+
+void JSObject::MigrateSlowToFast(Handle<JSObject> object,
+ int unused_property_fields,
+ const char* reason) {
+ if (object->HasFastProperties()) return;
+ DCHECK(!object->IsJSGlobalObject());
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+
+ // Make sure we preserve dictionary representation if there are too many
+ // descriptors.
+ int number_of_elements = dictionary->NumberOfElements();
+ if (number_of_elements > kMaxNumberOfDescriptors) return;
+
+ Handle<FixedArray> iteration_order =
+ NameDictionary::IterationIndices(isolate, dictionary);
+
+ int instance_descriptor_length = iteration_order->length();
+ int number_of_fields = 0;
+
+ // Compute the length of the instance descriptor.
+ ReadOnlyRoots roots(isolate);
+ for (int i = 0; i < instance_descriptor_length; i++) {
+ int index = Smi::ToInt(iteration_order->get(i));
+ DCHECK(dictionary->IsKey(roots, dictionary->KeyAt(index)));
+
+ PropertyKind kind = dictionary->DetailsAt(index).kind();
+ if (kind == kData) {
+ if (FLAG_track_constant_fields) {
+ number_of_fields += 1;
+ } else {
+ Object value = dictionary->ValueAt(index);
+ if (!value->IsJSFunction()) {
+ number_of_fields += 1;
+ }
+ }
+ }
+ }
+
+ Handle<Map> old_map(object->map(), isolate);
+
+ int inobject_props = old_map->GetInObjectProperties();
+
+ // Allocate new map.
+ Handle<Map> new_map = Map::CopyDropDescriptors(isolate, old_map);
+  // Only set this bit if we need to; don't retain the old bit, because
+  // turning a map into a dictionary always sets it.
+ new_map->set_may_have_interesting_symbols(new_map->has_named_interceptor() ||
+ new_map->is_access_check_needed());
+ new_map->set_is_dictionary_map(false);
+
+ NotifyMapChange(old_map, new_map, isolate);
+
+ if (instance_descriptor_length == 0) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_LE(unused_property_fields, inobject_props);
+ // Transform the object.
+ new_map->SetInObjectUnusedPropertyFields(inobject_props);
+ object->synchronized_set_map(*new_map);
+ object->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
+ // Check that it really works.
+ DCHECK(object->HasFastProperties());
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("SlowToFast", *old_map, *new_map, reason));
+ }
+ return;
+ }
+
+ // Allocate the instance descriptor.
+ Handle<DescriptorArray> descriptors = DescriptorArray::Allocate(
+ isolate, instance_descriptor_length, 0, AllocationType::kOld);
+
+ int number_of_allocated_fields =
+ number_of_fields + unused_property_fields - inobject_props;
+ if (number_of_allocated_fields < 0) {
+ // There is enough inobject space for all fields (including unused).
+ number_of_allocated_fields = 0;
+ unused_property_fields = inobject_props - number_of_fields;
+ }
+
+ // Allocate the property array for the fields.
+ Handle<PropertyArray> fields =
+ factory->NewPropertyArray(number_of_allocated_fields);
+
+ bool is_transitionable_elements_kind =
+ IsTransitionableFastElementsKind(old_map->elements_kind());
+
+ // Fill in the instance descriptor and the fields.
+ int current_offset = 0;
+ for (int i = 0; i < instance_descriptor_length; i++) {
+ int index = Smi::ToInt(iteration_order->get(i));
+ Name k = dictionary->NameAt(index);
+ // Dictionary keys are internalized upon insertion.
+ // TODO(jkummerow): Turn this into a DCHECK if it's not hit in the wild.
+ CHECK(k->IsUniqueName());
+ Handle<Name> key(k, isolate);
+
+ // Properly mark the {new_map} if the {key} is an "interesting symbol".
+ if (key->IsInterestingSymbol()) {
+ new_map->set_may_have_interesting_symbols(true);
+ }
+
+ Object value = dictionary->ValueAt(index);
+
+ PropertyDetails details = dictionary->DetailsAt(index);
+ DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyConstness::kMutable, details.constness());
+
+ Descriptor d;
+ if (details.kind() == kData) {
+ if (!FLAG_track_constant_fields && value->IsJSFunction()) {
+ d = Descriptor::DataConstant(key, handle(value, isolate),
+ details.attributes());
+ } else {
+        // Only make the field constant when the elements kind is not
+        // transitionable.
+ PropertyConstness constness =
+ FLAG_track_constant_fields && !is_transitionable_elements_kind
+ ? PropertyConstness::kConst
+ : PropertyConstness::kMutable;
+ d = Descriptor::DataField(
+ key, current_offset, details.attributes(), constness,
+ // TODO(verwaest): value->OptimalRepresentation();
+ Representation::Tagged(),
+ MaybeObjectHandle(FieldType::Any(isolate)));
+ }
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ d = Descriptor::AccessorConstant(key, handle(value, isolate),
+ details.attributes());
+ }
+ details = d.GetDetails();
+ if (details.location() == kField) {
+ if (current_offset < inobject_props) {
+ object->InObjectPropertyAtPut(current_offset, value,
+ UPDATE_WRITE_BARRIER);
+ } else {
+ int offset = current_offset - inobject_props;
+ fields->set(offset, value);
+ }
+ current_offset += details.field_width_in_words();
+ }
+ descriptors->Set(i, &d);
+ }
+  DCHECK_EQ(current_offset, number_of_fields);
+
+ descriptors->Sort();
+
+ Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::New(
+ isolate, new_map, descriptors, descriptors->number_of_descriptors());
+
+ DisallowHeapAllocation no_gc;
+ new_map->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
+ if (number_of_allocated_fields == 0) {
+ new_map->SetInObjectUnusedPropertyFields(unused_property_fields);
+ } else {
+ new_map->SetOutOfObjectUnusedPropertyFields(unused_property_fields);
+ }
+
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("SlowToFast", *old_map, *new_map, reason));
+ }
+ // Transform the object.
+ object->synchronized_set_map(*new_map);
+
+ object->SetProperties(*fields);
+ DCHECK(object->IsJSObject());
+
+ // Check that it really works.
+ DCHECK(object->HasFastProperties());
+}
+
+void JSObject::RequireSlowElements(NumberDictionary dictionary) {
+ if (dictionary->requires_slow_elements()) return;
+ dictionary->set_requires_slow_elements();
+ if (map()->is_prototype_map()) {
+ // If this object is a prototype (the callee will check), invalidate any
+ // prototype chains involving it.
+ InvalidatePrototypeChains(map());
+ }
+}
+
+Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
+ DCHECK(!object->HasFixedTypedArrayElements());
+ Isolate* isolate = object->GetIsolate();
+ bool is_sloppy_arguments = object->HasSloppyArgumentsElements();
+ {
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase elements = object->elements();
+
+ if (is_sloppy_arguments) {
+ elements = SloppyArgumentsElements::cast(elements)->arguments();
+ }
+
+ if (elements->IsNumberDictionary()) {
+ return handle(NumberDictionary::cast(elements), isolate);
+ }
+ }
+
+ DCHECK(object->HasSmiOrObjectElements() || object->HasDoubleElements() ||
+ object->HasFastArgumentsElements() ||
+ object->HasFastStringWrapperElements());
+
+ Handle<NumberDictionary> dictionary =
+ object->GetElementsAccessor()->Normalize(object);
+
+ // Switch to using the dictionary as the backing storage for elements.
+ ElementsKind target_kind = is_sloppy_arguments
+ ? SLOW_SLOPPY_ARGUMENTS_ELEMENTS
+ : object->HasFastStringWrapperElements()
+ ? SLOW_STRING_WRAPPER_ELEMENTS
+ : DICTIONARY_ELEMENTS;
+ Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, target_kind);
+  // Set the new map first to satisfy the elements type assert in
+  // set_elements().
+ JSObject::MigrateToMap(object, new_map);
+
+ if (is_sloppy_arguments) {
+ SloppyArgumentsElements::cast(object->elements())
+ ->set_arguments(*dictionary);
+ } else {
+ object->set_elements(*dictionary);
+ }
+
+ isolate->counters()->elements_to_dictionary()->Increment();
+
+#ifdef DEBUG
+ if (FLAG_trace_normalization) {
+ StdoutStream os;
+ os << "Object elements have been normalized:\n";
+ object->Print(os);
+ }
+#endif
+
+ DCHECK(object->HasDictionaryElements() ||
+ object->HasSlowArgumentsElements() ||
+ object->HasSlowStringWrapperElements());
+ return dictionary;
+}
+
+Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
+ ShouldThrow should_throw) {
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ Handle<InterceptorInfo> interceptor(it->GetInterceptor());
+ if (interceptor->deleter()->IsUndefined(isolate)) return Nothing<bool>();
+
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
+
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, Just(should_throw));
+ Handle<Object> result;
+ if (it->IsElement()) {
+ result = args.CallIndexedDeleter(interceptor, it->index());
+ } else {
+ result = args.CallNamedDeleter(interceptor, it->name());
+ }
+
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ if (result.is_null()) return Nothing<bool>();
+
+ DCHECK(result->IsBoolean());
+ // Rebox CustomArguments::kReturnValueOffset before returning.
+ return Just(result->IsTrue(isolate));
+}
+
+Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
+ Handle<Object> value,
+ Maybe<ShouldThrow> should_throw) {
+ DCHECK(it->GetReceiver()->IsJSObject());
+ MAYBE_RETURN(JSReceiver::GetPropertyAttributes(it), Nothing<bool>());
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
+ Isolate* isolate = receiver->GetIsolate();
+
+ if (it->IsFound()) {
+ Maybe<PropertyAttributes> attributes = GetPropertyAttributes(it);
+ MAYBE_RETURN(attributes, Nothing<bool>());
+ if ((attributes.FromJust() & DONT_DELETE) != 0) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed, it->GetName()));
+ }
+ } else {
+ if (!JSObject::IsExtensible(Handle<JSObject>::cast(it->GetReceiver()))) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kDefineDisallowed, it->GetName()));
+ }
+ }
+
+ RETURN_ON_EXCEPTION_VALUE(it->isolate(),
+ DefineOwnPropertyIgnoreAttributes(it, value, NONE),
+ Nothing<bool>());
+
+ return Just(true);
+}
+
+namespace {
+
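+// Returns true when every own property in |dict| already satisfies |level|:
+// nothing is configurable and, for FROZEN, no data property is writable.
+// Filtered keys (e.g. private symbols) are ignored.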
+template <typename Dictionary>
+bool TestDictionaryPropertiesIntegrityLevel(Dictionary dict,
+ ReadOnlyRoots roots,
+ PropertyAttributes level) {
+ DCHECK(level == SEALED || level == FROZEN);
+
+ uint32_t capacity = dict->Capacity();
+ for (uint32_t i = 0; i < capacity; i++) {
+ Object key;
+ if (!dict->ToKey(roots, i, &key)) continue;
+ if (key->FilterKey(ALL_PROPERTIES)) continue;
+ PropertyDetails details = dict->DetailsAt(i);
+ if (details.IsConfigurable()) return false;
+ if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
+ DCHECK(level == SEALED || level == FROZEN);
+ DCHECK(!map->IsCustomElementsReceiverMap());
+ DCHECK(!map->is_dictionary_map());
+
+ DescriptorArray descriptors = map->instance_descriptors();
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ for (int i = 0; i < number_of_own_descriptors; i++) {
+ if (descriptors->GetKey(i)->IsPrivate()) continue;
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.IsConfigurable()) return false;
+ if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool TestPropertiesIntegrityLevel(JSObject object, PropertyAttributes level) {
+ DCHECK(!object->map()->IsCustomElementsReceiverMap());
+
+ if (object->HasFastProperties()) {
+ return TestFastPropertiesIntegrityLevel(object->map(), level);
+ }
+
+ return TestDictionaryPropertiesIntegrityLevel(
+ object->property_dictionary(), object->GetReadOnlyRoots(), level);
+}
+
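+// Elements-side counterpart: dictionary elements are checked directly, typed
+// array elements fall back to the named-property check, and all other kinds
+// pass only when the object has no elements at all.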
+bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) {
+ DCHECK(!object->HasSloppyArgumentsElements());
+
+ ElementsKind kind = object->GetElementsKind();
+
+ if (IsDictionaryElementsKind(kind)) {
+ return TestDictionaryPropertiesIntegrityLevel(
+ NumberDictionary::cast(object->elements()), object->GetReadOnlyRoots(),
+ level);
+ }
+ if (IsFixedTypedArrayElementsKind(kind)) {
+ return TestPropertiesIntegrityLevel(object, level);
+ }
+
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+  // Only DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS have
+  // PropertyAttributes, so just test whether there are any elements at all.
+ return accessor->NumberOfElements(object) == 0;
+}
+
+bool FastTestIntegrityLevel(JSObject object, PropertyAttributes level) {
+ DCHECK(!object->map()->IsCustomElementsReceiverMap());
+
+ return !object->map()->is_extensible() &&
+ TestElementsIntegrityLevel(object, level) &&
+ TestPropertiesIntegrityLevel(object, level);
+}
+
+} // namespace
+
+Maybe<bool> JSObject::TestIntegrityLevel(Handle<JSObject> object,
+ IntegrityLevel level) {
+ if (!object->map()->IsCustomElementsReceiverMap() &&
+ !object->HasSloppyArgumentsElements()) {
+ return Just(FastTestIntegrityLevel(*object, level));
+ }
+ return GenericTestIntegrityLevel(Handle<JSReceiver>::cast(object), level);
+}
+
+Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
+ ShouldThrow should_throw) {
+ Isolate* isolate = object->GetIsolate();
+
+ if (!object->HasSloppyArgumentsElements()) {
+ return PreventExtensionsWithTransition<NONE>(object, should_throw);
+ }
+
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
+ isolate->ReportFailedAccessCheck(object);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoAccess));
+ }
+
+ if (!object->map()->is_extensible()) return Just(true);
+
+ if (object->IsJSGlobalProxy()) {
+ PrototypeIterator iter(isolate, object);
+ if (iter.IsAtEnd()) return Just(true);
+ DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+ return PreventExtensions(PrototypeIterator::GetCurrent<JSObject>(iter),
+ should_throw);
+ }
+
+ if (object->map()->has_named_interceptor() ||
+ object->map()->has_indexed_interceptor()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kCannotPreventExt));
+ }
+
+ if (!object->HasFixedTypedArrayElements()) {
+    // If there are fast elements, we normalize.
+ Handle<NumberDictionary> dictionary = NormalizeElements(object);
+ DCHECK(object->HasDictionaryElements() ||
+ object->HasSlowArgumentsElements());
+
+    // Make sure that we never go back to the fast case.
+ object->RequireSlowElements(*dictionary);
+ }
+
+  // Do a map transition; other objects with this map may still
+ // be extensible.
+ // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
+ Handle<Map> new_map =
+ Map::Copy(isolate, handle(object->map(), isolate), "PreventExtensions");
+
+ new_map->set_is_extensible(false);
+ JSObject::MigrateToMap(object, new_map);
+ DCHECK(!object->map()->is_extensible());
+
+ return Just(true);
+}
+
+bool JSObject::IsExtensible(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
+ return true;
+ }
+ if (object->IsJSGlobalProxy()) {
+ PrototypeIterator iter(isolate, *object);
+ if (iter.IsAtEnd()) return false;
+ DCHECK(iter.GetCurrent()->IsJSGlobalObject());
+ return iter.GetCurrent<JSObject>()->map()->is_extensible();
+ }
+ return object->map()->is_extensible();
+}
+
+namespace {
+
+template <typename Dictionary>
+void ApplyAttributesToDictionary(Isolate* isolate, ReadOnlyRoots roots,
+ Handle<Dictionary> dictionary,
+ const PropertyAttributes attributes) {
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object k;
+ if (!dictionary->ToKey(roots, i, &k)) continue;
+ if (k->FilterKey(ALL_PROPERTIES)) continue;
+ PropertyDetails details = dictionary->DetailsAt(i);
+ int attrs = attributes;
+ // READ_ONLY is an invalid attribute for JS setters/getters.
+ if ((attributes & READ_ONLY) && details.kind() == kAccessor) {
+ Object v = dictionary->ValueAt(i);
+ if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
+ }
+ details = details.CopyAddAttributes(static_cast<PropertyAttributes>(attrs));
+ dictionary->DetailsAtPut(isolate, i, details);
+ }
+}
+
+} // namespace
+
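+// Shared implementation behind preventExtensions, seal, and freeze: |attrs|
+// selects the transition (NONE, SEALED or FROZEN) and the matching transition
+// marker symbol.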
+template <PropertyAttributes attrs>
+Maybe<bool> JSObject::PreventExtensionsWithTransition(
+ Handle<JSObject> object, ShouldThrow should_throw) {
+ STATIC_ASSERT(attrs == NONE || attrs == SEALED || attrs == FROZEN);
+
+ // Sealing/freezing sloppy arguments or namespace objects should be handled
+ // elsewhere.
+ DCHECK(!object->HasSloppyArgumentsElements());
+ DCHECK_IMPLIES(object->IsJSModuleNamespace(), attrs == NONE);
+
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
+ isolate->ReportFailedAccessCheck(object);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoAccess));
+ }
+
+ if (attrs == NONE && !object->map()->is_extensible()) return Just(true);
+
+ if (object->IsJSGlobalProxy()) {
+ PrototypeIterator iter(isolate, object);
+ if (iter.IsAtEnd()) return Just(true);
+ DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+ return PreventExtensionsWithTransition<attrs>(
+ PrototypeIterator::GetCurrent<JSObject>(iter), should_throw);
+ }
+
+ if (object->map()->has_named_interceptor() ||
+ object->map()->has_indexed_interceptor()) {
+ MessageTemplate message = MessageTemplate::kNone;
+ switch (attrs) {
+ case NONE:
+ message = MessageTemplate::kCannotPreventExt;
+ break;
+
+ case SEALED:
+ message = MessageTemplate::kCannotSeal;
+ break;
+
+ case FROZEN:
+ message = MessageTemplate::kCannotFreeze;
+ break;
+ }
+ RETURN_FAILURE(isolate, should_throw, NewTypeError(message));
+ }
+
+ Handle<NumberDictionary> new_element_dictionary;
+ if (!object->HasFixedTypedArrayElements() &&
+ !object->HasDictionaryElements() &&
+ !object->HasSlowStringWrapperElements()) {
+ int length = object->IsJSArray()
+ ? Smi::ToInt(Handle<JSArray>::cast(object)->length())
+ : object->elements()->length();
+ new_element_dictionary =
+ length == 0 ? isolate->factory()->empty_slow_element_dictionary()
+ : object->GetElementsAccessor()->Normalize(object);
+ }
+
+ Handle<Symbol> transition_marker;
+ if (attrs == NONE) {
+ transition_marker = isolate->factory()->nonextensible_symbol();
+ } else if (attrs == SEALED) {
+ transition_marker = isolate->factory()->sealed_symbol();
+ } else {
+ DCHECK(attrs == FROZEN);
+ transition_marker = isolate->factory()->frozen_symbol();
+ }
+
+ Handle<Map> old_map(object->map(), isolate);
+ TransitionsAccessor transitions(isolate, old_map);
+ Map transition = transitions.SearchSpecial(*transition_marker);
+ if (!transition.is_null()) {
+ Handle<Map> transition_map(transition, isolate);
+ DCHECK(transition_map->has_dictionary_elements() ||
+ transition_map->has_fixed_typed_array_elements() ||
+ transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
+ DCHECK(!transition_map->is_extensible());
+ JSObject::MigrateToMap(object, transition_map);
+ } else if (transitions.CanHaveMoreTransitions()) {
+    // Create a new descriptor array with the appropriate property attributes.
+ Handle<Map> new_map = Map::CopyForPreventExtensions(
+ isolate, old_map, attrs, transition_marker, "CopyForPreventExtensions");
+ JSObject::MigrateToMap(object, new_map);
+ } else {
+ DCHECK(old_map->is_dictionary_map() || !old_map->is_prototype_map());
+    // Slow path: normalize the properties for safety.
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0,
+ "SlowPreventExtensions");
+
+ // Create a new map, since other objects with this map may be extensible.
+ // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
+ Handle<Map> new_map = Map::Copy(isolate, handle(object->map(), isolate),
+ "SlowCopyForPreventExtensions");
+ new_map->set_is_extensible(false);
+ if (!new_element_dictionary.is_null()) {
+ ElementsKind new_kind =
+ IsStringWrapperElementsKind(old_map->elements_kind())
+ ? SLOW_STRING_WRAPPER_ELEMENTS
+ : DICTIONARY_ELEMENTS;
+ new_map->set_elements_kind(new_kind);
+ }
+ JSObject::MigrateToMap(object, new_map);
+
+ if (attrs != NONE) {
+ ReadOnlyRoots roots(isolate);
+ if (object->IsJSGlobalObject()) {
+ Handle<GlobalDictionary> dictionary(
+ JSGlobalObject::cast(*object)->global_dictionary(), isolate);
+ ApplyAttributesToDictionary(isolate, roots, dictionary, attrs);
+ } else {
+ Handle<NameDictionary> dictionary(object->property_dictionary(),
+ isolate);
+ ApplyAttributesToDictionary(isolate, roots, dictionary, attrs);
+ }
+ }
+ }
+
+ // Both seal and preventExtensions always go through without modifications to
+ // typed array elements. Freeze works only if there are no actual elements.
+ if (object->HasFixedTypedArrayElements()) {
+ if (attrs == FROZEN &&
+ JSArrayBufferView::cast(*object)->byte_length() > 0) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kCannotFreezeArrayBufferView));
+ return Nothing<bool>();
+ }
+ return Just(true);
+ }
+
+ DCHECK(object->map()->has_dictionary_elements() ||
+ object->map()->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
+ if (!new_element_dictionary.is_null()) {
+ object->set_elements(*new_element_dictionary);
+ }
+
+ if (object->elements() !=
+ ReadOnlyRoots(isolate).empty_slow_element_dictionary()) {
+ Handle<NumberDictionary> dictionary(object->element_dictionary(), isolate);
+    // Make sure we never go back to the fast case.
+ object->RequireSlowElements(*dictionary);
+ if (attrs != NONE) {
+ ApplyAttributesToDictionary(isolate, ReadOnlyRoots(isolate), dictionary,
+ attrs);
+ }
+ }
+
+ return Just(true);
+}
+
+Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
+ Representation representation,
+ FieldIndex index) {
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsUnboxedDoubleField(index)) {
+ double value = object->RawFastDoublePropertyAt(index);
+ return isolate->factory()->NewHeapNumber(value);
+ }
+ Handle<Object> raw_value(object->RawFastPropertyAt(index), isolate);
+ return Object::WrapForRead(isolate, raw_value, representation);
+}
+
+// TODO(cbruni/jkummerow): Consider moving this into elements.cc.
+bool JSObject::HasEnumerableElements() {
+ // TODO(cbruni): cleanup
+ JSObject object = *this;
+ switch (object->GetElementsKind()) {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
+ int length = object->IsJSArray()
+ ? Smi::ToInt(JSArray::cast(object)->length())
+ : object->elements()->length();
+ return length > 0;
+ }
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS: {
+ FixedArray elements = FixedArray::cast(object->elements());
+ int length = object->IsJSArray()
+ ? Smi::ToInt(JSArray::cast(object)->length())
+ : elements->length();
+ Isolate* isolate = GetIsolate();
+ for (int i = 0; i < length; i++) {
+ if (!elements->is_the_hole(isolate, i)) return true;
+ }
+ return false;
+ }
+ case HOLEY_DOUBLE_ELEMENTS: {
+ int length = object->IsJSArray()
+ ? Smi::ToInt(JSArray::cast(object)->length())
+ : object->elements()->length();
+ // Zero-length arrays would use the empty FixedArray...
+ if (length == 0) return false;
+ // ...so only cast to FixedDoubleArray otherwise.
+ FixedDoubleArray elements = FixedDoubleArray::cast(object->elements());
+ for (int i = 0; i < length; i++) {
+ if (!elements->is_the_hole(i)) return true;
+ }
+ return false;
+ }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ {
+ int length = object->elements()->length();
+ return length > 0;
+ }
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary elements = NumberDictionary::cast(object->elements());
+ return elements->NumberOfEnumerableProperties() > 0;
+ }
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ // We're approximating non-empty arguments objects here.
+ return true;
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ if (String::cast(JSValue::cast(object)->value())->length() > 0) {
+ return true;
+ }
+ return object->elements()->length() > 0;
+ case NO_ELEMENTS:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes) {
+ Isolate* isolate = object->GetIsolate();
+
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ return DefineAccessor(&it, getter, setter, attributes);
+}
+
+MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes) {
+ Isolate* isolate = it->isolate();
+
+ it->UpdateProtector();
+
+ if (it->state() == LookupIterator::ACCESS_CHECK) {
+ if (!it->HasAccess()) {
+ isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
+ }
+ it->Next();
+ }
+
+ Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
+ // Ignore accessors on typed arrays.
+ if (it->IsElement() && object->HasFixedTypedArrayElements()) {
+ return it->factory()->undefined_value();
+ }
+
+ DCHECK(getter->IsCallable() || getter->IsUndefined(isolate) ||
+ getter->IsNull(isolate) || getter->IsFunctionTemplateInfo());
+ DCHECK(setter->IsCallable() || setter->IsUndefined(isolate) ||
+ setter->IsNull(isolate) || setter->IsFunctionTemplateInfo());
+ it->TransitionToAccessorProperty(getter, setter, attributes);
+
+ return isolate->factory()->undefined_value();
+}
+
+MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<AccessorInfo> info,
+ PropertyAttributes attributes) {
+ Isolate* isolate = object->GetIsolate();
+
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+
+ // Duplicate ACCESS_CHECK outside of GetPropertyAttributes for the case that
+ // the FailedAccessCheckCallbackFunction doesn't throw an exception.
+ //
+ // TODO(verwaest): Force throw an exception if the callback doesn't, so we can
+ // remove reliance on default return values.
+ if (it.state() == LookupIterator::ACCESS_CHECK) {
+ if (!it.HasAccess()) {
+ isolate->ReportFailedAccessCheck(object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return it.factory()->undefined_value();
+ }
+ it.Next();
+ }
+
+ // Ignore accessors on typed arrays.
+ if (it.IsElement() && object->HasFixedTypedArrayElements()) {
+ return it.factory()->undefined_value();
+ }
+
+ CHECK(GetPropertyAttributes(&it).IsJust());
+
+ // ES5 forbids turning a property into an accessor if it's not
+ // configurable. See 8.6.1 (Table 5).
+ if (it.IsFound() && !it.IsConfigurable()) {
+ return it.factory()->undefined_value();
+ }
+
+ it.TransitionToAccessorPair(info, attributes);
+
+ return object;
+}
+
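+// Linear search over the object's own properties; returns the key whose value
+// equals |value|, or undefined when there is no match.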
+Object JSObject::SlowReverseLookup(Object value) {
+ if (HasFastProperties()) {
+ int number_of_own_descriptors = map()->NumberOfOwnDescriptors();
+ DescriptorArray descs = map()->instance_descriptors();
+ bool value_is_number = value->IsNumber();
+ for (int i = 0; i < number_of_own_descriptors; i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ if (details.location() == kField) {
+ DCHECK_EQ(kData, details.kind());
+ FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
+ if (IsUnboxedDoubleField(field_index)) {
+ if (value_is_number) {
+ double property = RawFastDoublePropertyAt(field_index);
+ if (property == value->Number()) {
+ return descs->GetKey(i);
+ }
+ }
+ } else {
+ Object property = RawFastPropertyAt(field_index);
+ if (field_index.is_double()) {
+ DCHECK(property->IsMutableHeapNumber());
+ if (value_is_number && property->Number() == value->Number()) {
+ return descs->GetKey(i);
+ }
+ } else if (property == value) {
+ return descs->GetKey(i);
+ }
+ }
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ if (details.kind() == kData) {
+ if (descs->GetStrongValue(i) == value) {
+ return descs->GetKey(i);
+ }
+ }
+ }
+ }
+ return GetReadOnlyRoots().undefined_value();
+ } else if (IsJSGlobalObject()) {
+ return JSGlobalObject::cast(*this)->global_dictionary()->SlowReverseLookup(
+ value);
+ } else {
+ return property_dictionary()->SlowReverseLookup(value);
+ }
+}
+
+void JSObject::PrototypeRegistryCompactionCallback(HeapObject value,
+ int old_index,
+ int new_index) {
+ DCHECK(value->IsMap() && Map::cast(value)->is_prototype_map());
+ Map map = Map::cast(value);
+ DCHECK(map->prototype_info()->IsPrototypeInfo());
+ PrototypeInfo proto_info = PrototypeInfo::cast(map->prototype_info());
+ DCHECK_EQ(old_index, proto_info->registry_slot());
+ proto_info->set_registry_slot(new_index);
+}
+
+// static
+void JSObject::MakePrototypesFast(Handle<Object> receiver,
+ WhereToStart where_to_start,
+ Isolate* isolate) {
+ if (!receiver->IsJSReceiver()) return;
+ for (PrototypeIterator iter(isolate, Handle<JSReceiver>::cast(receiver),
+ where_to_start);
+ !iter.IsAtEnd(); iter.Advance()) {
+ Handle<Object> current = PrototypeIterator::GetCurrent(iter);
+ if (!current->IsJSObject()) return;
+ Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
+ Map current_map = current_obj->map();
+ if (current_map->is_prototype_map()) {
+      // If the map is already marked as a fast prototype map, we're done;
+      // its prototypes will already have been marked as well.
+ if (current_map->should_be_fast_prototype_map()) return;
+ Handle<Map> map(current_map, isolate);
+ Map::SetShouldBeFastPrototypeMap(map, true, isolate);
+ JSObject::OptimizeAsPrototype(current_obj);
+ }
+ }
+}
+
+static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
+ DisallowHeapAllocation no_gc;
+ if (!object->HasFastProperties()) return false;
+ if (object->IsJSGlobalProxy()) return false;
+ if (object->GetIsolate()->bootstrapper()->IsActive()) return false;
+ return !object->map()->is_prototype_map() ||
+ !object->map()->should_be_fast_prototype_map();
+}
+
+// static
+void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
+ bool enable_setup_mode) {
+ if (object->IsJSGlobalObject()) return;
+ if (enable_setup_mode && PrototypeBenefitsFromNormalization(object)) {
+ // First normalize to ensure all JSFunctions are DATA_CONSTANT.
+ JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
+ "NormalizeAsPrototype");
+ }
+ if (object->map()->is_prototype_map()) {
+ if (object->map()->should_be_fast_prototype_map() &&
+ !object->HasFastProperties()) {
+ JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
+ }
+ } else {
+ Handle<Map> new_map = Map::Copy(object->GetIsolate(),
+ handle(object->map(), object->GetIsolate()),
+ "CopyAsPrototype");
+ JSObject::MigrateToMap(object, new_map);
+ object->map()->set_is_prototype_map(true);
+
+    // Replace the pointer to the exact constructor with the Object function
+    // from the same context, provided the change is undetectable from JS.
+    // This avoids keeping memory alive unnecessarily.
+ Object maybe_constructor = object->map()->GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
+ if (!constructor->shared()->IsApiFunction()) {
+ Context context = constructor->context()->native_context();
+ JSFunction object_function = context->object_function();
+ object->map()->SetConstructor(object_function);
+ }
+ }
+ }
+}
+
+// static
+void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) {
+ if (!object->map()->is_prototype_map()) return;
+ if (!object->map()->should_be_fast_prototype_map()) return;
+ OptimizeAsPrototype(object);
+}
+
+// static
+void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
+ // Contract: In line with InvalidatePrototypeChains()'s requirements,
+ // leaf maps don't need to register as users, only prototypes do.
+ DCHECK(user->is_prototype_map());
+
+ Handle<Map> current_user = user;
+ Handle<PrototypeInfo> current_user_info =
+ Map::GetOrCreatePrototypeInfo(user, isolate);
+ for (PrototypeIterator iter(isolate, user); !iter.IsAtEnd(); iter.Advance()) {
+    // Walk up the prototype chain, stopping at the first link that has
+    // already been registered.
+ if (current_user_info->registry_slot() != PrototypeInfo::UNREGISTERED) {
+ break;
+ }
+ Handle<Object> maybe_proto = PrototypeIterator::GetCurrent(iter);
+ // Proxies on the prototype chain are not supported. They make it
+ // impossible to make any assumptions about the prototype chain anyway.
+ if (maybe_proto->IsJSProxy()) return;
+ Handle<JSObject> proto = Handle<JSObject>::cast(maybe_proto);
+ Handle<PrototypeInfo> proto_info =
+ Map::GetOrCreatePrototypeInfo(proto, isolate);
+ Handle<Object> maybe_registry(proto_info->prototype_users(), isolate);
+ Handle<WeakArrayList> registry =
+ maybe_registry->IsSmi()
+ ? handle(ReadOnlyRoots(isolate->heap()).empty_weak_array_list(),
+ isolate)
+ : Handle<WeakArrayList>::cast(maybe_registry);
+ int slot = 0;
+ Handle<WeakArrayList> new_array =
+ PrototypeUsers::Add(isolate, registry, current_user, &slot);
+ current_user_info->set_registry_slot(slot);
+ if (!maybe_registry.is_identical_to(new_array)) {
+ proto_info->set_prototype_users(*new_array);
+ }
+ if (FLAG_trace_prototype_users) {
+ PrintF("Registering %p as a user of prototype %p (map=%p).\n",
+ reinterpret_cast<void*>(current_user->ptr()),
+ reinterpret_cast<void*>(proto->ptr()),
+ reinterpret_cast<void*>(proto->map()->ptr()));
+ }
+
+ current_user = handle(proto->map(), isolate);
+ current_user_info = proto_info;
+ }
+}
+
+// Can be called regardless of whether |user| was actually registered with
+// |prototype|. Returns true when there was a registration.
+// static
+bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
+ DCHECK(user->is_prototype_map());
+ // If it doesn't have a PrototypeInfo, it was never registered.
+ if (!user->prototype_info()->IsPrototypeInfo()) return false;
+ // If it had no prototype before, see if it had users that might expect
+ // registration.
+ if (!user->prototype()->IsJSObject()) {
+ Object users =
+ PrototypeInfo::cast(user->prototype_info())->prototype_users();
+ return users->IsWeakArrayList();
+ }
+ Handle<JSObject> prototype(JSObject::cast(user->prototype()), isolate);
+ Handle<PrototypeInfo> user_info =
+ Map::GetOrCreatePrototypeInfo(user, isolate);
+ int slot = user_info->registry_slot();
+ if (slot == PrototypeInfo::UNREGISTERED) return false;
+ DCHECK(prototype->map()->is_prototype_map());
+ Object maybe_proto_info = prototype->map()->prototype_info();
+  // The user knows its registry slot; the prototype info and user registry
+  // must therefore exist.
+ DCHECK(maybe_proto_info->IsPrototypeInfo());
+ Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(maybe_proto_info),
+ isolate);
+ Handle<WeakArrayList> prototype_users(
+ WeakArrayList::cast(proto_info->prototype_users()), isolate);
+ DCHECK_EQ(prototype_users->Get(slot), HeapObjectReference::Weak(*user));
+ PrototypeUsers::MarkSlotEmpty(*prototype_users, slot);
+ if (FLAG_trace_prototype_users) {
+ PrintF("Unregistering %p as a user of prototype %p.\n",
+ reinterpret_cast<void*>(user->ptr()),
+ reinterpret_cast<void*>(prototype->ptr()));
+ }
+ return true;
+}
+
+namespace {
+
+// This function must be kept in sync with
+// AccessorAssembler::InvalidateValidityCellIfPrototype() which does pre-checks
+// before jumping here.
+void InvalidateOnePrototypeValidityCellInternal(Map map) {
+ DCHECK(map->is_prototype_map());
+ if (FLAG_trace_prototype_users) {
+ PrintF("Invalidating prototype map %p 's cell\n",
+ reinterpret_cast<void*>(map.ptr()));
+ }
+ Object maybe_cell = map->prototype_validity_cell();
+ if (maybe_cell->IsCell()) {
+ // Just set the value; the cell will be replaced lazily.
+ Cell cell = Cell::cast(maybe_cell);
+ cell->set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
+ }
+}
+
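+// Invalidates |map|'s own validity cell and then recurses into every map
+// registered as a user of this prototype, i.e. walks towards leaf objects.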
+void InvalidatePrototypeChainsInternal(Map map) {
+ InvalidateOnePrototypeValidityCellInternal(map);
+
+ Object maybe_proto_info = map->prototype_info();
+ if (!maybe_proto_info->IsPrototypeInfo()) return;
+ PrototypeInfo proto_info = PrototypeInfo::cast(maybe_proto_info);
+ if (!proto_info->prototype_users()->IsWeakArrayList()) {
+ return;
+ }
+ WeakArrayList prototype_users =
+ WeakArrayList::cast(proto_info->prototype_users());
+ // For now, only maps register themselves as users.
+ for (int i = PrototypeUsers::kFirstIndex; i < prototype_users->length();
+ ++i) {
+ HeapObject heap_object;
+ if (prototype_users->Get(i)->GetHeapObjectIfWeak(&heap_object) &&
+ heap_object->IsMap()) {
+ // Walk the prototype chain (backwards, towards leaf objects) if
+ // necessary.
+ InvalidatePrototypeChainsInternal(Map::cast(heap_object));
+ }
+ }
+}
+
+} // namespace
+
+// static
+Map JSObject::InvalidatePrototypeChains(Map map) {
+ DisallowHeapAllocation no_gc;
+ InvalidatePrototypeChainsInternal(map);
+ return map;
+}
+
+// We also invalidate the global object's validity cell when a new lexical
+// environment variable is added. This is necessary to ensure that
+// Load/StoreGlobalIC handlers that load/store from the global object's
+// prototype get properly invalidated.
+// Note that the normal Load/StoreICs that load/store through the global
+// object in the prototype chain are not affected by the appearance of a new
+// lexical variable, and therefore we don't propagate the invalidation down.
+// static
+void JSObject::InvalidatePrototypeValidityCell(JSGlobalObject global) {
+ DisallowHeapAllocation no_gc;
+ InvalidateOnePrototypeValidityCellInternal(global->map());
+}
+
+Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
+ Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw) {
+ Isolate* isolate = object->GetIsolate();
+
+#ifdef DEBUG
+ int size = object->Size();
+#endif
+
+ if (from_javascript) {
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context(), isolate), object)) {
+ isolate->ReportFailedAccessCheck(object);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoAccess));
+ }
+ } else {
+ DCHECK(!object->IsAccessCheckNeeded());
+ }
+
+  // Silently ignore the change if value is not a JSReceiver or null.
+ // SpiderMonkey behaves this way.
+ if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true);
+
+ bool all_extensible = object->map()->is_extensible();
+ Handle<JSObject> real_receiver = object;
+ if (from_javascript) {
+ // Find the first object in the chain whose prototype object is not
+ // hidden.
+ PrototypeIterator iter(isolate, real_receiver, kStartAtPrototype,
+ PrototypeIterator::END_AT_NON_HIDDEN);
+ while (!iter.IsAtEnd()) {
+ // Casting to JSObject is fine because hidden prototypes are never
+ // JSProxies.
+ real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
+ iter.Advance();
+ all_extensible = all_extensible && real_receiver->map()->is_extensible();
+ }
+ }
+ Handle<Map> map(real_receiver->map(), isolate);
+
+ // Nothing to do if prototype is already set.
+ if (map->prototype() == *value) return Just(true);
+
+ bool immutable_proto = map->is_immutable_proto();
+ if (immutable_proto) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kImmutablePrototypeSet, object));
+ }
+
+ // From 8.6.2 Object Internal Methods
+ // ...
+ // In addition, if [[Extensible]] is false the value of the [[Class]] and
+ // [[Prototype]] internal properties of the object may not be modified.
+ // ...
+ // Implementation specific extensions that modify [[Class]], [[Prototype]]
+ // or [[Extensible]] must not violate the invariants defined in the preceding
+ // paragraph.
+ if (!all_extensible) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNonExtensibleProto, object));
+ }
+
+ // Before we can set the prototype we need to be sure prototype cycles are
+ // prevented. It is sufficient to validate that the receiver is not in the
+ // new prototype chain.
+ if (value->IsJSReceiver()) {
+ for (PrototypeIterator iter(isolate, JSReceiver::cast(*value),
+ kStartAtReceiver);
+ !iter.IsAtEnd(); iter.Advance()) {
+ if (iter.GetCurrent<JSReceiver>() == *object) {
+ // Cycle detected.
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kCyclicProto));
+ }
+ }
+ }
+
+ // Set the new prototype of the object.
+
+ isolate->UpdateNoElementsProtectorOnSetPrototype(real_receiver);
+
+ Handle<Map> new_map = Map::TransitionToPrototype(isolate, map, value);
+ DCHECK(new_map->prototype() == *value);
+ JSObject::MigrateToMap(real_receiver, new_map);
+
+ DCHECK(size == object->Size());
+ return Just(true);
+}
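+
+// Observable JS behaviour of the checks above (illustrative examples,
+// assuming standard ES semantics):
+//
+//   const a = {};
+//   const b = Object.create(a);
+//   Object.setPrototypeOf(a, b);   // TypeError: cyclic prototype chain.
+//
+//   const o = Object.preventExtensions({});
+//   Object.setPrototypeOf(o, {});  // TypeError: o is not extensible.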
+
+// static
+void JSObject::SetImmutableProto(Handle<JSObject> object) {
+ DCHECK(!object->IsAccessCheckNeeded()); // Never called from JS
+ Handle<Map> map(object->map(), object->GetIsolate());
+
+  // Nothing to do if the prototype is already immutable.
+ if (map->is_immutable_proto()) return;
+
+ Handle<Map> new_map =
+ Map::TransitionToImmutableProto(object->GetIsolate(), map);
+ object->synchronized_set_map(*new_map);
+}
+
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Arguments* args, uint32_t first_arg,
+ uint32_t arg_count,
+ EnsureElementsMode mode) {
+ // Elements in |Arguments| are ordered backwards (because they're on the
+  // stack), but the method called here iterates over them in the forward
+  // direction.
+ return EnsureCanContainElements(
+ object, args->slot_at(first_arg + arg_count - 1), arg_count, mode);
+}
+
+ElementsAccessor* JSObject::GetElementsAccessor() {
+ return ElementsAccessor::ForKind(GetElementsKind());
+}
+
+void JSObject::ValidateElements(JSObject object) {
+#ifdef ENABLE_SLOW_DCHECKS
+ if (FLAG_enable_slow_asserts) {
+ object->GetElementsAccessor()->Validate(object);
+ }
+#endif
+}
+
+bool JSObject::WouldConvertToSlowElements(uint32_t index) {
+ if (!HasFastElements()) return false;
+ uint32_t capacity = static_cast<uint32_t>(elements()->length());
+ uint32_t new_capacity;
+ return ShouldConvertToSlowElements(*this, capacity, index, &new_capacity);
+}
+
+static bool ShouldConvertToFastElements(JSObject object,
+ NumberDictionary dictionary,
+ uint32_t index,
+ uint32_t* new_capacity) {
+ // If properties with non-standard attributes or accessors were added, we
+ // cannot go back to fast elements.
+ if (dictionary->requires_slow_elements()) return false;
+
+ // Adding a property with this index will require slow elements.
+ if (index >= static_cast<uint32_t>(Smi::kMaxValue)) return false;
+
+ if (object->IsJSArray()) {
+ Object length = JSArray::cast(object)->length();
+ if (!length->IsSmi()) return false;
+ *new_capacity = static_cast<uint32_t>(Smi::ToInt(length));
+ } else if (object->IsJSSloppyArgumentsObject()) {
+ return false;
+ } else {
+ *new_capacity = dictionary->max_number_key() + 1;
+ }
+ *new_capacity = Max(index + 1, *new_capacity);
+
+ uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
+ NumberDictionary::kEntrySize;
+
+  // Convert to fast elements if the dictionary saves no more than 50% of the
+  // space a fast backing store would use.
+ return 2 * dictionary_size >= *new_capacity;
+}
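+
+// Worked example for the heuristic above (illustrative; NumberDictionary
+// entries take kEntrySize == 3 slots here): a dictionary with capacity 16
+// occupies 16 * 3 == 48 slots, so a fast backing store is chosen only while
+// new_capacity <= 96, i.e. while the dictionary fails to save at least half
+// of the space that fast elements would need.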
+
+static ElementsKind BestFittingFastElementsKind(JSObject object) {
+ if (!object->map()->CanHaveFastTransitionableElementsKind()) {
+ return HOLEY_ELEMENTS;
+ }
+ if (object->HasSloppyArgumentsElements()) {
+ return FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+ }
+ if (object->HasStringWrapperElements()) {
+ return FAST_STRING_WRAPPER_ELEMENTS;
+ }
+ DCHECK(object->HasDictionaryElements());
+ NumberDictionary dictionary = object->element_dictionary();
+ ElementsKind kind = HOLEY_SMI_ELEMENTS;
+ for (int i = 0; i < dictionary->Capacity(); i++) {
+ Object key = dictionary->KeyAt(i);
+ if (key->IsNumber()) {
+ Object value = dictionary->ValueAt(i);
+ if (!value->IsNumber()) return HOLEY_ELEMENTS;
+ if (!value->IsSmi()) {
+ if (!FLAG_unbox_double_arrays) return HOLEY_ELEMENTS;
+ kind = HOLEY_DOUBLE_ELEMENTS;
+ }
+ }
+ }
+ return kind;
+}
+
+// static
+void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ DCHECK(object->map()->is_extensible());
+
+ Isolate* isolate = object->GetIsolate();
+
+ uint32_t old_length = 0;
+ uint32_t new_capacity = 0;
+
+ if (object->IsJSArray()) {
+ CHECK(JSArray::cast(*object)->length()->ToArrayLength(&old_length));
+ }
+
+ ElementsKind kind = object->GetElementsKind();
+ FixedArrayBase elements = object->elements();
+ ElementsKind dictionary_kind = DICTIONARY_ELEMENTS;
+ if (IsSloppyArgumentsElementsKind(kind)) {
+ elements = SloppyArgumentsElements::cast(elements)->arguments();
+ dictionary_kind = SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
+ } else if (IsStringWrapperElementsKind(kind)) {
+ dictionary_kind = SLOW_STRING_WRAPPER_ELEMENTS;
+ }
+
+ if (attributes != NONE) {
+ kind = dictionary_kind;
+ } else if (elements->IsNumberDictionary()) {
+ kind = ShouldConvertToFastElements(
+ *object, NumberDictionary::cast(elements), index, &new_capacity)
+ ? BestFittingFastElementsKind(*object)
+ : dictionary_kind;
+ } else if (ShouldConvertToSlowElements(
+ *object, static_cast<uint32_t>(elements->length()), index,
+ &new_capacity)) {
+ kind = dictionary_kind;
+ }
+
+ ElementsKind to = value->OptimalElementsKind();
+ if (IsHoleyElementsKind(kind) || !object->IsJSArray() || index > old_length) {
+ to = GetHoleyElementsKind(to);
+ kind = GetHoleyElementsKind(kind);
+ }
+ to = GetMoreGeneralElementsKind(kind, to);
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(to);
+ accessor->Add(object, index, value, attributes, new_capacity);
+
+ if (object->IsJSArray() && index >= old_length) {
+ Handle<Object> new_length =
+ isolate->factory()->NewNumberFromUint(index + 1);
+ JSArray::cast(*object)->set_length(*new_length);
+ }
+}
+
+template <AllocationSiteUpdateMode update_or_check>
+bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ if (!object->IsJSArray()) return false;
+
+ if (!Heap::InYoungGeneration(*object)) return false;
+
+ if (Heap::IsLargeObject(*object)) return false;
+
+ Handle<AllocationSite> site;
+ {
+ DisallowHeapAllocation no_allocation;
+
+ Heap* heap = object->GetHeap();
+ AllocationMemento memento =
+ heap->FindAllocationMemento<Heap::kForRuntime>(object->map(), *object);
+ if (memento.is_null()) return false;
+
+    // Walk through to the AllocationSite.
+ site = handle(memento->GetAllocationSite(), heap->isolate());
+ }
+ return AllocationSite::DigestTransitionFeedback<update_or_check>(site,
+ to_kind);
+}
+
+template bool
+JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kCheckOnly>(
+ Handle<JSObject> object, ElementsKind to_kind);
+
+template bool JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kUpdate>(
+ Handle<JSObject> object, ElementsKind to_kind);
+
+void JSObject::TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ ElementsKind from_kind = object->GetElementsKind();
+
+ if (IsHoleyElementsKind(from_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+
+ if (from_kind == to_kind) return;
+
+ // This method should never be called for any other case.
+ DCHECK(IsFastElementsKind(from_kind));
+ DCHECK(IsFastElementsKind(to_kind));
+ DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
+
+ UpdateAllocationSite(object, to_kind);
+ if (object->elements() == object->GetReadOnlyRoots().empty_fixed_array() ||
+ IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
+ // No change is needed to the elements() buffer, the transition
+ // only requires a map change.
+ Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
+ MigrateToMap(object, new_map);
+ if (FLAG_trace_elements_transitions) {
+ Handle<FixedArrayBase> elms(object->elements(), object->GetIsolate());
+ PrintElementsTransition(stdout, object, from_kind, elms, to_kind, elms);
+ }
+ } else {
+ DCHECK((IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
+ (IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
+ uint32_t c = static_cast<uint32_t>(object->elements()->length());
+ ElementsAccessor::ForKind(to_kind)->GrowCapacityAndConvert(object, c);
+ }
+}
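+
+// Illustrative JS triggers for the two branches above:
+//
+//   const a = [1, 2, 3];  // PACKED_SMI_ELEMENTS
+//   a[0] = "s";           // Smi -> object: same tagged layout, so the
+//                         // transition is a map change only.
+//   const b = [1, 2, 3];
+//   b.push(4.5);          // Smi -> double: the backing store must be
+//                         // converted via GrowCapacityAndConvert.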
+
+template <typename BackingStore>
+static int HoleyElementsUsage(JSObject object, BackingStore store) {
+ Isolate* isolate = object->GetIsolate();
+ int limit = object->IsJSArray() ? Smi::ToInt(JSArray::cast(object)->length())
+ : store->length();
+ int used = 0;
+ for (int i = 0; i < limit; ++i) {
+ if (!store->is_the_hole(isolate, i)) ++used;
+ }
+ return used;
+}
+
+int JSObject::GetFastElementsUsage() {
+ FixedArrayBase store = elements();
+ switch (GetElementsKind()) {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ case PACKED_ELEMENTS:
+ return IsJSArray() ? Smi::ToInt(JSArray::cast(*this)->length())
+ : store->length();
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ store = SloppyArgumentsElements::cast(store)->arguments();
+ V8_FALLTHROUGH;
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ return HoleyElementsUsage(*this, FixedArray::cast(store));
+ case HOLEY_DOUBLE_ELEMENTS:
+ if (elements()->length() == 0) return 0;
+ return HoleyElementsUsage(*this, FixedDoubleArray::cast(store));
+
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NO_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ UNREACHABLE();
+ }
+ return 0;
+}
+
+MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
+ bool* done) {
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ return GetPropertyWithInterceptorInternal(it, it->GetInterceptor(), done);
+}
+
+Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
+ Handle<Name> name) {
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ object->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ return HasProperty(&it);
+}
+
+Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
+ uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ LookupIterator it(isolate, object, index, object,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ return HasProperty(&it);
+}
+
+Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
+ Handle<Name> name) {
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ object->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
+ return maybe_result.IsJust() ? Just(it.state() == LookupIterator::ACCESSOR)
+ : Nothing<bool>();
+}
+
+bool JSObject::IsApiWrapper() {
+ // These object types can carry information relevant for embedders. The
+ // *_API_* types are generated through templates which can have embedder
+ // fields. The other types have their embedder fields added at compile time.
+ auto instance_type = map()->instance_type();
+ return instance_type == JS_API_OBJECT_TYPE ||
+ instance_type == JS_ARRAY_BUFFER_TYPE ||
+ instance_type == JS_DATA_VIEW_TYPE ||
+ instance_type == JS_SPECIAL_API_OBJECT_TYPE ||
+ instance_type == JS_TYPED_ARRAY_TYPE;
+}
+
+bool JSObject::IsDroppableApiWrapper() {
+ auto instance_type = map()->instance_type();
+ return instance_type == JS_API_OBJECT_TYPE ||
+ instance_type == JS_SPECIAL_API_OBJECT_TYPE;
+}
+
+// static
+MaybeHandle<NativeContext> JSBoundFunction::GetFunctionRealm(
+ Handle<JSBoundFunction> function) {
+ DCHECK(function->map()->is_constructor());
+ return JSReceiver::GetFunctionRealm(
+ handle(function->bound_target_function(), function->GetIsolate()));
+}
+
+// static
+MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
+ Handle<JSBoundFunction> function) {
+ Handle<String> prefix = isolate->factory()->bound__string();
+ Handle<String> target_name = prefix;
+ Factory* factory = isolate->factory();
+ // Concatenate the "bound " up to the last non-bound target.
+ while (function->bound_target_function()->IsJSBoundFunction()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, target_name,
+ factory->NewConsString(prefix, target_name),
+ String);
+ function = handle(JSBoundFunction::cast(function->bound_target_function()),
+ isolate);
+ }
+ if (function->bound_target_function()->IsJSFunction()) {
+ Handle<JSFunction> target(
+ JSFunction::cast(function->bound_target_function()), isolate);
+ Handle<Object> name = JSFunction::GetName(isolate, target);
+ if (!name->IsString()) return target_name;
+ return factory->NewConsString(target_name, Handle<String>::cast(name));
+ }
+ // This will omit the proper target name for bound JSProxies.
+ return target_name;
+}
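+
+// E.g. (illustrative): for `function f() {}`, the name accumulated above
+// satisfies f.bind().bind().name === "bound bound f".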
+
+// static
+Maybe<int> JSBoundFunction::GetLength(Isolate* isolate,
+ Handle<JSBoundFunction> function) {
+ int nof_bound_arguments = function->bound_arguments()->length();
+ while (function->bound_target_function()->IsJSBoundFunction()) {
+ function = handle(JSBoundFunction::cast(function->bound_target_function()),
+ isolate);
+    // Make sure we never overflow {nof_bound_arguments}: the number of
+    // arguments of a function is strictly limited by the maximum length of a
+    // JSArray, so Smi::kMaxValue is a reasonably good overestimate.
+ int length = function->bound_arguments()->length();
+ if (V8_LIKELY(Smi::kMaxValue - nof_bound_arguments > length)) {
+ nof_bound_arguments += length;
+ } else {
+ nof_bound_arguments = Smi::kMaxValue;
+ }
+ }
+  // All non-JSFunction targets get a direct property and don't use this
+ // accessor.
+ Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
+ isolate);
+ Maybe<int> target_length = JSFunction::GetLength(isolate, target);
+ if (target_length.IsNothing()) return target_length;
+
+ int length = Max(0, target_length.FromJust() - nof_bound_arguments);
+ return Just(length);
+}
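+
+// E.g. (illustrative): for `function f(a, b, c) {}`,
+// f.bind(null, 1).length === 2 and f.bind(null, 1, 2, 3, 4).length === 0,
+// matching the Max(0, target_length - nof_bound_arguments) above.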
+
+// static
+Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
+ Isolate* const isolate = function->GetIsolate();
+ return isolate->factory()->function_native_code_string();
+}
+
+// static
+Handle<Object> JSFunction::GetName(Isolate* isolate,
+ Handle<JSFunction> function) {
+ if (function->shared()->name_should_print_as_anonymous()) {
+ return isolate->factory()->anonymous_string();
+ }
+ return handle(function->shared()->Name(), isolate);
+}
+
+// static
+Maybe<int> JSFunction::GetLength(Isolate* isolate,
+ Handle<JSFunction> function) {
+ int length = 0;
+ IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
+ if (is_compiled_scope.is_compiled()) {
+ length = function->shared()->GetLength();
+ } else {
+    // If the function isn't compiled yet, its length has not been computed.
+    // Compile it now and return the correct length.
+ if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION,
+ &is_compiled_scope)) {
+ length = function->shared()->GetLength();
+ }
+ if (isolate->has_pending_exception()) return Nothing<int>();
+ }
+ DCHECK_GE(length, 0);
+ return Just(length);
+}
+
+// static
+Handle<NativeContext> JSFunction::GetFunctionRealm(
+ Handle<JSFunction> function) {
+ DCHECK(function->map()->is_constructor());
+ return handle(function->context()->native_context(), function->GetIsolate());
+}
+
+void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
+ Isolate* isolate = GetIsolate();
+ if (!isolate->concurrent_recompilation_enabled() ||
+ isolate->bootstrapper()->IsActive()) {
+ mode = ConcurrencyMode::kNotConcurrent;
+ }
+
+ DCHECK(!is_compiled() || IsInterpreted());
+ DCHECK(shared()->IsInterpreted());
+ DCHECK(!IsOptimized());
+ DCHECK(!HasOptimizedCode());
+ DCHECK(shared()->allows_lazy_compilation() ||
+ !shared()->optimization_disabled());
+
+ if (mode == ConcurrencyMode::kConcurrent) {
+ if (IsInOptimizationQueue()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Not marking ");
+ ShortPrint();
+ PrintF(" -- already in optimization queue.\n");
+ }
+ return;
+ }
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Marking ");
+ ShortPrint();
+ PrintF(" for concurrent recompilation.\n");
+ }
+ }
+
+ SetOptimizationMarker(mode == ConcurrencyMode::kConcurrent
+ ? OptimizationMarker::kCompileOptimizedConcurrent
+ : OptimizationMarker::kCompileOptimized);
+}
+
+// static
+void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
+ Isolate* const isolate = function->GetIsolate();
+ DCHECK(function->shared()->is_compiled());
+ DCHECK(FLAG_lite_mode || function->shared()->HasFeedbackMetadata());
+ if (!function->has_feedback_vector() &&
+ function->shared()->HasFeedbackMetadata()) {
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ if (!shared->HasAsmWasmData()) {
+ DCHECK(function->shared()->HasBytecodeArray());
+ Handle<FeedbackVector> feedback_vector =
+ FeedbackVector::New(isolate, shared);
+ if (function->raw_feedback_cell() ==
+ isolate->heap()->many_closures_cell()) {
+ Handle<FeedbackCell> feedback_cell =
+ isolate->factory()->NewOneClosureCell(feedback_vector);
+ function->set_raw_feedback_cell(*feedback_cell);
+ } else {
+ function->raw_feedback_cell()->set_value(*feedback_vector);
+ }
+ }
+ }
+}
+
+namespace {
+
+void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
+ Handle<JSReceiver> value) {
+ // Now some logic for the maps of the objects that are created by using this
+ // function as a constructor.
+ if (function->has_initial_map()) {
+    // If the function has allocated the initial map, replace it with a
+ // copy containing the new prototype. Also complete any in-object
+ // slack tracking that is in progress at this point because it is
+ // still tracking the old copy.
+ function->CompleteInobjectSlackTrackingIfActive();
+
+ Handle<Map> initial_map(function->initial_map(), isolate);
+
+ if (!isolate->bootstrapper()->IsActive() &&
+ initial_map->instance_type() == JS_OBJECT_TYPE) {
+ // Put the value in the initial map field until an initial map is needed.
+ // At that point, a new initial map is created and the prototype is put
+ // into the initial map where it belongs.
+ function->set_prototype_or_initial_map(*value);
+ } else {
+ Handle<Map> new_map =
+ Map::Copy(isolate, initial_map, "SetInstancePrototype");
+ JSFunction::SetInitialMap(function, new_map, value);
+
+ // If the function is used as the global Array function, cache the
+ // updated initial maps (and transitioned versions) in the native context.
+ Handle<Context> native_context(function->context()->native_context(),
+ isolate);
+ Handle<Object> array_function(
+ native_context->get(Context::ARRAY_FUNCTION_INDEX), isolate);
+ if (array_function->IsJSFunction() &&
+ *function == JSFunction::cast(*array_function)) {
+ CacheInitialJSArrayMaps(native_context, new_map);
+ }
+ }
+
+ // Deoptimize all code that embeds the previous initial map.
+ initial_map->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kInitialMapChangedGroup);
+ } else {
+ // Put the value in the initial map field until an initial map is
+ // needed. At that point, a new initial map is created and the
+ // prototype is put into the initial map where it belongs.
+ function->set_prototype_or_initial_map(*value);
+ if (value->IsJSObject()) {
+ // Optimize as prototype to detach it from its transition tree.
+ JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
+ }
+ }
+}
+
+} // anonymous namespace
+
+void JSFunction::SetPrototype(Handle<JSFunction> function,
+ Handle<Object> value) {
+ DCHECK(function->IsConstructor() ||
+ IsGeneratorFunction(function->shared()->kind()));
+ Isolate* isolate = function->GetIsolate();
+ Handle<JSReceiver> construct_prototype;
+
+ // If the value is not a JSReceiver, store the value in the map's
+ // constructor field so it can be accessed. Also, set the prototype
+ // used for constructing objects to the original object prototype.
+ // See ECMA-262 13.2.2.
+ if (!value->IsJSReceiver()) {
+ // Copy the map so this does not affect unrelated functions.
+ // Remove map transitions because they point to maps with a
+ // different prototype.
+ Handle<Map> new_map =
+ Map::Copy(isolate, handle(function->map(), isolate), "SetPrototype");
+
+ JSObject::MigrateToMap(function, new_map);
+ new_map->SetConstructor(*value);
+ new_map->set_has_non_instance_prototype(true);
+
+ FunctionKind kind = function->shared()->kind();
+ Handle<Context> native_context(function->context()->native_context(),
+ isolate);
+
+ construct_prototype = Handle<JSReceiver>(
+ IsGeneratorFunction(kind)
+ ? IsAsyncFunction(kind)
+ ? native_context->initial_async_generator_prototype()
+ : native_context->initial_generator_prototype()
+ : native_context->initial_object_prototype(),
+ isolate);
+ } else {
+ construct_prototype = Handle<JSReceiver>::cast(value);
+ function->map()->set_has_non_instance_prototype(false);
+ }
+
+ SetInstancePrototype(isolate, function, construct_prototype);
+}
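+
+// Illustrative JS consequence of the non-instance-prototype branch above:
+//
+//   function f() {}
+//   f.prototype = 42;  // Not a JSReceiver.
+//   Object.getPrototypeOf(new f()) === Object.prototype;  // true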
+
+void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
+ Handle<Object> prototype) {
+ if (map->prototype() != *prototype)
+ Map::SetPrototype(function->GetIsolate(), map, prototype);
+ function->set_prototype_or_initial_map(*map);
+ map->SetConstructor(*function);
+ if (FLAG_trace_maps) {
+ LOG(function->GetIsolate(), MapEvent("InitialMap", Map(), *map, "",
+ function->shared()->DebugName()));
+ }
+}
+
+void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
+ DCHECK(function->has_prototype_slot());
+ DCHECK(function->IsConstructor() ||
+ IsResumableFunction(function->shared()->kind()));
+ if (function->has_initial_map()) return;
+ Isolate* isolate = function->GetIsolate();
+
+ // First create a new map with the size and number of in-object properties
+ // suggested by the function.
+ InstanceType instance_type;
+ if (IsResumableFunction(function->shared()->kind())) {
+ instance_type = IsAsyncGeneratorFunction(function->shared()->kind())
+ ? JS_ASYNC_GENERATOR_OBJECT_TYPE
+ : JS_GENERATOR_OBJECT_TYPE;
+ } else {
+ instance_type = JS_OBJECT_TYPE;
+ }
+
+ int instance_size;
+ int inobject_properties;
+ int expected_nof_properties =
+ CalculateExpectedNofProperties(isolate, function);
+ CalculateInstanceSizeHelper(instance_type, false, 0, expected_nof_properties,
+ &instance_size, &inobject_properties);
+
+ Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size,
+ TERMINAL_FAST_ELEMENTS_KIND,
+ inobject_properties);
+
+ // Fetch or allocate prototype.
+ Handle<Object> prototype;
+ if (function->has_instance_prototype()) {
+ prototype = handle(function->instance_prototype(), isolate);
+ } else {
+ prototype = isolate->factory()->NewFunctionPrototype(function);
+ }
+ DCHECK(map->has_fast_object_elements());
+
+ // Finally link initial map and constructor function.
+ DCHECK(prototype->IsJSReceiver());
+ JSFunction::SetInitialMap(function, map, prototype);
+ map->StartInobjectSlackTracking();
+}
+
+#ifdef DEBUG
+namespace {
+
+bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
+ switch (instance_type) {
+ case JS_API_OBJECT_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_DATE_TYPE:
+ case JS_FUNCTION_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+#ifdef V8_INTL_SUPPORT
+ case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
+ case JS_INTL_LIST_FORMAT_TYPE:
+ case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_NUMBER_FORMAT_TYPE:
+ case JS_INTL_PLURAL_RULES_TYPE:
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
+ case JS_INTL_SEGMENTER_TYPE:
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+#endif
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE:
+ case JS_MAP_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_OBJECT_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_ARGUMENTS_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_SET_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ case WASM_GLOBAL_TYPE:
+ case WASM_INSTANCE_TYPE:
+ case WASM_MEMORY_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_TABLE_TYPE:
+ return true;
+
+ case BIGINT_TYPE:
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ case BYTECODE_ARRAY_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case CELL_TYPE:
+ case CODE_TYPE:
+ case FILLER_TYPE:
+ case FIXED_ARRAY_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ case FEEDBACK_METADATA_TYPE:
+ case FOREIGN_TYPE:
+ case FREE_SPACE_TYPE:
+ case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ case HEAP_NUMBER_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_PROXY_TYPE:
+ case MAP_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case ODDBALL_TYPE:
+ case PROPERTY_CELL_TYPE:
+ case SHARED_FUNCTION_INFO_TYPE:
+ case SYMBOL_TYPE:
+ case ALLOCATION_SITE_TYPE:
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+  case FIXED_##TYPE##_ARRAY_TYPE:
+
+    TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+#define MAKE_STRUCT_CASE(TYPE, Name, name) case TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ // We must not end up here for these instance types at all.
+ UNREACHABLE();
+ // Fall through.
+ default:
+ return false;
+ }
+}
+
+} // namespace
+#endif
+
+namespace {
+
+bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
+ Handle<JSFunction> constructor,
+ Handle<Map> constructor_initial_map) {
+ // Use the default intrinsic prototype instead.
+ if (!new_target->has_prototype_slot()) return false;
+  // Check that |new_target|'s initial map is still in sync with
+  // |constructor|; otherwise we must create a new initial map for
+  // |new_target|.
+ if (new_target->has_initial_map() &&
+ new_target->initial_map()->GetConstructor() == *constructor) {
+ DCHECK(new_target->instance_prototype()->IsJSReceiver());
+ return true;
+ }
+ InstanceType instance_type = constructor_initial_map->instance_type();
+ DCHECK(CanSubclassHaveInobjectProperties(instance_type));
+ // Create a new map with the size and number of in-object properties
+  // suggested by |new_target|.
+
+ // Link initial map and constructor function if the new.target is actually a
+ // subclass constructor.
+ if (!IsDerivedConstructor(new_target->shared()->kind())) return false;
+
+ int instance_size;
+ int in_object_properties;
+ int embedder_fields =
+ JSObject::GetEmbedderFieldCount(*constructor_initial_map);
+ int expected_nof_properties =
+ JSFunction::CalculateExpectedNofProperties(isolate, new_target);
+ JSFunction::CalculateInstanceSizeHelper(
+ instance_type, true, embedder_fields, expected_nof_properties,
+ &instance_size, &in_object_properties);
+
+ int pre_allocated = constructor_initial_map->GetInObjectProperties() -
+ constructor_initial_map->UnusedPropertyFields();
+ CHECK_LE(constructor_initial_map->UsedInstanceSize(), instance_size);
+ int unused_property_fields = in_object_properties - pre_allocated;
+ Handle<Map> map =
+ Map::CopyInitialMap(isolate, constructor_initial_map, instance_size,
+ in_object_properties, unused_property_fields);
+ map->set_new_target_is_base(false);
+ Handle<Object> prototype(new_target->instance_prototype(), isolate);
+ JSFunction::SetInitialMap(new_target, map, prototype);
+ DCHECK(new_target->instance_prototype()->IsJSReceiver());
+ map->SetConstructor(*constructor);
+ map->set_construction_counter(Map::kNoSlackTracking);
+ map->StartInobjectSlackTracking();
+ return true;
+}
+
+} // namespace
+
+// static
+MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
+ Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target) {
+ EnsureHasInitialMap(constructor);
+
+ Handle<Map> constructor_initial_map(constructor->initial_map(), isolate);
+ if (*new_target == *constructor) return constructor_initial_map;
+
+ Handle<Map> result_map;
+ // Fast case, new.target is a subclass of constructor. The map is cacheable
+ // (and may already have been cached). new.target.prototype is guaranteed to
+ // be a JSReceiver.
+ if (new_target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
+ if (FastInitializeDerivedMap(isolate, function, constructor,
+ constructor_initial_map)) {
+ return handle(function->initial_map(), isolate);
+ }
+ }
+
+  // Slow path: new.target is either a proxy, or a function whose map cannot
+  // be cached.
+ // new.target.prototype is not guaranteed to be a JSReceiver, and may need to
+ // fall back to the intrinsicDefaultProto.
+ Handle<Object> prototype;
+ if (new_target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
+ if (function->has_prototype_slot()) {
+ // Make sure the new.target.prototype is cached.
+ EnsureHasInitialMap(function);
+ prototype = handle(function->prototype(), isolate);
+ } else {
+      // No prototype property; use the intrinsic default proto further down.
+ prototype = isolate->factory()->undefined_value();
+ }
+ } else {
+ Handle<String> prototype_string = isolate->factory()->prototype_string();
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, prototype,
+ JSReceiver::GetProperty(isolate, new_target, prototype_string), Map);
+ // The above prototype lookup might change the constructor and its
+ // prototype, hence we have to reload the initial map.
+ EnsureHasInitialMap(constructor);
+ constructor_initial_map = handle(constructor->initial_map(), isolate);
+ }
+
+ // If prototype is not a JSReceiver, fetch the intrinsicDefaultProto from the
+ // correct realm. Rather than directly fetching the .prototype, we fetch the
+ // constructor that points to the .prototype. This relies on
+ // constructor.prototype being FROZEN for those constructors.
+ if (!prototype->IsJSReceiver()) {
+ Handle<Context> context;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, context,
+ JSReceiver::GetFunctionRealm(new_target), Map);
+ DCHECK(context->IsNativeContext());
+ Handle<Object> maybe_index = JSReceiver::GetDataProperty(
+ constructor, isolate->factory()->native_context_index_symbol());
+ int index = maybe_index->IsSmi() ? Smi::ToInt(*maybe_index)
+ : Context::OBJECT_FUNCTION_INDEX;
+ Handle<JSFunction> realm_constructor(JSFunction::cast(context->get(index)),
+ isolate);
+ prototype = handle(realm_constructor->prototype(), isolate);
+ }
+
+ Handle<Map> map = Map::CopyInitialMap(isolate, constructor_initial_map);
+ map->set_new_target_is_base(false);
+ CHECK(prototype->IsJSReceiver());
+ if (map->prototype() != *prototype)
+ Map::SetPrototype(isolate, map, prototype);
+ map->SetConstructor(*constructor);
+ return map;
+}
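+
+// E.g. (illustrative): Reflect.construct(Array, [], class X {}) reaches the
+// generic path above: X's .prototype is fetched, and the resulting map keeps
+// Array as its constructor while using X.prototype as the prototype.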
+
+int JSFunction::ComputeInstanceSizeWithMinSlack(Isolate* isolate) {
+ CHECK(has_initial_map());
+ if (initial_map()->IsInobjectSlackTrackingInProgress()) {
+ int slack = initial_map()->ComputeMinObjectSlack(isolate);
+ return initial_map()->InstanceSizeFromSlack(slack);
+ }
+ return initial_map()->instance_size();
+}
+
+void JSFunction::PrintName(FILE* out) {
+ std::unique_ptr<char[]> name = shared()->DebugName()->ToCString();
+ PrintF(out, "%s", name.get());
+}
+
+Handle<String> JSFunction::GetName(Handle<JSFunction> function) {
+ Isolate* isolate = function->GetIsolate();
+ Handle<Object> name =
+ JSReceiver::GetDataProperty(function, isolate->factory()->name_string());
+ if (name->IsString()) return Handle<String>::cast(name);
+ return handle(function->shared()->DebugName(), isolate);
+}
+
+Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
+ Isolate* isolate = function->GetIsolate();
+ Handle<Object> name = JSReceiver::GetDataProperty(
+ function, isolate->factory()->display_name_string());
+ if (name->IsString()) return Handle<String>::cast(name);
+ return JSFunction::GetName(function);
+}
+
+bool JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
+ Handle<String> prefix) {
+ Isolate* isolate = function->GetIsolate();
+ Handle<String> function_name;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, function_name,
+ Name::ToFunctionName(isolate, name), false);
+ if (prefix->length() > 0) {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(prefix);
+ builder.AppendCharacter(' ');
+ builder.AppendString(function_name);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, function_name, builder.Finish(),
+ false);
+ }
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate,
+ JSObject::DefinePropertyOrElementIgnoreAttributes(
+ function, isolate->factory()->name_string(), function_name,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY)),
+ false);
+ return true;
+}
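+
+// E.g. (illustrative): runtime-named accessors receive a prefix here, so for
+// `const k = "x"; const o = { get [k]() {} };` the resulting getter satisfies
+// Object.getOwnPropertyDescriptor(o, "x").get.name === "get x".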
+
+namespace {
+
+Handle<String> NativeCodeFunctionSourceString(
+ Handle<SharedFunctionInfo> shared_info) {
+ Isolate* const isolate = shared_info->GetIsolate();
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("function ");
+ builder.AppendString(handle(shared_info->Name(), isolate));
+ builder.AppendCString("() { [native code] }");
+ return builder.Finish().ToHandleChecked();
+}
+
+} // namespace
+
+// static
+Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
+ Isolate* const isolate = function->GetIsolate();
+ Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
+
+ // Check if {function} should hide its source code.
+ if (!shared_info->IsUserJavaScript()) {
+ return NativeCodeFunctionSourceString(shared_info);
+ }
+
+ // Check if we should print {function} as a class.
+ Handle<Object> maybe_class_positions = JSReceiver::GetDataProperty(
+ function, isolate->factory()->class_positions_symbol());
+ if (maybe_class_positions->IsClassPositions()) {
+ ClassPositions class_positions =
+ ClassPositions::cast(*maybe_class_positions);
+ int start_position = class_positions->start();
+ int end_position = class_positions->end();
+ Handle<String> script_source(
+ String::cast(Script::cast(shared_info->script())->source()), isolate);
+ return isolate->factory()->NewSubString(script_source, start_position,
+ end_position);
+ }
+
+ // Check if we have source code for the {function}.
+ if (!shared_info->HasSourceCode()) {
+ return NativeCodeFunctionSourceString(shared_info);
+ }
+
+ if (shared_info->function_token_position() == kNoSourcePosition) {
+ // If the function token position isn't valid, return [native code] to
+ // ensure calling eval on the returned source code throws rather than
+ // giving inconsistent call behaviour.
+ isolate->CountUsage(
+ v8::Isolate::UseCounterFeature::kFunctionTokenOffsetTooLongForToString);
+ return NativeCodeFunctionSourceString(shared_info);
+ }
+ return Handle<String>::cast(
+ SharedFunctionInfo::GetSourceCodeHarmony(shared_info));
+}
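+
+// E.g. (illustrative): Math.max.toString() yields
+// "function max() { [native code] }" via NativeCodeFunctionSourceString,
+// while user-defined functions round-trip their exact source text.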
+
+// static
+int JSFunction::CalculateExpectedNofProperties(Isolate* isolate,
+ Handle<JSFunction> function) {
+ int expected_nof_properties = 0;
+ for (PrototypeIterator iter(isolate, function, kStartAtReceiver);
+ !iter.IsAtEnd(); iter.Advance()) {
+ Handle<JSReceiver> current =
+ PrototypeIterator::GetCurrent<JSReceiver>(iter);
+ if (!current->IsJSFunction()) break;
+ Handle<JSFunction> func = Handle<JSFunction>::cast(current);
+ // The super constructor should be compiled for the number of expected
+ // properties to be available.
+ Handle<SharedFunctionInfo> shared(func->shared(), isolate);
+ IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
+ if (is_compiled_scope.is_compiled() ||
+ Compiler::Compile(func, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
+ DCHECK(shared->is_compiled());
+ int count = shared->expected_nof_properties();
+ // Check that the estimate is sane.
+ if (expected_nof_properties <= JSObject::kMaxInObjectProperties - count) {
+ expected_nof_properties += count;
+ } else {
+ return JSObject::kMaxInObjectProperties;
+ }
+ } else {
+ // In case there was a compilation error for the constructor we will
+ // throw an error during instantiation.
+ break;
+ }
+ }
+ // Inobject slack tracking will reclaim redundant inobject space
+ // later, so we can afford to adjust the estimate generously,
+ // meaning we over-allocate by at least 8 slots in the beginning.
+ if (expected_nof_properties > 0) {
+ expected_nof_properties += 8;
+ if (expected_nof_properties > JSObject::kMaxInObjectProperties) {
+ expected_nof_properties = JSObject::kMaxInObjectProperties;
+ }
+ }
+ return expected_nof_properties;
+}
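+
+// E.g. (illustrative): for `class A { constructor() { this.a = 1; } }` and
+// `class B extends A { constructor() { super(); this.b = 2; } }`, the walk
+// above sums both constructors' expected property counts and pads the total
+// with 8 slack slots; in-object slack tracking reclaims the excess later.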
+
+// static
+void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
+ bool has_prototype_slot,
+ int requested_embedder_fields,
+ int requested_in_object_properties,
+ int* instance_size,
+ int* in_object_properties) {
+ DCHECK_LE(static_cast<unsigned>(requested_embedder_fields),
+ JSObject::kMaxEmbedderFields);
+ int header_size = JSObject::GetHeaderSize(instance_type, has_prototype_slot);
+ if (requested_embedder_fields) {
+ // If there are embedder fields, then the embedder fields start offset must
+ // be properly aligned (embedder fields are located between object header
+ // and inobject fields).
+ header_size = RoundUp<kSystemPointerSize>(header_size);
+ requested_embedder_fields *= kEmbedderDataSlotSizeInTaggedSlots;
+ }
+ int max_nof_fields =
+ (JSObject::kMaxInstanceSize - header_size) >> kTaggedSizeLog2;
+ CHECK_LE(max_nof_fields, JSObject::kMaxInObjectProperties);
+ CHECK_LE(static_cast<unsigned>(requested_embedder_fields),
+ static_cast<unsigned>(max_nof_fields));
+ *in_object_properties = Min(requested_in_object_properties,
+ max_nof_fields - requested_embedder_fields);
+ *instance_size =
+ header_size +
+ ((requested_embedder_fields + *in_object_properties) << kTaggedSizeLog2);
+ CHECK_EQ(*in_object_properties,
+ ((*instance_size - header_size) >> kTaggedSizeLog2) -
+ requested_embedder_fields);
+ CHECK_LE(static_cast<unsigned>(*instance_size),
+ static_cast<unsigned>(JSObject::kMaxInstanceSize));
+}
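+
+// Worked example (illustrative; assumes kTaggedSize == 8 and a 24-byte
+// header): with 2 embedder fields and 10 requested properties,
+// instance_size == 24 + (2 + 10) * 8 == 120 bytes and
+// in_object_properties == 10, provided 120 <= JSObject::kMaxInstanceSize.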
+
+void JSFunction::ClearTypeFeedbackInfo() {
+ ResetIfBytecodeFlushed();
+ if (has_feedback_vector()) {
+ FeedbackVector vector = feedback_vector();
+ Isolate* isolate = GetIsolate();
+ if (vector->ClearSlots(isolate)) {
+ IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(), *this,
+ "ClearTypeFeedbackInfo");
+ }
+ }
+}
+
+void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
+ Handle<Name> name) {
+  // Regardless of whether the property is there or not, invalidate the
+  // Load/StoreGlobalICs that load/store through the global object's
+  // prototype.
+ JSObject::InvalidatePrototypeValidityCell(*global);
+
+ DCHECK(!global->HasFastProperties());
+ auto dictionary = handle(global->global_dictionary(), global->GetIsolate());
+ int entry = dictionary->FindEntry(global->GetIsolate(), name);
+ if (entry == GlobalDictionary::kNotFound) return;
+ PropertyCell::InvalidateEntry(global->GetIsolate(), dictionary, entry);
+}
+
+Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
+ Handle<JSGlobalObject> global, Handle<Name> name,
+ PropertyCellType cell_type, int* entry_out) {
+ Isolate* isolate = global->GetIsolate();
+ DCHECK(!global->HasFastProperties());
+ Handle<GlobalDictionary> dictionary(global->global_dictionary(), isolate);
+ int entry = dictionary->FindEntry(isolate, name);
+ Handle<PropertyCell> cell;
+ if (entry != GlobalDictionary::kNotFound) {
+ if (entry_out) *entry_out = entry;
+ cell = handle(dictionary->CellAt(entry), isolate);
+ PropertyCellType original_cell_type = cell->property_details().cell_type();
+ DCHECK(original_cell_type == PropertyCellType::kInvalidated ||
+ original_cell_type == PropertyCellType::kUninitialized);
+ DCHECK(cell->value()->IsTheHole(isolate));
+ if (original_cell_type == PropertyCellType::kInvalidated) {
+ cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
+ }
+ PropertyDetails details(kData, NONE, cell_type);
+ cell->set_property_details(details);
+ return cell;
+ }
+ cell = isolate->factory()->NewPropertyCell(name);
+ PropertyDetails details(kData, NONE, cell_type);
+ dictionary = GlobalDictionary::Add(isolate, dictionary, name, cell, details,
+ entry_out);
+ // {*entry_out} is initialized inside GlobalDictionary::Add().
+ global->SetProperties(*dictionary);
+ return cell;
+}
+
+// static
+MaybeHandle<JSDate> JSDate::New(Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target, double tv) {
+ Isolate* const isolate = constructor->GetIsolate();
+ Handle<JSObject> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(constructor, new_target, Handle<AllocationSite>::null()),
+ JSDate);
+ if (-DateCache::kMaxTimeInMs <= tv && tv <= DateCache::kMaxTimeInMs) {
+ tv = DoubleToInteger(tv) + 0.0;
+ } else {
+ tv = std::numeric_limits<double>::quiet_NaN();
+ }
+ Handle<Object> value = isolate->factory()->NewNumber(tv);
+ Handle<JSDate>::cast(result)->SetValue(*value, std::isnan(tv));
+ return Handle<JSDate>::cast(result);
+}
+
+// static
+double JSDate::CurrentTimeValue(Isolate* isolate) {
+ if (FLAG_log_internal_timer_events) LOG(isolate, CurrentTimeEvent());
+
+ // According to ECMA-262, section 15.9.1, page 117, the precision of
+ // the number in a Date object representing a particular instant in
+ // time is milliseconds. Therefore, we floor the result of getting
+ // the OS time.
+ return std::floor(V8::GetCurrentPlatform()->CurrentClockTimeMillis());
+}
+
+// static
+Address JSDate::GetField(Address raw_object, Address smi_index) {
+ Object object(raw_object);
+ Smi index(smi_index);
+ return JSDate::cast(object)
+ ->DoGetField(static_cast<FieldIndex>(index->value()))
+ ->ptr();
+}
+
+Object JSDate::DoGetField(FieldIndex index) {
+ DCHECK_NE(index, kDateValue);
+
+ DateCache* date_cache = GetIsolate()->date_cache();
+
+ if (index < kFirstUncachedField) {
+ Object stamp = cache_stamp();
+ if (stamp != date_cache->stamp() && stamp->IsSmi()) {
+ // Since the stamp is not NaN, the value is also not NaN.
+ int64_t local_time_ms =
+ date_cache->ToLocal(static_cast<int64_t>(value()->Number()));
+ SetCachedFields(local_time_ms, date_cache);
+ }
+ switch (index) {
+ case kYear:
+ return year();
+ case kMonth:
+ return month();
+ case kDay:
+ return day();
+ case kWeekday:
+ return weekday();
+ case kHour:
+ return hour();
+ case kMinute:
+ return min();
+ case kSecond:
+ return sec();
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (index >= kFirstUTCField) {
+ return GetUTCField(index, value()->Number(), date_cache);
+ }
+
+ double time = value()->Number();
+ if (std::isnan(time)) return GetReadOnlyRoots().nan_value();
+
+ int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(time));
+ int days = DateCache::DaysFromTime(local_time_ms);
+
+ if (index == kDays) return Smi::FromInt(days);
+
+ int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
+ if (index == kMillisecond) return Smi::FromInt(time_in_day_ms % 1000);
+ DCHECK_EQ(index, kTimeInDay);
+ return Smi::FromInt(time_in_day_ms);
+}
+
+Object JSDate::GetUTCField(FieldIndex index, double value,
+ DateCache* date_cache) {
+ DCHECK_GE(index, kFirstUTCField);
+
+ if (std::isnan(value)) return GetReadOnlyRoots().nan_value();
+
+ int64_t time_ms = static_cast<int64_t>(value);
+
+ if (index == kTimezoneOffset) {
+ GetIsolate()->CountUsage(v8::Isolate::kDateGetTimezoneOffset);
+ return Smi::FromInt(date_cache->TimezoneOffset(time_ms));
+ }
+
+ int days = DateCache::DaysFromTime(time_ms);
+
+ if (index == kWeekdayUTC) return Smi::FromInt(date_cache->Weekday(days));
+
+ if (index <= kDayUTC) {
+ int year, month, day;
+ date_cache->YearMonthDayFromDays(days, &year, &month, &day);
+ if (index == kYearUTC) return Smi::FromInt(year);
+ if (index == kMonthUTC) return Smi::FromInt(month);
+ DCHECK_EQ(index, kDayUTC);
+ return Smi::FromInt(day);
+ }
+
+ int time_in_day_ms = DateCache::TimeInDay(time_ms, days);
+ switch (index) {
+ case kHourUTC:
+ return Smi::FromInt(time_in_day_ms / (60 * 60 * 1000));
+ case kMinuteUTC:
+ return Smi::FromInt((time_in_day_ms / (60 * 1000)) % 60);
+ case kSecondUTC:
+ return Smi::FromInt((time_in_day_ms / 1000) % 60);
+ case kMillisecondUTC:
+ return Smi::FromInt(time_in_day_ms % 1000);
+ case kDaysUTC:
+ return Smi::FromInt(days);
+ case kTimeInDayUTC:
+ return Smi::FromInt(time_in_day_ms);
+ default:
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+}
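+
+// Worked example for the arithmetic above (illustrative): for
+// time_in_day_ms == 45296789, kHourUTC yields 45296789 / 3600000 == 12,
+// kMinuteUTC yields (45296789 / 60000) % 60 == 34, kSecondUTC yields
+// (45296789 / 1000) % 60 == 56, and kMillisecondUTC yields
+// 45296789 % 1000 == 789, i.e. 12:34:56.789 UTC.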
+
+// static
+Handle<Object> JSDate::SetValue(Handle<JSDate> date, double v) {
+ Isolate* const isolate = date->GetIsolate();
+ Handle<Object> value = isolate->factory()->NewNumber(v);
+ bool value_is_nan = std::isnan(v);
+ date->SetValue(*value, value_is_nan);
+ return value;
+}
+
+void JSDate::SetValue(Object value, bool is_value_nan) {
+ set_value(value);
+ if (is_value_nan) {
+ HeapNumber nan = GetReadOnlyRoots().nan_value();
+ set_cache_stamp(nan, SKIP_WRITE_BARRIER);
+ set_year(nan, SKIP_WRITE_BARRIER);
+ set_month(nan, SKIP_WRITE_BARRIER);
+ set_day(nan, SKIP_WRITE_BARRIER);
+ set_hour(nan, SKIP_WRITE_BARRIER);
+ set_min(nan, SKIP_WRITE_BARRIER);
+ set_sec(nan, SKIP_WRITE_BARRIER);
+ set_weekday(nan, SKIP_WRITE_BARRIER);
+ } else {
+ set_cache_stamp(Smi::FromInt(DateCache::kInvalidStamp), SKIP_WRITE_BARRIER);
+ }
+}
+
+void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
+ int days = DateCache::DaysFromTime(local_time_ms);
+ int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
+ int year, month, day;
+ date_cache->YearMonthDayFromDays(days, &year, &month, &day);
+ int weekday = date_cache->Weekday(days);
+ int hour = time_in_day_ms / (60 * 60 * 1000);
+ int min = (time_in_day_ms / (60 * 1000)) % 60;
+ int sec = (time_in_day_ms / 1000) % 60;
+ set_cache_stamp(date_cache->stamp());
+ set_year(Smi::FromInt(year), SKIP_WRITE_BARRIER);
+ set_month(Smi::FromInt(month), SKIP_WRITE_BARRIER);
+ set_day(Smi::FromInt(day), SKIP_WRITE_BARRIER);
+ set_weekday(Smi::FromInt(weekday), SKIP_WRITE_BARRIER);
+ set_hour(Smi::FromInt(hour), SKIP_WRITE_BARRIER);
+ set_min(Smi::FromInt(min), SKIP_WRITE_BARRIER);
+ set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
+}
+
+int JSMessageObject::GetLineNumber() const {
+ if (start_position() == -1) return Message::kNoLineNumberInfo;
+
+ Handle<Script> the_script(script(), GetIsolate());
+
+ Script::PositionInfo info;
+ const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
+ if (!Script::GetPositionInfo(the_script, start_position(), &info,
+ offset_flag)) {
+ return Message::kNoLineNumberInfo;
+ }
+
+ return info.line + 1;
+}
+
+int JSMessageObject::GetColumnNumber() const {
+ if (start_position() == -1) return -1;
+
+ Handle<Script> the_script(script(), GetIsolate());
+
+ Script::PositionInfo info;
+ const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
+ if (!Script::GetPositionInfo(the_script, start_position(), &info,
+ offset_flag)) {
+ return -1;
+ }
+
+ return info.column; // Note: No '+1' in contrast to GetLineNumber.
+}
+
+Handle<String> JSMessageObject::GetSourceLine() const {
+ Isolate* isolate = GetIsolate();
+ Handle<Script> the_script(script(), isolate);
+
+ if (the_script->type() == Script::TYPE_WASM) {
+ return isolate->factory()->empty_string();
+ }
+
+ Script::PositionInfo info;
+ const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
+ if (!Script::GetPositionInfo(the_script, start_position(), &info,
+ offset_flag)) {
+ return isolate->factory()->empty_string();
+ }
+
+ Handle<String> src = handle(String::cast(the_script->source()), isolate);
+ return isolate->factory()->NewSubString(src, info.line_start, info.line_end);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 0eab21c137..0d88d564d0 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -19,6 +19,7 @@ namespace internal {
enum InstanceType : uint16_t;
class JSGlobalObject;
class JSGlobalProxy;
+class NativeContext;
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
@@ -77,7 +78,10 @@ class JSReceiver : public HeapObject {
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> OrdinaryToPrimitive(
Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint);
- static MaybeHandle<Context> GetFunctionRealm(Handle<JSReceiver> receiver);
+ static MaybeHandle<NativeContext> GetFunctionRealm(
+ Handle<JSReceiver> receiver);
+ V8_EXPORT_PRIVATE static MaybeHandle<NativeContext> GetContextForMicrotask(
+ Handle<JSReceiver> receiver);
// Get the first non-hidden prototype.
static inline MaybeHandle<Object> GetPrototype(Isolate* isolate,
@@ -96,7 +100,8 @@ class JSReceiver : public HeapObject {
bool use_set = true);
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
- V8_WARN_UNUSED_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool> HasProperty(
+ LookupIterator* it);
V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasProperty(
Handle<JSReceiver> object, Handle<Name> name);
V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasElement(
@@ -137,33 +142,35 @@ class JSReceiver : public HeapObject {
// "virtual" dispatcher to the correct [[DefineOwnProperty]] implementation.
V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
- PropertyDescriptor* desc, ShouldThrow should_throw);
+ PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw);
// ES6 7.3.4 (when passed kDontThrow)
V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
Isolate* isolate, Handle<JSReceiver> object, Handle<Name> key,
- Handle<Object> value, ShouldThrow should_throw);
+ Handle<Object> value, Maybe<ShouldThrow> should_throw);
V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
- LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw);
// ES6 9.1.6.1
V8_WARN_UNUSED_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
Isolate* isolate, Handle<JSObject> object, Handle<Object> key,
- PropertyDescriptor* desc, ShouldThrow should_throw);
+ PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw);
V8_WARN_UNUSED_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
- LookupIterator* it, PropertyDescriptor* desc, ShouldThrow should_throw);
+ LookupIterator* it, PropertyDescriptor* desc,
+ Maybe<ShouldThrow> should_throw);
// ES6 9.1.6.2
V8_WARN_UNUSED_RESULT static Maybe<bool> IsCompatiblePropertyDescriptor(
Isolate* isolate, bool extensible, PropertyDescriptor* desc,
PropertyDescriptor* current, Handle<Name> property_name,
- ShouldThrow should_throw);
+ Maybe<ShouldThrow> should_throw);
// ES6 9.1.6.3
// |it| can be NULL in cases where the ES spec passes |undefined| as the
// receiver. Exactly one of |it| and |property_name| must be provided.
V8_WARN_UNUSED_RESULT static Maybe<bool> ValidateAndApplyPropertyDescriptor(
Isolate* isolate, LookupIterator* it, bool extensible,
PropertyDescriptor* desc, PropertyDescriptor* current,
- ShouldThrow should_throw, Handle<Name> property_name);
+ Maybe<ShouldThrow> should_throw, Handle<Name> property_name);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool>
GetOwnPropertyDescriptor(Isolate* isolate, Handle<JSReceiver> object,
@@ -201,7 +208,7 @@ class JSReceiver : public HeapObject {
// function that was used to instantiate the object).
static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
- Handle<Context> GetCreationContext();
+ Handle<NativeContext> GetCreationContext();
V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
GetPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
@@ -257,13 +264,17 @@ class JSReceiver : public HeapObject {
static const int kHashMask = PropertyArray::HashField::kMask;
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, JSRECEIVER_FIELDS)
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_JSRECEIVER_FIELDS)
static const int kHeaderSize = kSize;
bool HasProxyInPrototype(Isolate* isolate);
bool HasComplexElements();
+ V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetPrivateEntries(
+ Isolate* isolate, Handle<JSReceiver> receiver);
+
OBJECT_CONSTRUCTORS(JSReceiver, HeapObject);
};
@@ -279,7 +290,7 @@ class JSObject : public JSReceiver {
Handle<JSFunction> constructor, Handle<JSReceiver> new_target,
Handle<AllocationSite> site);
- static MaybeHandle<Context> GetFunctionRealm(Handle<JSObject> object);
+ static MaybeHandle<NativeContext> GetFunctionRealm(Handle<JSObject> object);
// 9.1.12 ObjectCreate ( proto [ , internalSlotsList ] )
// Notice: This is NOT 19.1.2.2 Object.create ( O, Properties )
@@ -331,6 +342,8 @@ class JSObject : public JSReceiver {
inline bool HasSloppyArgumentsElements();
inline bool HasStringWrapperElements();
inline bool HasDictionaryElements();
+  // Returns true if an object has elements of the PACKED_ELEMENTS kind.
+ inline bool HasPackedElements();
inline bool HasFixedTypedArrayElements();
@@ -359,7 +372,8 @@ class JSObject : public JSReceiver {
static void EnsureWritableFastElements(Handle<JSObject> object);
V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithInterceptor(
- LookupIterator* it, ShouldThrow should_throw, Handle<Object> value);
+ LookupIterator* it, Maybe<ShouldThrow> should_throw,
+ Handle<Object> value);
// The API currently still wants DefineOwnPropertyIgnoreAttributes to convert
// AccessorInfo objects to data fields. We allow FORCE_FIELD as an exception
@@ -373,7 +387,7 @@ class JSObject : public JSReceiver {
V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- ShouldThrow should_throw,
+ Maybe<ShouldThrow> should_throw,
AccessorInfoHandling handling = DONT_FORCE_FIELD);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
@@ -398,7 +412,7 @@ class JSObject : public JSReceiver {
// cannot.
V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
LookupIterator* it, Handle<Object> value,
- ShouldThrow should_throw = kDontThrow);
+ Maybe<ShouldThrow> should_throw = Just(kDontThrow));
static void AddProperty(Isolate* isolate, Handle<JSObject> object,
Handle<Name> name, Handle<Object> value,
@@ -747,14 +761,10 @@ class JSObject : public JSReceiver {
PropertyArray::kMaxLength);
// Layout description.
-#define JS_OBJECT_FIELDS(V) \
- V(kElementsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0) \
- V(kOptionalEmbedderFieldPadding, \
- POINTER_SIZE_PADDING(kOptionalEmbedderFieldPadding)) \
- /* Header size aligned to kSystemPointerSize. */ \
- V(kHeaderSizeForEmbedderFields, 0)
+#define JS_OBJECT_FIELDS(V) \
+ V(kElementsOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSReceiver::kHeaderSize, JS_OBJECT_FIELDS)
#undef JS_OBJECT_FIELDS
@@ -764,14 +774,11 @@ class JSObject : public JSReceiver {
(kMaxInstanceSize - kHeaderSize) >> kTaggedSizeLog2;
STATIC_ASSERT(kMaxInObjectProperties <= kMaxNumberOfDescriptors);
- STATIC_ASSERT(kHeaderSizeForEmbedderFields ==
- Internals::kJSObjectHeaderSizeForEmbedderFields);
static const int kMaxFirstInobjectPropertyOffset =
(1 << kFirstInobjectPropertyOffsetBitCount) - 1;
static const int kMaxEmbedderFields =
- (kMaxFirstInobjectPropertyOffset - kHeaderSizeForEmbedderFields) /
- kEmbedderDataSlotSize;
- STATIC_ASSERT(kHeaderSizeForEmbedderFields +
+ (kMaxFirstInobjectPropertyOffset - kHeaderSize) / kEmbedderDataSlotSize;
+ STATIC_ASSERT(kHeaderSize +
kMaxEmbedderFields * kEmbedderDataSlotSizeInTaggedSlots <=
kMaxInstanceSize);
@@ -794,7 +801,8 @@ class JSObject : public JSReceiver {
GetPropertyWithFailedAccessCheck(LookupIterator* it);
V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
+ LookupIterator* it, Handle<Object> value,
+ Maybe<ShouldThrow> should_throw);
V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
LookupIterator* it, ShouldThrow should_throw);
@@ -818,17 +826,9 @@ class JSObject : public JSReceiver {
class JSAccessorPropertyDescriptor : public JSObject {
public:
// Layout description.
-#define JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS(V) \
- V(kGetOffset, kTaggedSize) \
- V(kSetOffset, kTaggedSize) \
- V(kEnumerableOffset, kTaggedSize) \
- V(kConfigurableOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS)
-#undef JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSACCESSOR_PROPERTY_DESCRIPTOR_FIELDS)
// Indices of in-object properties.
static const int kGetIndex = 0;
@@ -917,7 +917,7 @@ class JSBoundFunction : public JSObject {
Handle<JSBoundFunction> function);
static Maybe<int> GetLength(Isolate* isolate,
Handle<JSBoundFunction> function);
- static MaybeHandle<Context> GetFunctionRealm(
+ static MaybeHandle<NativeContext> GetFunctionRealm(
Handle<JSBoundFunction> function);
DECL_CAST(JSBoundFunction)
@@ -931,7 +931,8 @@ class JSBoundFunction : public JSObject {
static Handle<String> ToString(Handle<JSBoundFunction> function);
// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JSBOUND_FUNCTION_FIELDS)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSBOUND_FUNCTION_FIELDS)
OBJECT_CONSTRUCTORS(JSBoundFunction, JSObject);
};
@@ -956,11 +957,11 @@ class JSFunction : public JSObject {
inline bool has_context() const;
inline void set_context(Object context);
inline JSGlobalProxy global_proxy();
- inline Context native_context();
+ inline NativeContext native_context();
static Handle<Object> GetName(Isolate* isolate, Handle<JSFunction> function);
static Maybe<int> GetLength(Isolate* isolate, Handle<JSFunction> function);
- static Handle<Context> GetFunctionRealm(Handle<JSFunction> function);
+ static Handle<NativeContext> GetFunctionRealm(Handle<JSFunction> function);
// [code]: The generated code object for this function. Executed
// when the function is invoked, e.g. foo() or new foo(). See
@@ -1087,10 +1088,8 @@ class JSFunction : public JSObject {
DECL_CAST(JSFunction)
// Calculate the instance size and in-object properties count.
- static bool CalculateInstanceSizeForDerivedClass(
- Handle<JSFunction> function, InstanceType instance_type,
- int requested_embedder_fields, int* instance_size,
- int* in_object_properties);
+ static V8_WARN_UNUSED_RESULT int CalculateExpectedNofProperties(
+ Isolate* isolate, Handle<JSFunction> function);
static void CalculateInstanceSizeHelper(InstanceType instance_type,
bool has_prototype_slot,
int requested_embedder_fields,
@@ -1123,7 +1122,8 @@ class JSFunction : public JSObject {
// ES6 section 19.2.3.5 Function.prototype.toString ( ).
static Handle<String> ToString(Handle<JSFunction> function);
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JSFUNCTION_FIELDS)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSFUNCTION_FIELDS)
static constexpr int kSizeWithoutPrototype = kPrototypeOrInitialMapOffset;
static constexpr int kSizeWithPrototype = kSize;
@@ -1156,13 +1156,8 @@ class JSGlobalProxy : public JSObject {
DECL_VERIFIER(JSGlobalProxy)
// Layout description.
-#define JS_GLOBAL_PROXY_FIELDS(V) \
- V(kNativeContextOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_GLOBAL_PROXY_FIELDS)
-#undef JS_GLOBAL_PROXY_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSGLOBAL_PROXY_FIELDS)
OBJECT_CONSTRUCTORS(JSGlobalProxy, JSObject);
};
@@ -1171,7 +1166,7 @@ class JSGlobalProxy : public JSObject {
class JSGlobalObject : public JSObject {
public:
// [native context]: the natives corresponding to this global object.
- DECL_ACCESSORS(native_context, Context)
+ DECL_ACCESSORS(native_context, NativeContext)
// [global proxy]: the global proxy object of the context
DECL_ACCESSORS(global_proxy, JSObject)
@@ -1222,13 +1217,8 @@ class JSValue : public JSObject {
DECL_VERIFIER(JSValue)
// Layout description.
-#define JS_VALUE_FIELDS(V) \
- V(kValueOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_VALUE_FIELDS)
-#undef JS_VALUE_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSVALUE_FIELDS)
OBJECT_CONSTRUCTORS(JSValue, JSObject);
};
@@ -1413,7 +1403,7 @@ class JSMessageObject : public JSObject {
kSize>
BodyDescriptor;
- OBJECT_CONSTRUCTORS(JSMessageObject, JSObject)
+ OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
};
// The [Async-from-Sync Iterator] object
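Editorial note: several signatures above change from a plain `ShouldThrow` to `Maybe<ShouldThrow>`, letting callers defer the throw/don't-throw decision until the language mode is known. A minimal standalone sketch of that idea, using `std::optional` in place of V8's `Maybe` — `GetShouldThrow` and `DefineProperty` are invented names for illustration, not V8's internal API:

```cpp
#include <iostream>
#include <optional>

// Illustrative stand-in for V8's internal ShouldThrow enum.
enum ShouldThrow { kThrowOnError, kDontThrow };

ShouldThrow GetShouldThrow(std::optional<ShouldThrow> should_throw,
                           bool strict_mode) {
  // An explicit preference wins; otherwise fall back to the language mode,
  // which is roughly what deferring the decision buys the real code.
  if (should_throw) return *should_throw;
  return strict_mode ? kThrowOnError : kDontThrow;
}

bool DefineProperty(std::optional<ShouldThrow> should_throw, bool strict_mode,
                    bool succeeded) {
  if (succeeded) return true;
  if (GetShouldThrow(should_throw, strict_mode) == kThrowOnError) {
    std::cout << "TypeError: Cannot define property\n";  // would throw in V8
  }
  return false;
}

int main() {
  // No preference: strictness decides. Explicit kDontThrow: silent failure.
  DefineProperty(std::nullopt, /*strict_mode=*/true, /*succeeded=*/false);
  DefineProperty(kDontThrow, /*strict_mode=*/true, /*succeeded=*/false);
}
```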
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index aa126b0ce1..f7248431e8 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -39,7 +39,7 @@ inline JSPluralRules::Type JSPluralRules::type() const {
return TypeBits::decode(flags());
}
-CAST_ACCESSOR(JSPluralRules);
+CAST_ACCESSOR(JSPluralRules)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index 59b52424ef..da349dcd81 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -223,7 +223,7 @@ void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
// This is a brand new JSObject that shouldn't already have the same
// key so this shouldn't fail.
CHECK(JSReceiver::CreateDataProperty(isolate, options, key_str, value,
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
}
@@ -314,16 +314,13 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
return options;
}
-std::set<std::string> JSPluralRules::GetAvailableLocales() {
- int32_t num_locales = 0;
+const std::set<std::string>& JSPluralRules::GetAvailableLocales() {
// TODO(ftang): For PluralRules, filter out locales that
// don't support PluralRules.
// PluralRules is missing an appropriate getAvailableLocales method,
// so we should filter from all locales, but it's not clear how; see
// https://ssl.icu-project.org/trac/ticket/12756
- const icu::Locale* icu_available_locales =
- icu::Locale::getAvailableLocales(num_locales);
- return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+ return Intl::GetAvailableLocalesForLocale();
}
} // namespace internal
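Editorial note: the `GetAvailableLocales` signature above changes from returning a `std::set<std::string>` by value to returning a `const` reference, because the set is now built once and cached. A sketch of the pattern, assuming nothing about ICU; V8 uses `base::LazyInstance`, while a function-local static gives the same one-time, thread-safe initialization in standard C++:

```cpp
#include <iostream>
#include <set>
#include <string>

// Hypothetical stand-in for Intl::GetAvailableLocalesForLocale().
const std::set<std::string>& GetAvailableLocales() {
  // Built exactly once, on first call; every caller shares this instance.
  static const std::set<std::string> locales = {"de", "en", "fr"};  // fake data
  return locales;
}

int main() {
  const auto& first = GetAvailableLocales();
  const auto& second = GetAvailableLocales();
  std::cout << (&first == &second) << "\n";  // 1: one shared set, no copies
}
```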
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index 12b796fa02..30b4fd1d03 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -40,7 +40,7 @@ class JSPluralRules : public JSObject {
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ResolvePlural(
Isolate* isolate, Handle<JSPluralRules> plural_rules, double number);
- static std::set<std::string> GetAvailableLocales();
+ static const std::set<std::string>& GetAvailableLocales();
// [[Type]] is one of the values "cardinal" or "ordinal",
// identifying the plural rules used.
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index e2e41ec598..19fd8bdeb0 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -68,14 +68,8 @@ class JSPromise : public JSObject {
DECL_PRINTER(JSPromise)
DECL_VERIFIER(JSPromise)
-#define JS_PROMISE_FIELDS(V) \
- V(kReactionsOrResultOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_PROMISE_FIELDS)
-#undef JS_PROMISE_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSPROMISE_FIELDS)
static const int kSizeWithEmbedderFields =
kSize + v8::Promise::kEmbedderFieldCount * kEmbedderDataSlotSize;
@@ -100,7 +94,7 @@ class JSPromise : public JSObject {
Handle<Object> argument,
PromiseReaction::Type type);
- OBJECT_CONSTRUCTORS(JSPromise, JSObject)
+ OBJECT_CONSTRUCTORS(JSPromise, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index 3ba7b7e974..a933e6598a 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -25,7 +25,7 @@ class JSProxy : public JSReceiver {
// [target]: The target property.
DECL_ACCESSORS(target, Object)
- static MaybeHandle<Context> GetFunctionRealm(Handle<JSProxy> proxy);
+ static MaybeHandle<NativeContext> GetFunctionRealm(Handle<JSProxy> proxy);
DECL_CAST(JSProxy)
@@ -57,7 +57,7 @@ class JSProxy : public JSReceiver {
// ES6 9.5.6
V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
Isolate* isolate, Handle<JSProxy> object, Handle<Object> key,
- PropertyDescriptor* desc, ShouldThrow should_throw);
+ PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw);
// ES6 9.5.7
V8_WARN_UNUSED_RESULT static Maybe<bool> HasProperty(Isolate* isolate,
@@ -85,7 +85,7 @@ class JSProxy : public JSReceiver {
// ES6 9.5.9
V8_WARN_UNUSED_RESULT static Maybe<bool> SetProperty(
Handle<JSProxy> proxy, Handle<Name> name, Handle<Object> value,
- Handle<Object> receiver, LanguageMode language_mode);
+ Handle<Object> receiver, Maybe<ShouldThrow> should_throw);
// ES6 9.5.10 (when passed LanguageMode::kSloppy)
V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyOrElement(
@@ -106,14 +106,8 @@ class JSProxy : public JSReceiver {
static const int kMaxIterationLimit = 100 * 1024;
// Layout description.
-#define JS_PROXY_FIELDS(V) \
- V(kTargetOffset, kTaggedSize) \
- V(kHandlerOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSReceiver::kHeaderSize, JS_PROXY_FIELDS)
-#undef JS_PROXY_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSReceiver::kHeaderSize,
+ TORQUE_GENERATED_JSPROXY_FIELDS)
// kTargetOffset aliases with the elements of JSObject. The fact that
// JSProxy::target is a Javascript value which cannot be confused with an
@@ -128,7 +122,7 @@ class JSProxy : public JSReceiver {
static Maybe<bool> SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Symbol> private_name,
PropertyDescriptor* desc,
- ShouldThrow should_throw);
+ Maybe<ShouldThrow> should_throw);
OBJECT_CONSTRUCTORS(JSProxy, JSReceiver);
};
@@ -139,15 +133,8 @@ class JSProxy : public JSReceiver {
class JSProxyRevocableResult : public JSObject {
public:
// Layout description.
-#define JS_PROXY_REVOCATABLE_RESULT_FIELDS(V) \
- V(kProxyOffset, kTaggedSize) \
- V(kRevokeOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_PROXY_REVOCATABLE_RESULT_FIELDS)
-#undef JS_PROXY_REVOCATABLE_RESULT_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ JSObject::kHeaderSize, TORQUE_GENERATED_JSPROXY_REVOCABLE_RESULT_FIELDS)
// Indices of in-object properties.
static const int kProxyIndex = 0;
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 23f7c69ade..8322a3c258 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -79,8 +79,18 @@ void JSRegExp::SetDataAt(int index, Object value) {
}
bool JSRegExp::HasCompiledCode() const {
- return TypeTag() == IRREGEXP && (DataAt(kIrregexpLatin1CodeIndex)->IsCode() ||
- DataAt(kIrregexpUC16CodeIndex)->IsCode());
+ if (TypeTag() != IRREGEXP) return false;
+#ifdef DEBUG
+ DCHECK(DataAt(kIrregexpLatin1CodeIndex)->IsCode() ||
+ DataAt(kIrregexpLatin1CodeIndex)->IsByteArray() ||
+ DataAt(kIrregexpLatin1CodeIndex) == Smi::FromInt(kUninitializedValue));
+ DCHECK(DataAt(kIrregexpUC16CodeIndex)->IsCode() ||
+ DataAt(kIrregexpUC16CodeIndex)->IsByteArray() ||
+ DataAt(kIrregexpUC16CodeIndex) == Smi::FromInt(kUninitializedValue));
+#endif // DEBUG
+ Smi uninitialized = Smi::FromInt(kUninitializedValue);
+ return (DataAt(kIrregexpLatin1CodeIndex) != uninitialized ||
+ DataAt(kIrregexpUC16CodeIndex) != uninitialized);
}
void JSRegExp::DiscardCompiledCodeForSerialization() {
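Editorial note: the rewritten `HasCompiledCode` above relies on the invariant (asserted in the DCHECKs) that each code slot holds compiled Code, an interpreter ByteArray, or the uninitialized sentinel `Smi(-1)`. A tiny model of the new check, with an enum standing in for those tagged states:

```cpp
#include <iostream>

// Stand-in for the three states a regexp code slot may be in.
enum class Slot { kUninitialized, kCode, kByteArray };

bool HasCompiledCode(Slot latin1, Slot uc16) {
  // The new check asks "is either slot initialized?" rather than
  // "is either slot a Code object?", so interpreter data also counts.
  return latin1 != Slot::kUninitialized || uc16 != Slot::kUninitialized;
}

int main() {
  std::cout << HasCompiledCode(Slot::kUninitialized, Slot::kUninitialized)  // 0
            << HasCompiledCode(Slot::kByteArray, Slot::kUninitialized)      // 1
            << HasCompiledCode(Slot::kCode, Slot::kCode) << "\n";           // 1
}
```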
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 5380b79c6c..5012e2c01b 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -181,7 +181,7 @@ class JSRegExp : public JSObject {
// The uninitialized value for a regexp code object.
static const int kUninitializedValue = -1;
- OBJECT_CONSTRUCTORS(JSRegExp, JSObject)
+ OBJECT_CONSTRUCTORS(JSRegExp, JSObject);
};
DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 1824b4b4ca..6b8b4550ac 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -48,7 +48,7 @@ inline JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::numeric() const {
return NumericBits::decode(flags());
}
-CAST_ACCESSOR(JSRelativeTimeFormat);
+CAST_ACCESSOR(JSRelativeTimeFormat)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 29896a926e..02039fa9e0 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -18,7 +18,6 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format.h"
#include "src/objects/js-relative-time-format-inl.h"
-#include "unicode/datefmt.h"
#include "unicode/numfmt.h"
#include "unicode/reldatefmt.h"
@@ -273,8 +272,9 @@ MaybeHandle<JSArray> GenerateRelativeTimeFormatParts(
Handle<String> unit = UnitAsString(isolate, unit_enum);
- Maybe<int> maybe_format_to_parts =
- JSNumberFormat::FormatToParts(isolate, array, index, nf, number, unit);
+ Handle<Object> number_obj = factory->NewNumber(number);
+ Maybe<int> maybe_format_to_parts = JSNumberFormat::FormatToParts(
+ isolate, array, index, nf, number_obj, unit);
MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
index = maybe_format_to_parts.FromJust();
@@ -408,11 +408,10 @@ MaybeHandle<Object> JSRelativeTimeFormat::Format(
formatted.length()));
}
-std::set<std::string> JSRelativeTimeFormat::GetAvailableLocales() {
- int32_t num_locales = 0;
- const icu::Locale* icu_available_locales =
- icu::DateFormat::getAvailableLocales(num_locales);
- return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+const std::set<std::string>& JSRelativeTimeFormat::GetAvailableLocales() {
+  // Since RelativeTimeFormatter does not have a method to list all
+  // available locales, work around this by asking DateFormat for its list.
+ return Intl::GetAvailableLocalesForDateFormat();
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index c90e24118b..4bdaee9dfc 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -50,7 +50,7 @@ class JSRelativeTimeFormat : public JSObject {
Handle<JSRelativeTimeFormat> format_holder, const char* func_name,
bool to_parts);
- static std::set<std::string> GetAvailableLocales();
+ static const std::set<std::string>& GetAvailableLocales();
DECL_CAST(JSRelativeTimeFormat)
diff --git a/deps/v8/src/objects/js-segment-iterator-inl.h b/deps/v8/src/objects/js-segment-iterator-inl.h
index 1ee6087d1e..0c1a3e4eec 100644
--- a/deps/v8/src/objects/js-segment-iterator-inl.h
+++ b/deps/v8/src/objects/js-segment-iterator-inl.h
@@ -31,7 +31,7 @@ BIT_FIELD_ACCESSORS(JSSegmentIterator, flags, is_break_type_set,
SMI_ACCESSORS(JSSegmentIterator, flags, kFlagsOffset)
-CAST_ACCESSOR(JSSegmentIterator);
+CAST_ACCESSOR(JSSegmentIterator)
inline void JSSegmentIterator::set_granularity(
JSSegmenter::Granularity granularity) {
diff --git a/deps/v8/src/objects/js-segment-iterator.cc b/deps/v8/src/objects/js-segment-iterator.cc
index 74b0330719..570c71dd21 100644
--- a/deps/v8/src/objects/js-segment-iterator.cc
+++ b/deps/v8/src/objects/js-segment-iterator.cc
@@ -166,19 +166,20 @@ MaybeHandle<JSReceiver> JSSegmentIterator::Next(
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
// 11. Perform ! CreateDataProperty(result "segment", segment).
- CHECK(JSReceiver::CreateDataProperty(
- isolate, result, factory->segment_string(), segment, kDontThrow)
+ CHECK(JSReceiver::CreateDataProperty(isolate, result,
+ factory->segment_string(), segment,
+ Just(kDontThrow))
.FromJust());
// 12. Perform ! CreateDataProperty(result, "breakType", breakType).
CHECK(JSReceiver::CreateDataProperty(isolate, result,
factory->breakType_string(), break_type,
- kDontThrow)
+ Just(kDontThrow))
.FromJust());
// 13. Perform ! CreateDataProperty(result, "index", newIndex).
CHECK(JSReceiver::CreateDataProperty(isolate, result, factory->index_string(),
- new_index, kDontThrow)
+ new_index, Just(kDontThrow))
.FromJust());
// 14. Return CreateIterResultObject(result, false).
diff --git a/deps/v8/src/objects/js-segmenter-inl.h b/deps/v8/src/objects/js-segmenter-inl.h
index 327af7f485..05935fa905 100644
--- a/deps/v8/src/objects/js-segmenter-inl.h
+++ b/deps/v8/src/objects/js-segmenter-inl.h
@@ -37,7 +37,7 @@ inline JSSegmenter::Granularity JSSegmenter::granularity() const {
return GranularityBits::decode(flags());
}
-CAST_ACCESSOR(JSSegmenter);
+CAST_ACCESSOR(JSSegmenter)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
index aba1c7bdf2..7548b65f23 100644
--- a/deps/v8/src/objects/js-segmenter.cc
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -162,11 +162,10 @@ Handle<String> JSSegmenter::GranularityAsString() const {
}
}
-std::set<std::string> JSSegmenter::GetAvailableLocales() {
- int32_t num_locales = 0;
- const icu::Locale* icu_available_locales =
- icu::BreakIterator::getAvailableLocales(num_locales);
- return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+const std::set<std::string>& JSSegmenter::GetAvailableLocales() {
+ static base::LazyInstance<Intl::AvailableLocales<icu::BreakIterator>>::type
+ available_locales = LAZY_INSTANCE_INITIALIZER;
+ return available_locales.Pointer()->Get();
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index 64de1d9be7..b54e042d17 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -39,7 +39,7 @@ class JSSegmenter : public JSObject {
V8_WARN_UNUSED_RESULT static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSSegmenter> segmenter_holder);
- static std::set<std::string> GetAvailableLocales();
+ static const std::set<std::string>& GetAvailableLocales();
Handle<String> GranularityAsString() const;
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 12006f2927..a08cb08fcf 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -18,156 +18,254 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSWeakCell, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WeakCell, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(JSWeakRef, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSWeakFactory, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSWeakFactoryCleanupIterator, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(WeakFactoryCleanupJobTask, Microtask)
-
-ACCESSORS(JSWeakFactory, native_context, NativeContext, kNativeContextOffset)
-ACCESSORS(JSWeakFactory, cleanup, Object, kCleanupOffset)
-ACCESSORS(JSWeakFactory, active_cells, Object, kActiveCellsOffset)
-ACCESSORS(JSWeakFactory, cleared_cells, Object, kClearedCellsOffset)
-SMI_ACCESSORS(JSWeakFactory, flags, kFlagsOffset)
-ACCESSORS(JSWeakFactory, next, Object, kNextOffset)
-CAST_ACCESSOR(JSWeakFactory)
-
-ACCESSORS(JSWeakCell, factory, Object, kFactoryOffset)
-ACCESSORS(JSWeakCell, target, Object, kTargetOffset)
-ACCESSORS(JSWeakCell, holdings, Object, kHoldingsOffset)
-ACCESSORS(JSWeakCell, next, Object, kNextOffset)
-ACCESSORS(JSWeakCell, prev, Object, kPrevOffset)
-CAST_ACCESSOR(JSWeakCell)
+OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroup, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroupCleanupIterator, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(FinalizationGroupCleanupJobTask, Microtask)
+
+ACCESSORS(JSFinalizationGroup, native_context, NativeContext,
+ kNativeContextOffset)
+ACCESSORS(JSFinalizationGroup, cleanup, Object, kCleanupOffset)
+ACCESSORS(JSFinalizationGroup, active_cells, Object, kActiveCellsOffset)
+ACCESSORS(JSFinalizationGroup, cleared_cells, Object, kClearedCellsOffset)
+ACCESSORS(JSFinalizationGroup, key_map, Object, kKeyMapOffset)
+SMI_ACCESSORS(JSFinalizationGroup, flags, kFlagsOffset)
+ACCESSORS(JSFinalizationGroup, next, Object, kNextOffset)
+CAST_ACCESSOR(JSFinalizationGroup)
+
+ACCESSORS(WeakCell, finalization_group, Object, kFinalizationGroupOffset)
+ACCESSORS(WeakCell, target, HeapObject, kTargetOffset)
+ACCESSORS(WeakCell, holdings, Object, kHoldingsOffset)
+ACCESSORS(WeakCell, next, Object, kNextOffset)
+ACCESSORS(WeakCell, prev, Object, kPrevOffset)
+ACCESSORS(WeakCell, key, Object, kKeyOffset)
+ACCESSORS(WeakCell, key_list_next, Object, kKeyListNextOffset)
+ACCESSORS(WeakCell, key_list_prev, Object, kKeyListPrevOffset)
+CAST_ACCESSOR(WeakCell)
CAST_ACCESSOR(JSWeakRef)
-ACCESSORS(JSWeakRef, target, Object, kTargetOffset)
+ACCESSORS(JSWeakRef, target, HeapObject, kTargetOffset)
-ACCESSORS(JSWeakFactoryCleanupIterator, factory, JSWeakFactory, kFactoryOffset)
-CAST_ACCESSOR(JSWeakFactoryCleanupIterator)
+ACCESSORS(JSFinalizationGroupCleanupIterator, finalization_group,
+ JSFinalizationGroup, kFinalizationGroupOffset)
+CAST_ACCESSOR(JSFinalizationGroupCleanupIterator)
-ACCESSORS(WeakFactoryCleanupJobTask, factory, JSWeakFactory, kFactoryOffset)
-CAST_ACCESSOR(WeakFactoryCleanupJobTask)
+ACCESSORS(FinalizationGroupCleanupJobTask, finalization_group,
+ JSFinalizationGroup, kFinalizationGroupOffset)
+CAST_ACCESSOR(FinalizationGroupCleanupJobTask)
+
+void JSFinalizationGroup::Register(
+ Handle<JSFinalizationGroup> finalization_group, Handle<JSReceiver> target,
+ Handle<Object> holdings, Handle<Object> key, Isolate* isolate) {
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell();
+ weak_cell->set_finalization_group(*finalization_group);
+ weak_cell->set_target(*target);
+ weak_cell->set_holdings(*holdings);
+ weak_cell->set_prev(ReadOnlyRoots(isolate).undefined_value());
+ weak_cell->set_next(ReadOnlyRoots(isolate).undefined_value());
+ weak_cell->set_key(*key);
+ weak_cell->set_key_list_prev(ReadOnlyRoots(isolate).undefined_value());
+ weak_cell->set_key_list_next(ReadOnlyRoots(isolate).undefined_value());
-void JSWeakFactory::AddWeakCell(JSWeakCell weak_cell) {
- weak_cell->set_factory(*this);
- weak_cell->set_next(active_cells());
- if (active_cells()->IsJSWeakCell()) {
- JSWeakCell::cast(active_cells())->set_prev(weak_cell);
+ // Add to active_cells.
+ weak_cell->set_next(finalization_group->active_cells());
+ if (finalization_group->active_cells()->IsWeakCell()) {
+ WeakCell::cast(finalization_group->active_cells())->set_prev(*weak_cell);
+ }
+ finalization_group->set_active_cells(*weak_cell);
+
+ if (!key->IsUndefined(isolate)) {
+ Handle<ObjectHashTable> key_map;
+ if (finalization_group->key_map()->IsUndefined(isolate)) {
+ key_map = ObjectHashTable::New(isolate, 1);
+ } else {
+ key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ }
+
+ Object value = key_map->Lookup(key);
+ if (value->IsWeakCell()) {
+ WeakCell existing_weak_cell = WeakCell::cast(value);
+ existing_weak_cell->set_key_list_prev(*weak_cell);
+ weak_cell->set_key_list_next(existing_weak_cell);
+ } else {
+ DCHECK(value->IsTheHole(isolate));
+ }
+ key_map = ObjectHashTable::Put(key_map, key, weak_cell);
+ finalization_group->set_key_map(*key_map);
+ }
+}
+
+void JSFinalizationGroup::Unregister(
+ Handle<JSFinalizationGroup> finalization_group, Handle<Object> key,
+ Isolate* isolate) {
+ // Iterate through the doubly linked list of WeakCells associated with the
+ // key. Each WeakCell will be in the "active_cells" or "cleared_cells" list of
+ // its FinalizationGroup; remove it from there.
+ if (!finalization_group->key_map()->IsUndefined(isolate)) {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ Object value = key_map->Lookup(key);
+ Object undefined = ReadOnlyRoots(isolate).undefined_value();
+ while (value->IsWeakCell()) {
+ WeakCell weak_cell = WeakCell::cast(value);
+ weak_cell->RemoveFromFinalizationGroupCells(isolate);
+ value = weak_cell->key_list_next();
+ weak_cell->set_key_list_prev(undefined);
+ weak_cell->set_key_list_next(undefined);
+ }
+ bool was_present;
+ key_map = ObjectHashTable::Remove(isolate, key_map, key, &was_present);
+ finalization_group->set_key_map(*key_map);
}
- set_active_cells(weak_cell);
}
-bool JSWeakFactory::NeedsCleanup() const {
- return cleared_cells()->IsJSWeakCell();
+bool JSFinalizationGroup::NeedsCleanup() const {
+ return cleared_cells()->IsWeakCell();
}
-bool JSWeakFactory::scheduled_for_cleanup() const {
+bool JSFinalizationGroup::scheduled_for_cleanup() const {
return ScheduledForCleanupField::decode(flags());
}
-void JSWeakFactory::set_scheduled_for_cleanup(bool scheduled_for_cleanup) {
+void JSFinalizationGroup::set_scheduled_for_cleanup(
+ bool scheduled_for_cleanup) {
set_flags(ScheduledForCleanupField::update(flags(), scheduled_for_cleanup));
}
-JSWeakCell JSWeakFactory::PopClearedCell(Isolate* isolate) {
- JSWeakCell weak_cell = JSWeakCell::cast(cleared_cells());
+Object JSFinalizationGroup::PopClearedCellHoldings(
+ Handle<JSFinalizationGroup> finalization_group, Isolate* isolate) {
+ Handle<WeakCell> weak_cell =
+ handle(WeakCell::cast(finalization_group->cleared_cells()), isolate);
DCHECK(weak_cell->prev()->IsUndefined(isolate));
- set_cleared_cells(weak_cell->next());
+ finalization_group->set_cleared_cells(weak_cell->next());
weak_cell->set_next(ReadOnlyRoots(isolate).undefined_value());
- if (cleared_cells()->IsJSWeakCell()) {
- JSWeakCell cleared_cells_head = JSWeakCell::cast(cleared_cells());
- DCHECK_EQ(cleared_cells_head->prev(), weak_cell);
+ if (finalization_group->cleared_cells()->IsWeakCell()) {
+ WeakCell cleared_cells_head =
+ WeakCell::cast(finalization_group->cleared_cells());
+ DCHECK_EQ(cleared_cells_head->prev(), *weak_cell);
cleared_cells_head->set_prev(ReadOnlyRoots(isolate).undefined_value());
} else {
- DCHECK(cleared_cells()->IsUndefined(isolate));
+ DCHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
}
- return weak_cell;
+
+ // Also remove the WeakCell from the key_map (if it's there).
+ if (!weak_cell->key()->IsUndefined(isolate)) {
+ if (weak_cell->key_list_prev()->IsUndefined(isolate) &&
+ weak_cell->key_list_next()->IsUndefined(isolate)) {
+ // weak_cell is the only one associated with its key; remove the key
+ // from the hash table.
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ Handle<Object> key = handle(weak_cell->key(), isolate);
+ bool was_present;
+ key_map = ObjectHashTable::Remove(isolate, key_map, key, &was_present);
+ DCHECK(was_present);
+ finalization_group->set_key_map(*key_map);
+ } else if (weak_cell->key_list_prev()->IsUndefined()) {
+ // weak_cell is the list head for its key; we need to change the value of
+ // the key in the hash table.
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ Handle<Object> key = handle(weak_cell->key(), isolate);
+ Handle<WeakCell> next =
+ handle(WeakCell::cast(weak_cell->key_list_next()), isolate);
+ DCHECK_EQ(next->key_list_prev(), *weak_cell);
+ next->set_key_list_prev(ReadOnlyRoots(isolate).undefined_value());
+ weak_cell->set_key_list_next(ReadOnlyRoots(isolate).undefined_value());
+ key_map = ObjectHashTable::Put(key_map, key, next);
+ finalization_group->set_key_map(*key_map);
+ } else {
+ // weak_cell is somewhere in the middle of its key list.
+ WeakCell prev = WeakCell::cast(weak_cell->key_list_prev());
+ prev->set_key_list_next(weak_cell->key_list_next());
+ if (!weak_cell->key_list_next()->IsUndefined()) {
+ WeakCell next = WeakCell::cast(weak_cell->key_list_next());
+ next->set_key_list_prev(weak_cell->key_list_prev());
+ }
+ }
+ }
+
+ return weak_cell->holdings();
}
-void JSWeakCell::Nullify(
+void WeakCell::Nullify(
Isolate* isolate,
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot) {
+  // Remove the WeakCell from the "active_cells" list of its
+ // JSFinalizationGroup and insert it into the "cleared_cells" list. This is
+ // only called for WeakCells which haven't been unregistered yet, so they will
+ // be in the active_cells list. (The caller must guard against calling this
+ // for unregistered WeakCells by checking that the target is not undefined.)
DCHECK(target()->IsJSReceiver());
set_target(ReadOnlyRoots(isolate).undefined_value());
- JSWeakFactory weak_factory = JSWeakFactory::cast(factory());
- // Remove from the JSWeakCell from the "active_cells" list of its
- // JSWeakFactory and insert it into the "cleared" list.
- if (prev()->IsJSWeakCell()) {
- DCHECK_NE(weak_factory->active_cells(), *this);
- JSWeakCell prev_cell = JSWeakCell::cast(prev());
+ JSFinalizationGroup fg = JSFinalizationGroup::cast(finalization_group());
+ if (prev()->IsWeakCell()) {
+ DCHECK_NE(fg->active_cells(), *this);
+ WeakCell prev_cell = WeakCell::cast(prev());
prev_cell->set_next(next());
- gc_notify_updated_slot(prev_cell,
- prev_cell.RawField(JSWeakCell::kNextOffset), next());
+ gc_notify_updated_slot(prev_cell, prev_cell.RawField(WeakCell::kNextOffset),
+ next());
} else {
- DCHECK_EQ(weak_factory->active_cells(), *this);
- weak_factory->set_active_cells(next());
+ DCHECK_EQ(fg->active_cells(), *this);
+ fg->set_active_cells(next());
gc_notify_updated_slot(
- weak_factory, weak_factory.RawField(JSWeakFactory::kActiveCellsOffset),
- next());
+ fg, fg.RawField(JSFinalizationGroup::kActiveCellsOffset), next());
}
- if (next()->IsJSWeakCell()) {
- JSWeakCell next_cell = JSWeakCell::cast(next());
+ if (next()->IsWeakCell()) {
+ WeakCell next_cell = WeakCell::cast(next());
next_cell->set_prev(prev());
- gc_notify_updated_slot(next_cell,
- next_cell.RawField(JSWeakCell::kPrevOffset), prev());
+ gc_notify_updated_slot(next_cell, next_cell.RawField(WeakCell::kPrevOffset),
+ prev());
}
set_prev(ReadOnlyRoots(isolate).undefined_value());
- Object cleared_head = weak_factory->cleared_cells();
- if (cleared_head->IsJSWeakCell()) {
- JSWeakCell cleared_head_cell = JSWeakCell::cast(cleared_head);
+ Object cleared_head = fg->cleared_cells();
+ if (cleared_head->IsWeakCell()) {
+ WeakCell cleared_head_cell = WeakCell::cast(cleared_head);
cleared_head_cell->set_prev(*this);
gc_notify_updated_slot(cleared_head_cell,
- cleared_head_cell.RawField(JSWeakCell::kPrevOffset),
+ cleared_head_cell.RawField(WeakCell::kPrevOffset),
*this);
}
- set_next(weak_factory->cleared_cells());
- gc_notify_updated_slot(*this, RawField(JSWeakCell::kNextOffset), next());
- weak_factory->set_cleared_cells(*this);
+ set_next(fg->cleared_cells());
+ gc_notify_updated_slot(*this, RawField(WeakCell::kNextOffset), next());
+ fg->set_cleared_cells(*this);
gc_notify_updated_slot(
- weak_factory, weak_factory.RawField(JSWeakFactory::kClearedCellsOffset),
- *this);
+ fg, fg.RawField(JSFinalizationGroup::kClearedCellsOffset), *this);
}
-void JSWeakCell::Clear(Isolate* isolate) {
- // Unlink the JSWeakCell from the list it's in (if any). The JSWeakCell can be
- // in its JSWeakFactory's active_cells list, cleared_cells list or neither (if
- // it has been already taken out).
+void WeakCell::RemoveFromFinalizationGroupCells(Isolate* isolate) {
+ // Remove the WeakCell from the list it's in (either "active_cells" or
+ // "cleared_cells" of its JSFinalizationGroup).
+  // It's important to set the target to undefined here; this guarantees that
+  // Nullify (which assumes the WeakCell is in active_cells) is never called
+  // for this cell again.
DCHECK(target()->IsUndefined() || target()->IsJSReceiver());
set_target(ReadOnlyRoots(isolate).undefined_value());
- if (factory()->IsJSWeakFactory()) {
- JSWeakFactory weak_factory = JSWeakFactory::cast(factory());
- if (weak_factory->active_cells() == *this) {
- DCHECK(!prev()->IsJSWeakCell());
- weak_factory->set_active_cells(next());
- } else if (weak_factory->cleared_cells() == *this) {
- DCHECK(!prev()->IsJSWeakCell());
- weak_factory->set_cleared_cells(next());
- } else if (prev()->IsJSWeakCell()) {
- JSWeakCell prev_cell = JSWeakCell::cast(prev());
- prev_cell->set_next(next());
- }
- if (next()->IsJSWeakCell()) {
- JSWeakCell next_cell = JSWeakCell::cast(next());
- next_cell->set_prev(prev());
- }
- set_prev(ReadOnlyRoots(isolate).undefined_value());
- set_next(ReadOnlyRoots(isolate).undefined_value());
-
- set_holdings(ReadOnlyRoots(isolate).undefined_value());
- set_factory(ReadOnlyRoots(isolate).undefined_value());
- } else {
- // Already cleared.
- DCHECK(next()->IsUndefined(isolate));
+ JSFinalizationGroup fg = JSFinalizationGroup::cast(finalization_group());
+ if (fg->active_cells() == *this) {
DCHECK(prev()->IsUndefined(isolate));
- DCHECK(holdings()->IsUndefined(isolate));
- DCHECK(factory()->IsUndefined(isolate));
+ fg->set_active_cells(next());
+ } else if (fg->cleared_cells() == *this) {
+ DCHECK(!prev()->IsWeakCell());
+ fg->set_cleared_cells(next());
+ } else {
+ DCHECK(prev()->IsWeakCell());
+ WeakCell prev_cell = WeakCell::cast(prev());
+ prev_cell->set_next(next());
}
+ if (next()->IsWeakCell()) {
+ WeakCell next_cell = WeakCell::cast(next());
+ next_cell->set_prev(prev());
+ }
+ set_prev(ReadOnlyRoots(isolate).undefined_value());
+ set_next(ReadOnlyRoots(isolate).undefined_value());
}
} // namespace internal
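Editorial note: `WeakCell::RemoveFromFinalizationGroupCells` above unlinks a cell that may be the list head (tracked by the group) or an interior node. A standalone sketch of that case split, simplified to a single `active_cells` list; plain pointers stand in for the tagged undefined/WeakCell values, and the types are invented:

```cpp
#include <iostream>

struct Cell {
  Cell* prev = nullptr;
  Cell* next = nullptr;
  int holdings = 0;
};

struct Group {
  Cell* active_cells = nullptr;  // head of the intrusive list
};

void Unlink(Group* group, Cell* cell) {
  if (group->active_cells == cell) {
    group->active_cells = cell->next;  // head: the group points past it
  } else if (cell->prev != nullptr) {
    cell->prev->next = cell->next;     // interior: predecessor bypasses it
  }
  if (cell->next != nullptr) cell->next->prev = cell->prev;
  cell->prev = cell->next = nullptr;   // like set_prev/set_next(undefined)
}

int main() {
  Cell a, b, c;
  a.holdings = 1; b.holdings = 2; c.holdings = 3;
  a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
  Group g; g.active_cells = &a;
  Unlink(&g, &b);                      // drop the interior cell
  for (Cell* p = g.active_cells; p; p = p->next) std::cout << p->holdings;
  std::cout << "\n";                   // prints 13
}
```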
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index 5938c27b2f..975d8beca9 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -14,29 +14,35 @@
namespace v8 {
namespace internal {
-class JSWeakCell;
class NativeContext;
+class WeakCell;
-// WeakFactory object from the JS Weak Refs spec proposal:
+// FinalizationGroup object from the JS Weak Refs spec proposal:
// https://github.com/tc39/proposal-weakrefs
-class JSWeakFactory : public JSObject {
+class JSFinalizationGroup : public JSObject {
public:
- DECL_PRINTER(JSWeakFactory)
- DECL_VERIFIER(JSWeakFactory)
- DECL_CAST(JSWeakFactory)
+ DECL_PRINTER(JSFinalizationGroup)
+ DECL_VERIFIER(JSFinalizationGroup)
+ DECL_CAST(JSFinalizationGroup)
DECL_ACCESSORS(native_context, NativeContext)
DECL_ACCESSORS(cleanup, Object)
+
DECL_ACCESSORS(active_cells, Object)
DECL_ACCESSORS(cleared_cells, Object)
+ DECL_ACCESSORS(key_map, Object)
- // For storing a list of JSWeakFactory objects in NativeContext.
+ // For storing a list of JSFinalizationGroup objects in NativeContext.
DECL_ACCESSORS(next, Object)
DECL_INT_ACCESSORS(flags)
- // Adds a newly constructed JSWeakCell object into this JSWeakFactory.
- inline void AddWeakCell(JSWeakCell weak_cell);
+ inline static void Register(Handle<JSFinalizationGroup> finalization_group,
+ Handle<JSReceiver> target,
+ Handle<Object> holdings, Handle<Object> key,
+ Isolate* isolate);
+ inline static void Unregister(Handle<JSFinalizationGroup> finalization_group,
+ Handle<Object> key, Isolate* isolate);
// Returns true if the cleared_cells list is non-empty.
inline bool NeedsCleanup() const;
@@ -44,76 +50,92 @@ class JSWeakFactory : public JSObject {
inline bool scheduled_for_cleanup() const;
inline void set_scheduled_for_cleanup(bool scheduled_for_cleanup);
- // Get and remove the first cleared JSWeakCell from the cleared_cells
- // list. (Assumes there is one.)
- inline JSWeakCell PopClearedCell(Isolate* isolate);
+ // Remove the first cleared WeakCell from the cleared_cells
+ // list (assumes there is one) and return its holdings.
+ inline static Object PopClearedCellHoldings(
+ Handle<JSFinalizationGroup> finalization_group, Isolate* isolate);
// Constructs an iterator for the WeakCells in the cleared_cells list and
// calls the user's cleanup function.
- static void Cleanup(Handle<JSWeakFactory> weak_factory, Isolate* isolate);
+ static void Cleanup(Handle<JSFinalizationGroup> finalization_group,
+ Isolate* isolate);
// Layout description.
-#define JS_WEAK_FACTORY_FIELDS(V) \
- V(kNativeContextOffset, kTaggedSize) \
- V(kCleanupOffset, kTaggedSize) \
- V(kActiveCellsOffset, kTaggedSize) \
- V(kClearedCellsOffset, kTaggedSize) \
- V(kNextOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Header size. */ \
+#define JS_FINALIZATION_GROUP_FIELDS(V) \
+ V(kNativeContextOffset, kTaggedSize) \
+ V(kCleanupOffset, kTaggedSize) \
+ V(kActiveCellsOffset, kTaggedSize) \
+ V(kClearedCellsOffset, kTaggedSize) \
+ V(kKeyMapOffset, kTaggedSize) \
+ V(kNextOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ /* Header size. */ \
V(kSize, 0)
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_WEAK_FACTORY_FIELDS)
-#undef JS_WEAK_FACTORY_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_FINALIZATION_GROUP_FIELDS)
+#undef JS_FINALIZATION_GROUP_FIELDS
// Bitfields in flags.
class ScheduledForCleanupField : public BitField<bool, 0, 1> {};
- OBJECT_CONSTRUCTORS(JSWeakFactory, JSObject);
+ OBJECT_CONSTRUCTORS(JSFinalizationGroup, JSObject);
};
-// WeakCell object from the JS Weak Refs spec proposal.
-class JSWeakCell : public JSObject {
+// Internal object for storing weak references in JSFinalizationGroup.
+class WeakCell : public HeapObject {
public:
- DECL_PRINTER(JSWeakCell)
- DECL_VERIFIER(JSWeakCell)
- DECL_CAST(JSWeakCell)
+ DECL_PRINTER(WeakCell)
+ DECL_VERIFIER(WeakCell)
+ DECL_CAST(WeakCell)
- DECL_ACCESSORS(factory, Object)
- DECL_ACCESSORS(target, Object)
+ DECL_ACCESSORS(finalization_group, Object)
+ DECL_ACCESSORS(target, HeapObject)
DECL_ACCESSORS(holdings, Object)
- // For storing doubly linked lists of JSWeakCells in JSWeakFactory.
+ // For storing doubly linked lists of WeakCells in JSFinalizationGroup's
+ // "active_cells" and "cleared_cells" lists.
DECL_ACCESSORS(prev, Object)
DECL_ACCESSORS(next, Object)
+ // For storing doubly linked lists of WeakCells per key in
+ // JSFinalizationGroup's key-based hashmap. WeakCell also needs to know its
+ // key, so that we can remove the key from the key_map when we remove the last
+ // WeakCell associated with it.
+ DECL_ACCESSORS(key, Object)
+ DECL_ACCESSORS(key_list_prev, Object)
+ DECL_ACCESSORS(key_list_next, Object)
+
// Layout description.
-#define JS_WEAK_CELL_FIELDS(V) \
- V(kFactoryOffset, kTaggedSize) \
- V(kTargetOffset, kTaggedSize) \
- V(kHoldingsOffset, kTaggedSize) \
- V(kPrevOffset, kTaggedSize) \
- V(kNextOffset, kTaggedSize) \
- /* Header size. */ \
+#define WEAK_CELL_FIELDS(V) \
+ V(kFinalizationGroupOffset, kTaggedSize) \
+ V(kTargetOffset, kTaggedSize) \
+ V(kHoldingsOffset, kTaggedSize) \
+ V(kPrevOffset, kTaggedSize) \
+ V(kNextOffset, kTaggedSize) \
+ V(kKeyOffset, kTaggedSize) \
+ V(kKeyListPrevOffset, kTaggedSize) \
+ V(kKeyListNextOffset, kTaggedSize) \
+ /* Header size. */ \
V(kSize, 0)
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_WEAK_CELL_FIELDS)
-#undef JS_WEAK_CELL_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WEAK_CELL_FIELDS)
+#undef WEAK_CELL_FIELDS
class BodyDescriptor;
- // Nullify is called during GC and it modifies the pointers in JSWeakCell and
- // JSWeakFactory. Thus we need to tell the GC about the modified slots via the
- // gc_notify_updated_slot function. The normal write barrier is not enough,
- // since it's disabled before GC.
+ // Nullify is called during GC and it modifies the pointers in WeakCell and
+ // JSFinalizationGroup. Thus we need to tell the GC about the modified slots
+ // via the gc_notify_updated_slot function. The normal write barrier is not
+ // enough, since it's disabled before GC.
inline void Nullify(
Isolate* isolate,
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot);
- inline void Clear(Isolate* isolate);
+ inline void RemoveFromFinalizationGroupCells(Isolate* isolate);
- OBJECT_CONSTRUCTORS(JSWeakCell, JSObject);
+ OBJECT_CONSTRUCTORS(WeakCell, HeapObject);
};
class JSWeakRef : public JSObject {
@@ -122,56 +144,62 @@ class JSWeakRef : public JSObject {
DECL_VERIFIER(JSWeakRef)
DECL_CAST(JSWeakRef)
- DECL_ACCESSORS(target, Object)
+ DECL_ACCESSORS(target, HeapObject)
+
+// Layout description.
+#define JS_WEAK_REF_FIELDS(V) \
+ V(kTargetOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
- static const int kTargetOffset = JSObject::kHeaderSize;
- static const int kSize = kTargetOffset + kPointerSize;
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_WEAK_REF_FIELDS)
+#undef JS_WEAK_REF_FIELDS
class BodyDescriptor;
OBJECT_CONSTRUCTORS(JSWeakRef, JSObject);
};
-class WeakFactoryCleanupJobTask : public Microtask {
+class FinalizationGroupCleanupJobTask : public Microtask {
public:
- DECL_ACCESSORS(factory, JSWeakFactory)
+ DECL_ACCESSORS(finalization_group, JSFinalizationGroup)
- DECL_CAST(WeakFactoryCleanupJobTask)
- DECL_VERIFIER(WeakFactoryCleanupJobTask)
- DECL_PRINTER(WeakFactoryCleanupJobTask)
+ DECL_CAST(FinalizationGroupCleanupJobTask)
+ DECL_VERIFIER(FinalizationGroupCleanupJobTask)
+ DECL_PRINTER(FinalizationGroupCleanupJobTask)
// Layout description.
-#define WEAK_FACTORY_CLEANUP_JOB_TASK_FIELDS(V) \
- V(kFactoryOffset, kTaggedSize) \
- /* Total size. */ \
+#define FINALIZATION_GROUP_CLEANUP_JOB_TASK_FIELDS(V) \
+ V(kFinalizationGroupOffset, kTaggedSize) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize,
- WEAK_FACTORY_CLEANUP_JOB_TASK_FIELDS)
-#undef WEAK_FACTORY_CLEANUP_JOB_TASK_FIELDS
+ FINALIZATION_GROUP_CLEANUP_JOB_TASK_FIELDS)
+#undef FINALIZATION_GROUP_CLEANUP_JOB_TASK_FIELDS
- OBJECT_CONSTRUCTORS(WeakFactoryCleanupJobTask, Microtask)
+ OBJECT_CONSTRUCTORS(FinalizationGroupCleanupJobTask, Microtask);
};
-class JSWeakFactoryCleanupIterator : public JSObject {
+class JSFinalizationGroupCleanupIterator : public JSObject {
public:
- DECL_PRINTER(JSWeakFactoryCleanupIterator)
- DECL_VERIFIER(JSWeakFactoryCleanupIterator)
- DECL_CAST(JSWeakFactoryCleanupIterator)
+ DECL_PRINTER(JSFinalizationGroupCleanupIterator)
+ DECL_VERIFIER(JSFinalizationGroupCleanupIterator)
+ DECL_CAST(JSFinalizationGroupCleanupIterator)
- DECL_ACCESSORS(factory, JSWeakFactory)
+ DECL_ACCESSORS(finalization_group, JSFinalizationGroup)
// Layout description.
-#define JS_WEAK_FACTORY_CLEANUP_ITERATOR_FIELDS(V) \
- V(kFactoryOffset, kTaggedSize) \
- /* Header size. */ \
+#define JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS(V) \
+ V(kFinalizationGroupOffset, kTaggedSize) \
+ /* Header size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_WEAK_FACTORY_CLEANUP_ITERATOR_FIELDS)
-#undef JS_WEAK_FACTORY_CLEANUP_ITERATOR_FIELDS
+ JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS)
+#undef JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS
- OBJECT_CONSTRUCTORS(JSWeakFactoryCleanupIterator, JSObject);
+ OBJECT_CONSTRUCTORS(JSFinalizationGroupCleanupIterator, JSObject);
};
} // namespace internal
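Editorial note: the new `key_map` declared above lets `Unregister` drop every cell registered under a key in one step. A sketch of that bookkeeping under loose assumptions — `std::unordered_map` plus `std::list` stand in for V8's `ObjectHashTable` and the intrusive `key_list_prev`/`key_list_next` links, and holdings are plain ints:

```cpp
#include <iostream>
#include <list>
#include <string>
#include <unordered_map>

struct Registry {
  std::unordered_map<std::string, std::list<int>> key_map;

  void Register(const std::string& key, int holdings) {
    key_map[key].push_front(holdings);  // V8 also prepends new cells
  }

  void Unregister(const std::string& key) {
    key_map.erase(key);  // drops every cell registered under this key
  }
};

int main() {
  Registry r;
  r.Register("k", 1);
  r.Register("k", 2);
  r.Register("other", 3);
  r.Unregister("k");
  std::cout << r.key_map.size() << "\n";  // 1: only "other" remains
}
```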
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index dadbd0e363..fafbb17f88 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -17,8 +17,10 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(ObjectBoilerplateDescription, FixedArray)
+CAST_ACCESSOR(ObjectBoilerplateDescription)
+
SMI_ACCESSORS(ObjectBoilerplateDescription, flags,
- FixedArray::OffsetOfElementAt(kLiteralTypeOffset));
+ FixedArray::OffsetOfElementAt(kLiteralTypeOffset))
OBJECT_CONSTRUCTORS_IMPL(ClassBoilerplate, FixedArray)
CAST_ACCESSOR(ClassBoilerplate)
@@ -30,34 +32,34 @@ BIT_FIELD_ACCESSORS(ClassBoilerplate, flags, arguments_count,
ClassBoilerplate::Flags::ArgumentsCountBits)
SMI_ACCESSORS(ClassBoilerplate, flags,
- FixedArray::OffsetOfElementAt(kFlagsIndex));
+ FixedArray::OffsetOfElementAt(kFlagsIndex))
ACCESSORS(ClassBoilerplate, static_properties_template, Object,
- FixedArray::OffsetOfElementAt(kClassPropertiesTemplateIndex));
+ FixedArray::OffsetOfElementAt(kClassPropertiesTemplateIndex))
ACCESSORS(ClassBoilerplate, static_elements_template, Object,
- FixedArray::OffsetOfElementAt(kClassElementsTemplateIndex));
+ FixedArray::OffsetOfElementAt(kClassElementsTemplateIndex))
ACCESSORS(ClassBoilerplate, static_computed_properties, FixedArray,
- FixedArray::OffsetOfElementAt(kClassComputedPropertiesIndex));
+ FixedArray::OffsetOfElementAt(kClassComputedPropertiesIndex))
ACCESSORS(ClassBoilerplate, instance_properties_template, Object,
- FixedArray::OffsetOfElementAt(kPrototypePropertiesTemplateIndex));
+ FixedArray::OffsetOfElementAt(kPrototypePropertiesTemplateIndex))
ACCESSORS(ClassBoilerplate, instance_elements_template, Object,
- FixedArray::OffsetOfElementAt(kPrototypeElementsTemplateIndex));
+ FixedArray::OffsetOfElementAt(kPrototypeElementsTemplateIndex))
ACCESSORS(ClassBoilerplate, instance_computed_properties, FixedArray,
- FixedArray::OffsetOfElementAt(kPrototypeComputedPropertiesIndex));
+ FixedArray::OffsetOfElementAt(kPrototypeComputedPropertiesIndex))
OBJECT_CONSTRUCTORS_IMPL(ArrayBoilerplateDescription, Struct)
CAST_ACCESSOR(ArrayBoilerplateDescription)
-SMI_ACCESSORS(ArrayBoilerplateDescription, flags, kFlagsOffset);
+SMI_ACCESSORS(ArrayBoilerplateDescription, flags, kFlagsOffset)
ACCESSORS(ArrayBoilerplateDescription, constant_elements, FixedArrayBase,
- kConstantElementsOffset);
+ kConstantElementsOffset)
ElementsKind ArrayBoilerplateDescription::elements_kind() const {
return static_cast<ElementsKind>(flags());
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index b868b8c7fc..024d6845b9 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -493,10 +493,8 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
attribs);
}
{
- Handle<Smi> start_position(Smi::FromInt(expr->start_position()), isolate);
- Handle<Smi> end_position(Smi::FromInt(expr->end_position()), isolate);
- Handle<Tuple2> class_positions =
- factory->NewTuple2(start_position, end_position, NOT_TENURED);
+ Handle<ClassPositions> class_positions = factory->NewClassPositions(
+ expr->start_position(), expr->end_position());
static_desc.AddConstant(isolate, factory->class_positions_symbol(),
class_positions, DONT_ENUM);
}
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 178306162d..35ae98a05b 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -49,7 +49,7 @@ class ObjectBoilerplateDescription : public FixedArray {
private:
bool has_number_of_properties() const;
- OBJECT_CONSTRUCTORS(ObjectBoilerplateDescription, FixedArray)
+ OBJECT_CONSTRUCTORS(ObjectBoilerplateDescription, FixedArray);
};
class ArrayBoilerplateDescription : public Struct {
@@ -68,15 +68,9 @@ class ArrayBoilerplateDescription : public Struct {
DECL_VERIFIER(ArrayBoilerplateDescription)
void BriefPrintDetails(std::ostream& os);
-#define ARRAY_BOILERPLATE_DESCRIPTION_FIELDS(V) \
- V(kFlagsOffset, kTaggedSize) \
- V(kConstantElementsOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- ARRAY_BOILERPLATE_DESCRIPTION_FIELDS)
-#undef ARRAY_BOILERPLATE_DESCRIPTION_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ HeapObject::kHeaderSize,
+ TORQUE_GENERATED_ARRAY_BOILERPLATE_DESCRIPTION_FIELDS)
private:
DECL_INT_ACCESSORS(flags)
@@ -155,7 +149,7 @@ class ClassBoilerplate : public FixedArray {
private:
DECL_INT_ACCESSORS(flags)
- OBJECT_CONSTRUCTORS(ClassBoilerplate, FixedArray)
+ OBJECT_CONSTRUCTORS(ClassBoilerplate, FixedArray);
};
} // namespace internal
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 338ac78ec4..c91902f130 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -8,12 +8,12 @@
#include "src/objects/map.h"
#include "src/field-type.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/layout-descriptor-inl.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/cell-inl.h"
-#include "src/objects/descriptor-array.h"
+#include "src/objects/descriptor-array-inl.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/prototype-info-inl.h"
#include "src/objects/shared-function-info.h"
@@ -102,19 +102,9 @@ InterceptorInfo Map::GetIndexedInterceptor() {
return InterceptorInfo::cast(info->GetIndexedPropertyHandler());
}
-bool Map::IsInplaceGeneralizableField(PropertyConstness constness,
- Representation representation,
- FieldType field_type) {
- if (FLAG_track_constant_fields && FLAG_modify_map_inplace &&
- (constness == PropertyConstness::kConst)) {
- // VariableMode::kConst -> PropertyConstness::kMutable field generalization
- // may happen in-place.
- return true;
- }
- if (representation.IsHeapObject() && !field_type->IsAny()) {
- return true;
- }
- return false;
+bool Map::IsMostGeneralFieldType(Representation representation,
+ FieldType field_type) {
+ return !representation.IsHeapObject() || field_type->IsAny();
}
bool Map::CanHaveFastTransitionableElementsKind(InstanceType instance_type) {
@@ -135,13 +125,7 @@ void Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
// kind transitions because they are inserted into the transition tree
// before field transitions. In order to avoid complexity of handling
// such a case we ensure that all maps with transitionable elements kinds
- // do not have fields that can be generalized in-place (without creation
- // of a new map).
- if (FLAG_track_constant_fields && FLAG_modify_map_inplace) {
- // The constness is either already PropertyConstness::kMutable or should
- // become PropertyConstness::kMutable if it was VariableMode::kConst.
- *constness = PropertyConstness::kMutable;
- }
+ // have the most general field type.
if (representation->IsHeapObject()) {
// The field type is either already Any or should become Any if it was
// something else.
@@ -213,32 +197,33 @@ FixedArrayBase Map::GetInitialElements() const {
} else if (has_fast_sloppy_arguments_elements()) {
result = GetReadOnlyRoots().empty_sloppy_arguments_elements();
} else if (has_fixed_typed_array_elements()) {
- result = GetReadOnlyRoots().EmptyFixedTypedArrayForMap(*this);
+ result =
+ GetReadOnlyRoots().EmptyFixedTypedArrayForTypedArray(elements_kind());
} else if (has_dictionary_elements()) {
result = GetReadOnlyRoots().empty_slow_element_dictionary();
} else {
UNREACHABLE();
}
- DCHECK(!Heap::InNewSpace(result));
+ DCHECK(!ObjectInYoungGeneration(result));
return result;
}
VisitorId Map::visitor_id() const {
return static_cast<VisitorId>(
- RELAXED_READ_BYTE_FIELD(this, kVisitorIdOffset));
+ RELAXED_READ_BYTE_FIELD(*this, kVisitorIdOffset));
}
void Map::set_visitor_id(VisitorId id) {
CHECK_LT(static_cast<unsigned>(id), 256);
- RELAXED_WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
+ RELAXED_WRITE_BYTE_FIELD(*this, kVisitorIdOffset, static_cast<byte>(id));
}
int Map::instance_size_in_words() const {
- return RELAXED_READ_BYTE_FIELD(this, kInstanceSizeInWordsOffset);
+ return RELAXED_READ_BYTE_FIELD(*this, kInstanceSizeInWordsOffset);
}
void Map::set_instance_size_in_words(int value) {
- RELAXED_WRITE_BYTE_FIELD(this, kInstanceSizeInWordsOffset,
+ RELAXED_WRITE_BYTE_FIELD(*this, kInstanceSizeInWordsOffset,
static_cast<byte>(value));
}
@@ -255,14 +240,14 @@ void Map::set_instance_size(int value) {
int Map::inobject_properties_start_or_constructor_function_index() const {
return RELAXED_READ_BYTE_FIELD(
- this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
+ *this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
}
void Map::set_inobject_properties_start_or_constructor_function_index(
int value) {
CHECK_LT(static_cast<unsigned>(value), 256);
RELAXED_WRITE_BYTE_FIELD(
- this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
+ *this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
static_cast<byte>(value));
}
@@ -305,11 +290,11 @@ Handle<Map> Map::AddMissingTransitionsForTesting(
InstanceType Map::instance_type() const {
return static_cast<InstanceType>(
- READ_UINT16_FIELD(this, kInstanceTypeOffset));
+ READ_UINT16_FIELD(*this, kInstanceTypeOffset));
}
void Map::set_instance_type(InstanceType value) {
- WRITE_UINT16_FIELD(this, kInstanceTypeOffset, value);
+ WRITE_UINT16_FIELD(*this, kInstanceTypeOffset, value);
}
int Map::UnusedPropertyFields() const {
@@ -338,12 +323,12 @@ int Map::UnusedInObjectProperties() const {
}
int Map::used_or_unused_instance_size_in_words() const {
- return RELAXED_READ_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset);
+ return RELAXED_READ_BYTE_FIELD(*this, kUsedOrUnusedInstanceSizeInWordsOffset);
}
void Map::set_used_or_unused_instance_size_in_words(int value) {
CHECK_LE(static_cast<unsigned>(value), 255);
- RELAXED_WRITE_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset,
+ RELAXED_WRITE_BYTE_FIELD(*this, kUsedOrUnusedInstanceSizeInWordsOffset,
static_cast<byte>(value));
}
@@ -431,16 +416,18 @@ void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
}
-byte Map::bit_field() const { return READ_BYTE_FIELD(this, kBitFieldOffset); }
+byte Map::bit_field() const { return READ_BYTE_FIELD(*this, kBitFieldOffset); }
void Map::set_bit_field(byte value) {
- WRITE_BYTE_FIELD(this, kBitFieldOffset, value);
+ WRITE_BYTE_FIELD(*this, kBitFieldOffset, value);
}
-byte Map::bit_field2() const { return READ_BYTE_FIELD(this, kBitField2Offset); }
+byte Map::bit_field2() const {
+ return READ_BYTE_FIELD(*this, kBitField2Offset);
+}
void Map::set_bit_field2(byte value) {
- WRITE_BYTE_FIELD(this, kBitField2Offset, value);
+ WRITE_BYTE_FIELD(*this, kBitField2Offset, value);
}
bool Map::is_abandoned_prototype_map() const {
@@ -565,7 +552,7 @@ bool Map::IsPrimitiveMap() const {
return instance_type() <= LAST_PRIMITIVE_TYPE;
}
-Object Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); }
+Object Map::prototype() const { return READ_FIELD(*this, kPrototypeOffset); }
void Map::set_prototype(Object value, WriteBarrierMode mode) {
DCHECK(value->IsNull() || value->IsJSReceiver());
@@ -631,14 +618,18 @@ void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors,
}
void Map::set_bit_field3(uint32_t bits) {
- if (kInt32Size != kTaggedSize) {
- RELAXED_WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
- }
- RELAXED_WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
+ RELAXED_WRITE_UINT32_FIELD(*this, kBitField3Offset, bits);
}
uint32_t Map::bit_field3() const {
- return RELAXED_READ_UINT32_FIELD(this, kBitField3Offset);
+ return RELAXED_READ_UINT32_FIELD(*this, kBitField3Offset);
+}
+
+void Map::clear_padding() {
+ if (FIELD_SIZE(kOptionalPaddingOffset) == 0) return;
+ DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
+ memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
+ FIELD_SIZE(kOptionalPaddingOffset));
}
LayoutDescriptor Map::GetLayoutDescriptor() const {
@@ -694,7 +685,7 @@ Map Map::ElementsTransitionMap() {
Object Map::prototype_info() const {
DCHECK(is_prototype_map());
- return READ_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset);
+ return READ_FIELD(*this, Map::kTransitionsOrPrototypeInfoOffset);
}
void Map::set_prototype_info(Object value, WriteBarrierMode mode) {
@@ -782,23 +773,23 @@ int Map::SlackForArraySize(int old_size, int size_limit) {
return Min(max_slack, old_size / 4);
}
+int Map::InstanceSizeFromSlack(int slack) const {
+ return instance_size() - slack * kTaggedSize;
+}
+
+OBJECT_CONSTRUCTORS_IMPL(NormalizedMapCache, WeakFixedArray)
+CAST_ACCESSOR(NormalizedMapCache)
NEVER_READ_ONLY_SPACE_IMPL(NormalizedMapCache)
int NormalizedMapCache::GetIndex(Handle<Map> map) {
return map->Hash() % NormalizedMapCache::kEntries;
}
-bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject obj) {
- if (!obj->IsWeakFixedArray()) return false;
- if (WeakFixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
+bool HeapObject::IsNormalizedMapCache() const {
+ if (!IsWeakFixedArray()) return false;
+ if (WeakFixedArray::cast(*this)->length() != NormalizedMapCache::kEntries) {
return false;
}
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- NormalizedMapCache cache = NormalizedMapCache::cast(obj);
- cache->NormalizedMapCacheVerify(cache->GetIsolate());
- }
-#endif
return true;
}
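Editorial note: the new `Map::clear_padding()` above zeroes the optional padding bytes so the object image is deterministic. A sketch of the idea with invented layout constants; on configurations without padding the size would be 0 and the call becomes a no-op, as in the real code:

```cpp
#include <cstring>
#include <iostream>

constexpr int kOptionalPaddingOffset = 12;  // invented for this sketch
constexpr int kOptionalPaddingSize = 4;     // 0 when no padding exists

struct FakeMap {
  unsigned char bytes[16];

  void clear_padding() {
    if (kOptionalPaddingSize == 0) return;  // nothing to clear
    std::memset(bytes + kOptionalPaddingOffset, 0, kOptionalPaddingSize);
  }
};

int main() {
  FakeMap m;
  std::memset(m.bytes, 0xAB, sizeof m.bytes);  // dirty the whole object
  m.clear_padding();
  std::cout << static_cast<int>(m.bytes[kOptionalPaddingOffset]) << "\n";  // 0
}
```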
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
new file mode 100644
index 0000000000..1e40d32650
--- /dev/null
+++ b/deps/v8/src/objects/map.cc
@@ -0,0 +1,2700 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/map.h"
+
+#include "src/bootstrapper.h"
+#include "src/counters-inl.h"
+#include "src/field-type.h"
+#include "src/frames.h"
+#include "src/handles-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/isolate.h"
+#include "src/layout-descriptor.h"
+#include "src/log.h"
+#include "src/map-updater.h"
+#include "src/maybe-handles.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/js-objects.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/oddball.h"
+#include "src/ostreams.h"
+#include "src/property.h"
+#include "src/transitions-inl.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+Map Map::GetPrototypeChainRootMap(Isolate* isolate) const {
+ DisallowHeapAllocation no_alloc;
+ if (IsJSReceiverMap()) {
+ return *this;
+ }
+ int constructor_function_index = GetConstructorFunctionIndex();
+ if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
+ Context native_context = isolate->context()->native_context();
+ JSFunction constructor_function =
+ JSFunction::cast(native_context->get(constructor_function_index));
+ return constructor_function->initial_map();
+ }
+ return ReadOnlyRoots(isolate).null_value()->map();
+}
+
+// static
+MaybeHandle<JSFunction> Map::GetConstructorFunction(
+ Handle<Map> map, Handle<Context> native_context) {
+ if (map->IsPrimitiveMap()) {
+ int const constructor_function_index = map->GetConstructorFunctionIndex();
+ if (constructor_function_index != kNoConstructorFunctionIndex) {
+ return handle(
+ JSFunction::cast(native_context->get(constructor_function_index)),
+ native_context->GetIsolate());
+ }
+ }
+ return MaybeHandle<JSFunction>();
+}
+
+void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
+ PropertyKind kind,
+ PropertyAttributes attributes) {
+ OFStream os(file);
+ os << "[reconfiguring]";
+ Name name = instance_descriptors()->GetKey(modify_index);
+ if (name->IsString()) {
+ String::cast(name)->PrintOn(file);
+ } else {
+ os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
+ }
+ os << ": " << (kind == kData ? "kData" : "ACCESSORS") << ", attrs: ";
+ os << attributes << " [";
+ JavaScriptFrame::PrintTop(isolate, file, false, true);
+ os << "]\n";
+}
+
+VisitorId Map::GetVisitorId(Map map) {
+ STATIC_ASSERT(kVisitorIdCount <= 256);
+
+ const int instance_type = map->instance_type();
+
+ if (instance_type < FIRST_NONSTRING_TYPE) {
+ switch (instance_type & kStringRepresentationMask) {
+ case kSeqStringTag:
+ if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
+ return kVisitSeqOneByteString;
+ } else {
+ return kVisitSeqTwoByteString;
+ }
+
+ case kConsStringTag:
+ if (IsShortcutCandidate(instance_type)) {
+ return kVisitShortcutCandidate;
+ } else {
+ return kVisitConsString;
+ }
+
+ case kSlicedStringTag:
+ return kVisitSlicedString;
+
+ case kExternalStringTag:
+ return kVisitDataObject;
+
+ case kThinStringTag:
+ return kVisitThinString;
+ }
+ UNREACHABLE();
+ }
+
+ switch (instance_type) {
+ case BYTE_ARRAY_TYPE:
+ return kVisitByteArray;
+
+ case BYTECODE_ARRAY_TYPE:
+ return kVisitBytecodeArray;
+
+ case FREE_SPACE_TYPE:
+ return kVisitFreeSpace;
+
+ case EMBEDDER_DATA_ARRAY_TYPE:
+ return kVisitEmbedderDataArray;
+
+ case FIXED_ARRAY_TYPE:
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case STRING_TABLE_TYPE:
+ case SCOPE_INFO_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
+ return kVisitFixedArray;
+
+ case AWAIT_CONTEXT_TYPE:
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
+ return kVisitContext;
+
+ case NATIVE_CONTEXT_TYPE:
+ return kVisitNativeContext;
+
+ case EPHEMERON_HASH_TABLE_TYPE:
+ return kVisitEphemeronHashTable;
+
+ case WEAK_FIXED_ARRAY_TYPE:
+ case WEAK_ARRAY_LIST_TYPE:
+ return kVisitWeakArray;
+
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return kVisitFixedDoubleArray;
+
+ case PROPERTY_ARRAY_TYPE:
+ return kVisitPropertyArray;
+
+ case FEEDBACK_CELL_TYPE:
+ return kVisitFeedbackCell;
+
+ case FEEDBACK_VECTOR_TYPE:
+ return kVisitFeedbackVector;
+
+ case ODDBALL_TYPE:
+ return kVisitOddball;
+
+ case MAP_TYPE:
+ return kVisitMap;
+
+ case CODE_TYPE:
+ return kVisitCode;
+
+ case CELL_TYPE:
+ return kVisitCell;
+
+ case PROPERTY_CELL_TYPE:
+ return kVisitPropertyCell;
+
+ case DESCRIPTOR_ARRAY_TYPE:
+ return kVisitDescriptorArray;
+
+ case TRANSITION_ARRAY_TYPE:
+ return kVisitTransitionArray;
+
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ return kVisitJSWeakCollection;
+
+ case CALL_HANDLER_INFO_TYPE:
+ return kVisitStruct;
+
+ case SHARED_FUNCTION_INFO_TYPE:
+ return kVisitSharedFunctionInfo;
+
+ case JS_PROXY_TYPE:
+ return kVisitStruct;
+
+ case SYMBOL_TYPE:
+ return kVisitSymbol;
+
+ case JS_ARRAY_BUFFER_TYPE:
+ return kVisitJSArrayBuffer;
+
+ case JS_DATA_VIEW_TYPE:
+ return kVisitJSDataView;
+
+ case JS_FUNCTION_TYPE:
+ return kVisitJSFunction;
+
+ case JS_TYPED_ARRAY_TYPE:
+ return kVisitJSTypedArray;
+
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ return kVisitSmallOrderedHashMap;
+
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ return kVisitSmallOrderedHashSet;
+
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ return kVisitSmallOrderedNameDictionary;
+
+ case CODE_DATA_CONTAINER_TYPE:
+ return kVisitCodeDataContainer;
+
+ case WASM_INSTANCE_TYPE:
+ return kVisitWasmInstanceObject;
+
+ case PREPARSE_DATA_TYPE:
+ return kVisitPreparseData;
+
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
+ return kVisitUncompiledDataWithoutPreparseData;
+
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
+ return kVisitUncompiledDataWithPreparseData;
+
+ case JS_OBJECT_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_ARGUMENTS_TYPE:
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_NAMESPACE_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_ARRAY_ITERATOR_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE:
+ case JS_STRING_ITERATOR_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_REGEXP_STRING_ITERATOR_TYPE:
+ case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
+ case JS_FINALIZATION_GROUP_TYPE:
+#ifdef V8_INTL_SUPPORT
+ case JS_INTL_V8_BREAK_ITERATOR_TYPE:
+ case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_DATE_TIME_FORMAT_TYPE:
+ case JS_INTL_LIST_FORMAT_TYPE:
+ case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_NUMBER_FORMAT_TYPE:
+ case JS_INTL_PLURAL_RULES_TYPE:
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
+ case JS_INTL_SEGMENTER_TYPE:
+#endif // V8_INTL_SUPPORT
+ case WASM_EXCEPTION_TYPE:
+ case WASM_GLOBAL_TYPE:
+ case WASM_MEMORY_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_TABLE_TYPE:
+ case JS_BOUND_FUNCTION_TYPE: {
+ const bool has_raw_data_fields =
+ (FLAG_unbox_double_fields && !map->HasFastPointerLayout()) ||
+ (COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0);
+ return has_raw_data_fields ? kVisitJSObject : kVisitJSObjectFast;
+ }
+ case JS_API_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ return kVisitJSApiObject;
+
+ case JS_WEAK_REF_TYPE:
+ return kVisitJSWeakRef;
+
+ case WEAK_CELL_TYPE:
+ return kVisitWeakCell;
+
+ case FILLER_TYPE:
+ case FOREIGN_TYPE:
+ case HEAP_NUMBER_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case FEEDBACK_METADATA_TYPE:
+ return kVisitDataObject;
+
+ case BIGINT_TYPE:
+ return kVisitBigInt;
+
+ case FIXED_UINT8_ARRAY_TYPE:
+ case FIXED_INT8_ARRAY_TYPE:
+ case FIXED_UINT16_ARRAY_TYPE:
+ case FIXED_INT16_ARRAY_TYPE:
+ case FIXED_UINT32_ARRAY_TYPE:
+ case FIXED_INT32_ARRAY_TYPE:
+ case FIXED_FLOAT32_ARRAY_TYPE:
+ case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
+ case FIXED_BIGUINT64_ARRAY_TYPE:
+ case FIXED_BIGINT64_ARRAY_TYPE:
+ return kVisitFixedTypedArrayBase;
+
+ case FIXED_FLOAT64_ARRAY_TYPE:
+ return kVisitFixedFloat64Array;
+
+ case ALLOCATION_SITE_TYPE:
+ return kVisitAllocationSite;
+
+#define MAKE_STRUCT_CASE(TYPE, Name, name) case TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ if (instance_type == PROTOTYPE_INFO_TYPE) {
+ return kVisitPrototypeInfo;
+ }
+ return kVisitStruct;
+
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
+ return kVisitDataHandler;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Map::PrintGeneralization(
+ Isolate* isolate, FILE* file, const char* reason, int modify_index,
+ int split, int descriptors, bool descriptor_to_field,
+ Representation old_representation, Representation new_representation,
+ MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
+ MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value) {
+ OFStream os(file);
+ os << "[generalizing]";
+ Name name = instance_descriptors()->GetKey(modify_index);
+ if (name->IsString()) {
+ String::cast(name)->PrintOn(file);
+ } else {
+ os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
+ }
+ os << ":";
+ if (descriptor_to_field) {
+ os << "c";
+ } else {
+ os << old_representation.Mnemonic() << "{";
+ if (old_field_type.is_null()) {
+ os << Brief(*(old_value.ToHandleChecked()));
+ } else {
+ old_field_type.ToHandleChecked()->PrintTo(os);
+ }
+ os << "}";
+ }
+ os << "->" << new_representation.Mnemonic() << "{";
+ if (new_field_type.is_null()) {
+ os << Brief(*(new_value.ToHandleChecked()));
+ } else {
+ new_field_type.ToHandleChecked()->PrintTo(os);
+ }
+ os << "} (";
+ if (strlen(reason) > 0) {
+ os << reason;
+ } else {
+ os << "+" << (descriptors - split) << " maps";
+ }
+ os << ") [";
+ JavaScriptFrame::PrintTop(isolate, file, false, true);
+ os << "]\n";
+}
+
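+// Class field types hold the class's map weakly, so that the map can still be
+// collected; a cleared weak reference reads back as FieldType::None (see
+// UnwrapFieldType below).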
+// static
+MaybeObjectHandle Map::WrapFieldType(Isolate* isolate, Handle<FieldType> type) {
+ if (type->IsClass()) {
+ return MaybeObjectHandle::Weak(type->AsClass(), isolate);
+ }
+ return MaybeObjectHandle(type);
+}
+
+// static
+FieldType Map::UnwrapFieldType(MaybeObject wrapped_type) {
+ if (wrapped_type->IsCleared()) {
+ return FieldType::None();
+ }
+ HeapObject heap_object;
+ if (wrapped_type->GetHeapObjectIfWeak(&heap_object)) {
+ return FieldType::cast(heap_object);
+ }
+ return wrapped_type->cast<FieldType>();
+}
+
+MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
+ Handle<Name> name, Handle<FieldType> type,
+ PropertyAttributes attributes,
+ PropertyConstness constness,
+ Representation representation,
+ TransitionFlag flag) {
+ DCHECK(DescriptorArray::kNotFound ==
+ map->instance_descriptors()->Search(*name,
+ map->NumberOfOwnDescriptors()));
+
+ // Ensure the descriptor array does not get too big.
+ if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) {
+ return MaybeHandle<Map>();
+ }
+
+  // Compute the index for the new field.
+ int index = map->NextFreePropertyIndex();
+
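+  // Properties of context extension objects are not tracked precisely: their
+  // fields stay mutable and tagged, with field type Any.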
+ if (map->instance_type() == JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
+ constness = PropertyConstness::kMutable;
+ representation = Representation::Tagged();
+ type = FieldType::Any(isolate);
+ } else {
+ Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
+ isolate, map->instance_type(), &constness, &representation, &type);
+ }
+
+ MaybeObjectHandle wrapped_type = WrapFieldType(isolate, type);
+
+ DCHECK_IMPLIES(!FLAG_track_constant_fields,
+ constness == PropertyConstness::kMutable);
+ Descriptor d = Descriptor::DataField(name, index, attributes, constness,
+ representation, wrapped_type);
+ Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
+ new_map->AccountAddedPropertyField();
+ return new_map;
+}
+
+MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
+ // Ensure the descriptor array does not get too big.
+ if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) {
+ return MaybeHandle<Map>();
+ }
+
+ if (FLAG_track_constant_fields) {
+ Representation representation = constant->OptimalRepresentation();
+ Handle<FieldType> type = constant->OptimalType(isolate, representation);
+ return CopyWithField(isolate, map, name, type, attributes,
+ PropertyConstness::kConst, representation, flag);
+ } else {
+ // Allocate new instance descriptors with (name, constant) added.
+ Descriptor d =
+ Descriptor::DataConstant(isolate, name, 0, constant, attributes);
+ Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
+ return new_map;
+ }
+}
+
+bool Map::TransitionRemovesTaggedField(Map target) const {
+ int inobject = NumberOfFields();
+ int target_inobject = target->NumberOfFields();
+ for (int i = target_inobject; i < inobject; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(*this, i);
+ if (!IsUnboxedDoubleField(index)) return true;
+ }
+ return false;
+}
+
+bool Map::TransitionChangesTaggedFieldToUntaggedField(Map target) const {
+ int inobject = NumberOfFields();
+ int target_inobject = target->NumberOfFields();
+ int limit = Min(inobject, target_inobject);
+ for (int i = 0; i < limit; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(target, i);
+ if (!IsUnboxedDoubleField(index) && target->IsUnboxedDoubleField(index)) {
+ return true;
+ }
+ }
+ return false;
+}
+
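+// A transition must be synchronized with the GC if it removes a tagged field
+// or turns a tagged field into an untagged one, since a concurrent marker
+// could otherwise interpret the raw double data as a tagged pointer.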
+bool Map::TransitionRequiresSynchronizationWithGC(Map target) const {
+ return TransitionRemovesTaggedField(target) ||
+ TransitionChangesTaggedFieldToUntaggedField(target);
+}
+
+bool Map::InstancesNeedRewriting(Map target) const {
+ int target_number_of_fields = target->NumberOfFields();
+ int target_inobject = target->GetInObjectProperties();
+ int target_unused = target->UnusedPropertyFields();
+ int old_number_of_fields;
+
+ return InstancesNeedRewriting(target, target_number_of_fields,
+ target_inobject, target_unused,
+ &old_number_of_fields);
+}
+
+bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
+ int target_inobject, int target_unused,
+ int* old_number_of_fields) const {
+ // If fields were added (or removed), rewrite the instance.
+ *old_number_of_fields = NumberOfFields();
+ DCHECK(target_number_of_fields >= *old_number_of_fields);
+ if (target_number_of_fields != *old_number_of_fields) return true;
+
+ // If smi descriptors were replaced by double descriptors, rewrite.
+ DescriptorArray old_desc = instance_descriptors();
+ DescriptorArray new_desc = target->instance_descriptors();
+ int limit = NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ if (new_desc->GetDetails(i).representation().IsDouble() !=
+ old_desc->GetDetails(i).representation().IsDouble()) {
+ return true;
+ }
+ }
+
+ // If no fields were added, and no inobject properties were removed, setting
+ // the map is sufficient.
+ if (target_inobject == GetInObjectProperties()) return false;
+ // In-object slack tracking may have reduced the object size of the new map.
+ // In that case, succeed if all existing fields were inobject, and they still
+ // fit within the new inobject size.
+ DCHECK(target_inobject < GetInObjectProperties());
+ if (target_number_of_fields <= target_inobject) {
+ DCHECK(target_number_of_fields + target_unused == target_inobject);
+ return false;
+ }
+ // Otherwise, properties will need to be moved to the backing store.
+ return true;
+}
+
+int Map::NumberOfFields() const {
+ DescriptorArray descriptors = instance_descriptors();
+ int result = 0;
+ for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
+ if (descriptors->GetDetails(i).location() == kField) result++;
+ }
+ return result;
+}
+
+Map::FieldCounts Map::GetFieldCounts() const {
+ DescriptorArray descriptors = instance_descriptors();
+ int mutable_count = 0;
+ int const_count = 0;
+ for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() == kField) {
+ switch (details.constness()) {
+ case PropertyConstness::kMutable:
+ mutable_count++;
+ break;
+ case PropertyConstness::kConst:
+ const_count++;
+ break;
+ }
+ }
+ }
+ return FieldCounts(mutable_count, const_count);
+}
+
+bool Map::HasOutOfObjectProperties() const {
+ return GetInObjectProperties() < NumberOfFields();
+}
+
+Handle<Map> Map::CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
+ ElementsKind elements_kind,
+ int modify_index, PropertyKind kind,
+ PropertyAttributes attributes,
+ const char* reason) {
+ Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> descriptors = DescriptorArray::CopyUpTo(
+ isolate, old_descriptors, number_of_own_descriptors);
+ descriptors->GeneralizeAllFields();
+
+ Handle<LayoutDescriptor> new_layout_descriptor(
+ LayoutDescriptor::FastPointerLayout(), isolate);
+ Handle<Map> new_map = CopyReplaceDescriptors(
+ isolate, map, descriptors, new_layout_descriptor, OMIT_TRANSITION,
+ MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
+
+ // Unless the instance is being migrated, ensure that modify_index is a field.
+ if (modify_index >= 0) {
+ PropertyDetails details = descriptors->GetDetails(modify_index);
+ if (details.constness() != PropertyConstness::kMutable ||
+ details.location() != kField || details.attributes() != attributes) {
+ int field_index = details.location() == kField
+ ? details.field_index()
+ : new_map->NumberOfFields();
+ Descriptor d = Descriptor::DataField(
+ isolate, handle(descriptors->GetKey(modify_index), isolate),
+ field_index, attributes, Representation::Tagged());
+ descriptors->Replace(modify_index, &d);
+ if (details.location() != kField) {
+ new_map->AccountAddedPropertyField();
+ }
+ } else {
+ DCHECK(details.attributes() == attributes);
+ }
+
+ if (FLAG_trace_generalization) {
+ MaybeHandle<FieldType> field_type = FieldType::None(isolate);
+ if (details.location() == kField) {
+ field_type = handle(
+ map->instance_descriptors()->GetFieldType(modify_index), isolate);
+ }
+ map->PrintGeneralization(
+ isolate, stdout, reason, modify_index,
+ new_map->NumberOfOwnDescriptors(), new_map->NumberOfOwnDescriptors(),
+ details.location() == kDescriptor, details.representation(),
+ Representation::Tagged(), field_type, MaybeHandle<Object>(),
+ FieldType::Any(isolate), MaybeHandle<Object>());
+ }
+ }
+ new_map->set_elements_kind(elements_kind);
+ return new_map;
+}
+
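+// Recursively deprecates the whole transition tree rooted at this map and
+// deoptimizes all code that depends on these transitions.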
+void Map::DeprecateTransitionTree(Isolate* isolate) {
+ if (is_deprecated()) return;
+ DisallowHeapAllocation no_gc;
+ TransitionsAccessor transitions(isolate, *this, &no_gc);
+ int num_transitions = transitions.NumberOfTransitions();
+ for (int i = 0; i < num_transitions; ++i) {
+ transitions.GetTarget(i)->DeprecateTransitionTree(isolate);
+ }
+ DCHECK(!constructor_or_backpointer()->IsFunctionTemplateInfo());
+ set_is_deprecated(true);
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("Deprecate", *this, Map()));
+ }
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kTransitionGroup);
+ NotifyLeafMapLayoutChange(isolate);
+}
+
+// Installs |new_descriptors| over the current instance_descriptors to ensure
+// proper sharing of descriptor arrays.
+void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
+ LayoutDescriptor new_layout_descriptor) {
+ // Don't overwrite the empty descriptor array or initial map's descriptors.
+ if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined(isolate)) {
+ return;
+ }
+
+ DescriptorArray to_replace = instance_descriptors();
+  // Replace the descriptor array by new_descriptors in all maps that share
+  // it. The old descriptor array will not be trimmed by the mark-compactor,
+  // so we need to mark all of its elements.
+ Map current = *this;
+ MarkingBarrierForDescriptorArray(isolate->heap(), current, to_replace,
+ to_replace->number_of_descriptors());
+ while (current->instance_descriptors() == to_replace) {
+ Object next = current->GetBackPointer();
+ if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
+ current->SetEnumLength(kInvalidEnumCacheSentinel);
+ current->UpdateDescriptors(isolate, new_descriptors, new_layout_descriptor,
+ current->NumberOfOwnDescriptors());
+ current = Map::cast(next);
+ }
+ set_owns_descriptors(false);
+}
+
+Map Map::FindRootMap(Isolate* isolate) const {
+ Map result = *this;
+ while (true) {
+ Object back = result->GetBackPointer();
+ if (back->IsUndefined(isolate)) {
+      // The initial map always owns its descriptors and doesn't have unused
+      // entries in the descriptor array.
+ DCHECK(result->owns_descriptors());
+ DCHECK_EQ(result->NumberOfOwnDescriptors(),
+ result->instance_descriptors()->number_of_descriptors());
+ return result;
+ }
+ result = Map::cast(back);
+ }
+}
+
+Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
+ DisallowHeapAllocation no_allocation;
+ DCHECK_EQ(kField, instance_descriptors()->GetDetails(descriptor).location());
+ Map result = *this;
+ while (true) {
+ Object back = result->GetBackPointer();
+ if (back->IsUndefined(isolate)) break;
+ const Map parent = Map::cast(back);
+ if (parent->NumberOfOwnDescriptors() <= descriptor) break;
+ result = parent;
+ }
+ return result;
+}
+
+void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ const MaybeObjectHandle& new_wrapped_type) {
+ DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
+ // We store raw pointers in the queue, so no allocations are allowed.
+ DisallowHeapAllocation no_allocation;
+ PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
+ if (details.location() != kField) return;
+ DCHECK_EQ(kData, details.kind());
+
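+  // Walk the transition tree below this map breadth-first, updating the field
+  // type entry in the descriptor array shared by the visited maps.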
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ ZoneQueue<Map> backlog(&zone);
+ backlog.push(*this);
+
+ while (!backlog.empty()) {
+ Map current = backlog.front();
+ backlog.pop();
+
+ TransitionsAccessor transitions(isolate, current, &no_allocation);
+ int num_transitions = transitions.NumberOfTransitions();
+ for (int i = 0; i < num_transitions; ++i) {
+ Map target = transitions.GetTarget(i);
+ backlog.push(target);
+ }
+ DescriptorArray descriptors = current->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+
+ // Currently constness change implies map change.
+ DCHECK_IMPLIES(new_constness != details.constness(),
+ FLAG_modify_map_inplace);
+
+    // It is only allowed to change the representation here from None to
+    // something else.
+ DCHECK(details.representation().Equals(new_representation) ||
+ details.representation().IsNone());
+
+ // Skip if already updated the shared descriptor.
+ if ((FLAG_modify_map_inplace && new_constness != details.constness()) ||
+ descriptors->GetFieldType(descriptor) != *new_wrapped_type.object()) {
+ DCHECK_IMPLIES(!FLAG_track_constant_fields,
+ new_constness == PropertyConstness::kMutable);
+ Descriptor d = Descriptor::DataField(
+ name, descriptors->GetFieldIndex(descriptor), details.attributes(),
+ new_constness, new_representation, new_wrapped_type);
+ descriptors->Replace(descriptor, &d);
+ }
+ }
+}
+
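+// A None field type combined with a heap-object representation means that the
+// original class field type was cleared by the GC, i.e. knowledge about the
+// field's actual type has been lost.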
+bool FieldTypeIsCleared(Representation rep, FieldType type) {
+ return type->IsNone() && rep.IsHeapObject();
+}
+
+// static
+Handle<FieldType> Map::GeneralizeFieldType(Representation rep1,
+ Handle<FieldType> type1,
+ Representation rep2,
+ Handle<FieldType> type2,
+ Isolate* isolate) {
+  // Cleared field types need special treatment: they represent lost knowledge,
+  // so we must be conservative and generalize them with any other type to
+  // "Any".
+ if (FieldTypeIsCleared(rep1, *type1) || FieldTypeIsCleared(rep2, *type2)) {
+ return FieldType::Any(isolate);
+ }
+ if (type1->NowIs(type2)) return type2;
+ if (type2->NowIs(type1)) return type1;
+ return FieldType::Any(isolate);
+}
+
+// static
+void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ Handle<FieldType> new_field_type) {
+ // Check if we actually need to generalize the field type at all.
+ Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
+ PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+ PropertyConstness old_constness = old_details.constness();
+ Representation old_representation = old_details.representation();
+ Handle<FieldType> old_field_type(old_descriptors->GetFieldType(modify_index),
+ isolate);
+
+  // Return if the current map is already general enough to hold the requested
+  // constness and representation/field type.
+ if (((FLAG_modify_map_inplace &&
+ IsGeneralizableTo(new_constness, old_constness)) ||
+ (!FLAG_modify_map_inplace && (old_constness == new_constness))) &&
+ old_representation.Equals(new_representation) &&
+ !FieldTypeIsCleared(new_representation, *new_field_type) &&
+ // Checking old_field_type for being cleared is not necessary because
+ // the NowIs check below would fail anyway in that case.
+ new_field_type->NowIs(old_field_type)) {
+ DCHECK(GeneralizeFieldType(old_representation, old_field_type,
+ new_representation, new_field_type, isolate)
+ ->NowIs(old_field_type));
+ return;
+ }
+
+ // Determine the field owner.
+ Handle<Map> field_owner(map->FindFieldOwner(isolate, modify_index), isolate);
+ Handle<DescriptorArray> descriptors(field_owner->instance_descriptors(),
+ isolate);
+ DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
+
+ new_field_type =
+ Map::GeneralizeFieldType(old_representation, old_field_type,
+ new_representation, new_field_type, isolate);
+ if (FLAG_modify_map_inplace) {
+ new_constness = GeneralizeConstness(old_constness, new_constness);
+ }
+
+ PropertyDetails details = descriptors->GetDetails(modify_index);
+ Handle<Name> name(descriptors->GetKey(modify_index), isolate);
+
+ MaybeObjectHandle wrapped_type(WrapFieldType(isolate, new_field_type));
+ field_owner->UpdateFieldType(isolate, modify_index, name, new_constness,
+ new_representation, wrapped_type);
+ field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kFieldOwnerGroup);
+
+ if (FLAG_trace_generalization) {
+ map->PrintGeneralization(
+ isolate, stdout, "field type generalization", modify_index,
+ map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false,
+ details.representation(), details.representation(), old_field_type,
+ MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
+ }
+}
+
+// TODO(ishell): remove.
+// static
+Handle<Map> Map::ReconfigureProperty(Isolate* isolate, Handle<Map> map,
+ int modify_index, PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<FieldType> new_field_type) {
+ DCHECK_EQ(kData, new_kind); // Only kData case is supported.
+ MapUpdater mu(isolate, map);
+ return mu.ReconfigureToDataField(modify_index, new_attributes,
+ PropertyConstness::kConst,
+ new_representation, new_field_type);
+}
+
+// TODO(ishell): remove.
+// static
+Handle<Map> Map::ReconfigureElementsKind(Isolate* isolate, Handle<Map> map,
+ ElementsKind new_elements_kind) {
+ MapUpdater mu(isolate, map);
+ return mu.ReconfigureElementsKind(new_elements_kind);
+}
+
+namespace {
+
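+// Looks up a previously recorded "migration target" for a deprecated map: a
+// cached result of a slow map update that lets later updates skip replaying
+// the transitions.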
+Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
+ DisallowHeapAllocation no_allocation;
+ DisallowDeoptimization no_deoptimization(isolate);
+
+ Map target = old_map;
+ do {
+ target = TransitionsAccessor(isolate, target, &no_allocation)
+ .GetMigrationTarget();
+ } while (!target.is_null() && target->is_deprecated());
+ if (target.is_null()) return Map();
+
+  // TODO(ishell): if this validation ever becomes a bottleneck, consider
+  // adding a bit to the Map telling whether it contains fields whose field
+  // types may be cleared.
+ // TODO(ishell): revisit handling of cleared field types in
+ // TryReplayPropertyTransitions() and consider checking the target map's field
+ // types instead of old_map's types.
+ // Go to slow map updating if the old_map has fast properties with cleared
+ // field types.
+ int old_nof = old_map->NumberOfOwnDescriptors();
+ DescriptorArray old_descriptors = old_map->instance_descriptors();
+ for (int i = 0; i < old_nof; i++) {
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ if (old_details.location() == kField && old_details.kind() == kData) {
+ FieldType old_type = old_descriptors->GetFieldType(i);
+ if (FieldTypeIsCleared(old_details.representation(), old_type)) {
+ return Map();
+ }
+ }
+ }
+
+ SLOW_DCHECK(Map::TryUpdateSlow(isolate, old_map) == target);
+ return target;
+}
+} // namespace
+
+// TODO(ishell): Move TryUpdate() and friends to MapUpdater
+// static
+MaybeHandle<Map> Map::TryUpdate(Isolate* isolate, Handle<Map> old_map) {
+ DisallowHeapAllocation no_allocation;
+ DisallowDeoptimization no_deoptimization(isolate);
+
+ if (!old_map->is_deprecated()) return old_map;
+
+ if (FLAG_fast_map_update) {
+ Map target_map = SearchMigrationTarget(isolate, *old_map);
+ if (!target_map.is_null()) {
+ return handle(target_map, isolate);
+ }
+ }
+
+ Map new_map = TryUpdateSlow(isolate, *old_map);
+ if (new_map.is_null()) return MaybeHandle<Map>();
+ if (FLAG_fast_map_update) {
+ TransitionsAccessor(isolate, *old_map, &no_allocation)
+ .SetMigrationTarget(new_map);
+ }
+ return handle(new_map, isolate);
+}
+
+namespace {
+
+struct IntegrityLevelTransitionInfo {
+ explicit IntegrityLevelTransitionInfo(Map map)
+ : integrity_level_source_map(map) {}
+
+ bool has_integrity_level_transition = false;
+ PropertyAttributes integrity_level = NONE;
+ Map integrity_level_source_map;
+ Symbol integrity_level_symbol;
+};
+
+IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
+ Map map, Isolate* isolate, DisallowHeapAllocation* no_allocation) {
+ IntegrityLevelTransitionInfo info(map);
+
+ // Figure out the most restrictive integrity level transition (it should
+ // be the last one in the transition tree).
+ DCHECK(!map->is_extensible());
+ Map previous = Map::cast(map->GetBackPointer());
+ TransitionsAccessor last_transitions(isolate, previous, no_allocation);
+ if (!last_transitions.HasIntegrityLevelTransitionTo(
+ map, &(info.integrity_level_symbol), &(info.integrity_level))) {
+    // The last transition was not an integrity level transition; just bail
+    // out.
+ // This can happen in the following cases:
+ // - there are private symbol transitions following the integrity level
+ // transitions (see crbug.com/v8/8854).
+ // - there is a getter added in addition to an existing setter (or a setter
+ // in addition to an existing getter).
+ return info;
+ }
+
+ Map source_map = previous;
+ // Now walk up the back pointer chain and skip all integrity level
+ // transitions. If we encounter any non-integrity level transition interleaved
+ // with integrity level transitions, just bail out.
+ while (!source_map->is_extensible()) {
+ previous = Map::cast(source_map->GetBackPointer());
+ TransitionsAccessor transitions(isolate, previous, no_allocation);
+ if (!transitions.HasIntegrityLevelTransitionTo(source_map)) {
+ return info;
+ }
+ source_map = previous;
+ }
+
+  // Integrity-level transitions never change the number of descriptors.
+ CHECK_EQ(map->NumberOfOwnDescriptors(), source_map->NumberOfOwnDescriptors());
+
+ info.has_integrity_level_transition = true;
+ info.integrity_level_source_map = source_map;
+ return info;
+}
+
+} // namespace
+
+Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
+ DisallowHeapAllocation no_allocation;
+ DisallowDeoptimization no_deoptimization(isolate);
+
+ // Check the state of the root map.
+ Map root_map = old_map->FindRootMap(isolate);
+ if (root_map->is_deprecated()) {
+ JSFunction constructor = JSFunction::cast(root_map->GetConstructor());
+ DCHECK(constructor->has_initial_map());
+ DCHECK(constructor->initial_map()->is_dictionary_map());
+ if (constructor->initial_map()->elements_kind() !=
+ old_map->elements_kind()) {
+ return Map();
+ }
+ return constructor->initial_map();
+ }
+ if (!old_map->EquivalentToForTransition(root_map)) return Map();
+
+ ElementsKind from_kind = root_map->elements_kind();
+ ElementsKind to_kind = old_map->elements_kind();
+
+ IntegrityLevelTransitionInfo info(old_map);
+ if (root_map->is_extensible() != old_map->is_extensible()) {
+ DCHECK(!old_map->is_extensible());
+ DCHECK(root_map->is_extensible());
+ info = DetectIntegrityLevelTransitions(old_map, isolate, &no_allocation);
+ // Bail out if there were some private symbol transitions mixed up
+ // with the integrity level transitions.
+ if (!info.has_integrity_level_transition) return Map();
+    // Make sure to replay the original elements kind transitions before the
+    // integrity level transition sets the elements to dictionary mode.
+ DCHECK(to_kind == DICTIONARY_ELEMENTS ||
+ IsFixedTypedArrayElementsKind(to_kind));
+ to_kind = info.integrity_level_source_map->elements_kind();
+ }
+ if (from_kind != to_kind) {
+ // Try to follow existing elements kind transitions.
+ root_map = root_map->LookupElementsTransitionMap(isolate, to_kind);
+ if (root_map.is_null()) return Map();
+ // From here on, use the map with correct elements kind as root map.
+ }
+
+ // Replay the transitions as they were before the integrity level transition.
+ Map result = root_map->TryReplayPropertyTransitions(
+ isolate, info.integrity_level_source_map);
+ if (result.is_null()) return Map();
+
+ if (info.has_integrity_level_transition) {
+ // Now replay the integrity level transition.
+ result = TransitionsAccessor(isolate, result, &no_allocation)
+ .SearchSpecial(info.integrity_level_symbol);
+ }
+
+ DCHECK_EQ(old_map->elements_kind(), result->elements_kind());
+ DCHECK_EQ(old_map->instance_type(), result->instance_type());
+ return result;
+}
+
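+// Starting from this root map, re-applies the property transitions recorded in
+// |old_map|'s descriptors; returns an empty Map if a transition is missing or
+// a descriptor turns out to be incompatible.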
+Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
+ DisallowHeapAllocation no_allocation;
+ DisallowDeoptimization no_deoptimization(isolate);
+
+ int root_nof = NumberOfOwnDescriptors();
+
+ int old_nof = old_map->NumberOfOwnDescriptors();
+ DescriptorArray old_descriptors = old_map->instance_descriptors();
+
+ Map new_map = *this;
+ for (int i = root_nof; i < old_nof; ++i) {
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ Map transition =
+ TransitionsAccessor(isolate, new_map, &no_allocation)
+ .SearchTransition(old_descriptors->GetKey(i), old_details.kind(),
+ old_details.attributes());
+ if (transition.is_null()) return Map();
+ new_map = transition;
+ DescriptorArray new_descriptors = new_map->instance_descriptors();
+
+ PropertyDetails new_details = new_descriptors->GetDetails(i);
+ DCHECK_EQ(old_details.kind(), new_details.kind());
+ DCHECK_EQ(old_details.attributes(), new_details.attributes());
+ if (!IsGeneralizableTo(old_details.constness(), new_details.constness())) {
+ return Map();
+ }
+ DCHECK(IsGeneralizableTo(old_details.location(), new_details.location()));
+ if (!old_details.representation().fits_into(new_details.representation())) {
+ return Map();
+ }
+ if (new_details.location() == kField) {
+ if (new_details.kind() == kData) {
+ FieldType new_type = new_descriptors->GetFieldType(i);
+ // Cleared field types need special treatment. They represent lost
+ // knowledge, so we must first generalize the new_type to "Any".
+ if (FieldTypeIsCleared(new_details.representation(), new_type)) {
+ return Map();
+ }
+ DCHECK_EQ(kData, old_details.kind());
+ if (old_details.location() == kField) {
+ FieldType old_type = old_descriptors->GetFieldType(i);
+ if (FieldTypeIsCleared(old_details.representation(), old_type) ||
+ !old_type->NowIs(new_type)) {
+ return Map();
+ }
+ } else {
+ DCHECK_EQ(kDescriptor, old_details.location());
+ DCHECK(!FLAG_track_constant_fields);
+ Object old_value = old_descriptors->GetStrongValue(i);
+ if (!new_type->NowContains(old_value)) {
+ return Map();
+ }
+ }
+
+ } else {
+ DCHECK_EQ(kAccessor, new_details.kind());
+#ifdef DEBUG
+ FieldType new_type = new_descriptors->GetFieldType(i);
+ DCHECK(new_type->IsAny());
+#endif
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(kDescriptor, new_details.location());
+ if (old_details.location() == kField ||
+ old_descriptors->GetStrongValue(i) !=
+ new_descriptors->GetStrongValue(i)) {
+ return Map();
+ }
+ }
+ }
+ if (new_map->NumberOfOwnDescriptors() != old_nof) return Map();
+ return new_map;
+}
+
+// static
+Handle<Map> Map::Update(Isolate* isolate, Handle<Map> map) {
+ if (!map->is_deprecated()) return map;
+ if (FLAG_fast_map_update) {
+ Map target_map = SearchMigrationTarget(isolate, *map);
+ if (!target_map.is_null()) {
+ return handle(target_map, isolate);
+ }
+ }
+ MapUpdater mu(isolate, map);
+ return mu.Update();
+}
+
+void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
+ // Only supports adding slack to owned descriptors.
+ DCHECK(map->owns_descriptors());
+
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ int old_size = map->NumberOfOwnDescriptors();
+ if (slack <= descriptors->number_of_slack_descriptors()) return;
+
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::CopyUpTo(isolate, descriptors, old_size, slack);
+
+ DisallowHeapAllocation no_allocation;
+ // The descriptors are still the same, so keep the layout descriptor.
+ LayoutDescriptor layout_descriptor = map->GetLayoutDescriptor();
+
+ if (old_size == 0) {
+ map->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ map->NumberOfOwnDescriptors());
+ return;
+ }
+
+ // If the source descriptors had an enum cache we copy it. This ensures
+ // that the maps to which we push the new descriptor array back can rely
+ // on a cache always being available once it is set. If the map has more
+ // enumerated descriptors than available in the original cache, the cache
+ // will be lazily replaced by the extended cache when needed.
+ new_descriptors->CopyEnumCacheFrom(*descriptors);
+
+ // Replace descriptors by new_descriptors in all maps that share it. The old
+ // descriptors will not be trimmed in the mark-compactor, we need to mark
+ // all its elements.
+ MarkingBarrierForDescriptorArray(isolate->heap(), *map, *descriptors,
+ descriptors->number_of_descriptors());
+
+ Map current = *map;
+ while (current->instance_descriptors() == *descriptors) {
+ Object next = current->GetBackPointer();
+ if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
+ current->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ current->NumberOfOwnDescriptors());
+ current = Map::cast(next);
+ }
+ map->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ map->NumberOfOwnDescriptors());
+}
+
+// static
+Handle<Map> Map::GetObjectCreateMap(Isolate* isolate,
+ Handle<HeapObject> prototype) {
+ Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ isolate);
+ if (map->prototype() == *prototype) return map;
+ if (prototype->IsNull(isolate)) {
+ return isolate->slow_object_with_null_prototype_map();
+ }
+ if (prototype->IsJSObject()) {
+ Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
+ if (!js_prototype->map()->is_prototype_map()) {
+ JSObject::OptimizeAsPrototype(js_prototype);
+ }
+ Handle<PrototypeInfo> info =
+ Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
+ // TODO(verwaest): Use inobject slack tracking for this map.
+ if (info->HasObjectCreateMap()) {
+ map = handle(info->ObjectCreateMap(), isolate);
+ } else {
+ map = Map::CopyInitialMap(isolate, map);
+ Map::SetPrototype(isolate, map, prototype);
+ PrototypeInfo::SetObjectCreateMap(info, map);
+ }
+ return map;
+ }
+
+ return Map::TransitionToPrototype(isolate, map, prototype);
+}
+
+// static
+MaybeHandle<Map> Map::TryGetObjectCreateMap(Isolate* isolate,
+ Handle<HeapObject> prototype) {
+ Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ isolate);
+ if (map->prototype() == *prototype) return map;
+ if (prototype->IsNull(isolate)) {
+ return isolate->slow_object_with_null_prototype_map();
+ }
+ if (!prototype->IsJSObject()) return MaybeHandle<Map>();
+ Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
+ if (!js_prototype->map()->is_prototype_map()) return MaybeHandle<Map>();
+ Handle<PrototypeInfo> info =
+ Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
+ if (!info->HasObjectCreateMap()) return MaybeHandle<Map>();
+ return handle(info->ObjectCreateMap(), isolate);
+}
+
+static bool ContainsMap(MapHandles const& maps, Map map) {
+ DCHECK(!map.is_null());
+ for (Handle<Map> current : maps) {
+ if (!current.is_null() && *current == map) return true;
+ }
+ return false;
+}
+
+Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
+ MapHandles const& candidates) {
+ DisallowHeapAllocation no_allocation;
+ DisallowDeoptimization no_deoptimization(isolate);
+
+ if (is_prototype_map()) return Map();
+
+ ElementsKind kind = elements_kind();
+ bool packed = IsFastPackedElementsKind(kind);
+
+ Map transition;
+ if (IsTransitionableFastElementsKind(kind)) {
+ // Check the state of the root map.
+ Map root_map = FindRootMap(isolate);
+ if (!EquivalentToForElementsKindTransition(root_map)) return Map();
+ root_map = root_map->LookupElementsTransitionMap(isolate, kind);
+ DCHECK(!root_map.is_null());
+    // Starting from the next existing elements kind transition, try to replay
+    // the property transitions that do not involve instance rewriting
+    // (ElementsTransitionAndStoreStub does not support that).
+ for (root_map = root_map->ElementsTransitionMap();
+ !root_map.is_null() && root_map->has_fast_elements();
+ root_map = root_map->ElementsTransitionMap()) {
+ Map current = root_map->TryReplayPropertyTransitions(isolate, *this);
+ if (current.is_null()) continue;
+ if (InstancesNeedRewriting(current)) continue;
+
+ if (ContainsMap(candidates, current) &&
+ (packed || !IsFastPackedElementsKind(current->elements_kind()))) {
+ transition = current;
+ packed = packed && IsFastPackedElementsKind(current->elements_kind());
+ }
+ }
+ }
+ return transition;
+}
+
+static Map FindClosestElementsTransition(Isolate* isolate, Map map,
+ ElementsKind to_kind) {
+  // Ensure we were asked to search for an elements kind transition "near the
+  // root".
+ DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
+ map->NumberOfOwnDescriptors());
+ Map current_map = map;
+
+ ElementsKind kind = map->elements_kind();
+ while (kind != to_kind) {
+ Map next_map = current_map->ElementsTransitionMap();
+ if (next_map.is_null()) return current_map;
+ kind = next_map->elements_kind();
+ current_map = next_map;
+ }
+
+ DCHECK_EQ(to_kind, current_map->elements_kind());
+ return current_map;
+}
+
+Map Map::LookupElementsTransitionMap(Isolate* isolate, ElementsKind to_kind) {
+ Map to_map = FindClosestElementsTransition(isolate, *this, to_kind);
+ if (to_map->elements_kind() == to_kind) return to_map;
+ return Map();
+}
+
+bool Map::IsMapInArrayPrototypeChain(Isolate* isolate) const {
+ if (isolate->initial_array_prototype()->map() == *this) {
+ return true;
+ }
+
+ if (isolate->initial_object_prototype()->map() == *this) {
+ return true;
+ }
+
+ return false;
+}
+
+Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
+ ElementsKind to_kind) {
+ ElementsKind from_kind = map->elements_kind();
+ if (from_kind == to_kind) return map;
+
+ Context native_context = isolate->context()->native_context();
+ if (from_kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
+ if (*map == native_context->fast_aliased_arguments_map()) {
+ DCHECK_EQ(SLOW_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
+ return handle(native_context->slow_aliased_arguments_map(), isolate);
+ }
+ } else if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
+ if (*map == native_context->slow_aliased_arguments_map()) {
+ DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
+ return handle(native_context->fast_aliased_arguments_map(), isolate);
+ }
+ } else if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
+ // Reuse map transitions for JSArrays.
+ DisallowHeapAllocation no_gc;
+ if (native_context->GetInitialJSArrayMap(from_kind) == *map) {
+ Object maybe_transitioned_map =
+ native_context->get(Context::ArrayMapIndex(to_kind));
+ if (maybe_transitioned_map->IsMap()) {
+ return handle(Map::cast(maybe_transitioned_map), isolate);
+ }
+ }
+ }
+
+ DCHECK(!map->IsUndefined(isolate));
+ // Check if we can go back in the elements kind transition chain.
+ if (IsHoleyElementsKind(from_kind) &&
+ to_kind == GetPackedElementsKind(from_kind) &&
+ map->GetBackPointer()->IsMap() &&
+ Map::cast(map->GetBackPointer())->elements_kind() == to_kind) {
+ return handle(Map::cast(map->GetBackPointer()), isolate);
+ }
+
+ bool allow_store_transition = IsTransitionElementsKind(from_kind);
+ // Only store fast element maps in ascending generality.
+ if (IsFastElementsKind(to_kind)) {
+ allow_store_transition =
+ allow_store_transition && IsTransitionableFastElementsKind(from_kind) &&
+ IsMoreGeneralElementsKindTransition(from_kind, to_kind);
+ }
+
+ if (!allow_store_transition) {
+ return Map::CopyAsElementsKind(isolate, map, to_kind, OMIT_TRANSITION);
+ }
+
+ return Map::ReconfigureElementsKind(isolate, map, to_kind);
+}
+
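+// Creates the elements kind transition maps missing between |map| and
+// |to_kind|, inserting transitions along the way unless |map| is a prototype
+// map.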
+static Handle<Map> AddMissingElementsTransitions(Isolate* isolate,
+ Handle<Map> map,
+ ElementsKind to_kind) {
+ DCHECK(IsTransitionElementsKind(map->elements_kind()));
+
+ Handle<Map> current_map = map;
+
+ ElementsKind kind = map->elements_kind();
+ TransitionFlag flag;
+ if (map->is_prototype_map()) {
+ flag = OMIT_TRANSITION;
+ } else {
+ flag = INSERT_TRANSITION;
+ if (IsFastElementsKind(kind)) {
+ while (kind != to_kind && !IsTerminalElementsKind(kind)) {
+ kind = GetNextTransitionElementsKind(kind);
+ current_map = Map::CopyAsElementsKind(isolate, current_map, kind, flag);
+ }
+ }
+ }
+
+  // In case we are exiting the fast elements kind system, just add the map at
+  // the end.
+ if (kind != to_kind) {
+ current_map = Map::CopyAsElementsKind(isolate, current_map, to_kind, flag);
+ }
+
+ DCHECK(current_map->elements_kind() == to_kind);
+ return current_map;
+}
+
+// static
+Handle<Map> Map::AsElementsKind(Isolate* isolate, Handle<Map> map,
+ ElementsKind kind) {
+ Handle<Map> closest_map(FindClosestElementsTransition(isolate, *map, kind),
+ isolate);
+
+ if (closest_map->elements_kind() == kind) {
+ return closest_map;
+ }
+
+ return AddMissingElementsTransitions(isolate, closest_map, kind);
+}
+
+int Map::NumberOfEnumerableProperties() const {
+ int result = 0;
+ DescriptorArray descs = instance_descriptors();
+ int limit = NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ if ((descs->GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
+ !descs->GetKey(i)->FilterKey(ENUMERABLE_STRINGS)) {
+ result++;
+ }
+ }
+ return result;
+}
+
+int Map::NextFreePropertyIndex() const {
+ int free_index = 0;
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ DescriptorArray descs = instance_descriptors();
+ for (int i = 0; i < number_of_own_descriptors; i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ if (details.location() == kField) {
+ int candidate = details.field_index() + details.field_width_in_words();
+ if (candidate > free_index) free_index = candidate;
+ }
+ }
+ return free_index;
+}
+
+bool Map::OnlyHasSimpleProperties() const {
+ // Wrapped string elements aren't explicitly stored in the elements backing
+ // store, but are loaded indirectly from the underlying string.
+ return !IsStringWrapperElementsKind(elements_kind()) &&
+ !IsSpecialReceiverMap() && !has_hidden_prototype() &&
+ !is_dictionary_map();
+}
+
+bool Map::DictionaryElementsInPrototypeChainOnly(Isolate* isolate) {
+ if (IsDictionaryElementsKind(elements_kind())) {
+ return false;
+ }
+
+ for (PrototypeIterator iter(isolate, *this); !iter.IsAtEnd();
+ iter.Advance()) {
+    // Be conservative; don't walk into proxies.
+ if (iter.GetCurrent()->IsJSProxy()) return true;
+ // String wrappers have non-configurable, non-writable elements.
+ if (iter.GetCurrent()->IsStringWrapper()) return true;
+ JSObject current = iter.GetCurrent<JSObject>();
+
+ if (current->HasDictionaryElements() &&
+ current->element_dictionary()->requires_slow_elements()) {
+ return true;
+ }
+
+ if (current->HasSlowArgumentsElements()) {
+ FixedArray parameter_map = FixedArray::cast(current->elements());
+ Object arguments = parameter_map->get(1);
+ if (NumberDictionary::cast(arguments)->requires_slow_elements()) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
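+// Copies a map, preserving its prototype, constructor, and bit fields; the
+// copy starts out owning zero descriptors, with an invalid enum cache and its
+// deprecation bit cleared.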
+Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
+ int inobject_properties) {
+ Handle<Map> result = isolate->factory()->NewMap(
+ map->instance_type(), instance_size, TERMINAL_FAST_ELEMENTS_KIND,
+ inobject_properties);
+ Handle<Object> prototype(map->prototype(), isolate);
+ Map::SetPrototype(isolate, result, prototype);
+ result->set_constructor_or_backpointer(map->GetConstructor());
+ result->set_bit_field(map->bit_field());
+ result->set_bit_field2(map->bit_field2());
+ int new_bit_field3 = map->bit_field3();
+ new_bit_field3 = OwnsDescriptorsBit::update(new_bit_field3, true);
+ new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
+ new_bit_field3 =
+ EnumLengthBits::update(new_bit_field3, kInvalidEnumCacheSentinel);
+ new_bit_field3 = IsDeprecatedBit::update(new_bit_field3, false);
+ if (!map->is_dictionary_map()) {
+ new_bit_field3 = IsUnstableBit::update(new_bit_field3, false);
+ }
+ result->set_bit_field3(new_bit_field3);
+ result->clear_padding();
+ return result;
+}
+
+Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
+ PropertyNormalizationMode mode, const char* reason) {
+ DCHECK(!fast_map->is_dictionary_map());
+
+ Handle<Object> maybe_cache(isolate->native_context()->normalized_map_cache(),
+ isolate);
+ bool use_cache =
+ !fast_map->is_prototype_map() && !maybe_cache->IsUndefined(isolate);
+ Handle<NormalizedMapCache> cache;
+ if (use_cache) cache = Handle<NormalizedMapCache>::cast(maybe_cache);
+
+ Handle<Map> new_map;
+ if (use_cache && cache->Get(fast_map, mode).ToHandle(&new_map)) {
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) new_map->DictionaryMapVerify(isolate);
+#endif
+#ifdef ENABLE_SLOW_DCHECKS
+ if (FLAG_enable_slow_asserts) {
+      // The cached map should match the newly created normalized map
+      // bit-by-bit, except for the code cache (which can contain some ICs that
+      // can be applied to the shared map), the dependent code, and the weak
+      // cell cache.
+ Handle<Map> fresh = Map::CopyNormalized(isolate, fast_map, mode);
+
+ if (new_map->is_prototype_map()) {
+ // For prototype maps, the PrototypeInfo is not copied.
+ DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
+ reinterpret_cast<void*>(new_map->address()),
+ kTransitionsOrPrototypeInfoOffset));
+ DCHECK_EQ(fresh->raw_transitions(),
+ MaybeObject::FromObject(Smi::kZero));
+ STATIC_ASSERT(kDescriptorsOffset ==
+ kTransitionsOrPrototypeInfoOffset + kTaggedSize);
+ DCHECK_EQ(
+ 0,
+ memcmp(
+ HeapObject::RawField(*fresh, kDescriptorsOffset).ToVoidPtr(),
+ HeapObject::RawField(*new_map, kDescriptorsOffset).ToVoidPtr(),
+ kDependentCodeOffset - kDescriptorsOffset));
+ } else {
+ DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
+ reinterpret_cast<void*>(new_map->address()),
+ Map::kDependentCodeOffset));
+ }
+ STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
+ Map::kDependentCodeOffset + kTaggedSize);
+ int offset = Map::kPrototypeValidityCellOffset + kTaggedSize;
+ DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address() + offset),
+ reinterpret_cast<void*>(new_map->address() + offset),
+ Map::kSize - offset));
+ }
+#endif
+ } else {
+ new_map = Map::CopyNormalized(isolate, fast_map, mode);
+ if (use_cache) {
+ cache->Set(fast_map, new_map);
+ isolate->counters()->maps_normalized()->Increment();
+ }
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("Normalize", *fast_map, *new_map, reason));
+ }
+ }
+ fast_map->NotifyLeafMapLayoutChange(isolate);
+ return new_map;
+}
+
+Handle<Map> Map::CopyNormalized(Isolate* isolate, Handle<Map> map,
+ PropertyNormalizationMode mode) {
+ int new_instance_size = map->instance_size();
+ if (mode == CLEAR_INOBJECT_PROPERTIES) {
+ new_instance_size -= map->GetInObjectProperties() * kTaggedSize;
+ }
+
+ Handle<Map> result = RawCopy(
+ isolate, map, new_instance_size,
+ mode == CLEAR_INOBJECT_PROPERTIES ? 0 : map->GetInObjectProperties());
+ // Clear the unused_property_fields explicitly as this field should not
+ // be accessed for normalized maps.
+ result->SetInObjectUnusedPropertyFields(0);
+ result->set_is_dictionary_map(true);
+ result->set_is_migration_target(false);
+ result->set_may_have_interesting_symbols(true);
+ result->set_construction_counter(kNoSlackTracking);
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) result->DictionaryMapVerify(isolate);
+#endif
+
+ return result;
+}
+
+// Returns an immutable-prototype exotic object version of the input map.
+// It is deliberately never cached in the transition tree, as it is intended
+// only for the global object and its prototype chain; excluding it saves
+// memory in the transition tree.
+
+// static
+Handle<Map> Map::TransitionToImmutableProto(Isolate* isolate, Handle<Map> map) {
+ Handle<Map> new_map = Map::Copy(isolate, map, "ImmutablePrototype");
+ new_map->set_is_immutable_proto(true);
+ return new_map;
+}
+
+namespace {
+void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
+#ifdef DEBUG
+  // Strict function maps have Function as a constructor, but Function's
+  // initial map is a sloppy function map. The same holds for
+  // GeneratorFunction / AsyncFunction and their initial maps.
+ Object constructor = map->GetConstructor();
+ DCHECK(constructor->IsJSFunction());
+ DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
+ *map == *isolate->strict_function_map() ||
+ *map == *isolate->strict_function_with_name_map() ||
+ *map == *isolate->generator_function_map() ||
+ *map == *isolate->generator_function_with_name_map() ||
+ *map == *isolate->generator_function_with_home_object_map() ||
+ *map == *isolate->generator_function_with_name_and_home_object_map() ||
+ *map == *isolate->async_function_map() ||
+ *map == *isolate->async_function_with_name_map() ||
+ *map == *isolate->async_function_with_home_object_map() ||
+ *map == *isolate->async_function_with_name_and_home_object_map());
+#endif
+  // Initial maps must always own their descriptors, and their descriptor
+  // array must not contain descriptors that do not belong to the map.
+ DCHECK(map->owns_descriptors());
+ DCHECK_EQ(map->NumberOfOwnDescriptors(),
+ map->instance_descriptors()->number_of_descriptors());
+}
+} // namespace
+
+// static
+Handle<Map> Map::CopyInitialMapNormalized(Isolate* isolate, Handle<Map> map,
+ PropertyNormalizationMode mode) {
+ EnsureInitialMap(isolate, map);
+ return CopyNormalized(isolate, map, mode);
+}
+
+// static
+Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
+ int instance_size, int inobject_properties,
+ int unused_property_fields) {
+ EnsureInitialMap(isolate, map);
+ Handle<Map> result =
+ RawCopy(isolate, map, instance_size, inobject_properties);
+
+ // Please note instance_type and instance_size are set when allocated.
+ result->SetInObjectUnusedPropertyFields(unused_property_fields);
+
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors > 0) {
+ // The copy will use the same descriptors array.
+ result->UpdateDescriptors(isolate, map->instance_descriptors(),
+ map->GetLayoutDescriptor(),
+ number_of_own_descriptors);
+
+ DCHECK_EQ(result->NumberOfFields(),
+ result->GetInObjectProperties() - result->UnusedPropertyFields());
+ }
+
+ return result;
+}
+
+Handle<Map> Map::CopyDropDescriptors(Isolate* isolate, Handle<Map> map) {
+ Handle<Map> result =
+ RawCopy(isolate, map, map->instance_size(),
+ map->IsJSObjectMap() ? map->GetInObjectProperties() : 0);
+
+ // Please note instance_type and instance_size are set when allocated.
+ if (map->IsJSObjectMap()) {
+ result->CopyUnusedPropertyFields(*map);
+ }
+ map->NotifyLeafMapLayoutChange(isolate);
+ return result;
+}
+
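+// Fast path for adding a single property: the child map shares the parent
+// map's descriptor array, and the new descriptor is appended to it in place.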
+Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ Descriptor* descriptor) {
+ // Sanity check. This path is only to be taken if the map owns its descriptor
+ // array, implying that its NumberOfOwnDescriptors equals the number of
+ // descriptors in the descriptor array.
+ DCHECK_EQ(map->NumberOfOwnDescriptors(),
+ map->instance_descriptors()->number_of_descriptors());
+
+ Handle<Map> result = CopyDropDescriptors(isolate, map);
+ Handle<Name> name = descriptor->GetKey();
+
+ // Properly mark the {result} if the {name} is an "interesting symbol".
+ if (name->IsInterestingSymbol()) {
+ result->set_may_have_interesting_symbols(true);
+ }
+
+ // Ensure there's space for the new descriptor in the shared descriptor array.
+ if (descriptors->number_of_slack_descriptors() == 0) {
+ int old_size = descriptors->number_of_descriptors();
+ if (old_size == 0) {
+ descriptors = DescriptorArray::Allocate(isolate, 0, 1);
+ } else {
+ int slack = SlackForArraySize(old_size, kMaxNumberOfDescriptors);
+ EnsureDescriptorSlack(isolate, map, slack);
+ descriptors = handle(map->instance_descriptors(), isolate);
+ }
+ }
+
+ Handle<LayoutDescriptor> layout_descriptor =
+ FLAG_unbox_double_fields
+ ? LayoutDescriptor::ShareAppend(isolate, map,
+ descriptor->GetDetails())
+ : handle(LayoutDescriptor::FastPointerLayout(), isolate);
+
+ {
+ DisallowHeapAllocation no_gc;
+ descriptors->Append(descriptor);
+ result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
+ }
+
+ DCHECK(result->NumberOfOwnDescriptors() == map->NumberOfOwnDescriptors() + 1);
+ ConnectTransition(isolate, map, result, name, SIMPLE_PROPERTY_TRANSITION);
+
+ return result;
+}
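+
+// A sketch of the sharing invariant that ShareDescriptor maintains in the
+// common (non-initial-map) path: parent and child reference the same
+// DescriptorArray afterwards, but only the child owns it and counts the
+// appended entry:
+//
+//   parent: NumberOfOwnDescriptors() == n,     owns_descriptors() == false
+//   child:  NumberOfOwnDescriptors() == n + 1, owns_descriptors() == true
+//
+// The array holds n + 1 descriptors; the parent simply ignores the trailing
+// entry it does not own.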
+
+void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
+ Handle<Map> child, Handle<Name> name,
+ SimpleTransitionFlag flag) {
+ DCHECK_IMPLIES(name->IsInterestingSymbol(),
+ child->may_have_interesting_symbols());
+ DCHECK_IMPLIES(parent->may_have_interesting_symbols(),
+ child->may_have_interesting_symbols());
+ // Do not track transitions during bootstrap except for element transitions.
+ if (isolate->bootstrapper()->IsActive() &&
+ !name.is_identical_to(isolate->factory()->elements_transition_symbol())) {
+ if (FLAG_trace_maps) {
+ LOG(isolate,
+ MapEvent("Transition", *parent, *child,
+ child->is_prototype_map() ? "prototype" : "", *name));
+ }
+ return;
+ }
+ if (!parent->GetBackPointer()->IsUndefined(isolate)) {
+ parent->set_owns_descriptors(false);
+ } else {
+    // |parent| is an initial map and must keep ownership; there must be no
+    // descriptors in the descriptor array that do not belong to the map.
+ DCHECK(parent->owns_descriptors());
+ DCHECK_EQ(parent->NumberOfOwnDescriptors(),
+ parent->instance_descriptors()->number_of_descriptors());
+ }
+ if (parent->is_prototype_map()) {
+ DCHECK(child->is_prototype_map());
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("Transition", *parent, *child, "prototype", *name));
+ }
+ } else {
+ TransitionsAccessor(isolate, parent).Insert(name, child, flag);
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("Transition", *parent, *child, "", *name));
+ }
+ }
+}
+
+Handle<Map> Map::CopyReplaceDescriptors(
+ Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> layout_descriptor, TransitionFlag flag,
+ MaybeHandle<Name> maybe_name, const char* reason,
+ SimpleTransitionFlag simple_flag) {
+ DCHECK(descriptors->IsSortedNoDuplicates());
+
+ Handle<Map> result = CopyDropDescriptors(isolate, map);
+
+ // Properly mark the {result} if the {name} is an "interesting symbol".
+ Handle<Name> name;
+ if (maybe_name.ToHandle(&name) && name->IsInterestingSymbol()) {
+ result->set_may_have_interesting_symbols(true);
+ }
+
+ if (!map->is_prototype_map()) {
+ if (flag == INSERT_TRANSITION &&
+ TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
+ result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
+
+ DCHECK(!maybe_name.is_null());
+ ConnectTransition(isolate, map, result, name, simple_flag);
+ } else {
+ descriptors->GeneralizeAllFields();
+ result->InitializeDescriptors(isolate, *descriptors,
+ LayoutDescriptor::FastPointerLayout());
+ }
+ } else {
+ result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
+ }
+ if (FLAG_trace_maps &&
+ // Mirror conditions above that did not call ConnectTransition().
+ (map->is_prototype_map() ||
+ !(flag == INSERT_TRANSITION &&
+ TransitionsAccessor(isolate, map).CanHaveMoreTransitions()))) {
+ LOG(isolate, MapEvent("ReplaceDescriptors", *map, *result, reason,
+ maybe_name.is_null() ? Name() : *name));
+ }
+ return result;
+}
+
+// Creates a transition tree starting from |split_map| and adds all
+// descriptors starting from the descriptor with index
+// |split_map|.NumberOfOwnDescriptors(). The way this is done is tricky
+// because of GC and the special descriptor-marking logic.
+Handle<Map> Map::AddMissingTransitions(
+ Isolate* isolate, Handle<Map> split_map,
+ Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor) {
+ DCHECK(descriptors->IsSortedNoDuplicates());
+ int split_nof = split_map->NumberOfOwnDescriptors();
+ int nof_descriptors = descriptors->number_of_descriptors();
+ DCHECK_LT(split_nof, nof_descriptors);
+
+  // Start by creating the last map, which will own the full descriptor
+  // array. This is necessary to guarantee that the GC will mark the whole
+  // descriptor array if any of the allocations below fail.
+  // The number of unused properties is temporarily incorrect and the layout
+  // descriptor could unnecessarily be in slow mode, but we fix both after
+  // all the other intermediate maps are created.
+  // The last map might also have interesting symbols; we temporarily set
+  // the flag and clear it right before the descriptors are installed. This
+  // keeps heap verification happy and ensures the flag ends up accurate.
+ Handle<Map> last_map = CopyDropDescriptors(isolate, split_map);
+ last_map->InitializeDescriptors(isolate, *descriptors,
+ *full_layout_descriptor);
+ last_map->SetInObjectUnusedPropertyFields(0);
+ last_map->set_may_have_interesting_symbols(true);
+
+  // While creating the intermediate maps we violate the descriptor-sharing
+  // invariant, since the last map is not yet connected to the transition
+  // tree we create here. This is safe because the GC never trims a map's
+  // descriptors if there are no dead transitions from that map, which is
+  // exactly the case for all the intermediate maps we create here.
+ Handle<Map> map = split_map;
+ for (int i = split_nof; i < nof_descriptors - 1; ++i) {
+ Handle<Map> new_map = CopyDropDescriptors(isolate, map);
+ InstallDescriptors(isolate, map, new_map, i, descriptors,
+ full_layout_descriptor);
+
+ map = new_map;
+ }
+ map->NotifyLeafMapLayoutChange(isolate);
+ last_map->set_may_have_interesting_symbols(false);
+ InstallDescriptors(isolate, map, last_map, nof_descriptors - 1, descriptors,
+ full_layout_descriptor);
+ return last_map;
+}
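+
+// A sketch of the resulting chain (illustrative): with split_nof == 1 and
+// nof_descriptors == 4, AddMissingTransitions builds
+//
+//   split_map --d1--> m1 --d2--> m2 --d3--> last_map
+//
+// where each intermediate map m_i shares the full descriptor array but only
+// counts i + 1 descriptors as its own, and last_map owns the array.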
+
+// Since this method is used to rewrite an existing transition tree, it can
+// always insert transitions without checking.
+void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
+ Handle<Map> child, int new_descriptor,
+ Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor) {
+ DCHECK(descriptors->IsSortedNoDuplicates());
+
+ child->SetInstanceDescriptors(isolate, *descriptors, new_descriptor + 1);
+ child->CopyUnusedPropertyFields(*parent);
+ PropertyDetails details = descriptors->GetDetails(new_descriptor);
+ if (details.location() == kField) {
+ child->AccountAddedPropertyField();
+ }
+
+ if (FLAG_unbox_double_fields) {
+ Handle<LayoutDescriptor> layout_descriptor =
+ LayoutDescriptor::AppendIfFastOrUseFull(isolate, parent, details,
+ full_layout_descriptor);
+ child->set_layout_descriptor(*layout_descriptor);
+#ifdef VERIFY_HEAP
+ // TODO(ishell): remove these checks from VERIFY_HEAP mode.
+ if (FLAG_verify_heap) {
+ CHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
+ }
+#else
+ SLOW_DCHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
+#endif
+ child->set_visitor_id(Map::GetVisitorId(*child));
+ }
+
+ Handle<Name> name = handle(descriptors->GetKey(new_descriptor), isolate);
+ if (parent->may_have_interesting_symbols() || name->IsInterestingSymbol()) {
+ child->set_may_have_interesting_symbols(true);
+ }
+ ConnectTransition(isolate, parent, child, name, SIMPLE_PROPERTY_TRANSITION);
+}
+
+Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
+ ElementsKind kind, TransitionFlag flag) {
+ // Only certain objects are allowed to have non-terminal fast transitional
+ // elements kinds.
+ DCHECK(map->IsJSObjectMap());
+ DCHECK_IMPLIES(
+ !map->CanHaveFastTransitionableElementsKind(),
+ IsDictionaryElementsKind(kind) || IsTerminalElementsKind(kind));
+
+ Map maybe_elements_transition_map;
+ if (flag == INSERT_TRANSITION) {
+    // Ensure that the elements kind transition is requested "near the root"
+    // of the transition tree.
+ DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
+ map->NumberOfOwnDescriptors());
+
+ maybe_elements_transition_map = map->ElementsTransitionMap();
+ DCHECK(maybe_elements_transition_map.is_null() ||
+ (maybe_elements_transition_map->elements_kind() ==
+ DICTIONARY_ELEMENTS &&
+ kind == DICTIONARY_ELEMENTS));
+ DCHECK(!IsFastElementsKind(kind) ||
+ IsMoreGeneralElementsKindTransition(map->elements_kind(), kind));
+ DCHECK(kind != map->elements_kind());
+ }
+
+ bool insert_transition =
+ flag == INSERT_TRANSITION &&
+ TransitionsAccessor(isolate, map).CanHaveMoreTransitions() &&
+ maybe_elements_transition_map.is_null();
+
+ if (insert_transition) {
+ Handle<Map> new_map = CopyForElementsTransition(isolate, map);
+ new_map->set_elements_kind(kind);
+
+ Handle<Name> name = isolate->factory()->elements_transition_symbol();
+ ConnectTransition(isolate, map, new_map, name, SPECIAL_TRANSITION);
+ return new_map;
+ }
+
+ // Create a new free-floating map only if we are not allowed to store it.
+ Handle<Map> new_map = Copy(isolate, map, "CopyAsElementsKind");
+ new_map->set_elements_kind(kind);
+ return new_map;
+}
+
+Handle<Map> Map::AsLanguageMode(Isolate* isolate, Handle<Map> initial_map,
+ Handle<SharedFunctionInfo> shared_info) {
+ DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
+  // The initial map for a sloppy-mode function is stored in the function
+  // constructor. Initial maps for strict mode are cached as special
+  // transitions, keyed on |strict_function_transition_symbol|.
+ if (is_sloppy(shared_info->language_mode())) return initial_map;
+
+ Handle<Map> function_map(Map::cast(isolate->native_context()->get(
+ shared_info->function_map_index())),
+ isolate);
+
+ STATIC_ASSERT(LanguageModeSize == 2);
+ DCHECK_EQ(LanguageMode::kStrict, shared_info->language_mode());
+ Handle<Symbol> transition_symbol =
+ isolate->factory()->strict_function_transition_symbol();
+ Map maybe_transition = TransitionsAccessor(isolate, initial_map)
+ .SearchSpecial(*transition_symbol);
+ if (!maybe_transition.is_null()) {
+ return handle(maybe_transition, isolate);
+ }
+ initial_map->NotifyLeafMapLayoutChange(isolate);
+
+  // Create a new map, taking the descriptors from |function_map| and all
+  // the other details from |initial_map|.
+ Handle<Map> map =
+ Map::CopyInitialMap(isolate, function_map, initial_map->instance_size(),
+ initial_map->GetInObjectProperties(),
+ initial_map->UnusedPropertyFields());
+ map->SetConstructor(initial_map->GetConstructor());
+ map->set_prototype(initial_map->prototype());
+ map->set_construction_counter(initial_map->construction_counter());
+
+ if (TransitionsAccessor(isolate, initial_map).CanHaveMoreTransitions()) {
+ Map::ConnectTransition(isolate, initial_map, map, transition_symbol,
+ SPECIAL_TRANSITION);
+ }
+ return map;
+}
+
+Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
+ DCHECK(!map->is_prototype_map());
+ Handle<Map> new_map = CopyDropDescriptors(isolate, map);
+
+ if (map->owns_descriptors()) {
+    // If the map owns its descriptors, share them with the new map and
+    // transfer ownership to it. The properties did not change, so the
+    // descriptors can be reused.
+ new_map->InitializeDescriptors(isolate, map->instance_descriptors(),
+ map->GetLayoutDescriptor());
+ } else {
+    // If the map does not own its descriptors, force a split by copying the
+    // map and creating a new descriptor array.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
+ isolate, descriptors, number_of_own_descriptors);
+ Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
+ isolate);
+ new_map->InitializeDescriptors(isolate, *new_descriptors,
+ *new_layout_descriptor);
+ }
+ return new_map;
+}
+
+Handle<Map> Map::Copy(Isolate* isolate, Handle<Map> map, const char* reason) {
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
+ isolate, descriptors, number_of_own_descriptors);
+ Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
+ isolate);
+ return CopyReplaceDescriptors(
+ isolate, map, new_descriptors, new_layout_descriptor, OMIT_TRANSITION,
+ MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
+}
+
+Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
+ Handle<Map> copy =
+ Copy(isolate, handle(isolate->object_function()->initial_map(), isolate),
+ "MapCreate");
+
+  // Check that adding the extra inobject properties does not overflow the
+  // instance size. If it would, allocate as many properties as possible as
+  // inobject properties.
+ if (inobject_properties > JSObject::kMaxInObjectProperties) {
+ inobject_properties = JSObject::kMaxInObjectProperties;
+ }
+
+ int new_instance_size =
+ JSObject::kHeaderSize + kTaggedSize * inobject_properties;
+
+ // Adjust the map with the extra inobject properties.
+ copy->set_instance_size(new_instance_size);
+ copy->SetInObjectPropertiesStartInWords(JSObject::kHeaderSize / kTaggedSize);
+ DCHECK_EQ(copy->GetInObjectProperties(), inobject_properties);
+ copy->SetInObjectUnusedPropertyFields(inobject_properties);
+ copy->set_visitor_id(Map::GetVisitorId(*copy));
+ return copy;
+}
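+
+// A worked example (assuming a 64-bit build where kTaggedSize == 8 and the
+// JSObject header is three tagged words, i.e. JSObject::kHeaderSize == 24):
+// Map::Create(isolate, 4) produces a map with
+//
+//   instance_size == 24 + 8 * 4 == 56 bytes
+//
+// four unused inobject property fields, and the inobject properties
+// starting at word 3.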
+
+Handle<Map> Map::CopyForPreventExtensions(Isolate* isolate, Handle<Map> map,
+ PropertyAttributes attrs_to_add,
+ Handle<Symbol> transition_marker,
+ const char* reason) {
+ int num_descriptors = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
+ isolate, handle(map->instance_descriptors(), isolate), num_descriptors,
+ attrs_to_add);
+ Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
+ isolate);
+ Handle<Map> new_map = CopyReplaceDescriptors(
+ isolate, map, new_desc, new_layout_descriptor, INSERT_TRANSITION,
+ transition_marker, reason, SPECIAL_TRANSITION);
+ new_map->set_is_extensible(false);
+ if (!IsFixedTypedArrayElementsKind(map->elements_kind())) {
+ ElementsKind new_kind = IsStringWrapperElementsKind(map->elements_kind())
+ ? SLOW_STRING_WRAPPER_ELEMENTS
+ : DICTIONARY_ELEMENTS;
+ new_map->set_elements_kind(new_kind);
+ }
+ return new_map;
+}
+
+namespace {
+
+bool CanHoldValue(DescriptorArray descriptors, int descriptor,
+ PropertyConstness constness, Object value) {
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ if (details.location() == kField) {
+ if (details.kind() == kData) {
+ return IsGeneralizableTo(constness, details.constness()) &&
+ value->FitsRepresentation(details.representation()) &&
+ descriptors->GetFieldType(descriptor)->NowContains(value);
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ return false;
+ }
+
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyConstness::kConst, details.constness());
+ if (details.kind() == kData) {
+ DCHECK(!FLAG_track_constant_fields);
+ DCHECK(descriptors->GetStrongValue(descriptor) != value ||
+ value->FitsRepresentation(details.representation()));
+ return descriptors->GetStrongValue(descriptor) == value;
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ return false;
+ }
+ }
+ UNREACHABLE();
+}
+
+Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
+ int descriptor,
+ PropertyConstness constness,
+ Handle<Object> value) {
+ if (CanHoldValue(map->instance_descriptors(), descriptor, constness,
+ *value)) {
+ return map;
+ }
+
+ PropertyAttributes attributes =
+ map->instance_descriptors()->GetDetails(descriptor).attributes();
+ Representation representation = value->OptimalRepresentation();
+ Handle<FieldType> type = value->OptimalType(isolate, representation);
+
+ MapUpdater mu(isolate, map);
+ return mu.ReconfigureToDataField(descriptor, attributes, constness,
+ representation, type);
+}
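+
+// An illustrative case: storing a string into a descriptor whose field has
+// Representation::Double() fails FitsRepresentation(), so CanHoldValue()
+// returns false and the MapUpdater generalizes the field to a
+// representation and field type that also cover the new value.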
+
+} // namespace
+
+// static
+Handle<Map> Map::PrepareForDataProperty(Isolate* isolate, Handle<Map> map,
+ int descriptor,
+ PropertyConstness constness,
+ Handle<Object> value) {
+ // Dictionaries can store any property value.
+ DCHECK(!map->is_dictionary_map());
+ // Update to the newest map before storing the property.
+ return UpdateDescriptorForValue(isolate, Update(isolate, map), descriptor,
+ constness, value);
+}
+
+Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ PropertyConstness constness,
+ StoreOrigin store_origin) {
+ RuntimeCallTimerScope stats_scope(
+ isolate, *map,
+ map->is_prototype_map()
+ ? RuntimeCallCounterId::kPrototypeMap_TransitionToDataProperty
+ : RuntimeCallCounterId::kMap_TransitionToDataProperty);
+
+ DCHECK(name->IsUniqueName());
+ DCHECK(!map->is_dictionary_map());
+
+ // Migrate to the newest map before storing the property.
+ map = Update(isolate, map);
+
+ Map maybe_transition = TransitionsAccessor(isolate, map)
+ .SearchTransition(*name, kData, attributes);
+ if (!maybe_transition.is_null()) {
+ Handle<Map> transition(maybe_transition, isolate);
+ int descriptor = transition->LastAdded();
+
+ DCHECK_EQ(attributes, transition->instance_descriptors()
+ ->GetDetails(descriptor)
+ .attributes());
+
+ return UpdateDescriptorForValue(isolate, transition, descriptor, constness,
+ value);
+ }
+
+ TransitionFlag flag = INSERT_TRANSITION;
+ MaybeHandle<Map> maybe_map;
+ if (!map->TooManyFastProperties(store_origin)) {
+ if (!FLAG_track_constant_fields && value->IsJSFunction()) {
+ maybe_map =
+ Map::CopyWithConstant(isolate, map, name, value, attributes, flag);
+ } else {
+ Representation representation = value->OptimalRepresentation();
+ Handle<FieldType> type = value->OptimalType(isolate, representation);
+ maybe_map = Map::CopyWithField(isolate, map, name, type, attributes,
+ constness, representation, flag);
+ }
+ }
+
+ Handle<Map> result;
+ if (!maybe_map.ToHandle(&result)) {
+ const char* reason = "TooManyFastProperties";
+#if V8_TRACE_MAPS
+ std::unique_ptr<ScopedVector<char>> buffer;
+ if (FLAG_trace_maps) {
+ ScopedVector<char> name_buffer(100);
+ name->NameShortPrint(name_buffer);
+ buffer.reset(new ScopedVector<char>(128));
+ SNPrintF(*buffer, "TooManyFastProperties %s", name_buffer.start());
+ reason = buffer->start();
+ }
+#endif
+ Handle<Object> maybe_constructor(map->GetConstructor(), isolate);
+ if (FLAG_feedback_normalization && map->new_target_is_base() &&
+ maybe_constructor->IsJSFunction() &&
+ !JSFunction::cast(*maybe_constructor)->shared()->native()) {
+ Handle<JSFunction> constructor =
+ Handle<JSFunction>::cast(maybe_constructor);
+ DCHECK_NE(*constructor,
+ constructor->context()->native_context()->object_function());
+ Handle<Map> initial_map(constructor->initial_map(), isolate);
+ result = Map::Normalize(isolate, initial_map, CLEAR_INOBJECT_PROPERTIES,
+ reason);
+ initial_map->DeprecateTransitionTree(isolate);
+ Handle<Object> prototype(result->prototype(), isolate);
+ JSFunction::SetInitialMap(constructor, result, prototype);
+
+ // Deoptimize all code that embeds the previous initial map.
+ initial_map->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kInitialMapChangedGroup);
+ if (!result->EquivalentToForNormalization(*map,
+ CLEAR_INOBJECT_PROPERTIES)) {
+ result =
+ Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, reason);
+ }
+ } else {
+ result = Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, reason);
+ }
+ }
+
+ return result;
+}
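+
+// In outline, TransitionToDataProperty above tries, in order:
+//   1. reusing an existing transition for (name, kData, attributes);
+//   2. extending the map with a new field via CopyWithField (or
+//      CopyWithConstant for function values when constant fields are not
+//      tracked);
+//   3. falling back to a normalized (dictionary) map when the object has
+//      too many fast properties.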
+
+Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
+ int descriptor, PropertyKind kind,
+ PropertyAttributes attributes) {
+ // Dictionaries have to be reconfigured in-place.
+ DCHECK(!map->is_dictionary_map());
+
+ if (!map->GetBackPointer()->IsMap()) {
+    // There is no benefit in reconstructing the transition tree for maps
+    // without back pointers.
+ return CopyGeneralizeAllFields(isolate, map, map->elements_kind(),
+ descriptor, kind, attributes,
+ "GenAll_AttributesMismatchProtoMap");
+ }
+
+ if (FLAG_trace_generalization) {
+ map->PrintReconfiguration(isolate, stdout, descriptor, kind, attributes);
+ }
+
+ MapUpdater mu(isolate, map);
+ DCHECK_EQ(kData, kind); // Only kData case is supported so far.
+ Handle<Map> new_map = mu.ReconfigureToDataField(
+ descriptor, attributes, kDefaultFieldConstness, Representation::None(),
+ FieldType::None(isolate));
+ return new_map;
+}
+
+Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
+ Handle<Name> name, int descriptor,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes) {
+ RuntimeCallTimerScope stats_scope(
+ isolate,
+ map->is_prototype_map()
+ ? RuntimeCallCounterId::kPrototypeMap_TransitionToAccessorProperty
+ : RuntimeCallCounterId::kMap_TransitionToAccessorProperty);
+
+ // At least one of the accessors needs to be a new value.
+ DCHECK(!getter->IsNull(isolate) || !setter->IsNull(isolate));
+ DCHECK(name->IsUniqueName());
+
+ // Dictionary maps can always have additional data properties.
+ if (map->is_dictionary_map()) return map;
+
+ // Migrate to the newest map before transitioning to the new property.
+ map = Update(isolate, map);
+
+ PropertyNormalizationMode mode = map->is_prototype_map()
+ ? KEEP_INOBJECT_PROPERTIES
+ : CLEAR_INOBJECT_PROPERTIES;
+
+ Map maybe_transition = TransitionsAccessor(isolate, map)
+ .SearchTransition(*name, kAccessor, attributes);
+ if (!maybe_transition.is_null()) {
+ Handle<Map> transition(maybe_transition, isolate);
+ DescriptorArray descriptors = transition->instance_descriptors();
+ int descriptor = transition->LastAdded();
+ DCHECK(descriptors->GetKey(descriptor)->Equals(*name));
+
+ DCHECK_EQ(kAccessor, descriptors->GetDetails(descriptor).kind());
+ DCHECK_EQ(attributes, descriptors->GetDetails(descriptor).attributes());
+
+ Handle<Object> maybe_pair(descriptors->GetStrongValue(descriptor), isolate);
+ if (!maybe_pair->IsAccessorPair()) {
+ return Map::Normalize(isolate, map, mode,
+ "TransitionToAccessorFromNonPair");
+ }
+
+ Handle<AccessorPair> pair = Handle<AccessorPair>::cast(maybe_pair);
+ if (!pair->Equals(*getter, *setter)) {
+ return Map::Normalize(isolate, map, mode,
+ "TransitionToDifferentAccessor");
+ }
+
+ return transition;
+ }
+
+ Handle<AccessorPair> pair;
+ DescriptorArray old_descriptors = map->instance_descriptors();
+ if (descriptor != DescriptorArray::kNotFound) {
+ if (descriptor != map->LastAdded()) {
+ return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
+ }
+ PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
+ if (old_details.kind() != kAccessor) {
+ return Map::Normalize(isolate, map, mode,
+ "AccessorsOverwritingNonAccessors");
+ }
+
+ if (old_details.attributes() != attributes) {
+ return Map::Normalize(isolate, map, mode, "AccessorsWithAttributes");
+ }
+
+ Handle<Object> maybe_pair(old_descriptors->GetStrongValue(descriptor),
+ isolate);
+ if (!maybe_pair->IsAccessorPair()) {
+ return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonPair");
+ }
+
+ Handle<AccessorPair> current_pair = Handle<AccessorPair>::cast(maybe_pair);
+ if (current_pair->Equals(*getter, *setter)) return map;
+
+ bool overwriting_accessor = false;
+ if (!getter->IsNull(isolate) &&
+ !current_pair->get(ACCESSOR_GETTER)->IsNull(isolate) &&
+ current_pair->get(ACCESSOR_GETTER) != *getter) {
+ overwriting_accessor = true;
+ }
+ if (!setter->IsNull(isolate) &&
+ !current_pair->get(ACCESSOR_SETTER)->IsNull(isolate) &&
+ current_pair->get(ACCESSOR_SETTER) != *setter) {
+ overwriting_accessor = true;
+ }
+ if (overwriting_accessor) {
+ return Map::Normalize(isolate, map, mode,
+ "AccessorsOverwritingAccessors");
+ }
+
+ pair = AccessorPair::Copy(isolate, Handle<AccessorPair>::cast(maybe_pair));
+ } else if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors ||
+ map->TooManyFastProperties(StoreOrigin::kNamed)) {
+ return Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES,
+ "TooManyAccessors");
+ } else {
+ pair = isolate->factory()->NewAccessorPair();
+ }
+
+ pair->SetComponents(*getter, *setter);
+
+ TransitionFlag flag = INSERT_TRANSITION;
+ Descriptor d = Descriptor::AccessorConstant(name, pair, attributes);
+ return Map::CopyInsertDescriptor(isolate, map, &d, flag);
+}
+
+Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
+ Descriptor* descriptor,
+ TransitionFlag flag) {
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+
+  // Share descriptors only if the map owns its descriptors and is not an
+  // initial map.
+ if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
+ !map->GetBackPointer()->IsUndefined(isolate) &&
+ TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
+ return ShareDescriptor(isolate, map, descriptors, descriptor);
+ }
+
+ int nof = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::CopyUpTo(isolate, descriptors, nof, 1);
+ new_descriptors->Append(descriptor);
+
+ Handle<LayoutDescriptor> new_layout_descriptor =
+ FLAG_unbox_double_fields
+ ? LayoutDescriptor::New(isolate, map, new_descriptors, nof + 1)
+ : handle(LayoutDescriptor::FastPointerLayout(), isolate);
+
+ return CopyReplaceDescriptors(
+ isolate, map, new_descriptors, new_layout_descriptor, flag,
+ descriptor->GetKey(), "CopyAddDescriptor", SIMPLE_PROPERTY_TRANSITION);
+}
+
+Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map,
+ Descriptor* descriptor,
+ TransitionFlag flag) {
+ Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
+
+ // We replace the key if it is already present.
+ int index =
+ old_descriptors->SearchWithCache(isolate, *descriptor->GetKey(), *map);
+ if (index != DescriptorArray::kNotFound) {
+ return CopyReplaceDescriptor(isolate, map, old_descriptors, descriptor,
+ index, flag);
+ }
+ return CopyAddDescriptor(isolate, map, descriptor, flag);
+}
+
+Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ Descriptor* descriptor,
+ int insertion_index,
+ TransitionFlag flag) {
+ Handle<Name> key = descriptor->GetKey();
+ DCHECK_EQ(*key, descriptors->GetKey(insertion_index));
+ // This function does not support replacing property fields as
+ // that would break property field counters.
+ DCHECK_NE(kField, descriptor->GetDetails().location());
+ DCHECK_NE(kField, descriptors->GetDetails(insertion_index).location());
+
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
+ isolate, descriptors, map->NumberOfOwnDescriptors());
+
+ new_descriptors->Replace(insertion_index, descriptor);
+ Handle<LayoutDescriptor> new_layout_descriptor = LayoutDescriptor::New(
+ isolate, map, new_descriptors, new_descriptors->number_of_descriptors());
+
+ SimpleTransitionFlag simple_flag =
+ (insertion_index == descriptors->number_of_descriptors() - 1)
+ ? SIMPLE_PROPERTY_TRANSITION
+ : PROPERTY_TRANSITION;
+ return CopyReplaceDescriptors(isolate, map, new_descriptors,
+ new_layout_descriptor, flag, key,
+ "CopyReplaceDescriptor", simple_flag);
+}
+
+int Map::Hash() {
+  // For performance reasons we only hash the three most variable fields of
+  // a map: constructor, prototype and bit_field2. For predictability we use
+  // objects' offsets within their respective pages for hashing rather than
+  // raw addresses.
+
+ // Shift away the tag.
+ int hash = ObjectAddressForHashing(GetConstructor().ptr()) >> 2;
+
+  // XOR-ing the prototype and constructor directly yields too many zero
+  // bits when the two pointers are close (which is fairly common).
+  // To avoid this we shift the prototype bits relative to the constructor.
+ hash ^= ObjectAddressForHashing(prototype().ptr()) << (32 - kPageSizeBits);
+
+ return hash ^ (hash >> 16) ^ bit_field2();
+}
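+
+// Illustration with made-up offsets: for constructor_offset == 0x1000 and
+// prototype_offset == 0x1040, a direct XOR would leave only a single bit
+// set (0x0040); shifting the prototype offset left by (32 - kPageSizeBits)
+// moves its bits into a different range so the two contributions no longer
+// cancel.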
+
+namespace {
+
+bool CheckEquivalent(const Map first, const Map second) {
+ return first->GetConstructor() == second->GetConstructor() &&
+ first->prototype() == second->prototype() &&
+ first->instance_type() == second->instance_type() &&
+ first->bit_field() == second->bit_field() &&
+ first->is_extensible() == second->is_extensible() &&
+ first->new_target_is_base() == second->new_target_is_base() &&
+ first->has_hidden_prototype() == second->has_hidden_prototype();
+}
+
+} // namespace
+
+bool Map::EquivalentToForTransition(const Map other) const {
+ CHECK_EQ(GetConstructor(), other->GetConstructor());
+ CHECK_EQ(instance_type(), other->instance_type());
+ CHECK_EQ(has_hidden_prototype(), other->has_hidden_prototype());
+
+ if (bit_field() != other->bit_field()) return false;
+ if (new_target_is_base() != other->new_target_is_base()) return false;
+ if (prototype() != other->prototype()) return false;
+ if (instance_type() == JS_FUNCTION_TYPE) {
+    // JSFunctions require more checks to ensure that a sloppy function is
+    // not considered equivalent to a strict function.
+ int nof = Min(NumberOfOwnDescriptors(), other->NumberOfOwnDescriptors());
+ return instance_descriptors()->IsEqualUpTo(other->instance_descriptors(),
+ nof);
+ }
+ return true;
+}
+
+bool Map::EquivalentToForElementsKindTransition(const Map other) const {
+ if (!EquivalentToForTransition(other)) return false;
+#ifdef DEBUG
+  // Ensure that we don't try to generate elements kind transitions from
+  // maps with fields that may be generalized in-place. This must already be
+  // handled during the addition of a new field.
+ DescriptorArray descriptors = instance_descriptors();
+ int nof = NumberOfOwnDescriptors();
+ for (int i = 0; i < nof; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() == kField) {
+ DCHECK(IsMostGeneralFieldType(details.representation(),
+ descriptors->GetFieldType(i)));
+ }
+ }
+#endif
+ return true;
+}
+
+bool Map::EquivalentToForNormalization(const Map other,
+ PropertyNormalizationMode mode) const {
+ int properties =
+ mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other->GetInObjectProperties();
+ return CheckEquivalent(*this, other) && bit_field2() == other->bit_field2() &&
+ GetInObjectProperties() == properties &&
+ JSObject::GetEmbedderFieldCount(*this) ==
+ JSObject::GetEmbedderFieldCount(other);
+}
+
+static void GetMinInobjectSlack(Map map, void* data) {
+ int slack = map->UnusedPropertyFields();
+ if (*reinterpret_cast<int*>(data) > slack) {
+ *reinterpret_cast<int*>(data) = slack;
+ }
+}
+
+int Map::ComputeMinObjectSlack(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+ // Has to be an initial map.
+ DCHECK(GetBackPointer()->IsUndefined(isolate));
+
+ int slack = UnusedPropertyFields();
+ TransitionsAccessor transitions(isolate, *this, &no_gc);
+ transitions.TraverseTransitionTree(&GetMinInobjectSlack, &slack);
+ return slack;
+}
+
+static void ShrinkInstanceSize(Map map, void* data) {
+ int slack = *reinterpret_cast<int*>(data);
+ DCHECK_GE(slack, 0);
+#ifdef DEBUG
+ int old_visitor_id = Map::GetVisitorId(map);
+ int new_unused = map->UnusedPropertyFields() - slack;
+#endif
+ map->set_instance_size(map->InstanceSizeFromSlack(slack));
+ map->set_construction_counter(Map::kNoSlackTracking);
+ DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
+ DCHECK_EQ(new_unused, map->UnusedPropertyFields());
+}
+
+static void StopSlackTracking(Map map, void* data) {
+ map->set_construction_counter(Map::kNoSlackTracking);
+}
+
+void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+ // Has to be an initial map.
+ DCHECK(GetBackPointer()->IsUndefined(isolate));
+
+ int slack = ComputeMinObjectSlack(isolate);
+ TransitionsAccessor transitions(isolate, *this, &no_gc);
+ if (slack != 0) {
+ // Resize the initial map and all maps in its transition tree.
+ transitions.TraverseTransitionTree(&ShrinkInstanceSize, &slack);
+ } else {
+ transitions.TraverseTransitionTree(&StopSlackTracking, nullptr);
+ }
+}
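+
+// Example (illustrative): if an initial map starts with 10 inobject fields
+// and, across the whole transition tree, at most 6 were ever used, then
+// ComputeMinObjectSlack() returns 4 and every map in the tree is shrunk by
+// four words via ShrinkInstanceSize().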
+
+void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
+ int number_of_own_descriptors) {
+ set_synchronized_instance_descriptors(descriptors);
+ SetNumberOfOwnDescriptors(number_of_own_descriptors);
+ MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
+ number_of_own_descriptors);
+}
+
+// static
+Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
+ Isolate* isolate) {
+ Object maybe_proto_info = prototype->map()->prototype_info();
+ if (maybe_proto_info->IsPrototypeInfo()) {
+ return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
+ }
+ Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
+ prototype->map()->set_prototype_info(*proto_info);
+ return proto_info;
+}
+
+// static
+Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
+ Isolate* isolate) {
+ Object maybe_proto_info = prototype_map->prototype_info();
+ if (maybe_proto_info->IsPrototypeInfo()) {
+ return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
+ }
+ Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
+ prototype_map->set_prototype_info(*proto_info);
+ return proto_info;
+}
+
+// static
+void Map::SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
+ Isolate* isolate) {
+ if (value == false && !map->prototype_info()->IsPrototypeInfo()) {
+ // "False" is the implicit default value, so there's nothing to do.
+ return;
+ }
+ GetOrCreatePrototypeInfo(map, isolate)->set_should_be_fast_map(value);
+}
+
+// static
+Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
+ Isolate* isolate) {
+ Handle<Object> maybe_prototype;
+ if (map->IsJSGlobalObjectMap()) {
+ DCHECK(map->is_prototype_map());
+    // The global object is the prototype of a global proxy, so we can use
+    // its validity cell to guard changes to the global object's prototype.
+ maybe_prototype = isolate->global_object();
+ } else {
+ maybe_prototype =
+ handle(map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
+ }
+ if (!maybe_prototype->IsJSObject()) {
+ return handle(Smi::FromInt(Map::kPrototypeChainValid), isolate);
+ }
+ Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
+ // Ensure the prototype is registered with its own prototypes so its cell
+ // will be invalidated when necessary.
+ JSObject::LazyRegisterPrototypeUser(handle(prototype->map(), isolate),
+ isolate);
+
+ Object maybe_cell = prototype->map()->prototype_validity_cell();
+ // Return existing cell if it's still valid.
+ if (maybe_cell->IsCell()) {
+ Handle<Cell> cell(Cell::cast(maybe_cell), isolate);
+ if (cell->value() == Smi::FromInt(Map::kPrototypeChainValid)) {
+ return cell;
+ }
+ }
+ // Otherwise create a new cell.
+ Handle<Cell> cell = isolate->factory()->NewCell(
+ handle(Smi::FromInt(Map::kPrototypeChainValid), isolate));
+ prototype->map()->set_prototype_validity_cell(*cell);
+ return cell;
+}
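+
+// Consumers compare the cell obtained here against the sentinel, e.g.
+// (sketch):
+//
+//   bool valid =
+//       cell->value() == Smi::FromInt(Map::kPrototypeChainValid);
+//
+// Invalidating a prototype chain stores a different Smi into the cell,
+// which IsPrototypeChainInvalidated below checks for.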
+
+// static
+bool Map::IsPrototypeChainInvalidated(Map map) {
+ DCHECK(map->is_prototype_map());
+ Object maybe_cell = map->prototype_validity_cell();
+ if (maybe_cell->IsCell()) {
+ Cell cell = Cell::cast(maybe_cell);
+ return cell->value() != Smi::FromInt(Map::kPrototypeChainValid);
+ }
+ return true;
+}
+
+// static
+void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
+ Handle<Object> prototype,
+ bool enable_prototype_setup_mode) {
+ RuntimeCallTimerScope stats_scope(isolate, *map,
+ RuntimeCallCounterId::kMap_SetPrototype);
+
+ bool is_hidden = false;
+ if (prototype->IsJSObject()) {
+ Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
+ JSObject::OptimizeAsPrototype(prototype_jsobj, enable_prototype_setup_mode);
+
+ Object maybe_constructor = prototype_jsobj->map()->GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
+ Object data = constructor->shared()->function_data();
+ is_hidden = (data->IsFunctionTemplateInfo() &&
+ FunctionTemplateInfo::cast(data)->hidden_prototype()) ||
+ prototype->IsJSGlobalObject();
+ } else if (maybe_constructor->IsFunctionTemplateInfo()) {
+ is_hidden =
+ FunctionTemplateInfo::cast(maybe_constructor)->hidden_prototype() ||
+ prototype->IsJSGlobalObject();
+ }
+ }
+ map->set_has_hidden_prototype(is_hidden);
+
+ WriteBarrierMode wb_mode =
+ prototype->IsNull(isolate) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ map->set_prototype(*prototype, wb_mode);
+}
+
+void Map::StartInobjectSlackTracking() {
+ DCHECK(!IsInobjectSlackTrackingInProgress());
+ if (UnusedPropertyFields() == 0) return;
+ set_construction_counter(Map::kSlackTrackingCounterStart);
+}
+
+Handle<Map> Map::TransitionToPrototype(Isolate* isolate, Handle<Map> map,
+ Handle<Object> prototype) {
+ Handle<Map> new_map =
+ TransitionsAccessor(isolate, map).GetPrototypeTransition(prototype);
+ if (new_map.is_null()) {
+ new_map = Copy(isolate, map, "TransitionToPrototype");
+ TransitionsAccessor(isolate, map)
+ .PutPrototypeTransition(prototype, new_map);
+ Map::SetPrototype(isolate, new_map, prototype);
+ }
+ return new_map;
+}
+
+Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) {
+ Handle<WeakFixedArray> array(
+ isolate->factory()->NewWeakFixedArray(kEntries, TENURED));
+ return Handle<NormalizedMapCache>::cast(array);
+}
+
+MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
+ PropertyNormalizationMode mode) {
+ DisallowHeapAllocation no_gc;
+ MaybeObject value = WeakFixedArray::Get(GetIndex(fast_map));
+ HeapObject heap_object;
+ if (!value->GetHeapObjectIfWeak(&heap_object)) {
+ return MaybeHandle<Map>();
+ }
+
+ Map normalized_map = Map::cast(heap_object);
+ if (!normalized_map->EquivalentToForNormalization(*fast_map, mode)) {
+ return MaybeHandle<Map>();
+ }
+ return handle(normalized_map, GetIsolate());
+}
+
+void NormalizedMapCache::Set(Handle<Map> fast_map, Handle<Map> normalized_map) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(normalized_map->is_dictionary_map());
+ WeakFixedArray::Set(GetIndex(fast_map),
+ HeapObjectReference::Weak(*normalized_map));
+}
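+
+// Typical usage (a sketch; Map::Normalize is the expected caller):
+//
+//   Handle<Map> cached;
+//   if (!cache->Get(fast_map, mode).ToHandle(&cached)) {
+//     // ... normalize |fast_map| and cache the result ...
+//     cache->Set(fast_map, normalized);
+//   }
+//
+// Entries are weak references, so a Get() may miss even after a Set() for
+// the same fast map once the normalized map has been collected.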
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 0880b73b66..7424ee4181 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -18,10 +18,16 @@ namespace internal {
enum InstanceType : uint16_t;
-#define VISITOR_ID_LIST(V) \
+#define DATA_ONLY_VISITOR_ID_LIST(V) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(DataObject) \
+ V(FixedDoubleArray) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString)
+
+#define POINTER_VISITOR_ID_LIST(V) \
V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
V(BytecodeArray) \
V(Cell) \
V(Code) \
@@ -29,14 +35,12 @@ enum InstanceType : uint16_t;
V(ConsString) \
V(Context) \
V(DataHandler) \
- V(DataObject) \
V(DescriptorArray) \
V(EmbedderDataArray) \
V(EphemeronHashTable) \
V(FeedbackCell) \
V(FeedbackVector) \
V(FixedArray) \
- V(FixedDoubleArray) \
V(FixedFloat64Array) \
V(FixedTypedArrayBase) \
V(FreeSpace) \
@@ -47,7 +51,6 @@ enum InstanceType : uint16_t;
V(JSObject) \
V(JSObjectFast) \
V(JSTypedArray) \
- V(JSWeakCell) \
V(JSWeakRef) \
V(JSWeakCollection) \
V(Map) \
@@ -57,8 +60,6 @@ enum InstanceType : uint16_t;
V(PropertyArray) \
V(PropertyCell) \
V(PrototypeInfo) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
V(SharedFunctionInfo) \
V(ShortcutCandidate) \
V(SlicedString) \
@@ -72,24 +73,26 @@ enum InstanceType : uint16_t;
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
V(WasmInstanceObject) \
- V(WeakArray)
-
-// For data objects, JS objects and structs along with generic visitor which
-// can visit object of any size we provide visitors specialized by
-// object size in words.
-// Ids of specialized visitors are declared in a linear order (without
-// holes) starting from the id of visitor specialized for 2 words objects
-// (base visitor id) and ending with the id of generic visitor.
-// Method GetVisitorIdForSize depends on this ordering to calculate visitor
-// id of specialized visitor from given instance size, base visitor id and
-// generic visitor's id.
+ V(WeakArray) \
+ V(WeakCell)
+
+// Objects with the same visitor id are processed in the same way by
+// the heap visitors. The visitor ids for data-only objects must precede
+// the other visitor ids. We rely on kDataOnlyVisitorIdCount for a quick
+// check of whether an object contains only data or may contain pointers.
enum VisitorId {
#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
- VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+ DATA_ONLY_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL) kDataOnlyVisitorIdCount,
+ POINTER_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
#undef VISITOR_ID_ENUM_DECL
kVisitorIdCount
};
+enum class ObjectFields {
+ kDataOnly,
+ kMaybePointers,
+};
+
typedef std::vector<Handle<Map>> MapHandles;
// All heap objects have a Map that describes their structure.
@@ -272,6 +275,10 @@ class Map : public HeapObject {
//
DECL_PRIMITIVE_ACCESSORS(bit_field3, uint32_t)
+  // Clear uninitialized padding space. This ensures that the snapshot content
+  // is deterministic. Depending on the V8 build mode there may be no padding.
+ V8_INLINE void clear_padding();
+
// Bit positions for |bit_field3|.
#define MAP_BIT_FIELD3_FIELDS(V, _) \
V(EnumLengthBits, int, kDescriptorIndexBitCount, _) \
@@ -501,11 +508,10 @@ class Map : public HeapObject {
int modify_index, PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type);
- // Returns true if |descriptor|'th property is a field that may be generalized
- // by just updating current map.
- static inline bool IsInplaceGeneralizableField(PropertyConstness constness,
- Representation representation,
- FieldType field_type);
+  // Returns true if the |field_type| is the most general one for the
+  // given |representation|.
+ static inline bool IsMostGeneralFieldType(Representation representation,
+ FieldType field_type);
// Generalizes constness, representation and field_type if objects with given
// instance type can have fast elements that can be transitioned by stubs or
@@ -806,6 +812,12 @@ class Map : public HeapObject {
DECL_PRIMITIVE_ACCESSORS(visitor_id, VisitorId)
+ static ObjectFields ObjectFieldsFrom(VisitorId visitor_id) {
+ return (visitor_id < kDataOnlyVisitorIdCount)
+ ? ObjectFields::kDataOnly
+ : ObjectFields::kMaybePointers;
+ }
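+
+  // For example, ObjectFieldsFrom(kVisitSeqOneByteString) is
+  // ObjectFields::kDataOnly, while ObjectFieldsFrom(kVisitJSObject) is
+  // ObjectFields::kMaybePointers.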
+
static Handle<Map> TransitionToPrototype(Isolate* isolate, Handle<Map> map,
Handle<Object> prototype);
@@ -815,29 +827,29 @@ class Map : public HeapObject {
static const int kMaxPreAllocatedPropertyFields = 255;
// Layout description.
-#define MAP_FIELDS(V) \
- /* Raw data fields. */ \
- V(kInstanceSizeInWordsOffset, kUInt8Size) \
- V(kInObjectPropertiesStartOrConstructorFunctionIndexOffset, kUInt8Size) \
- V(kUsedOrUnusedInstanceSizeInWordsOffset, kUInt8Size) \
- V(kVisitorIdOffset, kUInt8Size) \
- V(kInstanceTypeOffset, kUInt16Size) \
- V(kBitFieldOffset, kUInt8Size) \
- V(kBitField2Offset, kUInt8Size) \
- V(kBitField3Offset, kUInt32Size) \
- V(k64BitArchPaddingOffset, \
- kSystemPointerSize == kUInt32Size ? 0 : kUInt32Size) \
- /* Pointer fields. */ \
- V(kPointerFieldsBeginOffset, 0) \
- V(kPrototypeOffset, kTaggedSize) \
- V(kConstructorOrBackPointerOffset, kTaggedSize) \
- V(kTransitionsOrPrototypeInfoOffset, kTaggedSize) \
- V(kDescriptorsOffset, kTaggedSize) \
- V(kLayoutDescriptorOffset, FLAG_unbox_double_fields ? kTaggedSize : 0) \
- V(kDependentCodeOffset, kTaggedSize) \
- V(kPrototypeValidityCellOffset, kTaggedSize) \
- V(kPointerFieldsEndOffset, 0) \
- /* Total size. */ \
+#define MAP_FIELDS(V) \
+ /* Raw data fields. */ \
+ V(kInstanceSizeInWordsOffset, kUInt8Size) \
+ V(kInObjectPropertiesStartOrConstructorFunctionIndexOffset, kUInt8Size) \
+ V(kUsedOrUnusedInstanceSizeInWordsOffset, kUInt8Size) \
+ V(kVisitorIdOffset, kUInt8Size) \
+ V(kInstanceTypeOffset, kUInt16Size) \
+ V(kBitFieldOffset, kUInt8Size) \
+ V(kBitField2Offset, kUInt8Size) \
+ V(kBitField3Offset, kUInt32Size) \
+ /* Adds padding to make tagged fields kTaggedSize-aligned. */ \
+ V(kOptionalPaddingOffset, OBJECT_POINTER_PADDING(kOptionalPaddingOffset)) \
+ /* Pointer fields. */ \
+ V(kPointerFieldsBeginOffset, 0) \
+ V(kPrototypeOffset, kTaggedSize) \
+ V(kConstructorOrBackPointerOffset, kTaggedSize) \
+ V(kTransitionsOrPrototypeInfoOffset, kTaggedSize) \
+ V(kDescriptorsOffset, kTaggedSize) \
+ V(kLayoutDescriptorOffset, FLAG_unbox_double_fields ? kTaggedSize : 0) \
+ V(kDependentCodeOffset, kTaggedSize) \
+ V(kPrototypeValidityCellOffset, kTaggedSize) \
+ V(kPointerFieldsEndOffset, 0) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
@@ -997,12 +1009,11 @@ class NormalizedMapCache : public WeakFixedArray {
void Set(Handle<Map> fast_map, Handle<Map> normalized_map);
DECL_CAST(NormalizedMapCache)
-
- static inline bool IsNormalizedMapCache(const HeapObject obj);
-
DECL_VERIFIER(NormalizedMapCache)
private:
+ friend bool HeapObject::IsNormalizedMapCache() const;
+
static const int kEntries = 64;
static inline int GetIndex(Handle<Map> map);
@@ -1011,7 +1022,7 @@ class NormalizedMapCache : public WeakFixedArray {
Object get(int index);
void set(int index, Object value);
- OBJECT_CONSTRUCTORS(NormalizedMapCache, WeakFixedArray)
+ OBJECT_CONSTRUCTORS(NormalizedMapCache, WeakFixedArray);
};
} // namespace internal
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index 8c1023665a..72c328d29b 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -10,8 +10,9 @@
#ifdef V8_COMPRESS_POINTERS
#include "src/isolate.h"
#endif
-#include "src/objects-inl.h"
+#include "src/objects/heap-object-inl.h"
#include "src/objects/slots-inl.h"
+#include "src/objects/smi-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/objects/microtask-inl.h b/deps/v8/src/objects/microtask-inl.h
index fbd8142ebd..036b227056 100644
--- a/deps/v8/src/objects/microtask-inl.h
+++ b/deps/v8/src/objects/microtask-inl.h
@@ -7,8 +7,10 @@
#include "src/objects/microtask.h"
+#include "src/contexts-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/foreign-inl.h"
+#include "src/objects/js-objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
index a19eea500f..b3c81533a6 100644
--- a/deps/v8/src/objects/microtask.h
+++ b/deps/v8/src/objects/microtask.h
@@ -34,15 +34,8 @@ class CallbackTask : public Microtask {
DECL_ACCESSORS(callback, Foreign)
DECL_ACCESSORS(data, Foreign)
-// Layout description.
-#define CALLBACK_TASK_FIELDS(V) \
- V(kCallbackOffset, kTaggedSize) \
- V(kDataOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize, CALLBACK_TASK_FIELDS)
-#undef CALLBACK_TASK_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize,
+ TORQUE_GENERATED_CALLBACK_TASK_FIELDS)
// Dispatched behavior.
DECL_CAST(CallbackTask)
@@ -60,15 +53,8 @@ class CallableTask : public Microtask {
DECL_ACCESSORS(callable, JSReceiver)
DECL_ACCESSORS(context, Context)
-// Layout description.
-#define CALLABLE_TASK_FIELDS(V) \
- V(kCallableOffset, kTaggedSize) \
- V(kContextOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize, CALLABLE_TASK_FIELDS)
-#undef CALLABLE_TASK_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize,
+ TORQUE_GENERATED_CALLABLE_TASK_FIELDS)
// Dispatched behavior.
DECL_CAST(CallableTask)
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 5ac05478b8..4e2ae75b06 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -165,6 +165,7 @@ Handle<Object> Module::LoadVariable(Isolate* isolate, Handle<Module> module,
void Module::StoreVariable(Handle<Module> module, int cell_index,
Handle<Object> value) {
+ DisallowHeapAllocation no_gc;
DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
ModuleDescriptor::kExport);
module->GetCell(cell_index)->set_value(*value);
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index ec63ddb640..5137d92351 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -333,20 +333,8 @@ class ModuleInfoEntry : public Struct {
int module_request, int cell_index,
int beg_pos, int end_pos);
-// Layout description.
-#define MODULE_INFO_FIELDS(V) \
- V(kExportNameOffset, kTaggedSize) \
- V(kLocalNameOffset, kTaggedSize) \
- V(kImportNameOffset, kTaggedSize) \
- V(kModuleRequestOffset, kTaggedSize) \
- V(kCellIndexOffset, kTaggedSize) \
- V(kBegPosOffset, kTaggedSize) \
- V(kEndPosOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, MODULE_INFO_FIELDS)
-#undef MODULE_INFO_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ TORQUE_GENERATED_MODULE_INFO_ENTRY_FIELDS)
OBJECT_CONSTRUCTORS(ModuleInfoEntry, Struct);
};
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index 3fda66f2a1..af1724b76d 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -7,8 +7,7 @@
#include "src/objects/name.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap-write-barrier.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/map-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -47,16 +46,18 @@ void Symbol::set_is_private_name() {
bool Name::IsUniqueName() const {
uint32_t type = map()->instance_type();
- return (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
- (kStringTag | kNotInternalizedTag);
+ bool result = (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
+ (kStringTag | kNotInternalizedTag);
+ SLOW_DCHECK(result == HeapObject::IsUniqueName());
+ return result;
}
uint32_t Name::hash_field() {
- return READ_UINT32_FIELD(this, kHashFieldOffset);
+ return READ_UINT32_FIELD(*this, kHashFieldOffset);
}
void Name::set_hash_field(uint32_t value) {
- WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
+ WRITE_UINT32_FIELD(*this, kHashFieldOffset, value);
}
bool Name::Equals(Name other) {
@@ -89,10 +90,7 @@ uint32_t Name::Hash() {
uint32_t field = hash_field();
if (IsHashFieldComputed(field)) return field >> kHashShift;
// Slow case: compute hash code and set it. Has to be a string.
- // Also the string must be writable, because read-only strings will have their
- // hash values precomputed.
- return String::cast(*this)->ComputeAndSetHash(
- Heap::FromWritableHeapObject(*this)->isolate());
+ return String::cast(*this)->ComputeAndSetHash();
}
bool Name::IsInterestingSymbol() const {
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index a125251d63..3b15d63e09 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -25,7 +25,7 @@
const Type* operator->() const { return this; } \
\
protected: \
- explicit inline Type(Address ptr);
+ explicit inline Type(Address ptr)
#define OBJECT_CONSTRUCTORS_IMPL(Type, Super) \
inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Type()); }
@@ -80,38 +80,38 @@
#define CAST_ACCESSOR(Type) \
Type Type::cast(Object object) { return Type(object.ptr()); }
-#define INT_ACCESSORS(holder, name, offset) \
- int holder::name() const { return READ_INT_FIELD(this, offset); } \
- void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
+#define INT_ACCESSORS(holder, name, offset) \
+ int holder::name() const { return READ_INT_FIELD(*this, offset); } \
+ void holder::set_##name(int value) { WRITE_INT_FIELD(*this, offset, value); }
-#define INT32_ACCESSORS(holder, name, offset) \
- int32_t holder::name() const { return READ_INT32_FIELD(this, offset); } \
- void holder::set_##name(int32_t value) { \
- WRITE_INT32_FIELD(this, offset, value); \
+#define INT32_ACCESSORS(holder, name, offset) \
+ int32_t holder::name() const { return READ_INT32_FIELD(*this, offset); } \
+ void holder::set_##name(int32_t value) { \
+ WRITE_INT32_FIELD(*this, offset, value); \
}
#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
int32_t holder::name() const { \
- return RELAXED_READ_INT32_FIELD(this, offset); \
+ return RELAXED_READ_INT32_FIELD(*this, offset); \
} \
void holder::set_##name(int32_t value) { \
- RELAXED_WRITE_INT32_FIELD(this, offset, value); \
+ RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
}
-#define UINT16_ACCESSORS(holder, name, offset) \
- uint16_t holder::name() const { return READ_UINT16_FIELD(this, offset); } \
- void holder::set_##name(int value) { \
- DCHECK_GE(value, 0); \
- DCHECK_LE(value, static_cast<uint16_t>(-1)); \
- WRITE_UINT16_FIELD(this, offset, value); \
+#define UINT16_ACCESSORS(holder, name, offset) \
+ uint16_t holder::name() const { return READ_UINT16_FIELD(*this, offset); } \
+ void holder::set_##name(int value) { \
+ DCHECK_GE(value, 0); \
+ DCHECK_LE(value, static_cast<uint16_t>(-1)); \
+ WRITE_UINT16_FIELD(*this, offset, value); \
}
-#define UINT8_ACCESSORS(holder, name, offset) \
- uint8_t holder::name() const { return READ_UINT8_FIELD(this, offset); } \
- void holder::set_##name(int value) { \
- DCHECK_GE(value, 0); \
- DCHECK_LE(value, static_cast<uint8_t>(-1)); \
- WRITE_UINT8_FIELD(this, offset, value); \
+#define UINT8_ACCESSORS(holder, name, offset) \
+ uint8_t holder::name() const { return READ_UINT8_FIELD(*this, offset); } \
+ void holder::set_##name(int value) { \
+ DCHECK_GE(value, 0); \
+ DCHECK_LE(value, static_cast<uint8_t>(-1)); \
+ WRITE_UINT8_FIELD(*this, offset, value); \
}
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
@@ -240,7 +240,7 @@
RELAXED_WRITE_INT16_FIELD(*this, offset, value); \
}
-#define FIELD_ADDR(p, offset) ((p)->ptr() + offset - kHeapObjectTag)
+#define FIELD_ADDR(p, offset) ((p).ptr() + offset - kHeapObjectTag)
#define READ_FIELD(p, offset) (*ObjectSlot(FIELD_ADDR(p, offset)))
@@ -278,21 +278,21 @@
#define WRITE_BARRIER(object, offset, value) \
do { \
- DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
MarkingBarrier(object, (object)->RawField(offset), value); \
GenerationalBarrier(object, (object)->RawField(offset), value); \
} while (false)
#define WEAK_WRITE_BARRIER(object, offset, value) \
do { \
- DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
GenerationalBarrier(object, (object)->RawMaybeWeakField(offset), value); \
} while (false)
#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
do { \
- DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
MarkingBarrier(object, (object)->RawField(offset), value); \
@@ -303,7 +303,7 @@
#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
do { \
- DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index 554686a4b5..fbd5a1b2c6 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -7,7 +7,10 @@
#include "src/objects/oddball.h"
+#include "src/handles.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/string-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -20,30 +23,37 @@ OBJECT_CONSTRUCTORS_IMPL(Oddball, HeapObject)
CAST_ACCESSOR(Oddball)
double Oddball::to_number_raw() const {
- return READ_DOUBLE_FIELD(this, kToNumberRawOffset);
+ return READ_DOUBLE_FIELD(*this, kToNumberRawOffset);
}
void Oddball::set_to_number_raw(double value) {
- WRITE_DOUBLE_FIELD(this, kToNumberRawOffset, value);
+ WRITE_DOUBLE_FIELD(*this, kToNumberRawOffset, value);
}
void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
- WRITE_UINT64_FIELD(this, kToNumberRawOffset, bits);
+ WRITE_UINT64_FIELD(*this, kToNumberRawOffset, bits);
}
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
-byte Oddball::kind() const { return Smi::ToInt(READ_FIELD(this, kKindOffset)); }
+byte Oddball::kind() const {
+ return Smi::ToInt(READ_FIELD(*this, kKindOffset));
+}
void Oddball::set_kind(byte value) {
- WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
+ WRITE_FIELD(*this, kKindOffset, Smi::FromInt(value));
}
// static
Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
- return handle(input->to_number(), isolate);
+ return Handle<Object>(input->to_number(), isolate);
+}
+
+bool HeapObject::IsBoolean() const {
+ return IsOddball() &&
+ ((Oddball::cast(*this)->kind() & Oddball::kNotBooleanMask) == 0);
}
} // namespace internal
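
The new HeapObject::IsBoolean above leans on the oddball kind encoding: the two boolean oddballs occupy the lowest kind values, so masking away the low bit yields zero exactly for true and false. A self-contained sketch of the bit trick (constants chosen to match V8's Oddball::kFalse/kTrue/kNotBooleanMask encoding; illustrative, not V8 code):

#include <cstdint>

constexpr uint8_t kFalse = 0;
constexpr uint8_t kTrue = 1;
constexpr uint8_t kNull = 3;  // any non-boolean oddball kind
constexpr uint8_t kNotBooleanMask = static_cast<uint8_t>(~1);

constexpr bool IsBooleanKind(uint8_t kind) {
  // Zero iff kind is 0 or 1, i.e. iff the oddball is a boolean.
  return (kind & kNotBooleanMask) == 0;
}

static_assert(IsBooleanKind(kFalse) && IsBooleanKind(kTrue), "");
static_assert(!IsBooleanKind(kNull), "");
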
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 277c033994..95ddf06132 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -8,6 +8,7 @@
#include "src/objects/ordered-hash-table.h"
#include "src/heap/heap.h"
+#include "src/objects-inl.h"
#include "src/objects/fixed-array-inl.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index b4120643e3..3581b344d4 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -4,6 +4,7 @@
#include "src/objects/ordered-hash-table.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/js-collection-inl.h"
@@ -70,8 +71,9 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Clear(
Isolate* isolate, Handle<Derived> table) {
DCHECK(!table->IsObsolete());
- Handle<Derived> new_table = Allocate(
- isolate, kMinCapacity, Heap::InNewSpace(*table) ? NOT_TENURED : TENURED);
+ Handle<Derived> new_table =
+ Allocate(isolate, kMinCapacity,
+ Heap::InYoungGeneration(*table) ? NOT_TENURED : TENURED);
table->SetNextTable(*new_table);
table->SetNumberOfDeletedElements(kClearedTableSentinel);
@@ -187,7 +189,8 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
DCHECK(!table->IsObsolete());
Handle<Derived> new_table = Derived::Allocate(
- isolate, new_capacity, Heap::InNewSpace(*table) ? NOT_TENURED : TENURED);
+ isolate, new_capacity,
+ Heap::InYoungGeneration(*table) ? NOT_TENURED : TENURED);
int nof = table->NumberOfElements();
int nod = table->NumberOfDeletedElements();
int new_buckets = new_table->NumberOfBuckets();
@@ -508,7 +511,7 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
memset(reinterpret_cast<byte*>(hashtable_start), kNotFound,
num_buckets + num_chains);
- if (Heap::InNewSpace(*this)) {
+ if (Heap::InYoungGeneration(*this)) {
MemsetTagged(RawField(DataTableStartOffset()),
ReadOnlyRoots(isolate).the_hole_value(),
capacity * Derived::kEntrySize);
@@ -728,7 +731,8 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
DCHECK_GE(kMaxCapacity, new_capacity);
Handle<Derived> new_table = SmallOrderedHashTable<Derived>::Allocate(
- isolate, new_capacity, Heap::InNewSpace(*table) ? NOT_TENURED : TENURED);
+ isolate, new_capacity,
+ Heap::InYoungGeneration(*table) ? NOT_TENURED : TENURED);
int nof = table->NumberOfElements();
int nod = table->NumberOfDeletedElements();
int new_entry = 0;
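
All three call sites above share one change: the tenuring decision now uses Heap::InYoungGeneration, the renamed successor of Heap::InNewSpace, reflecting that the young generation need not be a semi-space "new space". The logic itself is unchanged and reduces to the following shape (stand-in types, not V8's API):

enum PretenureFlag { NOT_TENURED, TENURED };

struct Table { bool in_young_generation; };

Table* Allocate(int capacity, PretenureFlag pretenure) {
  // Real code would pick young vs. old space based on |pretenure|.
  return new Table{pretenure == NOT_TENURED && capacity >= 0};
}

Table* Rehash(Table* table, int new_capacity) {
  // A table that already survived into the old generation is probably
  // long-lived, so tenure its replacement immediately rather than making
  // it age through the nursery again.
  PretenureFlag pretenure =
      table->in_young_generation ? NOT_TENURED : TENURED;
  return Allocate(new_capacity, pretenure);
}
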
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 6e938d53b2..1bdab1a73e 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -222,7 +222,7 @@ class OrderedHashTable : public FixedArray {
return set(RemovedHolesIndex() + index, Smi::FromInt(removed_index));
}
- OBJECT_CONSTRUCTORS(OrderedHashTable, FixedArray)
+ OBJECT_CONSTRUCTORS(OrderedHashTable, FixedArray);
private:
friend class OrderedNameDictionaryHandler;
@@ -248,7 +248,7 @@ class OrderedHashSet : public OrderedHashTable<OrderedHashSet, 1> {
static inline bool Is(Handle<HeapObject> table);
static const int kPrefixSize = 0;
- OBJECT_CONSTRUCTORS(OrderedHashSet, OrderedHashTable<OrderedHashSet, 1>)
+ OBJECT_CONSTRUCTORS(OrderedHashSet, OrderedHashTable<OrderedHashSet, 1>);
};
class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
@@ -279,7 +279,7 @@ class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
static const int kValueOffset = 1;
static const int kPrefixSize = 0;
- OBJECT_CONSTRUCTORS(OrderedHashMap, OrderedHashTable<OrderedHashMap, 2>)
+ OBJECT_CONSTRUCTORS(OrderedHashMap, OrderedHashTable<OrderedHashMap, 2>);
};
// This is similar to the OrderedHashTable, except for the memory
@@ -406,7 +406,7 @@ class SmallOrderedHashTable : public HeapObject {
Object KeyAt(int entry) const {
DCHECK_LT(entry, Capacity());
Offset entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
- return READ_FIELD(this, entry_offset);
+ return READ_FIELD(*this, entry_offset);
}
DECL_VERIFIER(SmallOrderedHashTable)
@@ -449,7 +449,7 @@ class SmallOrderedHashTable : public HeapObject {
}
Address GetHashTableStartAddress(int capacity) const {
- return FIELD_ADDR(this,
+ return FIELD_ADDR(*this,
DataTableStartOffset() + DataTableSizeFor(capacity));
}
@@ -491,7 +491,7 @@ class SmallOrderedHashTable : public HeapObject {
DCHECK_LT(entry, Capacity());
DCHECK_LE(static_cast<unsigned>(relative_index), Derived::kEntrySize);
Offset entry_offset = GetDataEntryOffset(entry, relative_index);
- return READ_FIELD(this, entry_offset);
+ return READ_FIELD(*this, entry_offset);
}
int HashToBucket(int hash) const { return hash & (NumberOfBuckets() - 1); }
@@ -542,13 +542,13 @@ class SmallOrderedHashTable : public HeapObject {
byte getByte(Offset offset, ByteIndex index) const {
DCHECK(offset < DataTableStartOffset() ||
offset >= GetBucketsStartOffset());
- return READ_BYTE_FIELD(this, offset + (index * kOneByteSize));
+ return READ_BYTE_FIELD(*this, offset + (index * kOneByteSize));
}
void setByte(Offset offset, ByteIndex index, byte value) {
DCHECK(offset < DataTableStartOffset() ||
offset >= GetBucketsStartOffset());
- WRITE_BYTE_FIELD(this, offset + (index * kOneByteSize), value);
+ WRITE_BYTE_FIELD(*this, offset + (index * kOneByteSize), value);
}
Offset GetDataEntryOffset(int entry, int relative_index) const {
@@ -571,7 +571,7 @@ class SmallOrderedHashTable : public HeapObject {
friend class OrderedNameDictionaryHandler;
friend class CodeStubAssembler;
- OBJECT_CONSTRUCTORS(SmallOrderedHashTable, HeapObject)
+ OBJECT_CONSTRUCTORS(SmallOrderedHashTable, HeapObject);
};
class SmallOrderedHashSet : public SmallOrderedHashTable<SmallOrderedHashSet> {
@@ -597,7 +597,7 @@ class SmallOrderedHashSet : public SmallOrderedHashTable<SmallOrderedHashSet> {
Handle<SmallOrderedHashSet> table,
int new_capacity);
OBJECT_CONSTRUCTORS(SmallOrderedHashSet,
- SmallOrderedHashTable<SmallOrderedHashSet>)
+ SmallOrderedHashTable<SmallOrderedHashSet>);
};
class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
@@ -627,7 +627,7 @@ class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
int new_capacity);
OBJECT_CONSTRUCTORS(SmallOrderedHashMap,
- SmallOrderedHashTable<SmallOrderedHashMap>)
+ SmallOrderedHashTable<SmallOrderedHashMap>);
};
// TODO(gsathya): Rename this to OrderedHashTable, after we rename
@@ -713,7 +713,7 @@ class OrderedNameDictionary
static const int kPrefixSize = 1;
OBJECT_CONSTRUCTORS(OrderedNameDictionary,
- OrderedHashTable<OrderedNameDictionary, 3>)
+ OrderedHashTable<OrderedNameDictionary, 3>);
};
class OrderedNameDictionaryHandler
@@ -807,7 +807,7 @@ class SmallOrderedNameDictionary
static inline RootIndex GetMapRootIndex();
OBJECT_CONSTRUCTORS(SmallOrderedNameDictionary,
- SmallOrderedHashTable<SmallOrderedNameDictionary>)
+ SmallOrderedHashTable<SmallOrderedNameDictionary>);
};
class JSCollectionIterator : public JSObject {
diff --git a/deps/v8/src/objects/promise-inl.h b/deps/v8/src/objects/promise-inl.h
index abd9fa3e0a..6807ac88f4 100644
--- a/deps/v8/src/objects/promise-inl.h
+++ b/deps/v8/src/objects/promise-inl.h
@@ -43,10 +43,10 @@ ACCESSORS(PromiseResolveThenableJobTask, then, JSReceiver, kThenOffset)
ACCESSORS(PromiseResolveThenableJobTask, thenable, JSReceiver, kThenableOffset)
ACCESSORS(PromiseReactionJobTask, context, Context, kContextOffset)
-ACCESSORS(PromiseReactionJobTask, argument, Object, kArgumentOffset);
-ACCESSORS(PromiseReactionJobTask, handler, HeapObject, kHandlerOffset);
+ACCESSORS(PromiseReactionJobTask, argument, Object, kArgumentOffset)
+ACCESSORS(PromiseReactionJobTask, handler, HeapObject, kHandlerOffset)
ACCESSORS(PromiseReactionJobTask, promise_or_capability, HeapObject,
- kPromiseOrCapabilityOffset);
+ kPromiseOrCapabilityOffset)
ACCESSORS(PromiseCapability, promise, HeapObject, kPromiseOffset)
ACCESSORS(PromiseCapability, resolve, Object, kResolveOffset)
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
index 0504eb0537..122ee7157f 100644
--- a/deps/v8/src/objects/promise.h
+++ b/deps/v8/src/objects/promise.h
@@ -50,7 +50,7 @@ class PromiseReactionJobTask : public Microtask {
DECL_CAST(PromiseReactionJobTask)
DECL_VERIFIER(PromiseReactionJobTask)
- OBJECT_CONSTRUCTORS(PromiseReactionJobTask, Microtask)
+ OBJECT_CONSTRUCTORS(PromiseReactionJobTask, Microtask);
};
// Struct to hold state required for a PromiseReactionJob of type "Fulfill".
@@ -61,7 +61,7 @@ class PromiseFulfillReactionJobTask : public PromiseReactionJobTask {
DECL_PRINTER(PromiseFulfillReactionJobTask)
DECL_VERIFIER(PromiseFulfillReactionJobTask)
- OBJECT_CONSTRUCTORS(PromiseFulfillReactionJobTask, PromiseReactionJobTask)
+ OBJECT_CONSTRUCTORS(PromiseFulfillReactionJobTask, PromiseReactionJobTask);
};
// Struct to hold state required for a PromiseReactionJob of type "Reject".
@@ -72,7 +72,7 @@ class PromiseRejectReactionJobTask : public PromiseReactionJobTask {
DECL_PRINTER(PromiseRejectReactionJobTask)
DECL_VERIFIER(PromiseRejectReactionJobTask)
- OBJECT_CONSTRUCTORS(PromiseRejectReactionJobTask, PromiseReactionJobTask)
+ OBJECT_CONSTRUCTORS(PromiseRejectReactionJobTask, PromiseReactionJobTask);
};
// A container struct to hold state required for PromiseResolveThenableJob.
@@ -101,7 +101,7 @@ class PromiseResolveThenableJobTask : public Microtask {
DECL_PRINTER(PromiseResolveThenableJobTask)
DECL_VERIFIER(PromiseResolveThenableJobTask)
- OBJECT_CONSTRUCTORS(PromiseResolveThenableJobTask, Microtask)
+ OBJECT_CONSTRUCTORS(PromiseResolveThenableJobTask, Microtask);
};
// Struct to hold the state of a PromiseCapability.
@@ -111,16 +111,8 @@ class PromiseCapability : public Struct {
DECL_ACCESSORS(resolve, Object)
DECL_ACCESSORS(reject, Object)
-// Layout description.
-#define PROMISE_CAPABILITY_FIELDS(V) \
- V(kPromiseOffset, kTaggedSize) \
- V(kResolveOffset, kTaggedSize) \
- V(kRejectOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, PROMISE_CAPABILITY_FIELDS)
-#undef PROMISE_CAPABILITY_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ TORQUE_GENERATED_PROMISE_CAPABILITY_FIELDS)
// Dispatched behavior.
DECL_CAST(PromiseCapability)
@@ -158,17 +150,8 @@ class PromiseReaction : public Struct {
// a PromiseCapability (general case), or undefined (in case of await).
DECL_ACCESSORS(promise_or_capability, HeapObject)
-// Layout description.
-#define PROMISE_REACTION_FIELDS(V) \
- V(kNextOffset, kTaggedSize) \
- V(kRejectHandlerOffset, kTaggedSize) \
- V(kFulfillHandlerOffset, kTaggedSize) \
- V(kPromiseOrCapabilityOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, PROMISE_REACTION_FIELDS)
-#undef PROMISE_REACTION_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ TORQUE_GENERATED_PROMISE_REACTION_FIELDS)
// Dispatched behavior.
DECL_CAST(PromiseReaction)
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index b9785c563f..fa3f4ccde2 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -8,6 +8,7 @@
#include "src/objects/property-array.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/smi-inl.h"
diff --git a/deps/v8/src/objects/property-cell-inl.h b/deps/v8/src/objects/property-cell-inl.h
index 6b8e396cd4..d6600234ad 100644
--- a/deps/v8/src/objects/property-cell-inl.h
+++ b/deps/v8/src/objects/property-cell-inl.h
@@ -7,8 +7,8 @@
#include "src/objects/property-cell.h"
-#include "src/heap/heap-inl.h"
-#include "src/objects/code.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/code-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/property-descriptor-object.h b/deps/v8/src/objects/property-descriptor-object.h
index 8c2628d131..7c90369be7 100644
--- a/deps/v8/src/objects/property-descriptor-object.h
+++ b/deps/v8/src/objects/property-descriptor-object.h
@@ -56,7 +56,7 @@ class PropertyDescriptorObject : public FixedArray {
static const int kSetOffset =
FixedArray::OffsetOfElementAt(PropertyDescriptorObject::kSetIndex);
- OBJECT_CONSTRUCTORS(PropertyDescriptorObject, FixedArray)
+ OBJECT_CONSTRUCTORS(PropertyDescriptorObject, FixedArray);
};
} // namespace internal
diff --git a/deps/v8/src/objects/prototype-info-inl.h b/deps/v8/src/objects/prototype-info-inl.h
index 7bb8ed109c..ddcb50fe90 100644
--- a/deps/v8/src/objects/prototype-info-inl.h
+++ b/deps/v8/src/objects/prototype-info-inl.h
@@ -8,8 +8,11 @@
#include "src/objects/prototype-info.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects/map.h"
+#include "src/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/map-inl.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/struct-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 7a34830ee7..b9084137a4 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -80,11 +80,6 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
break;
}
}
- DCHECK(module_vars_count == 0 || scope->is_module_scope());
-
- // Make sure we allocate the correct amount.
- DCHECK_EQ(scope->ContextLocalCount(), context_local_count);
-
// Determine use and location of the "this" binding if it is present.
VariableAllocationInfo receiver_info;
if (scope->is_declaration_scope() &&
@@ -94,6 +89,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
receiver_info = UNUSED;
} else if (var->IsContextSlot()) {
receiver_info = CONTEXT;
+ context_local_count++;
} else {
DCHECK(var->IsParameter());
receiver_info = STACK;
@@ -102,6 +98,11 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
receiver_info = NONE;
}
+ DCHECK(module_vars_count == 0 || scope->is_module_scope());
+
+ // Make sure we allocate the correct amount.
+ DCHECK_EQ(scope->ContextLocalCount(), context_local_count);
+
const bool has_new_target =
scope->is_declaration_scope() &&
scope->AsDeclarationScope()->new_target_var() != nullptr;
@@ -136,7 +137,10 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
const bool has_function_name = function_name_info != NONE;
const bool has_position_info = NeedsPositionInfo(scope->scope_type());
const bool has_receiver = receiver_info == STACK || receiver_info == CONTEXT;
- const int parameter_count = scope->num_parameters();
+ const int parameter_count =
+ scope->is_declaration_scope()
+ ? scope->AsDeclarationScope()->num_parameters()
+ : 0;
const bool has_outer_scope_info = !outer_scope.is_null();
const int length = kVariablePartIndex + 2 * context_local_count +
(has_receiver ? 1 : 0) +
@@ -148,172 +152,192 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
? 2 + kModuleVariableEntryLength * module_vars_count
: 0);
- Factory* factory = isolate->factory();
- Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
-
- bool has_simple_parameters = false;
- bool is_asm_module = false;
- bool calls_sloppy_eval = false;
- if (scope->is_function_scope()) {
- DeclarationScope* function_scope = scope->AsDeclarationScope();
- has_simple_parameters = function_scope->has_simple_parameters();
- is_asm_module = function_scope->is_asm_module();
- }
- FunctionKind function_kind = kNormalFunction;
- if (scope->is_declaration_scope()) {
- function_kind = scope->AsDeclarationScope()->function_kind();
- calls_sloppy_eval = scope->AsDeclarationScope()->calls_sloppy_eval();
- }
-
- // Encode the flags.
- int flags =
- ScopeTypeField::encode(scope->scope_type()) |
- CallsSloppyEvalField::encode(calls_sloppy_eval) |
- LanguageModeField::encode(scope->language_mode()) |
- DeclarationScopeField::encode(scope->is_declaration_scope()) |
- ReceiverVariableField::encode(receiver_info) |
- HasNewTargetField::encode(has_new_target) |
- FunctionVariableField::encode(function_name_info) |
- HasInferredFunctionNameField::encode(has_inferred_function_name) |
- IsAsmModuleField::encode(is_asm_module) |
- HasSimpleParametersField::encode(has_simple_parameters) |
- FunctionKindField::encode(function_kind) |
- HasOuterScopeInfoField::encode(has_outer_scope_info) |
- IsDebugEvaluateScopeField::encode(scope->is_debug_evaluate_scope()) |
- ForceContextAllocationField::encode(scope->ForceContextForLanguageMode());
- scope_info->SetFlags(flags);
-
- scope_info->SetParameterCount(parameter_count);
- scope_info->SetContextLocalCount(context_local_count);
-
+ Handle<ScopeInfo> scope_info_handle =
+ isolate->factory()->NewScopeInfo(length);
int index = kVariablePartIndex;
+ {
+ DisallowHeapAllocation no_gc;
+ ScopeInfo scope_info = *scope_info_handle;
+ WriteBarrierMode mode = scope_info->GetWriteBarrierMode(no_gc);
+
+ bool has_simple_parameters = false;
+ bool is_asm_module = false;
+ bool calls_sloppy_eval = false;
+ if (scope->is_function_scope()) {
+ DeclarationScope* function_scope = scope->AsDeclarationScope();
+ has_simple_parameters = function_scope->has_simple_parameters();
+ is_asm_module = function_scope->is_asm_module();
+ }
+ FunctionKind function_kind = kNormalFunction;
+ if (scope->is_declaration_scope()) {
+ function_kind = scope->AsDeclarationScope()->function_kind();
+ calls_sloppy_eval = scope->AsDeclarationScope()->calls_sloppy_eval();
+ }
- // Add context locals' names and info, module variables' names and info.
- // Context locals are added using their index.
- int context_local_base = index;
- int context_local_info_base = context_local_base + context_local_count;
- int module_var_entry = scope_info->ModuleVariablesIndex();
-
- for (Variable* var : *scope->locals()) {
- switch (var->location()) {
- case VariableLocation::CONTEXT: {
- // Due to duplicate parameters, context locals aren't guaranteed to come
- // in order.
- int local_index = var->index() - Context::MIN_CONTEXT_SLOTS;
- DCHECK_LE(0, local_index);
- DCHECK_LT(local_index, context_local_count);
- uint32_t info =
- VariableModeField::encode(var->mode()) |
- InitFlagField::encode(var->initialization_flag()) |
- MaybeAssignedFlagField::encode(var->maybe_assigned()) |
- ParameterNumberField::encode(ParameterNumberField::kMax);
- scope_info->set(context_local_base + local_index, *var->name());
- scope_info->set(context_local_info_base + local_index,
- Smi::FromInt(info));
- break;
- }
- case VariableLocation::MODULE: {
- scope_info->set(module_var_entry + kModuleVariableNameOffset,
- *var->name());
- scope_info->set(module_var_entry + kModuleVariableIndexOffset,
- Smi::FromInt(var->index()));
- uint32_t properties =
- VariableModeField::encode(var->mode()) |
- InitFlagField::encode(var->initialization_flag()) |
- MaybeAssignedFlagField::encode(var->maybe_assigned()) |
- ParameterNumberField::encode(ParameterNumberField::kMax);
- scope_info->set(module_var_entry + kModuleVariablePropertiesOffset,
- Smi::FromInt(properties));
- module_var_entry += kModuleVariableEntryLength;
- break;
+ // Encode the flags.
+ int flags =
+ ScopeTypeField::encode(scope->scope_type()) |
+ CallsSloppyEvalField::encode(calls_sloppy_eval) |
+ LanguageModeField::encode(scope->language_mode()) |
+ DeclarationScopeField::encode(scope->is_declaration_scope()) |
+ ReceiverVariableField::encode(receiver_info) |
+ HasNewTargetField::encode(has_new_target) |
+ FunctionVariableField::encode(function_name_info) |
+ HasInferredFunctionNameField::encode(has_inferred_function_name) |
+ IsAsmModuleField::encode(is_asm_module) |
+ HasSimpleParametersField::encode(has_simple_parameters) |
+ FunctionKindField::encode(function_kind) |
+ HasOuterScopeInfoField::encode(has_outer_scope_info) |
+ IsDebugEvaluateScopeField::encode(scope->is_debug_evaluate_scope()) |
+ ForceContextAllocationField::encode(
+ scope->ForceContextForLanguageMode());
+ scope_info->SetFlags(flags);
+
+ scope_info->SetParameterCount(parameter_count);
+ scope_info->SetContextLocalCount(context_local_count);
+
+ // Add context locals' names and info, module variables' names and info.
+ // Context locals are added using their index.
+ int context_local_base = index;
+ int context_local_info_base = context_local_base + context_local_count;
+ int module_var_entry = scope_info->ModuleVariablesIndex();
+
+ for (Variable* var : *scope->locals()) {
+ switch (var->location()) {
+ case VariableLocation::CONTEXT: {
+ // Due to duplicate parameters, context locals aren't guaranteed to
+ // come in order.
+ int local_index = var->index() - Context::MIN_CONTEXT_SLOTS;
+ DCHECK_LE(0, local_index);
+ DCHECK_LT(local_index, context_local_count);
+ uint32_t info =
+ VariableModeField::encode(var->mode()) |
+ InitFlagField::encode(var->initialization_flag()) |
+ MaybeAssignedFlagField::encode(var->maybe_assigned()) |
+ ParameterNumberField::encode(ParameterNumberField::kMax);
+ scope_info->set(context_local_base + local_index, *var->name(), mode);
+ scope_info->set(context_local_info_base + local_index,
+ Smi::FromInt(info));
+ break;
+ }
+ case VariableLocation::MODULE: {
+ scope_info->set(module_var_entry + kModuleVariableNameOffset,
+ *var->name(), mode);
+ scope_info->set(module_var_entry + kModuleVariableIndexOffset,
+ Smi::FromInt(var->index()));
+ uint32_t properties =
+ VariableModeField::encode(var->mode()) |
+ InitFlagField::encode(var->initialization_flag()) |
+ MaybeAssignedFlagField::encode(var->maybe_assigned()) |
+ ParameterNumberField::encode(ParameterNumberField::kMax);
+ scope_info->set(module_var_entry + kModuleVariablePropertiesOffset,
+ Smi::FromInt(properties));
+ module_var_entry += kModuleVariableEntryLength;
+ break;
+ }
+ default:
+ break;
}
- default:
- break;
}
- }
- if (scope->is_declaration_scope()) {
- // Mark contexts slots with the parameter number they represent. We walk the
- // list of parameters. That can include duplicate entries if a parameter
- // name is repeated. By walking upwards, we'll automatically mark the
- // context slot with the highest parameter number that uses this variable.
- // That will be the parameter number that is represented by the context
- // slot. All lower parameters will only be available on the stack through
- // the arguments object.
- for (int i = 0; i < parameter_count; i++) {
- Variable* parameter = scope->AsDeclarationScope()->parameter(i);
- if (parameter->location() != VariableLocation::CONTEXT) continue;
- int index = parameter->index() - Context::MIN_CONTEXT_SLOTS;
- int info_index = context_local_info_base + index;
- int info = Smi::ToInt(scope_info->get(info_index));
- info = ParameterNumberField::update(info, i);
- scope_info->set(info_index, Smi::FromInt(info));
+ if (scope->is_declaration_scope()) {
+ // Mark contexts slots with the parameter number they represent. We walk
+ // the list of parameters. That can include duplicate entries if a
+ // parameter name is repeated. By walking upwards, we'll automatically
+ // mark the context slot with the highest parameter number that uses this
+ // variable. That will be the parameter number that is represented by the
+ // context slot. All lower parameters will only be available on the stack
+ // through the arguments object.
+ for (int i = 0; i < parameter_count; i++) {
+ Variable* parameter = scope->AsDeclarationScope()->parameter(i);
+ if (parameter->location() != VariableLocation::CONTEXT) continue;
+ int index = parameter->index() - Context::MIN_CONTEXT_SLOTS;
+ int info_index = context_local_info_base + index;
+ int info = Smi::ToInt(scope_info->get(info_index));
+ info = ParameterNumberField::update(info, i);
+ scope_info->set(info_index, Smi::FromInt(info));
+ }
+
+ // TODO(verwaest): Remove this unnecessary entry.
+ if (scope->AsDeclarationScope()->has_this_declaration()) {
+ Variable* var = scope->AsDeclarationScope()->receiver();
+ if (var->location() == VariableLocation::CONTEXT) {
+ int local_index = var->index() - Context::MIN_CONTEXT_SLOTS;
+ uint32_t info =
+ VariableModeField::encode(var->mode()) |
+ InitFlagField::encode(var->initialization_flag()) |
+ MaybeAssignedFlagField::encode(var->maybe_assigned()) |
+ ParameterNumberField::encode(ParameterNumberField::kMax);
+ scope_info->set(context_local_base + local_index, *var->name(), mode);
+ scope_info->set(context_local_info_base + local_index,
+ Smi::FromInt(info));
+ }
+ }
}
- }
- index += 2 * context_local_count;
+ index += 2 * context_local_count;
- // If the receiver is allocated, add its index.
- DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
- if (has_receiver) {
- int var_index = scope->AsDeclarationScope()->receiver()->index();
- scope_info->set(index++, Smi::FromInt(var_index));
- // ?? DCHECK(receiver_info != CONTEXT || var_index ==
- // scope_info->ContextLength() - 1);
- }
+ // If the receiver is allocated, add its index.
+ DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
+ if (has_receiver) {
+ int var_index = scope->AsDeclarationScope()->receiver()->index();
+ scope_info->set(index++, Smi::FromInt(var_index));
+ // ?? DCHECK(receiver_info != CONTEXT || var_index ==
+ // scope_info->ContextLength() - 1);
+ }
- // If present, add the function variable name and its index.
- DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
- if (has_function_name) {
- DisallowHeapAllocation no_gc;
- Variable* var = scope->AsDeclarationScope()->function_var();
- int var_index = -1;
- Object name = Smi::kZero;
- if (var != nullptr) {
- var_index = var->index();
- name = *var->name();
+ // If present, add the function variable name and its index.
+ DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+ if (has_function_name) {
+ Variable* var = scope->AsDeclarationScope()->function_var();
+ int var_index = -1;
+ Object name = Smi::kZero;
+ if (var != nullptr) {
+ var_index = var->index();
+ name = *var->name();
+ }
+ scope_info->set(index++, name, mode);
+ scope_info->set(index++, Smi::FromInt(var_index));
+ DCHECK(function_name_info != CONTEXT ||
+ var_index == scope_info->ContextLength() - 1);
}
- scope_info->set(index++, name);
- scope_info->set(index++, Smi::FromInt(var_index));
- DCHECK(function_name_info != CONTEXT ||
- var_index == scope_info->ContextLength() - 1);
- }
- DCHECK_EQ(index, scope_info->InferredFunctionNameIndex());
- if (has_inferred_function_name) {
- // The inferred function name is taken from the SFI.
- index++;
- }
+ DCHECK_EQ(index, scope_info->InferredFunctionNameIndex());
+ if (has_inferred_function_name) {
+ // The inferred function name is taken from the SFI.
+ index++;
+ }
- DCHECK_EQ(index, scope_info->PositionInfoIndex());
- if (has_position_info) {
- scope_info->set(index++, Smi::FromInt(scope->start_position()));
- scope_info->set(index++, Smi::FromInt(scope->end_position()));
- }
+ DCHECK_EQ(index, scope_info->PositionInfoIndex());
+ if (has_position_info) {
+ scope_info->set(index++, Smi::FromInt(scope->start_position()));
+ scope_info->set(index++, Smi::FromInt(scope->end_position()));
+ }
- // If present, add the outer scope info.
- DCHECK(index == scope_info->OuterScopeInfoIndex());
- if (has_outer_scope_info) {
- scope_info->set(index++, *outer_scope.ToHandleChecked());
+ // If present, add the outer scope info.
+ DCHECK(index == scope_info->OuterScopeInfoIndex());
+ if (has_outer_scope_info) {
+ scope_info->set(index++, *outer_scope.ToHandleChecked(), mode);
+ }
}
// Module-specific information (only for module scopes).
if (scope->is_module_scope()) {
Handle<ModuleInfo> module_info =
ModuleInfo::New(isolate, zone, scope->AsModuleScope()->module());
- DCHECK_EQ(index, scope_info->ModuleInfoIndex());
- scope_info->set(index++, *module_info);
- DCHECK_EQ(index, scope_info->ModuleVariableCountIndex());
- scope_info->set(index++, Smi::FromInt(module_vars_count));
- DCHECK_EQ(index, scope_info->ModuleVariablesIndex());
+ DCHECK_EQ(index, scope_info_handle->ModuleInfoIndex());
+ scope_info_handle->set(index++, *module_info);
+ DCHECK_EQ(index, scope_info_handle->ModuleVariableCountIndex());
+ scope_info_handle->set(index++, Smi::FromInt(module_vars_count));
+ DCHECK_EQ(index, scope_info_handle->ModuleVariablesIndex());
// The variable entries themselves have already been written above.
index += kModuleVariableEntryLength * module_vars_count;
}
- DCHECK_EQ(index, scope_info->length());
- DCHECK_EQ(scope->num_parameters(), scope_info->ParameterCount());
- DCHECK_EQ(scope->num_heap_slots(), scope_info->ContextLength());
- return scope_info;
+ DCHECK_EQ(index, scope_info_handle->length());
+ DCHECK_EQ(parameter_count, scope_info_handle->ParameterCount());
+ DCHECK_EQ(scope->num_heap_slots(), scope_info_handle->ContextLength());
+ return scope_info_handle;
}
// static
@@ -678,9 +702,10 @@ bool ScopeInfo::VariableIsSynthetic(String name) {
name->Equals(name->GetReadOnlyRoots().this_string());
}
-int ScopeInfo::ModuleIndex(Handle<String> name, VariableMode* mode,
+int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
+ DisallowHeapAllocation no_gc;
DCHECK(name->IsInternalizedString());
DCHECK_EQ(scope_type(), MODULE_SCOPE);
DCHECK_NOT_NULL(mode);
@@ -703,10 +728,11 @@ int ScopeInfo::ModuleIndex(Handle<String> name, VariableMode* mode,
}
// static
-int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
- Handle<String> name, VariableMode* mode,
+int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
+ VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
+ DisallowHeapAllocation no_gc;
DCHECK(name->IsInternalizedString());
DCHECK_NOT_NULL(mode);
DCHECK_NOT_NULL(init_flag);
@@ -717,16 +743,15 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
int start = scope_info->ContextLocalNamesIndex();
int end = start + scope_info->ContextLocalCount();
for (int i = start; i < end; ++i) {
- if (*name == scope_info->get(i)) {
- int var = i - start;
- *mode = scope_info->ContextLocalMode(var);
- *init_flag = scope_info->ContextLocalInitFlag(var);
- *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
- int result = Context::MIN_CONTEXT_SLOTS + var;
-
- DCHECK_LT(result, scope_info->ContextLength());
- return result;
- }
+ if (name != scope_info->get(i)) continue;
+ int var = i - start;
+ *mode = scope_info->ContextLocalMode(var);
+ *init_flag = scope_info->ContextLocalInitFlag(var);
+ *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
+ int result = Context::MIN_CONTEXT_SLOTS + var;
+
+ DCHECK_LT(result, scope_info->ContextLength());
+ return result;
}
return -1;
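
The restructured ScopeInfo::Create above follows a common V8 idiom: allocate everything up front, then enter a DisallowHeapAllocation region, dereference the handle to a raw object once, and compute the write-barrier mode a single time so every subsequent set() can reuse it (stores into a new-space object, for instance, can skip the barrier entirely). A reduced, self-contained model of the idiom with stand-in types:

#include <cstdio>

enum WriteBarrierMode { UPDATE_WRITE_BARRIER, SKIP_WRITE_BARRIER };

struct RawObject {
  bool in_new_space;
  WriteBarrierMode GetWriteBarrierMode() const {
    // New-space objects need no old-to-new remembered-set entries.
    return in_new_space ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
  }
  void set(int index, int value, WriteBarrierMode mode) {
    if (mode == UPDATE_WRITE_BARRIER) { /* record slot for the GC */ }
    std::printf("slot %d = %d\n", index, value);
  }
};

void FillAll(RawObject& raw, int count) {
  // Decided once, threaded through every store in the no-GC region.
  WriteBarrierMode mode = raw.GetWriteBarrierMode();
  for (int i = 0; i < count; i++) raw.set(i, i * 2, mode);
}
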
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 3f6ae6df88..38d2318f6d 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_SCOPE_INFO_H_
#define V8_OBJECTS_SCOPE_INFO_H_
+#include "src/function-kind.h"
#include "src/globals.h"
#include "src/objects.h"
#include "src/objects/fixed-array.h"
@@ -132,14 +133,14 @@ class ScopeInfo : public FixedArray {
// returns a value < 0. The name must be an internalized string.
// If the slot is present and mode != nullptr, sets *mode to the corresponding
// mode for that variable.
- static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name,
+ static int ContextSlotIndex(ScopeInfo scope_info, String name,
VariableMode* mode, InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
// Lookup metadata of a MODULE-allocated variable. Return 0 if there is no
// module variable with the given name (the index value of a MODULE variable
// is never 0).
- int ModuleIndex(Handle<String> name, VariableMode* mode,
+ int ModuleIndex(String name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
@@ -320,7 +321,7 @@ class ScopeInfo : public FixedArray {
friend std::ostream& operator<<(std::ostream& os,
ScopeInfo::VariableAllocationInfo var);
- OBJECT_CONSTRUCTORS(ScopeInfo, FixedArray)
+ OBJECT_CONSTRUCTORS(ScopeInfo, FixedArray);
};
std::ostream& operator<<(std::ostream& os,
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 5484441030..137e0d9e02 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -9,7 +9,7 @@
#include "src/feedback-vector-inl.h"
#include "src/handles-inl.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/scope-info.h"
#include "src/objects/templates.h"
@@ -60,7 +60,7 @@ void PreparseData::set(int index, byte value) {
void PreparseData::copy_in(int index, const byte* buffer, int length) {
DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->data_length());
- Address dst_addr = FIELD_ADDR(this, kDataStartOffset + index * kByteSize);
+ Address dst_addr = FIELD_ADDR(*this, kDataStartOffset + index * kByteSize);
memcpy(reinterpret_cast<void*>(dst_addr), buffer, length);
}
@@ -106,6 +106,11 @@ CAST_ACCESSOR(UncompiledDataWithPreparseData)
ACCESSORS(UncompiledDataWithPreparseData, preparse_data, PreparseData,
kPreparseDataOffset)
+bool HeapObject::IsUncompiledData() const {
+ return IsUncompiledDataWithoutPreparseData() ||
+ IsUncompiledDataWithPreparseData();
+}
+
OBJECT_CONSTRUCTORS_IMPL(InterpreterData, Struct)
CAST_ACCESSOR(InterpreterData)
@@ -123,15 +128,13 @@ ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
ACCESSORS(SharedFunctionInfo, script_or_debug_info, Object,
kScriptOrDebugInfoOffset)
-#if V8_SFI_HAS_UNIQUE_ID
-INT_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
-#endif
UINT16_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
UINT16_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
kFormalParameterCountOffset)
UINT8_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
-UINT8_ACCESSORS(SharedFunctionInfo, raw_builtin_function_id, kBuiltinFunctionId)
+UINT8_ACCESSORS(SharedFunctionInfo, raw_builtin_function_id,
+ kBuiltinFunctionIdOffset)
UINT16_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
kFunctionTokenOffsetOffset)
RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
@@ -177,11 +180,11 @@ AbstractCode SharedFunctionInfo::abstract_code() {
}
Object SharedFunctionInfo::function_data() const {
- return RELAXED_READ_FIELD(*this, kFunctionDataOffset);
+ return ACQUIRE_READ_FIELD(*this, kFunctionDataOffset);
}
void SharedFunctionInfo::set_function_data(Object data, WriteBarrierMode mode) {
- RELAXED_WRITE_FIELD(*this, kFunctionDataOffset, data);
+ RELEASE_WRITE_FIELD(*this, kFunctionDataOffset, data);
CONDITIONAL_WRITE_BARRIER(*this, kFunctionDataOffset, data, mode);
}
@@ -224,6 +227,9 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_toplevel,
SharedFunctionInfo::IsTopLevelBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_oneshot_iife,
SharedFunctionInfo::IsOneshotIIFEBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
+ is_safe_to_skip_arguments_adaptor,
+ SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit)
bool SharedFunctionInfo::optimization_disabled() const {
return disable_optimization_reason() != BailoutReason::kNoReason;
@@ -257,7 +263,6 @@ void SharedFunctionInfo::set_kind(FunctionKind kind) {
int hints = flags();
hints = FunctionKindBits::update(hints, kind);
hints = IsClassConstructorBit::update(hints, IsClassConstructor(kind));
- hints = IsDerivedConstructorBit::update(hints, IsDerivedConstructor(kind));
set_flags(hints);
UpdateFunctionMapIndex();
}
@@ -317,7 +322,7 @@ void SharedFunctionInfo::clear_padding() {
void SharedFunctionInfo::UpdateFunctionMapIndex() {
int map_index = Context::FunctionMapIndex(
- language_mode(), kind(), true, HasSharedName(), needs_home_object());
+ language_mode(), kind(), HasSharedName(), needs_home_object());
set_function_map_index(map_index);
}
@@ -494,10 +499,8 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
bool SharedFunctionInfo::ShouldFlushBytecode() {
if (!FLAG_flush_bytecode) return false;
-  // TODO(rmcilroy): Enable bytecode flushing for resumable functions and class
- // member initializers.
- if (IsResumableFunction(kind()) ||
- IsClassMembersInitializerFunction(kind()) || !allows_lazy_compilation()) {
+ // TODO(rmcilroy): Enable bytecode flushing for resumable functions.
+ if (IsResumableFunction(kind()) || !allows_lazy_compilation()) {
return false;
}
@@ -609,7 +612,7 @@ void SharedFunctionInfo::ClearPreparseData() {
// Trim off the pre-parsed scope data from the uncompiled data by swapping the
// map, leaving only an uncompiled data without pre-parsed scope.
DisallowHeapAllocation no_gc;
- Heap* heap = Heap::FromWritableHeapObject(data);
+ Heap* heap = GetHeapFromWritableObject(data);
// Swap the map.
heap->NotifyObjectLayoutChange(data, UncompiledDataWithPreparseData::kSize,
@@ -632,6 +635,10 @@ void SharedFunctionInfo::ClearPreparseData() {
DCHECK(HasUncompiledDataWithoutPreparseData());
}
+OBJECT_CONSTRUCTORS_IMPL(SharedFunctionInfoWithID, SharedFunctionInfo)
+CAST_ACCESSOR(SharedFunctionInfoWithID)
+INT_ACCESSORS(SharedFunctionInfoWithID, unique_id, kUniqueIdOffset)
+
// static
void UncompiledData::Initialize(
UncompiledData data, String inferred_name, int start_position,
@@ -661,6 +668,10 @@ void UncompiledDataWithPreparseData::Initialize(
scope_data);
}
+bool UncompiledData::has_function_literal_id() {
+ return function_literal_id() != kFunctionLiteralIdInvalid;
+}
+
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
return function_data()->IsWasmExportedFunctionData();
}
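
The function_data accessors above move from relaxed to acquire/release ordering, turning the field into a publication point: a reader that acquire-loads the field is guaranteed to observe every plain store the writer performed before its release-store. In standard C++ the same pattern looks like this (a generic illustration, not V8's code):

#include <atomic>
#include <cassert>
#include <thread>

struct CompiledData { int payload = 0; };

std::atomic<CompiledData*> function_data{nullptr};

void Publisher() {
  CompiledData* d = new CompiledData;
  d->payload = 42;                                    // plain store...
  function_data.store(d, std::memory_order_release);  // ...published here
}

void Consumer() {
  CompiledData* d;
  while ((d = function_data.load(std::memory_order_acquire)) == nullptr) {
  }
  // The acquire load synchronizes with the release store, so the plain
  // store to payload above is guaranteed to be visible here.
  assert(d->payload == 42);
}

int main() {
  std::thread t1(Publisher), t2(Consumer);
  t1.join();
  t2.join();
  delete function_data.load();
}
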
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index baaacd538c..bf382fc4f8 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -6,11 +6,13 @@
#define V8_OBJECTS_SHARED_FUNCTION_INFO_H_
#include "src/bailout-reason.h"
+#include "src/function-kind.h"
#include "src/objects.h"
#include "src/objects/builtin-function-id.h"
#include "src/objects/script.h"
#include "src/objects/smi.h"
#include "src/objects/struct.h"
+#include "torque-generated/class-definitions-from-dsl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -102,6 +104,9 @@ class UncompiledData : public HeapObject {
DECL_INT32_ACCESSORS(end_position)
DECL_INT32_ACCESSORS(function_literal_id)
+ // Returns true if the UncompiledData contains a valid function_literal_id.
+ inline bool has_function_literal_id();
+
DECL_CAST(UncompiledData)
inline static void Initialize(
@@ -186,7 +191,7 @@ class UncompiledDataWithPreparseData : public UncompiledData {
#undef UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS
// Make sure the size is aligned
- STATIC_ASSERT(kSize == POINTER_SIZE_ALIGN(kSize));
+ STATIC_ASSERT(IsAligned(kSize, kTaggedSize));
typedef SubclassBodyDescriptor<
UncompiledData::BodyDescriptor,
@@ -318,12 +323,6 @@ class SharedFunctionInfo : public HeapObject {
// function. The value is only reliable when the function has been compiled.
DECL_UINT8_ACCESSORS(expected_nof_properties)
-#if V8_SFI_HAS_UNIQUE_ID
- // [unique_id] - For --trace-maps purposes, an identifier that's persistent
- // even if the GC moves this SharedFunctionInfo.
- DECL_INT_ACCESSORS(unique_id)
-#endif
-
// [function data]: This field holds some additional data for function.
// Currently it has one of:
// - a FunctionTemplateInfo to make benefit the API [IsApiFunction()].
@@ -493,6 +492,17 @@ class SharedFunctionInfo : public HeapObject {
// is only executed once.
DECL_BOOLEAN_ACCESSORS(is_oneshot_iife)
+ // Indicates that the function represented by the shared function info
+ // cannot observe the actual parameters passed at a call site, which
+ // means the function doesn't use the arguments object, doesn't use
+ // rest parameters, and is also in strict mode (meaning that there's
+ // no way to get to the actual arguments via the non-standard "arguments"
+ // accessor on sloppy mode functions). This can be used to speed up calls
+ // to this function even in the presence of arguments mismatch.
+ // See http://bit.ly/v8-faster-calls-with-arguments-mismatch for more
+ // information on this.
+ DECL_BOOLEAN_ACCESSORS(is_safe_to_skip_arguments_adaptor)
+
// Indicates that the function has been reported for binary code coverage.
DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage)
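
The new flag documented above is a purely structural property of the function: if it is strict and neither touches the arguments object nor declares a rest parameter, no code path can observe the actual argument count, so calls with a mismatched count may skip the arguments adaptor frame. As a predicate over hypothetical metadata (an illustration of the listed conditions, not V8's implementation):

struct FunctionMetadata {
  bool is_strict;              // sloppy "arguments" accessor unavailable
  bool uses_arguments_object;  // references `arguments` in its body
  bool has_rest_parameter;     // declares a `...rest` parameter
};

// Safe to call with a mismatched argument count and no adaptor frame:
// the callee has no way to see how many arguments were actually passed.
constexpr bool IsSafeToSkipArgumentsAdaptor(FunctionMetadata f) {
  return f.is_strict && !f.uses_arguments_object && !f.has_rest_parameter;
}

static_assert(IsSafeToSkipArgumentsAdaptor({true, false, false}), "");
static_assert(!IsSafeToSkipArgumentsAdaptor({false, false, false}), "");
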
@@ -585,6 +595,12 @@ class SharedFunctionInfo : public HeapObject {
void SetFunctionTokenPosition(int function_token_position,
int start_position);
+ static void EnsureSourcePositionsAvailable(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info);
+
+ // Hash based on function literal id and script id.
+ uint32_t Hash();
+
inline bool construct_as_builtin() const;
// Determines and sets the ConstructAsBuiltinBit in |flags|, based on the
@@ -630,7 +646,7 @@ class SharedFunctionInfo : public HeapObject {
Script::Iterator script_iterator_;
WeakArrayList::Iterator noscript_sfi_iterator_;
SharedFunctionInfo::ScriptIterator sfi_iterator_;
- DISALLOW_HEAP_ALLOCATION(no_gc_);
+ DISALLOW_HEAP_ALLOCATION(no_gc_)
DISALLOW_COPY_AND_ASSIGN(GlobalIterator);
};
@@ -643,65 +659,37 @@ class SharedFunctionInfo : public HeapObject {
static const uint16_t kFunctionTokenOutOfRange = static_cast<uint16_t>(-1);
STATIC_ASSERT(kMaximumFunctionTokenOffset + 1 == kFunctionTokenOutOfRange);
-#if V8_SFI_HAS_UNIQUE_ID
- static const int kUniqueIdFieldSize = kInt32Size;
-#else
-  // Just to not break the postmortem support with conditional offsets
- static const int kUniqueIdFieldSize = 0;
-#endif
-
-// Layout description.
-#define SHARED_FUNCTION_INFO_FIELDS(V) \
- /* Pointer fields. */ \
- V(kStartOfPointerFieldsOffset, 0) \
- V(kFunctionDataOffset, kTaggedSize) \
- V(kStartOfAlwaysStrongPointerFieldsOffset, 0) \
- V(kNameOrScopeInfoOffset, kTaggedSize) \
- V(kOuterScopeInfoOrFeedbackMetadataOffset, kTaggedSize) \
- V(kScriptOrDebugInfoOffset, kTaggedSize) \
- V(kEndOfTaggedFieldsOffset, 0) \
- /* Raw data fields. */ \
- V(kUniqueIdOffset, kUniqueIdFieldSize) \
- V(kLengthOffset, kUInt16Size) \
- V(kFormalParameterCountOffset, kUInt16Size) \
- V(kExpectedNofPropertiesOffset, kUInt8Size) \
- V(kBuiltinFunctionId, kUInt8Size) \
- V(kFunctionTokenOffsetOffset, kUInt16Size) \
- V(kFlagsOffset, kInt32Size) \
- /* Total size. */ \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- SHARED_FUNCTION_INFO_FIELDS)
-#undef SHARED_FUNCTION_INFO_FIELDS
+ TORQUE_GENERATED_SHARED_FUNCTION_INFO_FIELDS)
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
class BodyDescriptor;
// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(IsNativeBit, bool, 1, _) \
- V(IsStrictBit, bool, 1, _) \
- V(IsWrappedBit, bool, 1, _) \
- V(IsClassConstructorBit, bool, 1, _) \
- V(IsDerivedConstructorBit, bool, 1, _) \
- V(FunctionKindBits, FunctionKind, 5, _) \
- V(HasDuplicateParametersBit, bool, 1, _) \
- V(AllowLazyCompilationBit, bool, 1, _) \
- V(NeedsHomeObjectBit, bool, 1, _) \
- V(IsDeclarationBit, bool, 1, _) \
- V(IsAsmWasmBrokenBit, bool, 1, _) \
- V(FunctionMapIndexBits, int, 5, _) \
- V(DisabledOptimizationReasonBits, BailoutReason, 4, _) \
- V(RequiresInstanceMembersInitializer, bool, 1, _) \
- V(ConstructAsBuiltinBit, bool, 1, _) \
- V(IsAnonymousExpressionBit, bool, 1, _) \
- V(NameShouldPrintAsAnonymousBit, bool, 1, _) \
- V(HasReportedBinaryCoverageBit, bool, 1, _) \
- V(IsNamedExpressionBit, bool, 1, _) \
- V(IsTopLevelBit, bool, 1, _) \
- V(IsOneshotIIFEBit, bool, 1, _)
+#define FLAGS_BIT_FIELDS(V, _) \
+ /* Have FunctionKind first to make it cheaper to access */ \
+ V(FunctionKindBits, FunctionKind, 5, _) \
+ V(IsNativeBit, bool, 1, _) \
+ V(IsStrictBit, bool, 1, _) \
+ V(IsWrappedBit, bool, 1, _) \
+ V(IsClassConstructorBit, bool, 1, _) \
+ V(HasDuplicateParametersBit, bool, 1, _) \
+ V(AllowLazyCompilationBit, bool, 1, _) \
+ V(NeedsHomeObjectBit, bool, 1, _) \
+ V(IsDeclarationBit, bool, 1, _) \
+ V(IsAsmWasmBrokenBit, bool, 1, _) \
+ V(FunctionMapIndexBits, int, 5, _) \
+ V(DisabledOptimizationReasonBits, BailoutReason, 4, _) \
+ V(RequiresInstanceMembersInitializer, bool, 1, _) \
+ V(ConstructAsBuiltinBit, bool, 1, _) \
+ V(IsAnonymousExpressionBit, bool, 1, _) \
+ V(NameShouldPrintAsAnonymousBit, bool, 1, _) \
+ V(HasReportedBinaryCoverageBit, bool, 1, _) \
+ V(IsNamedExpressionBit, bool, 1, _) \
+ V(IsTopLevelBit, bool, 1, _) \
+ V(IsOneshotIIFEBit, bool, 1, _) \
+ V(IsSafeToSkipArgumentsAdaptorBit, bool, 1, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
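
The reordering above is explained by the new comment: a bit field at offset 0, like FunctionKindBits, decodes with a single AND, while a field at any other offset also needs a shift. That decode-cost difference is the point of the reorder (illustrative widths, not V8's BitField machinery):

#include <cstdint>

constexpr uint32_t kFunctionKindMask = (1u << 5) - 1;  // a 5-bit field

constexpr uint32_t DecodeKindAtBit0(uint32_t flags) {
  return flags & kFunctionKindMask;         // mask only
}

constexpr uint32_t DecodeKindAtBit5(uint32_t flags) {
  return (flags >> 5) & kFunctionKindMask;  // shift + mask
}

static_assert(DecodeKindAtBit0(0x1F) == 0x1F, "");
static_assert(DecodeKindAtBit5(0x1F << 5) == 0x1F, "");
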
@@ -716,6 +704,14 @@ class SharedFunctionInfo : public HeapObject {
// This is needed to set up the [[HomeObject]] on the function instance.
inline bool needs_home_object() const;
+ V8_INLINE bool IsSharedFunctionInfoWithID() const {
+#if V8_SFI_HAS_UNIQUE_ID
+ return true;
+#else
+ return false;
+#endif
+ }
+
private:
// [name_or_scope_info]: Function name string, kNoSharedNameSentinel or
// ScopeInfo.
@@ -742,6 +738,23 @@ class SharedFunctionInfo : public HeapObject {
OBJECT_CONSTRUCTORS(SharedFunctionInfo, HeapObject);
};
+class SharedFunctionInfoWithID : public SharedFunctionInfo {
+ public:
+ // [unique_id] - For --trace-maps purposes, an identifier that's persistent
+ // even if the GC moves this SharedFunctionInfo.
+ DECL_INT_ACCESSORS(unique_id)
+
+ DECL_CAST(SharedFunctionInfoWithID)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ SharedFunctionInfo::kSize,
+ TORQUE_GENERATED_SHARED_FUNCTION_INFO_WITH_ID_FIELDS)
+
+ static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
+
+ OBJECT_CONSTRUCTORS(SharedFunctionInfoWithID, SharedFunctionInfo);
+};
+
// Printing support.
struct SourceCodeOf {
explicit SourceCodeOf(SharedFunctionInfo v, int max = -1)
diff --git a/deps/v8/src/objects/slots-atomic-inl.h b/deps/v8/src/objects/slots-atomic-inl.h
index e0f4f9dff2..3db4e38720 100644
--- a/deps/v8/src/objects/slots-atomic-inl.h
+++ b/deps/v8/src/objects/slots-atomic-inl.h
@@ -49,7 +49,7 @@ class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t, kTaggedSize> {
operator Tagged_t() const { return AsAtomicTagged::Relaxed_Load(address_); }
void swap(Reference& other) {
- Address tmp = value();
+ Tagged_t tmp = value();
AsAtomicTagged::Relaxed_Store(address_, other.value());
AsAtomicTagged::Relaxed_Store(other.address_, tmp);
}
@@ -63,7 +63,7 @@ class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t, kTaggedSize> {
}
private:
- Address value() const { return AsAtomicTagged::Relaxed_Load(address_); }
+ Tagged_t value() const { return AsAtomicTagged::Relaxed_Load(address_); }
Tagged_t* address_;
};
diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
index 7072c43b44..0d705888e8 100644
--- a/deps/v8/src/objects/slots-inl.h
+++ b/deps/v8/src/objects/slots-inl.h
@@ -31,7 +31,7 @@ bool FullObjectSlot::contains_value(Address raw_value) const {
return base::AsAtomicPointer::Relaxed_Load(location()) == raw_value;
}
-Object FullObjectSlot::operator*() const { return Object(*location()); }
+const Object FullObjectSlot::operator*() const { return Object(*location()); }
void FullObjectSlot::store(Object value) const { *location() = value->ptr(); }
@@ -61,7 +61,7 @@ Object FullObjectSlot::Release_CompareAndSwap(Object old, Object target) const {
// FullMaybeObjectSlot implementation.
//
-MaybeObject FullMaybeObjectSlot::operator*() const {
+const MaybeObject FullMaybeObjectSlot::operator*() const {
return MaybeObject(*location());
}
@@ -70,23 +70,24 @@ void FullMaybeObjectSlot::store(MaybeObject value) const {
}
MaybeObject FullMaybeObjectSlot::Relaxed_Load() const {
- return MaybeObject(AsAtomicTagged::Relaxed_Load(location()));
+ return MaybeObject(base::AsAtomicPointer::Relaxed_Load(location()));
}
void FullMaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
- AsAtomicTagged::Relaxed_Store(location(), value->ptr());
+ base::AsAtomicPointer::Relaxed_Store(location(), value->ptr());
}
void FullMaybeObjectSlot::Release_CompareAndSwap(MaybeObject old,
MaybeObject target) const {
- AsAtomicTagged::Release_CompareAndSwap(location(), old.ptr(), target.ptr());
+ base::AsAtomicPointer::Release_CompareAndSwap(location(), old.ptr(),
+ target.ptr());
}
//
// FullHeapObjectSlot implementation.
//
-HeapObjectReference FullHeapObjectSlot::operator*() const {
+const HeapObjectReference FullHeapObjectSlot::operator*() const {
return HeapObjectReference(*location());
}
@@ -107,11 +108,23 @@ void FullHeapObjectSlot::StoreHeapObject(HeapObject value) const {
// Utils.
//
+// Copies tagged words from |src| to |dst|. The data spans must not overlap.
+// |src| and |dst| must be kTaggedSize-aligned.
+inline void CopyTagged(Address dst, const Address src, size_t num_tagged) {
+ static const size_t kBlockCopyLimit = 16;
+ CopyImpl<kBlockCopyLimit>(reinterpret_cast<Tagged_t*>(dst),
+ reinterpret_cast<const Tagged_t*>(src), num_tagged);
+}
+
// Sets |counter| number of kTaggedSize-sized values starting at |start| slot.
inline void MemsetTagged(ObjectSlot start, Object value, size_t counter) {
// TODO(ishell): revisit this implementation, maybe use "rep stosl"
STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
- MemsetPointer(start.location(), value.ptr(), counter);
+ Address raw_value = value.ptr();
+#ifdef V8_COMPRESS_POINTERS
+ raw_value = CompressTagged(raw_value);
+#endif
+ MemsetPointer(start.location(), raw_value, counter);
}
// Sets |counter| number of kSystemPointerSize-sized values starting at |start|
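
The MemsetTagged change above hoists pointer compression out of the fill: under V8_COMPRESS_POINTERS the tagged value is compressed once, and the same narrow pattern is written to every slot, rather than compressing per store. The shape of that hoisting, with a stand-in CompressTagged (V8's real helper belongs to its compression scheme):

#include <cstddef>
#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;  // compressed slot width under pointer compression

// Stand-in: keep the low 32 bits, as a compressed-pointer scheme would.
inline Tagged_t CompressTagged(Address value) {
  return static_cast<Tagged_t>(value);
}

// Fill |count| compressed slots with the same value; the compression is
// computed once, mirroring the MemsetTagged change above.
inline void MemsetTaggedSketch(Tagged_t* start, Address value, size_t count) {
  Tagged_t raw = CompressTagged(value);
  for (size_t i = 0; i < count; i++) start[i] = raw;
}
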
diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
index 12dc168e8e..95acb94ff3 100644
--- a/deps/v8/src/objects/slots.h
+++ b/deps/v8/src/objects/slots.h
@@ -110,7 +110,7 @@ class FullObjectSlot
// raw value.
inline bool contains_value(Address raw_value) const;
- inline Object operator*() const;
+ inline const Object operator*() const;
inline void store(Object value) const;
inline Object Acquire_Load() const;
@@ -137,11 +137,13 @@ class FullMaybeObjectSlot
explicit FullMaybeObjectSlot(Address ptr) : SlotBase(ptr) {}
explicit FullMaybeObjectSlot(Object* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
+ explicit FullMaybeObjectSlot(MaybeObject* ptr)
+ : SlotBase(reinterpret_cast<Address>(ptr)) {}
template <typename T>
explicit FullMaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
: SlotBase(slot.address()) {}
- inline MaybeObject operator*() const;
+ inline const MaybeObject operator*() const;
inline void store(MaybeObject value) const;
inline MaybeObject Relaxed_Load() const;
@@ -167,7 +169,7 @@ class FullHeapObjectSlot
explicit FullHeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
: SlotBase(slot.address()) {}
- inline HeapObjectReference operator*() const;
+ inline const HeapObjectReference operator*() const;
inline void store(HeapObjectReference value) const;
inline HeapObject ToHeapObject() const;
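
Qualifying the by-value results of operator* as const in the slot classes above defends against a value-type hazard: with objects passed by value, "*slot = x" would compile, assign into a temporary, and silently drop the write. The const return turns that into a compile error while leaving reads untouched. A minimal reproduction:

struct Value {
  int raw = 0;
};

struct Slot {
  Value* location;
  // const return: assigning through the temporary no longer compiles.
  const Value operator*() const { return *location; }
  void store(Value v) const { *location = v; }
};

int main() {
  Value v{1};
  Slot s{&v};
  // *s = Value{2};   // compiles (and writes to a temporary) without the
  //                  // const qualifier; with it, a compile-time error.
  s.store(Value{2});  // the intended way to write through the slot
  return v.raw;       // 2
}
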
diff --git a/deps/v8/src/objects/smi.h b/deps/v8/src/objects/smi.h
index 0361ef0a7a..301d5093c0 100644
--- a/deps/v8/src/objects/smi.h
+++ b/deps/v8/src/objects/smi.h
@@ -54,7 +54,14 @@ class Smi : public Object {
static inline Smi FromIntptr(intptr_t value) {
DCHECK(Smi::IsValid(value));
int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- return Smi((value << smi_shift_bits) | kSmiTag);
+ return Smi((static_cast<Address>(value) << smi_shift_bits) | kSmiTag);
+ }
+
+ // Given {value} in [0, 2^31-1], force it into Smi range by changing at most
+  // the MSB (leaving the lower 31 bits unchanged).
+ static inline Smi From31BitPattern(int value) {
+ return Smi::FromInt((value << (32 - kSmiValueSize)) >>
+ (32 - kSmiValueSize));
}
template <typename E,
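
From31BitPattern's shift pair keeps the low kSmiValueSize bits and sign-extends the top one: the left shift moves bit (kSmiValueSize - 1) into the sign position, and the arithmetic right shift smears it back down. For the 31-bit Smi configuration, a portable equivalent plus two worked cases (the version in the diff relies on signed-shift behavior V8 can assume; the bit-twiddling below does not):

#include <cassert>
#include <cstdint>

// Portable equivalent of Smi::From31BitPattern for kSmiValueSize == 31:
// keep bits 0..30 and copy bit 30 into bit 31.
int32_t From31BitPattern(int32_t value) {
  uint32_t u = static_cast<uint32_t>(value) & 0x7FFFFFFFu;  // low 31 bits
  if (u & 0x40000000u) u |= 0x80000000u;                    // sign-extend
  return static_cast<int32_t>(u);
}

int main() {
  // Bit 30 clear: the value already fits in 31 signed bits, unchanged.
  assert(From31BitPattern(0x3FFFFFFF) == 0x3FFFFFFF);
  // Bit 30 set: only the MSB is rewritten, so 0x7FFFFFFF becomes -1.
  assert(From31BitPattern(0x7FFFFFFF) == -1);
}
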
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index a9c4661726..4bcf3a5672 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -8,6 +8,9 @@
#include "src/objects/stack-frame-info.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/frame-array-inl.h"
+#include "src/objects/struct-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -21,18 +24,27 @@ NEVER_READ_ONLY_SPACE_IMPL(StackFrameInfo)
CAST_ACCESSOR(StackFrameInfo)
-SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberIndex)
-SMI_ACCESSORS(StackFrameInfo, column_number, kColumnNumberIndex)
-SMI_ACCESSORS(StackFrameInfo, script_id, kScriptIdIndex)
-ACCESSORS(StackFrameInfo, script_name, Object, kScriptNameIndex)
+SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberOffset)
+SMI_ACCESSORS(StackFrameInfo, column_number, kColumnNumberOffset)
+SMI_ACCESSORS(StackFrameInfo, script_id, kScriptIdOffset)
+ACCESSORS(StackFrameInfo, script_name, Object, kScriptNameOffset)
ACCESSORS(StackFrameInfo, script_name_or_source_url, Object,
- kScriptNameOrSourceUrlIndex)
-ACCESSORS(StackFrameInfo, function_name, Object, kFunctionNameIndex)
-SMI_ACCESSORS(StackFrameInfo, flag, kFlagIndex)
+ kScriptNameOrSourceUrlOffset)
+ACCESSORS(StackFrameInfo, function_name, Object, kFunctionNameOffset)
+SMI_ACCESSORS(StackFrameInfo, flag, kFlagOffset)
BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, kIsEvalBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, kIsConstructorBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, kIsWasmBit)
-SMI_ACCESSORS(StackFrameInfo, id, kIdIndex)
+SMI_ACCESSORS(StackFrameInfo, id, kIdOffset)
+
+OBJECT_CONSTRUCTORS_IMPL(StackTraceFrame, Struct)
+NEVER_READ_ONLY_SPACE_IMPL(StackTraceFrame)
+CAST_ACCESSOR(StackTraceFrame)
+
+ACCESSORS(StackTraceFrame, frame_array, Object, kFrameArrayOffset)
+SMI_ACCESSORS(StackTraceFrame, frame_index, kFrameIndexOffset)
+ACCESSORS(StackTraceFrame, frame_info, Object, kFrameInfoOffset)
+SMI_ACCESSORS(StackTraceFrame, id, kIdOffset)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
new file mode 100644
index 0000000000..45ab671ee5
--- /dev/null
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -0,0 +1,83 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/stack-frame-info.h"
+
+#include "src/objects/stack-frame-info-inl.h"
+
+namespace v8 {
+namespace internal {
+
+int StackTraceFrame::GetLineNumber(Handle<StackTraceFrame> frame) {
+ if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
+ int line = GetFrameInfo(frame)->line_number();
+ return line != StackFrameBase::kNone ? line : Message::kNoLineNumberInfo;
+}
+
+int StackTraceFrame::GetColumnNumber(Handle<StackTraceFrame> frame) {
+ if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
+ int column = GetFrameInfo(frame)->column_number();
+ return column != StackFrameBase::kNone ? column : Message::kNoColumnInfo;
+}
+
+int StackTraceFrame::GetScriptId(Handle<StackTraceFrame> frame) {
+ if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
+ int id = GetFrameInfo(frame)->script_id();
+ return id != StackFrameBase::kNone ? id : Message::kNoScriptIdInfo;
+}
+
+Handle<Object> StackTraceFrame::GetFileName(Handle<StackTraceFrame> frame) {
+ if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
+ auto name = GetFrameInfo(frame)->script_name();
+ return handle(name, frame->GetIsolate());
+}
+
+Handle<Object> StackTraceFrame::GetScriptNameOrSourceUrl(
+ Handle<StackTraceFrame> frame) {
+ if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
+ auto name = GetFrameInfo(frame)->script_name_or_source_url();
+ return handle(name, frame->GetIsolate());
+}
+
+Handle<Object> StackTraceFrame::GetFunctionName(Handle<StackTraceFrame> frame) {
+ if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
+ auto name = GetFrameInfo(frame)->function_name();
+ return handle(name, frame->GetIsolate());
+}
+
+bool StackTraceFrame::IsEval(Handle<StackTraceFrame> frame) {
+ if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
+ return GetFrameInfo(frame)->is_eval();
+}
+
+bool StackTraceFrame::IsConstructor(Handle<StackTraceFrame> frame) {
+ if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
+ return GetFrameInfo(frame)->is_constructor();
+}
+
+bool StackTraceFrame::IsWasm(Handle<StackTraceFrame> frame) {
+ if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
+ return GetFrameInfo(frame)->is_wasm();
+}
+
+Handle<StackFrameInfo> StackTraceFrame::GetFrameInfo(
+ Handle<StackTraceFrame> frame) {
+ return handle(StackFrameInfo::cast(frame->frame_info()), frame->GetIsolate());
+}
+
+void StackTraceFrame::InitializeFrameInfo(Handle<StackTraceFrame> frame) {
+ Isolate* isolate = frame->GetIsolate();
+ Handle<StackFrameInfo> frame_info = isolate->factory()->NewStackFrameInfo(
+ handle(FrameArray::cast(frame->frame_array()), isolate),
+ frame->frame_index());
+ frame->set_frame_info(*frame_info);
+
+ // After initializing, we no longer need to keep a reference
+ // to the frame_array.
+ frame->set_frame_array(ReadOnlyRoots(isolate).undefined_value());
+ frame->set_frame_index(-1);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index 8764547ecc..cb67637119 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+class FrameArray;
+
class StackFrameInfo : public Struct {
public:
NEVER_READ_ONLY_SPACE
@@ -34,21 +36,8 @@ class StackFrameInfo : public Struct {
DECL_PRINTER(StackFrameInfo)
DECL_VERIFIER(StackFrameInfo)
- // Layout description.
-#define STACK_FRAME_INFO_FIELDS(V) \
- V(kLineNumberIndex, kTaggedSize) \
- V(kColumnNumberIndex, kTaggedSize) \
- V(kScriptIdIndex, kTaggedSize) \
- V(kScriptNameIndex, kTaggedSize) \
- V(kScriptNameOrSourceUrlIndex, kTaggedSize) \
- V(kFunctionNameIndex, kTaggedSize) \
- V(kFlagIndex, kTaggedSize) \
- V(kIdIndex, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, STACK_FRAME_INFO_FIELDS)
-#undef STACK_FRAME_INFO_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ TORQUE_GENERATED_STACK_FRAME_INFO_FIELDS)
private:
// Bit position in the flag, from least significant bit position.
@@ -59,6 +48,56 @@ class StackFrameInfo : public Struct {
OBJECT_CONSTRUCTORS(StackFrameInfo, Struct);
};
+// This class is used to lazily initialize a StackFrameInfo object from
+// a FrameArray plus an index.
+// The first time any of the Get* or Is* methods is called, a
+// StackFrameInfo object is allocated and all necessary information
+// retrieved.
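+// A minimal usage sketch (illustrative only, not part of this patch):
+//
+//   Handle<StackTraceFrame> frame = ...;  // e.g. from a captured stack trace
+//   int line = StackTraceFrame::GetLineNumber(frame);  // allocates frame_info
+//   bool wasm = StackTraceFrame::IsWasm(frame);        // reuses frame_info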
+class StackTraceFrame : public Struct {
+ public:
+ NEVER_READ_ONLY_SPACE
+ DECL_ACCESSORS(frame_array, Object)
+ DECL_INT_ACCESSORS(frame_index)
+ DECL_ACCESSORS(frame_info, Object)
+ DECL_INT_ACCESSORS(id)
+
+ DECL_CAST(StackTraceFrame)
+
+ // Dispatched behavior.
+ DECL_PRINTER(StackTraceFrame)
+ DECL_VERIFIER(StackTraceFrame)
+
+ // Layout description.
+#define STACK_FRAME_FIELDS(V) \
+ V(kFrameArrayOffset, kTaggedSize) \
+ V(kFrameIndexOffset, kTaggedSize) \
+ V(kFrameInfoOffset, kTaggedSize) \
+ V(kIdOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, STACK_FRAME_FIELDS)
+#undef STACK_FRAME_FIELDS
+
+ static int GetLineNumber(Handle<StackTraceFrame> frame);
+ static int GetColumnNumber(Handle<StackTraceFrame> frame);
+ static int GetScriptId(Handle<StackTraceFrame> frame);
+
+ static Handle<Object> GetFileName(Handle<StackTraceFrame> frame);
+ static Handle<Object> GetScriptNameOrSourceUrl(Handle<StackTraceFrame> frame);
+ static Handle<Object> GetFunctionName(Handle<StackTraceFrame> frame);
+
+ static bool IsEval(Handle<StackTraceFrame> frame);
+ static bool IsConstructor(Handle<StackTraceFrame> frame);
+ static bool IsWasm(Handle<StackTraceFrame> frame);
+
+ private:
+ OBJECT_CONSTRUCTORS(StackTraceFrame, Struct);
+
+ static Handle<StackFrameInfo> GetFrameInfo(Handle<StackTraceFrame> frame);
+ static void InitializeFrameInfo(Handle<StackTraceFrame> frame);
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/string-comparator.cc b/deps/v8/src/objects/string-comparator.cc
new file mode 100644
index 0000000000..b29f9c3d7b
--- /dev/null
+++ b/deps/v8/src/objects/string-comparator.cc
@@ -0,0 +1,74 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/string-comparator.h"
+
+#include "src/objects/string-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void StringComparator::State::Init(String string) {
+ ConsString cons_string = String::VisitFlat(this, string);
+ iter_.Reset(cons_string);
+ if (!cons_string.is_null()) {
+ int offset;
+ string = iter_.Next(&offset);
+ String::VisitFlat(this, string, offset);
+ }
+}
+
+void StringComparator::State::Advance(int consumed) {
+ DCHECK(consumed <= length_);
+ // Still in buffer.
+ if (length_ != consumed) {
+ if (is_one_byte_) {
+ buffer8_ += consumed;
+ } else {
+ buffer16_ += consumed;
+ }
+ length_ -= consumed;
+ return;
+ }
+ // Advance state.
+ int offset;
+ String next = iter_.Next(&offset);
+ DCHECK_EQ(0, offset);
+ DCHECK(!next.is_null());
+ String::VisitFlat(this, next);
+}
+
+bool StringComparator::Equals(String string_1, String string_2) {
+ int length = string_1->length();
+ state_1_.Init(string_1);
+ state_2_.Init(string_2);
+ while (true) {
+ int to_check = Min(state_1_.length_, state_2_.length_);
+ DCHECK(to_check > 0 && to_check <= length);
+ bool is_equal;
+ if (state_1_.is_one_byte_) {
+ if (state_2_.is_one_byte_) {
+ is_equal = Equals<uint8_t, uint8_t>(&state_1_, &state_2_, to_check);
+ } else {
+ is_equal = Equals<uint8_t, uint16_t>(&state_1_, &state_2_, to_check);
+ }
+ } else {
+ if (state_2_.is_one_byte_) {
+ is_equal = Equals<uint16_t, uint8_t>(&state_1_, &state_2_, to_check);
+ } else {
+ is_equal = Equals<uint16_t, uint16_t>(&state_1_, &state_2_, to_check);
+ }
+ }
+ // Looping done.
+ if (!is_equal) return false;
+ length -= to_check;
+ // Exit condition. Strings are equal.
+ if (length == 0) return true;
+ state_1_.Advance(to_check);
+ state_2_.Advance(to_check);
+ }
+}
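+
+// Illustrative trace (not part of this patch): comparing a cons string
+// ("ab" + "cd") against a flat "abcd" proceeds chunk-wise. The first round
+// uses to_check = Min(2, 4) = 2; after it matches, state_1_ advances to the
+// "cd" leaf while state_2_ only bumps its buffer pointer, and the second
+// two-character round decides the result.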
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/string-comparator.h b/deps/v8/src/objects/string-comparator.h
new file mode 100644
index 0000000000..5b4354deeb
--- /dev/null
+++ b/deps/v8/src/objects/string-comparator.h
@@ -0,0 +1,109 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_STRING_COMPARATOR_H_
+#define V8_OBJECTS_STRING_COMPARATOR_H_
+
+#include "src/base/logging.h"
+#include "src/globals.h"
+#include "src/objects/string.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Compares the contents of two strings by reading and comparing
+// int-sized blocks of characters.
+template <typename Char>
+static inline bool CompareRawStringContents(const Char* const a,
+ const Char* const b, int length) {
+ return CompareChars(a, b, length) == 0;
+}
+
+template <typename Chars1, typename Chars2>
+class RawStringComparator : public AllStatic {
+ public:
+ static inline bool compare(const Chars1* a, const Chars2* b, int len) {
+ DCHECK(sizeof(Chars1) != sizeof(Chars2));
+ for (int i = 0; i < len; i++) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+};
+
+template <>
+class RawStringComparator<uint16_t, uint16_t> {
+ public:
+ static inline bool compare(const uint16_t* a, const uint16_t* b, int len) {
+ return CompareRawStringContents(a, b, len);
+ }
+};
+
+template <>
+class RawStringComparator<uint8_t, uint8_t> {
+ public:
+ static inline bool compare(const uint8_t* a, const uint8_t* b, int len) {
+ return CompareRawStringContents(a, b, len);
+ }
+};
+
+class StringComparator {
+ class State {
+ public:
+ State() : is_one_byte_(true), length_(0), buffer8_(nullptr) {}
+
+ void Init(String string);
+
+ inline void VisitOneByteString(const uint8_t* chars, int length) {
+ is_one_byte_ = true;
+ buffer8_ = chars;
+ length_ = length;
+ }
+
+ inline void VisitTwoByteString(const uint16_t* chars, int length) {
+ is_one_byte_ = false;
+ buffer16_ = chars;
+ length_ = length;
+ }
+
+ void Advance(int consumed);
+
+ ConsStringIterator iter_;
+ bool is_one_byte_;
+ int length_;
+ union {
+ const uint8_t* buffer8_;
+ const uint16_t* buffer16_;
+ };
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(State);
+ };
+
+ public:
+ inline StringComparator() = default;
+
+ template <typename Chars1, typename Chars2>
+ static inline bool Equals(State* state_1, State* state_2, int to_check) {
+ const Chars1* a = reinterpret_cast<const Chars1*>(state_1->buffer8_);
+ const Chars2* b = reinterpret_cast<const Chars2*>(state_2->buffer8_);
+ return RawStringComparator<Chars1, Chars2>::compare(a, b, to_check);
+ }
+
+ bool Equals(String string_1, String string_2);
+
+ private:
+ State state_1_;
+ State state_2_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringComparator);
+};
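+
+// A minimal usage sketch (illustrative, not part of this patch):
+//
+//   StringComparator comparator;
+//   bool equal = comparator.Equals(string_1, string_2);
+//
+// String::SlowEquals in string.cc takes exactly this path once the cheap
+// length, hash, and first-character checks have not settled the answer.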
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_STRING_COMPARATOR_H_
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index f9efd53418..8ae2ad405c 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -9,6 +9,7 @@
#include "src/conversions-inl.h"
#include "src/handles-inl.h"
+#include "src/hash-seed-inl.h"
#include "src/heap/factory.h"
#include "src/objects/name-inl.h"
#include "src/objects/smi-inl.h"
@@ -25,12 +26,12 @@ INT32_ACCESSORS(String, length, kLengthOffset)
int String::synchronized_length() const {
return base::AsAtomic32::Acquire_Load(
- reinterpret_cast<const int32_t*>(FIELD_ADDR(this, kLengthOffset)));
+ reinterpret_cast<const int32_t*>(FIELD_ADDR(*this, kLengthOffset)));
}
void String::synchronized_set_length(int value) {
base::AsAtomic32::Release_Store(
- reinterpret_cast<int32_t*>(FIELD_ADDR(this, kLengthOffset)), value);
+ reinterpret_cast<int32_t*>(FIELD_ADDR(*this, kLengthOffset)), value);
}
OBJECT_CONSTRUCTORS_IMPL(String, Name)
@@ -80,11 +81,6 @@ bool StringShape::IsInternalized() {
(kStringTag | kInternalizedTag);
}
-bool StringShape::HasOnlyOneByteChars() {
- return (type_ & kStringEncodingMask) == kOneByteStringTag ||
- (type_ & kOneByteDataHintMask) == kOneByteDataHintTag;
-}
-
bool StringShape::IsCons() {
return (type_ & kStringRepresentationMask) == kConsStringTag;
}
@@ -179,12 +175,6 @@ bool String::IsOneByteRepresentationUnderneath(String string) {
}
}
-bool String::HasOnlyOneByteChars() {
- uint32_t type = map()->instance_type();
- return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
- IsOneByteRepresentation();
-}
-
uc32 FlatStringReader::Get(int index) {
if (is_one_byte_) {
return Get<uint8_t>(index);
@@ -243,7 +233,7 @@ class SeqOneByteSubStringKey : public StringTableKey {
// We have to set the hash later.
DisallowHeapAllocation no_gc;
uint32_t hash = StringHasher::HashSequentialString(
- string->GetChars(no_gc) + from, length, isolate->heap()->HashSeed());
+ string->GetChars(no_gc) + from, length, HashSeed(isolate));
set_hash_field(hash);
DCHECK_LE(0, length_);
@@ -382,7 +372,7 @@ String String::GetUnderlying() {
STATIC_ASSERT(static_cast<int>(ConsString::kFirstOffset) ==
static_cast<int>(ThinString::kActualOffset));
const int kUnderlyingOffset = SlicedString::kParentOffset;
- return String::cast(READ_FIELD(this, kUnderlyingOffset));
+ return String::cast(READ_FIELD(*this, kUnderlyingOffset));
}
template <class Visitor>
@@ -467,17 +457,17 @@ uint32_t String::ToValidIndex(Object number) {
uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
DCHECK(index >= 0 && index < length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+ return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize);
}
void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
DCHECK(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
+ WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize,
static_cast<byte>(value));
}
Address SeqOneByteString::GetCharsAddress() {
- return FIELD_ADDR(this, kHeaderSize);
+ return FIELD_ADDR(*this, kHeaderSize);
}
uint8_t* SeqOneByteString::GetChars(const DisallowHeapAllocation& no_gc) {
@@ -486,22 +476,22 @@ uint8_t* SeqOneByteString::GetChars(const DisallowHeapAllocation& no_gc) {
}
Address SeqTwoByteString::GetCharsAddress() {
- return FIELD_ADDR(this, kHeaderSize);
+ return FIELD_ADDR(*this, kHeaderSize);
}
uc16* SeqTwoByteString::GetChars(const DisallowHeapAllocation& no_gc) {
USE(no_gc);
- return reinterpret_cast<uc16*>(FIELD_ADDR(this, kHeaderSize));
+ return reinterpret_cast<uc16*>(FIELD_ADDR(*this, kHeaderSize));
}
uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
DCHECK(index >= 0 && index < length());
- return READ_UINT16_FIELD(this, kHeaderSize + index * kShortSize);
+ return READ_UINT16_FIELD(*this, kHeaderSize + index * kShortSize);
}
void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
DCHECK(index >= 0 && index < length());
- WRITE_UINT16_FIELD(this, kHeaderSize + index * kShortSize, value);
+ WRITE_UINT16_FIELD(*this, kHeaderSize + index * kShortSize, value);
}
int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
@@ -526,7 +516,7 @@ void SlicedString::set_parent(Isolate* isolate, String parent,
SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
String ConsString::first() {
- return String::cast(READ_FIELD(this, kFirstOffset));
+ return String::cast(READ_FIELD(*this, kFirstOffset));
}
Object ConsString::unchecked_first() { return READ_FIELD(*this, kFirstOffset); }
@@ -551,7 +541,7 @@ void ConsString::set_second(Isolate* isolate, String value,
CONDITIONAL_WRITE_BARRIER(*this, kSecondOffset, value, mode);
}
-ACCESSORS(ThinString, actual, String, kActualOffset);
+ACCESSORS(ThinString, actual, String, kActualOffset)
HeapObject ThinString::unchecked_actual() const {
return HeapObject::unchecked_cast(READ_FIELD(*this, kActualOffset));
@@ -603,8 +593,9 @@ void ExternalOneByteString::SetResource(
Isolate* isolate, const ExternalOneByteString::Resource* resource) {
set_resource(resource);
size_t new_payload = resource == nullptr ? 0 : resource->length();
- if (new_payload > 0)
+ if (new_payload > 0) {
isolate->heap()->UpdateExternalString(*this, 0, new_payload);
+ }
}
void ExternalOneByteString::set_resource(
@@ -638,8 +629,9 @@ void ExternalTwoByteString::SetResource(
Isolate* isolate, const ExternalTwoByteString::Resource* resource) {
set_resource(resource);
size_t new_payload = resource == nullptr ? 0 : resource->length() * 2;
- if (new_payload > 0)
+ if (new_payload > 0) {
isolate->heap()->UpdateExternalString(*this, 0, new_payload);
+ }
}
void ExternalTwoByteString::set_resource(
diff --git a/deps/v8/src/objects/string-table-inl.h b/deps/v8/src/objects/string-table-inl.h
index ce0193d12d..199f0a0f6b 100644
--- a/deps/v8/src/objects/string-table-inl.h
+++ b/deps/v8/src/objects/string-table-inl.h
@@ -36,7 +36,7 @@ uint32_t StringSetShape::Hash(Isolate* isolate, String key) {
return key->Hash();
}
-uint32_t StringSetShape::HashForObject(Isolate* isolate, Object object) {
+uint32_t StringSetShape::HashForObject(ReadOnlyRoots roots, Object object) {
return String::cast(object)->Hash();
}
@@ -53,7 +53,7 @@ Handle<Object> StringTableShape::AsHandle(Isolate* isolate,
return key->AsHandle(isolate);
}
-uint32_t StringTableShape::HashForObject(Isolate* isolate, Object object) {
+uint32_t StringTableShape::HashForObject(ReadOnlyRoots roots, Object object) {
return String::cast(object)->Hash();
}
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index 44b9fd930c..5e6d012e6b 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -39,7 +39,7 @@ class StringTableShape : public BaseShape<StringTableKey*> {
static inline uint32_t Hash(Isolate* isolate, Key key) { return key->Hash(); }
- static inline uint32_t HashForObject(Isolate* isolate, Object object);
+ static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static inline Handle<Object> AsHandle(Isolate* isolate, Key key);
@@ -93,14 +93,14 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
template <bool seq_one_byte>
friend class JsonParser;
- OBJECT_CONSTRUCTORS(StringTable, HashTable<StringTable, StringTableShape>)
+ OBJECT_CONSTRUCTORS(StringTable, HashTable<StringTable, StringTableShape>);
};
class StringSetShape : public BaseShape<String> {
public:
static inline bool IsMatch(String key, Object value);
static inline uint32_t Hash(Isolate* isolate, String key);
- static inline uint32_t HashForObject(Isolate* isolate, Object object);
+ static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static const int kPrefixSize = 0;
static const int kEntrySize = 1;
@@ -114,7 +114,7 @@ class StringSet : public HashTable<StringSet, StringSetShape> {
bool Has(Isolate* isolate, Handle<String> name);
DECL_CAST(StringSet)
- OBJECT_CONSTRUCTORS(StringSet, HashTable<StringSet, StringSetShape>)
+ OBJECT_CONSTRUCTORS(StringSet, HashTable<StringSet, StringSetShape>);
};
} // namespace internal
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
new file mode 100644
index 0000000000..a735d038fd
--- /dev/null
+++ b/deps/v8/src/objects/string.cc
@@ -0,0 +1,1526 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/string.h"
+
+#include "src/char-predicates.h"
+#include "src/conversions.h"
+#include "src/handles-inl.h"
+#include "src/heap/heap-inl.h" // For LooksValid implementation.
+#include "src/objects/map.h"
+#include "src/objects/oddball.h"
+#include "src/objects/string-comparator.h"
+#include "src/objects/string-inl.h"
+#include "src/ostreams.h"
+#include "src/string-builder-inl.h"
+#include "src/string-hasher.h"
+#include "src/string-search.h"
+#include "src/string-stream.h"
+#include "src/unicode-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
+ PretenureFlag pretenure) {
+ DCHECK_NE(cons->second()->length(), 0);
+
+ // TurboFan can create cons strings with empty first parts.
+ while (cons->first()->length() == 0) {
+ // We do not want to call this function recursively. Therefore we call
+ // String::Flatten only in those cases where String::SlowFlatten is not
+ // called again.
+ if (cons->second()->IsConsString() && !cons->second()->IsFlat()) {
+ cons = handle(ConsString::cast(cons->second()), isolate);
+ } else {
+ return String::Flatten(isolate, handle(cons->second(), isolate));
+ }
+ }
+
+ DCHECK(AllowHeapAllocation::IsAllowed());
+ int length = cons->length();
+ PretenureFlag tenure = ObjectInYoungGeneration(*cons) ? pretenure : TENURED;
+ Handle<SeqString> result;
+ if (cons->IsOneByteRepresentation()) {
+ Handle<SeqOneByteString> flat = isolate->factory()
+ ->NewRawOneByteString(length, tenure)
+ .ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ WriteToFlat(*cons, flat->GetChars(no_gc), 0, length);
+ result = flat;
+ } else {
+ Handle<SeqTwoByteString> flat = isolate->factory()
+ ->NewRawTwoByteString(length, tenure)
+ .ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ WriteToFlat(*cons, flat->GetChars(no_gc), 0, length);
+ result = flat;
+ }
+ cons->set_first(isolate, *result);
+ cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
+ DCHECK(result->IsFlat());
+ return result;
+}
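+
+// Illustrative effect (not part of this patch): flattening a two-node cons
+// string allocates a sequential copy and rewires the cons in place, so
+// subsequent lookups see the flat representation:
+//
+//   cons = ConsString("foo", "bar")        // length 6
+//   flat = String::Flatten(isolate, cons)  // SeqString "foobar"
+//   // afterwards: cons->first() == *flat, cons->second() == ""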
+
+bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
+ DisallowHeapAllocation no_allocation;
+ // Externalizing twice leaks the external resource, so it's
+ // prohibited by the API.
+ DCHECK(this->SupportsExternalization());
+ DCHECK(resource->IsCacheable());
+#ifdef ENABLE_SLOW_DCHECKS
+ if (FLAG_enable_slow_asserts) {
+ // Assert that the resource and the string are equivalent.
+ DCHECK(static_cast<size_t>(this->length()) == resource->length());
+ ScopedVector<uc16> smart_chars(this->length());
+ String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
+ DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
+ resource->length() * sizeof(smart_chars[0])));
+ }
+#endif  // ENABLE_SLOW_DCHECKS
+ int size = this->Size(); // Byte size of the original string.
+ // Abort if size does not allow in-place conversion.
+ if (size < ExternalString::kUncachedSize) return false;
+ Isolate* isolate;
+ // Read-only strings cannot be made external, since that would mutate the
+ // string.
+ if (!GetIsolateFromWritableObject(*this, &isolate)) return false;
+ Heap* heap = isolate->heap();
+ bool is_internalized = this->IsInternalizedString();
+ bool has_pointers = StringShape(*this).IsIndirect();
+ if (has_pointers) {
+ heap->NotifyObjectLayoutChange(*this, size, no_allocation);
+ }
+ // Morph the string to an external string by replacing the map and
+ // reinitializing the fields. This won't work if the space the existing
+ // string occupies is too small for a regular external string. In that
+ // case we resort to an uncached external string, omitting the field that
+ // caches the address of the backing store. When we encounter uncached
+ // external strings in generated code, we need to bail out to the runtime.
+ Map new_map;
+ ReadOnlyRoots roots(heap);
+ if (size < ExternalString::kSize) {
+ if (is_internalized) {
+ new_map = roots.uncached_external_internalized_string_map();
+ } else {
+ new_map = roots.uncached_external_string_map();
+ }
+ } else {
+ new_map = is_internalized ? roots.external_internalized_string_map()
+ : roots.external_string_map();
+ }
+
+ // Byte size of the external String object.
+ int new_size = this->SizeFromMap(new_map);
+ heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
+ ClearRecordedSlots::kNo);
+ if (has_pointers) {
+ heap->ClearRecordedSlotRange(this->address(), this->address() + new_size);
+ }
+
+ // We are storing the new map using release store after creating a filler for
+ // the left-over space to avoid races with the sweeper thread.
+ this->synchronized_set_map(new_map);
+
+ ExternalTwoByteString self = ExternalTwoByteString::cast(*this);
+ self->SetResource(isolate, resource);
+ heap->RegisterExternalString(*this);
+ if (is_internalized) self->Hash(); // Force regeneration of the hash value.
+ return true;
+}
+
+bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
+ DisallowHeapAllocation no_allocation;
+ // Externalizing twice leaks the external resource, so it's
+ // prohibited by the API.
+ DCHECK(this->SupportsExternalization());
+ DCHECK(resource->IsCacheable());
+#ifdef ENABLE_SLOW_DCHECKS
+ if (FLAG_enable_slow_asserts) {
+ // Assert that the resource and the string are equivalent.
+ DCHECK(static_cast<size_t>(this->length()) == resource->length());
+ if (this->IsTwoByteRepresentation()) {
+ ScopedVector<uint16_t> smart_chars(this->length());
+ String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
+ DCHECK(String::IsOneByte(smart_chars.start(), this->length()));
+ }
+ ScopedVector<char> smart_chars(this->length());
+ String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
+ DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
+ resource->length() * sizeof(smart_chars[0])));
+ }
+#endif  // ENABLE_SLOW_DCHECKS
+ int size = this->Size(); // Byte size of the original string.
+ // Abort if size does not allow in-place conversion.
+ if (size < ExternalString::kUncachedSize) return false;
+ Isolate* isolate;
+ // Read-only strings cannot be made external, since that would mutate the
+ // string.
+ if (!GetIsolateFromWritableObject(*this, &isolate)) return false;
+ Heap* heap = isolate->heap();
+ bool is_internalized = this->IsInternalizedString();
+ bool has_pointers = StringShape(*this).IsIndirect();
+
+ if (has_pointers) {
+ heap->NotifyObjectLayoutChange(*this, size, no_allocation);
+ }
+
+ // Morph the string to an external string by replacing the map and
+ // reinitializing the fields. This won't work if the space the existing
+ // string occupies is too small for a regular external string. In that
+ // case we resort to an uncached external string, omitting the field that
+ // caches the address of the backing store. When we encounter uncached
+ // external strings in generated code, we need to bail out to the runtime.
+ Map new_map;
+ ReadOnlyRoots roots(heap);
+ if (size < ExternalString::kSize) {
+ new_map = is_internalized
+ ? roots.uncached_external_one_byte_internalized_string_map()
+ : roots.uncached_external_one_byte_string_map();
+ } else {
+ new_map = is_internalized
+ ? roots.external_one_byte_internalized_string_map()
+ : roots.external_one_byte_string_map();
+ }
+
+ // Byte size of the external String object.
+ int new_size = this->SizeFromMap(new_map);
+ heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
+ ClearRecordedSlots::kNo);
+ if (has_pointers) {
+ heap->ClearRecordedSlotRange(this->address(), this->address() + new_size);
+ }
+
+ // We are storing the new map using release store after creating a filler for
+ // the left-over space to avoid races with the sweeper thread.
+ this->synchronized_set_map(new_map);
+
+ ExternalOneByteString self = ExternalOneByteString::cast(*this);
+ self->SetResource(isolate, resource);
+ heap->RegisterExternalString(*this);
+ if (is_internalized) self->Hash(); // Force regeneration of the hash value.
+ return true;
+}
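+
+// A minimal embedder-side sketch (illustrative; the resource class and its
+// buffer members are hypothetical). v8::String::MakeExternal routes into the
+// in-place map transition above:
+//
+//   class MyResource : public v8::String::ExternalOneByteStringResource {
+//    public:
+//     const char* data() const override { return buf_; }
+//     size_t length() const override { return len_; }
+//     // ... ownership of buf_/len_ elided ...
+//   };
+//   bool ok = v8_string->MakeExternal(new MyResource(...));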
+
+bool String::SupportsExternalization() {
+ if (this->IsThinString()) {
+ return i::ThinString::cast(*this)->actual()->SupportsExternalization();
+ }
+
+ Isolate* isolate;
+ // RO_SPACE strings cannot be externalized.
+ if (!GetIsolateFromWritableObject(*this, &isolate)) {
+ return false;
+ }
+
+ // Already an external string.
+ if (StringShape(*this).IsExternal()) {
+ return false;
+ }
+
+#ifdef V8_COMPRESS_POINTERS
+ // Small strings may not be in-place externalizable.
+ if (this->Size() < ExternalString::kUncachedSize) return false;
+#else
+ DCHECK_LE(ExternalString::kUncachedSize, this->Size());
+#endif
+
+ return !isolate->heap()->IsInGCPostProcessing();
+}
+
+void String::StringShortPrint(StringStream* accumulator, bool show_details) {
+ const char* internalized_marker = this->IsInternalizedString() ? "#" : "";
+
+ int len = length();
+ if (len > kMaxShortPrintLength) {
+ accumulator->Add("<Very long string[%s%u]>", internalized_marker, len);
+ return;
+ }
+
+ if (!LooksValid()) {
+ accumulator->Add("<Invalid String>");
+ return;
+ }
+
+ StringCharacterStream stream(*this);
+
+ bool truncated = false;
+ if (len > kMaxShortPrintLength) {
+ len = kMaxShortPrintLength;
+ truncated = true;
+ }
+ bool one_byte = true;
+ for (int i = 0; i < len; i++) {
+ uint16_t c = stream.GetNext();
+
+ if (c < 32 || c >= 127) {
+ one_byte = false;
+ }
+ }
+ stream.Reset(*this);
+ if (one_byte) {
+ if (show_details)
+ accumulator->Add("<String[%s%u]: ", internalized_marker, length());
+ for (int i = 0; i < len; i++) {
+ accumulator->Put(static_cast<char>(stream.GetNext()));
+ }
+ if (show_details) accumulator->Put('>');
+ } else {
+ // Backslash indicates that the string contains control
+ // characters and that backslashes are therefore escaped.
+ if (show_details)
+ accumulator->Add("<String[%s%u]\\: ", internalized_marker, length());
+ for (int i = 0; i < len; i++) {
+ uint16_t c = stream.GetNext();
+ if (c == '\n') {
+ accumulator->Add("\\n");
+ } else if (c == '\r') {
+ accumulator->Add("\\r");
+ } else if (c == '\\') {
+ accumulator->Add("\\\\");
+ } else if (c < 32 || c > 126) {
+ accumulator->Add("\\x%02x", c);
+ } else {
+ accumulator->Put(static_cast<char>(c));
+ }
+ }
+ if (truncated) {
+ accumulator->Put('.');
+ accumulator->Put('.');
+ accumulator->Put('.');
+ }
+ if (show_details) accumulator->Put('>');
+ }
+ return;
+}
+
+void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
+ if (end < 0) end = length();
+ StringCharacterStream stream(*this, start);
+ for (int i = start; i < end && stream.HasMore(); i++) {
+ os << AsUC16(stream.GetNext());
+ }
+}
+
+// static
+Handle<String> String::Trim(Isolate* isolate, Handle<String> string,
+ TrimMode mode) {
+ string = String::Flatten(isolate, string);
+ int const length = string->length();
+
+ // Perform left trimming if requested.
+ int left = 0;
+ if (mode == kTrim || mode == kTrimStart) {
+ while (left < length && IsWhiteSpaceOrLineTerminator(string->Get(left))) {
+ left++;
+ }
+ }
+
+ // Perform right trimming if requested.
+ int right = length;
+ if (mode == kTrim || mode == kTrimEnd) {
+ while (right > left &&
+ IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
+ right--;
+ }
+ }
+
+ return isolate->factory()->NewSubString(string, left, right);
+}
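+
+// Illustrative results (not part of this patch), per the loops above:
+//
+//   Trim(isolate, "  a b  ", kTrim)       -> "a b"
+//   Trim(isolate, "  a b  ", kTrimStart)  -> "a b  "
+//   Trim(isolate, "  a b  ", kTrimEnd)    -> "  a b"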
+
+bool String::LooksValid() {
+ // TODO(leszeks): Maybe remove this check entirely; Heap::Contains uses
+ // basically the same logic as the way we access the heap in the first place.
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*this);
+ // RO_SPACE objects should always be valid.
+ if (chunk->owner()->identity() == RO_SPACE) return true;
+ if (chunk->heap() == nullptr) return false;
+ return chunk->heap()->Contains(*this);
+}
+
+namespace {
+
+bool AreDigits(const uint8_t* s, int from, int to) {
+ for (int i = from; i < to; i++) {
+ if (s[i] < '0' || s[i] > '9') return false;
+ }
+
+ return true;
+}
+
+int ParseDecimalInteger(const uint8_t* s, int from, int to) {
+ DCHECK_LT(to - from, 10); // Overflow is not possible.
+ DCHECK(from < to);
+ int d = s[from] - '0';
+
+ for (int i = from + 1; i < to; i++) {
+ d = 10 * d + (s[i] - '0');
+ }
+
+ return d;
+}
+
+} // namespace
+
+// static
+Handle<Object> String::ToNumber(Isolate* isolate, Handle<String> subject) {
+ // Flatten {subject} string first.
+ subject = String::Flatten(isolate, subject);
+
+ // Fast array index case.
+ uint32_t index;
+ if (subject->AsArrayIndex(&index)) {
+ return isolate->factory()->NewNumberFromUint(index);
+ }
+
+ // Fast case: short integers and certain junk values.
+ if (subject->IsSeqOneByteString()) {
+ int len = subject->length();
+ if (len == 0) return handle(Smi::kZero, isolate);
+
+ DisallowHeapAllocation no_gc;
+ uint8_t const* data =
+ Handle<SeqOneByteString>::cast(subject)->GetChars(no_gc);
+ bool minus = (data[0] == '-');
+ int start_pos = (minus ? 1 : 0);
+
+ if (start_pos == len) {
+ return isolate->factory()->nan_value();
+ } else if (data[start_pos] > '9') {
+ // Fast check for a junk value. A valid string may start with whitespace,
+ // a sign ('+' or '-'), the decimal point, a decimal digit, or the 'I'
+ // character ('Infinity'). All of these have character codes not greater
+ // than '9', except 'I' and the non-breaking space (0xA0).
+ if (data[start_pos] != 'I' && data[start_pos] != 0xA0) {
+ return isolate->factory()->nan_value();
+ }
+ } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
+ // The maximal/minimal smi has 10 digits. If the string has less digits
+ // we know it will fit into the smi-data type.
+ int d = ParseDecimalInteger(data, start_pos, len);
+ if (minus) {
+ if (d == 0) return isolate->factory()->minus_zero_value();
+ d = -d;
+ } else if (!subject->HasHashCode() && len <= String::kMaxArrayIndexSize &&
+ (len == 1 || data[0] != '0')) {
+ // String hash is not calculated yet but all the data are present.
+ // Update the hash field to speed up sequential conversions.
+ uint32_t hash = StringHasher::MakeArrayIndexHash(d, len);
+#ifdef DEBUG
+ subject->Hash(); // Force hash calculation.
+ DCHECK_EQ(static_cast<int>(subject->hash_field()),
+ static_cast<int>(hash));
+#endif
+ subject->set_hash_field(hash);
+ }
+ return handle(Smi::FromInt(d), isolate);
+ }
+ }
+
+ // Slower case.
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ return isolate->factory()->NewNumber(StringToDouble(isolate, subject, flags));
+}
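+
+// Illustrative fast-path walk (not part of this patch) for ToNumber("-42"):
+// the subject is a SeqOneByteString, so minus = true and start_pos = 1;
+// AreDigits(data, 1, 3) holds, ParseDecimalInteger returns 42, and the
+// function yields Smi::FromInt(-42) without ever calling StringToDouble.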
+
+String::FlatContent String::GetFlatContent(
+ const DisallowHeapAllocation& no_gc) {
+ USE(no_gc);
+ int length = this->length();
+ StringShape shape(*this);
+ String string = *this;
+ int offset = 0;
+ if (shape.representation_tag() == kConsStringTag) {
+ ConsString cons = ConsString::cast(string);
+ if (cons->second()->length() != 0) {
+ return FlatContent();
+ }
+ string = cons->first();
+ shape = StringShape(string);
+ } else if (shape.representation_tag() == kSlicedStringTag) {
+ SlicedString slice = SlicedString::cast(string);
+ offset = slice->offset();
+ string = slice->parent();
+ shape = StringShape(string);
+ DCHECK(shape.representation_tag() != kConsStringTag &&
+ shape.representation_tag() != kSlicedStringTag);
+ }
+ if (shape.representation_tag() == kThinStringTag) {
+ ThinString thin = ThinString::cast(string);
+ string = thin->actual();
+ shape = StringShape(string);
+ DCHECK(!shape.IsCons());
+ DCHECK(!shape.IsSliced());
+ }
+ if (shape.encoding_tag() == kOneByteStringTag) {
+ const uint8_t* start;
+ if (shape.representation_tag() == kSeqStringTag) {
+ start = SeqOneByteString::cast(string)->GetChars(no_gc);
+ } else {
+ start = ExternalOneByteString::cast(string)->GetChars();
+ }
+ return FlatContent(start + offset, length);
+ } else {
+ DCHECK_EQ(shape.encoding_tag(), kTwoByteStringTag);
+ const uc16* start;
+ if (shape.representation_tag() == kSeqStringTag) {
+ start = SeqTwoByteString::cast(string)->GetChars(no_gc);
+ } else {
+ start = ExternalTwoByteString::cast(string)->GetChars();
+ }
+ return FlatContent(start + offset, length);
+ }
+}
+
+std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robust_flag,
+ int offset, int length,
+ int* length_return) {
+ if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
+ return std::unique_ptr<char[]>();
+ }
+ // A negative length means "to the end of the string".
+ if (length < 0) length = kMaxInt - offset;
+
+ // Compute the size of the UTF-8 string. Start at the specified offset.
+ StringCharacterStream stream(*this, offset);
+ int character_position = offset;
+ int utf8_bytes = 0;
+ int last = unibrow::Utf16::kNoPreviousCharacter;
+ while (stream.HasMore() && character_position++ < offset + length) {
+ uint16_t character = stream.GetNext();
+ utf8_bytes += unibrow::Utf8::Length(character, last);
+ last = character;
+ }
+
+ if (length_return) {
+ *length_return = utf8_bytes;
+ }
+
+ char* result = NewArray<char>(utf8_bytes + 1);
+
+ // Convert the UTF-16 string to a UTF-8 buffer. Start at the specified offset.
+ stream.Reset(*this, offset);
+ character_position = offset;
+ int utf8_byte_position = 0;
+ last = unibrow::Utf16::kNoPreviousCharacter;
+ while (stream.HasMore() && character_position++ < offset + length) {
+ uint16_t character = stream.GetNext();
+ if (allow_nulls == DISALLOW_NULLS && character == 0) {
+ character = ' ';
+ }
+ utf8_byte_position +=
+ unibrow::Utf8::Encode(result + utf8_byte_position, character, last);
+ last = character;
+ }
+ result[utf8_byte_position] = 0;
+ return std::unique_ptr<char[]>(result);
+}
+
+std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robust_flag,
+ int* length_return) {
+ return ToCString(allow_nulls, robust_flag, 0, -1, length_return);
+}
+
+template <typename sinkchar>
+void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
+ DisallowHeapAllocation no_gc;
+ String source = src;
+ int from = f;
+ int to = t;
+ while (true) {
+ DCHECK(0 <= from && from <= to && to <= source->length());
+ switch (StringShape(source).full_representation_tag()) {
+ case kOneByteStringTag | kExternalStringTag: {
+ CopyChars(sink, ExternalOneByteString::cast(source)->GetChars() + from,
+ to - from);
+ return;
+ }
+ case kTwoByteStringTag | kExternalStringTag: {
+ const uc16* data = ExternalTwoByteString::cast(source)->GetChars();
+ CopyChars(sink, data + from, to - from);
+ return;
+ }
+ case kOneByteStringTag | kSeqStringTag: {
+ CopyChars(sink, SeqOneByteString::cast(source)->GetChars(no_gc) + from,
+ to - from);
+ return;
+ }
+ case kTwoByteStringTag | kSeqStringTag: {
+ CopyChars(sink, SeqTwoByteString::cast(source)->GetChars(no_gc) + from,
+ to - from);
+ return;
+ }
+ case kOneByteStringTag | kConsStringTag:
+ case kTwoByteStringTag | kConsStringTag: {
+ ConsString cons_string = ConsString::cast(source);
+ String first = cons_string->first();
+ int boundary = first->length();
+ if (to - boundary >= boundary - from) {
+ // Right hand side is longer. Recurse over left.
+ if (from < boundary) {
+ WriteToFlat(first, sink, from, boundary);
+ if (from == 0 && cons_string->second() == first) {
+ CopyChars(sink + boundary, sink, boundary);
+ return;
+ }
+ sink += boundary - from;
+ from = 0;
+ } else {
+ from -= boundary;
+ }
+ to -= boundary;
+ source = cons_string->second();
+ } else {
+ // Left hand side is longer. Recurse over right.
+ if (to > boundary) {
+ String second = cons_string->second();
+ // When repeatedly appending to a string, we get a cons string that
+ // is unbalanced to the left, a list, essentially. We inline the
+ // common case of sequential one-byte right child.
+ if (to - boundary == 1) {
+ sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
+ } else if (second->IsSeqOneByteString()) {
+ CopyChars(sink + boundary - from,
+ SeqOneByteString::cast(second)->GetChars(no_gc),
+ to - boundary);
+ } else {
+ WriteToFlat(second, sink + boundary - from, 0, to - boundary);
+ }
+ to = boundary;
+ }
+ source = first;
+ }
+ break;
+ }
+ case kOneByteStringTag | kSlicedStringTag:
+ case kTwoByteStringTag | kSlicedStringTag: {
+ SlicedString slice = SlicedString::cast(source);
+ unsigned offset = slice->offset();
+ WriteToFlat(slice->parent(), sink, from + offset, to + offset);
+ return;
+ }
+ case kOneByteStringTag | kThinStringTag:
+ case kTwoByteStringTag | kThinStringTag:
+ source = ThinString::cast(source)->actual();
+ break;
+ }
+ }
+}
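+
+// Strategy note (illustrative, not part of this patch): at every cons node
+// the loop above recurses over the shorter side and iterates over the
+// longer one, so the recursion depth stays logarithmic in the string length
+// even for a degenerate append chain such as ((("a" + "b") + "c") + "d").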
+
+template <typename SourceChar>
+static void CalculateLineEndsImpl(Isolate* isolate, std::vector<int>* line_ends,
+ Vector<const SourceChar> src,
+ bool include_ending_line) {
+ const int src_len = src.length();
+ for (int i = 0; i < src_len - 1; i++) {
+ SourceChar current = src[i];
+ SourceChar next = src[i + 1];
+ if (IsLineTerminatorSequence(current, next)) line_ends->push_back(i);
+ }
+
+ if (src_len > 0 && IsLineTerminatorSequence(src[src_len - 1], 0)) {
+ line_ends->push_back(src_len - 1);
+ }
+ if (include_ending_line) {
+ // Include one character beyond the end of script. The rewriter uses that
+ // position for the implicit return statement.
+ line_ends->push_back(src_len);
+ }
+}
+
+Handle<FixedArray> String::CalculateLineEnds(Isolate* isolate,
+ Handle<String> src,
+ bool include_ending_line) {
+ src = Flatten(isolate, src);
+ // Rough estimate of the line count, assuming an average (unpacked) source
+ // line length of 16 characters.
+ int line_count_estimate = src->length() >> 4;
+ std::vector<int> line_ends;
+ line_ends.reserve(line_count_estimate);
+ {
+ DisallowHeapAllocation no_allocation; // ensure vectors stay valid.
+ // Dispatch on type of strings.
+ String::FlatContent content = src->GetFlatContent(no_allocation);
+ DCHECK(content.IsFlat());
+ if (content.IsOneByte()) {
+ CalculateLineEndsImpl(isolate, &line_ends, content.ToOneByteVector(),
+ include_ending_line);
+ } else {
+ CalculateLineEndsImpl(isolate, &line_ends, content.ToUC16Vector(),
+ include_ending_line);
+ }
+ }
+ int line_count = static_cast<int>(line_ends.size());
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count);
+ for (int i = 0; i < line_count; i++) {
+ array->set(i, Smi::FromInt(line_ends[i]));
+ }
+ return array;
+}
+
+bool String::SlowEquals(String other) {
+ DisallowHeapAllocation no_gc;
+ // Fast check: negative check with lengths.
+ int len = length();
+ if (len != other->length()) return false;
+ if (len == 0) return true;
+
+ // Fast check: if at least one ThinString is involved, dereference it/them
+ // and restart.
+ if (this->IsThinString() || other->IsThinString()) {
+ if (other->IsThinString()) other = ThinString::cast(other)->actual();
+ if (this->IsThinString()) {
+ return ThinString::cast(*this)->actual()->Equals(other);
+ } else {
+ return this->Equals(other);
+ }
+ }
+
+ // Fast check: if hash code is computed for both strings
+ // a fast negative check can be performed.
+ if (HasHashCode() && other->HasHashCode()) {
+#ifdef ENABLE_SLOW_DCHECKS
+ if (FLAG_enable_slow_asserts) {
+ if (Hash() != other->Hash()) {
+ bool found_difference = false;
+ for (int i = 0; i < len; i++) {
+ if (Get(i) != other->Get(i)) {
+ found_difference = true;
+ break;
+ }
+ }
+ DCHECK(found_difference);
+ }
+ }
+#endif
+ if (Hash() != other->Hash()) return false;
+ }
+
+ // We know the strings are both non-empty. Compare the first chars
+ // before we try to flatten the strings.
+ if (this->Get(0) != other->Get(0)) return false;
+
+ if (IsSeqOneByteString() && other->IsSeqOneByteString()) {
+ const uint8_t* str1 = SeqOneByteString::cast(*this)->GetChars(no_gc);
+ const uint8_t* str2 = SeqOneByteString::cast(other)->GetChars(no_gc);
+ return CompareRawStringContents(str1, str2, len);
+ }
+
+ StringComparator comparator;
+ return comparator.Equals(*this, other);
+}
+
+bool String::SlowEquals(Isolate* isolate, Handle<String> one,
+ Handle<String> two) {
+ // Fast check: negative check with lengths.
+ int one_length = one->length();
+ if (one_length != two->length()) return false;
+ if (one_length == 0) return true;
+
+ // Fast check: if at least one ThinString is involved, dereference it/them
+ // and restart.
+ if (one->IsThinString() || two->IsThinString()) {
+ if (one->IsThinString())
+ one = handle(ThinString::cast(*one)->actual(), isolate);
+ if (two->IsThinString())
+ two = handle(ThinString::cast(*two)->actual(), isolate);
+ return String::Equals(isolate, one, two);
+ }
+
+ // Fast check: if hash code is computed for both strings
+ // a fast negative check can be performed.
+ if (one->HasHashCode() && two->HasHashCode()) {
+#ifdef ENABLE_SLOW_DCHECKS
+ if (FLAG_enable_slow_asserts) {
+ if (one->Hash() != two->Hash()) {
+ bool found_difference = false;
+ for (int i = 0; i < one_length; i++) {
+ if (one->Get(i) != two->Get(i)) {
+ found_difference = true;
+ break;
+ }
+ }
+ DCHECK(found_difference);
+ }
+ }
+#endif
+ if (one->Hash() != two->Hash()) return false;
+ }
+
+ // We know the strings are both non-empty. Compare the first chars
+ // before we try to flatten the strings.
+ if (one->Get(0) != two->Get(0)) return false;
+
+ one = String::Flatten(isolate, one);
+ two = String::Flatten(isolate, two);
+
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat1 = one->GetFlatContent(no_gc);
+ String::FlatContent flat2 = two->GetFlatContent(no_gc);
+
+ if (flat1.IsOneByte() && flat2.IsOneByte()) {
+ return CompareRawStringContents(flat1.ToOneByteVector().start(),
+ flat2.ToOneByteVector().start(),
+ one_length);
+ } else {
+ for (int i = 0; i < one_length; i++) {
+ if (flat1.Get(i) != flat2.Get(i)) return false;
+ }
+ return true;
+ }
+}
+
+// static
+ComparisonResult String::Compare(Isolate* isolate, Handle<String> x,
+ Handle<String> y) {
+ // A few fast case tests before we flatten.
+ if (x.is_identical_to(y)) {
+ return ComparisonResult::kEqual;
+ } else if (y->length() == 0) {
+ return x->length() == 0 ? ComparisonResult::kEqual
+ : ComparisonResult::kGreaterThan;
+ } else if (x->length() == 0) {
+ return ComparisonResult::kLessThan;
+ }
+
+ int const d = x->Get(0) - y->Get(0);
+ if (d < 0) {
+ return ComparisonResult::kLessThan;
+ } else if (d > 0) {
+ return ComparisonResult::kGreaterThan;
+ }
+
+ // Slow case.
+ x = String::Flatten(isolate, x);
+ y = String::Flatten(isolate, y);
+
+ DisallowHeapAllocation no_gc;
+ ComparisonResult result = ComparisonResult::kEqual;
+ int prefix_length = x->length();
+ if (y->length() < prefix_length) {
+ prefix_length = y->length();
+ result = ComparisonResult::kGreaterThan;
+ } else if (y->length() > prefix_length) {
+ result = ComparisonResult::kLessThan;
+ }
+ int r;
+ String::FlatContent x_content = x->GetFlatContent(no_gc);
+ String::FlatContent y_content = y->GetFlatContent(no_gc);
+ if (x_content.IsOneByte()) {
+ Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
+ if (y_content.IsOneByte()) {
+ Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ } else {
+ Vector<const uc16> y_chars = y_content.ToUC16Vector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ }
+ } else {
+ Vector<const uc16> x_chars = x_content.ToUC16Vector();
+ if (y_content.IsOneByte()) {
+ Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ } else {
+ Vector<const uc16> y_chars = y_content.ToUC16Vector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ }
+ }
+ if (r < 0) {
+ result = ComparisonResult::kLessThan;
+ } else if (r > 0) {
+ result = ComparisonResult::kGreaterThan;
+ }
+ return result;
+}
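+
+// Illustrative outcomes (not part of this patch), following the prefix
+// logic above:
+//
+//   Compare(isolate, "app", "apple")  -> kLessThan (equal prefix, shorter)
+//   Compare(isolate, "apple", "app")  -> kGreaterThan
+//   Compare(isolate, "b", "a...")     -> kGreaterThan (first chars differ)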
+
+Object String::IndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position) {
+ if (receiver->IsNullOrUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "String.prototype.indexOf")));
+ }
+ Handle<String> receiver_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_string,
+ Object::ToString(isolate, receiver));
+
+ Handle<String> search_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+ Object::ToString(isolate, search));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+ Object::ToInteger(isolate, position));
+
+ uint32_t index = receiver_string->ToValidIndex(*position);
+ return Smi::FromInt(
+ String::IndexOf(isolate, receiver_string, search_string, index));
+}
+
+namespace {
+
+template <typename T>
+int SearchString(Isolate* isolate, String::FlatContent receiver_content,
+ Vector<T> pat_vector, int start_index) {
+ if (receiver_content.IsOneByte()) {
+ return SearchString(isolate, receiver_content.ToOneByteVector(), pat_vector,
+ start_index);
+ }
+ return SearchString(isolate, receiver_content.ToUC16Vector(), pat_vector,
+ start_index);
+}
+
+} // namespace
+
+int String::IndexOf(Isolate* isolate, Handle<String> receiver,
+ Handle<String> search, int start_index) {
+ DCHECK_LE(0, start_index);
+ DCHECK(start_index <= receiver->length());
+
+ uint32_t search_length = search->length();
+ if (search_length == 0) return start_index;
+
+ uint32_t receiver_length = receiver->length();
+ if (start_index + search_length > receiver_length) return -1;
+
+ receiver = String::Flatten(isolate, receiver);
+ search = String::Flatten(isolate, search);
+
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
+ // Extract flattened substrings of cons strings before getting encoding.
+ String::FlatContent receiver_content = receiver->GetFlatContent(no_gc);
+ String::FlatContent search_content = search->GetFlatContent(no_gc);
+
+ // Dispatch on type of strings.
+ if (search_content.IsOneByte()) {
+ Vector<const uint8_t> pat_vector = search_content.ToOneByteVector();
+ return SearchString<const uint8_t>(isolate, receiver_content, pat_vector,
+ start_index);
+ }
+ Vector<const uc16> pat_vector = search_content.ToUC16Vector();
+ return SearchString<const uc16>(isolate, receiver_content, pat_vector,
+ start_index);
+}
+
+MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
+ Handle<String> replacement,
+ int start_index) {
+ DCHECK_GE(start_index, 0);
+
+ Factory* factory = isolate->factory();
+
+ const int replacement_length = replacement->length();
+ const int captures_length = match->CaptureCount();
+
+ replacement = String::Flatten(isolate, replacement);
+
+ Handle<String> dollar_string =
+ factory->LookupSingleCharacterStringFromCode('$');
+ int next_dollar_ix =
+ String::IndexOf(isolate, replacement, dollar_string, start_index);
+ if (next_dollar_ix < 0) {
+ return replacement;
+ }
+
+ IncrementalStringBuilder builder(isolate);
+
+ if (next_dollar_ix > 0) {
+ builder.AppendString(factory->NewSubString(replacement, 0, next_dollar_ix));
+ }
+
+ while (true) {
+ const int peek_ix = next_dollar_ix + 1;
+ if (peek_ix >= replacement_length) {
+ builder.AppendCharacter('$');
+ return builder.Finish();
+ }
+
+ int continue_from_ix = -1;
+ const uint16_t peek = replacement->Get(peek_ix);
+ switch (peek) {
+ case '$': // $$
+ builder.AppendCharacter('$');
+ continue_from_ix = peek_ix + 1;
+ break;
+ case '&': // $& - match
+ builder.AppendString(match->GetMatch());
+ continue_from_ix = peek_ix + 1;
+ break;
+ case '`': // $` - prefix
+ builder.AppendString(match->GetPrefix());
+ continue_from_ix = peek_ix + 1;
+ break;
+ case '\'': // $' - suffix
+ builder.AppendString(match->GetSuffix());
+ continue_from_ix = peek_ix + 1;
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9': {
+ // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
+ int scaled_index = (peek - '0');
+ int advance = 1;
+
+ if (peek_ix + 1 < replacement_length) {
+ const uint16_t next_peek = replacement->Get(peek_ix + 1);
+ if (next_peek >= '0' && next_peek <= '9') {
+ const int new_scaled_index = scaled_index * 10 + (next_peek - '0');
+ if (new_scaled_index < captures_length) {
+ scaled_index = new_scaled_index;
+ advance = 2;
+ }
+ }
+ }
+
+ if (scaled_index == 0 || scaled_index >= captures_length) {
+ builder.AppendCharacter('$');
+ continue_from_ix = peek_ix;
+ break;
+ }
+
+ bool capture_exists;
+ Handle<String> capture;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, capture, match->GetCapture(scaled_index, &capture_exists),
+ String);
+ if (capture_exists) builder.AppendString(capture);
+ continue_from_ix = peek_ix + advance;
+ break;
+ }
+ case '<': { // $<name> - named capture
+ typedef String::Match::CaptureState CaptureState;
+
+ if (!match->HasNamedCaptures()) {
+ builder.AppendCharacter('$');
+ continue_from_ix = peek_ix;
+ break;
+ }
+
+ Handle<String> bracket_string =
+ factory->LookupSingleCharacterStringFromCode('>');
+ const int closing_bracket_ix =
+ String::IndexOf(isolate, replacement, bracket_string, peek_ix + 1);
+
+ if (closing_bracket_ix == -1) {
+ // No closing bracket was found, treat '$<' as a string literal.
+ builder.AppendCharacter('$');
+ continue_from_ix = peek_ix;
+ break;
+ }
+
+ Handle<String> capture_name =
+ factory->NewSubString(replacement, peek_ix + 1, closing_bracket_ix);
+ Handle<String> capture;
+ CaptureState capture_state;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, capture,
+ match->GetNamedCapture(capture_name, &capture_state), String);
+
+ switch (capture_state) {
+ case CaptureState::INVALID:
+ case CaptureState::UNMATCHED:
+ break;
+ case CaptureState::MATCHED:
+ builder.AppendString(capture);
+ break;
+ }
+
+ continue_from_ix = closing_bracket_ix + 1;
+ break;
+ }
+ default:
+ builder.AppendCharacter('$');
+ continue_from_ix = peek_ix;
+ break;
+ }
+
+ // Go to the next $ in the replacement.
+ // TODO(jgruber): Single-char lookups could be much more efficient.
+ DCHECK_NE(continue_from_ix, -1);
+ next_dollar_ix =
+ String::IndexOf(isolate, replacement, dollar_string, continue_from_ix);
+
+ // Return if there are no more $ characters in the replacement. If we
+ // haven't reached the end, we need to append the suffix.
+ if (next_dollar_ix < 0) {
+ if (continue_from_ix < replacement_length) {
+ builder.AppendString(factory->NewSubString(
+ replacement, continue_from_ix, replacement_length));
+ }
+ return builder.Finish();
+ }
+
+ // Append substring between the previous and the next $ character.
+ if (next_dollar_ix > continue_from_ix) {
+ builder.AppendString(
+ factory->NewSubString(replacement, continue_from_ix, next_dollar_ix));
+ }
+ }
+
+ UNREACHABLE();
+}
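+
+// Illustrative substitution (not part of this patch): with match "b" in
+// subject "abc", the replacement pattern "[$`|$&|$']" expands to "[a|b|c]";
+// "$$" produces a literal "$", and an out-of-range index such as "$9" is
+// kept verbatim.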
+
+namespace { // for String.Prototype.lastIndexOf
+
+template <typename schar, typename pchar>
+int StringMatchBackwards(Vector<const schar> subject,
+ Vector<const pchar> pattern, int idx) {
+ int pattern_length = pattern.length();
+ DCHECK_GE(pattern_length, 1);
+ DCHECK(idx + pattern_length <= subject.length());
+
+ if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
+ for (int i = 0; i < pattern_length; i++) {
+ uc16 c = pattern[i];
+ if (c > String::kMaxOneByteCharCode) {
+ return -1;
+ }
+ }
+ }
+
+ pchar pattern_first_char = pattern[0];
+ for (int i = idx; i >= 0; i--) {
+ if (subject[i] != pattern_first_char) continue;
+ int j = 1;
+ while (j < pattern_length) {
+ if (pattern[j] != subject[i + j]) {
+ break;
+ }
+ j++;
+ }
+ if (j == pattern_length) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+} // namespace
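+
+// Illustrative search (not part of this patch): in subject "abab" with
+// pattern "ab" and idx = 2, the backwards scan matches immediately at
+// i = 2, so lastIndexOf reports 2; starting from idx = 1 it would find
+// the occurrence at i = 0.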
+
+Object String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position) {
+ if (receiver->IsNullOrUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "String.prototype.lastIndexOf")));
+ }
+ Handle<String> receiver_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_string,
+ Object::ToString(isolate, receiver));
+
+ Handle<String> search_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+ Object::ToString(isolate, search));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+ Object::ToNumber(isolate, position));
+
+ uint32_t start_index;
+
+ if (position->IsNaN()) {
+ start_index = receiver_string->length();
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+ Object::ToInteger(isolate, position));
+ start_index = receiver_string->ToValidIndex(*position);
+ }
+
+ uint32_t pattern_length = search_string->length();
+ uint32_t receiver_length = receiver_string->length();
+
+ if (start_index + pattern_length > receiver_length) {
+ start_index = receiver_length - pattern_length;
+ }
+
+ if (pattern_length == 0) {
+ return Smi::FromInt(start_index);
+ }
+
+ receiver_string = String::Flatten(isolate, receiver_string);
+ search_string = String::Flatten(isolate, search_string);
+
+ int last_index = -1;
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
+
+ String::FlatContent receiver_content = receiver_string->GetFlatContent(no_gc);
+ String::FlatContent search_content = search_string->GetFlatContent(no_gc);
+
+ if (search_content.IsOneByte()) {
+ Vector<const uint8_t> pat_vector = search_content.ToOneByteVector();
+ if (receiver_content.IsOneByte()) {
+ last_index = StringMatchBackwards(receiver_content.ToOneByteVector(),
+ pat_vector, start_index);
+ } else {
+ last_index = StringMatchBackwards(receiver_content.ToUC16Vector(),
+ pat_vector, start_index);
+ }
+ } else {
+ Vector<const uc16> pat_vector = search_content.ToUC16Vector();
+ if (receiver_content.IsOneByte()) {
+ last_index = StringMatchBackwards(receiver_content.ToOneByteVector(),
+ pat_vector, start_index);
+ } else {
+ last_index = StringMatchBackwards(receiver_content.ToUC16Vector(),
+ pat_vector, start_index);
+ }
+ }
+ return Smi::FromInt(last_index);
+}
+
+bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
+ int slen = length();
+ // Can't check exact length equality, but we can check bounds.
+ int str_len = str.length();
+ if (!allow_prefix_match &&
+ (str_len < slen ||
+ str_len > slen * static_cast<int>(unibrow::Utf8::kMaxEncodedSize))) {
+ return false;
+ }
+
+ int i = 0;
+ unibrow::Utf8Iterator it = unibrow::Utf8Iterator(str);
+ while (i < slen && !it.Done()) {
+ if (Get(i++) != *it) return false;
+ ++it;
+ }
+
+ return (allow_prefix_match || i == slen) && it.Done();
+}
+
+template <>
+bool String::IsEqualTo(Vector<const uint8_t> str) {
+ return IsOneByteEqualTo(str);
+}
+
+template <>
+bool String::IsEqualTo(Vector<const uc16> str) {
+ return IsTwoByteEqualTo(str);
+}
+
+bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
+ int slen = length();
+ if (str.length() != slen) return false;
+ DisallowHeapAllocation no_gc;
+ FlatContent content = GetFlatContent(no_gc);
+ if (content.IsOneByte()) {
+ return CompareChars(content.ToOneByteVector().start(), str.start(), slen) ==
+ 0;
+ }
+ return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
+}
+
+bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
+ int slen = length();
+ if (str.length() != slen) return false;
+ DisallowHeapAllocation no_gc;
+ FlatContent content = GetFlatContent(no_gc);
+ if (content.IsOneByte()) {
+ return CompareChars(content.ToOneByteVector().start(), str.start(), slen) ==
+ 0;
+ }
+ return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
+}
+
+uint32_t String::ComputeAndSetHash() {
+ DisallowHeapAllocation no_gc;
+ // Should only be called if hash code has not yet been computed.
+ DCHECK(!HasHashCode());
+
+ // Store the hash code in the object.
+ uint32_t field =
+ IteratingStringHasher::Hash(*this, HashSeed(GetReadOnlyRoots()));
+ set_hash_field(field);
+
+ // Check the hash code is there.
+ DCHECK(HasHashCode());
+ uint32_t result = field >> kHashShift;
+ DCHECK_NE(result, 0); // Ensure that the hash value of 0 is never computed.
+ return result;
+}
+
+bool String::ComputeArrayIndex(uint32_t* index) {
+ int length = this->length();
+ if (length == 0 || length > kMaxArrayIndexSize) return false;
+ StringCharacterStream stream(*this);
+ return StringToArrayIndex(&stream, index);
+}
+
+bool String::SlowAsArrayIndex(uint32_t* index) {
+ DisallowHeapAllocation no_gc;
+ if (length() <= kMaxCachedArrayIndexLength) {
+ Hash(); // force computation of hash code
+ uint32_t field = hash_field();
+ if ((field & kIsNotArrayIndexMask) != 0) return false;
+ // Isolate the array index from the full hash field.
+ *index = ArrayIndexValueBits::decode(field);
+ return true;
+ } else {
+ return ComputeArrayIndex(index);
+ }
+}
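
[Editor's note] SlowAsArrayIndex relies on the hash field doing double duty: for short all-digit strings the computed hash field carries the array index itself, tagged by kIsNotArrayIndexMask. A standalone sketch of that decode step, with an illustrative field layout (V8's real bit layout differs):

#include <cstdint>
#include <optional>

constexpr uint32_t kIsNotArrayIndexMaskSketch = 1u << 0;  // Illustrative tag bit.

std::optional<uint32_t> DecodeCachedArrayIndexSketch(uint32_t hash_field) {
  if (hash_field & kIsNotArrayIndexMaskSketch) return std::nullopt;
  // Stand-in for ArrayIndexValueBits::decode(field).
  return (hash_field >> 1) & ((1u << 24) - 1);
}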
+
+void String::PrintOn(FILE* file) {
+ int length = this->length();
+ for (int i = 0; i < length; i++) {
+ PrintF(file, "%c", Get(i));
+ }
+}
+
+Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
+ if (new_length == 0) return string->GetReadOnlyRoots().empty_string_handle();
+
+ int new_size, old_size;
+ int old_length = string->length();
+ if (old_length <= new_length) return string;
+
+ if (string->IsSeqOneByteString()) {
+ old_size = SeqOneByteString::SizeFor(old_length);
+ new_size = SeqOneByteString::SizeFor(new_length);
+ } else {
+ DCHECK(string->IsSeqTwoByteString());
+ old_size = SeqTwoByteString::SizeFor(old_length);
+ new_size = SeqTwoByteString::SizeFor(new_length);
+ }
+
+ int delta = old_size - new_size;
+
+ Address start_of_string = string->address();
+ DCHECK(IsAligned(start_of_string, kObjectAlignment));
+ DCHECK(IsAligned(start_of_string + new_size, kObjectAlignment));
+
+ Heap* heap = Heap::FromWritableHeapObject(*string);
+ // Sizes are pointer size aligned, so that we can use filler objects
+ // that are a multiple of pointer size.
+ heap->CreateFillerObjectAt(start_of_string + new_size, delta,
+ ClearRecordedSlots::kNo);
+ // We are storing the new length using release store after creating a filler
+ // for the left-over space to avoid races with the sweeper thread.
+ string->synchronized_set_length(new_length);
+
+ return string;
+}
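
[Editor's note] The ordering in Truncate matters for the concurrent sweeper: the filler describing the freed tail must be visible before the shorter length is. A standalone sketch of that publish pattern, with std::atomic standing in for V8's synchronized length accessor (types and sizes here are assumptions):

#include <atomic>
#include <cstring>

struct SketchSeqString {
  std::atomic<int> length{0};
  char data[64];  // Assume lengths stay within this fixed backing store.
};

void TruncateSketch(SketchSeqString* s, int new_length) {
  int old_length = s->length.load(std::memory_order_relaxed);
  if (old_length <= new_length) return;
  // Step 1: make the leftover tail self-describing. V8 writes a filler
  // object here; zeroing the bytes stands in for that.
  std::memset(s->data + new_length, 0, old_length - new_length);
  // Step 2: publish the new length only after the filler is in place, so a
  // sweeper loading the length with acquire semantics also sees the filler.
  s->length.store(new_length, std::memory_order_release);
}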
+
+void SeqOneByteString::clear_padding() {
+ int data_size = SeqString::kHeaderSize + length() * kOneByteSize;
+ memset(reinterpret_cast<void*>(address() + data_size), 0,
+ SizeFor(length()) - data_size);
+}
+
+void SeqTwoByteString::clear_padding() {
+ int data_size = SeqString::kHeaderSize + length() * kUC16Size;
+ memset(reinterpret_cast<void*>(address() + data_size), 0,
+ SizeFor(length()) - data_size);
+}
+
+uint16_t ConsString::ConsStringGet(int index) {
+ DCHECK(index >= 0 && index < this->length());
+
+ // Check for a flattened cons string
+ if (second()->length() == 0) {
+ String left = first();
+ return left->Get(index);
+ }
+
+ String string = String::cast(*this);
+
+ while (true) {
+ if (StringShape(string).IsCons()) {
+ ConsString cons_string = ConsString::cast(string);
+ String left = cons_string->first();
+ if (left->length() > index) {
+ string = left;
+ } else {
+ index -= left->length();
+ string = cons_string->second();
+ }
+ } else {
+ return string->Get(index);
+ }
+ }
+
+ UNREACHABLE();
+}
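
[Editor's note] ConsStringGet walks the cons tree iteratively instead of recursing, so deep concatenation chains cannot blow the C++ stack. The same loop over a toy rope structure (illustrative, not V8's classes):

#include <string>

struct RopeNode {
  RopeNode* left = nullptr;   // Leaf when null; then `leaf` holds the text.
  RopeNode* right = nullptr;
  std::string leaf;
  int length = 0;             // For inner nodes: left->length + right->length.
};

char RopeGet(const RopeNode* node, int index) {
  while (node->left != nullptr) {
    if (index < node->left->length) {
      node = node->left;               // Target is in the left child.
    } else {
      index -= node->left->length;     // Skip past the left child.
      node = node->right;
    }
  }
  return node->leaf[index];
}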
+
+uint16_t ThinString::ThinStringGet(int index) { return actual()->Get(index); }
+
+uint16_t SlicedString::SlicedStringGet(int index) {
+ return parent()->Get(offset() + index);
+}
+
+int ExternalString::ExternalPayloadSize() const {
+ int length_multiplier = IsTwoByteRepresentation() ? i::kShortSize : kCharSize;
+ return length() * length_multiplier;
+}
+
+FlatStringReader::FlatStringReader(Isolate* isolate, Handle<String> str)
+ : Relocatable(isolate), str_(str.location()), length_(str->length()) {
+ PostGarbageCollection();
+}
+
+FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
+ : Relocatable(isolate),
+ str_(nullptr),
+ is_one_byte_(true),
+ length_(input.length()),
+ start_(input.start()) {}
+
+void FlatStringReader::PostGarbageCollection() {
+ if (str_ == nullptr) return;
+ Handle<String> str(str_);
+ DCHECK(str->IsFlat());
+ DisallowHeapAllocation no_gc;
+ // This does not actually prevent the vector from being relocated later.
+ String::FlatContent content = str->GetFlatContent(no_gc);
+ DCHECK(content.IsFlat());
+ is_one_byte_ = content.IsOneByte();
+ if (is_one_byte_) {
+ start_ = content.ToOneByteVector().start();
+ } else {
+ start_ = content.ToUC16Vector().start();
+ }
+}
+
+void ConsStringIterator::Initialize(ConsString cons_string, int offset) {
+ DCHECK(!cons_string.is_null());
+ root_ = cons_string;
+ consumed_ = offset;
+ // Force stack blown condition to trigger restart.
+ depth_ = 1;
+ maximum_depth_ = kStackSize + depth_;
+ DCHECK(StackBlown());
+}
+
+String ConsStringIterator::Continue(int* offset_out) {
+ DCHECK_NE(depth_, 0);
+ DCHECK_EQ(0, *offset_out);
+ bool blew_stack = StackBlown();
+ String string;
+ // Get the next leaf if there is one.
+ if (!blew_stack) string = NextLeaf(&blew_stack);
+ // Restart search from root.
+ if (blew_stack) {
+ DCHECK(string.is_null());
+ string = Search(offset_out);
+ }
+ // Ensure future calls return null immediately.
+ if (string.is_null()) Reset(ConsString());
+ return string;
+}
+
+String ConsStringIterator::Search(int* offset_out) {
+ ConsString cons_string = root_;
+ // Reset the stack, pushing the root string.
+ depth_ = 1;
+ maximum_depth_ = 1;
+ frames_[0] = cons_string;
+ const int consumed = consumed_;
+ int offset = 0;
+ while (true) {
+ // Loop until we find the string that contains the target offset.
+ String string = cons_string->first();
+ int length = string->length();
+ int32_t type;
+ if (consumed < offset + length) {
+ // Target offset is in the left branch.
+ // Keep going if we're still in a ConsString.
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) == kConsStringTag) {
+ cons_string = ConsString::cast(string);
+ PushLeft(cons_string);
+ continue;
+ }
+ // Tell the stack we're done descending.
+ AdjustMaximumDepth();
+ } else {
+ // Descend right.
+ // Update progress through the string.
+ offset += length;
+ // Keep going if we're still in a ConsString.
+ string = cons_string->second();
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) == kConsStringTag) {
+ cons_string = ConsString::cast(string);
+ PushRight(cons_string);
+ continue;
+ }
+ // Need this to be updated for the current string.
+ length = string->length();
+ // Account for the possibility of an empty right leaf.
+ // This happens only if we have asked for an offset outside the string.
+ if (length == 0) {
+ // Reset so future operations will return null immediately.
+ Reset(ConsString());
+ return String();
+ }
+ // Tell the stack we're done descending.
+ AdjustMaximumDepth();
+ // Pop stack so next iteration is in correct place.
+ Pop();
+ }
+ DCHECK_NE(length, 0);
+ // Adjust return values and exit.
+ consumed_ = offset + length;
+ *offset_out = consumed - offset;
+ return string;
+ }
+ UNREACHABLE();
+}
+
+String ConsStringIterator::NextLeaf(bool* blew_stack) {
+ while (true) {
+ // Tree traversal complete.
+ if (depth_ == 0) {
+ *blew_stack = false;
+ return String();
+ }
+ // We've lost track of higher nodes.
+ if (StackBlown()) {
+ *blew_stack = true;
+ return String();
+ }
+ // Go right.
+ ConsString cons_string = frames_[OffsetForDepth(depth_ - 1)];
+ String string = cons_string->second();
+ int32_t type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) != kConsStringTag) {
+ // Pop stack so next iteration is in correct place.
+ Pop();
+ int length = string->length();
+ // Could be a flattened ConsString.
+ if (length == 0) continue;
+ consumed_ += length;
+ return string;
+ }
+ cons_string = ConsString::cast(string);
+ PushRight(cons_string);
+ // Need to traverse all the way left.
+ while (true) {
+ // Continue left.
+ string = cons_string->first();
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) != kConsStringTag) {
+ AdjustMaximumDepth();
+ int length = string->length();
+ if (length == 0) break; // Skip empty left-hand sides of ConsStrings.
+ consumed_ += length;
+ return string;
+ }
+ cons_string = ConsString::cast(string);
+ PushLeft(cons_string);
+ }
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 3a1fc21ac4..e0b34f7a0e 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -52,7 +52,6 @@ class StringShape {
inline StringRepresentationTag representation_tag();
inline uint32_t encoding_tag();
inline uint32_t full_representation_tag();
- inline bool HasOnlyOneByteChars();
#ifdef DEBUG
inline uint32_t type() { return type_; }
inline void invalidate() { valid_ = false; }
@@ -168,10 +167,6 @@ class String : public Name {
// Requires: string.IsFlat()
static inline bool IsOneByteRepresentationUnderneath(String string);
- // NOTE: this should be considered only a hint. False negatives are
- // possible.
- inline bool HasOnlyOneByteChars();
-
// Get and set individual two byte chars in the string.
inline void Set(int index, uint16_t value);
// Get individual two byte char in the string. Repeated calls
@@ -450,7 +445,7 @@ class String : public Name {
V8_EXPORT_PRIVATE bool SlowAsArrayIndex(uint32_t* index);
// Compute and set the hash code.
- uint32_t ComputeAndSetHash(Isolate* isolate);
+ uint32_t ComputeAndSetHash();
OBJECT_CONSTRUCTORS(String, Name);
};
diff --git a/deps/v8/src/objects/struct-inl.h b/deps/v8/src/objects/struct-inl.h
index 8a5b53efbb..9502698058 100644
--- a/deps/v8/src/objects/struct-inl.h
+++ b/deps/v8/src/objects/struct-inl.h
@@ -8,7 +8,9 @@
#include "src/objects/struct.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects-inl.h"
#include "src/objects/oddball.h"
+#include "src/roots-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,15 +25,19 @@ Tuple2::Tuple2(Address ptr) : Struct(ptr) {}
Tuple3::Tuple3(Address ptr) : Tuple2(ptr) {}
OBJECT_CONSTRUCTORS_IMPL(AccessorPair, Struct)
+OBJECT_CONSTRUCTORS_IMPL(ClassPositions, Struct)
+
CAST_ACCESSOR(AccessorPair)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(Tuple2)
CAST_ACCESSOR(Tuple3)
+CAST_ACCESSOR(ClassPositions)
+
void Struct::InitializeBody(int object_size) {
Object value = GetReadOnlyRoots().undefined_value();
- for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
- WRITE_FIELD(this, offset, value);
+ for (int offset = kHeaderSize; offset < object_size; offset += kTaggedSize) {
+ WRITE_FIELD(*this, offset, value);
}
}
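
[Editor's note] The kPointerSize to kTaggedSize switch in InitializeBody is part of separating "machine word" from "tagged slot", in preparation for pointer compression, where a tagged field can be 4 bytes on a 64-bit target. A standalone sketch of the distinction (the sizes are illustrative assumptions):

#include <cstdint>
#include <cstring>

constexpr int kTaggedSizeSketch = 4;   // Compressed tagged slot.
constexpr int kPointerSizeSketch = 8;  // Machine word on a 64-bit target.

void InitializeBodySketch(uint8_t* object, int header_size, int object_size,
                          uint32_t undefined_value) {
  // Stride by the tagged size; striding by kPointerSizeSketch would
  // initialize only every other slot under compression.
  for (int offset = header_size; offset < object_size;
       offset += kTaggedSizeSketch) {
    std::memcpy(object + offset, &undefined_value, sizeof(undefined_value));
  }
}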
@@ -42,6 +48,9 @@ ACCESSORS(Tuple3, value3, Object, kValue3Offset)
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
+SMI_ACCESSORS(ClassPositions, start, kStartOffset)
+SMI_ACCESSORS(ClassPositions, end, kEndOffset)
+
Object AccessorPair::get(AccessorComponent component) {
return component == ACCESSOR_GETTER ? getter() : setter();
}
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index a790ac2320..f702022ebf 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -7,6 +7,7 @@
#include "src/objects.h"
#include "src/objects/heap-object.h"
+#include "torque-generated/class-definitions-from-dsl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,7 +24,7 @@ class Struct : public HeapObject {
DECL_CAST(Struct)
void BriefPrintDetails(std::ostream& os);
- OBJECT_CONSTRUCTORS(Struct, HeapObject)
+ OBJECT_CONSTRUCTORS(Struct, HeapObject);
};
class Tuple2 : public Struct {
@@ -38,15 +39,8 @@ class Tuple2 : public Struct {
DECL_VERIFIER(Tuple2)
void BriefPrintDetails(std::ostream& os);
-// Layout description.
-#define TUPLE2_FIELDS(V) \
- V(kValue1Offset, kTaggedSize) \
- V(kValue2Offset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, TUPLE2_FIELDS)
-#undef TUPLE2_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_TUPLE2_FIELDS)
OBJECT_CONSTRUCTORS(Tuple2, Struct);
};
@@ -62,14 +56,7 @@ class Tuple3 : public Tuple2 {
DECL_VERIFIER(Tuple3)
void BriefPrintDetails(std::ostream& os);
-// Layout description.
-#define TUPLE3_FIELDS(V) \
- V(kValue3Offset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Tuple2::kSize, TUPLE3_FIELDS)
-#undef TUPLE3_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(Tuple2::kSize, TORQUE_GENERATED_TUPLE3_FIELDS)
OBJECT_CONSTRUCTORS(Tuple3, Tuple2);
};
@@ -119,6 +106,24 @@ class AccessorPair : public Struct {
OBJECT_CONSTRUCTORS(AccessorPair, Struct);
};
+class ClassPositions : public Struct {
+ public:
+ DECL_INT_ACCESSORS(start)
+ DECL_INT_ACCESSORS(end)
+
+ DECL_CAST(ClassPositions)
+
+ // Dispatched behavior.
+ DECL_PRINTER(ClassPositions)
+ DECL_VERIFIER(ClassPositions)
+ void BriefPrintDetails(std::ostream& os);
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_CLASS_POSITIONS_FIELDS)
+
+ OBJECT_CONSTRUCTORS(ClassPositions, Struct);
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/template-objects-inl.h b/deps/v8/src/objects/template-objects-inl.h
new file mode 100644
index 0000000000..cc6c096265
--- /dev/null
+++ b/deps/v8/src/objects/template-objects-inl.h
@@ -0,0 +1,37 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TEMPLATE_OBJECTS_INL_H_
+#define V8_OBJECTS_TEMPLATE_OBJECTS_INL_H_
+
+#include "src/objects/template-objects.h"
+
+#include "src/objects/js-array-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription, Tuple2)
+OBJECT_CONSTRUCTORS_IMPL(CachedTemplateObject, Tuple3)
+
+CAST_ACCESSOR(TemplateObjectDescription)
+CAST_ACCESSOR(CachedTemplateObject)
+
+ACCESSORS(TemplateObjectDescription, raw_strings, FixedArray, kRawStringsOffset)
+ACCESSORS(TemplateObjectDescription, cooked_strings, FixedArray,
+ kCookedStringsOffset)
+
+SMI_ACCESSORS(CachedTemplateObject, slot_id, kSlotIdOffset)
+ACCESSORS(CachedTemplateObject, template_object, JSArray, kTemplateObjectOffset)
+ACCESSORS(CachedTemplateObject, next, HeapObject, kNextOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TEMPLATE_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index 1218ba5a7d..de9be911e2 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -4,17 +4,41 @@
#include "src/objects/template-objects.h"
+#include "src/base/functional.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/template-objects-inl.h"
#include "src/property-descriptor.h"
namespace v8 {
namespace internal {
// static
-Handle<JSArray> TemplateObjectDescription::CreateTemplateObject(
- Isolate* isolate, Handle<TemplateObjectDescription> description) {
+Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
+ Isolate* isolate, Handle<Context> native_context,
+ Handle<TemplateObjectDescription> description,
+ Handle<SharedFunctionInfo> shared_info, int slot_id) {
+ DCHECK(native_context->IsNativeContext());
+
+ // Check the template weakmap to see if the template object already exists.
+ Handle<EphemeronHashTable> template_weakmap =
+ native_context->template_weakmap()->IsUndefined(isolate)
+ ? EphemeronHashTable::New(isolate, 0)
+ : handle(EphemeronHashTable::cast(native_context->template_weakmap()),
+ isolate);
+
+ uint32_t hash = shared_info->Hash();
+ Object maybe_cached_template = template_weakmap->Lookup(shared_info, hash);
+ while (!maybe_cached_template->IsTheHole()) {
+ CachedTemplateObject cached_template =
+ CachedTemplateObject::cast(maybe_cached_template);
+ if (cached_template->slot_id() == slot_id)
+ return handle(cached_template->template_object(), isolate);
+
+ maybe_cached_template = cached_template->next();
+ }
+
// Create the raw object from the {raw_strings}.
Handle<FixedArray> raw_strings(description->raw_strings(), isolate);
Handle<JSArray> raw_object = isolate->factory()->NewJSArrayWithElements(
@@ -36,15 +60,37 @@ Handle<JSArray> TemplateObjectDescription::CreateTemplateObject(
raw_desc.set_writable(false);
JSArray::DefineOwnProperty(isolate, template_object,
isolate->factory()->raw_string(), &raw_desc,
- kThrowOnError)
+ Just(kThrowOnError))
.ToChecked();
// Freeze the {template_object} as well.
JSObject::SetIntegrityLevel(template_object, FROZEN, kThrowOnError)
.ToChecked();
+ // Insert the template object into the template weakmap.
+ Handle<HeapObject> previous_cached_templates = handle(
+ HeapObject::cast(template_weakmap->Lookup(shared_info, hash)), isolate);
+ Handle<CachedTemplateObject> cached_template = CachedTemplateObject::New(
+ isolate, slot_id, template_object, previous_cached_templates);
+ template_weakmap = EphemeronHashTable::Put(
+ isolate, template_weakmap, shared_info, cached_template, hash);
+ native_context->set_template_weakmap(*template_weakmap);
+
return template_object;
}
+Handle<CachedTemplateObject> CachedTemplateObject::New(
+ Isolate* isolate, int slot_id, Handle<JSArray> template_object,
+ Handle<HeapObject> next) {
+ DCHECK(next->IsCachedTemplateObject() || next->IsTheHole());
+ Factory* factory = isolate->factory();
+ Handle<CachedTemplateObject> result = Handle<CachedTemplateObject>::cast(
+ factory->NewStruct(TUPLE3_TYPE, TENURED));
+ result->set_slot_id(slot_id);
+ result->set_template_object(*template_object);
+ result->set_next(*next);
+ return result;
+}
+
} // namespace internal
} // namespace v8
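
[Editor's note] GetTemplateObject's cache has a simple shape: one weak map keyed by SharedFunctionInfo, each value a linked list of (slot_id, template object) pairs, newest first. A standalone sketch with std::unordered_map standing in for the EphemeronHashTable (so entries here are not actually weak; all names are hypothetical):

#include <memory>
#include <unordered_map>

struct TemplateArraySketch {};  // Stand-in for the frozen JSArray.

struct CachedEntrySketch {
  int slot_id;
  std::shared_ptr<TemplateArraySketch> template_object;
  std::unique_ptr<CachedEntrySketch> next;  // Per-function linked list.
};

using TemplateCacheSketch =
    std::unordered_map<const void*, std::unique_ptr<CachedEntrySketch>>;

std::shared_ptr<TemplateArraySketch> GetTemplateObjectSketch(
    TemplateCacheSketch& cache, const void* shared_info, int slot_id) {
  for (CachedEntrySketch* e = cache[shared_info].get(); e != nullptr;
       e = e->next.get()) {
    if (e->slot_id == slot_id) return e->template_object;  // Cache hit.
  }
  // Miss: materialize once, then splice onto the front of the list.
  auto created = std::make_shared<TemplateArraySketch>();
  auto entry = std::make_unique<CachedEntrySketch>();
  entry->slot_id = slot_id;
  entry->template_object = created;
  entry->next = std::move(cache[shared_info]);
  cache[shared_info] = std::move(entry);
  return created;
}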
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index e24deabd8d..e99c8530e6 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -5,8 +5,7 @@
#ifndef V8_OBJECTS_TEMPLATE_OBJECTS_H_
#define V8_OBJECTS_TEMPLATE_OBJECTS_H_
-#include "src/objects.h"
-#include "src/objects/hash-table.h"
+#include "src/objects/fixed-array.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
@@ -15,17 +14,40 @@
namespace v8 {
namespace internal {
-// TemplateObjectDescription is a triple of hash, raw strings and cooked
-// strings for tagged template literals. Used to communicate with the runtime
-// for template object creation within the {Runtime_CreateTemplateObject}
-// method.
+// CachedTemplateObject is a tuple used to cache a TemplateObject that has been
+// created. All the CachedTemplateObjects for a given SharedFunctionInfo form a
+// linked list via the next fields.
+class CachedTemplateObject final : public Tuple3 {
+ public:
+ DECL_INT_ACCESSORS(slot_id)
+ DECL_ACCESSORS(template_object, JSArray)
+ DECL_ACCESSORS(next, HeapObject)
+
+ static Handle<CachedTemplateObject> New(Isolate* isolate, int slot_id,
+ Handle<JSArray> template_object,
+ Handle<HeapObject> next);
+
+ DECL_CAST(CachedTemplateObject)
+
+ static constexpr int kSlotIdOffset = kValue1Offset;
+ static constexpr int kTemplateObjectOffset = kValue2Offset;
+ static constexpr int kNextOffset = kValue3Offset;
+
+ OBJECT_CONSTRUCTORS(CachedTemplateObject, Tuple3);
+};
+
+// TemplateObjectDescription is a tuple of raw strings and cooked strings for
+// tagged template literals. Used to communicate with the runtime for template
+// object creation within the {Runtime_GetTemplateObject} method.
class TemplateObjectDescription final : public Tuple2 {
public:
DECL_ACCESSORS(raw_strings, FixedArray)
DECL_ACCESSORS(cooked_strings, FixedArray)
- static Handle<JSArray> CreateTemplateObject(
- Isolate* isolate, Handle<TemplateObjectDescription> description);
+ static Handle<JSArray> GetTemplateObject(
+ Isolate* isolate, Handle<Context> native_context,
+ Handle<TemplateObjectDescription> description,
+ Handle<SharedFunctionInfo> shared_info, int slot_id);
DECL_CAST(TemplateObjectDescription)
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index c42353b249..90b1f05c6c 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -7,7 +7,8 @@
#include "src/objects/templates.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/oddball.h"
#include "src/objects/shared-function-info-inl.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index de75f5de80..6348120a25 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -224,13 +224,6 @@ class FunctionTemplateInfo : public TemplateInfo {
static MaybeHandle<Name> TryGetCachedPropertyName(Isolate* isolate,
Handle<Object> getter);
- private:
- static inline FunctionTemplateRareData EnsureFunctionTemplateRareData(
- Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info);
-
- static FunctionTemplateRareData AllocateFunctionTemplateRareData(
- Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info);
-
// Bit position in the flag, from least significant bit position.
static const int kHiddenPrototypeBit = 0;
static const int kUndetectableBit = 1;
@@ -240,6 +233,13 @@ class FunctionTemplateInfo : public TemplateInfo {
static const int kDoNotCacheBit = 5;
static const int kAcceptAnyReceiver = 6;
+ private:
+ static inline FunctionTemplateRareData EnsureFunctionTemplateRareData(
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info);
+
+ static FunctionTemplateRareData AllocateFunctionTemplateRareData(
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info);
+
OBJECT_CONSTRUCTORS(FunctionTemplateInfo, TemplateInfo);
};
@@ -277,7 +277,7 @@ class ObjectTemplateInfo : public TemplateInfo {
class EmbedderFieldCount
: public BitField<int, IsImmutablePrototype::kNext, 29> {};
- OBJECT_CONSTRUCTORS(ObjectTemplateInfo, TemplateInfo)
+ OBJECT_CONSTRUCTORS(ObjectTemplateInfo, TemplateInfo);
};
} // namespace internal
diff --git a/deps/v8/src/optimized-compilation-info.cc b/deps/v8/src/optimized-compilation-info.cc
index 1ec908f4b0..ccf6919213 100644
--- a/deps/v8/src/optimized-compilation-info.cc
+++ b/deps/v8/src/optimized-compilation-info.cc
@@ -203,5 +203,15 @@ void OptimizedCompilationInfo::SetTracingFlags(bool passes_filter) {
if (FLAG_trace_turbo_scheduled) SetFlag(kTraceTurboScheduled);
}
+OptimizedCompilationInfo::InlinedFunctionHolder::InlinedFunctionHolder(
+ Handle<SharedFunctionInfo> inlined_shared_info,
+ Handle<BytecodeArray> inlined_bytecode, SourcePosition pos)
+ : shared_info(inlined_shared_info), bytecode_array(inlined_bytecode) {
+ DCHECK_EQ(shared_info->GetBytecodeArray(), *bytecode_array);
+ position.position = pos;
+ // Initialized when generating the deoptimization literals.
+ position.inlined_function_id = DeoptimizationData::kNotInlinedIndex;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/optimized-compilation-info.h b/deps/v8/src/optimized-compilation-info.h
index 69376bc72c..720eb191e9 100644
--- a/deps/v8/src/optimized-compilation-info.h
+++ b/deps/v8/src/optimized-compilation-info.h
@@ -232,18 +232,12 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
struct InlinedFunctionHolder {
Handle<SharedFunctionInfo> shared_info;
- Handle<BytecodeArray> bytecode_array;
-
+ Handle<BytecodeArray> bytecode_array; // Explicit to prevent flushing.
InliningPosition position;
InlinedFunctionHolder(Handle<SharedFunctionInfo> inlined_shared_info,
Handle<BytecodeArray> inlined_bytecode,
- SourcePosition pos)
- : shared_info(inlined_shared_info), bytecode_array(inlined_bytecode) {
- position.position = pos;
- // initialized when generating the deoptimization literals
- position.inlined_function_id = DeoptimizationData::kNotInlinedIndex;
- }
+ SourcePosition pos);
void RegisterInlinedFunctionId(size_t inlined_function_id) {
position.inlined_function_id = static_cast<int>(inlined_function_id);
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index 2dcd7892d8..1f2d53c239 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -7,6 +7,7 @@
#include "src/objects/string.h"
#if V8_OS_WIN
+#include <windows.h>
#if _MSC_VER < 1900
#define snprintf sprintf_s
#endif
@@ -20,6 +21,48 @@
namespace v8 {
namespace internal {
+DbgStreamBuf::DbgStreamBuf() { setp(data_, data_ + sizeof(data_)); }
+
+DbgStreamBuf::~DbgStreamBuf() { sync(); }
+
+int DbgStreamBuf::overflow(int c) {
+#if V8_OS_WIN
+ if (!IsDebuggerPresent()) {
+ return 0;
+ }
+
+ sync();
+
+ if (c != EOF) {
+ if (pbase() == epptr()) {
+ auto as_char = static_cast<char>(c);
+ OutputDebugStringA(&as_char);
+ } else {
+ sputc(static_cast<char>(c));
+ }
+ }
+#endif
+ return 0;
+}
+
+int DbgStreamBuf::sync() {
+#if V8_OS_WIN
+ if (!IsDebuggerPresent()) {
+ return 0;
+ }
+
+ if (pbase() != pptr()) {
+ OutputDebugStringA(std::string(pbase(), static_cast<std::string::size_type>(
+ pptr() - pbase()))
+ .c_str());
+ setp(pbase(), epptr());
+ }
+#endif
+ return 0;
+}
+
+DbgStdoutStream::DbgStdoutStream() : std::ostream(&streambuf_) {}
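
[Editor's note] A hedged usage sketch for the new stream: it is only meaningful inside a V8 build, and only produces output on Windows when a debugger is attached (the function name is hypothetical):

#include <ostream>
#include "src/ostreams.h"

void TraceToDebuggerSketch() {
  v8::internal::DbgStdoutStream dbg;
  dbg << "turbofan: compiling foo" << std::endl;  // std::endl triggers sync().
}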
+
OFStreamBase::OFStreamBase(FILE* f) : f_(f) {}
int OFStreamBase::sync() {
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/ostreams.h
index 189f5384b9..5f77e0d83e 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/ostreams.h
@@ -31,6 +31,28 @@ class V8_EXPORT_PRIVATE OFStreamBase : public std::streambuf {
std::streamsize xsputn(const char* s, std::streamsize n) override;
};
+// Output buffer and stream writing into the debugger's command window.
+class V8_EXPORT_PRIVATE DbgStreamBuf : public std::streambuf {
+ public:
+ DbgStreamBuf();
+ ~DbgStreamBuf();
+
+ private:
+ int sync() override;
+ int overflow(int c) override;
+
+ char data_[256];
+};
+
+class DbgStdoutStream : public std::ostream {
+ public:
+ DbgStdoutStream();
+ ~DbgStdoutStream() = default;
+
+ private:
+ DbgStreamBuf streambuf_;
+};
+
// An output stream writing to a file.
class V8_EXPORT_PRIVATE OFStream : public std::ostream {
public:
@@ -99,6 +121,10 @@ struct AsHex {
uint64_t value;
uint8_t min_width;
bool with_prefix;
+
+ static AsHex Address(Address a) {
+ return AsHex(a, kSystemPointerHexDigits, true);
+ }
};
// Output the given value as hex, separated in individual bytes.
diff --git a/deps/v8/src/parsing/expression-scope.h b/deps/v8/src/parsing/expression-scope.h
index 878cb3cf25..744c60b872 100644
--- a/deps/v8/src/parsing/expression-scope.h
+++ b/deps/v8/src/parsing/expression-scope.h
@@ -6,6 +6,7 @@
#define V8_PARSING_EXPRESSION_SCOPE_H_
#include "src/ast/scopes.h"
+#include "src/function-kind.h"
#include "src/message-template.h"
#include "src/parsing/scanner.h"
#include "src/zone/zone.h" // For ScopedPtrList.
@@ -48,14 +49,28 @@ class ExpressionScope {
VariableProxy* result = parser_->NewRawVariable(name, pos);
if (CanBeExpression()) {
AsExpressionParsingScope()->TrackVariable(result);
- } else if (type_ == kParameterDeclaration) {
- AsParameterDeclarationParsingScope()->Declare(result);
} else {
- return AsVariableDeclarationParsingScope()->Declare(result);
+ Variable* var = Declare(name, pos);
+ if (IsVarDeclaration() && !parser()->scope()->is_declaration_scope()) {
+ // Make sure we'll properly resolve the variable since we might be in a
+ // with or catch scope. In those cases the proxy isn't guaranteed to
+ // refer to the declared variable, so consider it unresolved.
+ parser()->scope()->AddUnresolved(result);
+ } else {
+ DCHECK_NOT_NULL(var);
+ result->BindTo(var);
+ }
}
return result;
}
+ Variable* Declare(const AstRawString* name, int pos = kNoSourcePosition) {
+ if (type_ == kParameterDeclaration) {
+ return AsParameterDeclarationParsingScope()->Declare(name, pos);
+ }
+ return AsVariableDeclarationParsingScope()->Declare(name, pos);
+ }
+
void MarkIdentifierAsAssigned() {
if (!CanBeExpression()) return;
AsExpressionParsingScope()->MarkIdentifierAsAssigned();
@@ -94,6 +109,16 @@ class ExpressionScope {
Report(loc, message);
}
+ void RecordThisUse() {
+ ExpressionScope* scope = this;
+ do {
+ if (scope->IsArrowHeadParsingScope()) {
+ scope->AsArrowHeadParsingScope()->RecordThisUse();
+ }
+ scope = scope->parent();
+ } while (scope != nullptr);
+ }
+
void RecordPatternError(const Scanner::Location& loc,
MessageTemplate message) {
// TODO(verwaest): Non-assigning expression?
@@ -139,11 +164,6 @@ class ExpressionScope {
AsExpressionParsingScope()->RecordExpressionError(loc, message);
}
- void RecordLexicalDeclarationError(const Scanner::Location& loc,
- MessageTemplate message) {
- if (IsLexicalDeclaration()) Report(loc, message);
- }
-
void RecordNonSimpleParameter() {
if (!IsArrowHeadParsingScope()) return;
AsArrowHeadParsingScope()->RecordNonSimpleParameter();
@@ -213,6 +233,7 @@ class ExpressionScope {
bool IsAsyncArrowHeadParsingScope() const {
return type_ == kMaybeAsyncArrowParameterDeclaration;
}
+ bool IsVarDeclaration() const { return type_ == kVarDeclaration; }
private:
friend class AccumulationScope<Types>;
@@ -271,21 +292,22 @@ class VariableDeclarationParsingScope : public ExpressionScope<Types> {
mode_(mode),
names_(names) {}
- VariableProxy* Declare(VariableProxy* proxy) {
+ Variable* Declare(const AstRawString* name, int pos) {
VariableKind kind = NORMAL_VARIABLE;
bool was_added;
- this->parser()->DeclareVariable(
- proxy, kind, mode_, Variable::DefaultInitializationFlag(mode_),
- this->parser()->scope(), &was_added, proxy->position());
+ Variable* var = this->parser()->DeclareVariable(
+ name, kind, mode_, Variable::DefaultInitializationFlag(mode_),
+ this->parser()->scope(), &was_added, pos);
if (was_added &&
this->parser()->scope()->num_var() > kMaxNumFunctionLocals) {
this->parser()->ReportMessage(MessageTemplate::kTooManyVariables);
}
- if (names_) names_->Add(proxy->raw_name(), this->parser()->zone());
+ if (names_) names_->Add(name, this->parser()->zone());
if (this->IsLexicalDeclaration()) {
- if (this->parser()->IsLet(proxy->raw_name())) {
- this->parser()->ReportMessageAt(proxy->location(),
- MessageTemplate::kLetInLexicalBinding);
+ if (this->parser()->IsLet(name)) {
+ this->parser()->ReportMessageAt(
+ Scanner::Location(pos, pos + name->length()),
+ MessageTemplate::kLetInLexicalBinding);
}
} else {
if (this->parser()->loop_nesting_depth() > 0) {
@@ -306,18 +328,11 @@ class VariableDeclarationParsingScope : public ExpressionScope<Types> {
//
// This also handles marking of loop variables in for-in and for-of
// loops, as determined by loop-nesting-depth.
- proxy->set_is_assigned();
- }
-
- // Make sure we'll properly resolve the variable since we might be in a
- // with or catch scope. In those cases the assignment isn't guaranteed to
- // write to the variable declared above.
- if (!this->parser()->scope()->is_declaration_scope()) {
- proxy =
- this->parser()->NewUnresolved(proxy->raw_name(), proxy->position());
+ DCHECK_NOT_NULL(var);
+ var->set_maybe_assigned();
}
}
- return proxy;
+ return var;
}
private:
@@ -342,16 +357,17 @@ class ParameterDeclarationParsingScope : public ExpressionScope<Types> {
explicit ParameterDeclarationParsingScope(ParserT* parser)
: ExpressionScopeT(parser, ExpressionScopeT::kParameterDeclaration) {}
- void Declare(VariableProxy* proxy) {
+ Variable* Declare(const AstRawString* name, int pos) {
VariableKind kind = PARAMETER_VARIABLE;
VariableMode mode = VariableMode::kVar;
bool was_added;
- this->parser()->DeclareVariable(
- proxy, kind, mode, Variable::DefaultInitializationFlag(mode),
- this->parser()->scope(), &was_added, proxy->position());
+ Variable* var = this->parser()->DeclareVariable(
+ name, kind, mode, Variable::DefaultInitializationFlag(mode),
+ this->parser()->scope(), &was_added, pos);
if (!has_duplicate() && !was_added) {
- duplicate_loc_ = proxy->location();
+ duplicate_loc_ = Scanner::Location(pos, pos + name->length());
}
+ return var;
}
bool has_duplicate() const { return duplicate_loc_.IsValid(); }
@@ -670,14 +686,20 @@ class ArrowHeadParsingScope : public ExpressionParsingScope<Types> {
for (int i = 0; i < this->variable_list()->length(); i++) {
VariableProxy* proxy = this->variable_list()->at(i);
bool was_added;
- this->parser()->DeclareVariable(proxy, kind, mode,
- Variable::DefaultInitializationFlag(mode),
- result, &was_added, proxy->position());
+ this->parser()->DeclareAndBindVariable(
+ proxy, kind, mode, Variable::DefaultInitializationFlag(mode), result,
+ &was_added, proxy->position());
if (!was_added) {
ExpressionScope<Types>::Report(proxy->location(),
MessageTemplate::kParamDupe);
}
}
+
+ int initializer_position = this->parser()->end_position();
+ for (auto declaration : *result->declarations()) {
+ declaration->var()->set_initializer_position(initializer_position);
+ }
+ if (uses_this_) result->UsesThis();
return result;
}
@@ -689,6 +711,7 @@ class ArrowHeadParsingScope : public ExpressionParsingScope<Types> {
}
void RecordNonSimpleParameter() { has_simple_parameter_list_ = false; }
+ void RecordThisUse() { uses_this_ = true; }
private:
FunctionKind kind() const {
@@ -700,6 +723,7 @@ class ArrowHeadParsingScope : public ExpressionParsingScope<Types> {
Scanner::Location declaration_error_location = Scanner::Location::invalid();
MessageTemplate declaration_error_message = MessageTemplate::kNone;
bool has_simple_parameter_list_ = true;
+ bool uses_this_ = false;
DISALLOW_COPY_AND_ASSIGN(ArrowHeadParsingScope);
};
diff --git a/deps/v8/src/parsing/keywords-gen.h b/deps/v8/src/parsing/keywords-gen.h
index 67c47a2dda..b256187c96 100644
--- a/deps/v8/src/parsing/keywords-gen.h
+++ b/deps/v8/src/parsing/keywords-gen.h
@@ -49,14 +49,14 @@ struct PerfectKeywordHashTableEntry {
Token::Value value;
};
enum {
- TOTAL_KEYWORDS = 47,
+ TOTAL_KEYWORDS = 49,
MIN_WORD_LENGTH = 2,
MAX_WORD_LENGTH = 10,
MIN_HASH_VALUE = 2,
- MAX_HASH_VALUE = 51
+ MAX_HASH_VALUE = 55
};
-/* maximum key range = 50, duplicates = 0 */
+/* maximum key range = 54, duplicates = 0 */
class PerfectKeywordHash {
private:
@@ -70,22 +70,22 @@ inline unsigned int PerfectKeywordHash::Hash(const char* str, int len) {
DCHECK_LT(str[1], 128);
DCHECK_LT(str[0], 128);
static const unsigned char asso_values[128] = {
- 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
- 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
- 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
- 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
- 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
- 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
- 52, 8, 2, 6, 0, 0, 9, 52, 21, 0, 52, 52, 36, 40, 0, 3,
- 6, 52, 17, 13, 16, 16, 38, 25, 6, 26, 52, 52, 52, 52, 52, 52};
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 8, 0, 6, 0, 0, 9, 9, 9, 0, 56, 56, 34, 41, 0, 3,
+ 6, 56, 19, 10, 13, 16, 39, 26, 37, 36, 56, 56, 56, 56, 56, 56};
return len + asso_values[static_cast<unsigned char>(str[1])] +
asso_values[static_cast<unsigned char>(str[0])];
}
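
[Editor's note] Lookup against the generated tables is O(1) with no probing: hash from the length plus per-character weights of the first two characters, then verify with a length check and a byte compare. A standalone sketch of the scheme (table contents and names are illustrative, not the generated ones):

#include <cstring>

struct KeywordEntrySketch { const char* text; int token; };

// Returns the token for str, or -1 if it is not a keyword. Empty table
// slots carry text "" and length 0, so the length check rejects them.
int LookupKeywordSketch(const char* str, int len,
                        const unsigned char asso_values[128],
                        const unsigned char length_table[64],
                        const KeywordEntrySketch table[64], int max_hash) {
  if (len < 2) return -1;  // MIN_WORD_LENGTH above is 2.
  unsigned int key = len + asso_values[static_cast<unsigned char>(str[1])] +
                     asso_values[static_cast<unsigned char>(str[0])];
  if (key > static_cast<unsigned int>(max_hash)) return -1;
  if (length_table[key] != len) return -1;  // Cheap length reject.
  if (std::memcmp(str, table[key].text, len) != 0) return -1;
  return table[key].token;
}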
static const unsigned char kPerfectKeywordLengthTable[64] = {
- 0, 0, 2, 3, 4, 2, 6, 7, 8, 9, 10, 2, 6, 7, 5, 3, 7, 8, 4, 5, 4, 7,
- 5, 6, 5, 0, 5, 0, 6, 4, 7, 5, 9, 8, 5, 6, 3, 4, 5, 3, 4, 4, 5, 0,
- 6, 4, 6, 5, 6, 3, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ 0, 0, 2, 3, 4, 2, 6, 7, 8, 9, 10, 2, 3, 3, 5, 3, 7, 8, 4, 5, 4, 7,
+ 5, 5, 5, 6, 4, 5, 6, 6, 4, 5, 7, 8, 9, 3, 4, 3, 4, 5, 5, 5, 6, 6,
+ 7, 5, 4, 6, 0, 0, 3, 10, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0};
static const struct PerfectKeywordHashTableEntry kPerfectKeywordHashTable[64] =
{{"", Token::IDENTIFIER},
@@ -100,8 +100,8 @@ static const struct PerfectKeywordHashTableEntry kPerfectKeywordHashTable[64] =
{"interface", Token::FUTURE_STRICT_RESERVED_WORD},
{"instanceof", Token::INSTANCEOF},
{"if", Token::IF},
- {"export", Token::EXPORT},
- {"extends", Token::EXTENDS},
+ {"get", Token::GET},
+ {"set", Token::SET},
{"const", Token::CONST},
{"for", Token::FOR},
{"finally", Token::FINALLY},
@@ -111,39 +111,39 @@ static const struct PerfectKeywordHashTableEntry kPerfectKeywordHashTable[64] =
{"null", Token::NULL_LITERAL},
{"package", Token::FUTURE_STRICT_RESERVED_WORD},
{"false", Token::FALSE_LITERAL},
- {"return", Token::RETURN},
- {"break", Token::BREAK},
- {"", Token::IDENTIFIER},
{"async", Token::ASYNC},
- {"", Token::IDENTIFIER},
+ {"break", Token::BREAK},
+ {"return", Token::RETURN},
+ {"this", Token::THIS},
+ {"throw", Token::THROW},
{"public", Token::FUTURE_STRICT_RESERVED_WORD},
+ {"static", Token::STATIC},
{"with", Token::WITH},
+ {"super", Token::SUPER},
{"private", Token::FUTURE_STRICT_RESERVED_WORD},
- {"yield", Token::YIELD},
- {"protected", Token::FUTURE_STRICT_RESERVED_WORD},
{"function", Token::FUNCTION},
- {"super", Token::SUPER},
- {"static", Token::STATIC},
+ {"protected", Token::FUTURE_STRICT_RESERVED_WORD},
{"try", Token::TRY},
{"true", Token::TRUE_LITERAL},
- {"await", Token::AWAIT},
{"let", Token::LET},
{"else", Token::ELSE},
- {"this", Token::THIS},
- {"throw", Token::THROW},
- {"", Token::IDENTIFIER},
+ {"await", Token::AWAIT},
+ {"while", Token::WHILE},
+ {"yield", Token::YIELD},
{"switch", Token::SWITCH},
+ {"export", Token::EXPORT},
+ {"extends", Token::EXTENDS},
+ {"class", Token::CLASS},
{"void", Token::VOID},
{"import", Token::IMPORT},
- {"class", Token::CLASS},
- {"typeof", Token::TYPEOF},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
{"var", Token::VAR},
{"implements", Token::FUTURE_STRICT_RESERVED_WORD},
- {"while", Token::WHILE},
- {"", Token::IDENTIFIER},
{"", Token::IDENTIFIER},
{"", Token::IDENTIFIER},
{"", Token::IDENTIFIER},
+ {"typeof", Token::TYPEOF},
{"", Token::IDENTIFIER},
{"", Token::IDENTIFIER},
{"", Token::IDENTIFIER},
diff --git a/deps/v8/src/parsing/keywords.txt b/deps/v8/src/parsing/keywords.txt
index a3b3e4614d..7ecfc7d25a 100644
--- a/deps/v8/src/parsing/keywords.txt
+++ b/deps/v8/src/parsing/keywords.txt
@@ -35,6 +35,7 @@ false, Token::FALSE_LITERAL
finally, Token::FINALLY
for, Token::FOR
function, Token::FUNCTION
+get, Token::GET
if, Token::IF
implements, Token::FUTURE_STRICT_RESERVED_WORD
import, Token::IMPORT
@@ -49,6 +50,7 @@ private, Token::FUTURE_STRICT_RESERVED_WORD
protected, Token::FUTURE_STRICT_RESERVED_WORD
public, Token::FUTURE_STRICT_RESERVED_WORD
return, Token::RETURN
+set, Token::SET
static, Token::STATIC
super, Token::SUPER
switch, Token::SWITCH
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 3050e01b48..7bbb201dbe 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -10,6 +10,7 @@
#include "src/base/template-utils.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/counters.h"
+#include "src/hash-seed-inl.h"
#include "src/heap/heap-inl.h"
#include "src/log.h"
#include "src/objects-inl.h"
@@ -31,8 +32,8 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
start_position_(0),
end_position_(0),
parameters_end_pos_(kNoSourcePosition),
- function_literal_id_(FunctionLiteral::kIdTypeInvalid),
- max_function_literal_id_(FunctionLiteral::kIdTypeInvalid),
+ function_literal_id_(kFunctionLiteralIdInvalid),
+ max_function_literal_id_(kFunctionLiteralIdInvalid),
character_stream_(nullptr),
ast_value_factory_(nullptr),
ast_string_constants_(nullptr),
@@ -43,11 +44,14 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
: ParseInfo(zone_allocator) {
- set_hash_seed(isolate->heap()->HashSeed());
+ set_hash_seed(HashSeed(isolate));
set_stack_limit(isolate->stack_guard()->real_climit());
set_runtime_call_stats(isolate->counters()->runtime_call_stats());
set_logger(isolate->logger());
set_ast_string_constants(isolate->ast_string_constants());
+ set_collect_source_positions(!FLAG_enable_lazy_source_positions ||
+ isolate->NeedsDetailedOptimizedCodeLineInfo());
+ if (!isolate->is_best_effort_code_coverage()) set_coverage_enabled();
if (isolate->is_block_code_coverage()) set_block_coverage_enabled();
if (isolate->is_collecting_type_profile()) set_collect_type_profile();
if (isolate->compiler_dispatcher()->IsEnabled()) {
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 7ab236c82d..5d4aab48da 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -10,6 +10,7 @@
#include <vector>
#include "include/v8.h"
+#include "src/function-kind.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/objects/script.h"
@@ -83,6 +84,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
FLAG_ACCESSOR(kIsAsmWasmBroken, is_asm_wasm_broken, set_asm_wasm_broken)
FLAG_ACCESSOR(kContainsAsmModule, contains_asm_module,
set_contains_asm_module)
+ FLAG_ACCESSOR(kCoverageEnabled, coverage_enabled, set_coverage_enabled)
FLAG_ACCESSOR(kBlockCoverageEnabled, block_coverage_enabled,
set_block_coverage_enabled)
FLAG_ACCESSOR(kOnBackgroundThread, on_background_thread,
@@ -93,28 +95,30 @@ class V8_EXPORT_PRIVATE ParseInfo {
FLAG_ACCESSOR(kIsDeclaration, is_declaration, set_declaration)
FLAG_ACCESSOR(kRequiresInstanceMembersInitializer,
requires_instance_members_initializer,
- set_requires_instance_members_initializer);
+ set_requires_instance_members_initializer)
FLAG_ACCESSOR(kMightAlwaysOpt, might_always_opt, set_might_always_opt)
FLAG_ACCESSOR(kAllowNativeSyntax, allow_natives_syntax,
set_allow_natives_syntax)
FLAG_ACCESSOR(kAllowLazyCompile, allow_lazy_compile, set_allow_lazy_compile)
FLAG_ACCESSOR(kAllowNativeSyntax, allow_native_syntax,
- set_allow_native_syntax);
+ set_allow_native_syntax)
FLAG_ACCESSOR(kAllowHarmonyPublicFields, allow_harmony_public_fields,
- set_allow_harmony_public_fields);
+ set_allow_harmony_public_fields)
FLAG_ACCESSOR(kAllowHarmonyStaticFields, allow_harmony_static_fields,
- set_allow_harmony_static_fields);
+ set_allow_harmony_static_fields)
FLAG_ACCESSOR(kAllowHarmonyDynamicImport, allow_harmony_dynamic_import,
- set_allow_harmony_dynamic_import);
+ set_allow_harmony_dynamic_import)
FLAG_ACCESSOR(kAllowHarmonyImportMeta, allow_harmony_import_meta,
- set_allow_harmony_import_meta);
+ set_allow_harmony_import_meta)
FLAG_ACCESSOR(kAllowHarmonyNumericSeparator, allow_harmony_numeric_separator,
- set_allow_harmony_numeric_separator);
+ set_allow_harmony_numeric_separator)
FLAG_ACCESSOR(kAllowHarmonyPrivateFields, allow_harmony_private_fields,
- set_allow_harmony_private_fields);
+ set_allow_harmony_private_fields)
FLAG_ACCESSOR(kAllowHarmonyPrivateMethods, allow_harmony_private_methods,
- set_allow_harmony_private_methods);
- FLAG_ACCESSOR(kIsOneshotIIFE, is_oneshot_iife, set_is_oneshot_iife);
+ set_allow_harmony_private_methods)
+ FLAG_ACCESSOR(kIsOneshotIIFE, is_oneshot_iife, set_is_oneshot_iife)
+ FLAG_ACCESSOR(kCollectSourcePositions, collect_source_positions,
+ set_collect_source_positions)
#undef FLAG_ACCESSOR
void set_parse_restriction(ParseRestriction restriction) {
@@ -293,25 +297,27 @@ class V8_EXPORT_PRIVATE ParseInfo {
kIsNamedExpression = 1 << 8,
kLazyCompile = 1 << 9,
kCollectTypeProfile = 1 << 10,
- kBlockCoverageEnabled = 1 << 11,
- kIsAsmWasmBroken = 1 << 12,
- kOnBackgroundThread = 1 << 13,
- kWrappedAsFunction = 1 << 14, // Implicitly wrapped as function.
- kAllowEvalCache = 1 << 15,
- kIsDeclaration = 1 << 16,
- kRequiresInstanceMembersInitializer = 1 << 17,
- kContainsAsmModule = 1 << 18,
- kMightAlwaysOpt = 1 << 19,
- kAllowLazyCompile = 1 << 20,
- kAllowNativeSyntax = 1 << 21,
- kAllowHarmonyPublicFields = 1 << 22,
- kAllowHarmonyStaticFields = 1 << 23,
- kAllowHarmonyDynamicImport = 1 << 24,
- kAllowHarmonyImportMeta = 1 << 25,
- kAllowHarmonyNumericSeparator = 1 << 26,
- kAllowHarmonyPrivateFields = 1 << 27,
- kAllowHarmonyPrivateMethods = 1 << 28,
- kIsOneshotIIFE = 1 << 29
+ kCoverageEnabled = 1 << 11,
+ kBlockCoverageEnabled = 1 << 12,
+ kIsAsmWasmBroken = 1 << 13,
+ kOnBackgroundThread = 1 << 14,
+ kWrappedAsFunction = 1 << 15, // Implicitly wrapped as function.
+ kAllowEvalCache = 1 << 16,
+ kIsDeclaration = 1 << 17,
+ kRequiresInstanceMembersInitializer = 1 << 18,
+ kContainsAsmModule = 1 << 19,
+ kMightAlwaysOpt = 1 << 20,
+ kAllowLazyCompile = 1 << 21,
+ kAllowNativeSyntax = 1 << 22,
+ kAllowHarmonyPublicFields = 1 << 23,
+ kAllowHarmonyStaticFields = 1 << 24,
+ kAllowHarmonyDynamicImport = 1 << 25,
+ kAllowHarmonyImportMeta = 1 << 26,
+ kAllowHarmonyNumericSeparator = 1 << 27,
+ kAllowHarmonyPrivateFields = 1 << 28,
+ kAllowHarmonyPrivateMethods = 1 << 29,
+ kIsOneshotIIFE = 1 << 30,
+ kCollectSourcePositions = 1u << 31,
};
//------------- Inputs to parsing and scope analysis -----------------------
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 33c165cd92..3bcb8bed91 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -16,6 +16,7 @@
#include "src/base/hashmap.h"
#include "src/base/v8-fallthrough.h"
#include "src/counters.h"
+#include "src/function-kind.h"
#include "src/globals.h"
#include "src/log.h"
#include "src/message-template.h"
@@ -277,13 +278,13 @@ class ParserBase {
bool allow_##name() const { return allow_##name##_; } \
void set_allow_##name(bool allow) { allow_##name##_ = allow; }
- ALLOW_ACCESSORS(natives);
- ALLOW_ACCESSORS(harmony_public_fields);
- ALLOW_ACCESSORS(harmony_static_fields);
- ALLOW_ACCESSORS(harmony_dynamic_import);
- ALLOW_ACCESSORS(harmony_import_meta);
- ALLOW_ACCESSORS(harmony_private_methods);
- ALLOW_ACCESSORS(eval_cache);
+ ALLOW_ACCESSORS(natives)
+ ALLOW_ACCESSORS(harmony_public_fields)
+ ALLOW_ACCESSORS(harmony_static_fields)
+ ALLOW_ACCESSORS(harmony_dynamic_import)
+ ALLOW_ACCESSORS(harmony_import_meta)
+ ALLOW_ACCESSORS(harmony_private_methods)
+ ALLOW_ACCESSORS(eval_cache)
#undef ALLOW_ACCESSORS
@@ -399,7 +400,7 @@ class ParserBase {
}
void set_next_function_is_likely_called() {
- next_function_is_likely_called_ = true;
+ next_function_is_likely_called_ = !FLAG_max_lazy;
}
void RecordFunctionOrEvalCall() { contains_function_or_eval_ = true; }
@@ -480,16 +481,14 @@ class ParserBase {
struct DeclarationParsingResult {
struct Declaration {
- Declaration(ExpressionT pattern, int initializer_position,
- ExpressionT initializer)
- : pattern(pattern),
- initializer_position(initializer_position),
- initializer(initializer) {}
+ Declaration(ExpressionT pattern, ExpressionT initializer)
+ : pattern(pattern), initializer(initializer) {
+ DCHECK_IMPLIES(Impl::IsNull(pattern), Impl::IsNull(initializer));
+ }
ExpressionT pattern;
- int initializer_position;
- int value_beg_position = kNoSourcePosition;
ExpressionT initializer;
+ int value_beg_pos = kNoSourcePosition;
};
DeclarationParsingResult()
@@ -798,6 +797,7 @@ class ParserBase {
bool PeekContextualKeyword(const AstRawString* name) {
return peek() == Token::IDENTIFIER &&
+ !scanner()->next_literal_contains_escapes() &&
scanner()->NextSymbol(ast_value_factory()) == name;
}
@@ -809,14 +809,21 @@ class ParserBase {
return false;
}
- void ExpectMetaProperty(const AstRawString* property_name,
- const char* full_name, int pos);
-
- void ExpectContextualKeyword(const AstRawString* name) {
+ void ExpectContextualKeyword(const AstRawString* name,
+ const char* fullname = nullptr, int pos = -1) {
Expect(Token::IDENTIFIER);
if (V8_UNLIKELY(scanner()->CurrentSymbol(ast_value_factory()) != name)) {
ReportUnexpectedToken(scanner()->current_token());
}
+ if (V8_UNLIKELY(scanner()->literal_contains_escapes())) {
+ const char* full = fullname == nullptr
+ ? reinterpret_cast<const char*>(name->raw_data())
+ : fullname;
+ int start = pos == -1 ? position() : pos;
+ impl()->ReportMessageAt(Scanner::Location(start, end_position()),
+ MessageTemplate::kInvalidEscapedMetaProperty,
+ full);
+ }
}
bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode) {
@@ -955,6 +962,26 @@ class ParserBase {
if (is_strict(language_mode)) parameters.ValidateStrictMode(impl());
}
+ // Must be called if the receiver reference needs to be available from the
+ // current point. It causes the receiver to be context-allocated if necessary.
+ // Returns the receiver variable that we're referencing.
+ V8_INLINE Variable* UseThis() {
+ DeclarationScope* closure_scope = scope()->GetClosureScope();
+ DeclarationScope* receiver_scope = closure_scope->GetReceiverScope();
+ Variable* var = receiver_scope->receiver();
+ var->set_is_used();
+ if (closure_scope == receiver_scope) {
+ // It's possible that we're parsing the head of an arrow function, in
+ // which case we haven't realized yet that closure_scope !=
+ // receiver_scope. Mark through the ExpressionScope for now.
+ expression_scope()->RecordThisUse();
+ } else {
+ closure_scope->set_has_this_reference();
+ var->ForceContextAllocation();
+ }
+ return var;
+ }
+
V8_INLINE IdentifierT ParseAndClassifyIdentifier(Token::Value token);
// Parses an identifier or a strict mode future reserved word. Allows passing
// in function_kind for the case of parsing the identifier in a function
@@ -968,6 +995,8 @@ class ParserBase {
// mode.
IdentifierT ParseNonRestrictedIdentifier();
+ // This method should be used to parse property names that are ambiguous:
+ // they may later turn out to be destructuring identifiers.
V8_INLINE IdentifierT ParsePropertyName();
ExpressionT ParsePropertyOrPrivatePropertyName();
@@ -1027,10 +1056,11 @@ class ParserBase {
ExpressionT ParseAwaitExpression();
V8_INLINE ExpressionT ParseUnaryExpression();
V8_INLINE ExpressionT ParsePostfixExpression();
+ V8_NOINLINE ExpressionT ParsePostfixContinuation(ExpressionT expression,
+ int lhs_beg_pos);
V8_INLINE ExpressionT ParseLeftHandSideExpression();
ExpressionT ParseLeftHandSideContinuation(ExpressionT expression);
ExpressionT ParseMemberWithPresentNewPrefixesExpression();
- V8_INLINE ExpressionT ParseMemberWithNewPrefixesExpression();
ExpressionT ParseFunctionExpression();
V8_INLINE ExpressionT ParseMemberExpression();
V8_INLINE ExpressionT
@@ -1082,6 +1112,31 @@ class ParserBase {
FunctionLiteral::FunctionType function_type,
FunctionBodyType body_type);
+ // Check if the scope has conflicting var/let declarations from different
+ // scopes. This covers, for example,
+ //
+ // function f() { { { var x; } let x; } }
+ // function g() { { var x; let x; } }
+ //
+ // The var declarations are hoisted to the function scope, but originate from
+ // a scope where the name has also been let bound or the var declaration is
+ // hoisted over such a scope.
+ void CheckConflictingVarDeclarations(DeclarationScope* scope) {
+ if (has_error()) return;
+ Declaration* decl = scope->CheckConflictingVarDeclarations();
+ if (decl != nullptr) {
+ // In ES6, conflicting variable bindings are early errors.
+ const AstRawString* name = decl->var()->raw_name();
+ int position = decl->position();
+ Scanner::Location location =
+ position == kNoSourcePosition
+ ? Scanner::Location::invalid()
+ : Scanner::Location(position, position + 1);
+ impl()->ReportMessageAt(location, MessageTemplate::kVarRedeclaration,
+ name);
+ }
+ }
+
// TODO(nikolaos, marja): The first argument should not really be passed
// by value. The method is expected to add the parsed statements to the
// list. This works because in the case of the parser, StatementListT is
@@ -1159,39 +1214,6 @@ class ParserBase {
return identifier == ast_value_factory()->let_string();
}
- void DesugarBindingInForEachStatement(ForInfo* for_info, BlockT* body_block,
- ExpressionT* each_variable) {
- // Annex B.3.5 prohibits the form
- // `try {} catch(e) { for (var e of {}); }`
- // So if we are parsing a statement like `for (var ... of ...)`
- // we need to walk up the scope chain and look for catch scopes
- // which have a simple binding, then compare their binding against
- // all of the names declared in the init of the for-of we're
- // parsing.
- bool is_for_var_of =
- for_info->mode == ForEachStatement::ITERATE &&
- for_info->parsing_result.descriptor.mode == VariableMode::kVar;
-
- if (is_for_var_of) {
- Scope* scope = this->scope();
- while (scope != nullptr && !scope->is_declaration_scope()) {
- if (scope->is_catch_scope()) {
- auto name = scope->catch_variable()->raw_name();
- // If it's a simple binding and the name is declared in the for loop.
- if (name != ast_value_factory()->dot_catch_string() &&
- for_info->bound_names.Contains(name)) {
- impl()->ReportMessageAt(for_info->parsing_result.bindings_loc,
- MessageTemplate::kVarRedeclaration, name);
- }
- }
- scope = scope->outer_scope();
- }
- }
-
- impl()->DesugarBindingInForEachStatement(for_info, body_block,
- each_variable);
- }
-
bool IsNextLetKeyword();
// Checks if the expression is a valid reference expression (e.g., on the
@@ -1461,9 +1483,8 @@ template <typename Impl>
typename ParserBase<Impl>::IdentifierT
ParserBase<Impl>::ParseAndClassifyIdentifier(Token::Value next) {
DCHECK_EQ(scanner()->current_token(), next);
- STATIC_ASSERT(Token::IDENTIFIER + 1 == Token::ASYNC);
if (V8_LIKELY(IsInRange(next, Token::IDENTIFIER, Token::ASYNC))) {
- IdentifierT name = impl()->GetSymbol();
+ IdentifierT name = impl()->GetIdentifier();
if (V8_UNLIKELY(impl()->IsArguments(name) &&
scope()->ShouldBanArguments())) {
ReportMessage(MessageTemplate::kArgumentsDisallowedInInitializer);
@@ -1481,13 +1502,13 @@ ParserBase<Impl>::ParseAndClassifyIdentifier(Token::Value next) {
if (next == Token::AWAIT) {
expression_scope()->RecordAsyncArrowParametersError(
scanner()->location(), MessageTemplate::kAwaitBindingIdentifier);
- return impl()->GetSymbol();
+ return impl()->GetIdentifier();
}
DCHECK(Token::IsStrictReservedWord(next));
expression_scope()->RecordStrictModeParameterError(
scanner()->location(), MessageTemplate::kUnexpectedStrictReserved);
- return impl()->GetSymbol();
+ return impl()->GetIdentifier();
}
template <class Impl>
@@ -1502,7 +1523,7 @@ typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifier(
return impl()->EmptyIdentifierString();
}
- return impl()->GetSymbol();
+ return impl()->GetIdentifier();
}
template <typename Impl>
@@ -1522,7 +1543,10 @@ ParserBase<Impl>::ParseNonRestrictedIdentifier() {
template <typename Impl>
typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParsePropertyName() {
Token::Value next = Next();
- if (V8_LIKELY(Token::IsPropertyName(next))) return impl()->GetSymbol();
+ if (V8_LIKELY(Token::IsPropertyName(next))) {
+ if (peek() == Token::COLON) return impl()->GetSymbol();
+ return impl()->GetIdentifier();
+ }
ReportUnexpectedToken(next);
return impl()->EmptyIdentifierString();
@@ -1539,7 +1563,23 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
name = impl()->GetSymbol();
key = factory()->NewStringLiteral(name, pos);
} else if (allow_harmony_private_fields() && next == Token::PRIVATE_NAME) {
- name = impl()->GetSymbol();
+ // In the case of a top level function, we completely skip
+ // analysing its scope, meaning we don't have a chance to
+ // resolve private names and find that they are not enclosed in a
+ // class body.
+ //
+ // Here, we check if this is a new private name reference in a top
+ // level function and throw an error if so.
+ //
+ // Bug(v8:7468): This hack will go away once we refactor private
+ // name resolution to happen independently from scope resolution.
+ if (scope()->scope_type() == FUNCTION_SCOPE &&
+ scope()->outer_scope() != nullptr &&
+ scope()->outer_scope()->scope_type() == SCRIPT_SCOPE) {
+ ReportMessage(MessageTemplate::kInvalidPrivateFieldResolution);
+ }
+
+ name = impl()->GetIdentifier();
key = impl()->ExpressionFromIdentifier(name, pos, InferName::kNo);
} else {
ReportUnexpectedToken(next);
@@ -1637,7 +1677,8 @@ ParserBase<Impl>::ParsePrimaryExpression() {
FunctionKind kind = FunctionKind::kArrowFunction;
if (V8_UNLIKELY(token == Token::ASYNC &&
- !scanner()->HasLineTerminatorBeforeNext())) {
+ !scanner()->HasLineTerminatorBeforeNext() &&
+ !scanner()->literal_contains_escapes())) {
// async function ...
if (peek() == Token::FUNCTION) return ParseAsyncFunctionLiteral();
@@ -1668,15 +1709,29 @@ ParserBase<Impl>::ParsePrimaryExpression() {
}
switch (token) {
+ case Token::NEW:
+ return ParseMemberWithPresentNewPrefixesExpression();
+
case Token::THIS: {
Consume(Token::THIS);
- return impl()->ThisExpression(beg_pos);
+ return impl()->ThisExpression();
}
case Token::ASSIGN_DIV:
case Token::DIV:
return ParseRegExpLiteral();
+ case Token::FUNCTION:
+ return ParseFunctionExpression();
+
+ case Token::SUPER: {
+ const bool is_new = false;
+ return ParseSuperExpression(is_new);
+ }
+ case Token::IMPORT:
+ if (!allow_harmony_dynamic_import()) break;
+ return ParseImportExpressions();
+
case Token::LBRACK:
return ParseArrayLiteral();
@@ -1914,10 +1969,13 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseProperty(
Token::Value token = peek();
if ((token != Token::MUL && prop_info->ParsePropertyKindFromToken(token)) ||
scanner()->HasLineTerminatorBeforeNext()) {
- prop_info->name = impl()->GetSymbol();
+ prop_info->name = impl()->GetIdentifier();
impl()->PushLiteralName(prop_info->name);
return factory()->NewStringLiteral(prop_info->name, position());
}
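+ // Past this point 'async' acts as a method modifier, which an escaped
+ // form may not do: e.g. '({ \u0061sync m() {} })' is a SyntaxError.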
+ if (V8_UNLIKELY(scanner()->literal_contains_escapes())) {
+ impl()->ReportUnexpectedToken(Token::ESCAPED_KEYWORD);
+ }
prop_info->function_flags = ParseFunctionFlag::kIsAsync;
prop_info->kind = ParsePropertyKind::kMethod;
}
@@ -1928,21 +1986,21 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseProperty(
}
if (prop_info->kind == ParsePropertyKind::kNotSet &&
- Check(Token::IDENTIFIER)) {
- IdentifierT symbol = impl()->GetSymbol();
- if (!prop_info->ParsePropertyKindFromToken(peek())) {
- if (impl()->IdentifierEquals(symbol, ast_value_factory()->get_string())) {
- prop_info->kind = ParsePropertyKind::kAccessorGetter;
- } else if (impl()->IdentifierEquals(symbol,
- ast_value_factory()->set_string())) {
- prop_info->kind = ParsePropertyKind::kAccessorSetter;
- }
- }
- if (!IsAccessor(prop_info->kind)) {
- prop_info->name = symbol;
+ IsInRange(peek(), Token::GET, Token::SET)) {
+ Token::Value token = Next();
+ if (prop_info->ParsePropertyKindFromToken(peek())) {
+ prop_info->name = impl()->GetIdentifier();
impl()->PushLiteralName(prop_info->name);
return factory()->NewStringLiteral(prop_info->name, position());
}
+ if (V8_UNLIKELY(scanner()->literal_contains_escapes())) {
+ impl()->ReportUnexpectedToken(Token::ESCAPED_KEYWORD);
+ }
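+ // 'get'/'set' followed by a property name is an accessor, e.g.
+ // '({ get x() {} })'; '({ get: 1 })' and '({ get() {} })' were
+ // handled above as an ordinary property name.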
+ if (token == Token::GET) {
+ prop_info->kind = ParsePropertyKind::kAccessorGetter;
+ } else if (token == Token::SET) {
+ prop_info->kind = ParsePropertyKind::kAccessorSetter;
+ }
}
int pos = peek_position();
@@ -1966,19 +2024,26 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseProperty(
if (prop_info->kind == ParsePropertyKind::kNotSet) {
prop_info->ParsePropertyKindFromToken(peek());
}
- prop_info->name = impl()->GetSymbol();
- if (prop_info->position == PropertyPosition::kObjectLiteral ||
- (!allow_harmony_private_methods() &&
- (IsAccessor(prop_info->kind) ||
- prop_info->kind == ParsePropertyKind::kMethod))) {
+ prop_info->name = impl()->GetIdentifier();
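+ // Private names are only valid inside class bodies, so e.g.
+ // '({ #x: 1 })' is rejected here.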
+ if (V8_UNLIKELY(prop_info->position ==
+ PropertyPosition::kObjectLiteral)) {
+ ReportUnexpectedToken(Token::PRIVATE_NAME);
+ prop_info->kind = ParsePropertyKind::kNotSet;
+ return impl()->FailureExpression();
+ }
+ if (V8_UNLIKELY(!allow_harmony_private_methods() &&
+ (IsAccessor(prop_info->kind) ||
+ prop_info->kind == ParsePropertyKind::kMethod))) {
ReportUnexpectedToken(Next());
+ prop_info->kind = ParsePropertyKind::kNotSet;
return impl()->FailureExpression();
}
break;
case Token::STRING:
Consume(Token::STRING);
- prop_info->name = impl()->GetSymbol();
+ prop_info->name = peek() == Token::COLON ? impl()->GetSymbol()
+ : impl()->GetIdentifier();
is_array_index = impl()->IsArrayIndex(prop_info->name, &index);
break;
@@ -2071,13 +2136,13 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassInfo* class_info,
if (peek() == Token::LPAREN) {
prop_info->kind = ParsePropertyKind::kMethod;
// TODO(bakkot) specialize on 'static'
- prop_info->name = impl()->GetSymbol();
+ prop_info->name = impl()->GetIdentifier();
name_expression =
factory()->NewStringLiteral(prop_info->name, position());
} else if (peek() == Token::ASSIGN || peek() == Token::SEMICOLON ||
peek() == Token::RBRACE) {
// TODO(bakkot) specialize on 'static'
- prop_info->name = impl()->GetSymbol();
+ prop_info->name = impl()->GetIdentifier();
name_expression =
factory()->NewStringLiteral(prop_info->name, position());
} else {
@@ -2264,6 +2329,9 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
Scanner::Location next_loc = scanner()->peek_location();
ExpressionT name_expression = ParseProperty(prop_info);
+
+ DCHECK_IMPLIES(name_token == Token::PRIVATE_NAME, has_error());
+
IdentifierT name = prop_info->name;
ParseFunctionFlags function_flags = prop_info->function_flags;
ParsePropertyKind kind = prop_info->kind;
@@ -2285,7 +2353,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
if (!prop_info->is_computed_name &&
- impl()->IdentifierEquals(name, ast_value_factory()->proto_string())) {
+ scanner()->CurrentLiteralEquals("__proto__")) {
if (*has_seen_proto) {
expression_scope()->RecordExpressionError(
scanner()->location(), MessageTemplate::kDuplicateProto);
@@ -2322,10 +2390,6 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
DCHECK(!prop_info->is_computed_name);
- if (name_token == Token::LET) {
- expression_scope()->RecordLexicalDeclarationError(
- scanner()->location(), MessageTemplate::kLetInLexicalBinding);
- }
if (name_token == Token::AWAIT) {
DCHECK(!is_async_function());
expression_scope()->RecordAsyncArrowParametersError(
@@ -2666,6 +2730,9 @@ ParserBase<Impl>::ParseYieldExpression() {
expression_scope()->RecordParameterInitializerError(
scanner()->peek_location(), MessageTemplate::kYieldInParameter);
Consume(Token::YIELD);
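+ // 'yield' written with escapes has no keyword meaning, so e.g.
+ // 'function* g() { \u0079ield 1; }' is a SyntaxError.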
+ if (V8_UNLIKELY(scanner()->literal_contains_escapes())) {
+ impl()->ReportUnexpectedToken(Token::ESCAPED_KEYWORD);
+ }
CheckStackOverflow();
@@ -2890,6 +2957,9 @@ ParserBase<Impl>::ParseAwaitExpression() {
MessageTemplate::kAwaitExpressionFormalParameter);
int await_pos = peek_position();
Consume(Token::AWAIT);
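+ // Likewise for 'await': e.g. 'async function f() { \u0061wait 0; }'
+ // is rejected.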
+ if (V8_UNLIKELY(scanner()->literal_contains_escapes())) {
+ impl()->ReportUnexpectedToken(Token::ESCAPED_KEYWORD);
+ }
CheckStackOverflow();
@@ -2933,24 +3003,29 @@ ParserBase<Impl>::ParsePostfixExpression() {
int lhs_beg_pos = peek_position();
ExpressionT expression = ParseLeftHandSideExpression();
- if (!scanner()->HasLineTerminatorBeforeNext() && Token::IsCountOp(peek())) {
- if (V8_UNLIKELY(!IsValidReferenceExpression(expression))) {
- expression = RewriteInvalidReferenceExpression(
- expression, lhs_beg_pos, end_position(),
- MessageTemplate::kInvalidLhsInPostfixOp);
- }
- if (impl()->IsIdentifier(expression)) {
- expression_scope()->MarkIdentifierAsAssigned();
- }
+ if (V8_LIKELY(!Token::IsCountOp(peek()) ||
+ scanner()->HasLineTerminatorBeforeNext())) {
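+    // Postfix '++'/'--' is a restricted production: a line terminator
+    // before it triggers ASI, so e.g. 'a\n++b' parses as 'a; ++b'.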
+ return expression;
+ }
+ return ParsePostfixContinuation(expression, lhs_beg_pos);
+}
- Token::Value next = Next();
- expression =
- factory()->NewCountOperation(next,
- false /* postfix */,
- expression,
- position());
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParsePostfixContinuation(ExpressionT expression,
+ int lhs_beg_pos) {
+ if (V8_UNLIKELY(!IsValidReferenceExpression(expression))) {
+ expression = RewriteInvalidReferenceExpression(
+ expression, lhs_beg_pos, end_position(),
+ MessageTemplate::kInvalidLhsInPostfixOp);
}
- return expression;
+ if (impl()->IsIdentifier(expression)) {
+ expression_scope()->MarkIdentifierAsAssigned();
+ }
+
+ Token::Value next = Next();
+ return factory()->NewCountOperation(next, false /* postfix */, expression,
+ position());
}
template <typename Impl>
@@ -2959,7 +3034,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression() {
// LeftHandSideExpression ::
// (NewExpression | MemberExpression) ...
- ExpressionT result = ParseMemberWithNewPrefixesExpression();
+ ExpressionT result = ParseMemberExpression();
if (!Token::IsPropertyOrCall(peek())) return result;
return ParseLeftHandSideContinuation(result);
}
@@ -2971,7 +3046,8 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
if (V8_UNLIKELY(peek() == Token::LPAREN && impl()->IsIdentifier(result) &&
scanner()->current_token() == Token::ASYNC &&
- !scanner()->HasLineTerminatorBeforeNext())) {
+ !scanner()->HasLineTerminatorBeforeNext() &&
+ !scanner()->literal_contains_escapes())) {
DCHECK(impl()->IsAsync(impl()->AsIdentifier(result)));
int pos = position();
@@ -3128,7 +3204,7 @@ ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression() {
result = ParseNewTargetExpression();
return ParseMemberExpressionContinuation(result);
} else {
- result = ParseMemberWithNewPrefixesExpression();
+ result = ParseMemberExpression();
}
if (peek() == Token::LPAREN) {
// NewExpression with arguments.
@@ -3153,13 +3229,6 @@ ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression() {
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseMemberWithNewPrefixesExpression() {
- return peek() == Token::NEW ? ParseMemberWithPresentNewPrefixesExpression()
- : ParseMemberExpression();
-}
-
-template <typename Impl>
-typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseFunctionExpression() {
Consume(Token::FUNCTION);
int function_token_position = position();
@@ -3207,22 +3276,11 @@ ParserBase<Impl>::ParseMemberExpression() {
// ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
//
// The '[' Expression ']' and '.' Identifier parts are parsed by
- // ParseMemberExpressionContinuation, and the Arguments part is parsed by the
- // caller.
+ // ParseMemberExpressionContinuation, and everything preceding it is merged
+ // into ParsePrimaryExpression.
// Parse the initial primary or function expression.
- ExpressionT result;
- if (peek() == Token::FUNCTION) {
- result = ParseFunctionExpression();
- } else if (peek() == Token::SUPER) {
- const bool is_new = false;
- result = ParseSuperExpression(is_new);
- } else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT) {
- result = ParseImportExpressions();
- } else {
- result = ParsePrimaryExpression();
- }
-
+ ExpressionT result = ParsePrimaryExpression();
return ParseMemberExpressionContinuation(result);
}
@@ -3233,8 +3291,9 @@ ParserBase<Impl>::ParseImportExpressions() {
Consume(Token::IMPORT);
int pos = position();
- if (allow_harmony_import_meta() && peek() == Token::PERIOD) {
- ExpectMetaProperty(ast_value_factory()->meta_string(), "import.meta", pos);
+ if (allow_harmony_import_meta() && Check(Token::PERIOD)) {
+ ExpectContextualKeyword(ast_value_factory()->meta_string(), "import.meta",
+ pos);
if (!parsing_module_) {
impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kImportMetaOutsideModule);
@@ -3267,7 +3326,15 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
if (IsConciseMethod(kind) || IsAccessorFunction(kind) ||
IsClassConstructor(kind)) {
if (Token::IsProperty(peek())) {
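+      // Private fields cannot be accessed on super, so e.g.
+      // 'class C extends B { m() { return super.#x; } }' is an error.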
+ if (peek() == Token::PERIOD && PeekAhead() == Token::PRIVATE_NAME) {
+ Consume(Token::PERIOD);
+ Consume(Token::PRIVATE_NAME);
+
+ impl()->ReportMessage(MessageTemplate::kUnexpectedPrivateField);
+ return impl()->FailureExpression();
+ }
scope->RecordSuperPropertyUsage();
+ UseThis();
return impl()->NewSuperPropertyReference(pos);
}
// new super() is never allowed.
@@ -3275,6 +3342,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
if (!is_new && peek() == Token::LPAREN && IsDerivedConstructor(kind)) {
// TODO(rossberg): This might not be the correct FunctionState for the
// method here.
+ expression_scope()->RecordThisUse();
+ UseThis()->set_maybe_assigned();
return impl()->NewSuperCallReference(pos);
}
}
@@ -3285,22 +3354,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
}
template <typename Impl>
-void ParserBase<Impl>::ExpectMetaProperty(const AstRawString* property_name,
- const char* full_name, int pos) {
- Consume(Token::PERIOD);
- ExpectContextualKeyword(property_name);
- if (V8_UNLIKELY(scanner()->literal_contains_escapes())) {
- impl()->ReportMessageAt(Scanner::Location(pos, end_position()),
- MessageTemplate::kInvalidEscapedMetaProperty,
- full_name);
- }
-}
-
-template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseNewTargetExpression() {
int pos = position();
- ExpectMetaProperty(ast_value_factory()->target_string(), "new.target", pos);
+ Consume(Token::PERIOD);
+ ExpectContextualKeyword(ast_value_factory()->target_string(), "new.target",
+ pos);
if (!GetReceiverScope()->is_function_scope()) {
impl()->ReportMessageAt(scanner()->location(),
@@ -3363,6 +3422,7 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters) {
// BindingElement[?Yield, ?GeneratorParameter]
FuncNameInferrerState fni_state(&fni_);
int pos = peek_position();
+ auto declaration_it = scope()->declarations()->end();
ExpressionT pattern = ParseBindingPattern();
if (impl()->IsIdentifier(pattern)) {
ClassifyParameter(impl()->AsIdentifier(pattern), pos, end_position());
@@ -3379,11 +3439,17 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters) {
return;
}
- AcceptINScope scope(this, true);
+ AcceptINScope accept_in_scope(this, true);
initializer = ParseAssignmentExpression();
impl()->SetFunctionNameFromIdentifierRef(initializer, pattern);
}
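+  // Backfill the initializer position of every declaration this parameter
+  // introduced (the iterator saved above marks the first one), so scope
+  // analysis knows from where the binding has a value; e.g. both 'a' and
+  // 'b' in '({a, b} = {}) => 0' end at the initializer's end.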
+ auto declaration_end = scope()->declarations()->end();
+ int initializer_end = end_position();
+ for (; declaration_it != declaration_end; ++declaration_it) {
+ declaration_it->var()->set_initializer_position(initializer_end);
+ }
+
impl()->AddFormalParameter(parameters, pattern, initializer, end_position(),
parameters->has_rest);
}
@@ -3473,6 +3539,11 @@ void ParserBase<Impl>::ParseVariableDeclarations(
VariableDeclarationParsingScope declaration(
impl(), parsing_result->descriptor.mode, names);
+ Scope* target_scope = IsLexicalVariableMode(parsing_result->descriptor.mode)
+ ? scope()
+ : scope()->GetDeclarationScope();
+
+ auto declaration_it = target_scope->declarations()->end();
int bindings_start = peek_position();
do {
@@ -3480,17 +3551,44 @@ void ParserBase<Impl>::ParseVariableDeclarations(
FuncNameInferrerState fni_state(&fni_);
int decl_pos = peek_position();
- ExpressionT pattern = ParseBindingPattern();
+
+ IdentifierT name;
+ ExpressionT pattern;
+ // Check for an identifier first, so that we can elide the pattern in cases
+ // where there is no initializer (and so no proxy needs to be created).
+ if (V8_LIKELY(Token::IsAnyIdentifier(peek()))) {
+ name = ParseAndClassifyIdentifier(Next());
+ if (V8_UNLIKELY(is_strict(language_mode()) &&
+ impl()->IsEvalOrArguments(name))) {
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kStrictEvalArguments);
+ return;
+ }
+ if (peek() == Token::ASSIGN ||
+ (var_context == kForStatement && PeekInOrOf()) ||
+ parsing_result->descriptor.mode == VariableMode::kLet) {
+ // Assignments need the variable expression for the assignment LHS, and
+ // for of/in will need it later, so create the expression now.
+ pattern = impl()->ExpressionFromIdentifier(name, decl_pos);
+ } else {
+ // Otherwise, elide the variable expression and just declare it.
+ impl()->DeclareIdentifier(name, decl_pos);
+ pattern = impl()->NullExpression();
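+        // e.g. plain 'var x;' takes this path, while 'var x = 1;' or
+        // 'for (var x of xs)' build the proxy above.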
+ }
+ } else {
+ name = impl()->NullIdentifier();
+ pattern = ParseBindingPattern();
+ DCHECK(!impl()->IsIdentifier(pattern));
+ }
Scanner::Location variable_loc = scanner()->location();
ExpressionT value = impl()->NullExpression();
- int initializer_position = kNoSourcePosition;
- int value_beg_position = kNoSourcePosition;
+ int value_beg_pos = kNoSourcePosition;
if (Check(Token::ASSIGN)) {
- value_beg_position = peek_position();
-
+ DCHECK(!impl()->IsNull(pattern));
{
+ value_beg_pos = peek_position();
AcceptINScope scope(this, var_context != kForStatement);
value = ParseAssignmentExpression();
}
@@ -3510,18 +3608,36 @@ void ParserBase<Impl>::ParseVariableDeclarations(
}
impl()->SetFunctionNameFromIdentifierRef(value, pattern);
-
- // End position of the initializer is after the assignment expression.
- initializer_position = end_position();
} else {
+#ifdef DEBUG
+ // We can fall through into here on error paths, so don't DCHECK those.
+ if (!has_error()) {
+ // We should never get identifier patterns for the non-initializer path,
+ // as those expressions should be elided.
+ DCHECK_EQ(!impl()->IsNull(name),
+ Token::IsAnyIdentifier(scanner()->current_token()));
+ DCHECK_IMPLIES(impl()->IsNull(pattern), !impl()->IsNull(name));
+ // The only times we have a non-null pattern are:
+ // 1. This is a destructuring declaration (with no initializer, which
+ // is immediately an error),
+ // 2. This is a declaration in a for in/of loop, or
+ // 3. This is a let (which has an implicit undefined initializer)
+ DCHECK_IMPLIES(
+ !impl()->IsNull(pattern),
+ !impl()->IsIdentifier(pattern) ||
+ (var_context == kForStatement && PeekInOrOf()) ||
+ parsing_result->descriptor.mode == VariableMode::kLet);
+ }
+#endif
+
if (var_context != kForStatement || !PeekInOrOf()) {
// ES6 'const' and binding patterns require initializers.
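+        // e.g. 'const x;' and 'let [a];' are SyntaxErrors, while plain
+        // 'let x;' falls through and is initialized to undefined below.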
if (parsing_result->descriptor.mode == VariableMode::kConst ||
- !impl()->IsIdentifier(pattern)) {
+ impl()->IsNull(name)) {
impl()->ReportMessageAt(
Scanner::Location(decl_pos, end_position()),
MessageTemplate::kDeclarationMissingInitializer,
- !impl()->IsIdentifier(pattern) ? "destructuring" : "const");
+ impl()->IsNull(name) ? "destructuring" : "const");
return;
}
// 'let x' initializes 'x' to undefined.
@@ -3529,14 +3645,22 @@ void ParserBase<Impl>::ParseVariableDeclarations(
value = factory()->NewUndefinedLiteral(position());
}
}
+ }
- // End position of the initializer is after the variable.
- initializer_position = position();
+ int initializer_position = end_position();
+ auto declaration_end = target_scope->declarations()->end();
+ for (; declaration_it != declaration_end; ++declaration_it) {
+ declaration_it->var()->set_initializer_position(initializer_position);
}
- typename DeclarationParsingResult::Declaration decl(
- pattern, initializer_position, value);
- decl.value_beg_position = value_beg_position;
+ // Patterns should be elided iff they don't have an initializer.
+ DCHECK_IMPLIES(impl()->IsNull(pattern),
+ impl()->IsNull(value) ||
+ (var_context == kForStatement && PeekInOrOf()));
+
+ typename DeclarationParsingResult::Declaration decl(pattern, value);
+ decl.value_beg_pos = value_beg_pos;
+
parsing_result->declarations.push_back(decl);
} while (Check(Token::COMMA));
@@ -3617,10 +3741,10 @@ ParserBase<Impl>::ParseHoistableDeclaration(
FuncNameInferrerState fni_state(&fni_);
impl()->PushEnclosingName(name);
- FunctionKind kind = FunctionKindFor(flags);
+ FunctionKind function_kind = FunctionKindFor(flags);
FunctionLiteralT function = impl()->ParseFunctionLiteral(
- name, scanner()->location(), name_validity, kind, pos,
+ name, scanner()->location(), name_validity, function_kind, pos,
FunctionLiteral::kDeclaration, language_mode(), nullptr);
// In ES6, a function behaves as a lexical binding, except in
@@ -3631,16 +3755,17 @@ ParserBase<Impl>::ParseHoistableDeclaration(
: VariableMode::kVar;
// Async functions don't undergo sloppy mode block scoped hoisting, and don't
// allow duplicates in a block. Both are represented by the
- // sloppy_block_function_map. Don't add them to the map for async functions.
+ // sloppy_block_functions_. Don't add them to it for async functions.
// Generators are also supposed to be prohibited; currently doing this behind
// a flag and UseCounting violations to assess web compatibility.
- bool is_sloppy_block_function = is_sloppy(language_mode()) &&
- !scope()->is_declaration_scope() &&
- flags == ParseFunctionFlag::kIsNormal;
-
- return impl()->DeclareFunction(variable_name, function, mode, pos,
- end_position(), is_sloppy_block_function,
- names);
+ VariableKind kind = is_sloppy(language_mode()) &&
+ !scope()->is_declaration_scope() &&
+ flags == ParseFunctionFlag::kIsNormal
+ ? SLOPPY_BLOCK_FUNCTION_VARIABLE
+ : NORMAL_VARIABLE;
+
+ return impl()->DeclareFunction(variable_name, function, mode, kind, pos,
+ end_position(), names);
}
template <typename Impl>
@@ -3716,6 +3841,9 @@ ParserBase<Impl>::ParseAsyncFunctionDeclaration(
// async [no LineTerminator here] function BindingIdentifier[Await]
// ( FormalParameters[Await] ) { AsyncFunctionBody }
DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+ if (V8_UNLIKELY(scanner()->literal_contains_escapes())) {
+ impl()->ReportUnexpectedToken(Token::ESCAPED_KEYWORD);
+ }
int pos = position();
DCHECK(!scanner()->HasLineTerminatorBeforeNext());
Consume(Token::FUNCTION);
@@ -3786,8 +3914,10 @@ void ParserBase<Impl>::ParseFunctionBody(
}
if (IsDerivedConstructor(kind)) {
+ ExpressionParsingScope expression_scope(impl());
inner_body.Add(factory()->NewReturnStatement(impl()->ThisExpression(),
kNoSourcePosition));
+ expression_scope.ValidateExpression();
}
Expect(closing_token);
}
@@ -3797,6 +3927,8 @@ void ParserBase<Impl>::ParseFunctionBody(
bool allow_duplicate_parameters = false;
+ CheckConflictingVarDeclarations(inner_scope);
+
if (V8_LIKELY(parameters.is_simple)) {
DCHECK_EQ(inner_scope, function_scope);
if (is_sloppy(function_scope->language_mode())) {
@@ -3820,12 +3952,13 @@ void ParserBase<Impl>::ParseFunctionBody(
inner_body.Rewind();
inner_body.Add(inner_block);
inner_block->set_scope(inner_scope);
- const AstRawString* conflict = inner_scope->FindVariableDeclaredIn(
- function_scope, VariableMode::kLastLexicalVariableMode);
- if (conflict != nullptr) {
- impl()->ReportVarRedeclarationIn(conflict, inner_scope);
+ if (!impl()->HasCheckedSyntax()) {
+ const AstRawString* conflict = inner_scope->FindVariableDeclaredIn(
+ function_scope, VariableMode::kLastLexicalVariableMode);
+ if (conflict != nullptr) {
+ impl()->ReportVarRedeclarationIn(conflict, inner_scope);
+ }
}
- impl()->CheckConflictingVarDeclarations(inner_scope);
impl()->InsertShadowingVarBindingInitializers(inner_block);
}
}
@@ -3851,6 +3984,7 @@ void ParserBase<Impl>::CheckArityRestrictions(int param_count,
bool has_rest,
int formals_start_pos,
int formals_end_pos) {
+ if (impl()->HasCheckedSyntax()) return;
if (IsGetterFunction(function_kind)) {
if (param_count != 0) {
impl()->ReportMessageAt(
@@ -3887,6 +4021,8 @@ bool ParserBase<Impl>::IsNextLetKeyword() {
// tokens.
case Token::YIELD:
case Token::AWAIT:
+ case Token::GET:
+ case Token::SET:
case Token::ASYNC:
return true;
case Token::FUTURE_STRICT_RESERVED_WORD:
@@ -3912,7 +4048,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
DCHECK_IMPLIES(!has_error(), peek() == Token::ARROW);
- if (scanner_->HasLineTerminatorBeforeNext()) {
+ if (!impl()->HasCheckedSyntax() && scanner_->HasLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
// `=> ...` is never a valid expression, so report as syntax error.
// If next token is not `=>`, it's a syntax error anyways.
@@ -4068,7 +4204,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
bool is_anonymous = impl()->IsNull(name);
// All parts of a ClassDeclaration and ClassExpression are strict code.
- if (!is_anonymous) {
+ if (!impl()->HasCheckedSyntax() && !is_anonymous) {
if (name_is_strict_reserved) {
impl()->ReportMessageAt(class_name_location,
MessageTemplate::kUnexpectedStrictReserved);
@@ -4167,6 +4303,9 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral() {
// async [no LineTerminator here] function BindingIdentifier[Await]
// ( FormalParameters[Await] ) { AsyncFunctionBody }
DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+ if (V8_UNLIKELY(scanner()->literal_contains_escapes())) {
+ impl()->ReportUnexpectedToken(Token::ESCAPED_KEYWORD);
+ }
int pos = position();
Consume(Token::FUNCTION);
IdentifierT name = impl()->NullIdentifier();
@@ -4418,9 +4557,9 @@ void ParserBase<Impl>::ParseStatementList(StatementListT* body,
Scanner::Location token_loc = scanner()->peek_location();
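+  // A directive must match the exact source characters: e.g. a literal
+  // written as 'use \u0073trict' does not enable strict mode.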
- if (scanner()->NextLiteralEquals("use strict")) {
+ if (scanner()->NextLiteralExactlyEquals("use strict")) {
use_strict = true;
- } else if (scanner()->NextLiteralEquals("use asm")) {
+ } else if (scanner()->NextLiteralExactlyEquals("use asm")) {
use_asm = true;
}
@@ -4604,7 +4743,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
case Token::VAR:
return ParseVariableStatement(kStatement, nullptr);
case Token::ASYNC:
- if (!scanner()->HasLineTerminatorAfterNext() &&
+ if (!impl()->HasCheckedSyntax() &&
+ !scanner()->HasLineTerminatorAfterNext() &&
PeekAhead() == Token::FUNCTION) {
impl()->ReportMessageAt(
scanner()->peek_location(),
@@ -4922,7 +5062,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement() {
ExpressionT return_value = impl()->NullExpression();
if (scanner()->HasLineTerminatorBeforeNext() || Token::IsAutoSemicolon(tok)) {
if (IsDerivedConstructor(function_state_->kind())) {
- return_value = impl()->ThisExpression(loc.beg_pos);
+ ExpressionParsingScope expression_scope(impl());
+ return_value = impl()->ThisExpression();
+ expression_scope.ValidateExpression();
}
} else {
return_value = ParseExpression();
@@ -5181,9 +5323,20 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement() {
} else {
catch_info.variable = catch_info.scope->DeclareCatchVariableName(
ast_value_factory()->dot_catch_string());
+
+ auto declaration_it = scope()->declarations()->end();
+
VariableDeclarationParsingScope destructuring(
impl(), VariableMode::kLet, nullptr);
catch_info.pattern = ParseBindingPattern();
+
+ int initializer_position = end_position();
+ auto declaration_end = scope()->declarations()->end();
+ for (; declaration_it != declaration_end; ++declaration_it) {
+ declaration_it->var()->set_initializer_position(
+ initializer_position);
+ }
+
RETURN_IF_PARSE_ERROR;
catch_statements.Add(impl()->RewriteCatchPattern(&catch_info));
}
@@ -5194,18 +5347,20 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement() {
catch_statements.Add(inner_block);
// Check for `catch(e) { let e; }` and similar errors.
- Scope* inner_scope = inner_block->scope();
- if (inner_scope != nullptr) {
- const AstRawString* conflict = nullptr;
- if (impl()->IsNull(catch_info.pattern)) {
- const AstRawString* name = catch_info.variable->raw_name();
- if (inner_scope->LookupLocal(name)) conflict = name;
- } else {
- conflict = inner_scope->FindVariableDeclaredIn(
- scope(), VariableMode::kVar);
- }
- if (conflict != nullptr) {
- impl()->ReportVarRedeclarationIn(conflict, inner_scope);
+ if (!impl()->HasCheckedSyntax()) {
+ Scope* inner_scope = inner_block->scope();
+ if (inner_scope != nullptr) {
+ const AstRawString* conflict = nullptr;
+ if (impl()->IsNull(catch_info.pattern)) {
+ const AstRawString* name = catch_info.variable->raw_name();
+ if (inner_scope->LookupLocal(name)) conflict = name;
+ } else {
+ conflict = inner_scope->FindVariableDeclaredIn(
+ scope(), VariableMode::kVar);
+ }
+ if (conflict != nullptr) {
+ impl()->ReportVarRedeclarationIn(conflict, inner_scope);
+ }
}
}
@@ -5424,7 +5579,8 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
}
impl()->RecordIterationStatementSourceRange(loop, body_range);
- DesugarBindingInForEachStatement(for_info, &body_block, &each_variable);
+ impl()->DesugarBindingInForEachStatement(for_info, &body_block,
+ &each_variable);
body_block->statements()->Add(body, zone());
if (IsLexicalVariableMode(for_info->parsing_result.descriptor.mode)) {
@@ -5680,7 +5836,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
if (has_declarations) {
BlockT body_block = impl()->NullBlock();
- DesugarBindingInForEachStatement(&for_info, &body_block, &each_variable);
+ impl()->DesugarBindingInForEachStatement(&for_info, &body_block,
+ &each_variable);
body_block->statements()->Add(body, zone());
body_block->set_scope(scope()->FinalizeBlockScope());
body = body_block;
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index d6d55af2b6..5399a93cbf 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -12,6 +12,7 @@
#include "src/ast/ast.h"
#include "src/ast/source-range-ast-visitor.h"
#include "src/bailout-reason.h"
+#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
#include "src/base/platform/platform.h"
#include "src/char-predicates-inl.h"
@@ -26,6 +27,7 @@
#include "src/runtime/runtime.h"
#include "src/string-stream.h"
#include "src/tracing/trace-event.h"
+#include "src/zone/zone-list-inl.h"
namespace v8 {
namespace internal {
@@ -179,7 +181,8 @@ bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
return true;
}
case Token::SHL: {
- int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1F);
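+      // ShlWithWraparound keeps folding e.g. '1 << 31' free of the C++
+      // undefined behaviour of overflowing a signed left shift.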
+ int value =
+ base::ShlWithWraparound(DoubleToInt32(x_val), DoubleToInt32(y_val));
*x = factory()->NewNumberLiteral(value, pos);
return true;
}
@@ -195,13 +198,9 @@ bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
*x = factory()->NewNumberLiteral(value, pos);
return true;
}
- case Token::EXP: {
- double value = Pow(x_val, y_val);
- int int_value = static_cast<int>(value);
- *x = factory()->NewNumberLiteral(
- int_value == value && value != -0.0 ? int_value : value, pos);
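+      // Fold '**' with the same ieee754 pow the runtime uses, so that a
+      // constant-folded result (e.g. '2 ** -2') matches unfolded evaluation.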
+ case Token::EXP:
+ *x = factory()->NewNumberLiteral(base::ieee754::pow(x_val, y_val), pos);
return true;
- }
default:
break;
}
@@ -287,8 +286,7 @@ Expression* Parser::NewSuperPropertyReference(int pos) {
AstSymbol::kHomeObjectSymbol, kNoSourcePosition);
Expression* home_object = factory()->NewProperty(
this_function_proxy, home_object_symbol_literal, pos);
- return factory()->NewSuperPropertyReference(
- ThisExpression(pos)->AsVariableProxy(), home_object, pos);
+ return factory()->NewSuperPropertyReference(home_object, pos);
}
Expression* Parser::NewSuperCallReference(int pos) {
@@ -296,9 +294,8 @@ Expression* Parser::NewSuperCallReference(int pos) {
NewUnresolved(ast_value_factory()->new_target_string(), pos);
VariableProxy* this_function_proxy =
NewUnresolved(ast_value_factory()->this_function_string(), pos);
- return factory()->NewSuperCallReference(
- ThisExpression(pos)->AsVariableProxy(), new_target_proxy,
- this_function_proxy, pos);
+ return factory()->NewSuperCallReference(new_target_proxy, this_function_proxy,
+ pos);
}
Expression* Parser::NewTargetExpression(int pos) {
@@ -453,6 +450,10 @@ void Parser::DeserializeScopeChain(
original_scope_ = Scope::DeserializeScopeChain(
isolate, zone(), *outer_scope_info, info->script_scope(),
ast_value_factory(), mode);
+ if (info->is_eval() || IsArrowFunction(info->function_kind())) {
+ original_scope_->GetReceiverScope()->DeserializeReceiver(
+ ast_value_factory());
+ }
}
}
@@ -536,8 +537,8 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
ResetFunctionLiteralId();
- DCHECK(info->function_literal_id() == FunctionLiteral::kIdTypeTopLevel ||
- info->function_literal_id() == FunctionLiteral::kIdTypeInvalid);
+ DCHECK(info->function_literal_id() == kFunctionLiteralIdTopLevel ||
+ info->function_literal_id() == kFunctionLiteralIdInvalid);
FunctionLiteral* result = nullptr;
{
@@ -786,6 +787,7 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
// TODO(adamk): We should construct this scope from the ScopeInfo.
DeclarationScope* scope = NewFunctionScope(kind);
+ scope->set_has_checked_syntax(true);
// This bit only needs to be explicitly set because we're
// not passing the ScopeInfo to the Scope constructor.
@@ -1015,8 +1017,8 @@ ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos) {
return nullptr;
}
- DeclareVariable(local_name, VariableMode::kConst, kNeedsInitialization,
- position());
+ DeclareUnboundVariable(local_name, VariableMode::kConst,
+ kNeedsInitialization, position());
NamedImport* import =
new (zone()) NamedImport(import_name, local_name, location);
@@ -1065,8 +1067,8 @@ void Parser::ParseImportDeclaration() {
if (tok != Token::MUL && tok != Token::LBRACE) {
import_default_binding = ParseNonRestrictedIdentifier();
import_default_binding_loc = scanner()->location();
- DeclareVariable(import_default_binding, VariableMode::kConst,
- kNeedsInitialization, pos);
+ DeclareUnboundVariable(import_default_binding, VariableMode::kConst,
+ kNeedsInitialization, pos);
}
// Parse NameSpaceImport or NamedImports if present.
@@ -1080,8 +1082,8 @@ void Parser::ParseImportDeclaration() {
ExpectContextualKeyword(ast_value_factory()->as_string());
module_namespace_binding = ParseNonRestrictedIdentifier();
module_namespace_binding_loc = scanner()->location();
- DeclareVariable(module_namespace_binding, VariableMode::kConst,
- kCreatedInitialized, pos);
+ DeclareUnboundVariable(module_namespace_binding, VariableMode::kConst,
+ kCreatedInitialized, pos);
break;
}
@@ -1171,13 +1173,13 @@ Statement* Parser::ParseExportDefault() {
SetFunctionName(value, ast_value_factory()->default_string());
const AstRawString* local_name =
- ast_value_factory()->star_default_star_string();
+ ast_value_factory()->dot_default_string();
local_names.Add(local_name, zone());
// It's fine to declare this as VariableMode::kConst because the user has
// no way of writing to it.
VariableProxy* proxy =
- DeclareVariable(local_name, VariableMode::kConst, pos);
+ DeclareBoundVariable(local_name, VariableMode::kConst, pos);
proxy->var()->set_initializer_position(position());
Assignment* assignment = factory()->NewAssignment(
@@ -1236,7 +1238,8 @@ void Parser::ParseExportStar() {
Scanner::Location export_name_loc = scanner()->location();
const AstRawString* local_name = NextInternalNamespaceExportName();
Scanner::Location local_name_loc = Scanner::Location::invalid();
- DeclareVariable(local_name, VariableMode::kConst, kCreatedInitialized, pos);
+ DeclareUnboundVariable(local_name, VariableMode::kConst, kCreatedInitialized,
+ pos);
ExpectContextualKeyword(ast_value_factory()->from_string());
Scanner::Location specifier_loc = scanner()->peek_location();
@@ -1350,28 +1353,42 @@ Statement* Parser::ParseExportDeclaration() {
return result;
}
-VariableProxy* Parser::DeclareVariable(const AstRawString* name,
- VariableMode mode, int pos) {
- return DeclareVariable(name, mode, Variable::DefaultInitializationFlag(mode),
- pos);
+void Parser::DeclareUnboundVariable(const AstRawString* name, VariableMode mode,
+ InitializationFlag init, int pos) {
+ bool was_added;
+ Variable* var = DeclareVariable(name, NORMAL_VARIABLE, mode, init, scope(),
+ &was_added, pos, end_position());
+ // The variable will be added to the declarations list, but since we are not
+ // binding it to anything, we can simply ignore it here.
+ USE(var);
}
-VariableProxy* Parser::DeclareVariable(const AstRawString* name,
- VariableMode mode,
- InitializationFlag init, int pos) {
+VariableProxy* Parser::DeclareBoundVariable(const AstRawString* name,
+ VariableMode mode, int pos) {
DCHECK_NOT_NULL(name);
VariableProxy* proxy =
factory()->NewVariableProxy(name, NORMAL_VARIABLE, position());
bool was_added;
- DeclareVariable(proxy, NORMAL_VARIABLE, mode, init, scope(), &was_added, pos,
- end_position());
+ Variable* var = DeclareVariable(name, NORMAL_VARIABLE, mode,
+ Variable::DefaultInitializationFlag(mode),
+ scope(), &was_added, pos, end_position());
+ proxy->BindTo(var);
return proxy;
}
-void Parser::DeclareVariable(VariableProxy* proxy, VariableKind kind,
- VariableMode mode, InitializationFlag init,
- Scope* scope, bool* was_added, int begin,
- int end) {
+void Parser::DeclareAndBindVariable(VariableProxy* proxy, VariableKind kind,
+ VariableMode mode, InitializationFlag init,
+ Scope* scope, bool* was_added, int begin,
+ int end) {
+ Variable* var = DeclareVariable(proxy->raw_name(), kind, mode, init, scope,
+ was_added, begin, end);
+ proxy->BindTo(var);
+}
+
+Variable* Parser::DeclareVariable(const AstRawString* name, VariableKind kind,
+ VariableMode mode, InitializationFlag init,
+ Scope* scope, bool* was_added, int begin,
+ int end) {
Declaration* declaration;
if (mode == VariableMode::kVar && !scope->is_declaration_scope()) {
DCHECK(scope->is_block_scope() || scope->is_with_scope());
@@ -1379,25 +1396,26 @@ void Parser::DeclareVariable(VariableProxy* proxy, VariableKind kind,
} else {
declaration = factory()->NewVariableDeclaration(begin);
}
- return Declare(declaration, proxy, kind, mode, init, scope, was_added, end);
+ Declare(declaration, name, kind, mode, init, scope, was_added, begin, end);
+ return declaration->var();
}
-void Parser::Declare(Declaration* declaration, VariableProxy* proxy,
+void Parser::Declare(Declaration* declaration, const AstRawString* name,
VariableKind variable_kind, VariableMode mode,
InitializationFlag init, Scope* scope, bool* was_added,
- int var_end_pos) {
+ int var_begin_pos, int var_end_pos) {
bool local_ok = true;
bool sloppy_mode_block_scope_function_redefinition = false;
scope->DeclareVariable(
- declaration, proxy, mode, variable_kind, init, was_added,
+ declaration, name, var_begin_pos, mode, variable_kind, init, was_added,
&sloppy_mode_block_scope_function_redefinition, &local_ok);
if (!local_ok) {
// If we only have the start position of a proxy, we can't highlight the
// whole variable name. Pretend its length is 1 so that we highlight at
// least the first character.
- Scanner::Location loc(proxy->position(), var_end_pos != kNoSourcePosition
- ? var_end_pos
- : proxy->position() + 1);
+ Scanner::Location loc(var_begin_pos, var_end_pos != kNoSourcePosition
+ ? var_end_pos
+ : var_begin_pos + 1);
if (variable_kind == PARAMETER_VARIABLE) {
ReportMessageAt(loc, MessageTemplate::kParamDupe);
} else {
@@ -1413,6 +1431,7 @@ Statement* Parser::BuildInitializationBlock(
DeclarationParsingResult* parsing_result) {
ScopedPtrList<Statement> statements(pointer_buffer());
for (const auto& declaration : parsing_result->declarations) {
+ if (!declaration.initializer) continue;
InitializeVariables(&statements, parsing_result->descriptor.kind,
&declaration);
}
@@ -1421,22 +1440,25 @@ Statement* Parser::BuildInitializationBlock(
Statement* Parser::DeclareFunction(const AstRawString* variable_name,
FunctionLiteral* function, VariableMode mode,
- int beg_pos, int end_pos,
- bool is_sloppy_block_function,
+ VariableKind kind, int beg_pos, int end_pos,
ZonePtrList<const AstRawString>* names) {
- VariableProxy* proxy =
- factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE, beg_pos);
- Declaration* declaration = factory()->NewFunctionDeclaration(
- function, is_sloppy_block_function, beg_pos);
+ Declaration* declaration =
+ factory()->NewFunctionDeclaration(function, beg_pos);
bool was_added;
- Declare(declaration, proxy, NORMAL_VARIABLE, mode, kCreatedInitialized,
- scope(), &was_added);
+ Declare(declaration, variable_name, kind, mode, kCreatedInitialized, scope(),
+ &was_added, beg_pos);
+ if (info()->coverage_enabled()) {
+ // Force the function to be allocated when collecting source coverage, so
+ // that even dead functions get source coverage data.
+ declaration->var()->set_is_used();
+ }
if (names) names->Add(variable_name, zone());
- if (is_sloppy_block_function) {
+ if (kind == SLOPPY_BLOCK_FUNCTION_VARIABLE) {
+ Token::Value init = loop_nesting_depth() > 0 ? Token::ASSIGN : Token::INIT;
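+    // Inside a loop the statement runs once per iteration, so the hoisted
+    // var is re-assigned (ASSIGN) rather than initialized once (INIT).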
SloppyBlockFunctionStatement* statement =
- factory()->NewSloppyBlockFunctionStatement(end_pos);
- GetDeclarationScope()->DeclareSloppyBlockFunction(variable_name, scope(),
- statement);
+ factory()->NewSloppyBlockFunctionStatement(end_pos, declaration->var(),
+ init);
+ GetDeclarationScope()->DeclareSloppyBlockFunction(statement);
return statement;
}
return factory()->EmptyStatement();
@@ -1447,7 +1469,7 @@ Statement* Parser::DeclareClass(const AstRawString* variable_name,
ZonePtrList<const AstRawString>* names,
int class_token_pos, int end_pos) {
VariableProxy* proxy =
- DeclareVariable(variable_name, VariableMode::kLet, class_token_pos);
+ DeclareBoundVariable(variable_name, VariableMode::kLet, class_token_pos);
proxy->var()->set_initializer_position(end_pos);
if (names) names->Add(variable_name, zone());
@@ -1467,7 +1489,7 @@ Statement* Parser::DeclareNative(const AstRawString* name, int pos) {
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
// other functions are set up when entering the surrounding scope.
- VariableProxy* proxy = DeclareVariable(name, VariableMode::kVar, pos);
+ VariableProxy* proxy = DeclareBoundVariable(name, VariableMode::kVar, pos);
NativeFunctionLiteral* lit =
factory()->NewNativeFunctionLiteral(name, extension_, kNoSourcePosition);
return factory()->NewExpressionStatement(
@@ -1549,8 +1571,11 @@ Expression* Parser::RewriteReturn(Expression* return_value, int pos) {
factory()->NewUndefinedLiteral(kNoSourcePosition), pos);
// is_undefined ? this : temp
+ // We don't need to call UseThis() since it's guaranteed to be called
+ // for derived constructors after parsing the constructor in
+ // ParseFunctionBody.
return_value =
- factory()->NewConditional(is_undefined, ThisExpression(pos),
+ factory()->NewConditional(is_undefined, factory()->ThisExpression(),
factory()->NewVariableProxy(temp), pos);
}
return return_value;
@@ -1594,15 +1619,27 @@ Statement* Parser::RewriteSwitchStatement(SwitchStatement* switch_statement,
return switch_block;
}
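+// Desugars one parsed declaration into its initializing assignment:
+// e.g. 'let x = f()' becomes the statement 'x = f()' with Token::INIT.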
+void Parser::InitializeVariables(
+ ScopedPtrList<Statement>* statements, VariableKind kind,
+ const DeclarationParsingResult::Declaration* declaration) {
+ if (has_error()) return;
+
+ DCHECK_NOT_NULL(declaration->initializer);
+
+ int pos = declaration->value_beg_pos;
+ if (pos == kNoSourcePosition) {
+ pos = declaration->initializer->position();
+ }
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT, declaration->pattern, declaration->initializer, pos);
+ statements->Add(factory()->NewExpressionStatement(assignment, pos));
+}
+
Block* Parser::RewriteCatchPattern(CatchInfo* catch_info) {
DCHECK_NOT_NULL(catch_info->pattern);
- // Initializer position for variables declared by the pattern.
- const int initializer_position = position();
-
DeclarationParsingResult::Declaration decl(
- catch_info->pattern, initializer_position,
- factory()->NewVariableProxy(catch_info->variable));
+ catch_info->pattern, factory()->NewVariableProxy(catch_info->variable));
ScopedPtrList<Statement> init_statements(pointer_buffer());
InitializeVariables(&init_statements, NORMAL_VARIABLE, &decl);
@@ -1789,7 +1826,7 @@ Block* Parser::RewriteForVarInLegacy(const ForInfo& for_info) {
const DeclarationParsingResult::Declaration& decl =
for_info.parsing_result.declarations[0];
if (!IsLexicalVariableMode(for_info.parsing_result.descriptor.mode) &&
- decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
+ decl.initializer != nullptr && decl.pattern->IsVariableProxy()) {
++use_counts_[v8::Isolate::kForInInitializer];
const AstRawString* name = decl.pattern->AsVariableProxy()->raw_name();
VariableProxy* single_var = NewUnresolved(name);
@@ -1797,7 +1834,7 @@ Block* Parser::RewriteForVarInLegacy(const ForInfo& for_info) {
init_block->statements()->Add(
factory()->NewExpressionStatement(
factory()->NewAssignment(Token::ASSIGN, single_var,
- decl.initializer, kNoSourcePosition),
+ decl.initializer, decl.value_beg_pos),
kNoSourcePosition),
zone());
return init_block;
@@ -1827,7 +1864,8 @@ void Parser::DesugarBindingInForEachStatement(ForInfo* for_info,
for_info->parsing_result.declarations[0];
Variable* temp = NewTemporary(ast_value_factory()->dot_for_string());
ScopedPtrList<Statement> each_initialization_statements(pointer_buffer());
- decl.initializer = factory()->NewVariableProxy(temp);
+ DCHECK_IMPLIES(!has_error(), decl.pattern != nullptr);
+ decl.initializer = factory()->NewVariableProxy(temp, for_info->position);
InitializeVariables(&each_initialization_statements, NORMAL_VARIABLE, &decl);
*body_block = factory()->NewBlock(3, false);
@@ -1849,7 +1887,7 @@ Block* Parser::CreateForEachStatementTDZ(Block* init_block,
// TODO(adamk): This needs to be some sort of special
// INTERNAL variable that's invisible to the debugger
// but visible to everything else.
- VariableProxy* tdz_proxy = DeclareVariable(
+ VariableProxy* tdz_proxy = DeclareBoundVariable(
for_info.bound_names[i], VariableMode::kLet, kNoSourcePosition);
tdz_proxy->var()->set_initializer_position(position());
}
@@ -1898,7 +1936,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// }
DCHECK_GT(for_info.bound_names.length(), 0);
- ZonePtrList<Variable> temps(for_info.bound_names.length(), zone());
+ ScopedPtrList<Variable> temps(pointer_buffer());
Block* outer_block =
factory()->NewBlock(for_info.bound_names.length() + 4, false);
@@ -1919,7 +1957,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, kNoSourcePosition);
outer_block->statements()->Add(assignment_statement, zone());
- temps.Add(temp, zone());
+ temps.Add(temp);
}
Variable* first = nullptr;
@@ -1957,14 +1995,14 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Block* ignore_completion_block =
factory()->NewBlock(for_info.bound_names.length() + 3, true);
- ZonePtrList<Variable> inner_vars(for_info.bound_names.length(), zone());
+ ScopedPtrList<Variable> inner_vars(pointer_buffer());
// For each let variable x:
// make statement: let/const x = temp_x.
for (int i = 0; i < for_info.bound_names.length(); i++) {
- VariableProxy* proxy = DeclareVariable(
+ VariableProxy* proxy = DeclareBoundVariable(
for_info.bound_names[i], for_info.parsing_result.descriptor.mode,
kNoSourcePosition);
- inner_vars.Add(proxy->var(), zone());
+ inner_vars.Add(proxy->var());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
Assignment* assignment = factory()->NewAssignment(
Token::INIT, proxy, temp_proxy, kNoSourcePosition);
@@ -2175,8 +2213,7 @@ void Parser::AddArrowFunctionFormalParameters(
expr = assignment->target();
}
- AddFormalParameter(parameters, expr, initializer,
- end_pos, is_rest);
+ AddFormalParameter(parameters, expr, initializer, end_pos, is_rest);
}
void Parser::DeclareArrowFunctionFormalParameters(
@@ -2398,7 +2435,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_strict(language_mode)) {
CheckStrictOctalLiteral(scope->start_position(), scope->end_position());
}
- CheckConflictingVarDeclarations(scope);
FunctionLiteral::ParameterFlag duplicate_parameters =
has_duplicate_parameters ? FunctionLiteral::kHasDuplicateParameters
@@ -2510,65 +2546,6 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind,
return true;
}
-Statement* Parser::BuildAssertIsCoercible(Variable* var,
- ObjectLiteral* pattern) {
- // if (var === null || var === undefined)
- // throw /* type error kNonCoercible) */;
- auto source_position = pattern->position();
- const AstRawString* property = ast_value_factory()->empty_string();
- MessageTemplate msg = MessageTemplate::kNonCoercible;
- for (ObjectLiteralProperty* literal_property : *pattern->properties()) {
- Expression* key = literal_property->key();
- if (key->IsPropertyName()) {
- property = key->AsLiteral()->AsRawPropertyName();
- msg = MessageTemplate::kNonCoercibleWithProperty;
- source_position = key->position();
- break;
- }
- }
-
- Expression* condition = factory()->NewBinaryOperation(
- Token::OR,
- factory()->NewCompareOperation(
- Token::EQ_STRICT, factory()->NewVariableProxy(var),
- factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition),
- factory()->NewCompareOperation(
- Token::EQ_STRICT, factory()->NewVariableProxy(var),
- factory()->NewNullLiteral(kNoSourcePosition), kNoSourcePosition),
- kNoSourcePosition);
- Expression* throw_type_error =
- NewThrowTypeError(msg, property, source_position);
- IfStatement* if_statement = factory()->NewIfStatement(
- condition,
- factory()->NewExpressionStatement(throw_type_error, kNoSourcePosition),
- factory()->EmptyStatement(), kNoSourcePosition);
- return if_statement;
-}
-
-class InitializerRewriter final
- : public AstTraversalVisitor<InitializerRewriter> {
- public:
- InitializerRewriter(uintptr_t stack_limit, Expression* root, Parser* parser)
- : AstTraversalVisitor(stack_limit, root), parser_(parser) {}
-
- private:
- // This is required so that the overriden Visit* methods can be
- // called by the base class (template).
- friend class AstTraversalVisitor<InitializerRewriter>;
-
- // Code in function literals does not need to be eagerly rewritten, it will be
- // rewritten when scheduled.
- void VisitFunctionLiteral(FunctionLiteral* expr) {}
-
- Parser* parser_;
-};
-
-void Parser::RewriteParameterInitializer(Expression* expr) {
- if (has_error()) return;
- InitializerRewriter rewriter(stack_limit_, expr, this);
- rewriter.Run();
-}
-
Block* Parser::BuildParameterInitializationBlock(
const ParserFormalParameters& parameters) {
DCHECK(!parameters.is_simple);
@@ -2582,19 +2559,6 @@ Block* Parser::BuildParameterInitializationBlock(
if (parameter->initializer() != nullptr) {
// IS_UNDEFINED($param) ? initializer : $param
- if (parameter->initializer()->IsClassLiteral()) {
- // Initializers could have their own scopes. So set the scope
- // here if necessary.
- BlockState block_state(
- &scope_, parameter->initializer()->AsClassLiteral()->scope());
-
- // Ensure initializer is rewritten
- RewriteParameterInitializer(parameter->initializer());
- } else {
- // Ensure initializer is rewritten
- RewriteParameterInitializer(parameter->initializer());
- }
-
auto condition = factory()->NewCompareOperation(
Token::EQ_STRICT,
factory()->NewVariableProxy(parameters.scope->parameter(index)),
@@ -2617,12 +2581,13 @@ Block* Parser::BuildParameterInitializationBlock(
non_simple_param_init_statements.emplace(pointer_buffer());
param_init_statements = &non_simple_param_init_statements.value();
// Rewrite the outer initializer to point to param_scope
+ ReparentExpressionScope(stack_limit(), parameter->pattern, param_scope);
ReparentExpressionScope(stack_limit(), initial_value, param_scope);
}
BlockState block_state(&scope_, param_scope);
- DeclarationParsingResult::Declaration decl(
- parameter->pattern, parameter->initializer_end_position, initial_value);
+ DeclarationParsingResult::Declaration decl(parameter->pattern,
+ initial_value);
InitializeVariables(param_init_statements, PARAMETER_VARIABLE, &decl);
@@ -2787,7 +2752,7 @@ void Parser::DeclareClassVariable(const AstRawString* name,
if (name != nullptr) {
VariableProxy* proxy =
- DeclareVariable(name, VariableMode::kConst, class_token_pos);
+ DeclareBoundVariable(name, VariableMode::kConst, class_token_pos);
class_info->variable = proxy->var();
}
}
@@ -2797,7 +2762,7 @@ void Parser::DeclareClassVariable(const AstRawString* name,
// index in the AST, instead of storing the variable.
Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name) {
VariableProxy* proxy =
- DeclareVariable(name, VariableMode::kConst, kNoSourcePosition);
+ DeclareBoundVariable(name, VariableMode::kConst, kNoSourcePosition);
proxy->var()->ForceContextAllocation();
return proxy->var();
}
@@ -2923,21 +2888,6 @@ Expression* Parser::RewriteClassLiteral(Scope* block_scope,
return class_literal;
}
-void Parser::CheckConflictingVarDeclarations(Scope* scope) {
- if (has_error()) return;
- Declaration* decl = scope->CheckConflictingVarDeclarations();
- if (decl != nullptr) {
- // In ES6, conflicting variable bindings are early errors.
- const AstRawString* name = decl->var()->raw_name();
- int position = decl->position();
- Scanner::Location location =
- position == kNoSourcePosition
- ? Scanner::Location::invalid()
- : Scanner::Location(position, position + 1);
- ReportMessageAt(location, MessageTemplate::kVarRedeclaration, name);
- }
-}
-
bool Parser::IsPropertyWithPrivateFieldKey(Expression* expression) {
if (!expression->IsProperty()) return false;
Property* property = expression->AsProperty();
@@ -3101,13 +3051,11 @@ void Parser::AddTemplateSpan(TemplateLiteralState* state, bool should_cook,
}
}
-
void Parser::AddTemplateExpression(TemplateLiteralState* state,
Expression* expression) {
(*state)->AddExpression(expression, zone());
}
-
Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
Expression* tag) {
TemplateLiteral* lit = *state;
@@ -3177,7 +3125,7 @@ Expression* Parser::SpreadCall(Expression* function,
if (function->IsProperty()) {
// Method calls
if (function->AsProperty()->IsSuperAccess()) {
- Expression* home = ThisExpression(kNoSourcePosition);
+ Expression* home = ThisExpression();
args.Add(function);
args.Add(home);
} else {
@@ -3214,7 +3162,6 @@ Expression* Parser::SpreadCallNew(Expression* function,
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
}
-
void Parser::SetLanguageMode(Scope* scope, LanguageMode mode) {
v8::Isolate::UseCounterFeature feature;
if (is_sloppy(mode))
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index cc0ceb2607..9bf412f236 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -341,8 +341,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Statement* DeclareFunction(const AstRawString* variable_name,
FunctionLiteral* function, VariableMode mode,
- int beg_pos, int end_pos,
- bool is_sloppy_block_function,
+ VariableKind kind, int beg_pos, int end_pos,
ZonePtrList<const AstRawString>* names);
Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name);
FunctionLiteral* CreateInitializerFunction(
@@ -374,6 +373,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Scope* NewHiddenCatchScope();
+ bool HasCheckedSyntax() {
+ return scope()->GetDeclarationScope()->has_checked_syntax();
+ }
+
// PatternRewriter and associated methods defined in pattern-rewriter.cc.
friend class PatternRewriter;
void InitializeVariables(
@@ -401,17 +404,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return object_literal;
}
- // Check if the scope has conflicting var/let declarations from different
- // scopes. This covers for example
- //
- // function f() { { { var x; } let x; } }
- // function g() { { var x; let x; } }
- //
- // The var declarations are hoisted to the function scope, but originate from
- // a scope where the name has also been let bound or the var declaration is
- // hoisted over such a scope.
- void CheckConflictingVarDeclarations(Scope* scope);
-
bool IsPropertyWithPrivateFieldKey(Expression* property);
// Insert initializer statements for var-bindings shadowing parameter bindings
@@ -421,25 +413,29 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Implement sloppy block-scoped functions, ES2015 Annex B 3.3
void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope);
- VariableProxy* DeclareVariable(const AstRawString* name, VariableMode mode,
- int pos);
- VariableProxy* DeclareVariable(const AstRawString* name, VariableMode mode,
- InitializationFlag init, int pos);
- void DeclareVariable(VariableProxy* proxy, VariableKind kind,
- VariableMode mode, InitializationFlag init,
- Scope* declaration_scope, bool* added, int begin,
- int end = kNoSourcePosition);
- void Declare(Declaration* declaration, VariableProxy* proxy,
+ void DeclareUnboundVariable(const AstRawString* name, VariableMode mode,
+ InitializationFlag init, int pos);
+ V8_WARN_UNUSED_RESULT
+ VariableProxy* DeclareBoundVariable(const AstRawString* name,
+ VariableMode mode, int pos);
+ void DeclareAndBindVariable(VariableProxy* proxy, VariableKind kind,
+ VariableMode mode, InitializationFlag init,
+ Scope* declaration_scope, bool* was_added,
+ int begin, int end = kNoSourcePosition);
+ V8_WARN_UNUSED_RESULT
+ Variable* DeclareVariable(const AstRawString* name, VariableKind kind,
+ VariableMode mode, InitializationFlag init,
+ Scope* declaration_scope, bool* was_added,
+ int begin, int end = kNoSourcePosition);
+ void Declare(Declaration* declaration, const AstRawString* name,
VariableKind kind, VariableMode mode, InitializationFlag init,
- Scope* declaration_scope, bool* added,
+ Scope* declaration_scope, bool* was_added, int var_begin_pos,
int var_end_pos = kNoSourcePosition);
bool TargetStackContainsLabel(const AstRawString* label);
BreakableStatement* LookupBreakTarget(const AstRawString* label);
IterationStatement* LookupContinueTarget(const AstRawString* label);
- Statement* BuildAssertIsCoercible(Variable* var, ObjectLiteral* pattern);
-
// Factory methods.
FunctionLiteral* DefaultConstructor(const AstRawString* name, bool call_super,
int pos, int end_pos);
@@ -530,9 +526,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* RewriteSpreads(ArrayLiteral* lit);
- friend class InitializerRewriter;
- void RewriteParameterInitializer(Expression* expr);
-
Expression* BuildInitialYield(int pos, FunctionKind kind);
Assignment* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
@@ -541,8 +534,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
MessageTemplate message, const AstRawString* arg,
int pos);
- Statement* FinalizeForOfStatement(ForOfStatement* loop, Variable* completion,
- IteratorType type, int pos);
Statement* CheckCallable(Variable* var, Expression* error, int pos);
void RewriteAsyncFunctionBody(ScopedPtrList<Statement>* body, Block* block,
@@ -574,8 +565,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE static bool IsThisProperty(Expression* expression) {
DCHECK_NOT_NULL(expression);
Property* property = expression->AsProperty();
- return property != nullptr && property->obj()->IsVariableProxy() &&
- property->obj()->AsVariableProxy()->is_this();
+ return property != nullptr && property->obj()->IsThisExpression();
}
// This returns true if the expression is an identifier (wrapped
@@ -583,8 +573,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// has been converted to a variable proxy.
V8_INLINE static bool IsIdentifier(Expression* expression) {
VariableProxy* operand = expression->AsVariableProxy();
- return operand != nullptr && !operand->is_this() &&
- !operand->is_new_target();
+ return operand != nullptr && !operand->is_new_target();
}
V8_INLINE static const AstRawString* AsIdentifier(Expression* expression) {
@@ -633,11 +622,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return arg == nullptr || literal->AsRawString() == arg;
}
- V8_INLINE void GetDefaultStrings(
- const AstRawString** default_string,
- const AstRawString** star_default_star_string) {
+ V8_INLINE void GetDefaultStrings(const AstRawString** default_string,
+ const AstRawString** dot_default_string) {
*default_string = ast_value_factory()->default_string();
- *star_default_star_string = ast_value_factory()->star_default_star_string();
+ *dot_default_string = ast_value_factory()->dot_default_string();
}
// Functions for encapsulating the differences between parsing and preparsing;
@@ -654,7 +642,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
if (expression->IsPropertyName()) {
fni_.PushLiteralName(expression->AsLiteral()->AsRawPropertyName());
} else {
- fni_.PushLiteralName(ast_value_factory()->anonymous_function_string());
+ fni_.PushLiteralName(ast_value_factory()->computed_string());
}
}
@@ -788,6 +776,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return result;
}
+ V8_INLINE const AstRawString* GetIdentifier() const { return GetSymbol(); }
+
V8_INLINE const AstRawString* GetNextSymbol() const {
return scanner()->NextSymbol(ast_value_factory());
}
@@ -799,9 +789,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return ast_value_factory()->GetOneByteString(string);
}
- V8_INLINE Expression* ThisExpression(int pos = kNoSourcePosition) {
- return NewUnresolved(ast_value_factory()->this_string(), pos,
- THIS_VARIABLE);
+ class ThisExpression* ThisExpression() {
+ UseThis();
+ return factory()->ThisExpression();
}
Expression* NewSuperPropertyReference(int pos);
@@ -820,6 +810,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return expression_scope()->NewVariable(name, start_position);
}
+ V8_INLINE void DeclareIdentifier(const AstRawString* name,
+ int start_position) {
+ expression_scope()->Declare(name, start_position);
+ }
+
V8_INLINE Variable* DeclareCatchVariableName(Scope* scope,
const AstRawString* name) {
return scope->DeclareCatchVariableName(name);
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index 7ff080b2f9..eb17d17793 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -12,6 +12,7 @@
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/vm-state-inl.h"
+#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
deleted file mode 100644
index 0ef570ee52..0000000000
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ast/ast.h"
-#include "src/message-template.h"
-#include "src/objects-inl.h"
-#include "src/parsing/expression-scope-reparenter.h"
-#include "src/parsing/parser.h"
-
-namespace v8 {
-
-namespace internal {
-
-// An AST visitor which performs declaration and assignment related tasks,
-// particularly for destructuring patterns:
-//
-// 1. Declares variables from variable proxies (particularly for destructuring
-// declarations),
-// 2. Marks destructuring-assigned variable proxies as assigned, and
-// 3. Rewrites scopes for parameters containing a sloppy eval.
-//
-// Historically this also rewrote destructuring assignments/declarations as a
-// block of multiple assignments, hence the name; however, this is now done
-// during bytecode generation.
-//
-// TODO(leszeks): Rename or remove this class
-class PatternRewriter final : public AstVisitor<PatternRewriter> {
- public:
- typedef Parser::DeclarationDescriptor DeclarationDescriptor;
-
- static void InitializeVariables(
- Parser* parser, VariableKind kind,
- const Parser::DeclarationParsingResult::Declaration* declaration);
-
- private:
- PatternRewriter(Parser* parser, VariableKind kind, int initializer_position,
- bool declares_parameter_containing_sloppy_eval)
- : parser_(parser),
- initializer_position_(initializer_position),
- declares_parameter_containing_sloppy_eval_(
- declares_parameter_containing_sloppy_eval) {}
-
-#define DECLARE_VISIT(type) void Visit##type(v8::internal::type* node);
- // Visiting functions for AST nodes make this an AstVisitor.
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- Expression* Visit(Assignment* assign) {
- if (parser_->has_error()) return parser_->FailureExpression();
- DCHECK_EQ(Token::ASSIGN, assign->op());
-
- Expression* pattern = assign->target();
- if (pattern->IsObjectLiteral()) {
- VisitObjectLiteral(pattern->AsObjectLiteral());
- } else {
- DCHECK(pattern->IsArrayLiteral());
- VisitArrayLiteral(pattern->AsArrayLiteral());
- }
- return assign;
- }
-
- void RewriteParameterScopes(Expression* expr);
-
- Scope* scope() const { return parser_->scope(); }
-
- Parser* const parser_;
- const int initializer_position_;
- const bool declares_parameter_containing_sloppy_eval_;
-
- DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
-};
-
-void Parser::InitializeVariables(
- ScopedPtrList<Statement>* statements, VariableKind kind,
- const DeclarationParsingResult::Declaration* declaration) {
- if (has_error()) return;
-
- if (!declaration->initializer) {
- // The parameter scope is only a block scope if the initializer calls sloppy
- // eval. Since there is no initializer, we can't be calling sloppy eval.
- DCHECK_IMPLIES(kind == PARAMETER_VARIABLE, scope()->is_function_scope());
- return;
- }
-
- PatternRewriter::InitializeVariables(this, kind, declaration);
- int pos = declaration->value_beg_position;
- if (pos == kNoSourcePosition) {
- pos = declaration->initializer_position;
- }
- Assignment* assignment = factory()->NewAssignment(
- Token::INIT, declaration->pattern, declaration->initializer, pos);
- statements->Add(factory()->NewExpressionStatement(assignment, pos));
-}
-
-void PatternRewriter::InitializeVariables(
- Parser* parser, VariableKind kind,
- const Parser::DeclarationParsingResult::Declaration* declaration) {
- PatternRewriter rewriter(
- parser, kind, declaration->initializer_position,
- kind == PARAMETER_VARIABLE && parser->scope()->is_block_scope());
-
- rewriter.Visit(declaration->pattern);
-}
-
-void PatternRewriter::VisitVariableProxy(VariableProxy* proxy) {
- DCHECK(!parser_->has_error());
- Variable* var =
- proxy->is_resolved()
- ? proxy->var()
- : scope()->GetDeclarationScope()->LookupLocal(proxy->raw_name());
-
- DCHECK_NOT_NULL(var);
-
- DCHECK_NE(initializer_position_, kNoSourcePosition);
- var->set_initializer_position(initializer_position_);
-}
-
-// When an extra declaration scope needs to be inserted to account for
-// a sloppy eval in a default parameter or function body, the expressions
-// need to be in that new inner scope, which was added after initial
-// parsing.
-void PatternRewriter::RewriteParameterScopes(Expression* expr) {
- if (declares_parameter_containing_sloppy_eval_) {
- ReparentExpressionScope(parser_->stack_limit(), expr, scope());
- }
-}
-
-void PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern) {
- for (ObjectLiteralProperty* property : *pattern->properties()) {
- Expression* key = property->key();
- if (!key->IsLiteral()) {
- // Computed property names contain expressions which might require
- // scope rewriting.
- RewriteParameterScopes(key);
- }
- Visit(property->value());
- }
-}
-
-void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
- for (Expression* value : *node->values()) {
- if (value->IsTheHoleLiteral()) continue;
- Visit(value);
- }
-}
-
-void PatternRewriter::VisitAssignment(Assignment* node) {
- DCHECK_EQ(Token::ASSIGN, node->op());
-
- // Initializer may have been parsed in the wrong scope.
- RewriteParameterScopes(node->value());
-
- Visit(node->target());
-}
-
-void PatternRewriter::VisitSpread(Spread* node) { Visit(node->expression()); }
-
-// =============== UNREACHABLE =============================
-
-#define NOT_A_PATTERN(Node) \
- void PatternRewriter::Visit##Node(v8::internal::Node*) { UNREACHABLE(); }
-
-NOT_A_PATTERN(BinaryOperation)
-NOT_A_PATTERN(NaryOperation)
-NOT_A_PATTERN(Block)
-NOT_A_PATTERN(BreakStatement)
-NOT_A_PATTERN(Call)
-NOT_A_PATTERN(CallNew)
-NOT_A_PATTERN(CallRuntime)
-NOT_A_PATTERN(ClassLiteral)
-NOT_A_PATTERN(CompareOperation)
-NOT_A_PATTERN(CompoundAssignment)
-NOT_A_PATTERN(Conditional)
-NOT_A_PATTERN(ContinueStatement)
-NOT_A_PATTERN(CountOperation)
-NOT_A_PATTERN(DebuggerStatement)
-NOT_A_PATTERN(DoExpression)
-NOT_A_PATTERN(DoWhileStatement)
-NOT_A_PATTERN(EmptyStatement)
-NOT_A_PATTERN(EmptyParentheses)
-NOT_A_PATTERN(ExpressionStatement)
-NOT_A_PATTERN(ForInStatement)
-NOT_A_PATTERN(ForOfStatement)
-NOT_A_PATTERN(ForStatement)
-NOT_A_PATTERN(FunctionDeclaration)
-NOT_A_PATTERN(FunctionLiteral)
-NOT_A_PATTERN(GetTemplateObject)
-NOT_A_PATTERN(IfStatement)
-NOT_A_PATTERN(ImportCallExpression)
-NOT_A_PATTERN(Literal)
-NOT_A_PATTERN(NativeFunctionLiteral)
-NOT_A_PATTERN(Property)
-NOT_A_PATTERN(RegExpLiteral)
-NOT_A_PATTERN(ResolvedProperty)
-NOT_A_PATTERN(ReturnStatement)
-NOT_A_PATTERN(SloppyBlockFunctionStatement)
-NOT_A_PATTERN(StoreInArrayLiteral)
-NOT_A_PATTERN(SuperPropertyReference)
-NOT_A_PATTERN(SuperCallReference)
-NOT_A_PATTERN(SwitchStatement)
-NOT_A_PATTERN(TemplateLiteral)
-NOT_A_PATTERN(ThisFunction)
-NOT_A_PATTERN(Throw)
-NOT_A_PATTERN(TryCatchStatement)
-NOT_A_PATTERN(TryFinallyStatement)
-NOT_A_PATTERN(UnaryOperation)
-NOT_A_PATTERN(VariableDeclaration)
-NOT_A_PATTERN(WhileStatement)
-NOT_A_PATTERN(WithStatement)
-NOT_A_PATTERN(Yield)
-NOT_A_PATTERN(YieldStar)
-NOT_A_PATTERN(Await)
-NOT_A_PATTERN(InitializeClassMembersStatement)
-
-#undef NOT_A_PATTERN
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/parsing/preparse-data-impl.h b/deps/v8/src/parsing/preparse-data-impl.h
index 7d1f0feed8..27b95bb6ce 100644
--- a/deps/v8/src/parsing/preparse-data-impl.h
+++ b/deps/v8/src/parsing/preparse-data-impl.h
@@ -59,7 +59,7 @@ class BaseConsumedPreparseData : public ConsumedPreparseData {
private:
ByteData* consumed_data_;
- DISALLOW_HEAP_ALLOCATION(no_gc);
+ DISALLOW_HEAP_ALLOCATION(no_gc)
};
void SetPosition(int position) {
@@ -92,8 +92,8 @@ class BaseConsumedPreparseData : public ConsumedPreparseData {
}
int32_t ReadVarint32() {
- DCHECK(HasRemainingBytes(kVarintMinSize));
- DCHECK_EQ(data_.get(index_++), kVarintMinSize);
+ DCHECK(HasRemainingBytes(kVarint32MinSize));
+ DCHECK_EQ(data_.get(index_++), kVarint32MinSize);
int32_t value = 0;
bool has_another_byte;
unsigned shift = 0;
@@ -103,7 +103,7 @@ class BaseConsumedPreparseData : public ConsumedPreparseData {
shift += 7;
has_another_byte = byte & 0x80;
} while (has_another_byte);
- DCHECK_EQ(data_.get(index_++), kVarintEndMarker);
+ DCHECK_EQ(data_.get(index_++), kVarint32EndMarker);
stored_quarters_ = 0;
return value;
}
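// A self-contained sketch of the varint32 coding that WriteVarint32 and
// ReadVarint32 share: 7 bits per byte, least-significant group first,
// 0x80 as the continuation bit. The debug-only kVarint32MinSize and
// kVarint32EndMarker bookkeeping is omitted; the names below are
// illustrative, not V8 API.
#include <cstdint>
#include <vector>

void EncodeVarint32(uint32_t value, std::vector<uint8_t>* out) {
  do {
    uint8_t next_byte = value & 0x7F;  // low 7 bits
    value >>= 7;
    if (value) next_byte |= 0x80;      // more groups follow
    out->push_back(next_byte);
  } while (value);
}

uint32_t DecodeVarint32(const std::vector<uint8_t>& data, size_t* index) {
  uint32_t value = 0;
  unsigned shift = 0;
  bool has_another_byte;
  do {
    uint8_t byte = data[(*index)++];
    value |= static_cast<uint32_t>(byte & 0x7F) << shift;
    shift += 7;
    has_another_byte = byte & 0x80;
  } while (has_another_byte);
  return value;
}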
@@ -213,6 +213,12 @@ class ZonePreparseData : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(ZonePreparseData);
};
+ZonePreparseData* PreparseDataBuilder::ByteData::CopyToZone(
+ Zone* zone, int children_length) {
+ DCHECK(is_finalized_);
+ return new (zone) ZonePreparseData(zone, &zone_byte_data_, children_length);
+}
+
// Implementation of ConsumedPreparseData for PreparseData
// serialized into zone memory.
class ZoneConsumedPreparseData final
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index 68986e451a..a38d280ce4 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -14,6 +14,7 @@
#include "src/parsing/parser.h"
#include "src/parsing/preparse-data-impl.h"
#include "src/parsing/preparser.h"
+#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
namespace v8 {
namespace internal {
@@ -84,10 +85,11 @@ STATIC_ASSERT(LanguageModeSize <= LanguageField::kNumValues);
*/
PreparseDataBuilder::PreparseDataBuilder(Zone* zone,
- PreparseDataBuilder* parent_builder)
+ PreparseDataBuilder* parent_builder,
+ std::vector<void*>* children_buffer)
: parent_(parent_builder),
byte_data_(),
- children_(zone),
+ children_buffer_(children_buffer),
function_scope_(nullptr),
num_inner_functions_(0),
num_inner_with_data_(0),
@@ -98,56 +100,81 @@ void PreparseDataBuilder::DataGatheringScope::Start(
DeclarationScope* function_scope) {
Zone* main_zone = preparser_->main_zone();
builder_ = new (main_zone)
- PreparseDataBuilder(main_zone, preparser_->preparse_data_builder());
+ PreparseDataBuilder(main_zone, preparser_->preparse_data_builder(),
+ preparser_->preparse_data_builder_buffer());
preparser_->set_preparse_data_builder(builder_);
function_scope->set_preparse_data_builder(builder_);
}
-PreparseDataBuilder::DataGatheringScope::~DataGatheringScope() {
- if (builder_ == nullptr) return;
- // Copy over the data from the buffer into the zone-allocated byte_data_
+void PreparseDataBuilder::DataGatheringScope::Close() {
PreparseDataBuilder* parent = builder_->parent_;
- if (parent != nullptr && builder_->HasDataForParent()) {
- parent->children_.push_back(builder_);
- }
preparser_->set_preparse_data_builder(parent);
+ builder_->FinalizeChildren(preparser_->main_zone());
+
+ if (parent == nullptr) return;
+ if (!builder_->HasDataForParent()) return;
+ parent->AddChild(builder_);
+}
+
+void PreparseDataBuilder::ByteData::Start(std::vector<uint8_t>* buffer) {
+ DCHECK(!is_finalized_);
+ byte_data_ = buffer;
+ DCHECK_EQ(byte_data_->size(), 0);
+ DCHECK_EQ(index_, 0);
+}
+
+void PreparseDataBuilder::ByteData::Finalize(Zone* zone) {
+ uint8_t* raw_zone_data =
+ static_cast<uint8_t*>(ZoneAllocationPolicy(zone).New(index_));
+ memcpy(raw_zone_data, byte_data_->data(), index_);
+ byte_data_->resize(0);
+ zone_byte_data_ = Vector<uint8_t>(raw_zone_data, index_);
+#ifdef DEBUG
+ is_finalized_ = true;
+#endif
+}
+
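// Finalize's copy step in isolation: the growable scratch vector is
// flattened into fixed arena storage, then cleared (capacity kept) so
// the next builder can reuse it. Arena here is a stand-in for
// v8::internal::Zone; a hedged sketch, not the real allocator.
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

struct Arena {
  uint8_t* New(size_t n) {
    storage_.emplace_back(n);
    return storage_.back().data();
  }
  std::vector<std::vector<uint8_t>> storage_;
};

std::pair<uint8_t*, size_t> FinalizeToArena(std::vector<uint8_t>* buffer,
                                            size_t used, Arena* arena) {
  uint8_t* raw = arena->New(used);
  if (used) std::memcpy(raw, buffer->data(), used);
  buffer->resize(0);  // shared scratch buffer: reset length, keep capacity
  return {raw, used};
}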
+void PreparseDataBuilder::ByteData::Reserve(size_t bytes) {
+ // Make sure we have at least {bytes} capacity left in the buffer_.
+ DCHECK_LE(length(), byte_data_->size());
+ size_t capacity = byte_data_->size() - length();
+ if (capacity >= bytes) return;
+ size_t delta = bytes - capacity;
+ byte_data_->insert(byte_data_->end(), delta, 0);
+}
+
+int PreparseDataBuilder::ByteData::length() const { return index_; }
+
+void PreparseDataBuilder::ByteData::Add(uint8_t byte) {
+ DCHECK_LE(0, index_);
+ DCHECK_LT(index_, byte_data_->size());
+ (*byte_data_)[index_++] = byte;
}
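// The Reserve/Add protocol above, reduced to a standalone stand-in:
// Reserve zero-fills the shared vector up front so that Add can write
// by index without resizing in the hot path. Hypothetical names, not
// the real ByteData.
#include <cstddef>
#include <cstdint>
#include <vector>

struct ScratchWriter {
  void Reserve(size_t bytes) {
    size_t capacity = buffer->size() - index;
    if (capacity < bytes) buffer->insert(buffer->end(), bytes - capacity, 0);
  }
  void Add(uint8_t byte) { (*buffer)[index++] = byte; }
  std::vector<uint8_t>* buffer;
  size_t index = 0;
};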
#ifdef DEBUG
void PreparseDataBuilder::ByteData::WriteUint32(uint32_t data) {
DCHECK(!is_finalized_);
- byte_data_->push_back(kUint32Size);
- byte_data_->push_back(data & 0xFF);
- byte_data_->push_back((data >> 8) & 0xFF);
- byte_data_->push_back((data >> 16) & 0xFF);
- byte_data_->push_back((data >> 24) & 0xFF);
+ Add(kUint32Size);
+ Add(data & 0xFF);
+ Add((data >> 8) & 0xFF);
+ Add((data >> 16) & 0xFF);
+ Add((data >> 24) & 0xFF);
free_quarters_in_last_byte_ = 0;
}
void PreparseDataBuilder::ByteData::SaveCurrentSizeAtFirstUint32() {
- CHECK(!is_finalized_);
- uint32_t data = static_cast<uint32_t>(byte_data_->size());
- uint8_t* start = &byte_data_->front();
- int i = 0;
- // Check that that position already holds an item of the expected size.
- CHECK_GE(byte_data_->size(), kUint32Size);
- CHECK_EQ(start[i++], kUint32Size);
- start[i++] = data & 0xFF;
- start[i++] = (data >> 8) & 0xFF;
- start[i++] = (data >> 16) & 0xFF;
- start[i++] = (data >> 24) & 0xFF;
-}
-
-int PreparseDataBuilder::ByteData::length() const {
- CHECK(!is_finalized_);
- return static_cast<int>(byte_data_->size());
+ int current_length = length();
+ index_ = 0;
+ CHECK_EQ(byte_data_->at(0), kUint32Size);
+ WriteUint32(current_length);
+ index_ = current_length;
}
#endif
void PreparseDataBuilder::ByteData::WriteVarint32(uint32_t data) {
#ifdef DEBUG
// Save expected item size in debug mode.
- byte_data_->push_back(kVarintMinSize);
+ Add(kVarint32MinSize);
#endif
// See ValueSerializer::WriteVarint.
do {
@@ -155,11 +182,10 @@ void PreparseDataBuilder::ByteData::WriteVarint32(uint32_t data) {
data >>= 7;
// Add continue bit.
if (data) next_byte |= 0x80;
- byte_data_->push_back(next_byte & 0xFF);
+ Add(next_byte & 0xFF);
} while (data);
#ifdef DEBUG
- // Save a varint marker in debug mode.
- byte_data_->push_back(kVarintEndMarker);
+ Add(kVarint32EndMarker);
#endif
free_quarters_in_last_byte_ = 0;
}
@@ -168,9 +194,9 @@ void PreparseDataBuilder::ByteData::WriteUint8(uint8_t data) {
DCHECK(!is_finalized_);
#ifdef DEBUG
// Save expected item size in debug mode.
- byte_data_->push_back(kUint8Size);
+ Add(kUint8Size);
#endif
- byte_data_->push_back(data);
+ Add(data);
free_quarters_in_last_byte_ = 0;
}
@@ -180,37 +206,17 @@ void PreparseDataBuilder::ByteData::WriteQuarter(uint8_t data) {
if (free_quarters_in_last_byte_ == 0) {
#ifdef DEBUG
// Save a marker in debug mode.
- byte_data_->push_back(kQuarterMarker);
+ Add(kQuarterMarker);
#endif
- byte_data_->push_back(0);
+ Add(0);
free_quarters_in_last_byte_ = 3;
} else {
--free_quarters_in_last_byte_;
}
uint8_t shift_amount = free_quarters_in_last_byte_ * 2;
- DCHECK_EQ(byte_data_->back() & (3 << shift_amount), 0);
- byte_data_->back() |= (data << shift_amount);
-}
-
-void PreparseDataBuilder::ByteData::Start(std::vector<uint8_t>* buffer) {
- DCHECK(!is_finalized_);
- byte_data_ = buffer;
- DCHECK_EQ(byte_data_->size(), 0);
-}
-
-void PreparseDataBuilder::ByteData::Finalize(Zone* zone) {
- int size = static_cast<int>(byte_data_->size());
- uint8_t* raw_zone_data =
- static_cast<uint8_t*>(ZoneAllocationPolicy(zone).New(size));
- memcpy(raw_zone_data, &byte_data_->front(), size);
-
- byte_data_->resize(0);
-
- zone_byte_data_ = Vector<uint8_t>(raw_zone_data, size);
-#ifdef DEBUG
- is_finalized_ = true;
-#endif
+ DCHECK_EQ(byte_data_->at(index_ - 1) & (3 << shift_amount), 0);
+ (*byte_data_)[index_ - 1] |= (data << shift_amount);
}
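// The quarter encoding in WriteQuarter, reduced to a standalone sketch:
// each byte holds four 2-bit "quarters", filled from the most
// significant bit pair downwards; free_quarters_ counts the slots left
// in the last byte. Illustrative names, not V8 API.
#include <cassert>
#include <cstdint>
#include <vector>

class QuarterWriter {
 public:
  void WriteQuarter(uint8_t data, std::vector<uint8_t>* out) {
    assert(data <= 3);  // must fit in 2 bits
    if (free_quarters_ == 0) {
      out->push_back(0);
      free_quarters_ = 3;
    } else {
      --free_quarters_;
    }
    uint8_t shift_amount = free_quarters_ * 2;
    out->back() |= data << shift_amount;
  }

 private:
  uint8_t free_quarters_ = 0;
};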
void PreparseDataBuilder::DataGatheringScope::SetSkippableFunction(
@@ -232,8 +238,23 @@ bool PreparseDataBuilder::HasDataForParent() const {
return HasData() || function_scope_ != nullptr;
}
+void PreparseDataBuilder::AddChild(PreparseDataBuilder* child) {
+ DCHECK(!finalized_children_);
+ children_buffer_.Add(child);
+}
+
+void PreparseDataBuilder::FinalizeChildren(Zone* zone) {
+ DCHECK(!finalized_children_);
+ Vector<PreparseDataBuilder*> children = children_buffer_.CopyTo(zone);
+ children_buffer_.Rewind();
+ children_ = children;
+#ifdef DEBUG
+ finalized_children_ = true;
+#endif
+}
+
bool PreparseDataBuilder::ScopeNeedsData(Scope* scope) {
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ if (scope->is_function_scope()) {
// Default constructors don't need data (they cannot contain inner functions
// defined by the user). Other functions do.
return !IsDefaultConstructor(scope->AsDeclarationScope()->function_kind());
@@ -282,9 +303,11 @@ void PreparseDataBuilder::SaveScopeAllocationData(DeclarationScope* scope,
#ifdef DEBUG
// Reserve Uint32 for scope_data_start debug info.
+ byte_data_.Reserve(kUint32Size);
byte_data_.WriteUint32(0);
#endif
-
+ byte_data_.Reserve(children_.size() * kSkippableFunctionMaxDataSize);
+ DCHECK(finalized_children_);
for (const auto& builder : children_) {
// Keep track of functions with inner data. {children_} also contains the
// builders that have no inner functions at all.
@@ -301,6 +324,7 @@ void PreparseDataBuilder::SaveScopeAllocationData(DeclarationScope* scope,
byte_data_.SaveCurrentSizeAtFirstUint32();
// For a data integrity check, write a value between data about skipped inner
// funcs and data about variables.
+ byte_data_.Reserve(kUint32Size * 3);
byte_data_.WriteUint32(kMagicValue);
byte_data_.WriteUint32(scope->start_position());
byte_data_.WriteUint32(scope->end_position());
@@ -316,6 +340,7 @@ void PreparseDataBuilder::SaveDataForScope(Scope* scope) {
DCHECK(ScopeNeedsData(scope));
#ifdef DEBUG
+ byte_data_.Reserve(kUint8Size);
byte_data_.WriteUint8(scope->scope_type());
#endif
@@ -324,9 +349,10 @@ void PreparseDataBuilder::SaveDataForScope(Scope* scope) {
scope->is_declaration_scope() &&
scope->AsDeclarationScope()->calls_sloppy_eval()) |
InnerScopeCallsEvalField::encode(scope->inner_scope_calls_eval());
+ byte_data_.Reserve(kUint8Size);
byte_data_.WriteUint8(eval);
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ if (scope->is_function_scope()) {
Variable* function = scope->AsDeclarationScope()->function_var();
if (function != nullptr) SaveDataForVariable(function);
}
@@ -343,6 +369,7 @@ void PreparseDataBuilder::SaveDataForVariable(Variable* var) {
// Store the variable name in debug mode; this way we can check that we
// restore data to the correct variable.
const AstRawString* name = var->raw_name();
+ byte_data_.Reserve(kUint32Size + (name->length() + 1) * kUint8Size);
byte_data_.WriteUint8(name->is_one_byte());
byte_data_.WriteUint32(name->length());
for (int i = 0; i < name->length(); ++i) {
@@ -354,6 +381,7 @@ void PreparseDataBuilder::SaveDataForVariable(Variable* var) {
var->maybe_assigned() == kMaybeAssigned) |
VariableContextAllocatedField::encode(
var->has_forced_context_allocation());
+ byte_data_.Reserve(kUint8Size);
byte_data_.WriteQuarter(variable_data);
}
@@ -363,7 +391,7 @@ void PreparseDataBuilder::SaveDataForInnerScopes(Scope* scope) {
// want to recurse here.
for (Scope* inner = scope->inner_scope(); inner != nullptr;
inner = inner->sibling()) {
- if (ScopeIsSkippableFunctionScope(inner)) {
+ if (inner->IsSkippableFunctionScope()) {
// Don't save data about function scopes, since they'll have their own
// PreparseDataBuilder where their data is saved.
DCHECK_NOT_NULL(inner->AsDeclarationScope()->preparse_data_builder());
@@ -374,17 +402,6 @@ void PreparseDataBuilder::SaveDataForInnerScopes(Scope* scope) {
}
}
-bool PreparseDataBuilder::ScopeIsSkippableFunctionScope(Scope* scope) {
- // Lazy non-arrow function scopes are skippable. Lazy functions are exactly
- // those Scopes which have their own PreparseDataBuilder object. This
- // logic ensures that the scope allocation data is consistent with the
- // skippable function data (both agree on where the lazy function boundaries
- // are).
- if (scope->scope_type() != ScopeType::FUNCTION_SCOPE) return false;
- DeclarationScope* declaration_scope = scope->AsDeclarationScope();
- return !declaration_scope->is_arrow_scope() &&
- declaration_scope->preparse_data_builder() != nullptr;
-}
Handle<PreparseData> PreparseDataBuilder::ByteData::CopyToHeap(
Isolate* isolate, int children_length) {
@@ -396,18 +413,13 @@ Handle<PreparseData> PreparseDataBuilder::ByteData::CopyToHeap(
return data;
}
-ZonePreparseData* PreparseDataBuilder::ByteData::CopyToZone(
- Zone* zone, int children_length) {
- DCHECK(is_finalized_);
- return new (zone) ZonePreparseData(zone, &zone_byte_data_, children_length);
-}
-
Handle<PreparseData> PreparseDataBuilder::Serialize(Isolate* isolate) {
DCHECK(HasData());
DCHECK(!ThisOrParentBailedOut());
Handle<PreparseData> data =
byte_data_.CopyToHeap(isolate, num_inner_with_data_);
int i = 0;
+ DCHECK(finalized_children_);
for (const auto& builder : children_) {
if (!builder->HasData()) continue;
Handle<PreparseData> child_data = builder->Serialize(isolate);
@@ -422,6 +434,7 @@ ZonePreparseData* PreparseDataBuilder::Serialize(Zone* zone) {
DCHECK(!ThisOrParentBailedOut());
ZonePreparseData* data = byte_data_.CopyToZone(zone, num_inner_with_data_);
int i = 0;
+ DCHECK(finalized_children_);
for (const auto& builder : children_) {
if (!builder->HasData()) continue;
ZonePreparseData* child = builder->Serialize(zone);
@@ -444,7 +457,7 @@ class BuilderProducedPreparseData final : public ProducedPreparseData {
ZonePreparseData* Serialize(Zone* zone) final {
return builder_->Serialize(zone);
- };
+ }
private:
PreparseDataBuilder* builder_;
@@ -463,7 +476,7 @@ class OnHeapProducedPreparseData final : public ProducedPreparseData {
ZonePreparseData* Serialize(Zone* zone) final {
// Not required.
UNREACHABLE();
- };
+ }
private:
Handle<PreparseData> data_;
@@ -477,7 +490,7 @@ class ZoneProducedPreparseData final : public ProducedPreparseData {
return data_->Serialize(isolate);
}
- ZonePreparseData* Serialize(Zone* zone) final { return data_; };
+ ZonePreparseData* Serialize(Zone* zone) final { return data_; }
private:
ZonePreparseData* data_;
@@ -575,7 +588,7 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(Scope* scope) {
if (ScopeCallsSloppyEvalField::decode(eval)) scope->RecordEvalCall();
if (InnerScopeCallsEvalField::decode(eval)) scope->RecordInnerScopeEvalCall();
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ if (scope->is_function_scope()) {
Variable* function = scope->AsDeclarationScope()->function_var();
if (function != nullptr) RestoreDataForVariable(function);
}
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index 0e08297c36..ea9bded9b9 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -8,6 +8,7 @@
#include "src/globals.h"
#include "src/handles.h"
#include "src/maybe-handles.h"
+#include "src/vector.h"
#include "src/zone/zone-chunk-list.h"
#include "src/zone/zone-containers.h"
@@ -68,20 +69,24 @@ struct PreparseByteDataConstants {
static constexpr int kMagicValue = 0xC0DE0DE;
static constexpr size_t kUint32Size = 5;
- static constexpr size_t kVarintMinSize = 3;
- static constexpr size_t kVarintEndMarker = 0xF1;
+ static constexpr size_t kVarint32MinSize = 3;
+ static constexpr size_t kVarint32MaxSize = 7;
+ static constexpr size_t kVarint32EndMarker = 0xF1;
static constexpr size_t kUint8Size = 2;
static constexpr size_t kQuarterMarker = 0xF2;
static constexpr size_t kPlaceholderSize = kUint32Size;
#else
static constexpr size_t kUint32Size = 4;
- static constexpr size_t kVarintMinSize = 1;
+ static constexpr size_t kVarint32MinSize = 1;
+ static constexpr size_t kVarint32MaxSize = 5;
static constexpr size_t kUint8Size = 1;
static constexpr size_t kPlaceholderSize = 0;
#endif
static const size_t kSkippableFunctionMinDataSize =
- 4 * kVarintMinSize + 1 * kUint8Size;
+ 4 * kVarint32MinSize + 1 * kUint8Size;
+ static const size_t kSkippableFunctionMaxDataSize =
+ 4 * kVarint32MaxSize + 1 * kUint8Size;
};
class PreparseDataBuilder : public ZoneObject,
@@ -89,7 +94,9 @@ class PreparseDataBuilder : public ZoneObject,
public:
// Create a PreparseDataBuilder object which will collect data as we
// parse.
- explicit PreparseDataBuilder(Zone* zone, PreparseDataBuilder* parent_builder);
+ explicit PreparseDataBuilder(Zone* zone, PreparseDataBuilder* parent_builder,
+ std::vector<void*>* children_buffer);
+ ~PreparseDataBuilder() {}
PreparseDataBuilder* parent() const { return parent_; }
@@ -104,9 +111,14 @@ class PreparseDataBuilder : public ZoneObject,
void Start(DeclarationScope* function_scope);
void SetSkippableFunction(DeclarationScope* function_scope,
int num_inner_functions);
- ~DataGatheringScope();
+ inline ~DataGatheringScope() {
+ if (builder_ == nullptr) return;
+ Close();
+ }
private:
+ void Close();
+
PreParser* preparser_;
PreparseDataBuilder* builder_;
@@ -115,7 +127,8 @@ class PreparseDataBuilder : public ZoneObject,
class ByteData : public ZoneObject, public PreparseByteDataConstants {
public:
- ByteData() : byte_data_(nullptr), free_quarters_in_last_byte_(0) {}
+ ByteData()
+ : byte_data_(nullptr), index_(0), free_quarters_in_last_byte_(0) {}
~ByteData() {}
@@ -123,7 +136,11 @@ class PreparseDataBuilder : public ZoneObject,
void Finalize(Zone* zone);
Handle<PreparseData> CopyToHeap(Isolate* isolate, int children_length);
- ZonePreparseData* CopyToZone(Zone* zone, int children_length);
+ inline ZonePreparseData* CopyToZone(Zone* zone, int children_length);
+
+ void Reserve(size_t bytes);
+ void Add(uint8_t byte);
+ int length() const;
void WriteVarint32(uint32_t data);
void WriteUint8(uint8_t data);
@@ -133,13 +150,15 @@ class PreparseDataBuilder : public ZoneObject,
void WriteUint32(uint32_t data);
// For overwriting previously written data at position 0.
void SaveCurrentSizeAtFirstUint32();
- int length() const;
#endif
private:
union {
- // Only used during construction (is_finalized_ == false).
- std::vector<uint8_t>* byte_data_;
+ struct {
+ // Only used during construction (is_finalized_ == false).
+ std::vector<uint8_t>* byte_data_;
+ int index_;
+ };
// Once the data is finalized, it lives in a Zone; this implies
// is_finalized_ == true.
Vector<uint8_t> zone_byte_data_;
@@ -180,7 +199,6 @@ class PreparseDataBuilder : public ZoneObject,
bool HasDataForParent() const;
static bool ScopeNeedsData(Scope* scope);
- static bool ScopeIsSkippableFunctionScope(Scope* scope);
void AddSkippableFunction(int start_position, int end_position,
int num_parameters, int num_inner_functions,
LanguageMode language_mode, bool has_data,
@@ -192,6 +210,9 @@ class PreparseDataBuilder : public ZoneObject,
Handle<PreparseData> Serialize(Isolate* isolate);
ZonePreparseData* Serialize(Zone* zone);
+ void FinalizeChildren(Zone* zone);
+ void AddChild(PreparseDataBuilder* child);
+
void SaveDataForScope(Scope* scope);
void SaveDataForVariable(Variable* var);
void SaveDataForInnerScopes(Scope* scope);
@@ -201,7 +222,10 @@ class PreparseDataBuilder : public ZoneObject,
PreparseDataBuilder* parent_;
ByteData byte_data_;
- ZoneChunkList<PreparseDataBuilder*> children_;
+ union {
+ ScopedPtrList<PreparseDataBuilder> children_buffer_;
+ Vector<PreparseDataBuilder*> children_;
+ };
DeclarationScope* function_scope_;
int num_inner_functions_;
@@ -211,6 +235,10 @@ class PreparseDataBuilder : public ZoneObject,
bool bailed_out_ : 1;
bool has_data_ : 1;
+#ifdef DEBUG
+ bool finalized_children_ = false;
+#endif
+
DISALLOW_COPY_AND_ASSIGN(PreparseDataBuilder);
};
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index ee496aad10..773453fbc7 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -14,15 +14,16 @@
#include "src/parsing/preparser.h"
#include "src/unicode.h"
#include "src/utils.h"
+#include "src/zone/zone-list-inl.h"
namespace v8 {
namespace internal {
namespace {
-PreParserIdentifier GetSymbolHelper(Scanner* scanner,
- const AstRawString* string,
- AstValueFactory* avf) {
+PreParserIdentifier GetIdentifierHelper(Scanner* scanner,
+ const AstRawString* string,
+ AstValueFactory* avf) {
// These symbols require slightly different treatment:
// - regular keywords (async, await, etc.; treated in 1st switch.)
// - 'contextual' keywords (and may contain escapes; treated in 2nd switch.)
@@ -57,10 +58,10 @@ PreParserIdentifier GetSymbolHelper(Scanner* scanner,
} // unnamed namespace
-PreParserIdentifier PreParser::GetSymbol() const {
+PreParserIdentifier PreParser::GetIdentifier() const {
const AstRawString* result = scanner()->CurrentSymbol(ast_value_factory());
PreParserIdentifier symbol =
- GetSymbolHelper(scanner(), result, ast_value_factory());
+ GetIdentifierHelper(scanner(), result, ast_value_factory());
DCHECK_NOT_NULL(result);
symbol.string_ = result;
return symbol;
@@ -89,6 +90,7 @@ PreParser::PreParseResult PreParser::PreParseProgram() {
int start_position = peek_position();
PreParserScopedStatementList body(pointer_buffer());
ParseStatementList(&body, Token::EOS);
+ CheckConflictingVarDeclarations(scope);
original_scope_ = nullptr;
if (stack_overflow()) return kPreParseStackOverflow;
if (is_strict(language_mode())) {
@@ -173,25 +175,29 @@ PreParser::PreParseResult PreParser::PreParseFunction(
}
bool allow_duplicate_parameters = false;
+ CheckConflictingVarDeclarations(inner_scope);
- if (formals.is_simple) {
- if (is_sloppy(function_scope->language_mode())) {
- function_scope->HoistSloppyBlockFunctions(nullptr);
- }
+ if (!has_error()) {
+ if (formals.is_simple) {
+ if (is_sloppy(function_scope->language_mode())) {
+ function_scope->HoistSloppyBlockFunctions(nullptr);
+ }
- allow_duplicate_parameters =
- is_sloppy(function_scope->language_mode()) && !IsConciseMethod(kind);
- } else {
- if (is_sloppy(inner_scope->language_mode())) {
- inner_scope->HoistSloppyBlockFunctions(nullptr);
- }
+ allow_duplicate_parameters =
+ is_sloppy(function_scope->language_mode()) && !IsConciseMethod(kind);
+ } else {
+ if (is_sloppy(inner_scope->language_mode())) {
+ inner_scope->HoistSloppyBlockFunctions(nullptr);
+ }
- SetLanguageMode(function_scope, inner_scope->language_mode());
- inner_scope->set_end_position(scanner()->peek_location().end_pos);
- if (inner_scope->FinalizeBlockScope() != nullptr) {
- const AstRawString* conflict = inner_scope->FindVariableDeclaredIn(
- function_scope, VariableMode::kLastLexicalVariableMode);
- if (conflict != nullptr) ReportVarRedeclarationIn(conflict, inner_scope);
+ SetLanguageMode(function_scope, inner_scope->language_mode());
+ inner_scope->set_end_position(scanner()->peek_location().end_pos);
+ if (inner_scope->FinalizeBlockScope() != nullptr) {
+ const AstRawString* conflict = inner_scope->FindVariableDeclaredIn(
+ function_scope, VariableMode::kLastLexicalVariableMode);
+ if (conflict != nullptr)
+ ReportVarRedeclarationIn(conflict, inner_scope);
+ }
}
}
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index d403854743..d48ef1bb7f 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -943,7 +943,10 @@ class PreParser : public ParserBase<PreParser> {
runtime_call_stats, logger, script_id,
parsing_module, parsing_on_main_thread),
use_counts_(nullptr),
- preparse_data_builder_(nullptr) {}
+ preparse_data_builder_(nullptr),
+ preparse_data_builder_buffer_() {
+ preparse_data_builder_buffer_.reserve(16);
+ }
static bool IsPreParser() { return true; }
@@ -977,6 +980,10 @@ class PreParser : public ParserBase<PreParser> {
preparse_data_builder_ = preparse_data_builder;
}
+ std::vector<void*>* preparse_data_builder_buffer() {
+ return &preparse_data_builder_buffer_;
+ }
+
private:
friend class i::ExpressionScope<ParserTypes<PreParser>>;
friend class i::VariableDeclarationParsingScope<ParserTypes<PreParser>>;
@@ -1021,6 +1028,8 @@ class PreParser : public ParserBase<PreParser> {
return literal;
}
+ bool HasCheckedSyntax() { return false; }
+
void ParseStatementListAndLogFunction(PreParserFormalParameters* formals);
struct TemplateLiteralState {};
@@ -1040,8 +1049,6 @@ class PreParser : public ParserBase<PreParser> {
const PreParserExpression& expression) {
return expression.IsPropertyWithPrivateFieldKey();
}
- V8_INLINE void CheckConflictingVarDeclarations(Scope* scope) {}
-
V8_INLINE void SetLanguageMode(Scope* scope, LanguageMode mode) {
scope->SetLanguageMode(mode);
}
@@ -1082,18 +1089,39 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Default();
}
- void DeclareVariable(VariableProxy* proxy, VariableKind kind,
- VariableMode mode, InitializationFlag init, Scope* scope,
- bool* was_added, int position) {
- DeclareVariableName(proxy->raw_name(), mode, scope, was_added, kind);
+ Variable* DeclareVariable(const AstRawString* name, VariableKind kind,
+ VariableMode mode, InitializationFlag init,
+ Scope* scope, bool* was_added, int position) {
+ return DeclareVariableName(name, mode, scope, was_added, position, kind);
+ }
+
+ void DeclareAndBindVariable(const VariableProxy* proxy, VariableKind kind,
+ VariableMode mode, InitializationFlag init,
+ Scope* scope, bool* was_added, int position) {
+ DeclareVariableName(proxy->raw_name(), mode, scope, was_added, position,
+ kind);
+ // Don't bother actually binding the proxy.
}
- void DeclareVariableName(const AstRawString* name, VariableMode mode,
- Scope* scope, bool* was_added,
- VariableKind kind = NORMAL_VARIABLE) {
- if (scope->DeclareVariableName(name, mode, was_added, kind) == nullptr) {
+ Variable* DeclareVariableName(const AstRawString* name, VariableMode mode,
+ Scope* scope, bool* was_added,
+ int position = kNoSourcePosition,
+ VariableKind kind = NORMAL_VARIABLE) {
+ Variable* var = scope->DeclareVariableName(name, mode, was_added, kind);
+ if (var == nullptr) {
ReportUnidentifiableError();
+ if (!IsLexicalVariableMode(mode)) scope = scope->GetDeclarationScope();
+ var = scope->LookupLocal(name);
+ } else if (var->scope() != scope) {
+ DCHECK_NE(kNoSourcePosition, position);
+ DCHECK_EQ(VariableMode::kVar, mode);
+ Declaration* nested_declaration =
+ factory()->ast_node_factory()->NewNestedVariableDeclaration(scope,
+ position);
+ nested_declaration->set_var(var);
+ var->scope()->declarations()->Add(nested_declaration);
}
+ return var;
}
V8_INLINE PreParserBlock RewriteCatchPattern(CatchInfo* catch_info) {
@@ -1158,23 +1186,21 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Default();
}
- V8_INLINE PreParserStatement
- DeclareFunction(const PreParserIdentifier& variable_name,
- const PreParserExpression& function, VariableMode mode,
- int beg_pos, int end_pos, bool is_sloppy_block_function,
- ZonePtrList<const AstRawString>* names) {
+ V8_INLINE PreParserStatement DeclareFunction(
+ const PreParserIdentifier& variable_name,
+ const PreParserExpression& function, VariableMode mode, VariableKind kind,
+ int beg_pos, int end_pos, ZonePtrList<const AstRawString>* names) {
DCHECK_NULL(names);
- if (variable_name.string_ != nullptr) {
- bool was_added;
- if (is_strict(language_mode())) {
- DeclareVariableName(variable_name.string_, mode, scope(), &was_added);
- } else {
- scope()->DeclareVariableName(variable_name.string_, mode, &was_added);
- }
- if (is_sloppy_block_function) {
- GetDeclarationScope()->DeclareSloppyBlockFunction(variable_name.string_,
- scope());
- }
+ bool was_added;
+ Variable* var = DeclareVariableName(variable_name.string_, mode, scope(),
+ &was_added, beg_pos, kind);
+ if (kind == SLOPPY_BLOCK_FUNCTION_VARIABLE) {
+ Token::Value init =
+ loop_nesting_depth() > 0 ? Token::ASSIGN : Token::INIT;
+ SloppyBlockFunctionStatement* statement =
+ factory()->ast_node_factory()->NewSloppyBlockFunctionStatement(
+ end_pos, var, init);
+ GetDeclarationScope()->DeclareSloppyBlockFunction(statement);
}
return Statement::Default();
}
@@ -1185,17 +1211,15 @@ class PreParser : public ParserBase<PreParser> {
int class_token_pos, int end_pos) {
// Preparser shouldn't be used in contexts where we need to track the names.
DCHECK_NULL(names);
- if (variable_name.string_ != nullptr) {
- bool was_added;
- DeclareVariableName(variable_name.string_, VariableMode::kLet, scope(),
- &was_added);
- }
+ bool was_added;
+ DeclareVariableName(variable_name.string_, VariableMode::kLet, scope(),
+ &was_added);
return PreParserStatement::Default();
}
V8_INLINE void DeclareClassVariable(const PreParserIdentifier& name,
ClassInfo* class_info,
int class_token_pos) {
- if (name.string_ != nullptr) {
+ if (!IsNull(name)) {
bool was_added;
DeclareVariableName(name.string_, VariableMode::kConst, scope(),
&was_added);
@@ -1217,7 +1241,7 @@ class PreParser : public ParserBase<PreParser> {
ClassFieldVariableName(ast_value_factory(),
class_info->computed_field_count),
VariableMode::kConst, scope(), &was_added);
- } else if (is_private && property_name.string_ != nullptr) {
+ } else if (is_private) {
bool was_added;
DeclareVariableName(property_name.string_, VariableMode::kConst, scope(),
&was_added);
@@ -1336,7 +1360,7 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE static void GetDefaultStrings(
PreParserIdentifier* default_string,
- PreParserIdentifier* star_default_star_string) {}
+ PreParserIdentifier* dot_default_string) {}
// Functions for encapsulating the differences between parsing and preparsing;
// operations interleaved with the recursive descent.
@@ -1497,11 +1521,17 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE PreParserIdentifier EmptyIdentifierString() const {
- return PreParserIdentifier::Default();
+ PreParserIdentifier result = PreParserIdentifier::Default();
+ result.string_ = ast_value_factory()->empty_string();
+ return result;
}
// Producing data during the recursive descent.
- PreParserIdentifier GetSymbol() const;
+ PreParserIdentifier GetSymbol() const {
+ return PreParserIdentifier::Default();
+ }
+
+ PreParserIdentifier GetIdentifier() const;
V8_INLINE PreParserIdentifier GetNextSymbol() const {
return PreParserIdentifier::Default();
@@ -1511,10 +1541,8 @@ class PreParser : public ParserBase<PreParser> {
return PreParserIdentifier::Default();
}
- V8_INLINE PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
- scope()->NewUnresolved(factory()->ast_node_factory(),
- ast_value_factory()->this_string(), pos,
- THIS_VARIABLE);
+ V8_INLINE PreParserExpression ThisExpression() {
+ UseThis();
return PreParserExpression::This();
}
@@ -1522,9 +1550,6 @@ class PreParser : public ParserBase<PreParser> {
scope()->NewUnresolved(factory()->ast_node_factory(),
ast_value_factory()->this_function_string(), pos,
NORMAL_VARIABLE);
- scope()->NewUnresolved(factory()->ast_node_factory(),
- ast_value_factory()->this_string(), pos,
- THIS_VARIABLE);
return PreParserExpression::Default();
}
@@ -1535,9 +1560,6 @@ class PreParser : public ParserBase<PreParser> {
scope()->NewUnresolved(factory()->ast_node_factory(),
ast_value_factory()->new_target_string(), pos,
NORMAL_VARIABLE);
- scope()->NewUnresolved(factory()->ast_node_factory(),
- ast_value_factory()->this_string(), pos,
- THIS_VARIABLE);
return PreParserExpression::SuperCallReference();
}
@@ -1558,12 +1580,15 @@ class PreParser : public ParserBase<PreParser> {
PreParserExpression ExpressionFromIdentifier(
const PreParserIdentifier& name, int start_position,
InferName infer = InferName::kYes) {
- if (name.string_ != nullptr) {
- expression_scope()->NewVariable(name.string_, start_position);
- }
+ expression_scope()->NewVariable(name.string_, start_position);
return PreParserExpression::FromIdentifier(name);
}
+ V8_INLINE void DeclareIdentifier(const PreParserIdentifier& name,
+ int start_position) {
+ expression_scope()->Declare(name.string_, start_position);
+ }
+
V8_INLINE Variable* DeclareCatchVariableName(
Scope* scope, const PreParserIdentifier& identifier) {
return scope->DeclareCatchVariableName(identifier.string_);
@@ -1640,6 +1665,7 @@ class PreParser : public ParserBase<PreParser> {
PreParserLogger log_;
PreparseDataBuilder* preparse_data_builder_;
+ std::vector<void*> preparse_data_builder_buffer_;
};
PreParserExpression PreParser::SpreadCall(const PreParserExpression& function,
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 5ba7b3ba51..5ab1937c3c 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -9,6 +9,7 @@
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
+#include "src/zone/zone-list-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 32dcaacbf5..981a93ee0c 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -514,23 +514,38 @@ bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
unibrow::Utf8::State state = chunk.start.state;
uint32_t incomplete_char = chunk.start.incomplete_char;
size_t it = current_.pos.bytes - chunk.start.bytes;
- size_t chars = chunk.start.chars;
- while (it < chunk.length && chars < position) {
- unibrow::uchar t = unibrow::Utf8::ValueOfIncremental(
- chunk.data[it], &it, &state, &incomplete_char);
- if (t == kUtf8Bom && current_.pos.chars == 0) {
- // BOM detected at beginning of the stream. Don't copy it.
- } else if (t != unibrow::Utf8::kIncomplete) {
+ const uint8_t* cursor = &chunk.data[it];
+ const uint8_t* end = &chunk.data[chunk.length];
+
+ size_t chars = current_.pos.chars;
+
+ if (V8_UNLIKELY(current_.pos.bytes < 3 && chars == 0)) {
+ while (cursor < end) {
+ unibrow::uchar t =
+ unibrow::Utf8::ValueOfIncremental(&cursor, &state, &incomplete_char);
+ if (t == unibrow::Utf8::kIncomplete) continue;
+ if (t != kUtf8Bom) {
+ chars++;
+ if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) chars++;
+ }
+ break;
+ }
+ }
+
+ while (cursor < end && chars < position) {
+ unibrow::uchar t =
+ unibrow::Utf8::ValueOfIncremental(&cursor, &state, &incomplete_char);
+ if (t != unibrow::Utf8::kIncomplete) {
chars++;
if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) chars++;
}
}
- current_.pos.bytes += it;
+ current_.pos.bytes = chunk.start.bytes + (cursor - chunk.data);
current_.pos.chars = chars;
current_.pos.incomplete_char = incomplete_char;
current_.pos.state = state;
- current_.chunk_no += (it == chunk.length);
+ current_.chunk_no += (cursor == end);
return current_.pos.chars == position;
}
@@ -544,8 +559,8 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
// The buffer_ is writable, but buffer_*_ members are const. So we get a
// non-const pointer into buffer that points to the same char as buffer_end_.
- uint16_t* cursor = buffer_ + (buffer_end_ - buffer_start_);
- DCHECK_EQ(cursor, buffer_end_);
+ uint16_t* output_cursor = buffer_ + (buffer_end_ - buffer_start_);
+ DCHECK_EQ(output_cursor, buffer_end_);
unibrow::Utf8::State state = current_.pos.state;
uint32_t incomplete_char = current_.pos.incomplete_char;
@@ -556,7 +571,7 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
unibrow::uchar t = unibrow::Utf8::ValueOfIncrementalFinish(&state);
if (t != unibrow::Utf8::kBufferEmpty) {
DCHECK_EQ(t, unibrow::Utf8::kBadChar);
- *cursor = static_cast<uc16>(t);
+ *output_cursor = static_cast<uc16>(t);
buffer_end_++;
current_.pos.chars++;
current_.pos.incomplete_char = 0;
@@ -566,30 +581,50 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
}
size_t it = current_.pos.bytes - chunk.start.bytes;
- while (it < chunk.length && cursor + 1 < buffer_start_ + kBufferSize) {
- unibrow::uchar t = unibrow::Utf8::ValueOfIncremental(
- chunk.data[it], &it, &state, &incomplete_char);
- if (V8_LIKELY(t < kUtf8Bom)) {
- *(cursor++) = static_cast<uc16>(t); // The by most frequent case.
+ const uint8_t* cursor = chunk.data + it;
+ const uint8_t* end = chunk.data + chunk.length;
+
+ // Deal with possible BOM.
+ if (V8_UNLIKELY(current_.pos.bytes < 3 && current_.pos.chars == 0)) {
+ while (cursor < end) {
+ unibrow::uchar t =
+ unibrow::Utf8::ValueOfIncremental(&cursor, &state, &incomplete_char);
+ if (V8_LIKELY(t < kUtf8Bom)) {
+ *(output_cursor++) = static_cast<uc16>(t); // The most frequent case.
+ } else if (t == unibrow::Utf8::kIncomplete) {
+ continue;
+ } else if (t == kUtf8Bom) {
+ // BOM detected at beginning of the stream. Don't copy it.
+ } else if (t <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ *(output_cursor++) = static_cast<uc16>(t);
+ } else {
+ *(output_cursor++) = unibrow::Utf16::LeadSurrogate(t);
+ *(output_cursor++) = unibrow::Utf16::TrailSurrogate(t);
+ }
+ break;
+ }
+ }
+
+ while (cursor < end && output_cursor + 1 < buffer_start_ + kBufferSize) {
+ unibrow::uchar t =
+ unibrow::Utf8::ValueOfIncremental(&cursor, &state, &incomplete_char);
+ if (V8_LIKELY(t <= unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+ *(output_cursor++) = static_cast<uc16>(t); // The most frequent case.
} else if (t == unibrow::Utf8::kIncomplete) {
continue;
- } else if (t == kUtf8Bom && current_.pos.bytes + it == 3) {
- // BOM detected at beginning of the stream. Don't copy it.
- } else if (t <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
- *(cursor++) = static_cast<uc16>(t);
} else {
- *(cursor++) = unibrow::Utf16::LeadSurrogate(t);
- *(cursor++) = unibrow::Utf16::TrailSurrogate(t);
+ *(output_cursor++) = unibrow::Utf16::LeadSurrogate(t);
+ *(output_cursor++) = unibrow::Utf16::TrailSurrogate(t);
}
}
- current_.pos.bytes = chunk.start.bytes + it;
- current_.pos.chars += (cursor - buffer_end_);
+ current_.pos.bytes = chunk.start.bytes + (cursor - chunk.data);
+ current_.pos.chars += (output_cursor - buffer_end_);
current_.pos.incomplete_char = incomplete_char;
current_.pos.state = state;
- current_.chunk_no += (it == chunk.length);
+ current_.chunk_no += (cursor == end);
- buffer_end_ = cursor;
+ buffer_end_ = output_cursor;
}
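// The lead/trail surrogate split used above is standard UTF-16: code
// points above 0xFFFF (kMaxNonSurrogateCharCode) are emitted as two
// 16-bit units. Minimal sketch with the Unicode constants inlined:
#include <cstdint>

void EmitUtf16(uint32_t code_point, uint16_t** output_cursor) {
  if (code_point <= 0xFFFF) {
    *(*output_cursor)++ = static_cast<uint16_t>(code_point);
  } else {
    uint32_t v = code_point - 0x10000;
    *(*output_cursor)++ = static_cast<uint16_t>(0xD800 + (v >> 10));   // lead
    *(*output_cursor)++ = static_cast<uint16_t>(0xDC00 + (v & 0x3FF)); // trail
  }
}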
bool Utf8ExternalStreamingStream::FetchChunk() {
diff --git a/deps/v8/src/parsing/scanner-inl.h b/deps/v8/src/parsing/scanner-inl.h
index 1e2cf9e447..86b3f3c606 100644
--- a/deps/v8/src/parsing/scanner-inl.h
+++ b/deps/v8/src/parsing/scanner-inl.h
@@ -42,6 +42,8 @@ namespace internal {
KEYWORD("finally", Token::FINALLY) \
KEYWORD("for", Token::FOR) \
KEYWORD("function", Token::FUNCTION) \
+ KEYWORD_GROUP('g') \
+ KEYWORD("get", Token::GET) \
KEYWORD_GROUP('i') \
KEYWORD("if", Token::IF) \
KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
@@ -62,6 +64,7 @@ namespace internal {
KEYWORD_GROUP('r') \
KEYWORD("return", Token::RETURN) \
KEYWORD_GROUP('s') \
+ KEYWORD("set", Token::SET) \
KEYWORD("static", Token::STATIC) \
KEYWORD("super", Token::SUPER) \
KEYWORD("switch", Token::SWITCH) \
@@ -188,7 +191,8 @@ enum class ScanFlags : uint8_t {
kCannotBeKeyword = 1 << 1,
kCannotBeKeywordStart = 1 << 2,
kStringTerminator = 1 << 3,
- kNeedsSlowPath = 1 << 4,
+ kIdentifierNeedsSlowPath = 1 << 4,
+ kMultilineCommentCharacterNeedsSlowPath = 1 << 5,
};
constexpr uint8_t GetScanFlags(char c) {
return
@@ -212,7 +216,14 @@ constexpr uint8_t GetScanFlags(char c) {
? static_cast<uint8_t>(ScanFlags::kStringTerminator)
: 0) |
// Escapes are processed on the slow path.
- (c == '\\' ? static_cast<uint8_t>(ScanFlags::kNeedsSlowPath) : 0);
+ (c == '\\' ? static_cast<uint8_t>(ScanFlags::kIdentifierNeedsSlowPath)
+ : 0) |
+ // Newlines and * are interesting characters for multiline comment
+ // scanning.
+ (c == '\n' || c == '\r' || c == '*'
+ ? static_cast<uint8_t>(
+ ScanFlags::kMultilineCommentCharacterNeedsSlowPath)
+ : 0);
}
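// character_scan_flags is this function evaluated once per ASCII code
// and stored in a table, so the scanner's hot loop is a single indexed
// load plus a bit test. A hedged sketch of building such a table in
// C++14 (flag names here are examples only):
#include <array>
#include <cstdint>
#include <utility>

constexpr uint8_t kIsStarOrNewline = 1 << 0;

constexpr uint8_t ExampleFlags(char c) {
  return (c == '*' || c == '\n' || c == '\r') ? kIsStarOrNewline : 0;
}

template <std::size_t... Is>
constexpr std::array<uint8_t, sizeof...(Is)> MakeTable(
    std::index_sequence<Is...>) {
  return {{ExampleFlags(static_cast<char>(Is))...}};
}

constexpr auto kExampleScanFlags = MakeTable(std::make_index_sequence<128>{});
static_assert(kExampleScanFlags['*'] & kIsStarOrNewline, "table lookup");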
inline bool TerminatesLiteral(uint8_t scan_flags) {
return (scan_flags & static_cast<uint8_t>(ScanFlags::kTerminatesLiteral));
@@ -220,8 +231,13 @@ inline bool TerminatesLiteral(uint8_t scan_flags) {
inline bool CanBeKeyword(uint8_t scan_flags) {
return !(scan_flags & static_cast<uint8_t>(ScanFlags::kCannotBeKeyword));
}
-inline bool NeedsSlowPath(uint8_t scan_flags) {
- return (scan_flags & static_cast<uint8_t>(ScanFlags::kNeedsSlowPath));
+inline bool IdentifierNeedsSlowPath(uint8_t scan_flags) {
+ return (scan_flags &
+ static_cast<uint8_t>(ScanFlags::kIdentifierNeedsSlowPath));
+}
+inline bool MultilineCommentCharacterNeedsSlowPath(uint8_t scan_flags) {
+ return (scan_flags & static_cast<uint8_t>(
+ ScanFlags::kMultilineCommentCharacterNeedsSlowPath));
}
inline bool MayTerminateString(uint8_t scan_flags) {
return (scan_flags & static_cast<uint8_t>(ScanFlags::kStringTerminator));
@@ -252,9 +268,9 @@ V8_INLINE Token::Value Scanner::ScanIdentifierOrKeywordInner() {
STATIC_ASSERT(static_cast<uint8_t>(ScanFlags::kCannotBeKeywordStart) ==
static_cast<uint8_t>(ScanFlags::kCannotBeKeyword) << 1);
scan_flags >>= 1;
- // Make sure the shifting above doesn't set NeedsSlowPath. Otherwise we'll
- // fall into the slow path after scanning the identifier.
- DCHECK(!NeedsSlowPath(scan_flags));
+ // Make sure the shifting above doesn't set IdentifierNeedsSlowPath.
+ // Otherwise we'll fall into the slow path after scanning the identifier.
+ DCHECK(!IdentifierNeedsSlowPath(scan_flags));
AddLiteralChar(static_cast<char>(c0_));
AdvanceUntil([this, &scan_flags](uc32 c0) {
if (V8_UNLIKELY(static_cast<uint32_t>(c0) > kMaxAscii)) {
@@ -262,7 +278,8 @@ V8_INLINE Token::Value Scanner::ScanIdentifierOrKeywordInner() {
// path.
// TODO(leszeks): This would be most efficient as a goto to the slow
// path, check codegen and maybe use a bool instead.
- scan_flags |= static_cast<uint8_t>(ScanFlags::kNeedsSlowPath);
+ scan_flags |=
+ static_cast<uint8_t>(ScanFlags::kIdentifierNeedsSlowPath);
return true;
}
uint8_t char_flags = character_scan_flags[c0];
@@ -275,7 +292,7 @@ V8_INLINE Token::Value Scanner::ScanIdentifierOrKeywordInner() {
}
});
- if (V8_LIKELY(!NeedsSlowPath(scan_flags))) {
+ if (V8_LIKELY(!IdentifierNeedsSlowPath(scan_flags))) {
if (!CanBeKeyword(scan_flags)) return Token::IDENTIFIER;
// Could be a keyword or identifier.
Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 43fc589e88..08e82bea17 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -73,7 +73,9 @@ int Scanner::LiteralBuffer::NewCapacity(int min_capacity) {
void Scanner::LiteralBuffer::ExpandBuffer() {
int min_capacity = Max(kInitialCapacity, backing_store_.length());
Vector<byte> new_store = Vector<byte>::New(NewCapacity(min_capacity));
- MemCopy(new_store.start(), backing_store_.start(), position_);
+ if (position_ > 0) {
+ MemCopy(new_store.start(), backing_store_.start(), position_);
+ }
backing_store_.Dispose();
backing_store_ = new_store;
}
@@ -275,11 +277,10 @@ Token::Value Scanner::SkipSingleLineComment() {
Token::Value Scanner::SkipSourceURLComment() {
TryToParseSourceURLComment();
- while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
- Advance();
+ if (unibrow::IsLineTerminator(c0_) || c0_ == kEndOfInput) {
+ return Token::WHITESPACE;
}
-
- return Token::WHITESPACE;
+ return SkipSingleLineComment();
}
void Scanner::TryToParseSourceURLComment() {
@@ -337,27 +338,46 @@ void Scanner::TryToParseSourceURLComment() {
Token::Value Scanner::SkipMultiLineComment() {
DCHECK_EQ(c0_, '*');
- Advance();
+ // Until we see the first newline, check for * and newline characters.
+ if (!next().after_line_terminator) {
+ do {
+ AdvanceUntil([](uc32 c0) {
+ if (V8_UNLIKELY(static_cast<uint32_t>(c0) > kMaxAscii)) {
+ return unibrow::IsLineTerminator(c0);
+ }
+ uint8_t char_flags = character_scan_flags[c0];
+ return MultilineCommentCharacterNeedsSlowPath(char_flags);
+ });
+
+ while (c0_ == '*') {
+ Advance();
+ if (c0_ == '/') {
+ Advance();
+ return Token::WHITESPACE;
+ }
+ }
+
+ if (unibrow::IsLineTerminator(c0_)) {
+ next().after_line_terminator = true;
+ break;
+ }
+ } while (c0_ != kEndOfInput);
+ }
+
+ // After we've seen a newline, simply try to find '*/'.
while (c0_ != kEndOfInput) {
- DCHECK(!unibrow::IsLineTerminator(kEndOfInput));
- if (!HasLineTerminatorBeforeNext() && unibrow::IsLineTerminator(c0_)) {
- // Following ECMA-262, section 7.4, a comment containing
- // a newline will make the comment count as a line-terminator.
- next().after_line_terminator = true;
- }
+ AdvanceUntil([](uc32 c0) { return c0 == '*'; });
- while (V8_UNLIKELY(c0_ == '*')) {
+ while (c0_ == '*') {
Advance();
if (c0_ == '/') {
Advance();
return Token::WHITESPACE;
}
}
- Advance();
}
- // Unterminated multi-line comment.
return Token::ILLEGAL;
}
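// The AdvanceUntil pattern used throughout the new scanner code, in
// isolation: run a tight cursor loop until a predicate flags a
// character that needs real handling. Free-function stand-in for the
// member template:
#include <cstdint>

template <typename Predicate>
const uint8_t* AdvanceUntil(const uint8_t* cursor, const uint8_t* end,
                            Predicate predicate) {
  while (cursor < end && !predicate(*cursor)) ++cursor;
  return cursor;
}

// e.g. skipping ordinary comment characters until the next '*':
//   cursor = AdvanceUntil(cursor, end,
//                         [](uint8_t c) { return c == '*'; });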
@@ -434,9 +454,6 @@ bool Scanner::ScanEscape() {
}
switch (c) {
- case '\'': // fall through
- case '"' : // fall through
- case '\\': break;
case 'b' : c = '\b'; break;
case 'f' : c = '\f'; break;
case 'n' : c = '\n'; break;
@@ -499,48 +516,42 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
Token::Value Scanner::ScanString() {
uc32 quote = c0_;
- Advance(); // consume quote
next().literal_chars.Start();
while (true) {
- if (V8_UNLIKELY(c0_ == kEndOfInput)) return Token::ILLEGAL;
- if ((V8_UNLIKELY(static_cast<uint32_t>(c0_) >= kMaxAscii) &&
- !unibrow::IsStringLiteralLineTerminator(c0_)) ||
- !MayTerminateString(character_scan_flags[c0_])) {
- AddLiteralChar(c0_);
- AdvanceUntil([this](uc32 c0) {
- if (V8_UNLIKELY(static_cast<uint32_t>(c0) > kMaxAscii)) {
- if (V8_UNLIKELY(unibrow::IsStringLiteralLineTerminator(c0))) {
- return true;
- }
- AddLiteralChar(c0);
- return false;
+ AdvanceUntil([this](uc32 c0) {
+ if (V8_UNLIKELY(static_cast<uint32_t>(c0) > kMaxAscii)) {
+ if (V8_UNLIKELY(unibrow::IsStringLiteralLineTerminator(c0))) {
+ return true;
}
- uint8_t char_flags = character_scan_flags[c0];
- if (MayTerminateString(char_flags)) return true;
AddLiteralChar(c0);
return false;
- });
- }
- if (c0_ == quote) {
- Advance();
- return Token::STRING;
- }
- if (c0_ == '\\') {
+ }
+ uint8_t char_flags = character_scan_flags[c0];
+ if (MayTerminateString(char_flags)) return true;
+ AddLiteralChar(c0);
+ return false;
+ });
+
+ while (c0_ == '\\') {
Advance();
// TODO(verwaest): Check whether we can remove the additional check.
if (V8_UNLIKELY(c0_ == kEndOfInput || !ScanEscape<false>())) {
return Token::ILLEGAL;
}
- continue;
}
+
+ if (c0_ == quote) {
+ Advance();
+ return Token::STRING;
+ }
+
if (V8_UNLIKELY(c0_ == kEndOfInput ||
unibrow::IsStringLiteralLineTerminator(c0_))) {
return Token::ILLEGAL;
}
- DCHECK_NE(quote, c0_);
- DCHECK((c0_ == '\'' || c0_ == '"'));
- AddLiteralCharAdvance();
+
+ AddLiteralChar(c0_);
}
}
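The rewritten ScanString above leans on a per-character flag table (character_scan_flags) so the hot loop does a single table load plus test instead of a comparison chain. A minimal sketch of that technique follows; the flag values and table contents are illustrative, not V8's real tables, which pack several more properties per character:

#include <cassert>
#include <cstdint>

// One bit per property; the real table also encodes keyword/identifier bits.
enum : uint8_t { kMayTerminateString = 1 << 0 };

constexpr uint8_t Flags(char c) {
  return (c == '"' || c == '\'' || c == '\\' || c == '\n' || c == '\r')
             ? kMayTerminateString
             : 0;
}

// 128-entry table covering all 7-bit codes, built at compile time (C++14).
struct Table {
  uint8_t flags[128];
  constexpr Table() : flags() {
    for (int i = 0; i < 128; i++) flags[i] = Flags(static_cast<char>(i));
  }
};
constexpr Table kScanFlags;

int main() {
  assert(kScanFlags.flags[static_cast<int>('"')] & kMayTerminateString);
  assert(!(kScanFlags.flags[static_cast<int>('a')] & kMayTerminateString));
}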
@@ -842,15 +853,15 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// either 0, 0exxx, 0Exxx, 0.xxx, a hex number, a binary number or
// an octal number.
- if (c0_ == 'x' || c0_ == 'X') {
+ if (AsciiAlphaToLower(c0_) == 'x') {
AddLiteralCharAdvance();
kind = HEX;
if (!ScanHexDigits()) return Token::ILLEGAL;
- } else if (c0_ == 'o' || c0_ == 'O') {
+ } else if (AsciiAlphaToLower(c0_) == 'o') {
AddLiteralCharAdvance();
kind = OCTAL;
if (!ScanOctalDigits()) return Token::ILLEGAL;
- } else if (c0_ == 'b' || c0_ == 'B') {
+ } else if (AsciiAlphaToLower(c0_) == 'b') {
AddLiteralCharAdvance();
kind = BINARY;
if (!ScanBinaryDigits()) return Token::ILLEGAL;
@@ -872,14 +883,12 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
}
// Parse decimal digits and allow trailing fractional part.
- if (kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO) {
+ if (IsDecimalNumberKind(kind)) {
// This is an optimization for parsing Decimal numbers as Smis.
if (at_start) {
uint64_t value = 0;
// scan subsequent decimal digits
- if (!ScanDecimalAsSmi(&value)) {
- return Token::ILLEGAL;
- }
+ if (!ScanDecimalAsSmi(&value)) return Token::ILLEGAL;
if (next().literal_chars.one_byte_literal().length() <= 10 &&
value <= Smi::kMaxValue && c0_ != '.' && !IsIdentifierStart(c0_)) {
@@ -906,8 +915,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
}
bool is_bigint = false;
- if (c0_ == 'n' && !seen_period &&
- (kind == DECIMAL || kind == HEX || kind == OCTAL || kind == BINARY)) {
+ if (c0_ == 'n' && !seen_period && IsValidBigIntKind(kind)) {
// Check that the literal is within our limits for BigInt length.
// For simplicity, use 4 bits per character to calculate the maximum
// allowed literal length.
@@ -921,12 +929,11 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
is_bigint = true;
Advance();
- } else if (c0_ == 'e' || c0_ == 'E') {
+ } else if (AsciiAlphaToLower(c0_) == 'e') {
// scan exponent, if any
DCHECK(kind != HEX); // 'e'/'E' must be scanned as part of the hex number
- if (!(kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO))
- return Token::ILLEGAL;
+ if (!IsDecimalNumberKind(kind)) return Token::ILLEGAL;
// scan exponent
AddLiteralCharAdvance();
@@ -1005,16 +1012,17 @@ Token::Value Scanner::ScanIdentifierOrKeywordInnerSlow(bool escaped,
Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
Token::Value token =
KeywordOrIdentifierToken(chars.start(), chars.length());
- /* TODO(adamk): YIELD should be handled specially. */
+ if (IsInRange(token, Token::IDENTIFIER, Token::YIELD)) return token;
+
if (token == Token::FUTURE_STRICT_RESERVED_WORD) {
if (escaped) return Token::ESCAPED_STRICT_RESERVED_WORD;
return token;
}
- if (token == Token::IDENTIFIER) return token;
if (!escaped) return token;
- if (token == Token::LET || token == Token::STATIC) {
+ STATIC_ASSERT(Token::LET + 1 == Token::STATIC);
+ if (IsInRange(token, Token::LET, Token::STATIC)) {
return Token::ESCAPED_STRICT_RESERVED_WORD;
}
return Token::ESCAPED_KEYWORD;
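The ScanNumber hunks above fold paired comparisons like c0_ == 'x' || c0_ == 'X' into one AsciiAlphaToLower test. A standalone sketch of the idiom, assuming ASCII input (names here are illustrative, not V8's):

#include <cassert>
#include <cstdint>

// OR-ing with 0x20 lower-cases an ASCII letter. Comparing the result
// against a lowercase letter is sound: exactly the matching upper/lowercase
// pair maps onto it, so one comparison replaces two.
inline uint32_t AsciiToLower(uint32_t c) { return c | 0x20; }

inline bool IsHexPrefix(uint32_t c) { return AsciiToLower(c) == 'x'; }

int main() {
  assert(IsHexPrefix('x') && IsHexPrefix('X'));
  assert(!IsHexPrefix('0') && !IsHexPrefix('b'));
}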
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 383159557b..368e150781 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -316,6 +316,10 @@ class Scanner {
return LiteralContainsEscapes(current());
}
+ bool next_literal_contains_escapes() const {
+ return LiteralContainsEscapes(next());
+ }
+
const AstRawString* CurrentSymbol(AstValueFactory* ast_value_factory) const;
const AstRawString* NextSymbol(AstValueFactory* ast_value_factory) const;
@@ -332,8 +336,8 @@ class Scanner {
}
template <size_t N>
- bool NextLiteralEquals(const char (&s)[N]) {
- DCHECK_EQ(Token::STRING, peek());
+ bool NextLiteralExactlyEquals(const char (&s)[N]) {
+ DCHECK(next().CanAccessLiteral());
// The token length is used to make sure the literal matches exactly,
// without treating escape sequences (e.g., "use \x73trict") or line
// continuations (e.g., "use \(newline) strict") as equivalent.
@@ -345,6 +349,16 @@ class Scanner {
return next.length() == N - 1 && strncmp(s, chars, N - 1) == 0;
}
+ template <size_t N>
+ bool CurrentLiteralEquals(const char (&s)[N]) {
+ DCHECK(current().CanAccessLiteral());
+ if (!is_literal_one_byte()) return false;
+
+ Vector<const uint8_t> current = literal_one_byte_string();
+ const char* chars = reinterpret_cast<const char*>(current.start());
+ return current.length() == N - 1 && strncmp(s, chars, N - 1) == 0;
+ }
+
// Returns the location of the last seen octal literal.
Location octal_position() const { return octal_pos_; }
void clear_octal_position() {
@@ -517,9 +531,8 @@ class Scanner {
bool CanAccessLiteral() const {
return token == Token::PRIVATE_NAME || token == Token::ILLEGAL ||
token == Token::UNINITIALIZED || token == Token::REGEXP_LITERAL ||
- token == Token::ESCAPED_KEYWORD ||
IsInRange(token, Token::NUMBER, Token::STRING) ||
- (Token::IsAnyIdentifier(token) && !Token::IsKeyword(token)) ||
+ Token::IsAnyIdentifier(token) || Token::IsKeyword(token) ||
IsInRange(token, Token::TEMPLATE_SPAN, Token::TEMPLATE_TAIL);
}
bool CanAccessRawLiteral() const {
@@ -530,14 +543,22 @@ class Scanner {
};
enum NumberKind {
+ IMPLICIT_OCTAL,
BINARY,
OCTAL,
- IMPLICIT_OCTAL,
HEX,
DECIMAL,
DECIMAL_WITH_LEADING_ZERO
};
+ inline bool IsValidBigIntKind(NumberKind kind) {
+ return IsInRange(kind, BINARY, DECIMAL);
+ }
+
+ inline bool IsDecimalNumberKind(NumberKind kind) {
+ return IsInRange(kind, DECIMAL, DECIMAL_WITH_LEADING_ZERO);
+ }
+
static const int kCharacterLookaheadBufferSize = 1;
static const int kMaxAscii = 127;
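Moving IMPLICIT_OCTAL to the front of NumberKind is what lets IsValidBigIntKind and IsDecimalNumberKind collapse to single range checks: the kinds each predicate accepts are now contiguous. A minimal sketch under that assumption, with copies of the reordered values:

#include <cassert>

enum NumberKind {
  IMPLICIT_OCTAL,
  BINARY,
  OCTAL,
  HEX,
  DECIMAL,
  DECIMAL_WITH_LEADING_ZERO
};

bool IsInRange(NumberKind k, NumberKind lo, NumberKind hi) {
  return lo <= k && k <= hi;
}

bool IsValidBigIntKind(NumberKind k) { return IsInRange(k, BINARY, DECIMAL); }
bool IsDecimalNumberKind(NumberKind k) {
  return IsInRange(k, DECIMAL, DECIMAL_WITH_LEADING_ZERO);
}

int main() {
  assert(IsValidBigIntKind(HEX));
  assert(!IsValidBigIntKind(IMPLICIT_OCTAL));  // excluded by the reorder
  assert(IsDecimalNumberKind(DECIMAL_WITH_LEADING_ZERO));
  assert(!IsDecimalNumberKind(OCTAL));
}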
diff --git a/deps/v8/src/parsing/token.cc b/deps/v8/src/parsing/token.cc
index ec4b623775..4dbae2d3f9 100644
--- a/deps/v8/src/parsing/token.cc
+++ b/deps/v8/src/parsing/token.cc
@@ -34,8 +34,7 @@ const int8_t Token::precedence_[2][NUM_TOKENS] = {{TOKEN_LIST(T1, T1)},
#undef T2
#undef T1
-#define KT(a, b, c) \
- IsPropertyNameBits::encode(Token::IsAnyIdentifier(a) || a == ESCAPED_KEYWORD),
+#define KT(a, b, c) IsPropertyNameBits::encode(Token::IsAnyIdentifier(a)),
#define KK(a, b, c) \
IsKeywordBits::encode(true) | IsPropertyNameBits::encode(true),
const uint8_t Token::token_flags[] = {TOKEN_LIST(KT, KK)};
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index c457d39e92..e1de2011ab 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -171,6 +171,8 @@ namespace internal {
/* BEGIN AnyIdentifier */ \
/* Identifiers (not keywords or future reserved words). */ \
T(IDENTIFIER, nullptr, 0) \
+ K(GET, "get", 0) \
+ K(SET, "set", 0) \
K(ASYNC, "async", 0) \
/* `await` is a reserved word in module code only */ \
K(AWAIT, "await", 0) \
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/pending-compilation-error-handler.cc
index 5119e06cc8..9bb10f9b67 100644
--- a/deps/v8/src/pending-compilation-error-handler.cc
+++ b/deps/v8/src/pending-compilation-error-handler.cc
@@ -120,18 +120,21 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
Handle<Name> key_start_pos = factory->error_start_pos_symbol();
Object::SetProperty(isolate, jserror, key_start_pos,
handle(Smi::FromInt(location.start_pos()), isolate),
- LanguageMode::kSloppy)
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.Check();
Handle<Name> key_end_pos = factory->error_end_pos_symbol();
Object::SetProperty(isolate, jserror, key_end_pos,
handle(Smi::FromInt(location.end_pos()), isolate),
- LanguageMode::kSloppy)
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.Check();
Handle<Name> key_script = factory->error_script_symbol();
Object::SetProperty(isolate, jserror, key_script, script,
- LanguageMode::kSloppy)
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.Check();
isolate->Throw(*error, &location);
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index 0701f1b75f..c30047abb3 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -225,10 +225,10 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode abstract_code,
const char* code_name = name;
uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->InstructionStart());
+
// Code generated by Turbofan will have the safepoint table directly after
// instructions. There is no need to record the safepoint table itself.
- uint32_t code_size = code->is_turbofanned() ? code->safepoint_table_offset()
- : code->InstructionSize();
+ uint32_t code_size = code->ExecutableInstructionSize();
// Unwinding info comes right after debug info.
if (FLAG_perf_prof_unwinding_info) LogWriteUnwindingInfo(code);
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 99e75c377c..31aaf7ae80 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -230,25 +230,6 @@ void RelocInfo::WipeOut() {
}
}
-template <typename ObjectVisitor>
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTargetMode(mode)) {
- visitor->VisitCodeTarget(host(), this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(host(), this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
- mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
- visitor->VisitInternalReference(host(), this);
- } else if (IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(host(), this);
- } else if (RelocInfo::IsOffHeapTarget(mode)) {
- visitor->VisitOffHeapTarget(host(), this);
- }
-}
-
Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
void Assembler::UntrackBranch() {
@@ -488,7 +469,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*(p + 3) = instr4;
*(p + 4) = instr5;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(p, 5 * kInstrSize);
+ FlushInstructionCache(p, 5 * kInstrSize);
}
#else
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -503,7 +484,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*p = instr1;
*(p + 1) = instr2;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(p, 2 * kInstrSize);
+ FlushInstructionCache(p, 2 * kInstrSize);
}
#endif
return;
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index db84384595..8d8e667738 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -245,7 +245,9 @@ Assembler::Assembler(const AssemblerOptions& options,
relocations_.reserve(128);
}
-void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
// Emit constant pool if necessary.
int constant_pool_size = EmitConstantPool();
@@ -256,19 +258,26 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_start_;
- desc->buffer_size = buffer_->size();
- desc->instr_size = pc_offset();
- desc->reloc_size =
- (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
- desc->constant_pool_size = constant_pool_size;
- desc->origin = this;
- desc->unwinding_info_size = 0;
- desc->unwinding_info = nullptr;
- desc->code_comments_size = code_comments_size;
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - constant_pool_size;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
}
-
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
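The new GetCode computes the CodeDesc layout backwards from the end of the emitted instruction stream, with each absent table collapsing onto the next boundary so every offset stays well defined. A sketch with made-up sizes showing how the offsets nest:

#include <cstdio>

int main() {
  const int instruction_size = 4096;  // pc_offset()
  const int code_comments_size = 64;
  const int constant_pool_size = 128;

  const int code_comments_offset = instruction_size - code_comments_size;
  const int constant_pool_offset = code_comments_offset - constant_pool_size;
  const int handler_table_offset = constant_pool_offset;    // kNoHandlerTable
  const int safepoint_table_offset = handler_table_offset;  // kNoSafepointTable

  std::printf("safepoints@%d handlers@%d pool@%d comments@%d\n",
              safepoint_table_offset, handler_table_offset,
              constant_pool_offset, code_comments_offset);
}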
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 46c810334f..0773484c79 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -55,6 +55,8 @@
namespace v8 {
namespace internal {
+class SafepointTableBuilder;
+
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -187,10 +189,19 @@ class Assembler : public AssemblerBase {
virtual ~Assembler() {}
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(Isolate* isolate, CodeDesc* desc);
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
+
+ void MaybeEmitOutOfLineConstantPool() { EmitConstantPool(); }
// Label operations & relative jumps (PPUM Appendix D)
//
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 4d79fad031..016bc71d26 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -2829,11 +2829,11 @@ class Instruction {
inline int RSValue() const { return Bits(25, 21); }
inline int RTValue() const { return Bits(25, 21); }
inline int RAValue() const { return Bits(20, 16); }
- DECLARE_STATIC_ACCESSOR(RAValue);
+ DECLARE_STATIC_ACCESSOR(RAValue)
inline int RBValue() const { return Bits(15, 11); }
- DECLARE_STATIC_ACCESSOR(RBValue);
+ DECLARE_STATIC_ACCESSOR(RBValue)
inline int RCValue() const { return Bits(10, 6); }
- DECLARE_STATIC_ACCESSOR(RCValue);
+ DECLARE_STATIC_ACCESSOR(RCValue)
inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); }
inline uint32_t OpcodeField() const {
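Dropping the trailing semicolons after DECLARE_STATIC_ACCESSOR (and the matching DEFINE_REGISTER_NAMES and RW_VAR_LIST changes below) follows from the macros expanding to complete declarations: the extra ';' leaves a stray empty declaration that -Wextra-semi builds flag. A toy illustration with a stand-in macro, not V8's actual one:

#define DECLARE_ACCESSOR(Name) \
  static int Name(int instr) { return instr & 0x1F; }

struct Instruction {
  DECLARE_ACCESSOR(RAValue)  // OK: the macro supplies the whole definition
  // DECLARE_ACCESSOR(RBValue);  // would leave a stray ';' after the body
};

int main() { return Instruction::RAValue(0x2A) == 0x0A ? 0 : 1; }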
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index b5640d75c8..20e395834c 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -99,6 +99,14 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r4 : function template info
+ // r5 : number of arguments (on the stack, not including receiver)
+ Register registers[] = {r4, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments (on the stack, not including receiver)
@@ -204,9 +212,10 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // kTargetContext
- r4, // kApiFunctionAddress
- r5, // kArgc
+ r4, // kApiFunctionAddress
+ r5, // kArgc
+ r6, // kCallData
+ r3, // kHolder
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 94bb328bc9..e5b2434755 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -16,6 +16,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -144,11 +145,6 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-void MacroAssembler::JumpToJSEntry(Register target) {
- Move(ip, target);
- Jump(ip);
-}
-
void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
Label skip;
@@ -916,7 +912,7 @@ void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
lwz(kConstantPoolRegister,
MemOperand(code_target_address,
- Code::kConstantPoolOffset - Code::kHeaderSize));
+ Code::kConstantPoolOffsetOffset - Code::kHeaderSize));
add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
}
@@ -1556,6 +1552,20 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
}
}
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit,
+ Label* on_in_range) {
+ Register scratch = r0;
+ if (lower_limit != 0) {
+ mov(scratch, Operand(lower_limit));
+ sub(scratch, value, scratch);
+ cmpli(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ mov(scratch, Operand(higher_limit));
+ cmpl(value, scratch);
+ }
+ ble(on_in_range);
+}
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DoubleRegister double_input,
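JumpIfIsInRange above biases the value by lower_limit so a single unsigned compare-and-branch tests membership in [lower_limit, higher_limit]; values below the bound wrap around and fail the comparison. A scalar model of that lowering:

#include <cassert>

bool IsInRange(unsigned value, unsigned lower, unsigned higher) {
  return value - lower <= higher - lower;  // one cmpli + ble in the asm above
}

int main() {
  assert(IsInRange(5, 3, 9));
  assert(!IsInRange(2, 3, 9));  // 2 - 3 wraps to a huge unsigned value
  assert(IsInRange(7, 0, 9));   // lower == 0 skips the bias entirely
}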
@@ -2733,60 +2743,29 @@ void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
}
}
+void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
-void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
- Representation r, Register scratch) {
- DCHECK(!r.IsDouble());
- if (r.IsInteger8()) {
- LoadByte(dst, mem, scratch);
- extsb(dst, dst);
- } else if (r.IsUInteger8()) {
- LoadByte(dst, mem, scratch);
- } else if (r.IsInteger16()) {
- LoadHalfWordArith(dst, mem, scratch);
- } else if (r.IsUInteger16()) {
- LoadHalfWord(dst, mem, scratch);
-#if V8_TARGET_ARCH_PPC64
- } else if (r.IsInteger32()) {
- LoadWordArith(dst, mem, scratch);
-#endif
- } else {
- LoadP(dst, mem, scratch);
- }
-}
-
-
-void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
- Representation r, Register scratch) {
- DCHECK(!r.IsDouble());
- if (r.IsInteger8() || r.IsUInteger8()) {
- StoreByte(src, mem, scratch);
- } else if (r.IsInteger16() || r.IsUInteger16()) {
- StoreHalfWord(src, mem, scratch);
-#if V8_TARGET_ARCH_PPC64
- } else if (r.IsInteger32()) {
- StoreWord(src, mem, scratch);
-#endif
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ lfdx(dst, MemOperand(base, scratch));
} else {
- if (r.IsHeapObject()) {
- AssertNotSmi(src);
- } else if (r.IsSmi()) {
- AssertSmi(src);
- }
- StoreP(src, mem, scratch);
+ lfd(dst, mem);
}
}
-void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
+void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
if (!is_int16(offset)) {
mov(scratch, Operand(offset));
- lfdx(dst, MemOperand(base, scratch));
+ lfsx(dst, MemOperand(base, scratch));
} else {
- lfd(dst, mem);
+ lfs(dst, mem);
}
}
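LoadDouble and the new LoadFloat32 pick between the D-form encoding, whose displacement is a signed 16-bit immediate, and the X-form (base register + index register) when the offset doesn't fit, materializing it into the scratch register first. A scalar model of the choice, with is_int16 mirroring the assembler predicate:

#include <cassert>
#include <cstdint>
#include <cstring>

bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }

// Returns the mnemonic the macro-assembler would pick for this offset.
const char* PickEncoding(int64_t offset) {
  return is_int16(offset) ? "lfd" : "lfdx";
}

int main() {
  assert(std::strcmp(PickEncoding(8), "lfd") == 0);         // fits D-form
  assert(std::strcmp(PickEncoding(1 << 20), "lfdx") == 0);  // needs X-form
}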
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index a85af61761..03e09066ce 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -153,6 +153,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadDouble(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
+ void LoadFloat32(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);
// load a literal signed int value <value> to GPR <dst>
@@ -654,34 +656,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// GC Support
- void IncrementalMarkingRecordWriteHelper(Register object, Register value,
- Register address);
-
- void JumpToJSEntry(Register target);
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object, Register scratch0, Register scratch1,
- Label* has_color, int first_bit, int second_bit);
-
- void JumpIfBlack(Register object, Register scratch0, Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Register scratch3, Label* value_is_white);
-
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
@@ -743,10 +717,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void LoadByte(Register dst, const MemOperand& mem, Register scratch);
void StoreByte(Register src, const MemOperand& mem, Register scratch);
- void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
- Register scratch = no_reg);
- void StoreRepresentation(Register src, const MemOperand& mem,
- Representation r, Register scratch = no_reg);
void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
@@ -844,6 +814,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bne(if_not_equal);
}
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range);
+
// Try to convert a double to a signed 32-bit integer.
// CR_EQ in cr7 is set and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
@@ -991,17 +966,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag);
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object, Register scratch,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
// -----------------------------------------------------------------------------
diff --git a/deps/v8/src/ppc/register-ppc.h b/deps/v8/src/ppc/register-ppc.h
index 11ddb17dc5..df6bcb6491 100644
--- a/deps/v8/src/ppc/register-ppc.h
+++ b/deps/v8/src/ppc/register-ppc.h
@@ -286,8 +286,8 @@ C_REGISTERS(DECLARE_C_REGISTER)
#undef DECLARE_C_REGISTER
// Define {RegisterName} methods for the register types.
-DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS);
-DEFINE_REGISTER_NAMES(DoubleRegister, DOUBLE_REGISTERS);
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(DoubleRegister, DOUBLE_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = r3;
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index b46610d592..0a3b9296fd 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -27,7 +27,7 @@ namespace v8 {
namespace internal {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
- Simulator::GlobalMonitor::Get);
+ Simulator::GlobalMonitor::Get)
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
@@ -883,7 +883,7 @@ void Simulator::TrashCallerSaveRegisters() {
return WriteEx(addr, value); \
}
-RW_VAR_LIST(GENERATE_RW_FUNC);
+RW_VAR_LIST(GENERATE_RW_FUNC)
#undef GENERATE_RW_FUNC
// Returns the limit of the stack area to enable checking for stack overflows.
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index e0f4eeae2b..02d1b5a350 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -295,7 +295,7 @@ class Simulator : public SimulatorBase {
inline void Write##size(uintptr_t addr, type value); \
inline int32_t WriteEx##size(uintptr_t addr, type value);
- RW_VAR_LIST(GENERATE_RW_FUNC);
+ RW_VAR_LIST(GENERATE_RW_FUNC)
#undef GENERATE_RW_FUNC
void Trace(Instruction* instr);
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index d01060543d..4f22d1b472 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -208,7 +208,7 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
// while we are capturing stack trace.
heap->CreateFillerObjectAt(addr, size, ClearRecordedSlots::kNo);
- Isolate* isolate = heap->isolate();
+ Isolate* isolate = Isolate::FromHeap(heap);
int length = 0;
JavaScriptFrameIterator it(isolate);
while (!it.done() && length < kMaxAllocationTraceLength) {
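Isolate::FromHeap(heap), used here and throughout the profiler changes below, recovers the owning Isolate from its embedded Heap by address arithmetic instead of a stored back pointer. A sketch of the idiom with an illustrative layout (the real Isolate is considerably larger):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Heap { int scratch; };

struct Isolate {
  int id;
  Heap heap;
  static Isolate* FromHeap(Heap* heap) {
    // The Heap is a plain member, so subtracting its offset yields the owner.
    return reinterpret_cast<Isolate*>(reinterpret_cast<uintptr_t>(heap) -
                                      offsetof(Isolate, heap));
  }
};

int main() {
  Isolate isolate{42, {0}};
  assert(Isolate::FromHeap(&isolate.heap) == &isolate);
  assert(Isolate::FromHeap(&isolate.heap)->id == 42);
}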
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index c3fba16879..013afed47b 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -8,7 +8,6 @@
#include <utility>
#include "src/base/lazy-instance.h"
-#include "src/base/platform/mutex.h"
#include "src/base/template-utils.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -17,6 +16,7 @@
#include "src/log.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/vm-state-inl.h"
+#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
@@ -107,6 +107,10 @@ void ProfilerEventsProcessor::AddSample(TickSample sample) {
void ProfilerEventsProcessor::StopSynchronously() {
if (!base::Relaxed_AtomicExchange(&running_, 0)) return;
+ {
+ base::MutexGuard guard(&running_mutex_);
+ running_cond_.NotifyOne();
+ }
Join();
}
@@ -178,6 +182,7 @@ SamplingEventsProcessor::ProcessOneSample() {
}
void SamplingEventsProcessor::Run() {
+ base::MutexGuard guard(&running_mutex_);
while (!!base::Relaxed_Load(&running_)) {
base::TimeTicks nextSampleTime =
base::TimeTicks::HighResolutionNow() + period_;
@@ -205,7 +210,18 @@ void SamplingEventsProcessor::Run() {
} else // NOLINT
#endif
{
- base::OS::Sleep(nextSampleTime - now);
+ // Allow another thread to interrupt the delay between samples in the
+ // event of profiler shutdown.
+ while (now < nextSampleTime &&
+ running_cond_.WaitFor(&running_mutex_, nextSampleTime - now)) {
+ // If true was returned, we got interrupted before the timeout
+ // elapsed. If this was not due to a change in running state, a
+ // spurious wakeup occurred (thus we should continue to wait).
+ if (!base::Relaxed_Load(&running_)) {
+ break;
+ }
+ now = base::TimeTicks::HighResolutionNow();
+ }
}
}
@@ -286,7 +302,7 @@ class CpuProfilersManager {
base::Mutex mutex_;
};
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(CpuProfilersManager, GetProfilersManager);
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(CpuProfilersManager, GetProfilersManager)
} // namespace
@@ -367,6 +383,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
processor_->AddCurrentStack();
return;
}
+ isolate_->wasm_engine()->EnableCodeLogging(isolate_);
Logger* logger = isolate_->logger();
// Disable logging when using the new implementation.
saved_is_logging_ = logger->is_logging();
@@ -380,7 +397,9 @@ void CpuProfiler::StartProcessorIfNotStarted() {
}
processor_.reset(new SamplingEventsProcessor(isolate_, generator_.get(),
sampling_interval_));
- if (!profiler_listener_) {
+ if (profiler_listener_) {
+ profiler_listener_->set_observer(processor_.get());
+ } else {
profiler_listener_.reset(new ProfilerListener(isolate_, processor_.get()));
}
logger->AddCodeEventListener(profiler_listener_.get());
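The Run/StopSynchronously changes replace a fixed OS::Sleep between samples with a condition-variable wait, so shutdown can interrupt the delay promptly while spurious wakeups just re-check running_ and keep waiting. A minimal std::thread rendering of the pattern, with standard primitives standing in for the v8::base ones:

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

std::atomic<bool> running{true};
std::mutex running_mutex;
std::condition_variable running_cond;

void SamplerLoop() {
  std::unique_lock<std::mutex> lock(running_mutex);
  while (running.load()) {
    // TakeSample();  // hypothetical sampling step
    auto deadline =
        std::chrono::steady_clock::now() + std::chrono::milliseconds(1);
    // The predicate guards against spurious wakeups: only a shutdown
    // request ends the wait early; plain timeouts fall through to resample.
    running_cond.wait_until(lock, deadline, [] { return !running.load(); });
  }
}

void StopSynchronously(std::thread& t) {
  running.store(false);
  { std::lock_guard<std::mutex> g(running_mutex); }  // order with the waiter
  running_cond.notify_one();
  t.join();
}

int main() {
  std::thread sampler(SamplerLoop);
  std::this_thread::sleep_for(std::chrono::milliseconds(5));
  StopSynchronously(sampler);
}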
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index ff5975a7a7..aa60c5172f 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -10,6 +10,8 @@
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
#include "src/isolate.h"
#include "src/libsampler/sampler.h"
@@ -163,6 +165,8 @@ class ProfilerEventsProcessor : public base::Thread, public CodeEventObserver {
ProfileGenerator* generator_;
base::Atomic32 running_;
+ base::ConditionVariable running_cond_;
+ base::Mutex running_mutex_;
LockedQueue<CodeEventsContainer> events_buffer_;
LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
std::atomic<unsigned> last_code_event_id_;
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 495baf9b34..8018280ff1 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -40,38 +40,6 @@ void HeapProfiler::RemoveSnapshot(HeapSnapshot* snapshot) {
}));
}
-
-void HeapProfiler::DefineWrapperClass(
- uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
- DCHECK_NE(class_id, v8::HeapProfiler::kPersistentHandleNoClassId);
- if (wrapper_callbacks_.size() <= class_id) {
- wrapper_callbacks_.insert(wrapper_callbacks_.end(),
- class_id - wrapper_callbacks_.size() + 1,
- nullptr);
- }
- wrapper_callbacks_[class_id] = callback;
-}
-
-v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
- uint16_t class_id, Handle<Object> wrapper) {
- if (wrapper_callbacks_.size() <= class_id) return nullptr;
- return wrapper_callbacks_[class_id](class_id, Utils::ToLocal(wrapper));
-}
-
-void HeapProfiler::SetGetRetainerInfosCallback(
- v8::HeapProfiler::GetRetainerInfosCallback callback) {
- get_retainer_infos_callback_ = callback;
-}
-
-v8::HeapProfiler::RetainerInfos HeapProfiler::GetRetainerInfos(
- Isolate* isolate) {
- v8::HeapProfiler::RetainerInfos infos;
- if (get_retainer_infos_callback_ != nullptr)
- infos =
- get_retainer_infos_callback_(reinterpret_cast<v8::Isolate*>(isolate));
- return infos;
-}
-
void HeapProfiler::AddBuildEmbedderGraphCallback(
v8::HeapProfiler::BuildEmbedderGraphCallback callback, void* data) {
build_embedder_graph_callbacks_.push_back({callback, data});
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index efeb8f769b..b946f62758 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -61,16 +61,6 @@ class HeapProfiler : public HeapObjectAllocationTracker {
void UpdateObjectSizeEvent(Address addr, int size) override;
- void DefineWrapperClass(
- uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
-
- v8::RetainedObjectInfo* ExecuteWrapperClassCallback(uint16_t class_id,
- Handle<Object> wrapper);
-
- void SetGetRetainerInfosCallback(
- v8::HeapProfiler::GetRetainerInfosCallback callback);
- v8::HeapProfiler::RetainerInfos GetRetainerInfos(Isolate* isolate);
-
void AddBuildEmbedderGraphCallback(
v8::HeapProfiler::BuildEmbedderGraphCallback callback, void* data);
void RemoveBuildEmbedderGraphCallback(
@@ -100,13 +90,10 @@ class HeapProfiler : public HeapObjectAllocationTracker {
std::unique_ptr<HeapObjectsMap> ids_;
std::vector<std::unique_ptr<HeapSnapshot>> snapshots_;
std::unique_ptr<StringsStorage> names_;
- std::vector<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
std::unique_ptr<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
base::Mutex profiler_mutex_;
std::unique_ptr<SamplingHeapProfiler> sampling_heap_profiler_;
- v8::HeapProfiler::GetRetainerInfosCallback get_retainer_infos_callback_ =
- nullptr;
std::vector<std::pair<v8::HeapProfiler::BuildEmbedderGraphCallback, void*>>
build_embedder_graph_callbacks_;
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 17daea1964..4d6aa029f7 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -14,6 +14,7 @@
#include "src/layout-descriptor.h"
#include "src/objects-body-descriptors.h"
#include "src/objects-inl.h"
+#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
#include "src/objects/feedback-cell-inl.h"
@@ -31,7 +32,8 @@
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
#include "src/prototype.h"
-#include "src/transitions.h"
+#include "src/transitions-inl.h"
+#include "src/vector.h"
#include "src/visitors.h"
namespace v8 {
@@ -181,10 +183,10 @@ const char* HeapEntry::TypeAsString() {
HeapSnapshot::HeapSnapshot(HeapProfiler* profiler) : profiler_(profiler) {
// It is very important to keep objects that form a heap snapshot
// as small as possible. Check assumptions about data structure sizes.
- STATIC_ASSERT((kTaggedSize == 4 && sizeof(HeapGraphEdge) == 12) ||
- (kTaggedSize == 8 && sizeof(HeapGraphEdge) == 24));
- STATIC_ASSERT((kTaggedSize == 4 && sizeof(HeapEntry) == 28) ||
- (kTaggedSize == 8 && sizeof(HeapEntry) == 40));
+ STATIC_ASSERT((kSystemPointerSize == 4 && sizeof(HeapGraphEdge) == 12) ||
+ (kSystemPointerSize == 8 && sizeof(HeapGraphEdge) == 24));
+ STATIC_ASSERT((kSystemPointerSize == 4 && sizeof(HeapEntry) == 28) ||
+ (kSystemPointerSize == 8 && sizeof(HeapEntry) == 40));
memset(&gc_subroot_entries_, 0, sizeof(gc_subroot_entries_));
}
@@ -489,20 +491,6 @@ void HeapObjectsMap::RemoveDeadEntries() {
entries_map_.occupancy());
}
-
-SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
- SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
- const char* label = info->GetLabel();
- id ^= StringHasher::HashSequentialString(label,
- static_cast<int>(strlen(label)),
- heap_->HashSeed());
- intptr_t element_count = info->GetElementCount();
- if (element_count != -1) {
- id ^= ComputeUnseededHash(static_cast<uint32_t>(element_count));
- }
- return id << 1;
-}
-
V8HeapExplorer::V8HeapExplorer(HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress,
v8::HeapProfiler::ObjectNameResolver* resolver)
@@ -802,8 +790,9 @@ void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
ExtractPropertyReferences(js_obj, entry);
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
- PrototypeIterator iter(heap_->isolate(), js_obj);
- ReadOnlyRoots roots(heap_);
+ Isolate* isolate = Isolate::FromHeap(heap_);
+ PrototypeIterator iter(isolate, js_obj);
+ ReadOnlyRoots roots(isolate);
SetPropertyReference(entry, roots.proto_string(), iter.GetCurrent());
if (obj->IsJSBoundFunction()) {
JSBoundFunction js_fun = JSBoundFunction::cast(obj);
@@ -824,7 +813,7 @@ void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
JSFunction js_fun = JSFunction::cast(js_obj);
if (js_fun->has_prototype_slot()) {
Object proto_or_map = js_fun->prototype_or_initial_map();
- if (!proto_or_map->IsTheHole(heap_->isolate())) {
+ if (!proto_or_map->IsTheHole(isolate)) {
if (!proto_or_map->IsMap()) {
SetPropertyReference(entry, roots.prototype_string(), proto_or_map,
nullptr,
@@ -1708,7 +1697,7 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
const char* V8HeapExplorer::GetStrongGcSubrootName(Object object) {
if (strong_gc_subroot_names_.empty()) {
- Isolate* isolate = heap_->isolate();
+ Isolate* isolate = Isolate::FromHeap(heap_);
for (RootIndex root_index = RootIndex::kFirstStrongOrReadOnlyRoot;
root_index <= RootIndex::kLastStrongOrReadOnlyRoot; ++root_index) {
const char* name = RootsTable::name(root_index);
@@ -1753,7 +1742,7 @@ class GlobalObjectsEnumerator : public RootVisitor {
// Modifies heap. Must not be run during heap traversal.
void V8HeapExplorer::TagGlobalObjects() {
- Isolate* isolate = heap_->isolate();
+ Isolate* isolate = Isolate::FromHeap(heap_);
HandleScope scope(isolate);
GlobalObjectsEnumerator enumerator;
isolate->global_handles()->IterateAllRoots(&enumerator);
@@ -1825,57 +1814,6 @@ class EmbedderGraphImpl : public EmbedderGraph {
std::vector<Edge> edges_;
};
-class GlobalHandlesExtractor : public PersistentHandleVisitor {
- public:
- explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
- : explorer_(explorer) {}
- ~GlobalHandlesExtractor() override = default;
- void VisitPersistentHandle(Persistent<Value>* value,
- uint16_t class_id) override {
- Handle<Object> object = Utils::OpenPersistent(value);
- explorer_->VisitSubtreeWrapper(object, class_id);
- }
-
- private:
- NativeObjectsExplorer* explorer_;
-};
-
-
-class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
- public:
- BasicHeapEntriesAllocator(
- HeapSnapshot* snapshot,
- HeapEntry::Type entries_type)
- : snapshot_(snapshot),
- names_(snapshot_->profiler()->names()),
- heap_object_map_(snapshot_->profiler()->heap_object_map()),
- entries_type_(entries_type) {
- }
- HeapEntry* AllocateEntry(HeapThing ptr) override;
- private:
- HeapSnapshot* snapshot_;
- StringsStorage* names_;
- HeapObjectsMap* heap_object_map_;
- HeapEntry::Type entries_type_;
-};
-
-
-HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
- v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
- intptr_t elements = info->GetElementCount();
- intptr_t size = info->GetSizeInBytes();
- const char* name = elements != -1
- ? names_->GetFormatted("%s / %" V8PRIdPTR " entries",
- info->GetLabel(), elements)
- : names_->GetCopy(info->GetLabel());
- return snapshot_->AddEntry(
- entries_type_,
- name,
- heap_object_map_->GenerateId(info),
- size != -1 ? static_cast<int>(size) : 0,
- 0);
-}
-
class EmbedderGraphEntriesAllocator : public HeapEntriesAllocator {
public:
explicit EmbedderGraphEntriesAllocator(HeapSnapshot* snapshot)
@@ -1927,125 +1865,15 @@ HeapEntry* EmbedderGraphEntriesAllocator::AllocateEntry(HeapThing ptr) {
static_cast<int>(size), 0);
}
-class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
- public:
- explicit NativeGroupRetainedObjectInfo(const char* label)
- : disposed_(false),
- hash_(reinterpret_cast<intptr_t>(label)),
- label_(label) {}
-
- ~NativeGroupRetainedObjectInfo() override = default;
- void Dispose() override {
- CHECK(!disposed_);
- disposed_ = true;
- delete this;
- }
- bool IsEquivalent(RetainedObjectInfo* other) override {
- return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel());
- }
- intptr_t GetHash() override { return hash_; }
- const char* GetLabel() override { return label_; }
-
- private:
- bool disposed_;
- intptr_t hash_;
- const char* label_;
-};
-
NativeObjectsExplorer::NativeObjectsExplorer(
HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
- : isolate_(snapshot->profiler()->heap_object_map()->heap()->isolate()),
+ : isolate_(
+ Isolate::FromHeap(snapshot->profiler()->heap_object_map()->heap())),
snapshot_(snapshot),
names_(snapshot_->profiler()->names()),
- embedder_queried_(false),
- native_groups_(0, SeededStringHasher(isolate_->heap()->HashSeed())),
- synthetic_entries_allocator_(
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic)),
- native_entries_allocator_(
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative)),
embedder_graph_entries_allocator_(
new EmbedderGraphEntriesAllocator(snapshot)) {}
-NativeObjectsExplorer::~NativeObjectsExplorer() {
- for (auto map_entry : objects_by_info_) {
- v8::RetainedObjectInfo* info = map_entry.first;
- info->Dispose();
- std::vector<HeapObject>* objects = map_entry.second;
- delete objects;
- }
- for (auto map_entry : native_groups_) {
- NativeGroupRetainedObjectInfo* info = map_entry.second;
- info->Dispose();
- }
-}
-
-
-int NativeObjectsExplorer::EstimateObjectsCount() {
- FillRetainedObjects();
- return static_cast<int>(objects_by_info_.size());
-}
-
-
-void NativeObjectsExplorer::FillRetainedObjects() {
- if (embedder_queried_) return;
- v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate_));
- v8::HeapProfiler::RetainerInfos infos =
- snapshot_->profiler()->GetRetainerInfos(isolate_);
- for (auto& pair : infos.groups) {
- std::vector<HeapObject>* info = GetVectorMaybeDisposeInfo(pair.first);
- for (auto& persistent : pair.second) {
- if (persistent->IsEmpty()) continue;
-
- Handle<Object> object = v8::Utils::OpenHandle(
- *persistent->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
- DCHECK(!object.is_null());
- HeapObject heap_object = HeapObject::cast(*object);
- info->push_back(heap_object);
- in_groups_.insert(heap_object);
- }
- }
-
- // Record objects that are not in ObjectGroups, but have class ID.
- GlobalHandlesExtractor extractor(this);
- isolate_->global_handles()->IterateAllRootsWithClassIds(&extractor);
-
- edges_ = std::move(infos.edges);
- embedder_queried_ = true;
-}
-
-void NativeObjectsExplorer::FillEdges() {
- v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate_));
- // Fill in actual edges found.
- for (auto& pair : edges_) {
- if (pair.first->IsEmpty() || pair.second->IsEmpty()) continue;
-
- Handle<Object> parent_object = v8::Utils::OpenHandle(
- *pair.first->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
- HeapObject parent = HeapObject::cast(*parent_object);
- HeapEntry* parent_entry = generator_->FindOrAddEntry(
- reinterpret_cast<void*>(parent.ptr()), native_entries_allocator_.get());
- DCHECK_NOT_NULL(parent_entry);
- Handle<Object> child_object = v8::Utils::OpenHandle(
- *pair.second->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
- HeapObject child = HeapObject::cast(*child_object);
- HeapEntry* child_entry = generator_->FindOrAddEntry(
- reinterpret_cast<void*>(child.ptr()), native_entries_allocator_.get());
- parent_entry->SetNamedReference(HeapGraphEdge::kInternal, "native",
- child_entry);
- }
- edges_.clear();
-}
-
-std::vector<HeapObject>* NativeObjectsExplorer::GetVectorMaybeDisposeInfo(
- v8::RetainedObjectInfo* info) {
- if (objects_by_info_.count(info)) {
- info->Dispose();
- } else {
- objects_by_info_[info] = new std::vector<HeapObject>();
- }
- return objects_by_info_[info];
-}
-
HeapEntry* NativeObjectsExplorer::EntryForEmbedderGraphNode(
EmbedderGraphImpl::Node* node) {
EmbedderGraphImpl::Node* wrapper = node->WrapperNode();
@@ -2104,81 +1932,11 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
from->SetNamedReference(HeapGraphEdge::kInternal, edge.name, to);
}
}
- } else {
- FillRetainedObjects();
- FillEdges();
- if (EstimateObjectsCount() > 0) {
- for (auto map_entry : objects_by_info_) {
- v8::RetainedObjectInfo* info = map_entry.first;
- SetNativeRootReference(info);
- std::vector<HeapObject>* objects = map_entry.second;
- for (HeapObject object : *objects) {
- SetWrapperNativeReferences(object, info);
- }
- }
- SetRootNativeRootsReference();
- }
}
generator_ = nullptr;
return true;
}
-NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
- const char* label) {
- const char* label_copy = names_->GetCopy(label);
- if (!native_groups_.count(label_copy)) {
- native_groups_[label_copy] = new NativeGroupRetainedObjectInfo(label);
- }
- return native_groups_[label_copy];
-}
-
-void NativeObjectsExplorer::SetNativeRootReference(
- v8::RetainedObjectInfo* info) {
- HeapEntry* child_entry =
- generator_->FindOrAddEntry(info, native_entries_allocator_.get());
- DCHECK_NOT_NULL(child_entry);
- NativeGroupRetainedObjectInfo* group_info =
- FindOrAddGroupInfo(info->GetGroupLabel());
- HeapEntry* group_entry = generator_->FindOrAddEntry(
- group_info, synthetic_entries_allocator_.get());
- group_entry->SetNamedAutoIndexReference(HeapGraphEdge::kInternal, nullptr,
- child_entry, names_);
-}
-
-void NativeObjectsExplorer::SetWrapperNativeReferences(
- HeapObject wrapper, v8::RetainedObjectInfo* info) {
- HeapEntry* wrapper_entry =
- generator_->FindEntry(reinterpret_cast<void*>(wrapper.ptr()));
- DCHECK_NOT_NULL(wrapper_entry);
- HeapEntry* info_entry =
- generator_->FindOrAddEntry(info, native_entries_allocator_.get());
- DCHECK_NOT_NULL(info_entry);
- wrapper_entry->SetNamedReference(HeapGraphEdge::kInternal, "native",
- info_entry);
- info_entry->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- wrapper_entry);
-}
-
-void NativeObjectsExplorer::SetRootNativeRootsReference() {
- for (auto map_entry : native_groups_) {
- NativeGroupRetainedObjectInfo* group_info = map_entry.second;
- HeapEntry* group_entry =
- generator_->FindOrAddEntry(group_info, native_entries_allocator_.get());
- DCHECK_NOT_NULL(group_entry);
- snapshot_->root()->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- group_entry);
- }
-}
-
-void NativeObjectsExplorer::VisitSubtreeWrapper(Handle<Object> p,
- uint16_t class_id) {
- if (in_groups_.count(*p)) return;
- v8::RetainedObjectInfo* info =
- isolate_->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
- if (info == nullptr) return;
- GetVectorMaybeDisposeInfo(info)->push_back(HeapObject::cast(*p));
-}
-
HeapSnapshotGenerator::HeapSnapshotGenerator(
HeapSnapshot* snapshot,
v8::ActivityControl* control,
@@ -2218,7 +1976,7 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kHeapProfiler);
- NullContextScope null_context_scope(heap_->isolate());
+ NullContextScope null_context_scope(Isolate::FromHeap(heap_));
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
@@ -2268,8 +2026,7 @@ void HeapSnapshotGenerator::InitProgressCounter() {
// Only the forced ProgressReport() at the end of GenerateSnapshot()
// should signal that the work is finished because signalling finished twice
// breaks the DevTools frontend.
- progress_total_ = v8_heap_explorer_.EstimateObjectsCount() +
- dom_explorer_.EstimateObjectsCount() + 1;
+ progress_total_ = v8_heap_explorer_.EstimateObjectsCount() + 1;
progress_counter_ = 0;
}
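With RetainedObjectInfo, the wrapper-class callbacks, and the object-group plumbing removed above, embedders describe native retainers solely through the EmbedderGraph callback this file keeps. A hedged sketch of that surviving API, written against the v8-profiler.h of this version; the node name and size are made up:

#include <memory>
#include "v8-profiler.h"

class NativeNode final : public v8::EmbedderGraph::Node {
 public:
  const char* Name() override { return "MyEmbedderObject"; }  // hypothetical
  size_t SizeInBytes() override { return 128; }
};

void BuildGraph(v8::Isolate* isolate, v8::EmbedderGraph* graph, void* data) {
  // The embedder adds its own nodes; edges to and from script objects go
  // through graph->V8Node(value).
  graph->AddNode(std::make_unique<NativeNode>());
}

// Registered once per isolate, e.g. during setup:
//   isolate->GetHeapProfiler()->AddBuildEmbedderGraphCallback(BuildGraph,
//                                                             nullptr);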
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 14cce75f90..cb1d4368e5 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -262,8 +262,6 @@ class HeapObjectsMap {
int64_t* timestamp_us);
const std::vector<TimeInterval>& samples() const { return time_intervals_; }
- SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
-
static const int kObjectIdStep = 2;
static const SnapshotObjectId kInternalRootObjectId;
static const SnapshotObjectId kGcRootsObjectId;
@@ -444,63 +442,22 @@ class V8HeapExplorer : public HeapEntriesAllocator {
DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
};
-
-class NativeGroupRetainedObjectInfo;
-
-
// An implementation of retained native objects extractor.
class NativeObjectsExplorer {
public:
NativeObjectsExplorer(HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress);
- virtual ~NativeObjectsExplorer();
- int EstimateObjectsCount();
bool IterateAndExtractReferences(HeapSnapshotGenerator* generator);
private:
- void FillRetainedObjects();
- void FillEdges();
- std::vector<HeapObject>* GetVectorMaybeDisposeInfo(
- v8::RetainedObjectInfo* info);
- void SetNativeRootReference(v8::RetainedObjectInfo* info);
- void SetRootNativeRootsReference();
- void SetWrapperNativeReferences(HeapObject wrapper,
- v8::RetainedObjectInfo* info);
- void VisitSubtreeWrapper(Handle<Object> p, uint16_t class_id);
-
- struct RetainedInfoHasher {
- std::size_t operator()(v8::RetainedObjectInfo* info) const {
- return ComputeUnseededHash(static_cast<uint32_t>(info->GetHash()));
- }
- };
- struct RetainedInfoEquals {
- bool operator()(v8::RetainedObjectInfo* info1,
- v8::RetainedObjectInfo* info2) const {
- return info1 == info2 || info1->IsEquivalent(info2);
- }
- };
-
- NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
-
HeapEntry* EntryForEmbedderGraphNode(EmbedderGraph::Node* node);
Isolate* isolate_;
HeapSnapshot* snapshot_;
StringsStorage* names_;
- bool embedder_queried_;
- std::unordered_set<Object, Object::Hasher> in_groups_;
- std::unordered_map<v8::RetainedObjectInfo*, std::vector<HeapObject>*,
- RetainedInfoHasher, RetainedInfoEquals>
- objects_by_info_;
- std::unordered_map<const char*, NativeGroupRetainedObjectInfo*,
- SeededStringHasher, StringEquals>
- native_groups_;
- std::unique_ptr<HeapEntriesAllocator> synthetic_entries_allocator_;
- std::unique_ptr<HeapEntriesAllocator> native_entries_allocator_;
std::unique_ptr<HeapEntriesAllocator> embedder_graph_entries_allocator_;
// Used during references extraction.
HeapSnapshotGenerator* generator_ = nullptr;
- v8::HeapProfiler::RetainerEdges edges_;
static HeapThing const kNativesRootObject;
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 8ce9fb392e..9c09679f22 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -493,8 +493,7 @@ void CpuProfile::AddPath(base::TimeTicks timestamp,
top_down_.AddPathFromEnd(path, src_line, update_stats, mode_);
if (record_samples_ && !timestamp.IsNull()) {
- timestamps_.push_back(timestamp);
- samples_.push_back(top_frame_node);
+ samples_.push_back({top_frame_node, timestamp, src_line});
}
const int kSamplesFlushCount = 100;
@@ -553,7 +552,7 @@ void CpuProfile::StreamPendingTraceEvents() {
if (streaming_next_sample_ != samples_.size()) {
value->BeginArray("samples");
for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
- value->AppendInteger(samples_[i]->id());
+ value->AppendInteger(samples_[i].node->id());
}
value->EndArray();
}
@@ -562,15 +561,24 @@ void CpuProfile::StreamPendingTraceEvents() {
if (streaming_next_sample_ != samples_.size()) {
value->BeginArray("timeDeltas");
base::TimeTicks lastTimestamp =
- streaming_next_sample_ ? timestamps_[streaming_next_sample_ - 1]
+ streaming_next_sample_ ? samples_[streaming_next_sample_ - 1].timestamp
: start_time();
- for (size_t i = streaming_next_sample_; i < timestamps_.size(); ++i) {
- value->AppendInteger(
- static_cast<int>((timestamps_[i] - lastTimestamp).InMicroseconds()));
- lastTimestamp = timestamps_[i];
+ for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
+ value->AppendInteger(static_cast<int>(
+ (samples_[i].timestamp - lastTimestamp).InMicroseconds()));
+ lastTimestamp = samples_[i].timestamp;
}
value->EndArray();
- DCHECK_EQ(samples_.size(), timestamps_.size());
+ bool has_non_zero_lines =
+ std::any_of(samples_.begin() + streaming_next_sample_, samples_.end(),
+ [](const SampleInfo& sample) { return sample.line != 0; });
+ if (has_non_zero_lines) {
+ value->BeginArray("lines");
+ for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
+ value->AppendInteger(samples_[i].line);
+ }
+ value->EndArray();
+ }
streaming_next_sample_ = samples_.size();
}
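The new SampleInfo record keeps node, timestamp, and source line together, and the trace stream emits each timestamp as a delta from the previous sample (or from start_time for the first), so absolute times never leave the engine. A small sketch of that delta encoding with made-up data:

#include <cstdio>
#include <vector>

struct SampleInfo {  // mirrors the aggregated record above
  int node_id;
  long long timestamp_us;
  int line;
};

int main() {
  std::vector<SampleInfo> samples = {
      {1, 1000, 10}, {2, 1250, 12}, {1, 1600, 0}};
  long long last = 900;  // start_time()
  for (const SampleInfo& s : samples) {
    std::printf("node=%d delta=%lld line=%d\n", s.node_id,
                s.timestamp_us - last, s.line);
    last = s.timestamp_us;
  }
}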
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index ebb4f0ea2c..1a01efb5e9 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -344,6 +344,12 @@ class CpuProfile {
public:
typedef v8::CpuProfilingMode ProfilingMode;
+ struct SampleInfo {
+ ProfileNode* node;
+ base::TimeTicks timestamp;
+ int line;
+ };
+
CpuProfile(CpuProfiler* profiler, const char* title, bool record_samples,
ProfilingMode mode);
@@ -356,10 +362,7 @@ class CpuProfile {
const ProfileTree* top_down() const { return &top_down_; }
int samples_count() const { return static_cast<int>(samples_.size()); }
- ProfileNode* sample(int index) const { return samples_.at(index); }
- base::TimeTicks sample_timestamp(int index) const {
- return timestamps_.at(index);
- }
+ const SampleInfo& sample(int index) const { return samples_[index]; }
base::TimeTicks start_time() const { return start_time_; }
base::TimeTicks end_time() const { return end_time_; }
@@ -377,8 +380,7 @@ class CpuProfile {
ProfilingMode mode_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
- std::vector<ProfileNode*> samples_;
- std::vector<base::TimeTicks> timestamps_;
+ std::deque<SampleInfo> samples_;
ProfileTree top_down_;
CpuProfiler* const profiler_;
size_t streaming_next_sample_;
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index 6bd794df70..e0e4797685 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -75,6 +75,8 @@ class ProfilerListener : public CodeEventListener {
return function_and_resource_names_.GetConsName(prefix, name);
}
+ void set_observer(CodeEventObserver* observer) { observer_ = observer; }
+
private:
void AttachDeoptInlinedFrames(Code code, CodeDeoptEventRecord* rec);
Name InferScriptName(Name name, SharedFunctionInfo info);
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 3e158544fd..659ed25c00 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -53,14 +53,14 @@ v8::AllocationProfile::Allocation SamplingHeapProfiler::ScaleSample(
SamplingHeapProfiler::SamplingHeapProfiler(
Heap* heap, StringsStorage* names, uint64_t rate, int stack_depth,
v8::HeapProfiler::SamplingFlags flags)
- : isolate_(heap->isolate()),
+ : isolate_(Isolate::FromHeap(heap)),
heap_(heap),
new_space_observer_(new SamplingAllocationObserver(
heap_, static_cast<intptr_t>(rate), rate, this,
- heap->isolate()->random_number_generator())),
+ isolate_->random_number_generator())),
other_spaces_observer_(new SamplingAllocationObserver(
heap_, static_cast<intptr_t>(rate), rate, this,
- heap->isolate()->random_number_generator())),
+ isolate_->random_number_generator())),
names_(names),
profile_root_(nullptr, "(root)", v8::UnboundScript::kNoScriptId, 0,
next_node_id()),
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 501dbd63a8..dca3e2d045 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -5,8 +5,10 @@
#include "src/profiler/tick-sample.h"
#include "include/v8-profiler.h"
+#include "src/asan.h"
#include "src/counters.h"
#include "src/frames-inl.h"
+#include "src/heap/heap-inl.h" // For MemoryAllocator::code_range.
#include "src/msan.h"
#include "src/simulator.h"
#include "src/vm-state-inl.h"
@@ -169,12 +171,17 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
external_callback_entry = info.external_callback_entry;
} else if (frames_count) {
// sp register may point at an arbitrary place in memory, make
- // sure MSAN doesn't complain about it.
+ // sure sanitizers don't complain about it.
+ ASAN_UNPOISON_MEMORY_REGION(regs.sp, sizeof(void*));
MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(void*));
// Sample potential return address value for frameless invocation of
// stubs (we'll figure out later, if this value makes sense).
- tos = reinterpret_cast<void*>(
- i::Memory<i::Address>(reinterpret_cast<i::Address>(regs.sp)));
+
+ // TODO(petermarshall): This read causes guard page violations on Windows.
+ // Either fix this mechanism for frameless stubs or remove it.
+ // tos =
+ // i::ReadUnalignedValue<void*>(reinterpret_cast<i::Address>(regs.sp));
+ tos = nullptr;
} else {
tos = nullptr;
}
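Note: the sampler interrupts arbitrary code, so the word at regs.sp may lie in memory ASan considers poisoned or MSan considers uninitialized; the hunk whitelists the access for both sanitizers (and the TODO then disables the actual read on this branch). A minimal sketch of the annotation pattern, with a no-op fallback so it builds without ASan:

    #include <cstddef>

    #if defined(__has_feature)
    #if __has_feature(address_sanitizer)
    #include <sanitizer/asan_interface.h>
    #define UNPOISON(addr, size) __asan_unpoison_memory_region((addr), (size))
    #endif
    #endif
    #ifndef UNPOISON
    #define UNPOISON(addr, size) ((void)(addr), (void)(size))  // no-op without ASan
    #endif

    void* SampleTopOfStack(void* sp) {
      // Declare the word addressable before touching it; sp can point anywhere.
      UNPOISON(sp, sizeof(void*));
      return *reinterpret_cast<void**>(sp);
    }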
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index 4947bfb99f..9feab5d4bf 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -6,6 +6,7 @@
#include "src/bootstrapper.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/isolate-inl.h"
#include "src/lookup.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 9fefb45afc..77c3d6ad3a 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -32,7 +32,6 @@ enum PropertyAttributes {
// a non-existent property.
};
-
enum PropertyFilter {
ALL_PROPERTIES = 0,
ONLY_WRITABLE = 1,
@@ -41,6 +40,7 @@ enum PropertyFilter {
SKIP_STRINGS = 8,
SKIP_SYMBOLS = 16,
ONLY_ALL_CAN_READ = 32,
+ PRIVATE_NAMES_ONLY = 64,
ENUMERABLE_STRINGS = ONLY_ENUMERABLE | SKIP_SYMBOLS,
};
// Enable fast comparisons of PropertyAttributes against PropertyFilters.
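Note: PropertyFilter stays a set of single-bit flags, so the new PRIVATE_NAMES_ONLY simply claims the next free bit (64) and composes with the others via bitwise OR/AND. A self-contained check — the two values the hunk elides, ONLY_ENUMERABLE = 2 and ONLY_CONFIGURABLE = 4, are assumed from the usual layout:

    #include <cassert>

    enum PropertyFilter {
      ALL_PROPERTIES = 0,
      ONLY_WRITABLE = 1,
      ONLY_ENUMERABLE = 2,    // assumed; elided by the hunk
      ONLY_CONFIGURABLE = 4,  // assumed; elided by the hunk
      SKIP_STRINGS = 8,
      SKIP_SYMBOLS = 16,
      ONLY_ALL_CAN_READ = 32,
      PRIVATE_NAMES_ONLY = 64,
      ENUMERABLE_STRINGS = ONLY_ENUMERABLE | SKIP_SYMBOLS,
    };

    int main() {
      int filter = ENUMERABLE_STRINGS;             // compose with OR
      assert(filter & ONLY_ENUMERABLE);            // test with AND
      assert((filter & PRIVATE_NAMES_ONLY) == 0);  // the new bit is independent
      return 0;
    }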
@@ -87,16 +87,10 @@ class Representation {
public:
enum Kind {
kNone,
- kInteger8,
- kUInteger8,
- kInteger16,
- kUInteger16,
kSmi,
- kInteger32,
kDouble,
kHeapObject,
kTagged,
- kExternal,
kNumRepresentations
};
@@ -104,15 +98,9 @@ class Representation {
static Representation None() { return Representation(kNone); }
static Representation Tagged() { return Representation(kTagged); }
- static Representation Integer8() { return Representation(kInteger8); }
- static Representation UInteger8() { return Representation(kUInteger8); }
- static Representation Integer16() { return Representation(kInteger16); }
- static Representation UInteger16() { return Representation(kUInteger16); }
static Representation Smi() { return Representation(kSmi); }
- static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
static Representation HeapObject() { return Representation(kHeapObject); }
- static Representation External() { return Representation(kExternal); }
static Representation FromKind(Kind kind) { return Representation(kind); }
@@ -121,8 +109,7 @@ class Representation {
}
bool IsCompatibleForLoad(const Representation& other) const {
- return (IsDouble() && other.IsDouble()) ||
- (!IsDouble() && !other.IsDouble());
+ return IsDouble() == other.IsDouble();
}
bool IsCompatibleForStore(const Representation& other) const {
@@ -130,15 +117,7 @@ class Representation {
}
bool is_more_general_than(const Representation& other) const {
- if (kind_ == kExternal && other.kind_ == kNone) return true;
- if (kind_ == kExternal && other.kind_ == kExternal) return false;
- if (kind_ == kNone && other.kind_ == kExternal) return false;
-
- DCHECK_NE(kind_, kExternal);
- DCHECK_NE(other.kind_, kExternal);
if (IsHeapObject()) return other.IsNone();
- if (kind_ == kUInteger8 && other.kind_ == kInteger8) return false;
- if (kind_ == kUInteger16 && other.kind_ == kInteger16) return false;
return kind_ > other.kind_;
}
@@ -154,35 +133,34 @@ class Representation {
int size() const {
DCHECK(!IsNone());
- if (IsInteger8() || IsUInteger8()) return kUInt8Size;
- if (IsInteger16() || IsUInteger16()) return kUInt16Size;
- if (IsInteger32()) return kInt32Size;
if (IsDouble()) return kDoubleSize;
- if (IsExternal()) return kSystemPointerSize;
DCHECK(IsTagged() || IsSmi() || IsHeapObject());
return kTaggedSize;
}
Kind kind() const { return static_cast<Kind>(kind_); }
bool IsNone() const { return kind_ == kNone; }
- bool IsInteger8() const { return kind_ == kInteger8; }
- bool IsUInteger8() const { return kind_ == kUInteger8; }
- bool IsInteger16() const { return kind_ == kInteger16; }
- bool IsUInteger16() const { return kind_ == kUInteger16; }
bool IsTagged() const { return kind_ == kTagged; }
bool IsSmi() const { return kind_ == kSmi; }
bool IsSmiOrTagged() const { return IsSmi() || IsTagged(); }
- bool IsInteger32() const { return kind_ == kInteger32; }
- bool IsSmiOrInteger32() const { return IsSmi() || IsInteger32(); }
bool IsDouble() const { return kind_ == kDouble; }
bool IsHeapObject() const { return kind_ == kHeapObject; }
- bool IsExternal() const { return kind_ == kExternal; }
- bool IsSpecialization() const {
- return IsInteger8() || IsUInteger8() ||
- IsInteger16() || IsUInteger16() ||
- IsSmi() || IsInteger32() || IsDouble();
+
+ const char* Mnemonic() const {
+ switch (kind_) {
+ case kNone:
+ return "v";
+ case kTagged:
+ return "t";
+ case kSmi:
+ return "s";
+ case kDouble:
+ return "d";
+ case kHeapObject:
+ return "h";
+ }
+ UNREACHABLE();
}
- const char* Mnemonic() const;
private:
explicit Representation(Kind k) : kind_(k) { }
@@ -357,7 +335,7 @@ class PropertyDetails {
// Bit fields for fast objects.
class RepresentationField
- : public BitField<uint32_t, AttributesField::kNext, 4> {};
+ : public BitField<uint32_t, AttributesField::kNext, 3> {};
class DescriptorPointer
: public BitField<uint32_t, RepresentationField::kNext,
kDescriptorIndexBitCount> {}; // NOLINT
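Note: narrowing RepresentationField from 4 bits to 3 follows directly from the kind removals above — eleven kinds needed 4 bits, while the five that remain (kNone through kTagged) encode as 0..4 and fit in 3. A minimal BitField in the spirit of V8's, with the shift fixed at 0 for simplicity:

    #include <cassert>
    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static constexpr int kNext = kShift + kSize;  // where the next field starts
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> kShift);
      }
    };

    enum Kind : uint32_t { kNone, kSmi, kDouble, kHeapObject, kTagged, kNumRepresentations };

    using RepresentationField = BitField<Kind, /*kShift=*/0, /*kSize=*/3>;
    static_assert(kNumRepresentations <= (1u << 3), "five kinds fit in 3 bits");

    int main() {
      uint32_t details = RepresentationField::encode(kHeapObject);
      assert(RepresentationField::decode(details) == kHeapObject);
      return 0;
    }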
diff --git a/deps/v8/src/prototype-inl.h b/deps/v8/src/prototype-inl.h
index 8f5dedda71..4fbb6a8102 100644
--- a/deps/v8/src/prototype-inl.h
+++ b/deps/v8/src/prototype-inl.h
@@ -8,6 +8,7 @@
#include "src/prototype.h"
#include "src/handles-inl.h"
+#include "src/objects/js-proxy.h"
#include "src/objects/map-inl.h"
namespace v8 {
diff --git a/deps/v8/src/ptr-compr-inl.h b/deps/v8/src/ptr-compr-inl.h
index 2acb04fb06..ba2d15dc8a 100644
--- a/deps/v8/src/ptr-compr-inl.h
+++ b/deps/v8/src/ptr-compr-inl.h
@@ -16,10 +16,7 @@ namespace internal {
// Compresses full-pointer representation of a tagged value to on-heap
// representation.
V8_INLINE Tagged_t CompressTagged(Address tagged) {
- // The compression is no-op while we are using checked decompression.
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
- // TODO(ishell): implement once kTaggedSize is equal to kInt32Size.
- return tagged;
+ return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
}
// Calculates isolate root value from any on-heap address.
@@ -30,38 +27,23 @@ V8_INLINE Address GetRootFromOnHeapAddress(Address addr) {
// Decompresses weak or strong heap object pointer or forwarding pointer,
// preserving both weak- and smi- tags.
-V8_INLINE Address DecompressTaggedPointerImpl(Address on_heap_addr,
- int32_t value) {
+V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
+ Tagged_t raw_value) {
+ static_assert(kTaggedSize == kSystemPointerSize, "has to be updated");
+ static_assert(!std::is_same<int32_t, Tagged_t>::value, "remove cast below");
+ int32_t value = static_cast<int32_t>(raw_value);
Address root = GetRootFromOnHeapAddress(on_heap_addr);
// Current compression scheme requires value to be sign-extended to intptr_t
// before adding the |root|.
return root + static_cast<Address>(static_cast<intptr_t>(value));
}
-// Decompresses weak or strong heap object pointer or forwarding pointer,
-// preserving both weak- and smi- tags and checks that the result of
-// decompression matches full value stored in the field.
-// Checked decompression helps to find misuses of XxxSlots and FullXxxSlots.
-// TODO(ishell): remove in favour of DecompressTaggedPointerImpl() once
-// kTaggedSize is equal to kInt32Size.
-V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
- Tagged_t full_value) {
- // Use only lower 32-bits of the value for decompression.
- int32_t compressed = static_cast<int32_t>(full_value);
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
- Address result = DecompressTaggedPointerImpl(on_heap_addr, compressed);
-#ifdef DEBUG
- if (full_value != result) {
- base::OS::DebugBreak();
- result = DecompressTaggedPointerImpl(on_heap_addr, compressed);
- }
-#endif
- DCHECK_EQ(full_value, result);
- return result;
-}
-
// Decompresses any tagged value, preserving both weak- and smi- tags.
-V8_INLINE Address DecompressTaggedAnyImpl(Address on_heap_addr, int32_t value) {
+V8_INLINE Address DecompressTaggedAny(Address on_heap_addr,
+ Tagged_t raw_value) {
+ static_assert(kTaggedSize == kSystemPointerSize, "has to be updated");
+ static_assert(!std::is_same<int32_t, Tagged_t>::value, "remove cast below");
+ int32_t value = static_cast<int32_t>(raw_value);
// |root_mask| is 0 if the |value| was a smi or -1 otherwise.
Address root_mask = -static_cast<Address>(value & kSmiTagMask);
Address root_or_zero = root_mask & GetRootFromOnHeapAddress(on_heap_addr);
@@ -70,26 +52,11 @@ V8_INLINE Address DecompressTaggedAnyImpl(Address on_heap_addr, int32_t value) {
return root_or_zero + static_cast<Address>(static_cast<intptr_t>(value));
}
-// Decompresses any tagged value, preserving both weak- and smi- tags and checks
-// that the result of decompression matches full value stored in the field.
-// Checked decompression helps to find misuses of XxxSlots and FullXxxSlots.
-// TODO(ishell): remove in favour of DecompressTaggedAnyImpl() once
-// kTaggedSize is equal to kInt32Size.
-V8_INLINE Address DecompressTaggedAny(Address on_heap_addr,
- Tagged_t full_value) {
- // Use only lower 32-bits of the value for decompression.
- int32_t compressed = static_cast<int32_t>(full_value);
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
- Address result = DecompressTaggedAnyImpl(on_heap_addr, compressed);
-#ifdef DEBUG
- if (full_value != result) {
- base::OS::DebugBreak();
- result = DecompressTaggedAnyImpl(on_heap_addr, compressed);
- }
-#endif
- DCHECK_EQ(full_value, result);
- return result;
-}
+STATIC_ASSERT(kPtrComprHeapReservationSize ==
+ Internals::kPtrComprHeapReservationSize);
+STATIC_ASSERT(kPtrComprIsolateRootBias == Internals::kPtrComprIsolateRootBias);
+STATIC_ASSERT(kPtrComprIsolateRootAlignment ==
+ Internals::kPtrComprIsolateRootAlignment);
//
// CompressedObjectSlot implementation.
@@ -141,8 +108,9 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
//
bool CompressedMapWordSlot::contains_value(Address raw_value) const {
- Tagged_t value = *location();
- return value == static_cast<Tagged_t>(raw_value);
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
+ return static_cast<uint32_t>(value) ==
+ static_cast<uint32_t>(static_cast<Tagged_t>(raw_value));
}
Object CompressedMapWordSlot::operator*() const {
@@ -227,12 +195,13 @@ void CompressedHeapObjectSlot::store(HeapObjectReference value) const {
}
HeapObject CompressedHeapObjectSlot::ToHeapObject() const {
- DCHECK((*location() & kHeapObjectTagMask) == kHeapObjectTag);
- return HeapObject::cast(Object(*location()));
+ Tagged_t value = *location();
+ DCHECK_EQ(value & kHeapObjectTagMask, kHeapObjectTag);
+ return HeapObject::cast(Object(DecompressTaggedPointer(address(), value)));
}
void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {
- *location() = value->ptr();
+ *location() = CompressTagged(value->ptr());
}
} // namespace internal
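Note on the ptr-compr-inl.h changes: with checked decompression gone, the scheme is now the real one. CompressTagged keeps only the low 32 bits; decompression sign-extends them and adds the isolate root, and for DecompressTaggedAny a mask built from the smi tag bit (clear on smis in V8's tagging) suppresses the root add so smis round-trip unchanged. A worked sketch under those assumptions, with a hypothetical root value and a 64-bit host assumed:

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;
    using Tagged_t = uint32_t;          // on-heap compressed representation
    constexpr Address kSmiTagMask = 1;  // low bit clear => smi, set => heap object
    static_assert(sizeof(Address) == 8, "sketch assumes a 64-bit host");

    Tagged_t CompressTagged(Address tagged) {
      return static_cast<Tagged_t>(tagged);  // truncate to the low 32 bits
    }

    Address DecompressTaggedAny(Address isolate_root, Tagged_t raw) {
      int32_t value = static_cast<int32_t>(raw);
      // 0 for a smi, all-ones for a heap object pointer.
      Address root_mask = -static_cast<Address>(value & kSmiTagMask);
      Address root_or_zero = root_mask & isolate_root;
      // Sign-extend the low half, then rebase heap pointers on the root.
      return root_or_zero + static_cast<Address>(static_cast<intptr_t>(value));
    }

    int main() {
      const Address root = Address{1} << 32;     // hypothetical isolate root
      const Address object = root + 0x1234 + 1;  // tag bit set: heap object
      assert(DecompressTaggedAny(root, CompressTagged(object)) == object);
      const Address smi = 0x42 << 1;             // tag bit clear: smi payload
      assert(DecompressTaggedAny(root, CompressTagged(smi)) == smi);
      return 0;
    }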
diff --git a/deps/v8/src/ptr-compr.h b/deps/v8/src/ptr-compr.h
index 930a80ccec..93d7834df3 100644
--- a/deps/v8/src/ptr-compr.h
+++ b/deps/v8/src/ptr-compr.h
@@ -99,6 +99,8 @@ class CompressedMaybeObjectSlot
explicit CompressedMaybeObjectSlot(Address ptr) : SlotBase(ptr) {}
explicit CompressedMaybeObjectSlot(Object* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
+ explicit CompressedMaybeObjectSlot(MaybeObject* ptr)
+ : SlotBase(reinterpret_cast<Address>(ptr)) {}
template <typename T>
explicit CompressedMaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
: SlotBase(slot.address()) {}
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 634259f8df..a523ccd3d3 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -19,7 +19,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - r4 : Temporarily stores the index of capture start after a matching pass
@@ -350,7 +349,6 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
Label fallthrough;
- Label success;
// Find length of back-referenced capture.
__ ldr(r0, register_location(start_reg));
@@ -856,7 +854,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
- Label grow_failed;
// Call GrowStack(backtrack_stackpointer(), &stack_base)
static const int num_arguments = 3;
@@ -1047,7 +1044,7 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins());
DCHECK(!masm_->options().isolate_independent_code);
__ PrepareCallCFunction(3);
@@ -1252,8 +1249,6 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
#undef __
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 758fe88d6b..570b170dcd 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -12,8 +12,6 @@
namespace v8 {
namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone, Mode mode,
@@ -211,9 +209,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
Label stack_overflow_label_;
};
-#endif // V8_INTERPRETED_REGEXP
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 54ad44d68a..70521f2603 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -18,7 +18,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention:
* - w19 : Used to temporarily store a value before a call to C code.
@@ -723,7 +722,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ PushCPURegList(argument_registers);
// Set frame pointer in place.
- __ Add(frame_pointer(), sp, argument_registers.Count() * kPointerSize);
+ __ Add(frame_pointer(), sp, argument_registers.Count() * kSystemPointerSize);
// Initialize callee-saved registers.
__ Mov(start_offset(), w1);
@@ -881,9 +880,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Add(capture_end, input_length, capture_end);
}
// The output pointer advances for a possible global match.
- __ Stp(capture_start,
- capture_end,
- MemOperand(output_array(), kPointerSize, PostIndex));
+ __ Stp(capture_start, capture_end,
+ MemOperand(output_array(), kSystemPointerSize, PostIndex));
}
// Only carry on if there are more than kNumCachedRegisters capture
@@ -902,9 +900,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
STATIC_ASSERT(kNumRegistersToUnroll > 2);
if (num_registers_left_on_stack <= kNumRegistersToUnroll) {
for (int i = 0; i < num_registers_left_on_stack / 2; i++) {
- __ Ldp(capture_end,
- capture_start,
- MemOperand(base, -kPointerSize, PostIndex));
+ __ Ldp(capture_end, capture_start,
+ MemOperand(base, -kSystemPointerSize, PostIndex));
if ((i == 0) && global_with_zero_length_check()) {
// Keep capture start for the zero-length check later.
__ Mov(first_capture_start, capture_start);
@@ -920,26 +917,23 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Add(capture_end, input_length, capture_end);
}
// The output pointer advances for a possible global match.
- __ Stp(capture_start,
- capture_end,
- MemOperand(output_array(), kPointerSize, PostIndex));
+ __ Stp(capture_start, capture_end,
+ MemOperand(output_array(), kSystemPointerSize, PostIndex));
}
} else {
Label loop, start;
__ Mov(x11, num_registers_left_on_stack);
- __ Ldp(capture_end,
- capture_start,
- MemOperand(base, -kPointerSize, PostIndex));
+ __ Ldp(capture_end, capture_start,
+ MemOperand(base, -kSystemPointerSize, PostIndex));
if (global_with_zero_length_check()) {
__ Mov(first_capture_start, capture_start);
}
__ B(&start);
__ Bind(&loop);
- __ Ldp(capture_end,
- capture_start,
- MemOperand(base, -kPointerSize, PostIndex));
+ __ Ldp(capture_end, capture_start,
+ MemOperand(base, -kSystemPointerSize, PostIndex));
__ Bind(&start);
if (mode_ == UC16) {
__ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
@@ -949,9 +943,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Add(capture_end, input_length, capture_end);
}
// The output pointer advances for a possible global match.
- __ Stp(capture_start,
- capture_end,
- MemOperand(output_array(), kPointerSize, PostIndex));
+ __ Stp(capture_start, capture_end,
+ MemOperand(output_array(), kSystemPointerSize, PostIndex));
__ Sub(x11, x11, 2);
__ Cbnz(x11, &loop);
}
@@ -1289,7 +1282,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
__ Mov(x11, num_registers);
__ Bind(&loop);
__ Str(twice_non_position_value(),
- MemOperand(base, -kPointerSize, PostIndex));
+ MemOperand(base, -kSystemPointerSize, PostIndex));
__ Sub(x11, x11, 2);
__ Cbnz(x11, &loop);
} else {
@@ -1354,7 +1347,7 @@ void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
// Private methods:
void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins());
DCHECK(!masm_->options().isolate_independent_code);
// Allocate space on the stack to store the return address. The
@@ -1369,10 +1362,10 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
__ Claim(xreg_to_claim);
// CheckStackGuardState needs the end and start addresses of the input string.
- __ Poke(input_end(), 2 * kPointerSize);
- __ Add(x5, sp, 2 * kPointerSize);
- __ Poke(input_start(), kPointerSize);
- __ Add(x4, sp, kPointerSize);
+ __ Poke(input_end(), 2 * kSystemPointerSize);
+ __ Add(x5, sp, 2 * kSystemPointerSize);
+ __ Poke(input_start(), kSystemPointerSize);
+ __ Add(x4, sp, kSystemPointerSize);
__ Mov(w3, start_offset());
// RegExp code frame pointer.
@@ -1407,8 +1400,8 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
}
// The input string may have been moved in memory, we need to reload it.
- __ Peek(input_start(), kPointerSize);
- __ Peek(input_end(), 2 * kPointerSize);
+ __ Peek(input_start(), kSystemPointerSize);
+ __ Peek(input_end(), 2 * kSystemPointerSize);
__ Drop(xreg_to_claim);
@@ -1658,8 +1651,6 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
}
}
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 2ab65a1523..6eba91bd36 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -12,8 +12,6 @@
namespace v8 {
namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerARM64(Isolate* isolate, Zone* zone, Mode mode,
@@ -107,19 +105,20 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
static const int kCalleeSavedRegisters = 0;
// Return address.
// It is placed above the 11 callee-saved registers.
- static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
+ static const int kReturnAddress =
+ kCalleeSavedRegisters + 11 * kSystemPointerSize;
// Stack parameter placed by caller.
- static const int kIsolate = kReturnAddress + kPointerSize;
+ static const int kIsolate = kReturnAddress + kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
- static const int kStackBase = kDirectCall - kPointerSize;
- static const int kOutputSize = kStackBase - kPointerSize;
- static const int kInput = kOutputSize - kPointerSize;
+ static const int kDirectCall = kCalleeSavedRegisters - kSystemPointerSize;
+ static const int kStackBase = kDirectCall - kSystemPointerSize;
+ static const int kOutputSize = kStackBase - kSystemPointerSize;
+ static const int kInput = kOutputSize - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessCounter = kInput - kPointerSize;
+ static const int kSuccessCounter = kInput - kSystemPointerSize;
// First position register address on the stack. Following positions are
// below it. A position is a 32 bit value.
static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSize;
@@ -285,9 +284,6 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
Label stack_overflow_label_;
};
-#endif // V8_INTERPRETED_REGEXP
-
-
} // namespace internal
} // namespace v8
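Note: the kPointerSize -> kSystemPointerSize switch throughout these files separates machine-word-sized stack slots from tagged-value size, which pointer compression lets diverge; frame slots like the ones above hold raw words, so they scale with the system pointer size. Worked with the arm64 value of 8 bytes, the layout from the hunk comes out as offsets relative to the frame pointer — parameters above it, locals below:

    constexpr int kSystemPointerSize = 8;  // arm64
    constexpr int kCalleeSavedRegisters = 0;
    constexpr int kReturnAddress = kCalleeSavedRegisters + 11 * kSystemPointerSize;
    constexpr int kIsolate = kReturnAddress + kSystemPointerSize;
    constexpr int kDirectCall = kCalleeSavedRegisters - kSystemPointerSize;
    constexpr int kStackBase = kDirectCall - kSystemPointerSize;
    constexpr int kOutputSize = kStackBase - kSystemPointerSize;
    constexpr int kInput = kOutputSize - kSystemPointerSize;
    constexpr int kSuccessCounter = kInput - kSystemPointerSize;
    static_assert(kReturnAddress == 88 && kIsolate == 96, "parameters sit above fp");
    static_assert(kSuccessCounter == -40, "locals grow downward from fp");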
diff --git a/deps/v8/src/regexp/bytecodes-irregexp.h b/deps/v8/src/regexp/bytecodes-irregexp.h
index 3848f15dc7..a27c9a0a2b 100644
--- a/deps/v8/src/regexp/bytecodes-irregexp.h
+++ b/deps/v8/src/regexp/bytecodes-irregexp.h
@@ -6,8 +6,6 @@
#ifndef V8_REGEXP_BYTECODES_IRREGEXP_H_
#define V8_REGEXP_BYTECODES_IRREGEXP_H_
-#ifdef V8_INTERPRETED_REGEXP
-
namespace v8 {
namespace internal {
@@ -86,6 +84,4 @@ BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
} // namespace internal
} // namespace v8
-#endif // V8_INTERPRETED_REGEXP
-
#endif // V8_REGEXP_BYTECODES_IRREGEXP_H_
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 15b9e23692..af9237a264 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -17,7 +17,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - edx : Current character. Must be loaded using LoadCurrentCharacter
@@ -188,7 +187,7 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
Label fallthrough;
__ cmp(edi, Operand(backtrack_stackpointer(), 0));
__ j(not_equal, &fallthrough);
- __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop.
+ __ add(backtrack_stackpointer(), Immediate(kSystemPointerSize)); // Pop.
BranchOrBacktrack(no_condition, on_equal);
__ bind(&fallthrough);
}
@@ -279,7 +278,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Restore original value before continuing.
__ pop(backtrack_stackpointer());
// Drop original value of character position.
- __ add(esp, Immediate(kPointerSize));
+ __ add(esp, Immediate(kSystemPointerSize));
// Compute new value of character position after the matched part.
__ sub(edi, esi);
if (read_backward) {
@@ -307,15 +306,15 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Set isolate.
#ifdef V8_INTL_SUPPORT
if (unicode) {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(0));
+ __ mov(Operand(esp, 3 * kSystemPointerSize), Immediate(0));
} else // NOLINT
#endif // V8_INTL_SUPPORT
{
- __ mov(Operand(esp, 3 * kPointerSize),
+ __ mov(Operand(esp, 3 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
}
// Set byte_length.
- __ mov(Operand(esp, 2 * kPointerSize), ebx);
+ __ mov(Operand(esp, 2 * kSystemPointerSize), ebx);
// Set byte_offset2.
// Found by adding negative string-end offset of current position (edi)
// to end of string.
@@ -323,11 +322,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
if (read_backward) {
__ sub(edi, ebx); // Offset by length when matching backwards.
}
- __ mov(Operand(esp, 1 * kPointerSize), edi);
+ __ mov(Operand(esp, 1 * kSystemPointerSize), edi);
// Set byte_offset1.
// Start of capture, where edx already holds string-end negative offset.
__ add(edx, esi);
- __ mov(Operand(esp, 0 * kPointerSize), edx);
+ __ mov(Operand(esp, 0 * kSystemPointerSize), edx);
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -692,7 +691,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ cmp(ecx, num_registers_ * kPointerSize);
+ __ cmp(ecx, num_registers_ * kSystemPointerSize);
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
@@ -710,7 +709,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(ebx, Operand(ebp, kStartIndex));
// Allocate space on stack for registers.
- __ sub(esp, Immediate(num_registers_ * kPointerSize));
+ __ sub(esp, Immediate(num_registers_ * kSystemPointerSize));
// Load string length.
__ mov(esi, Operand(ebp, kInputEnd));
// Load input position.
@@ -734,7 +733,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Ensure that we write to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kPointerSize;
+ const int kRegistersPerPage = kPageSize / kSystemPointerSize;
for (int i = num_saved_registers_ + kRegistersPerPage - 1;
i < num_registers_;
i += kRegistersPerPage) {
@@ -765,8 +764,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label init_loop;
__ bind(&init_loop);
__ mov(Operand(ebp, ecx, times_1, 0), eax);
- __ sub(ecx, Immediate(kPointerSize));
- __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
+ __ sub(ecx, Immediate(kSystemPointerSize));
+ __ cmp(ecx, kRegisterZero - num_saved_registers_ * kSystemPointerSize);
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
@@ -806,7 +805,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
if (mode_ == UC16) {
__ sar(eax, 1); // Convert byte index to character index.
}
- __ mov(Operand(ebx, i * kPointerSize), eax);
+ __ mov(Operand(ebx, i * kSystemPointerSize), eax);
}
}
@@ -825,7 +824,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(Operand(ebp, kNumOutputRegisters), ecx);
// Advance the location for output.
__ add(Operand(ebp, kRegisterOutput),
- Immediate(num_saved_registers_ * kPointerSize));
+ Immediate(num_saved_registers_ * kSystemPointerSize));
// Prepare eax to initialize registers with its value in the next run.
__ mov(eax, Operand(ebp, kStringStartMinusOne));
@@ -905,7 +904,6 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
- Label grow_failed;
// Save registers before calling C function
__ push(esi);
__ push(edi);
@@ -913,11 +911,11 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Call GrowStack(backtrack_stackpointer())
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, ebx);
- __ mov(Operand(esp, 2 * kPointerSize),
+ __ mov(Operand(esp, 2 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
__ lea(eax, Operand(ebp, kStackHighEnd));
- __ mov(Operand(esp, 1 * kPointerSize), eax);
- __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
+ __ mov(Operand(esp, 1 * kSystemPointerSize), eax);
+ __ mov(Operand(esp, 0 * kSystemPointerSize), backtrack_stackpointer());
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
@@ -1099,12 +1097,12 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, scratch);
// RegExp code frame pointer.
- __ mov(Operand(esp, 2 * kPointerSize), ebp);
+ __ mov(Operand(esp, 2 * kSystemPointerSize), ebp);
// Code of self.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
+ __ mov(Operand(esp, 1 * kSystemPointerSize), Immediate(masm_->CodeObject()));
// Next address on the stack (will be address of return address).
- __ lea(eax, Operand(esp, -kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
+ __ lea(eax, Operand(esp, -kSystemPointerSize));
+ __ mov(Operand(esp, 0 * kSystemPointerSize), eax);
ExternalReference check_stack_guard =
ExternalReference::re_check_stack_guard_state(isolate());
__ CallCFunction(check_stack_guard, num_arguments);
@@ -1145,7 +1143,7 @@ Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
- return Operand(ebp, kRegisterZero - register_index * kPointerSize);
+ return Operand(ebp, kRegisterZero - register_index * kSystemPointerSize);
}
@@ -1203,14 +1201,14 @@ void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerIA32::Push(Register source) {
DCHECK(source != backtrack_stackpointer());
// Notice: This updates flags, unlike normal Push.
- __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
+ __ sub(backtrack_stackpointer(), Immediate(kSystemPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerIA32::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
- __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
+ __ sub(backtrack_stackpointer(), Immediate(kSystemPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), value);
}
@@ -1219,7 +1217,7 @@ void RegExpMacroAssemblerIA32::Pop(Register target) {
DCHECK(target != backtrack_stackpointer());
__ mov(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
- __ add(backtrack_stackpointer(), Immediate(kPointerSize));
+ __ add(backtrack_stackpointer(), Immediate(kSystemPointerSize));
}
@@ -1277,8 +1275,6 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
#undef __
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 7757506b49..0a6eb558ab 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -12,7 +12,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone, Mode mode,
@@ -97,31 +96,32 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// Offsets from ebp of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
+ static const int kReturn_eip = kFramePointer + kSystemPointerSize;
+ static const int kFrameAlign = kReturn_eip + kSystemPointerSize;
// Parameters.
static const int kInputString = kFrameAlign;
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
+ static const int kStartIndex = kInputString + kSystemPointerSize;
+ static const int kInputStart = kStartIndex + kSystemPointerSize;
+ static const int kInputEnd = kInputStart + kSystemPointerSize;
+ static const int kRegisterOutput = kInputEnd + kSystemPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput + kSystemPointerSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kSystemPointerSize;
+ static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
+ static const int kIsolate = kDirectCall + kSystemPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kBackup_esi = kFramePointer - kPointerSize;
- static const int kBackup_edi = kBackup_esi - kPointerSize;
- static const int kBackup_ebx = kBackup_edi - kPointerSize;
- static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
- static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kBackup_esi = kFramePointer - kSystemPointerSize;
+ static const int kBackup_edi = kBackup_esi - kSystemPointerSize;
+ static const int kBackup_ebx = kBackup_edi - kSystemPointerSize;
+ static const int kSuccessfulCaptures = kBackup_ebx - kSystemPointerSize;
+ static const int kStringStartMinusOne =
+ kSuccessfulCaptures - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -197,7 +197,6 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
Label check_preempt_label_;
Label stack_overflow_label_;
};
-#endif // V8_INTERPRETED_REGEXP
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index f98dc062cf..526a3290f7 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -4,8 +4,6 @@
// A simple interpreter for the Irregexp byte code.
-#ifdef V8_INTERPRETED_REGEXP
-
#include "src/regexp/interpreter-irregexp.h"
#include "src/ast/ast.h"
@@ -623,5 +621,3 @@ RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
} // namespace internal
} // namespace v8
-
-#endif // V8_INTERPRETED_REGEXP
diff --git a/deps/v8/src/regexp/interpreter-irregexp.h b/deps/v8/src/regexp/interpreter-irregexp.h
index 887fab6d0e..c51e320a07 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.h
+++ b/deps/v8/src/regexp/interpreter-irregexp.h
@@ -7,8 +7,6 @@
#ifndef V8_REGEXP_INTERPRETER_IRREGEXP_H_
#define V8_REGEXP_INTERPRETER_IRREGEXP_H_
-#ifdef V8_INTERPRETED_REGEXP
-
#include "src/regexp/jsregexp.h"
namespace v8 {
@@ -27,6 +25,4 @@ class IrregexpInterpreter {
} // namespace internal
} // namespace v8
-#endif // V8_INTERPRETED_REGEXP
-
#endif // V8_REGEXP_INTERPRETER_IRREGEXP_H_
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 117ae6cd44..7d94adfb86 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -13,6 +13,7 @@
#include "src/elements.h"
#include "src/execution.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate-inl.h"
#include "src/message-template.h"
#include "src/ostreams.h"
@@ -28,13 +29,13 @@
#include "src/string-search.h"
#include "src/unicode-decoder.h"
#include "src/unicode-inl.h"
+#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uniset.h"
#include "unicode/utypes.h"
#endif // V8_INTL_SUPPORT
-#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
@@ -54,8 +55,6 @@
#else
#error Unsupported target architecture.
#endif
-#endif
-
namespace v8 {
namespace internal {
@@ -300,11 +299,11 @@ bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject,
bool is_one_byte) {
Object compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
-#ifdef V8_INTERPRETED_REGEXP
- if (compiled_code->IsByteArray()) return true;
-#else // V8_INTERPRETED_REGEXP (RegExp native code)
- if (compiled_code->IsCode()) return true;
-#endif
+ if (compiled_code != Smi::FromInt(JSRegExp::kUninitializedValue)) {
+ DCHECK(FLAG_regexp_interpret_all ? compiled_code->IsByteArray()
+ : compiled_code->IsCode());
+ return true;
+ }
return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
}
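Note: this hunk is the heart of the regexp change in this update — the V8_INTERPRETED_REGEXP compile-time switch becomes the runtime flag FLAG_regexp_interpret_all, so one binary carries both engines. "Compiled" now means "anything but the uninitialized sentinel", with a DCHECK tying the cached object's type to the active engine. A stand-in sketch of that dispatch shape (types modeled, not V8's):

    #include <cassert>

    enum class Stored { kUninitialized, kByteArray, kNativeCode };  // stand-ins

    bool FLAG_regexp_interpret_all = false;  // runtime flag, modeled as a global

    template <typename CompileFn>
    bool EnsureCompiled(Stored code, CompileFn compile) {
      if (code != Stored::kUninitialized) {
        // Whatever is cached must match the engine the flag selects.
        assert(code == (FLAG_regexp_interpret_all ? Stored::kByteArray
                                                  : Stored::kNativeCode));
        return true;
      }
      return compile();  // cold path: first use triggers compilation
    }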
@@ -413,18 +412,18 @@ int RegExpImpl::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
if (!EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte)) return -1;
-#ifdef V8_INTERPRETED_REGEXP
- // Byte-code regexp needs space allocated for all its registers.
- // The result captures are copied to the start of the registers array
- // if the match succeeds. This way those registers are not clobbered
- // when we set the last match info from last successful match.
- return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data())) +
- (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
-#else // V8_INTERPRETED_REGEXP
- // Native regexp only needs room to output captures. Registers are handled
- // internally.
- return (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
-#endif // V8_INTERPRETED_REGEXP
+ if (FLAG_regexp_interpret_all) {
+ // Byte-code regexp needs space allocated for all its registers.
+ // The result captures are copied to the start of the registers array
+ // if the match succeeds. This way those registers are not clobbered
+ // when we set the last match info from last successful match.
+ return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data())) +
+ (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
+ } else {
+ // Native regexp only needs room to output captures. Registers are handled
+ // internally.
+ return (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
+ }
}
int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
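Note: the two register budgets in the hunk above differ only by the interpreter's internal registers; both paths reserve one (start, end) pair per capture plus a pair for the whole match. Worked numbers for a pattern with two capture groups — the interpreter's register count here is made up for illustration:

    constexpr int NativeSlots(int captures) { return (captures + 1) * 2; }
    constexpr int InterpretedSlots(int internal_registers, int captures) {
      return internal_registers + NativeSlots(captures);
    }
    static_assert(NativeSlots(2) == 6, "whole match + 2 groups = 3 pairs");
    static_assert(InterpretedSlots(10, 2) == 16, "hypothetical 10 registers + 6");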
@@ -438,75 +437,68 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
-#ifndef V8_INTERPRETED_REGEXP
- DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
- do {
- EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
- Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate);
- // The stack is used to allocate registers for the compiled regexp code.
- // This means that in case of failure, the output registers array is left
- // untouched and contains the capture results from the previous successful
- // match. We can use that to set the last match info lazily.
- NativeRegExpMacroAssembler::Result res =
- NativeRegExpMacroAssembler::Match(code,
- subject,
- output,
- output_size,
- index,
- isolate);
- if (res != NativeRegExpMacroAssembler::RETRY) {
- DCHECK(res != NativeRegExpMacroAssembler::EXCEPTION ||
- isolate->has_pending_exception());
- STATIC_ASSERT(
- static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) == RE_SUCCESS);
- STATIC_ASSERT(
- static_cast<int>(NativeRegExpMacroAssembler::FAILURE) == RE_FAILURE);
- STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION)
- == RE_EXCEPTION);
- return static_cast<IrregexpResult>(res);
- }
- // If result is RETRY, the string has changed representation, and we
- // must restart from scratch.
- // In this case, it means we must make sure we are prepared to handle
- // the, potentially, different subject (the string can switch between
- // being internal and external, and even between being Latin1 and UC16,
- // but the characters are always the same).
- IrregexpPrepare(isolate, regexp, subject);
- is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
- } while (true);
- UNREACHABLE();
-#else // V8_INTERPRETED_REGEXP
-
- DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp));
- // We must have done EnsureCompiledIrregexp, so we can get the number of
- // registers.
- int number_of_capture_registers =
- (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
- int32_t* raw_output = &output[number_of_capture_registers];
- // We do not touch the actual capture result registers until we know there
- // has been a match so that we can use those capture results to set the
- // last match info.
- for (int i = number_of_capture_registers - 1; i >= 0; i--) {
- raw_output[i] = -1;
- }
- Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_one_byte),
- isolate);
-
- IrregexpResult result = IrregexpInterpreter::Match(isolate,
- byte_codes,
- subject,
- raw_output,
- index);
- if (result == RE_SUCCESS) {
- // Copy capture results to the start of the registers array.
- MemCopy(output, raw_output, number_of_capture_registers * sizeof(int32_t));
- }
- if (result == RE_EXCEPTION) {
- DCHECK(!isolate->has_pending_exception());
- isolate->StackOverflow();
+ if (!FLAG_regexp_interpret_all) {
+ DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
+ do {
+ EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
+ Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate);
+ // The stack is used to allocate registers for the compiled regexp code.
+ // This means that in case of failure, the output registers array is left
+ // untouched and contains the capture results from the previous successful
+ // match. We can use that to set the last match info lazily.
+ int res = NativeRegExpMacroAssembler::Match(code, subject, output,
+ output_size, index, isolate);
+ if (res != NativeRegExpMacroAssembler::RETRY) {
+ DCHECK(res != NativeRegExpMacroAssembler::EXCEPTION ||
+ isolate->has_pending_exception());
+ STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) ==
+ RE_SUCCESS);
+ STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::FAILURE) ==
+ RE_FAILURE);
+ STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION) ==
+ RE_EXCEPTION);
+ return res;
+ }
+ // If result is RETRY, the string has changed representation, and we
+ // must restart from scratch.
+ // In this case, it means we must make sure we are prepared to handle
+ // the, potentially, different subject (the string can switch between
+ // being internal and external, and even between being Latin1 and UC16,
+ // but the characters are always the same).
+ IrregexpPrepare(isolate, regexp, subject);
+ is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
+ } while (true);
+ UNREACHABLE();
+ } else {
+ DCHECK(FLAG_regexp_interpret_all);
+ DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp));
+ // We must have done EnsureCompiledIrregexp, so we can get the number of
+ // registers.
+ int number_of_capture_registers =
+ (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
+ int32_t* raw_output = &output[number_of_capture_registers];
+ // We do not touch the actual capture result registers until we know there
+ // has been a match so that we can use those capture results to set the
+ // last match info.
+ for (int i = number_of_capture_registers - 1; i >= 0; i--) {
+ raw_output[i] = -1;
+ }
+ Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_one_byte),
+ isolate);
+
+ IrregexpResult result = IrregexpInterpreter::Match(
+ isolate, byte_codes, subject, raw_output, index);
+ if (result == RE_SUCCESS) {
+ // Copy capture results to the start of the registers array.
+ MemCopy(output, raw_output,
+ number_of_capture_registers * sizeof(int32_t));
+ }
+ if (result == RE_EXCEPTION) {
+ DCHECK(!isolate->has_pending_exception());
+ isolate->StackOverflow();
+ }
+ return result;
}
- return result;
-#endif // V8_INTERPRETED_REGEXP
}
MaybeHandle<Object> RegExpImpl::IrregexpExec(
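Note: the native branch above loops on RETRY because executing the regexp can change the subject's representation (internal/external, Latin1/UC16), invalidating the code object chosen for it, while the interpreted branch pre-fills its capture block with -1 and copies it forward only on success. The retry loop reduces to this skeleton; the result values are assumed to match V8's NativeRegExpMacroAssembler at this revision:

    enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };

    template <typename MatchFn, typename PrepareFn>
    int RunToCompletion(MatchFn match, PrepareFn prepare) {
      while (true) {
        int res = match();
        if (res != RETRY) return res;  // SUCCESS / FAILURE / EXCEPTION
        // Subject may have changed representation; re-prepare and pick the
        // matching one-byte/two-byte code object before trying again.
        prepare();
      }
    }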
@@ -517,8 +509,8 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
subject = String::Flatten(isolate, subject);
// Prepare space for the return values.
-#if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
- if (FLAG_trace_regexp_bytecodes) {
+#ifdef DEBUG
+ if (FLAG_regexp_interpret_all && FLAG_trace_regexp_bytecodes) {
String pattern = regexp->Pattern();
PrintF("\n\nRegexp match: /%s/\n\n", pattern->ToCString().get());
PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
@@ -572,12 +564,12 @@ Handle<RegExpMatchInfo> RegExpImpl::SetLastMatchInfo(
result->SetNumberOfCaptureRegisters(capture_register_count);
if (*result != *last_match_info) {
- // The match info has been reallocated, update the corresponding reference
- // on the native context.
if (*last_match_info == *isolate->regexp_last_match_info()) {
+ // This inner condition is only needed for special situations like the
+ // regexp fuzzer, where we pass our own custom RegExpMatchInfo to
+      // RegExpImpl::Exec; there we actually want to bypass the Isolate's match
+ // info and execute the regexp without side effects.
isolate->native_context()->set_regexp_last_match_info(*result);
- } else if (*last_match_info == *isolate->regexp_internal_match_info()) {
- isolate->native_context()->set_regexp_internal_match_info(*result);
}
}
@@ -600,11 +592,7 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
regexp_(regexp),
subject_(subject),
isolate_(isolate) {
-#ifdef V8_INTERPRETED_REGEXP
- bool interpreted = true;
-#else
- bool interpreted = false;
-#endif // V8_INTERPRETED_REGEXP
+ bool interpreted = FLAG_regexp_interpret_all;
if (regexp_->TypeTag() == JSRegExp::ATOM) {
static const int kAtomRegistersPerMatch = 2;
@@ -1076,8 +1064,8 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
isolate->IncreaseTotalRegexpCodeGenerated(code->Size());
work_list_ = nullptr;
-#if defined(ENABLE_DISASSEMBLER) && !defined(V8_INTERPRETED_REGEXP)
- if (FLAG_print_code) {
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code && !FLAG_regexp_interpret_all) {
CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
OFStream os(trace_scope.file());
Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
@@ -6698,57 +6686,57 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
}
// Create the correct assembler for the architecture.
-#ifndef V8_INTERPRETED_REGEXP
- DCHECK(!FLAG_jitless);
-
- // Native regexp implementation.
+ std::unique_ptr<RegExpMacroAssembler> macro_assembler;
+ if (!FLAG_regexp_interpret_all) {
+ // Native regexp implementation.
+ DCHECK(!FLAG_jitless);
- NativeRegExpMacroAssembler::Mode mode =
- is_one_byte ? NativeRegExpMacroAssembler::LATIN1
- : NativeRegExpMacroAssembler::UC16;
+ NativeRegExpMacroAssembler::Mode mode =
+ is_one_byte ? NativeRegExpMacroAssembler::LATIN1
+ : NativeRegExpMacroAssembler::UC16;
#if V8_TARGET_ARCH_IA32
- RegExpMacroAssemblerIA32 macro_assembler(isolate, zone, mode,
- (data->capture_count + 1) * 2);
+ macro_assembler.reset(new RegExpMacroAssemblerIA32(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
#elif V8_TARGET_ARCH_X64
- RegExpMacroAssemblerX64 macro_assembler(isolate, zone, mode,
- (data->capture_count + 1) * 2);
+ macro_assembler.reset(new RegExpMacroAssemblerX64(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
#elif V8_TARGET_ARCH_ARM
- RegExpMacroAssemblerARM macro_assembler(isolate, zone, mode,
- (data->capture_count + 1) * 2);
+ macro_assembler.reset(new RegExpMacroAssemblerARM(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
#elif V8_TARGET_ARCH_ARM64
- RegExpMacroAssemblerARM64 macro_assembler(isolate, zone, mode,
- (data->capture_count + 1) * 2);
+ macro_assembler.reset(new RegExpMacroAssemblerARM64(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
#elif V8_TARGET_ARCH_S390
- RegExpMacroAssemblerS390 macro_assembler(isolate, zone, mode,
- (data->capture_count + 1) * 2);
+ macro_assembler.reset(new RegExpMacroAssemblerS390(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
#elif V8_TARGET_ARCH_PPC
- RegExpMacroAssemblerPPC macro_assembler(isolate, zone, mode,
- (data->capture_count + 1) * 2);
+ macro_assembler.reset(new RegExpMacroAssemblerPPC(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
#elif V8_TARGET_ARCH_MIPS
- RegExpMacroAssemblerMIPS macro_assembler(isolate, zone, mode,
- (data->capture_count + 1) * 2);
+ macro_assembler.reset(new RegExpMacroAssemblerMIPS(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
#elif V8_TARGET_ARCH_MIPS64
- RegExpMacroAssemblerMIPS macro_assembler(isolate, zone, mode,
- (data->capture_count + 1) * 2);
+ macro_assembler.reset(new RegExpMacroAssemblerMIPS(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
#else
#error "Unsupported architecture"
#endif
+ } else {
+ DCHECK(FLAG_regexp_interpret_all);
-#else // V8_INTERPRETED_REGEXP
- // Interpreted regexp implementation.
- EmbeddedVector<byte, 1024> codes;
- RegExpMacroAssemblerIrregexp macro_assembler(isolate, codes, zone);
-#endif // V8_INTERPRETED_REGEXP
+ // Interpreted regexp implementation.
+ macro_assembler.reset(new RegExpMacroAssemblerIrregexp(isolate, zone));
+ }
- macro_assembler.set_slow_safe(TooMuchRegExpCode(isolate, pattern));
+ macro_assembler->set_slow_safe(TooMuchRegExpCode(isolate, pattern));
// Inserted here, instead of in Assembler, because it depends on information
// in the AST that isn't replicated in the Node structure.
static const int kMaxBacksearchLimit = 1024;
if (is_end_anchored && !is_start_anchored && !is_sticky &&
max_length < kMaxBacksearchLimit) {
- macro_assembler.SetCurrentPositionFromEnd(max_length);
+ macro_assembler->SetCurrentPositionFromEnd(max_length);
}
if (is_global) {
@@ -6758,17 +6746,17 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
} else if (is_unicode) {
mode = RegExpMacroAssembler::GLOBAL_UNICODE;
}
- macro_assembler.set_global_mode(mode);
+ macro_assembler->set_global_mode(mode);
}
- return compiler.Assemble(isolate, &macro_assembler, node, data->capture_count,
- pattern);
+ return compiler.Assemble(isolate, macro_assembler.get(), node,
+ data->capture_count, pattern);
}
bool RegExpEngine::TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern) {
Heap* heap = isolate->heap();
bool too_much = pattern->length() > RegExpImpl::kRegExpTooLargeToOptimize;
- if (heap->isolate()->total_regexp_code_generated() >
+ if (isolate->total_regexp_code_generated() >
RegExpImpl::kRegExpCompiledLimit &&
heap->CommittedMemoryExecutable() >
RegExpImpl::kRegExpExecutableMemoryLimit) {
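Note on the RegExpEngine::Compile hunk above: it shows the structural cost of making the engine choice dynamic. The preprocessor used to pick one concrete assembler type as a stack local; a runtime choice needs a common base type, so the assembler moves behind std::unique_ptr<RegExpMacroAssembler> and every later call switches from `.` to `->`. The shape, with stand-in class names:

    #include <memory>

    struct MacroAssembler {  // common interface both engines implement
      virtual ~MacroAssembler() = default;
      virtual void set_slow_safe(bool) = 0;
    };
    struct NativeAssembler : MacroAssembler {
      void set_slow_safe(bool) override {}
    };
    struct BytecodeAssembler : MacroAssembler {
      void set_slow_safe(bool) override {}
    };

    std::unique_ptr<MacroAssembler> MakeAssembler(bool interpret_all) {
      if (!interpret_all) return std::make_unique<NativeAssembler>();
      return std::make_unique<BytecodeAssembler>();
    }

Ownership stays local to Compile; Assemble only borrows the assembler, which is why the patch passes macro_assembler.get() rather than transferring the unique_ptr.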
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index dffde6bb73..109dd91606 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -10,6 +10,7 @@
#include "src/objects/js-regexp.h"
#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp-macro-assembler.h"
+#include "src/zone/zone-splay-tree.h"
namespace v8 {
namespace internal {
@@ -53,14 +54,8 @@ inline bool NeedsUnicodeCaseEquivalents(JSRegExp::Flags flags) {
class RegExpImpl {
public:
- // Whether V8 is compiled with native regexp support or not.
- static bool UsesNativeRegExp() {
-#ifdef V8_INTERPRETED_REGEXP
- return false;
-#else
- return true;
-#endif
- }
+ // Whether the irregexp engine generates native code or interpreter bytecode.
+ static bool UsesNativeRegExp() { return !FLAG_regexp_interpret_all; }
// Returns a string representation of a regular expression.
// Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 81f7aa73c8..35303ff1d3 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -18,7 +18,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - t7 : Temporarily stores the index of capture start after a matching pass
@@ -361,7 +360,6 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
Label fallthrough;
- Label success;
// Find length of back-referenced capture.
__ lw(a0, register_location(start_reg));
@@ -867,7 +865,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
RegList regexp_registers = current_input_offset().bit() |
current_character().bit();
__ MultiPush(regexp_registers);
- Label grow_failed;
+
// Call GrowStack(backtrack_stackpointer(), &stack_base)
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, a0);
@@ -1088,7 +1086,7 @@ bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins());
DCHECK(!masm_->options().isolate_independent_code);
int stack_alignment = base::OS::ActivationFrameAlignment();
@@ -1308,8 +1306,6 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
#undef __
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 97cdef8b83..0fabd7e9f6 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -12,7 +12,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone, Mode mode,
@@ -218,9 +217,6 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
Label internal_failure_label_;
};
-#endif // V8_INTERPRETED_REGEXP
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index a92b0f59b8..0d1b591005 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -18,8 +18,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
-
/* clang-format off
*
* This assembler uses the following register assignment convention
@@ -398,7 +396,6 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
Label fallthrough;
- Label success;
// Find length of back-referenced capture.
__ Ld(a0, register_location(start_reg));
@@ -905,7 +902,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
RegList regexp_registers = current_input_offset().bit() |
current_character().bit();
__ MultiPush(regexp_registers);
- Label grow_failed;
+
// Call GrowStack(backtrack_stackpointer(), &stack_base)
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, a0);
@@ -1126,7 +1123,7 @@ bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins());
DCHECK(!masm_->options().isolate_independent_code);
int stack_alignment = base::OS::ActivationFrameAlignment();
@@ -1345,8 +1342,6 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
#undef __
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 8c1275655d..8d2b4fc521 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -12,7 +12,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone, Mode mode,
@@ -223,9 +222,6 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
Label internal_failure_label_;
};
-#endif // V8_INTERPRETED_REGEXP
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index b0f2de4dd7..09df471e52 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -18,7 +18,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - r25: Temporarily stores the index of capture start after a matching pass
@@ -373,7 +372,6 @@ void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
Label fallthrough;
- Label success;
// Find length of back-referenced capture.
__ LoadP(r3, register_location(start_reg), r0);
@@ -909,8 +907,6 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Backtrack stack overflow code.
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
- Label grow_failed;
// Call GrowStack(backtrack_stackpointer(), &stack_base)
static const int num_arguments = 3;
@@ -1098,7 +1094,7 @@ void RegExpMacroAssemblerPPC::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins());
DCHECK(!masm_->options().isolate_independent_code);
int frame_alignment = masm_->ActivationFrameAlignment();
@@ -1337,10 +1333,8 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
#endif
}
-
#undef __
-#endif // V8_INTERPRETED_REGEXP
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 1bbb45885e..c364c54943 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -12,8 +12,6 @@
namespace v8 {
namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone, Mode mode,
@@ -207,7 +205,6 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
const RegList kRegExpCalleeSaved =
1 << 25 | 1 << 26 | 1 << 27 | 1 << 28 | 1 << 29 | 1 << 30 | 1 << 31;
-#endif // V8_INTERPRETED_REGEXP
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 9c39dda64e..22c0ee199a 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -412,8 +412,12 @@ class RegExpQuantifier final : public RegExpTree {
: body_(body),
min_(min),
max_(max),
- min_match_(min * body->min_match()),
quantifier_type_(type) {
+ if (min > 0 && body->min_match() > kInfinity / min) {
+ min_match_ = kInfinity;
+ } else {
+ min_match_ = min * body->min_match();
+ }
if (max > 0 && body->max_match() > kInfinity / max) {
max_match_ = kInfinity;
} else {
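The new min_match_ computation mirrors the overflow guard already used for max_match_: when min * body->min_match() would exceed kInfinity, the product saturates at kInfinity instead of wrapping around. A minimal standalone sketch of this saturating-multiply pattern (the function name and 'limit' parameter are illustrative, not V8 declarations):

    // Saturating multiply: clamp at 'limit' rather than overflowing past it,
    // mirroring the kInfinity guard in the constructor hunk above.
    int ClampedProduct(int count, int per_item, int limit) {
      // Divide first so the multiplication only happens when the result fits.
      if (count > 0 && per_item > limit / count) return limit;
      return count * per_item;
    }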
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
index 7a0aa35e72..2a4d6e5e2f 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
@@ -5,8 +5,6 @@
#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-#ifdef V8_INTERPRETED_REGEXP
-
#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/ast/ast.h"
@@ -59,6 +57,4 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
} // namespace internal
} // namespace v8
-#endif // V8_INTERPRETED_REGEXP
-
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
index eb8d4741f9..eeffb7d262 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifdef V8_INTERPRETED_REGEXP
-
#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/ast/ast.h"
@@ -16,16 +14,14 @@ namespace v8 {
namespace internal {
RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Isolate* isolate,
- Vector<byte> buffer,
Zone* zone)
: RegExpMacroAssembler(isolate, zone),
- buffer_(buffer),
+ buffer_(Vector<byte>::New(1024)),
pc_(0),
- own_buffer_(false),
+ own_buffer_(true),
advance_current_end_(kInvalidPC),
isolate_(isolate) {}
-
RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
if (backtrack_.is_linked()) backtrack_.Unuse();
if (own_buffer_) buffer_.Dispose();
@@ -457,5 +453,3 @@ void RegExpMacroAssemblerIrregexp::Expand() {
} // namespace internal
} // namespace v8
-
-#endif // V8_INTERPRETED_REGEXP
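With this change the bytecode assembler no longer accepts a caller-supplied code buffer; it always allocates a 1024-byte buffer it owns (own_buffer_ = true) and disposes of it in the destructor. A hedged sketch of that own-and-grow idiom (OwnedBuffer is a stand-in class, not V8 code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-in for the owned, growable bytecode buffer: always allocated by
    // the assembler itself (cf. Vector<byte>::New(1024) above) and released
    // when the assembler goes away; no caller-supplied buffer path remains.
    class OwnedBuffer {
     public:
      OwnedBuffer() : bytes_(1024) {}  // fixed initial capacity, like New(1024)
      void Emit(uint8_t b) {
        if (pc_ == bytes_.size()) bytes_.resize(bytes_.size() * 2);  // cf. Expand()
        bytes_[pc_++] = b;
      }
     private:
      std::vector<uint8_t> bytes_;  // RAII replaces the manual Dispose() call
      std::size_t pc_ = 0;
    };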
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
index 74c8526a23..3b693f6c6e 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
@@ -5,8 +5,6 @@
#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
-#ifdef V8_INTERPRETED_REGEXP
-
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
@@ -20,17 +18,10 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
- // If the provided buffer is nullptr, the assembler allocates and grows its
- // own buffer, and buffer_size determines the initial buffer size. The buffer
- // is owned by the assembler and deallocated upon destruction of the
- // assembler.
- //
- // If the provided buffer is not nullptr, the assembler uses the provided
- // buffer for code generation and assumes its size to be buffer_size. If the
- // buffer is too small, a fatal error occurs. No deallocation of the buffer is
- // done upon destruction of the assembler.
- RegExpMacroAssemblerIrregexp(Isolate* isolate, Vector<byte> buffer,
- Zone* zone);
+ // The assembler always allocates and grows its own buffer; the initial
+ // buffer size is fixed at 1024 bytes. The buffer is owned by the assembler
+ // and deallocated upon destruction of the assembler.
+ RegExpMacroAssemblerIrregexp(Isolate* isolate, Zone* zone);
virtual ~RegExpMacroAssemblerIrregexp();
// The byte-code interpreter checks on each push anyway.
virtual int stack_limit_slack() { return 1; }
@@ -129,6 +120,4 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
} // namespace internal
} // namespace v8
-#endif // V8_INTERPRETED_REGEXP
-
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 90f065e94f..323e805fcd 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -108,8 +108,6 @@ bool RegExpMacroAssembler::CheckSpecialCharacterClass(uc16 type,
return false;
}
-#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
-
NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Isolate* isolate,
Zone* zone)
: RegExpMacroAssembler(isolate, zone) {}
@@ -209,14 +207,12 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
return return_value;
}
-NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
- Handle<Code> regexp_code,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index,
- Isolate* isolate) {
-
+// Returns a {Result} sentinel, or the number of successful matches.
+int NativeRegExpMacroAssembler::Match(Handle<Code> regexp_code,
+ Handle<String> subject,
+ int* offsets_vector,
+ int offsets_vector_length,
+ int previous_index, Isolate* isolate) {
DCHECK(subject->IsFlat());
DCHECK_LE(0, previous_index);
DCHECK_LE(previous_index, subject->length());
@@ -255,18 +251,12 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
StringCharacterPosition(subject_ptr, start_offset + slice_offset, no_gc);
int byte_length = char_length << char_size_shift;
const byte* input_end = input_start + byte_length;
- Result res = Execute(*regexp_code,
- *subject,
- start_offset,
- input_start,
- input_end,
- offsets_vector,
- offsets_vector_length,
- isolate);
- return res;
+ return Execute(*regexp_code, *subject, start_offset, input_start, input_end,
+ offsets_vector, offsets_vector_length, isolate);
}
-NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
+// Returns a {Result} sentinel, or the number of successful matches.
+int NativeRegExpMacroAssembler::Execute(
Code code,
String input, // This needs to be the unpacked (sliced, cons) string.
int start_offset, const byte* input_start, const byte* input_end,
@@ -296,7 +286,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
AllowHeapAllocation allow_allocation;
isolate->StackOverflow();
}
- return static_cast<Result>(result);
+ return result;
}
// clang-format off
@@ -361,7 +351,5 @@ Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
return new_stack_base - stack_content_size;
}
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
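Match() and Execute() now return a plain int: non-positive values are Result sentinels, positive values are a count of successful matches (relevant for global regexps). A sketch of how a caller can interpret the new contract (the sentinel values below are chosen to show the shape of the contract, not necessarily V8's exact Result numbering):

    #include <cstdio>

    // Illustrative sentinels only.
    enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };

    // Interpreting the int now returned by Match()/Execute(): non-positive
    // values are sentinels, positive values count successful matches.
    void HandleMatchResult(int result) {
      if (result == EXCEPTION) {
        printf("pending exception\n");
      } else if (result == RETRY) {
        printf("retry requested\n");
      } else if (result == FAILURE) {
        printf("no match\n");
      } else {
        printf("%d successful match(es)\n", result);
      }
    }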
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index f571c3c5a5..228f4a701b 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -192,9 +192,6 @@ class RegExpMacroAssembler {
Zone* zone_;
};
-
-#ifndef V8_INTERPRETED_REGEXP // Avoid compiling unused code.
-
class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
public:
// Type of input string to generate code for.
@@ -215,12 +212,10 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
~NativeRegExpMacroAssembler() override;
bool CanReadUnaligned() override;
- static Result Match(Handle<Code> regexp,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index,
- Isolate* isolate);
+ // Returns a {Result} sentinel, or the number of successful matches.
+ static int Match(Handle<Code> regexp, Handle<String> subject,
+ int* offsets_vector, int offsets_vector_length,
+ int previous_index, Isolate* isolate);
// Called from RegExp if the backtrack stack limit is hit.
// Tries to expand the stack. Returns the new stack-pointer if
@@ -248,13 +243,12 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
return reinterpret_cast<Address>(&word_character_map[0]);
}
- static Result Execute(Code code, String input, int start_offset,
- const byte* input_start, const byte* input_end,
- int* output, int output_size, Isolate* isolate);
+ // Returns a {Result} sentinel, or the number of successful matches.
+ static int Execute(Code code, String input, int start_offset,
+ const byte* input_start, const byte* input_end,
+ int* output, int output_size, Isolate* isolate);
};
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 3da99409c6..97be9fa27b 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -14,6 +14,7 @@
#include "src/regexp/jsregexp.h"
#include "src/regexp/property-sequences.h"
#include "src/utils.h"
+#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uniset.h"
@@ -990,8 +991,12 @@ Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
for (int i = 0; i < named_captures_->length(); i++) {
RegExpCapture* capture = named_captures_->at(i);
- MaybeHandle<String> name = factory->NewStringFromTwoByte(capture->name());
- array->set(i * 2, *name.ToHandleChecked());
+ Vector<const uc16> capture_name(capture->name()->data(),
+ capture->name()->size());
+ // CSA code in ConstructNewResultFromMatchInfo requires these strings to be
+ // internalized so they can be used as property names in the 'exec' results.
+ Handle<String> name = factory->InternalizeTwoByteString(capture_name);
+ array->set(i * 2, *name);
array->set(i * 2 + 1, Smi::FromInt(capture->index()));
}
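Internalized strings live in the isolate's string table, so each distinct character sequence has one canonical object; that is what lets the CSA code treat capture names as property keys cheaply. A toy illustration of canonicalize-on-insert (std::unordered_set stands in for V8's string table; pointers to its elements remain valid across rehashes):

    #include <string>
    #include <unordered_set>

    // Toy "string table": Intern() returns one canonical pointer per distinct
    // string, so later comparisons can use pointer identity instead of
    // comparing characters; this is what makes internalized names cheap keys.
    const std::string* Intern(const std::string& s) {
      static std::unordered_set<std::string> table;
      return &*table.insert(s).first;
    }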
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 56b1e1b708..36bc3e5df6 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -51,9 +51,9 @@ MaybeHandle<Object> RegExpUtils::SetLastIndex(Isolate* isolate,
JSRegExp::cast(*recv)->set_last_index(*value_as_object, SKIP_WRITE_BARRIER);
return recv;
} else {
- return Object::SetProperty(isolate, recv,
- isolate->factory()->lastIndex_string(),
- value_as_object, LanguageMode::kStrict);
+ return Object::SetProperty(
+ isolate, recv, isolate->factory()->lastIndex_string(), value_as_object,
+ StoreOrigin::kMaybeKeyed, Just(kThrowOnError));
}
}
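The slow path of SetLastIndex now passes the error-handling policy to Object::SetProperty explicitly (Just(kThrowOnError)) rather than deriving it from a language mode. A hedged sketch of the idea behind that signature change (std::optional stands in for V8's Maybe<ShouldThrow>; every name here is illustrative):

    #include <cstdio>
    #include <optional>

    enum ShouldThrow { kDontThrow, kThrowOnError };

    // Hypothetical reduced store: the caller states throw-on-error behavior
    // explicitly instead of implying it via a language mode.
    bool SetLastIndexSlow(std::optional<ShouldThrow> should_throw) {
      const bool store_failed = true;  // pretend the property store failed
      if (store_failed && should_throw.value_or(kDontThrow) == kThrowOnError) {
        printf("throwing TypeError\n");  // kThrowOnError: surface the failure
        return false;
      }
      return !store_failed;
    }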
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index d6c966484e..e73caee402 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -19,7 +19,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - r6: Temporarily stores the index of capture start after a matching pass
@@ -357,7 +356,6 @@ void RegExpMacroAssemblerS390::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
Label fallthrough;
- Label success;
// Find length of back-referenced capture.
__ LoadP(r2, register_location(start_reg));
@@ -903,7 +901,6 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
- Label grow_failed;
// Call GrowStack(backtrack_stackpointer(), &stack_base)
static const int num_arguments = 3;
@@ -1073,7 +1070,7 @@ void RegExpMacroAssemblerS390::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins());
DCHECK(!masm_->options().isolate_independent_code);
static constexpr int num_arguments = 3;
@@ -1293,7 +1290,6 @@ void RegExpMacroAssemblerS390::LoadCurrentCharacterUnchecked(int cp_offset,
#undef __
-#endif // V8_INTERPRETED_REGEXP
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 42ce06c494..9bb9f31f15 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -12,7 +12,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone, Mode mode,
@@ -207,7 +206,6 @@ class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler {
const RegList kRegExpCalleeSaved =
1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 | 1 << 11 | 1 << 13;
-#endif // V8_INTERPRETED_REGEXP
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index b196f70a49..856c481b0a 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -17,8 +17,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
-
/*
* This assembler uses the following register assignment convention
* - rdx : Currently loaded character(s) as Latin1 or UC16. Must be loaded
@@ -144,7 +142,7 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
DCHECK_LE(0, reg);
DCHECK_GT(num_registers_, reg);
if (by != 0) {
- __ addp(register_location(reg), Immediate(by));
+ __ addq(register_location(reg), Immediate(by));
}
}
@@ -153,7 +151,7 @@ void RegExpMacroAssemblerX64::Backtrack() {
CheckPreemption();
// Pop Code offset from backtrack stack, add Code and jump to location.
Pop(rbx);
- __ addp(rbx, code_object_pointer());
+ __ addq(rbx, code_object_pointer());
__ jmp(rbx);
}
@@ -176,16 +174,16 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
- __ leap(rax, Operand(rdi, -char_size()));
- __ cmpp(rax, Operand(rbp, kStringStartMinusOne));
+ __ leaq(rax, Operand(rdi, -char_size()));
+ __ cmpq(rax, Operand(rbp, kStringStartMinusOne));
BranchOrBacktrack(equal, on_at_start);
}
void RegExpMacroAssemblerX64::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
- __ leap(rax, Operand(rdi, -char_size() + cp_offset * char_size()));
- __ cmpp(rax, Operand(rbp, kStringStartMinusOne));
+ __ leaq(rax, Operand(rdi, -char_size() + cp_offset * char_size()));
+ __ cmpq(rax, Operand(rbp, kStringStartMinusOne));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -211,7 +209,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
Label fallthrough;
ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture
- __ subp(rbx, rdx); // Length of capture.
+ __ subq(rbx, rdx); // Length of capture.
// -----------------------
// rdx = Start offset of capture.
@@ -243,12 +241,12 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
on_no_match = &backtrack_label_;
}
- __ leap(r9, Operand(rsi, rdx, times_1, 0));
- __ leap(r11, Operand(rsi, rdi, times_1, 0));
+ __ leaq(r9, Operand(rsi, rdx, times_1, 0));
+ __ leaq(r11, Operand(rsi, rdi, times_1, 0));
if (read_backward) {
- __ subp(r11, rbx); // Offset by length when matching backwards.
+ __ subq(r11, rbx); // Offset by length when matching backwards.
}
- __ addp(rbx, r9); // End of capture
+ __ addq(rbx, r9); // End of capture
// ---------------------
// r11 - current input character address
// r9 - current capture character address
@@ -266,8 +264,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Mismatch, try case-insensitive match (converting letters to lower-case).
// I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
// a match.
- __ orp(rax, Immediate(0x20)); // Convert match character to lower-case.
- __ orp(rdx, Immediate(0x20)); // Convert capture character to lower-case.
+ __ orq(rax, Immediate(0x20)); // Convert match character to lower-case.
+ __ orq(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ cmpb(rax, rdx);
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
@@ -281,14 +279,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(equal, on_no_match);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
- __ addp(r11, Immediate(1));
- __ addp(r9, Immediate(1));
+ __ addq(r11, Immediate(1));
+ __ addq(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
- __ cmpp(r9, rbx);
+ __ cmpq(r9, rbx);
__ j(below, &loop);
// Compute new value of character position after the matched part.
- __ movp(rdi, r11);
+ __ movq(rdi, r11);
__ subq(rdi, rsi);
if (read_backward) {
// Subtract match length if we matched backward.
@@ -317,9 +315,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
DCHECK(rcx == arg_reg_1);
DCHECK(rdx == arg_reg_2);
// Compute and set byte_offset1 (start of capture).
- __ leap(rcx, Operand(rsi, rdx, times_1, 0));
+ __ leaq(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ leap(rdx, Operand(rsi, rdi, times_1, 0));
+ __ leaq(rdx, Operand(rsi, rdi, times_1, 0));
if (read_backward) {
__ subq(rdx, rbx);
}
@@ -327,22 +325,22 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
DCHECK(rdi == arg_reg_1);
DCHECK(rsi == arg_reg_2);
// Compute byte_offset2 (current position = rsi+rdi).
- __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ leaq(rax, Operand(rsi, rdi, times_1, 0));
// Compute and set byte_offset1 (start of capture).
- __ leap(rdi, Operand(rsi, rdx, times_1, 0));
+ __ leaq(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ movp(rsi, rax);
+ __ movq(rsi, rax);
if (read_backward) {
__ subq(rsi, rbx);
}
#endif // _WIN64
// Set byte_length.
- __ movp(arg_reg_3, rbx);
+ __ movq(arg_reg_3, rbx);
// Isolate.
#ifdef V8_INTL_SUPPORT
if (unicode) {
- __ movp(arg_reg_4, Immediate(0));
+ __ movq(arg_reg_4, Immediate(0));
} else // NOLINT
#endif // V8_INTL_SUPPORT
{
@@ -366,7 +364,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
#endif
// Check if function returned non-zero for success or zero for failure.
- __ testp(rax, rax);
+ __ testq(rax, rax);
BranchOrBacktrack(zero, on_no_match);
// On success, advance position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
@@ -388,7 +386,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(int start_reg,
// Find length of back-referenced capture.
ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
ReadPositionFromRegister(rax, start_reg + 1); // Offset of end of capture
- __ subp(rax, rdx); // Length to check.
+ __ subq(rax, rdx); // Length to check.
// At this point, the capture registers are either both set or both cleared.
// If the capture length is zero, then the capture is either empty or cleared.
@@ -411,12 +409,12 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(int start_reg,
}
// Compute pointers to match string and capture string
- __ leap(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
+ __ leaq(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
if (read_backward) {
__ subq(rbx, rax); // Offset by length when matching backwards.
}
- __ addp(rdx, rsi); // Start of capture.
- __ leap(r9, Operand(rdx, rax, times_1, 0)); // End of capture
+ __ addq(rdx, rsi); // Start of capture.
+ __ leaq(r9, Operand(rdx, rax, times_1, 0)); // End of capture
// -----------------------
// rbx - current capture character address.
@@ -435,15 +433,15 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(int start_reg,
}
BranchOrBacktrack(not_equal, on_no_match);
// Increment pointers into capture and match string.
- __ addp(rbx, Immediate(char_size()));
- __ addp(rdx, Immediate(char_size()));
+ __ addq(rbx, Immediate(char_size()));
+ __ addq(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
- __ cmpp(rdx, r9);
+ __ cmpq(rdx, r9);
__ j(below, &loop);
// Success.
// Set current character position to position after match.
- __ movp(rdi, rbx);
+ __ movq(rdi, rbx);
__ subq(rdi, rsi);
if (read_backward) {
// Subtract match length if we matched backward.
@@ -469,7 +467,7 @@ void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ andp(rax, current_character());
+ __ andq(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(equal, on_equal);
@@ -483,7 +481,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ andp(rax, current_character());
+ __ andq(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(not_equal, on_not_equal);
@@ -496,8 +494,8 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
uc16 mask,
Label* on_not_equal) {
DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
- __ leap(rax, Operand(current_character(), -minus));
- __ andp(rax, Immediate(mask));
+ __ leal(rax, Operand(current_character(), -minus));
+ __ andl(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
BranchOrBacktrack(not_equal, on_not_equal);
}
@@ -529,8 +527,8 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
__ Move(rax, table);
Register index = current_character();
if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
- __ movp(rbx, current_character());
- __ andp(rbx, Immediate(kTableMask));
+ __ movq(rbx, current_character());
+ __ andq(rbx, Immediate(kTableMask));
index = rbx;
}
__ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
@@ -543,8 +541,8 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check, using the sequence:
- // leap(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
- // cmp(rax, Immediate(max - min))
+ // leal(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
+ // cmpl(rax, Immediate(max - min))
switch (type) {
case 's':
// Match space-characters
@@ -554,7 +552,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success, Label::kNear);
// Check range 0x09..0x0D
- __ leap(rax, Operand(current_character(), -'\t'));
+ __ leal(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t'));
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
@@ -569,20 +567,20 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- __ leap(rax, Operand(current_character(), -'0'));
+ __ leal(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
- __ leap(rax, Operand(current_character(), -'0'));
+ __ leal(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xorp(rax, Immediate(0x01));
+ __ xorl(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
__ subl(rax, Immediate(0x0B));
__ cmpl(rax, Immediate(0x0C - 0x0B));
@@ -600,7 +598,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
case 'n': {
// Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xorp(rax, Immediate(0x01));
+ __ xorl(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
__ subl(rax, Immediate(0x0B));
__ cmpl(rax, Immediate(0x0C - 0x0B));
@@ -682,7 +680,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Actually emit code to start a new stack frame.
__ pushq(rbp);
- __ movp(rbp, rsp);
+ __ movq(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
#ifdef _WIN64
@@ -699,12 +697,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#else
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
// Push register parameters on stack for reference.
- DCHECK_EQ(kInputString, -1 * kRegisterSize);
- DCHECK_EQ(kStartIndex, -2 * kRegisterSize);
- DCHECK_EQ(kInputStart, -3 * kRegisterSize);
- DCHECK_EQ(kInputEnd, -4 * kRegisterSize);
- DCHECK_EQ(kRegisterOutput, -5 * kRegisterSize);
- DCHECK_EQ(kNumOutputRegisters, -6 * kRegisterSize);
+ DCHECK_EQ(kInputString, -1 * kSystemPointerSize);
+ DCHECK_EQ(kStartIndex, -2 * kSystemPointerSize);
+ DCHECK_EQ(kInputStart, -3 * kSystemPointerSize);
+ DCHECK_EQ(kInputEnd, -4 * kSystemPointerSize);
+ DCHECK_EQ(kRegisterOutput, -5 * kSystemPointerSize);
+ DCHECK_EQ(kNumOutputRegisters, -6 * kSystemPointerSize);
__ pushq(rdi);
__ pushq(rsi);
__ pushq(rdx);
@@ -724,14 +722,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
- __ movp(rcx, rsp);
+ __ movq(rcx, rsp);
__ Move(kScratchRegister, stack_limit);
- __ subp(rcx, Operand(kScratchRegister, 0));
+ __ subq(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ cmpp(rcx, Immediate(num_registers_ * kSystemPointerSize));
+ __ cmpq(rcx, Immediate(num_registers_ * kSystemPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
@@ -741,32 +739,32 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testp(rax, rax);
+ __ testq(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &return_rax);
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ subp(rsp, Immediate(num_registers_ * kSystemPointerSize));
+ __ subq(rsp, Immediate(num_registers_ * kSystemPointerSize));
// Load string length.
- __ movp(rsi, Operand(rbp, kInputEnd));
+ __ movq(rsi, Operand(rbp, kInputEnd));
// Load input position.
- __ movp(rdi, Operand(rbp, kInputStart));
+ __ movq(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
__ subq(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
- __ movp(rbx, Operand(rbp, kStartIndex));
+ __ movq(rbx, Operand(rbp, kStartIndex));
__ negq(rbx);
if (mode_ == UC16) {
- __ leap(rax, Operand(rdi, rbx, times_2, -char_size()));
+ __ leaq(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
- __ leap(rax, Operand(rdi, rbx, times_1, -char_size()));
+ __ leaq(rax, Operand(rdi, rbx, times_1, -char_size()));
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ movp(Operand(rbp, kStringStartMinusOne), rax);
+ __ movq(Operand(rbp, kStringStartMinusOne), rax);
#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
@@ -776,7 +774,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
for (int i = num_saved_registers_ + kRegistersPerPage - 1;
i < num_registers_;
i += kRegistersPerPage) {
- __ movp(register_location(i), rax); // One write every page.
+ __ movq(register_location(i), rax); // One write every page.
}
#endif // V8_OS_WIN
@@ -805,20 +803,20 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ Set(rcx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
- __ movp(Operand(rbp, rcx, times_1, 0), rax);
+ __ movq(Operand(rbp, rcx, times_1, 0), rax);
__ subq(rcx, Immediate(kSystemPointerSize));
__ cmpq(rcx, Immediate(kRegisterZero -
num_saved_registers_ * kSystemPointerSize));
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
- __ movp(register_location(i), rax);
+ __ movq(register_location(i), rax);
}
}
}
// Initialize backtrack stack pointer.
- __ movp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
__ jmp(&start_label_);
@@ -828,24 +826,24 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ movp(rdx, Operand(rbp, kStartIndex));
- __ movp(rbx, Operand(rbp, kRegisterOutput));
- __ movp(rcx, Operand(rbp, kInputEnd));
- __ subp(rcx, Operand(rbp, kInputStart));
+ __ movq(rdx, Operand(rbp, kStartIndex));
+ __ movq(rbx, Operand(rbp, kRegisterOutput));
+ __ movq(rcx, Operand(rbp, kInputEnd));
+ __ subq(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
- __ leap(rcx, Operand(rcx, rdx, times_2, 0));
+ __ leaq(rcx, Operand(rcx, rdx, times_2, 0));
} else {
- __ addp(rcx, rdx);
+ __ addq(rcx, rdx);
}
for (int i = 0; i < num_saved_registers_; i++) {
- __ movp(rax, register_location(i));
+ __ movq(rax, register_location(i));
if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in rdx for the zero-length check later.
- __ movp(rdx, rax);
+ __ movq(rdx, rax);
}
- __ addp(rax, rcx); // Convert to index from start, not end.
+ __ addq(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
- __ sarp(rax, Immediate(1)); // Convert byte index to character index.
+ __ sarq(rax, Immediate(1)); // Convert byte index to character index.
}
__ movl(Operand(rbx, i * kIntSize), rax);
}
@@ -854,31 +852,31 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
- __ incp(Operand(rbp, kSuccessfulCaptures));
+ __ incq(Operand(rbp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
- __ subp(rcx, Immediate(num_saved_registers_));
+ __ subq(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
- __ cmpp(rcx, Immediate(num_saved_registers_));
+ __ cmpq(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
- __ movp(Operand(rbp, kNumOutputRegisters), rcx);
+ __ movq(Operand(rbp, kNumOutputRegisters), rcx);
// Advance the location for output.
- __ addp(Operand(rbp, kRegisterOutput),
+ __ addq(Operand(rbp, kRegisterOutput),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
- __ movp(rax, Operand(rbp, kStringStartMinusOne));
+ __ movq(rax, Operand(rbp, kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
- __ cmpp(rdi, rdx);
+ __ cmpq(rdi, rdx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// rdi (offset from the end) is zero if we already reached the end.
- __ testp(rdi, rdi);
+ __ testq(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
Label advance;
@@ -893,29 +891,29 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ jmp(&load_char_start_regexp);
} else {
- __ movp(rax, Immediate(SUCCESS));
+ __ movq(rax, Immediate(SUCCESS));
}
}
__ bind(&exit_label_);
if (global()) {
// Return the number of successful captures.
- __ movp(rax, Operand(rbp, kSuccessfulCaptures));
+ __ movq(rax, Operand(rbp, kSuccessfulCaptures));
}
__ bind(&return_rax);
#ifdef _WIN64
// Restore callee save registers.
- __ leap(rsp, Operand(rbp, kLastCalleeSaveRegister));
+ __ leaq(rsp, Operand(rbp, kLastCalleeSaveRegister));
__ popq(rbx);
__ popq(rdi);
__ popq(rsi);
// Stack now at rbp.
#else
// Restore callee save register.
- __ movp(rbx, Operand(rbp, kBackup_rbx));
+ __ movq(rbx, Operand(rbp, kBackup_rbx));
// Skip rsp to rbp.
- __ movp(rsp, rbp);
+ __ movq(rsp, rbp);
#endif
// Exit function frame, restore previous one.
__ popq(rbp);
@@ -937,7 +935,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ pushq(rdi);
CallCheckStackGuardState();
- __ testp(rax, rax);
+ __ testq(rax, rax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &return_rax);
@@ -947,7 +945,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ popq(rdi);
__ popq(backtrack_stackpointer());
// String might have moved: Reload rsi from frame.
- __ movp(rsi, Operand(rbp, kInputEnd));
+ __ movq(rsi, Operand(rbp, kInputEnd));
SafeReturn();
}
@@ -956,7 +954,6 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
- Label grow_failed;
// Save registers before calling C function
#ifndef _WIN64
// Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
@@ -970,12 +967,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
- __ leap(rdx, Operand(rbp, kStackHighEnd)); // Second argument
+ __ leaq(rdx, Operand(rbp, kStackHighEnd)); // Second argument
__ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else
// AMD64 ABI passes parameters in rdi, rsi, rdx.
- __ movp(rdi, backtrack_stackpointer()); // First argument.
- __ leap(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
+ __ movq(rdi, backtrack_stackpointer()); // First argument.
+ __ leaq(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif
ExternalReference grow_stack =
@@ -983,10 +980,10 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If nullptr is returned, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ testp(rax, rax);
+ __ testq(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
- __ movp(backtrack_stackpointer(), rax);
+ __ movq(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
@@ -1024,7 +1021,7 @@ void RegExpMacroAssemblerX64::GoTo(Label* to) {
void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
- __ cmpp(register_location(reg), Immediate(comparand));
+ __ cmpq(register_location(reg), Immediate(comparand));
BranchOrBacktrack(greater_equal, if_ge);
}
@@ -1032,14 +1029,14 @@ void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
- __ cmpp(register_location(reg), Immediate(comparand));
+ __ cmpq(register_location(reg), Immediate(comparand));
BranchOrBacktrack(less, if_lt);
}
void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
Label* if_eq) {
- __ cmpp(rdi, register_location(reg));
+ __ cmpq(rdi, register_location(reg));
BranchOrBacktrack(equal, if_eq);
}
@@ -1073,7 +1070,7 @@ void RegExpMacroAssemblerX64::PopCurrentPosition() {
void RegExpMacroAssemblerX64::PopRegister(int register_index) {
Pop(rax);
- __ movp(register_location(register_index), rax);
+ __ movq(register_location(register_index), rax);
}
@@ -1090,43 +1087,30 @@ void RegExpMacroAssemblerX64::PushCurrentPosition() {
void RegExpMacroAssemblerX64::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
- __ movp(rax, register_location(register_index));
+ __ movq(rax, register_location(register_index));
Push(rax);
if (check_stack_limit) CheckStackLimit();
}
-STATIC_ASSERT(kSystemPointerSize == kInt64Size ||
- kSystemPointerSize == kInt32Size);
-
void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
- if (kSystemPointerSize == kInt64Size) {
- __ movq(rdi, register_location(reg));
- } else {
- // Need sign extension for x32 as rdi might be used as an index register.
- __ movsxlq(rdi, register_location(reg));
- }
+ __ movq(rdi, register_location(reg));
}
void RegExpMacroAssemblerX64::ReadPositionFromRegister(Register dst, int reg) {
- if (kSystemPointerSize == kInt64Size) {
- __ movq(dst, register_location(reg));
- } else {
- // Need sign extension for x32 as dst might be used as an index register.
- __ movsxlq(dst, register_location(reg));
- }
+ __ movq(dst, register_location(reg));
}
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
- __ movp(backtrack_stackpointer(), register_location(reg));
- __ addp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ movq(backtrack_stackpointer(), register_location(reg));
+ __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
}
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
- __ cmpp(rdi, Immediate(-by * char_size()));
+ __ cmpq(rdi, Immediate(-by * char_size()));
__ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
@@ -1139,7 +1123,7 @@ void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
- __ movp(register_location(register_index), Immediate(to));
+ __ movq(register_location(register_index), Immediate(to));
}
@@ -1152,27 +1136,27 @@ bool RegExpMacroAssemblerX64::Succeed() {
void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
if (cp_offset == 0) {
- __ movp(register_location(reg), rdi);
+ __ movq(register_location(reg), rdi);
} else {
- __ leap(rax, Operand(rdi, cp_offset * char_size()));
- __ movp(register_location(reg), rax);
+ __ leaq(rax, Operand(rdi, cp_offset * char_size()));
+ __ movq(register_location(reg), rax);
}
}
void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ movp(rax, Operand(rbp, kStringStartMinusOne));
+ __ movq(rax, Operand(rbp, kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
- __ movp(register_location(reg), rax);
+ __ movq(register_location(reg), rax);
}
}
void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
- __ movp(rax, backtrack_stackpointer());
- __ subp(rax, Operand(rbp, kStackHighEnd));
- __ movp(register_location(reg), rax);
+ __ movq(rax, backtrack_stackpointer());
+ __ subq(rax, Operand(rbp, kStackHighEnd));
+ __ movq(register_location(reg), rax);
}
@@ -1185,20 +1169,20 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Second argument: Code of self. (Do this before overwriting r8).
- __ movp(rdx, code_object_pointer());
+ __ movq(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
- __ movp(r8, rbp);
+ __ movq(r8, rbp);
// First argument: Next address on the stack (will be address of
// return address).
- __ leap(rcx, Operand(rsp, -kSystemPointerSize));
+ __ leaq(rcx, Operand(rsp, -kSystemPointerSize));
#else
// Third argument: RegExp code frame pointer.
- __ movp(rdx, rbp);
+ __ movq(rdx, rbp);
// Second argument: Code of self.
- __ movp(rsi, code_object_pointer());
+ __ movq(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
- __ leap(rdi, Operand(rsp, -kRegisterSize));
+ __ leaq(rdi, Operand(rsp, -kSystemPointerSize));
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state(isolate());
@@ -1247,8 +1231,8 @@ void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
__ cmpl(rdi, Immediate(-cp_offset * char_size()));
BranchOrBacktrack(greater_equal, on_outside_input);
} else {
- __ leap(rax, Operand(rdi, cp_offset * char_size()));
- __ cmpp(rax, Operand(rbp, kStringStartMinusOne));
+ __ leaq(rax, Operand(rdi, cp_offset * char_size()));
+ __ cmpq(rax, Operand(rbp, kStringStartMinusOne));
BranchOrBacktrack(less_equal, on_outside_input);
}
}
@@ -1279,12 +1263,12 @@ void RegExpMacroAssemblerX64::SafeCall(Label* to) {
void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
__ bind(label);
- __ subp(Operand(rsp, 0), code_object_pointer());
+ __ subq(Operand(rsp, 0), code_object_pointer());
}
void RegExpMacroAssemblerX64::SafeReturn() {
- __ addp(Operand(rsp, 0), code_object_pointer());
+ __ addq(Operand(rsp, 0), code_object_pointer());
__ ret(0);
}
@@ -1292,14 +1276,14 @@ void RegExpMacroAssemblerX64::SafeReturn() {
void RegExpMacroAssemblerX64::Push(Register source) {
DCHECK(source != backtrack_stackpointer());
// Notice: This updates flags, unlike normal Push.
- __ subp(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subq(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerX64::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
- __ subp(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subq(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), value);
}
@@ -1322,7 +1306,7 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
- __ subp(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subq(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
MarkPositionForCodeRelativeFixup();
}
@@ -1332,12 +1316,12 @@ void RegExpMacroAssemblerX64::Pop(Register target) {
DCHECK(target != backtrack_stackpointer());
__ movsxlq(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
- __ addp(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addq(backtrack_stackpointer(), Immediate(kIntSize));
}
void RegExpMacroAssemblerX64::Drop() {
- __ addp(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addq(backtrack_stackpointer(), Immediate(kIntSize));
}
@@ -1347,7 +1331,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpp(rsp, rax);
+ __ cmpq(rsp, rax);
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
@@ -1361,7 +1345,7 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpp(backtrack_stackpointer(), rax);
+ __ cmpq(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
@@ -1396,8 +1380,6 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
#undef __
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 10ef0b5035..8d747c9d2a 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -13,8 +13,6 @@
namespace v8 {
namespace internal {
-#ifndef V8_INTERPRETED_REGEXP
-
class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerX64(Isolate* isolate, Zone* zone, Mode mode,
@@ -90,8 +88,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Offsets from rbp of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kRegisterSize;
- static const int kFrameAlign = kReturn_eip + kRegisterSize;
+ static const int kReturn_eip = kFramePointer + kSystemPointerSize;
+ static const int kFrameAlign = kReturn_eip + kSystemPointerSize;
#ifdef _WIN64
// Parameters (first four passed as registers, but with room on stack).
@@ -100,50 +98,50 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// use this space to store the register passed parameters.
static const int kInputString = kFrameAlign;
// StartIndex is passed as a 32-bit int.
- static const int kStartIndex = kInputString + kRegisterSize;
- static const int kInputStart = kStartIndex + kRegisterSize;
- static const int kInputEnd = kInputStart + kRegisterSize;
- static const int kRegisterOutput = kInputEnd + kRegisterSize;
+ static const int kStartIndex = kInputString + kSystemPointerSize;
+ static const int kInputStart = kStartIndex + kSystemPointerSize;
+ static const int kInputEnd = kInputStart + kSystemPointerSize;
+ static const int kRegisterOutput = kInputEnd + kSystemPointerSize;
// For the case of a global regular expression, we have room to store at least
// one set of capture results. For the case of a non-global regexp, we ignore
// this value. NumOutputRegisters is passed as a 32-bit value. The upper
// 32 bits of this 64-bit stack slot may contain garbage.
- static const int kNumOutputRegisters = kRegisterOutput + kRegisterSize;
- static const int kStackHighEnd = kNumOutputRegisters + kRegisterSize;
+ static const int kNumOutputRegisters = kRegisterOutput + kSystemPointerSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kSystemPointerSize;
// DirectCall is passed as a 32-bit int (values 0 or 1).
- static const int kDirectCall = kStackHighEnd + kRegisterSize;
- static const int kIsolate = kDirectCall + kRegisterSize;
+ static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
+ static const int kIsolate = kDirectCall + kSystemPointerSize;
#else
// In the AMD64 ABI calling convention, the first six integer parameters
// are passed in registers, and the caller must allocate space on the stack
// if it wants them stored. We push the parameters after the frame pointer.
- static const int kInputString = kFramePointer - kRegisterSize;
- static const int kStartIndex = kInputString - kRegisterSize;
- static const int kInputStart = kStartIndex - kRegisterSize;
- static const int kInputEnd = kInputStart - kRegisterSize;
- static const int kRegisterOutput = kInputEnd - kRegisterSize;
+ static const int kInputString = kFramePointer - kSystemPointerSize;
+ static const int kStartIndex = kInputString - kSystemPointerSize;
+ static const int kInputStart = kStartIndex - kSystemPointerSize;
+ static const int kInputEnd = kInputStart - kSystemPointerSize;
+ static const int kRegisterOutput = kInputEnd - kSystemPointerSize;
// For the case of a global regular expression, we have room to store at least
// one set of capture results. For the case of a non-global regexp, we ignore
// this value.
- static const int kNumOutputRegisters = kRegisterOutput - kRegisterSize;
+ static const int kNumOutputRegisters = kRegisterOutput - kSystemPointerSize;
static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kRegisterSize;
- static const int kIsolate = kDirectCall + kRegisterSize;
+ static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
+ static const int kIsolate = kDirectCall + kSystemPointerSize;
#endif
#ifdef _WIN64
// The Microsoft calling convention has three callee-saved registers
// (that we use). We push these after the frame pointer.
- static const int kBackup_rsi = kFramePointer - kRegisterSize;
- static const int kBackup_rdi = kBackup_rsi - kRegisterSize;
- static const int kBackup_rbx = kBackup_rdi - kRegisterSize;
+ static const int kBackup_rsi = kFramePointer - kSystemPointerSize;
+ static const int kBackup_rdi = kBackup_rsi - kSystemPointerSize;
+ static const int kBackup_rbx = kBackup_rdi - kSystemPointerSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#else
// The AMD64 calling convention has only one callee-saved register that
// we use. We push this after the frame pointer (and after the
// parameters).
- static const int kBackup_rbx = kNumOutputRegisters - kRegisterSize;
+ static const int kBackup_rbx = kNumOutputRegisters - kSystemPointerSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
@@ -255,8 +253,6 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
Label stack_overflow_label_;
};
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
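All of the frame-layout constants above now derive from kSystemPointerSize rather than the old kRegisterSize, which could differ under the abandoned x32 ABI. A runnable sketch of the slot arithmetic for the AMD64 (non-Windows) branch (assumes 8-byte pointers; the constants are stand-ins for those in the diff):

    #include <cstdio>

    constexpr int kSystemPointerSize = 8;  // x64: pointers are 8 bytes
    constexpr int kFramePointer = 0;
    // Parameter slots pushed below the frame pointer, one pointer apart.
    constexpr int kInputString = kFramePointer - kSystemPointerSize;
    constexpr int kStartIndex = kInputString - kSystemPointerSize;
    constexpr int kInputStart = kStartIndex - kSystemPointerSize;

    int main() {
      printf("%d %d %d\n", kInputString, kStartIndex, kInputStart);  // -8 -16 -24
      return 0;
    }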
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 7a70d432d3..e7f4ada1e4 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -92,7 +92,7 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
};
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
- GetDefaultRegisterConfiguration);
+ GetDefaultRegisterConfiguration)
// Allocatable registers with the masking register removed.
class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
@@ -128,7 +128,7 @@ int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
[kMaxAllocatableGeneralRegisterCount - 1];
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultPoisoningRegisterConfiguration,
- GetDefaultPoisoningRegisterConfiguration);
+ GetDefaultPoisoningRegisterConfiguration)
// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers the architecture supports, which results in generating assembly
diff --git a/deps/v8/src/reloc-info.h b/deps/v8/src/reloc-info.h
index 26ab7b084e..33710173ac 100644
--- a/deps/v8/src/reloc-info.h
+++ b/deps/v8/src/reloc-info.h
@@ -5,8 +5,8 @@
#ifndef V8_RELOC_INFO_H_
#define V8_RELOC_INFO_H_
+#include "src/flush-instruction-cache.h"
#include "src/globals.h"
-#include "src/objects.h"
#include "src/objects/code.h"
namespace v8 {
@@ -41,8 +41,8 @@ class RelocInfo {
static const char* const kFillerCommentString;
// The minimum size of a comment is equal to two bytes for the extra tagged
- // pc and kPointerSize for the actual pointer to the comment.
- static const int kMinRelocCommentSize = 2 + kPointerSize;
+ // pc and kSystemPointerSize for the actual pointer to the comment.
+ static const int kMinRelocCommentSize = 2 + kSystemPointerSize;
// The maximum size for a call instruction including pc-jump.
static const int kMaxCallSize = 6;
@@ -278,7 +278,22 @@ class RelocInfo {
V8_INLINE void WipeOut();
template <typename ObjectVisitor>
- inline void Visit(ObjectVisitor* v);
+ void Visit(ObjectVisitor* visitor) {
+ Mode mode = rmode();
+ if (IsEmbeddedObject(mode)) {
+ visitor->VisitEmbeddedPointer(host(), this);
+ } else if (IsCodeTargetMode(mode)) {
+ visitor->VisitCodeTarget(host(), this);
+ } else if (IsExternalReference(mode)) {
+ visitor->VisitExternalReference(host(), this);
+ } else if (IsInternalReference(mode) || IsInternalReferenceEncoded(mode)) {
+ visitor->VisitInternalReference(host(), this);
+ } else if (IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(host(), this);
+ } else if (IsOffHeapTarget(mode)) {
+ visitor->VisitOffHeapTarget(host(), this);
+ }
+ }
// Check whether the given code contains relocation information that
// either is position-relative or movable by the garbage collector.
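Visit() moves from an out-of-line template to this inline definition, which simply dispatches on the relocation mode to the matching visitor hook. A reduced sketch of the same mode-to-callback dispatch (the Mode and Visitor types here are invented for illustration):

    #include <cstdio>

    enum Mode { kEmbeddedObject, kCodeTarget, kExternalReference };

    struct Visitor {
      virtual void VisitEmbeddedPointer() { printf("embedded pointer\n"); }
      virtual void VisitCodeTarget() { printf("code target\n"); }
      virtual void VisitExternalReference() { printf("external reference\n"); }
      virtual ~Visitor() = default;
    };

    // One branch per relocation mode, like RelocInfo::Visit(); modes without
    // a matching hook simply fall through and visit nothing.
    void Visit(Mode mode, Visitor* visitor) {
      if (mode == kEmbeddedObject) {
        visitor->VisitEmbeddedPointer();
      } else if (mode == kCodeTarget) {
        visitor->VisitCodeTarget();
      } else if (mode == kExternalReference) {
        visitor->VisitExternalReference();
      }
    }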
@@ -337,7 +352,7 @@ class RelocInfoWriter {
// Max size (bytes) of a written RelocInfo. Longest encoding is
// ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
- static constexpr int kMaxSize = 1 + 4 + 1 + 1 + kPointerSize;
+ static constexpr int kMaxSize = 1 + 4 + 1 + 1 + kSystemPointerSize;
private:
inline uint32_t WriteLongPCJump(uint32_t pc_delta);
@@ -379,7 +394,7 @@ class RelocIterator : public Malloced {
explicit RelocIterator(Vector<byte> instructions,
Vector<const byte> reloc_info, Address const_pool,
int mode_mask = -1);
- RelocIterator(RelocIterator&&) = default;
+ RelocIterator(RelocIterator&&) V8_NOEXCEPT = default;
// Iteration
bool done() const { return done_; }
diff --git a/deps/v8/src/roots-inl.h b/deps/v8/src/roots-inl.h
index 0eadb79555..cae3d37a39 100644
--- a/deps/v8/src/roots-inl.h
+++ b/deps/v8/src/roots-inl.h
@@ -9,13 +9,18 @@
#include "src/feedback-vector.h"
#include "src/handles.h"
-#include "src/heap/heap-inl.h"
+#include "src/isolate.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/heap-number.h"
#include "src/objects/literal-objects.h"
#include "src/objects/map.h"
+#include "src/objects/oddball.h"
+#include "src/objects/property-array.h"
+#include "src/objects/property-cell.h"
#include "src/objects/scope-info.h"
#include "src/objects/slots.h"
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
@@ -52,17 +57,24 @@ bool RootsTable::IsRootHandle(Handle<T> handle, RootIndex* index) const {
}
ReadOnlyRoots::ReadOnlyRoots(Heap* heap)
- : roots_table_(heap->isolate()->roots_table()) {}
+ : roots_table_(Isolate::FromHeap(heap)->roots_table()) {}
ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate)
: roots_table_(isolate->roots_table()) {}
-#define ROOT_ACCESSOR(Type, name, CamelName) \
- Type ReadOnlyRoots::name() const { \
- return Type::cast(Object(roots_table_[RootIndex::k##CamelName])); \
- } \
- Handle<Type> ReadOnlyRoots::name##_handle() const { \
- return Handle<Type>(&roots_table_[RootIndex::k##CamelName]); \
+// We use unchecked_cast below because we trust our read-only roots to
+// have the right type, and to avoid the heavy #includes that would be
+// required for checked casts.
+
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Type ReadOnlyRoots::name() const { \
+ DCHECK(CheckType(RootIndex::k##CamelName)); \
+ return Type::unchecked_cast( \
+ Object(roots_table_[RootIndex::k##CamelName])); \
+ } \
+ Handle<Type> ReadOnlyRoots::name##_handle() const { \
+ DCHECK(CheckType(RootIndex::k##CamelName)); \
+ return Handle<Type>(&roots_table_[RootIndex::k##CamelName]); \
}
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
@@ -70,18 +82,22 @@ READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
Map ReadOnlyRoots::MapForFixedTypedArray(ExternalArrayType array_type) {
RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(array_type);
- return Map::cast(Object(roots_table_[root_index]));
+ DCHECK(CheckType(root_index));
+ return Map::unchecked_cast(Object(roots_table_[root_index]));
}
Map ReadOnlyRoots::MapForFixedTypedArray(ElementsKind elements_kind) {
RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(elements_kind);
- return Map::cast(Object(roots_table_[root_index]));
+ DCHECK(CheckType(root_index));
+ return Map::unchecked_cast(Object(roots_table_[root_index]));
}
-FixedTypedArrayBase ReadOnlyRoots::EmptyFixedTypedArrayForMap(const Map map) {
+FixedTypedArrayBase ReadOnlyRoots::EmptyFixedTypedArrayForTypedArray(
+ ElementsKind elements_kind) {
RootIndex root_index =
- RootsTable::RootIndexForEmptyFixedTypedArray(map->elements_kind());
- return FixedTypedArrayBase::cast(Object(roots_table_[root_index]));
+ RootsTable::RootIndexForEmptyFixedTypedArray(elements_kind);
+ DCHECK(CheckType(root_index));
+ return FixedTypedArrayBase::unchecked_cast(Object(roots_table_[root_index]));
}
} // namespace internal
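// [Editor's sketch] The ROOT_ACCESSOR bodies above pair a debug-only
// DCHECK(CheckType(...)) with Type::unchecked_cast: debug builds verify each
// root's type, while release builds keep only the cheap unchecked cast and
// avoid the heavy #includes a checked cast would pull in. A rough standalone
// model of that pattern; DemoRoots and its members are hypothetical.
#include <cassert>
#include <string>

struct DemoRoots {
  std::string table[2];

  bool CheckType(int index) const {  // Debug-only predicate.
    return index >= 0 && index < 2;
  }

  const std::string& at(int index) const {
    assert(CheckType(index));  // Compiled out under NDEBUG, like DCHECK.
    return table[index];       // Unchecked access on the release fast path.
  }
};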
diff --git a/deps/v8/src/roots.cc b/deps/v8/src/roots.cc
index 8a0ed69895..65aadbca17 100644
--- a/deps/v8/src/roots.cc
+++ b/deps/v8/src/roots.cc
@@ -5,6 +5,7 @@
#include "src/roots.h"
#include "src/elements-kind.h"
+#include "src/objects-inl.h"
#include "src/visitors.h"
namespace v8 {
@@ -65,5 +66,24 @@ void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
visitor->Synchronize(VisitorSynchronization::kReadOnlyRootList);
}
+#ifdef DEBUG
+
+bool ReadOnlyRoots::CheckType(RootIndex index) const {
+ Object root(roots_table_[index]);
+ switch (index) {
+#define CHECKTYPE(Type, name, CamelName) \
+ case RootIndex::k##CamelName: \
+ return root->Is##Type();
+ READ_ONLY_ROOT_LIST(CHECKTYPE)
+#undef CHECKTYPE
+
+ default:
+ UNREACHABLE();
+ return false;
+ }
+}
+
+#endif // DEBUG
+
} // namespace internal
} // namespace v8
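// [Editor's sketch] CheckType above is generated with an X-macro: each
// (Type, name, CamelName) entry of READ_ONLY_ROOT_LIST expands to one switch
// case. The same technique applied to a hypothetical two-entry root list:
#define DEMO_ROOT_LIST(V)       \
  V(int, the_answer, TheAnswer) \
  V(double, pi, Pi)

enum class DemoRootIndex { kTheAnswer, kPi };

inline const char* DemoTypeName(DemoRootIndex index) {
  switch (index) {
#define CHECKTYPE(Type, name, CamelName) \
  case DemoRootIndex::k##CamelName:      \
    return #Type;
    DEMO_ROOT_LIST(CHECKTYPE)
#undef CHECKTYPE
  }
  return "unknown";  // Unreachable when the list covers every index.
}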
diff --git a/deps/v8/src/roots.h b/deps/v8/src/roots.h
index be9ea8d252..4e841d52f6 100644
--- a/deps/v8/src/roots.h
+++ b/deps/v8/src/roots.h
@@ -124,6 +124,7 @@ class RootVisitor;
V(Map, weak_array_list_map, WeakArrayListMap) \
V(Map, ephemeron_hash_table_map, EphemeronHashTableMap) \
V(Map, embedder_data_array_map, EmbedderDataArrayMap) \
+ V(Map, weak_cell_map, WeakCellMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -134,22 +135,14 @@ class RootVisitor;
V(Map, sliced_string_map, SlicedStringMap) \
V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \
V(Map, external_string_map, ExternalStringMap) \
- V(Map, external_string_with_one_byte_data_map, \
- ExternalStringWithOneByteDataMap) \
V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \
V(Map, uncached_external_string_map, UncachedExternalStringMap) \
- V(Map, uncached_external_string_with_one_byte_data_map, \
- UncachedExternalStringWithOneByteDataMap) \
V(Map, internalized_string_map, InternalizedStringMap) \
V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
- V(Map, external_internalized_string_with_one_byte_data_map, \
- ExternalInternalizedStringWithOneByteDataMap) \
V(Map, external_one_byte_internalized_string_map, \
ExternalOneByteInternalizedStringMap) \
V(Map, uncached_external_internalized_string_map, \
UncachedExternalInternalizedStringMap) \
- V(Map, uncached_external_internalized_string_with_one_byte_data_map, \
- UncachedExternalInternalizedStringWithOneByteDataMap) \
V(Map, uncached_external_one_byte_internalized_string_map, \
UncachedExternalOneByteInternalizedStringMap) \
V(Map, uncached_external_one_byte_string_map, \
@@ -284,12 +277,13 @@ class RootVisitor;
V(TemplateList, message_listeners, MessageListeners) \
/* Support for async stack traces */ \
V(HeapObject, current_microtask, CurrentMicrotask) \
- /* JSWeakFactory objects which need cleanup */ \
- V(Object, dirty_js_weak_factories, DirtyJSWeakFactories) \
+ /* JSFinalizationGroup objects which need cleanup */ \
+ V(Object, dirty_js_finalization_groups, DirtyJSFinalizationGroups) \
/* KeepDuringJob set for JS WeakRefs */ \
V(HeapObject, weak_refs_keep_during_job, WeakRefsKeepDuringJob) \
V(HeapObject, interpreter_entry_trampoline_for_profiling, \
- InterpreterEntryTrampolineForProfiling)
+ InterpreterEntryTrampolineForProfiling) \
+ V(Object, pending_optimize_for_test_bytecode, PendingOptimizeForTestBytecode)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
@@ -523,13 +517,18 @@ class ReadOnlyRoots {
V8_INLINE Map MapForFixedTypedArray(ExternalArrayType array_type);
V8_INLINE Map MapForFixedTypedArray(ElementsKind elements_kind);
- V8_INLINE FixedTypedArrayBase EmptyFixedTypedArrayForMap(const Map map);
+ V8_INLINE FixedTypedArrayBase
+ EmptyFixedTypedArrayForTypedArray(ElementsKind elements_kind);
// Iterate over all the read-only roots. This is not necessary for garbage
// collection and is usually only performed as part of (de)serialization or
// heap verification.
void Iterate(RootVisitor* visitor);
+#ifdef DEBUG
+ V8_EXPORT_PRIVATE bool CheckType(RootIndex index) const;
+#endif
+
private:
RootsTable& roots_table_;
};
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index d18ced02bd..febecaf892 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -8,8 +8,11 @@
#include "src/debug/debug.h"
#include "src/elements.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
+#include "src/objects/allocation-site-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
@@ -130,11 +133,11 @@ Object RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
// the remaining undefineds or delete the remaining properties.
RETURN_FAILURE_ON_EXCEPTION(
isolate, Object::SetElement(isolate, receiver, current_pos, element,
- LanguageMode::kStrict));
+ ShouldThrow::kThrowOnError));
RETURN_FAILURE_ON_EXCEPTION(
isolate, Object::SetElement(isolate, receiver, key,
isolate->factory()->undefined_value(),
- LanguageMode::kStrict));
+ ShouldThrow::kThrowOnError));
++current_pos;
}
}
@@ -152,7 +155,7 @@ Object RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
RETURN_FAILURE_ON_EXCEPTION(
isolate, Object::SetElement(isolate, receiver, current_pos++,
isolate->factory()->undefined_value(),
- LanguageMode::kStrict));
+ ShouldThrow::kThrowOnError));
}
// TODO(szuend): Re-enable when we also copy from the prototype chain for
// JSArrays. Then we can use HasOwnProperty instead of
@@ -207,7 +210,8 @@ Object RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
Handle<Map> new_map =
JSObject::GetElementsTransitionMap(object, HOLEY_ELEMENTS);
- PretenureFlag tenure = Heap::InNewSpace(*object) ? NOT_TENURED : TENURED;
+ PretenureFlag tenure =
+ ObjectInYoungGeneration(*object) ? NOT_TENURED : TENURED;
Handle<FixedArray> fast_elements =
isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure);
dict->CopyValuesTo(*fast_elements);
@@ -336,7 +340,7 @@ Maybe<bool> ConditionalCopy(Isolate* isolate, Handle<JSReceiver> source,
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, set_result,
Object::SetElement(isolate, target, index, source_element,
- LanguageMode::kStrict),
+ ShouldThrow::kThrowOnError),
Nothing<bool>());
return Just(true);
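// [Editor's sketch] The hunks above replace LanguageMode::kStrict with
// ShouldThrow::kThrowOnError: stores now name their failure behavior
// directly instead of having every callee re-derive it from a language mode.
// Hypothetical helpers showing the shape of the migration:
enum class DemoLanguageMode { kSloppy, kStrict };
enum class DemoShouldThrow { kDontThrow, kThrowOnError };

// Before: the callee translated language mode into throw-on-failure.
inline bool StoreOld(bool ok, DemoLanguageMode mode) {
  bool should_throw = mode == DemoLanguageMode::kStrict;
  return ok || !should_throw;  // A strict-mode failure would throw here.
}

// After: the caller states the intended behavior up front.
inline bool StoreNew(bool ok, DemoShouldThrow should_throw) {
  return ok || should_throw == DemoShouldThrow::kDontThrow;
}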
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 02db33733e..f0d31600bf 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -771,25 +771,22 @@ namespace {
MaybeHandle<Object> StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value,
- LanguageMode language_mode) {
+ Handle<Object> value) {
Handle<JSReceiver> holder;
ASSIGN_RETURN_ON_EXCEPTION(isolate, holder,
GetSuperHolder(isolate, receiver, home_object,
SuperMode::kStore, name, 0),
Object);
LookupIterator it(receiver, name, holder);
- MAYBE_RETURN(
- Object::SetSuperProperty(&it, value, language_mode, StoreOrigin::kNamed),
- MaybeHandle<Object>());
+ MAYBE_RETURN(Object::SetSuperProperty(&it, value, StoreOrigin::kNamed),
+ MaybeHandle<Object>());
return value;
}
MaybeHandle<Object> StoreElementToSuper(Isolate* isolate,
Handle<JSObject> home_object,
Handle<Object> receiver, uint32_t index,
- Handle<Object> value,
- LanguageMode language_mode) {
+ Handle<Object> value) {
Handle<JSReceiver> holder;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, holder,
@@ -797,29 +794,14 @@ MaybeHandle<Object> StoreElementToSuper(Isolate* isolate,
MaybeHandle<Name>(), index),
Object);
LookupIterator it(isolate, receiver, index, holder);
- MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
- StoreOrigin::kMaybeKeyed),
+ MAYBE_RETURN(Object::SetSuperProperty(&it, value, StoreOrigin::kMaybeKeyed),
MaybeHandle<Object>());
return value;
}
} // anonymous namespace
-RUNTIME_FUNCTION(Runtime_StoreToSuper_Strict) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
-
- RETURN_RESULT_OR_FAILURE(
- isolate, StoreToSuper(isolate, home_object, receiver, name, value,
- LanguageMode::kStrict));
-}
-
-
-RUNTIME_FUNCTION(Runtime_StoreToSuper_Sloppy) {
+RUNTIME_FUNCTION(Runtime_StoreToSuper) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
@@ -828,47 +810,30 @@ RUNTIME_FUNCTION(Runtime_StoreToSuper_Sloppy) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
RETURN_RESULT_OR_FAILURE(
- isolate, StoreToSuper(isolate, home_object, receiver, name, value,
- LanguageMode::kSloppy));
+ isolate, StoreToSuper(isolate, home_object, receiver, name, value));
}
-static MaybeHandle<Object> StoreKeyedToSuper(
- Isolate* isolate, Handle<JSObject> home_object, Handle<Object> receiver,
- Handle<Object> key, Handle<Object> value, LanguageMode language_mode) {
+static MaybeHandle<Object> StoreKeyedToSuper(Isolate* isolate,
+ Handle<JSObject> home_object,
+ Handle<Object> receiver,
+ Handle<Object> key,
+ Handle<Object> value) {
uint32_t index = 0;
if (key->ToArrayIndex(&index)) {
- return StoreElementToSuper(isolate, home_object, receiver, index, value,
- language_mode);
+ return StoreElementToSuper(isolate, home_object, receiver, index, value);
}
Handle<Name> name;
ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
Object);
// TODO(verwaest): Unify using LookupIterator.
if (name->AsArrayIndex(&index)) {
- return StoreElementToSuper(isolate, home_object, receiver, index, value,
- language_mode);
+ return StoreElementToSuper(isolate, home_object, receiver, index, value);
}
- return StoreToSuper(isolate, home_object, receiver, name, value,
- language_mode);
+ return StoreToSuper(isolate, home_object, receiver, name, value);
}
-
-RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Strict) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
-
- RETURN_RESULT_OR_FAILURE(
- isolate, StoreKeyedToSuper(isolate, home_object, receiver, key, value,
- LanguageMode::kStrict));
-}
-
-
-RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
+RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
@@ -877,8 +842,7 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
RETURN_RESULT_OR_FAILURE(
- isolate, StoreKeyedToSuper(isolate, home_object, receiver, key, value,
- LanguageMode::kSloppy));
+ isolate, StoreKeyedToSuper(isolate, home_object, receiver, key, value));
}
} // namespace internal
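// [Editor's sketch] With the Strict/Sloppy runtime pairs above merged into
// single entry points, the throw decision is no longer baked into which
// function the bytecode calls; elsewhere in this patch it travels as a
// Maybe<ShouldThrow> that either forces a behavior or defers to the active
// language mode. A model with std::optional standing in for Maybe:
#include <optional>

enum class DemoMode { kSloppy, kStrict };
enum class DemoThrow { kDontThrow, kThrowOnError };

// Roughly the role GetShouldThrow plays: consult the override if present,
// otherwise fall back to the currently active language mode.
inline DemoThrow Resolve(std::optional<DemoThrow> override_mode,
                         DemoMode current) {
  if (override_mode.has_value()) return *override_mode;
  return current == DemoMode::kStrict ? DemoThrow::kThrowOnError
                                      : DemoThrow::kDontThrow;
}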
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 2f03bb8532..42f6af5f4f 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -6,6 +6,7 @@
#include "src/conversions-inl.h"
#include "src/counters.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/runtime/runtime-utils.h"
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 98aa3b98e7..7c08352175 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -15,6 +15,7 @@
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
#include "src/globals.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
@@ -722,16 +723,16 @@ RUNTIME_FUNCTION(Runtime_DebugCollectCoverage) {
RUNTIME_FUNCTION(Runtime_DebugTogglePreciseCoverage) {
SealHandleScope shs(isolate);
CONVERT_BOOLEAN_ARG_CHECKED(enable, 0);
- Coverage::SelectMode(isolate, enable ? debug::Coverage::kPreciseCount
- : debug::Coverage::kBestEffort);
+ Coverage::SelectMode(isolate, enable ? debug::CoverageMode::kPreciseCount
+ : debug::CoverageMode::kBestEffort);
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_DebugToggleBlockCoverage) {
SealHandleScope shs(isolate);
CONVERT_BOOLEAN_ARG_CHECKED(enable, 0);
- Coverage::SelectMode(isolate, enable ? debug::Coverage::kBlockCount
- : debug::Coverage::kBestEffort);
+ Coverage::SelectMode(isolate, enable ? debug::CoverageMode::kBlockCount
+ : debug::CoverageMode::kBestEffort);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index b0bb297bfe..56580e91da 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -8,6 +8,7 @@
#include "src/counters.h"
#include "src/elements.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/isolate-inl.h"
#include "src/keys.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 1edbd3d5cb..ee813bff11 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -6,6 +6,7 @@
#include "src/arguments-inl.h"
#include "src/compiler.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/isolate-inl.h"
#include "src/runtime/runtime-utils.h"
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index f8a7d5ba83..6a0dd3564f 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -17,6 +17,7 @@
#include "src/isolate-inl.h"
#include "src/message-template.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/template-objects-inl.h"
#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
@@ -27,6 +28,17 @@
namespace v8 {
namespace internal {
+RUNTIME_FUNCTION(Runtime_AccessCheck) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ if (!isolate->MayAccess(handle(isolate->context(), isolate), object)) {
+ isolate->ReportFailedAccessCheck(object);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
@@ -95,6 +107,13 @@ RUNTIME_FUNCTION(Runtime_ThrowTypeError) {
THROW_ERROR(isolate, args, NewTypeError);
}
+RUNTIME_FUNCTION(Runtime_ThrowTypeErrorIfStrict) {
+ if (GetShouldThrow(isolate, Nothing<ShouldThrow>()) ==
+ ShouldThrow::kDontThrow)
+ return ReadOnlyRoots(isolate).undefined_value();
+ THROW_ERROR(isolate, args, NewTypeError);
+}
+
#undef THROW_ERROR
namespace {
@@ -157,6 +176,15 @@ RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
}
+RUNTIME_FUNCTION(Runtime_ThrowAccessedUninitializedVariable) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewReferenceError(MessageTemplate::kAccessedUninitializedVariable, name));
+}
+
RUNTIME_FUNCTION(Runtime_NewTypeError) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -268,6 +296,9 @@ RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
bool double_align = AllocateDoubleAlignFlag::decode(flags);
AllocationSpace space = AllocateTargetSpace::decode(flags);
CHECK(size <= kMaxRegularHeapObjectSize || space == LO_SPACE);
+ if (FLAG_young_generation_large_objects && space == LO_SPACE) {
+ space = NEW_LO_SPACE;
+ }
return *isolate->factory()->NewFillerObject(size, double_align, space);
}
@@ -641,12 +672,16 @@ RUNTIME_FUNCTION(Runtime_CreateAsyncFromSyncIterator) {
Handle<JSReceiver>::cast(sync_iterator), next);
}
-RUNTIME_FUNCTION(Runtime_CreateTemplateObject) {
+RUNTIME_FUNCTION(Runtime_GetTemplateObject) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(TemplateObjectDescription, description, 0);
+ CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared_info, 1);
+ CONVERT_SMI_ARG_CHECKED(slot_id, 2);
- return *TemplateObjectDescription::CreateTemplateObject(isolate, description);
+ Handle<Context> native_context(isolate->context()->native_context(), isolate);
+ return *TemplateObjectDescription::GetTemplateObject(
+ isolate, native_context, description, shared_info, slot_id);
}
RUNTIME_FUNCTION(Runtime_ReportMessage) {
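// [Editor's sketch] The rename from CreateTemplateObject to GetTemplateObject,
// together with the new (shared_info, slot_id) arguments, suggests the
// template object for a tagged template is now cached per call site and
// reused rather than rebuilt on each evaluation. A hypothetical cache with
// that keying (not V8's actual mechanism):
#include <map>
#include <memory>
#include <utility>

struct DemoTemplateObject { /* frozen strings array, elided */ };
using SiteKey = std::pair<const void*, int>;  // (shared_info, slot_id)

inline const DemoTemplateObject* GetTemplateObject(
    std::map<SiteKey, std::unique_ptr<DemoTemplateObject>>& cache,
    const void* shared_info, int slot_id) {
  auto& entry = cache[{shared_info, slot_id}];
  if (!entry) entry.reset(new DemoTemplateObject());  // Create once per site.
  return entry.get();  // Same object on every later call for this site.
}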
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 59f15a62bd..edceef20a5 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -6,6 +6,7 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/isolate-inl.h"
#include "src/runtime/runtime-utils.h"
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index fd3d2dd168..02d1a8e157 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -6,6 +6,7 @@
#include "src/bootstrapper.h"
#include "src/counters.h"
#include "src/debug/debug.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/isolate-inl.h"
#include "src/message-template.h"
#include "src/objects/hash-table-inl.h"
@@ -52,6 +53,30 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
return result;
}
+MaybeHandle<Object> Runtime::HasProperty(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key) {
+ // Check that {object} is actually a receiver.
+ if (!object->IsJSReceiver()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidInOperatorUse, key, object),
+ Object);
+ }
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+
+ // Convert the {key} to a name.
+ Handle<Name> name;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
+ Object);
+
+ // Lookup the {name} on {receiver}.
+ Maybe<bool> maybe = JSReceiver::HasProperty(receiver, name);
+ if (maybe.IsNothing()) return MaybeHandle<Object>();
+ return maybe.FromJust() ? ReadOnlyRoots(isolate).true_value_handle()
+ : ReadOnlyRoots(isolate).false_value_handle();
+}
+
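// [Editor's sketch] Runtime::HasProperty above mirrors the JS `in` operator:
// non-receivers throw a TypeError, the key is coerced via ToName, and the
// lookup result maps onto the shared true/false root values. A loose
// standalone model, with a map standing in for a JSReceiver and a C++
// exception standing in for THROW_NEW_ERROR:
#include <stdexcept>
#include <string>
#include <unordered_map>

using DemoReceiver = std::unordered_map<std::string, int>;

inline bool DemoHasProperty(const DemoReceiver* object,
                            const std::string& key) {
  if (object == nullptr) {  // Not a receiver: `'x' in 1` throws in JS.
    throw std::runtime_error("cannot use 'in' operator on a non-object");
  }
  return object->count(key) > 0;  // Maps to true_value()/false_value() roots.
}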
namespace {
bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
@@ -341,12 +366,10 @@ RUNTIME_FUNCTION(Runtime_ObjectCreate) {
return *obj;
}
-MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- LanguageMode language_mode,
- StoreOrigin store_origin) {
+MaybeHandle<Object> Runtime::SetObjectProperty(
+ Isolate* isolate, Handle<Object> object, Handle<Object> key,
+ Handle<Object> value, StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw) {
if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR(
isolate,
@@ -371,12 +394,11 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
}
MAYBE_RETURN_NULL(
- Object::SetProperty(&it, value, language_mode, store_origin));
+ Object::SetProperty(&it, value, store_origin, should_throw));
return value;
}
-
RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -549,31 +571,28 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
RUNTIME_FUNCTION(Runtime_SetKeyedProperty) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
RETURN_RESULT_OR_FAILURE(
- isolate,
- Runtime::SetObjectProperty(isolate, object, key, value, language_mode,
- StoreOrigin::kMaybeKeyed));
+ isolate, Runtime::SetObjectProperty(isolate, object, key, value,
+ StoreOrigin::kMaybeKeyed));
}
RUNTIME_FUNCTION(Runtime_SetNamedProperty) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
RETURN_RESULT_OR_FAILURE(
isolate, Runtime::SetObjectProperty(isolate, object, key, value,
- language_mode, StoreOrigin::kNamed));
+ StoreOrigin::kNamed));
}
// Similar to DefineDataPropertyInLiteral, but does not update feedback, and
@@ -592,8 +611,8 @@ RUNTIME_FUNCTION(Runtime_StoreDataPropertyInLiteral) {
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, key, &success, LookupIterator::OWN);
- Maybe<bool> result =
- JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE, kDontThrow);
+ Maybe<bool> result = JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, value, NONE, Just(kDontThrow));
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
DCHECK(result.IsJust());
USE(result);
@@ -791,7 +810,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
nexus.ConfigureMegamorphic(PROPERTY);
}
} else if (nexus.ic_state() == MONOMORPHIC) {
- if (nexus.FindFirstMap() != object->map() ||
+ if (nexus.GetFirstMap() != object->map() ||
nexus.GetFeedbackExtra() != MaybeObject::FromObject(*name)) {
nexus.ConfigureMegamorphic(PROPERTY);
}
@@ -823,9 +842,9 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
isolate, object, name, object, LookupIterator::OWN);
// Cannot fail since this should only be called when
// creating an object literal.
- CHECK(
- JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs, kDontThrow)
- .IsJust());
+ CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs,
+ Just(kDontThrow))
+ .IsJust());
return *object;
}
@@ -1027,7 +1046,7 @@ RUNTIME_FUNCTION(Runtime_DefineMethodsInternal) {
}
Maybe<bool> success = JSReceiver::DefineOwnProperty(
- isolate, target, key, &descriptor, kDontThrow);
+ isolate, target, key, &descriptor, Just(kDontThrow));
CHECK(success.FromJust());
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -1133,7 +1152,7 @@ RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, o, key, &success, LookupIterator::OWN);
if (!success) return ReadOnlyRoots(isolate).exception();
- MAYBE_RETURN(JSReceiver::CreateDataProperty(&it, value, kThrowOnError),
+ MAYBE_RETURN(JSReceiver::CreateDataProperty(&it, value, Just(kThrowOnError)),
ReadOnlyRoots(isolate).exception());
return *value;
}
@@ -1170,7 +1189,7 @@ RUNTIME_FUNCTION(Runtime_AddPrivateField) {
isolate, NewTypeError(MessageTemplate::kVarRedeclaration, key));
}
- CHECK(Object::AddDataProperty(&it, value, NONE, kDontThrow,
+ CHECK(Object::AddDataProperty(&it, value, NONE, Just(kDontThrow),
StoreOrigin::kMaybeKeyed)
.FromJust());
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index 1ce7fffd18..cc932f2b41 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -4,6 +4,7 @@
#include "src/arguments.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/isolate-inl.h"
#include "src/runtime/runtime-utils.h"
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index cd76d5ee7d..dc361b95e8 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -76,9 +76,10 @@ RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- Handle<CallableTask> microtask =
- isolate->factory()->NewCallableTask(function, isolate->native_context());
- isolate->native_context()->microtask_queue()->EnqueueMicrotask(*microtask);
+
+ Handle<CallableTask> microtask = isolate->factory()->NewCallableTask(
+ function, handle(function->native_context(), isolate));
+ function->native_context()->microtask_queue()->EnqueueMicrotask(*microtask);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -158,7 +159,8 @@ Handle<JSPromise> AwaitPromisesInitCommon(Isolate* isolate,
Object::SetProperty(
isolate, reject_handler,
isolate->factory()->promise_forwarding_handler_symbol(),
- isolate->factory()->true_value(), LanguageMode::kStrict)
+ isolate->factory()->true_value(), StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.Check();
Handle<JSPromise>::cast(value)->set_handled_hint(is_predicted_as_caught);
}
@@ -167,7 +169,8 @@ Handle<JSPromise> AwaitPromisesInitCommon(Isolate* isolate,
// Promise is found on the Promise stack
Object::SetProperty(isolate, throwaway,
isolate->factory()->promise_handled_by_symbol(),
- outer_promise, LanguageMode::kStrict)
+ outer_promise, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.Check();
}
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index f4f84ebec9..11544cd34b 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -8,6 +8,7 @@
#include "src/counters.h"
#include "src/elements.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/isolate-inl.h"
#include "src/objects-inl.h"
@@ -64,12 +65,11 @@ RUNTIME_FUNCTION(Runtime_GetPropertyWithReceiver) {
RUNTIME_FUNCTION(Runtime_SetPropertyWithReceiver) {
HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 3);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 4);
bool success = false;
LookupIterator it = LookupIterator::PropertyOrElement(isolate, receiver, key,
@@ -78,8 +78,8 @@ RUNTIME_FUNCTION(Runtime_SetPropertyWithReceiver) {
DCHECK(isolate->has_pending_exception());
return ReadOnlyRoots(isolate).exception();
}
- Maybe<bool> result = Object::SetSuperProperty(&it, value, language_mode,
- StoreOrigin::kMaybeKeyed);
+ Maybe<bool> result =
+ Object::SetSuperProperty(&it, value, StoreOrigin::kMaybeKeyed);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->ToBoolean(result.FromJust());
}
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index f472da7478..67bb8642c3 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -7,11 +7,11 @@
#include "src/arguments-inl.h"
#include "src/conversions-inl.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/isolate-inl.h"
#include "src/message-template.h"
#include "src/objects/js-array-inl.h"
#include "src/regexp/jsregexp-inl.h"
-#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime-utils.h"
#include "src/string-builder-inl.h"
@@ -628,7 +628,8 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
// Shortcut for simple non-regexp global replacements
if (typeTag == JSRegExp::ATOM && simple_replace) {
- if (subject->HasOnlyOneByteChars() && replacement->HasOnlyOneByteChars()) {
+ if (subject->IsOneByteRepresentation() &&
+ replacement->IsOneByteRepresentation()) {
return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
isolate, subject, regexp, replacement, last_match_info);
} else {
@@ -652,16 +653,9 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
int expected_parts = (compiled_replacement.parts() + 1) * 4 + 1;
ReplacementStringBuilder builder(isolate->heap(), subject, expected_parts);
- // Number of parts added by compiled replacement plus preceding
- // string and possibly suffix after last match. It is possible for
- // all components to use two elements when encoded as two smis.
- const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);
-
int prev = 0;
do {
- builder.EnsureCapacity(parts_added_per_loop);
-
int start = current_match[0];
int end = current_match[1];
@@ -682,7 +676,6 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
if (prev < subject_length) {
- builder.EnsureCapacity(2);
builder.AddSubjectSlice(prev, subject_length);
}
@@ -791,33 +784,6 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
return *answer;
}
-namespace {
-
-Object StringReplaceGlobalRegExpWithStringHelper(
- Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
- CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
-
- subject = String::Flatten(isolate, subject);
-
- if (replacement->length() == 0) {
- if (subject->HasOnlyOneByteChars()) {
- return StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
- isolate, subject, regexp, last_match_info);
- } else {
- return StringReplaceGlobalRegExpWithEmptyString<SeqTwoByteString>(
- isolate, subject, regexp, last_match_info);
- }
- }
-
- replacement = String::Flatten(isolate, replacement);
-
- return StringReplaceGlobalRegExpWithString(isolate, subject, regexp,
- replacement, last_match_info);
-}
-
-} // namespace
-
RUNTIME_FUNCTION(Runtime_StringSplit) {
HandleScope handle_scope(isolate);
DCHECK_EQ(3, args.length());
@@ -915,20 +881,6 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
index, last_match_info));
}
-RUNTIME_FUNCTION(Runtime_RegExpInternalReplace) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
-
- Handle<RegExpMatchInfo> internal_match_info =
- isolate->regexp_internal_match_info();
-
- return StringReplaceGlobalRegExpWithStringHelper(
- isolate, regexp, subject, replacement, internal_match_info);
-}
-
namespace {
class MatchInfoBackedMatch : public String::Match {
@@ -1375,7 +1327,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
String);
if (replace->length() == 0) {
- if (string->HasOnlyOneByteChars()) {
+ if (string->IsOneByteRepresentation()) {
Object result =
StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
isolate, string, regexp, last_match_info);
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 8227242940..6783387b70 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -11,6 +11,7 @@
#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/isolate-inl.h"
#include "src/message-template.h"
#include "src/objects/heap-object-inl.h"
@@ -53,7 +54,7 @@ Object DeclareGlobal(
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table(), isolate);
ScriptContextTable::LookupResult lookup;
- if (ScriptContextTable::Lookup(isolate, script_contexts, name, &lookup) &&
+ if (ScriptContextTable::Lookup(isolate, *script_contexts, *name, &lookup) &&
IsLexicalVariableMode(lookup.mode)) {
// ES#sec-globaldeclarationinstantiation 6.a:
// If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
@@ -634,7 +635,7 @@ static Object FindNameClash(Isolate* isolate, Handle<ScopeInfo> scope_info,
Handle<String> name(scope_info->ContextLocalName(var), isolate);
VariableMode mode = scope_info->ContextLocalMode(var);
ScriptContextTable::LookupResult lookup;
- if (ScriptContextTable::Lookup(isolate, script_context, name, &lookup)) {
+ if (ScriptContextTable::Lookup(isolate, *script_context, *name, &lookup)) {
if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(lookup.mode)) {
// ES#sec-globaldeclarationinstantiation 5.b:
// If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
@@ -948,9 +949,9 @@ MaybeHandle<Object> StoreLookupSlot(
object = handle(context->global_object(), isolate);
}
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value,
- Object::SetProperty(isolate, object, name, value, language_mode), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+ Object::SetProperty(isolate, object, name, value),
+ Object);
return value;
}
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 9a537e7fa2..aa19b103eb 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -5,6 +5,7 @@
#include "src/arguments-inl.h"
#include "src/conversions.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/slots.h"
@@ -297,7 +298,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
}
int length;
- bool one_byte = special->HasOnlyOneByteChars();
+ bool one_byte = special->IsOneByteRepresentation();
{
DisallowHeapAllocation no_gc;
@@ -344,234 +345,6 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
}
}
-// TODO(pwong): Remove once TypedArray.prototype.join() is ported to Torque.
-RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- int32_t array_length;
- if (!args[1]->ToInt32(&array_length)) {
- THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
- }
- CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
- CHECK(array->HasObjectElements());
- CHECK_GE(array_length, 0);
-
- Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()), isolate);
- if (fixed_array->length() < array_length) {
- array_length = fixed_array->length();
- }
-
- if (array_length == 0) {
- return ReadOnlyRoots(isolate).empty_string();
- } else if (array_length == 1) {
- Object first = fixed_array->get(0);
- CHECK(first->IsString());
- return first;
- }
-
- int separator_length = separator->length();
- CHECK_GT(separator_length, 0);
- int max_nof_separators =
- (String::kMaxLength + separator_length - 1) / separator_length;
- if (max_nof_separators < (array_length - 1)) {
- THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
- }
- int length = (array_length - 1) * separator_length;
- for (int i = 0; i < array_length; i++) {
- Object element_obj = fixed_array->get(i);
- CHECK(element_obj->IsString());
- String element = String::cast(element_obj);
- int increment = element->length();
- if (increment > String::kMaxLength - length) {
- STATIC_ASSERT(String::kMaxLength < kMaxInt);
- length = kMaxInt; // Provoke exception;
- break;
- }
- length += increment;
- }
-
- Handle<SeqTwoByteString> answer;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, answer, isolate->factory()->NewRawTwoByteString(length));
-
- DisallowHeapAllocation no_gc;
-
- uc16* sink = answer->GetChars(no_gc);
-#ifdef DEBUG
- uc16* end = sink + length;
-#endif
-
- CHECK(fixed_array->get(0)->IsString());
- String first = String::cast(fixed_array->get(0));
- String separator_raw = *separator;
-
- int first_length = first->length();
- String::WriteToFlat(first, sink, 0, first_length);
- sink += first_length;
-
- for (int i = 1; i < array_length; i++) {
- DCHECK(sink + separator_length <= end);
- String::WriteToFlat(separator_raw, sink, 0, separator_length);
- sink += separator_length;
-
- CHECK(fixed_array->get(i)->IsString());
- String element = String::cast(fixed_array->get(i));
- int element_length = element->length();
- DCHECK(sink + element_length <= end);
- String::WriteToFlat(element, sink, 0, element_length);
- sink += element_length;
- }
- DCHECK(sink == end);
-
- // Use %_FastOneByteArrayJoin instead.
- DCHECK(!answer->IsOneByteRepresentation());
- return *answer;
-}
-
-template <typename sinkchar>
-static void WriteRepeatToFlat(String src, Vector<sinkchar> buffer, int cursor,
- int repeat, int length) {
- if (repeat == 0) return;
-
- sinkchar* start = &buffer[cursor];
- String::WriteToFlat<sinkchar>(src, start, 0, length);
-
- int done = 1;
- sinkchar* next = start + length;
-
- while (done < repeat) {
- int block = Min(done, repeat - done);
- int block_chars = block * length;
- CopyChars(next, start, block_chars);
- next += block_chars;
- done += block;
- }
-}
-
-// TODO(pwong): Remove once TypedArray.prototype.join() is ported to Torque.
-template <typename Char>
-static void JoinSparseArrayWithSeparator(FixedArray elements,
- int elements_length,
- uint32_t array_length,
- String separator,
- Vector<Char> buffer) {
- DisallowHeapAllocation no_gc;
- int previous_separator_position = 0;
- int separator_length = separator->length();
- DCHECK_LT(0, separator_length);
- int cursor = 0;
- for (int i = 0; i < elements_length; i += 2) {
- int position = NumberToInt32(elements->get(i));
- String string = String::cast(elements->get(i + 1));
- int string_length = string->length();
- if (string->length() > 0) {
- int repeat = position - previous_separator_position;
- WriteRepeatToFlat<Char>(separator, buffer, cursor, repeat,
- separator_length);
- cursor += repeat * separator_length;
- previous_separator_position = position;
- String::WriteToFlat<Char>(string, &buffer[cursor], 0, string_length);
- cursor += string->length();
- }
- }
-
- int last_array_index = static_cast<int>(array_length - 1);
- // Array length must be representable as a signed 32-bit number,
- // otherwise the total string length would have been too large.
- DCHECK_LE(array_length, 0x7FFFFFFF); // Is int32_t.
- int repeat = last_array_index - previous_separator_position;
- WriteRepeatToFlat<Char>(separator, buffer, cursor, repeat, separator_length);
- cursor += repeat * separator_length;
- DCHECK(cursor <= buffer.length());
-}
-
-// TODO(pwong): Remove once TypedArray.prototype.join() is ported to Torque.
-RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArray, elements_array, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
- CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
- // elements_array is fast-mode JSarray of alternating positions
- // (increasing order) and strings.
- CHECK(elements_array->HasSmiOrObjectElements());
- // array_length is length of original array (used to add separators);
- // separator is string to put between elements. Assumed to be non-empty.
- CHECK_GT(array_length, 0);
-
- // Find total length of join result.
- int string_length = 0;
- bool is_one_byte = separator->IsOneByteRepresentation();
- bool overflow = false;
- CONVERT_NUMBER_CHECKED(int, elements_length, Int32, elements_array->length());
- CHECK(elements_length <= elements_array->elements()->length());
- CHECK_EQ(elements_length & 1, 0); // Even length.
- FixedArray elements = FixedArray::cast(elements_array->elements());
- {
- DisallowHeapAllocation no_gc;
- for (int i = 0; i < elements_length; i += 2) {
- String string = String::cast(elements->get(i + 1));
- int length = string->length();
- if (is_one_byte && !string->IsOneByteRepresentation()) {
- is_one_byte = false;
- }
- if (length > String::kMaxLength ||
- String::kMaxLength - length < string_length) {
- overflow = true;
- break;
- }
- string_length += length;
- }
- }
-
- int separator_length = separator->length();
- if (!overflow && separator_length > 0) {
- if (array_length <= 0x7FFFFFFFu) {
- int separator_count = static_cast<int>(array_length) - 1;
- int remaining_length = String::kMaxLength - string_length;
- if ((remaining_length / separator_length) >= separator_count) {
- string_length += separator_length * (array_length - 1);
- } else {
- // Not room for the separators within the maximal string length.
- overflow = true;
- }
- } else {
- // Nonempty separator and at least 2^31-1 separators necessary
- // means that the string is too large to create.
- STATIC_ASSERT(String::kMaxLength < 0x7FFFFFFF);
- overflow = true;
- }
- }
- if (overflow) {
- // Throw an exception if the resulting string is too large. See
- // https://code.google.com/p/chromium/issues/detail?id=336820
- // for details.
- THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
- }
-
- if (is_one_byte) {
- Handle<SeqOneByteString> result = isolate->factory()
- ->NewRawOneByteString(string_length)
- .ToHandleChecked();
- DisallowHeapAllocation no_gc;
- JoinSparseArrayWithSeparator<uint8_t>(
- FixedArray::cast(elements_array->elements()), elements_length,
- array_length, *separator,
- Vector<uint8_t>(result->GetChars(no_gc), string_length));
- return *result;
- } else {
- Handle<SeqTwoByteString> result = isolate->factory()
- ->NewRawTwoByteString(string_length)
- .ToHandleChecked();
- DisallowHeapAllocation no_gc;
- JoinSparseArrayWithSeparator<uc16>(
- FixedArray::cast(elements_array->elements()), elements_length,
- array_length, *separator,
- Vector<uc16>(result->GetChars(no_gc), string_length));
- return *result;
- }
-}
// Copies Latin1 characters to the given fixed array looking up
// one-char strings in the cache. Gives up on the first char that is
@@ -713,5 +486,78 @@ RUNTIME_FUNCTION(Runtime_StringMaxLength) {
return Smi::FromInt(String::kMaxLength);
}
+RUNTIME_FUNCTION(Runtime_StringCompareSequence) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, search_string, 1);
+ CONVERT_NUMBER_CHECKED(int, start, Int32, args[2]);
+
+ // Check if start + searchLength is in bounds.
+ DCHECK_LE(start + search_string->length(), string->length());
+
+ FlatStringReader string_reader(isolate, String::Flatten(isolate, string));
+ FlatStringReader search_reader(isolate,
+ String::Flatten(isolate, search_string));
+
+ for (int i = 0; i < search_string->length(); i++) {
+ if (string_reader.Get(start + i) != search_reader.Get(i)) {
+ return ReadOnlyRoots(isolate).false_value();
+ }
+ }
+
+ return ReadOnlyRoots(isolate).true_value();
+}
+
+RUNTIME_FUNCTION(Runtime_StringEscapeQuotes) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
+
+ // Equivalent to global replacement `string.replace(/"/g, "&quot;")`, but this
+ // does not modify any global state (e.g. the regexp match info).
+
+ const int string_length = string->length();
+ Handle<String> quotes =
+ isolate->factory()->LookupSingleCharacterStringFromCode('"');
+
+ int index = String::IndexOf(isolate, string, quotes, 0);
+
+ // No quotes, nothing to do.
+ if (index == -1) return *string;
+
+ // Find all quotes.
+ std::vector<int> indices = {index};
+ while (index + 1 < string_length) {
+ index = String::IndexOf(isolate, string, quotes, index + 1);
+ if (index == -1) break;
+ indices.emplace_back(index);
+ }
+
+ // Build the replacement string.
+ Handle<String> replacement =
+ isolate->factory()->NewStringFromAsciiChecked("&quot;");
+ const int estimated_part_count = static_cast<int>(indices.size()) * 2 + 1;
+ ReplacementStringBuilder builder(isolate->heap(), string,
+ estimated_part_count);
+
+ int prev_index = -1; // Start at -1 to avoid special-casing the first match.
+ for (int index : indices) {
+ const int slice_start = prev_index + 1;
+ const int slice_end = index;
+ if (slice_end > slice_start) {
+ builder.AddSubjectSlice(slice_start, slice_end);
+ }
+ builder.AddString(replacement);
+ prev_index = index;
+ }
+
+ if (prev_index < string_length - 1) {
+ builder.AddSubjectSlice(prev_index + 1, string_length);
+ }
+
+ return *builder.ToString().ToHandleChecked();
+}
+
} // namespace internal
} // namespace v8
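// [Editor's sketch] Runtime_StringEscapeQuotes above is a two-pass algorithm:
// first collect every index of '"', then stitch together subject slices and
// "&quot;" parts, so no global regexp state (match info) is touched. The same
// algorithm in plain C++ over std::string:
#include <string>
#include <vector>

inline std::string EscapeQuotes(const std::string& s) {
  std::vector<size_t> indices;  // Pass 1: find all quotes.
  for (size_t i = s.find('"'); i != std::string::npos; i = s.find('"', i + 1))
    indices.push_back(i);
  if (indices.empty()) return s;  // No quotes, nothing to do.

  std::string out;  // Pass 2: alternate subject slices and replacements.
  size_t prev = 0;
  for (size_t idx : indices) {
    out.append(s, prev, idx - prev);
    out += "&quot;";
    prev = idx + 1;
  }
  out.append(s, prev, std::string::npos);
  return out;
}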
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 8cd48505d2..b47794938a 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -4,6 +4,7 @@
#include "src/arguments-inl.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/isolate-inl.h"
#include "src/objects-inl.h"
#include "src/runtime/runtime-utils.h"
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 40ca5de401..1178ea6251 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -16,6 +16,9 @@
#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
+#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/smi.h"
@@ -44,7 +47,7 @@ using WasmCompileControlsMap = std::map<v8::Isolate*, WasmCompileControls>;
// isolates concurrently. Methods need to hold the accompanying mutex on access.
// To avoid upsetting the static initializer count, we lazy initialize this.
DEFINE_LAZY_LEAKY_OBJECT_GETTER(WasmCompileControlsMap,
- GetPerIsolateWasmControls);
+ GetPerIsolateWasmControls)
base::LazyMutex g_PerIsolateWasmControlsMutex = LAZY_MUTEX_INITIALIZER;
bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
@@ -103,6 +106,14 @@ bool WasmInstanceOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
} // namespace
+RUNTIME_FUNCTION(Runtime_ClearMegamorphicStubCache) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ isolate->load_stub_cache()->Clear();
+ isolate->store_stub_cache()->Clear();
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_ConstructDouble) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -285,6 +296,60 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ // Only one function should be prepared for optimization at a time
+ CHECK(isolate->heap()->pending_optimize_for_test_bytecode()->IsUndefined());
+
+ // Check function allows lazy compilation.
+ if (!function->shared()->allows_lazy_compilation()) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ // If function isn't compiled, compile it now.
+ IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
+ if (!is_compiled_scope.is_compiled() &&
+ !Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ // Ensure function has a feedback vector to hold type feedback for
+ // optimization.
+ JSFunction::EnsureFeedbackVector(function);
+
+ // If optimization is disabled for the function, return without making it
+ // pending optimize for test.
+ if (function->shared()->optimization_disabled() &&
+ function->shared()->disable_optimization_reason() ==
+ BailoutReason::kNeverOptimize) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ // If the function is already optimized, return without making it pending
+ // optimize for test.
+ if (function->IsOptimized() || function->shared()->HasAsmWasmData()) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ // If the function has optimized code, ensure that we check for it and then
+ // return without making it pending optimize for test.
+ if (function->HasOptimizedCode()) {
+ DCHECK(function->ChecksOptimizationMarker());
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ // Hold onto the bytecode array between marking and optimization to ensure
+ // it's not flushed.
+ isolate->heap()->SetPendingOptimizeForTestBytecode(
+ function->shared()->GetBytecodeArray());
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
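// [Editor's sketch] Runtime_PrepareFunctionForOptimization above is a chain
// of early-return guards ending in one side effect: the function's bytecode
// is stored in a heap root ("pinned") so the bytecode flusher cannot reclaim
// it before %OptimizeFunctionOnNextCall runs. A rough model of that
// keep-alive idea, with shared_ptr standing in for a strong heap reference:
#include <memory>
#include <utility>

struct DemoBytecode { /* instructions elided */ };

struct DemoHeap {
  // While set, this strong reference keeps the bytecode alive.
  std::shared_ptr<DemoBytecode> pending_optimize_bytecode;
};

inline void PrepareForOptimization(DemoHeap& heap,
                                   std::shared_ptr<DemoBytecode> bytecode,
                                   bool compiled, bool already_optimized) {
  if (!compiled) return;          // Guard: nothing to pin yet.
  if (already_optimized) return;  // Guard: nothing left to do.
  heap.pending_optimize_bytecode = std::move(bytecode);  // Pin.
}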
RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
HandleScope scope(isolate);
DCHECK(args.length() == 0 || args.length() == 1);
@@ -796,7 +861,7 @@ RUNTIME_FUNCTION(Runtime_InNewSpace) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(Heap::InNewSpace(obj));
+ return isolate->heap()->ToBoolean(ObjectInYoungGeneration(obj));
}
RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
@@ -874,10 +939,9 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, exception, 0);
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 1);
- Handle<Object> tag;
- if (JSReceiver::GetProperty(isolate, exception,
- isolate->factory()->wasm_exception_tag_symbol())
- .ToHandle(&tag)) {
+ Handle<Object> tag =
+ WasmExceptionPackage::GetExceptionTag(isolate, exception);
+ if (tag->IsWasmExceptionTag()) {
Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate);
for (int index = 0; index < exceptions_table->length(); ++index) {
if (exceptions_table->get(index) == *tag) return Smi::FromInt(index);
@@ -890,11 +954,9 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, exception, 0);
- Handle<Object> values_obj;
- CHECK(JSReceiver::GetProperty(
- isolate, exception,
- isolate->factory()->wasm_exception_values_symbol())
- .ToHandle(&values_obj));
+ Handle<Object> values_obj =
+ WasmExceptionPackage::GetExceptionValues(isolate, exception);
+ CHECK(values_obj->IsFixedArray()); // Only called with correct input.
Handle<FixedArray> values = Handle<FixedArray>::cast(values_obj);
return *isolate->factory()->NewJSArrayWithElements(values);
}
@@ -929,6 +991,7 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SmiOrObjectElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(PackedElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
// Properties test sitting with elements tests - not fooling anyone.
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 84ef744d8b..fe85e1dfc9 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -102,8 +102,6 @@ RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
RUNTIME_FUNCTION(Runtime_ThrowWasmStackOverflow) {
SealHandleScope shs(isolate);
DCHECK_LE(0, args.length());
- DCHECK(isolate->context().is_null());
- isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
return isolate->StackOverflow();
}
@@ -120,7 +118,7 @@ RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
DCHECK_EQ(2, args.length());
DCHECK(isolate->context().is_null());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- CONVERT_ARG_CHECKED(HeapObject, tag_raw, 0);
+ CONVERT_ARG_CHECKED(WasmExceptionTag, tag_raw, 0);
CONVERT_SMI_ARG_CHECKED(size, 1);
// TODO(mstarzinger): Manually box because parameters are not visited yet.
Handle<Object> tag(tag_raw, isolate);
@@ -128,12 +126,14 @@ RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
MessageTemplate::kWasmExceptionError);
CHECK(!Object::SetProperty(isolate, exception,
isolate->factory()->wasm_exception_tag_symbol(),
- tag, LanguageMode::kStrict)
+ tag, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.is_null());
Handle<FixedArray> values = isolate->factory()->NewFixedArray(size);
CHECK(!Object::SetProperty(isolate, exception,
isolate->factory()->wasm_exception_values_symbol(),
- values, LanguageMode::kStrict)
+ values, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.is_null());
return *exception;
}
@@ -147,16 +147,7 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetTag) {
CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
// TODO(mstarzinger): Manually box because parameters are not visited yet.
Handle<Object> except_obj(except_obj_raw, isolate);
- if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
- Handle<JSReceiver> exception(JSReceiver::cast(*except_obj), isolate);
- Handle<Object> tag;
- if (JSReceiver::GetProperty(isolate, exception,
- isolate->factory()->wasm_exception_tag_symbol())
- .ToHandle(&tag)) {
- return *tag;
- }
- }
- return ReadOnlyRoots(isolate).undefined_value();
+ return *WasmExceptionPackage::GetExceptionTag(isolate, except_obj);
}
RUNTIME_FUNCTION(Runtime_WasmExceptionGetValues) {
@@ -168,18 +159,7 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetValues) {
CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
// TODO(mstarzinger): Manually box because parameters are not visited yet.
Handle<Object> except_obj(except_obj_raw, isolate);
- if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
- Handle<JSReceiver> exception(JSReceiver::cast(*except_obj), isolate);
- Handle<Object> values;
- if (JSReceiver::GetProperty(
- isolate, exception,
- isolate->factory()->wasm_exception_values_symbol())
- .ToHandle(&values)) {
- DCHECK(values->IsFixedArray());
- return *values;
- }
- }
- return ReadOnlyRoots(isolate).undefined_value();
+ return *WasmExceptionPackage::GetExceptionValues(isolate, except_obj);
}
RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
@@ -333,6 +313,20 @@ RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
timeout_ms);
}
+namespace {
+Object ThrowTableOutOfBounds(Isolate* isolate,
+ Handle<WasmInstanceObject> instance) {
+ // Handle out-of-bounds access here in the runtime call, rather
+ // than having the lower-level layers deal with JS exceptions.
+ if (isolate->context().is_null()) {
+ isolate->set_context(instance->native_context());
+ }
+ Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
+ MessageTemplate::kWasmTrapTableOutOfBounds);
+ return isolate->Throw(*error_obj);
+}
+} // namespace
+
RUNTIME_FUNCTION(Runtime_WasmTableInit) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
@@ -342,44 +336,31 @@ RUNTIME_FUNCTION(Runtime_WasmTableInit) {
CONVERT_UINT32_ARG_CHECKED(elem_segment_index, 1);
CONVERT_UINT32_ARG_CHECKED(dst, 2);
CONVERT_UINT32_ARG_CHECKED(src, 3);
- CONVERT_UINT32_ARG_CHECKED(size, 4);
-
- PrintF(
- "TableInit(table_index=%u, elem_segment_index=%u, dst=%u, src=%u, "
- "size=%u)\n",
- table_index, elem_segment_index, dst, src, size);
+ CONVERT_UINT32_ARG_CHECKED(count, 4);
- USE(instance);
- USE(table_index);
- USE(elem_segment_index);
- USE(dst);
- USE(src);
- USE(size);
+ DCHECK(isolate->context().is_null());
+ isolate->set_context(instance->native_context());
- UNREACHABLE();
+ bool oob = !WasmInstanceObject::InitTableEntries(
+ isolate, instance, table_index, elem_segment_index, dst, src, count);
+ if (oob) return ThrowTableOutOfBounds(isolate, instance);
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_EQ(5, args.length());
auto instance =
Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_index, 0);
- CONVERT_UINT32_ARG_CHECKED(dst, 1);
- CONVERT_UINT32_ARG_CHECKED(src, 2);
- CONVERT_UINT32_ARG_CHECKED(count, 3);
+ CONVERT_UINT32_ARG_CHECKED(table_src_index, 0);
+ CONVERT_UINT32_ARG_CHECKED(table_dst_index, 1);
+ CONVERT_UINT32_ARG_CHECKED(dst, 2);
+ CONVERT_UINT32_ARG_CHECKED(src, 3);
+ CONVERT_UINT32_ARG_CHECKED(count, 4);
bool oob = !WasmInstanceObject::CopyTableEntries(
- isolate, instance, table_index, dst, src, count);
- if (oob) {
- // Handle out-of-bounds access here in the runtime call, rather
- // than having the lower-level layers deal with JS exceptions.
- DCHECK(isolate->context().is_null());
- isolate->set_context(instance->native_context());
- Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
- MessageTemplate::kWasmTrapTableOutOfBounds);
- return isolate->Throw(*error_obj);
- }
+ isolate, instance, table_src_index, table_dst_index, dst, src, count);
+ if (oob) return ThrowTableOutOfBounds(isolate, instance);
return ReadOnlyRoots(isolate).undefined_value();
}
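
The five-argument Runtime_WasmTableCopy now threads both a source and a destination table index through to CopyTableEntries, which reports out-of-bounds as a bool that the runtime call turns into a kWasmTrapTableOutOfBounds throw. A minimal plain-C++ sketch of that bounds discipline, with an assumed table representation (not the real WasmInstanceObject::CopyTableEntries):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    using Table = std::vector<uintptr_t>;  // stand-in for one wasm table

    // Returns false on out-of-bounds, mirroring the bool result the runtime
    // call checks before throwing.
    static bool CopyTableEntries(const Table& src_table, Table* dst_table,
                                 uint32_t dst, uint32_t src, uint32_t count) {
      // Sum in 64 bits so src + count cannot wrap around uint32_t.
      if (uint64_t{src} + count > src_table.size()) return false;
      if (uint64_t{dst} + count > dst_table->size()) return false;
      // memmove semantics: stays correct if both indices name the same
      // table and the ranges overlap.
      std::memmove(dst_table->data() + dst, src_table.data() + src,
                   uint64_t{count} * sizeof(uintptr_t));
      return true;
    }

    int main() {
      Table src(10, 1), dst(10, 0);
      bool ok = CopyTableEntries(src, &dst, 2, 0, 5);    // in bounds
      bool oob = !CopyTableEntries(src, &dst, 8, 0, 5);  // 8 + 5 > 10
      return ok && oob ? 0 : 1;
    }

With the check factored this way, both WasmTableInit and WasmTableCopy reduce to the same two-line pattern: call, and on false, ThrowTableOutOfBounds.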
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-weak-refs.cc b/deps/v8/src/runtime/runtime-weak-refs.cc
index 4bc258d7de..df7ed76bf3 100644
--- a/deps/v8/src/runtime/runtime-weak-refs.cc
+++ b/deps/v8/src/runtime/runtime-weak-refs.cc
@@ -15,12 +15,12 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_WeakFactoryCleanupJob) {
+RUNTIME_FUNCTION(Runtime_FinalizationGroupCleanupJob) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakFactory, weak_factory, 0);
- weak_factory->set_scheduled_for_cleanup(false);
+ CONVERT_ARG_HANDLE_CHECKED(JSFinalizationGroup, finalization_group, 0);
+ finalization_group->set_scheduled_for_cleanup(false);
- JSWeakFactory::Cleanup(weak_factory, isolate);
+ JSFinalizationGroup::Cleanup(finalization_group, isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 3d70a67553..058e02733e 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -124,6 +124,7 @@ bool Runtime::NeedsExactContext(FunctionId id) {
case Runtime::kThrowNotConstructor:
case Runtime::kThrowRangeError:
case Runtime::kThrowReferenceError:
+ case Runtime::kThrowAccessedUninitializedVariable:
case Runtime::kThrowStackOverflow:
case Runtime::kThrowStaticPrototypeError:
case Runtime::kThrowSuperAlreadyCalledError:
@@ -163,6 +164,7 @@ bool Runtime::IsNonReturning(FunctionId id) {
case Runtime::kThrowNotConstructor:
case Runtime::kThrowRangeError:
case Runtime::kThrowReferenceError:
+ case Runtime::kThrowAccessedUninitializedVariable:
case Runtime::kThrowStackOverflow:
case Runtime::kThrowSymbolAsyncIteratorInvalid:
case Runtime::kThrowTypeError:
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 43e4e99f5a..4dabce27a5 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -85,10 +85,8 @@ namespace internal {
F(HomeObjectSymbol, 0, 1) \
F(LoadFromSuper, 3, 1) \
F(LoadKeyedFromSuper, 3, 1) \
- F(StoreKeyedToSuper_Sloppy, 4, 1) \
- F(StoreKeyedToSuper_Strict, 4, 1) \
- F(StoreToSuper_Sloppy, 4, 1) \
- F(StoreToSuper_Strict, 4, 1) \
+ F(StoreKeyedToSuper, 4, 1) \
+ F(StoreToSuper, 4, 1) \
F(ThrowConstructorNonCallableError, 1, 1) \
F(ThrowNotSuperConstructor, 2, 1) \
F(ThrowStaticPrototypeError, 0, 1) \
@@ -207,6 +205,7 @@ namespace internal {
#endif // V8_INTL_SUPPORT
#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
+ F(AccessCheck, 1, 1) \
F(AllocateInNewSpace, 1, 1) \
F(AllocateInTargetSpace, 2, 1) \
F(AllocateSeqOneByteString, 1, 1) \
@@ -215,10 +214,10 @@ namespace internal {
F(CheckIsBootstrapping, 0, 1) \
I(CreateAsyncFromSyncIterator, 1, 1) \
F(CreateListFromArrayLike, 1, 1) \
- F(CreateTemplateObject, 1, 1) \
F(FatalProcessOutOfMemoryInAllocateRaw, 0, 1) \
F(FatalProcessOutOfMemoryInvalidArrayLength, 0, 1) \
F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
+ F(GetTemplateObject, 3, 1) \
F(IncrementUseCounter, 1, 1) \
F(Interrupt, 0, 1) \
F(NewReferenceError, 2, 1) \
@@ -244,14 +243,16 @@ namespace internal {
F(ThrowPatternAssignmentNonCoercible, 0, 1) \
F(ThrowRangeError, -1 /* >= 1 */, 1) \
F(ThrowReferenceError, 1, 1) \
+ F(ThrowAccessedUninitializedVariable, 1, 1) \
F(ThrowStackOverflow, 0, 1) \
F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
F(ThrowSymbolIteratorInvalid, 0, 1) \
F(ThrowThrowMethodMissing, 0, 1) \
F(ThrowTypeError, -1 /* >= 1 */, 1) \
+ F(ThrowTypeErrorIfStrict, -1 /* >= 1 */, 1) \
F(Typeof, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1) \
- F(WeakFactoryCleanupJob, 1, 1)
+ F(FinalizationGroupCleanupJob, 1, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F, I) \
F(CreateArrayLiteral, 4, 1) \
@@ -316,8 +317,8 @@ namespace internal {
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
F(PerformSideEffectCheckForObject, 1, 1) \
F(SetDataProperties, 2, 1) \
- F(SetKeyedProperty, 4, 1) \
- F(SetNamedProperty, 4, 1) \
+ F(SetKeyedProperty, 3, 1) \
+ F(SetNamedProperty, 3, 1) \
F(StoreDataPropertyInLiteral, 3, 1) \
F(ShrinkPropertyDictionary, 1, 1) \
F(ToFastProperties, 1, 1) \
@@ -363,14 +364,13 @@ namespace internal {
F(IsJSProxy, 1, 1) \
F(JSProxyGetHandler, 1, 1) \
F(JSProxyGetTarget, 1, 1) \
- F(SetPropertyWithReceiver, 5, 1)
+ F(SetPropertyWithReceiver, 4, 1)
#define FOR_EACH_INTRINSIC_REGEXP(F, I) \
I(IsRegExp, 1, 1) \
F(RegExpExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
- F(RegExpInternalReplace, 3, 1) \
F(RegExpReplace, 3, 1) \
F(RegExpSplit, 3, 1) \
F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
@@ -406,12 +406,11 @@ namespace internal {
F(FlattenString, 1, 1) \
F(GetSubstitution, 5, 1) \
F(InternalizeString, 1, 1) \
- F(SparseJoinWithSeparator, 3, 1) \
F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
- F(StringBuilderJoin, 3, 1) \
F(StringCharCodeAt, 2, 1) \
F(StringEqual, 2, 1) \
+ F(StringEscapeQuotes, 1, 1) \
F(StringGreaterThan, 2, 1) \
F(StringGreaterThanOrEqual, 2, 1) \
F(StringIncludes, 3, 1) \
@@ -422,6 +421,7 @@ namespace internal {
F(StringLessThanOrEqual, 2, 1) \
F(StringMaxLength, 0, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
+ F(StringCompareSequence, 3, 1) \
F(StringSubstring, 3, 1) \
F(StringToArray, 2, 1) \
F(StringTrim, 2, 1)
@@ -433,6 +433,7 @@ namespace internal {
F(SymbolIsPrivate, 1, 1)
#define FOR_EACH_INTRINSIC_TEST(F, I) \
+ F(ClearMegamorphicStubCache, 0, 1) \
F(Abort, 1, 1) \
F(AbortJS, 1, 1) \
F(ClearFunctionFeedback, 1, 1) \
@@ -460,6 +461,7 @@ namespace internal {
F(GetWasmRecoveredTrapCount, 0, 1) \
F(GlobalPrint, 1, 1) \
F(HasDictionaryElements, 1, 1) \
+ F(HasPackedElements, 1, 1) \
F(HasDoubleElements, 1, 1) \
F(HasFastElements, 1, 1) \
F(HasFastProperties, 1, 1) \
@@ -494,6 +496,7 @@ namespace internal {
F(NotifyContextDisposed, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
F(OptimizeOsr, -1, 1) \
+ F(PrepareFunctionForOptimization, 1, 1) \
F(PrintWithNameForAssert, 2, 1) \
F(RedirectToWasmInterpreter, 2, 1) \
F(RunningInSimulator, 0, 1) \
@@ -540,7 +543,7 @@ namespace internal {
F(WasmThrowCreate, 2, 1) \
F(WasmThrowTypeError, 0, 1) \
F(WasmTableInit, 5, 1) \
- F(WasmTableCopy, 4, 1) \
+ F(WasmTableCopy, 5, 1) \
F(WasmIsValidAnyFuncValue, 1, 1) \
F(WasmCompileLazy, 2, 1)
@@ -554,9 +557,8 @@ namespace internal {
F(ElementsTransitionAndStoreIC_Miss, 6, 1) \
F(KeyedLoadIC_Miss, 4, 1) \
F(KeyedStoreIC_Miss, 5, 1) \
- F(KeyedStoreICNoFeedback_Miss, 4, 1) \
F(StoreInArrayLiteralIC_Miss, 5, 1) \
- F(KeyedStoreIC_Slow, 5, 1) \
+ F(KeyedStoreIC_Slow, 3, 1) \
F(LoadAccessorProperty, 4, 1) \
F(LoadCallbackProperty, 4, 1) \
F(LoadElementWithInterceptor, 2, 1) \
@@ -564,15 +566,16 @@ namespace internal {
F(LoadGlobalIC_Slow, 3, 1) \
F(LoadIC_Miss, 4, 1) \
F(LoadPropertyWithInterceptor, 5, 1) \
- F(StoreCallbackProperty, 6, 1) \
+ F(StoreCallbackProperty, 5, 1) \
F(StoreGlobalIC_Miss, 4, 1) \
- F(StoreGlobalICNoFeedback_Miss, 3, 1) \
+ F(StoreGlobalICNoFeedback_Miss, 2, 1) \
F(StoreGlobalIC_Slow, 5, 1) \
F(StoreIC_Miss, 5, 1) \
- F(StoreICNoFeedback_Miss, 5, 1) \
F(StoreInArrayLiteralIC_Slow, 5, 1) \
F(StorePropertyWithInterceptor, 5, 1) \
- F(CloneObjectIC_Miss, 4, 1)
+ F(CloneObjectIC_Miss, 4, 1) \
+ F(KeyedHasIC_Miss, 4, 1) \
+ F(HasElementWithInterceptor, 2, 1)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT_IMPL(F, I) \
FOR_EACH_INTRINSIC_ARRAY(F, I) \
@@ -700,13 +703,16 @@ class Runtime : public AllStatic {
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SetObjectProperty(
Isolate* isolate, Handle<Object> object, Handle<Object> key,
- Handle<Object> value, LanguageMode language_mode,
- StoreOrigin store_origin);
+ Handle<Object> value, StoreOrigin store_origin,
+ Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>());
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetObjectProperty(
Isolate* isolate, Handle<Object> object, Handle<Object> key,
bool* is_found_out = nullptr);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> HasProperty(
+ Isolate* isolate, Handle<Object> object, Handle<Object> key);
+
V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> GetInternalProperties(
Isolate* isolate, Handle<Object>);
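
SetObjectProperty no longer derives throw-on-failure behavior from a LanguageMode; callers now pass an explicit, optional ShouldThrow, as the WasmThrowCreate hunk above does with Just(ShouldThrow::kThrowOnError). A plain-C++ analogy of that parameter shape, using std::optional in place of V8's Maybe (the names here are illustrative, not V8's):

    #include <optional>
    #include <stdexcept>

    enum class ShouldThrow { kThrowOnError, kDontThrow };

    // Tri-state: throw on error, silently fail, or "not specified", in
    // which case the callee falls back to deriving the mode itself.
    static bool SetProperty(
        bool underlying_ok,
        std::optional<ShouldThrow> should_throw = std::nullopt) {
      if (!underlying_ok && should_throw == ShouldThrow::kThrowOnError) {
        throw std::runtime_error("failed to set property");
      }
      return underlying_ok;
    }

    int main() {
      SetProperty(false);  // unspecified: no throw from this wrapper
      try {
        SetProperty(false, ShouldThrow::kThrowOnError);  // explicit: throws
      } catch (const std::runtime_error&) {
      }
    }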
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index b9440d0f65..d02f73ceeb 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -215,24 +215,6 @@ void RelocInfo::WipeOut() {
}
}
-template <typename ObjectVisitor>
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTargetMode(mode)) {
- visitor->VisitCodeTarget(host(), this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(host(), this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(host(), this);
- } else if (IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(host(), this);
- } else if (RelocInfo::IsOffHeapTarget(mode)) {
- visitor->VisitOffHeapTarget(host(), this);
- }
-}
-
// Operand constructors
Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
@@ -315,7 +297,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, 6);
+ FlushInstructionCache(pc, 6);
}
patched = true;
} else {
@@ -344,7 +326,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
Instruction::SetInstructionBits<SixByteInstr>(
reinterpret_cast<byte*>(pc + instr1_length), instr_2);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, 12);
+ FlushInstructionCache(pc, 12);
}
patched = true;
}
@@ -358,7 +340,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, 6);
+ FlushInstructionCache(pc, 6);
}
patched = true;
}
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index ad0c2892c9..124f4a93df 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -349,7 +349,9 @@ Assembler::Assembler(const AssemblerOptions& options,
relocations_.reserve(128);
}
-void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
EmitRelocations();
int code_comments_size = WriteCodeComments();
@@ -357,16 +359,25 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_start_;
- desc->buffer_size = buffer_->size();
- desc->instr_size = pc_offset();
- desc->reloc_size =
- (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
- desc->constant_pool_size = 0;
- desc->origin = this;
- desc->unwinding_info_size = 0;
- desc->unwinding_info = nullptr;
- desc->code_comments_size = code_comments_size;
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
}
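
The rewritten GetCode derives the metadata offsets back-to-front from pc_offset(): code comments sit at the very end, the (empty, on s390) constant pool comes before them, and the handler and safepoint table offsets collapse onto that boundary when the corresponding table is absent. A toy recomputation with assumed sizes (not real V8 output):

    #include <cstdio>

    int main() {
      // Assumed numbers for illustration only.
      const int instruction_size = 4096;  // pc_offset() at GetCode time
      const int code_comments_size = 64;
      const int kConstantPoolSize = 0;    // s390 emits no constant pool

      const int code_comments_offset = instruction_size - code_comments_size;
      const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
      // With kNoHandlerTable / kNoSafepointTable the two table offsets fall
      // back to the next boundary, i.e. they describe zero-size areas.
      const int handler_table_offset = constant_pool_offset;
      const int safepoint_table_offset = handler_table_offset;

      std::printf("safepoints@%d handlers@%d pool@%d comments@%d end@%d\n",
                  safepoint_table_offset, handler_table_offset,
                  constant_pool_offset, code_comments_offset, instruction_size);
    }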
void Assembler::Align(int m) {
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index e50e77d3da..109ef53236 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -81,6 +81,8 @@
namespace v8 {
namespace internal {
+class SafepointTableBuilder;
+
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -226,10 +228,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
virtual ~Assembler() {}
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(Isolate* isolate, CodeDesc* desc);
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
+
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -493,7 +505,7 @@ inline void rsy_format(Opcode op, int f1, int f2, int f3, int f4) {
void name(Register r1, Register r3, const MemOperand& opnd) { \
name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
- S390_RSY_A_OPCODE_LIST(DECLARE_S390_RSY_A_INSTRUCTIONS);
+ S390_RSY_A_OPCODE_LIST(DECLARE_S390_RSY_A_INSTRUCTIONS)
#undef DECLARE_S390_RSY_A_INSTRUCTIONS
#define DECLARE_S390_RSY_B_INSTRUCTIONS(name, op_name, op_value) \
@@ -503,7 +515,7 @@ inline void rsy_format(Opcode op, int f1, int f2, int f3, int f4) {
void name(Register r1, Condition m3, const MemOperand& opnd) { \
name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
- S390_RSY_B_OPCODE_LIST(DECLARE_S390_RSY_B_INSTRUCTIONS);
+ S390_RSY_B_OPCODE_LIST(DECLARE_S390_RSY_B_INSTRUCTIONS)
#undef DECLARE_S390_RSY_B_INSTRUCTIONS
@@ -523,7 +535,7 @@ inline void rs_format(Opcode op, int f1, int f2, int f3, const int f4) {
void name(Register r1, Register r3, const MemOperand& opnd) { \
name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
- S390_RS_A_OPCODE_LIST(DECLARE_S390_RS_A_INSTRUCTIONS);
+ S390_RS_A_OPCODE_LIST(DECLARE_S390_RS_A_INSTRUCTIONS)
#undef DECLARE_S390_RS_A_INSTRUCTIONS
#define DECLARE_S390_RS_B_INSTRUCTIONS(name, op_name, op_value) \
@@ -533,7 +545,7 @@ inline void rs_format(Opcode op, int f1, int f2, int f3, const int f4) {
void name(Register r1, Condition m3, const MemOperand& opnd) { \
name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
- S390_RS_B_OPCODE_LIST(DECLARE_S390_RS_B_INSTRUCTIONS);
+ S390_RS_B_OPCODE_LIST(DECLARE_S390_RS_B_INSTRUCTIONS)
#undef DECLARE_S390_RS_B_INSTRUCTIONS
#define DECLARE_S390_RS_SHIFT_FORMAT(name, opcode) \
@@ -579,7 +591,7 @@ inline void rxe_format(Opcode op, int f1, int f2, int f3, int f4, int f5 = 0) {
name(Register::from_code(r1.code()), opnd.rx(), opnd.rb(), \
Operand(opnd.offset())); \
}
- S390_RXE_OPCODE_LIST(DECLARE_S390_RXE_INSTRUCTIONS);
+ S390_RXE_OPCODE_LIST(DECLARE_S390_RXE_INSTRUCTIONS)
#undef DECLARE_S390_RXE_INSTRUCTIONS
@@ -598,7 +610,7 @@ inline void ri_format(Opcode opcode, int f1, int f2) {
DCHECK(is_uint16(i2.immediate()) || is_int16(i2.immediate())); \
ri_format(op_name, r.code(), i2.immediate()); \
}
- S390_RI_A_OPCODE_LIST(DECLARE_S390_RI_A_INSTRUCTIONS);
+ S390_RI_A_OPCODE_LIST(DECLARE_S390_RI_A_INSTRUCTIONS)
#undef DECLARE_S390_RI_A_INSTRUCTIONS
#define DECLARE_S390_RI_B_INSTRUCTIONS(name, op_name, op_value) \
@@ -609,7 +621,7 @@ inline void ri_format(Opcode opcode, int f1, int f2) {
halfwordOp.setBits(16); \
ri_format(op_name, r1.code(), halfwordOp.immediate()); \
}
- S390_RI_B_OPCODE_LIST(DECLARE_S390_RI_B_INSTRUCTIONS);
+ S390_RI_B_OPCODE_LIST(DECLARE_S390_RI_B_INSTRUCTIONS)
#undef DECLARE_S390_RI_B_INSTRUCTIONS
#define DECLARE_S390_RI_C_INSTRUCTIONS(name, op_name, op_value) \
@@ -620,7 +632,7 @@ inline void ri_format(Opcode opcode, int f1, int f2) {
is_int16(i2.immediate()) : is_uint16(i2.immediate())); \
ri_format(op_name, m, i2.immediate()); \
}
- S390_RI_C_OPCODE_LIST(DECLARE_S390_RI_C_INSTRUCTIONS);
+ S390_RI_C_OPCODE_LIST(DECLARE_S390_RI_C_INSTRUCTIONS)
#undef DECLARE_S390_RI_C_INSTRUCTIONS
@@ -640,7 +652,7 @@ inline void rrf_format(Opcode op, int f1, int f2, int f3, int f4) {
void name(Register r1, Register r2, Register r3) { \
name(r1, Condition(0), r2, r3); \
}
- S390_RRF_A_OPCODE_LIST(DECLARE_S390_RRF_A_INSTRUCTIONS);
+ S390_RRF_A_OPCODE_LIST(DECLARE_S390_RRF_A_INSTRUCTIONS)
#undef DECLARE_S390_RRF_A_INSTRUCTIONS
@@ -651,7 +663,7 @@ inline void rrf_format(Opcode op, int f1, int f2, int f3, int f4) {
void name(Register r1, Register r2, Register r3) { \
name(r1, Condition(0), r2, r3); \
}
- S390_RRF_B_OPCODE_LIST(DECLARE_S390_RRF_B_INSTRUCTIONS);
+ S390_RRF_B_OPCODE_LIST(DECLARE_S390_RRF_B_INSTRUCTIONS)
#undef DECLARE_S390_RRF_B_INSTRUCTIONS
@@ -664,7 +676,7 @@ inline void rrf_format(Opcode op, int f1, int f2, int f3, int f4) {
void name(Condition m3, R1 r1, R2 r2) { \
name(m3, Condition(0), r1, r2); \
}
- S390_RRF_C_OPCODE_LIST(DECLARE_S390_RRF_C_INSTRUCTIONS);
+ S390_RRF_C_OPCODE_LIST(DECLARE_S390_RRF_C_INSTRUCTIONS)
#undef DECLARE_S390_RRF_C_INSTRUCTIONS
@@ -677,7 +689,7 @@ inline void rrf_format(Opcode op, int f1, int f2, int f3, int f4) {
void name(Condition m3, R1 r1, R2 r2) { \
name(m3, Condition(0), r1, r2); \
}
- S390_RRF_D_OPCODE_LIST(DECLARE_S390_RRF_D_INSTRUCTIONS);
+ S390_RRF_D_OPCODE_LIST(DECLARE_S390_RRF_D_INSTRUCTIONS)
#undef DECLARE_S390_RRF_D_INSTRUCTIONS
@@ -690,7 +702,7 @@ inline void rrf_format(Opcode op, int f1, int f2, int f3, int f4) {
void name(M3 m3, R1 r1, R2 r2) { \
name(m3, Condition(0), r1, r2); \
}
- S390_RRF_E_OPCODE_LIST(DECLARE_S390_RRF_E_INSTRUCTIONS);
+ S390_RRF_E_OPCODE_LIST(DECLARE_S390_RRF_E_INSTRUCTIONS)
#undef DECLARE_S390_RRF_E_INSTRUCTIONS
enum FIDBRA_FLAGS {
@@ -717,7 +729,7 @@ inline void rsi_format(Opcode op, int f1, int f2, int f3) {
void name(Register r1, Register r3, const Operand& i2) { \
rsi_format(op_name, r1.code(), r3.code(), i2.immediate()); \
}
- S390_RSI_OPCODE_LIST(DECLARE_S390_RSI_INSTRUCTIONS);
+ S390_RSI_OPCODE_LIST(DECLARE_S390_RSI_INSTRUCTIONS)
#undef DECLARE_S390_RSI_INSTRUCTIONS
@@ -739,7 +751,7 @@ inline void rsl_format(Opcode op, uint16_t f1, int f2, int f3, int f4,
uint16_t L = static_cast<uint16_t>(l1.immediate() << 8); \
rsl_format(op_name, L, b1.code(), d1.immediate(), 0, 0); \
}
- S390_RSL_A_OPCODE_LIST(DECLARE_S390_RSL_A_INSTRUCTIONS);
+ S390_RSL_A_OPCODE_LIST(DECLARE_S390_RSL_A_INSTRUCTIONS)
#undef DECLARE_S390_RSL_A_INSTRUCTIONS
#define DECLARE_S390_RSL_B_INSTRUCTIONS(name, op_name, op_value) \
@@ -748,7 +760,7 @@ inline void rsl_format(Opcode op, uint16_t f1, int f2, int f3, int f4,
uint16_t L = static_cast<uint16_t>(l2.immediate()); \
rsl_format(op_name, L, b2.code(), d2.immediate(), r1.code(), m3); \
}
- S390_RSL_B_OPCODE_LIST(DECLARE_S390_RSL_B_INSTRUCTIONS);
+ S390_RSL_B_OPCODE_LIST(DECLARE_S390_RSL_B_INSTRUCTIONS)
#undef DECLARE_S390_RSL_B_INSTRUCTIONS
@@ -773,7 +785,7 @@ inline void s_format(Opcode op, int f1, int f2) {
Operand d2 = Operand(opnd.getDisplacement()); \
name(opnd.getBaseRegister(), d2); \
}
- S390_S_OPCODE_LIST(DECLARE_S390_S_INSTRUCTIONS);
+ S390_S_OPCODE_LIST(DECLARE_S390_S_INSTRUCTIONS)
#undef DECLARE_S390_S_INSTRUCTIONS
@@ -792,7 +804,7 @@ inline void si_format(Opcode op, int f1, int f2, int f3) {
void name(const MemOperand& opnd, const Operand& i2) { \
name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
- S390_SI_OPCODE_LIST(DECLARE_S390_SI_INSTRUCTIONS);
+ S390_SI_OPCODE_LIST(DECLARE_S390_SI_INSTRUCTIONS)
#undef DECLARE_S390_SI_INSTRUCTIONS
@@ -816,7 +828,7 @@ inline void siy_format(Opcode op, int f1, int f2, int f3) {
void name(const MemOperand& opnd, const Operand& i2) { \
name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
- S390_SIY_OPCODE_LIST(DECLARE_S390_SIY_INSTRUCTIONS);
+ S390_SIY_OPCODE_LIST(DECLARE_S390_SIY_INSTRUCTIONS)
#undef DECLARE_S390_SIY_INSTRUCTIONS
@@ -844,7 +856,7 @@ inline void rrs_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
name(r1, r2, opnd.getBaseRegister(), \
Operand(opnd.getDisplacement()), m3); \
}
- S390_RRS_OPCODE_LIST(DECLARE_S390_RRS_INSTRUCTIONS);
+ S390_RRS_OPCODE_LIST(DECLARE_S390_RRS_INSTRUCTIONS)
#undef DECLARE_S390_RRS_INSTRUCTIONS
@@ -873,7 +885,7 @@ inline void ris_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
name(r1, m3, opnd.getBaseRegister(), \
Operand(opnd.getDisplacement()), i2); \
}
- S390_RIS_OPCODE_LIST(DECLARE_S390_RIS_INSTRUCTIONS);
+ S390_RIS_OPCODE_LIST(DECLARE_S390_RIS_INSTRUCTIONS)
#undef DECLARE_S390_RIS_INSTRUCTIONS
@@ -895,7 +907,7 @@ inline void sil_format(Opcode op, int f1, int f2, int f3) {
void name(const MemOperand& opnd, const Operand& i2) { \
name(opnd.getBaseRegister(), Operand(opnd.getDisplacement()), i2); \
}
- S390_SIL_OPCODE_LIST(DECLARE_S390_SIL_INSTRUCTIONS);
+ S390_SIL_OPCODE_LIST(DECLARE_S390_SIL_INSTRUCTIONS)
#undef DECLARE_S390_SIL_INSTRUCTIONS
diff --git a/deps/v8/src/s390/constants-s390.h b/deps/v8/src/s390/constants-s390.h
index 25b76fa4ea..fff6efacab 100644
--- a/deps/v8/src/s390/constants-s390.h
+++ b/deps/v8/src/s390/constants-s390.h
@@ -1822,15 +1822,6 @@ class Instruction {
};
static OpcodeFormatType OpcodeFormatTable[256];
-// Helper macro to define static accessors.
-// We use the cast to char* trick to bypass the strict anti-aliasing rules.
-#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
- static inline return_type Name(Instr instr) { \
- char* temp = reinterpret_cast<char*>(&instr); \
- return reinterpret_cast<Instruction*>(temp)->Name(); \
- }
-
-#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
// Get the raw instruction bits.
template <typename T>
@@ -2076,7 +2067,7 @@ class SixByteInstruction : public Instruction {
// I Instruction
class IInstruction : public TwoByteInstruction {
public:
- DECLARE_FIELD_FOR_TWO_BYTE_INSTR(IValue, int, 8, 16);
+ DECLARE_FIELD_FOR_TWO_BYTE_INSTR(IValue, int, 8, 16)
};
// E Instruction
@@ -2085,25 +2076,25 @@ class EInstruction : public TwoByteInstruction {};
// IE Instruction
class IEInstruction : public FourByteInstruction {
public:
- DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I1Value, int, 24, 28);
- DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2Value, int, 28, 32);
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I1Value, int, 24, 28)
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2Value, int, 28, 32)
};
// MII Instruction
class MIIInstruction : public SixByteInstruction {
public:
- DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M1Value, uint32_t, 8, 12);
- DECLARE_FIELD_FOR_SIX_BYTE_INSTR(RI2Value, int, 12, 24);
- DECLARE_FIELD_FOR_SIX_BYTE_INSTR(RI3Value, int, 24, 47);
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M1Value, uint32_t, 8, 12)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(RI2Value, int, 12, 24)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(RI3Value, int, 24, 47)
};
// RI Instruction
class RIInstruction : public FourByteInstruction {
public:
- DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(R1Value, int, 8, 12);
- DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2Value, int, 16, 32);
- DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2UnsignedValue, uint32_t, 16, 32);
- DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(M1Value, uint32_t, 8, 12);
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(R1Value, int, 8, 12)
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2Value, int, 16, 32)
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2UnsignedValue, uint32_t, 16, 32)
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(M1Value, uint32_t, 8, 12)
};
// RR Instruction
@@ -2299,12 +2290,12 @@ class RIEInstruction : Instruction {
// VRR Instruction
class VRR_C_Instruction : SixByteInstruction {
public:
- DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12);
- DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R2Value, int, 12, 16);
- DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R3Value, int, 16, 20);
- DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M6Value, uint32_t, 24, 28);
- DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M5Value, uint32_t, 28, 32);
- DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M4Value, uint32_t, 32, 36);
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R2Value, int, 12, 16)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R3Value, int, 16, 20)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M6Value, uint32_t, 24, 28)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M5Value, uint32_t, 28, 32)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M4Value, uint32_t, 32, 36)
};
// Helper functions for converting between register numbers and names.
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index e7d4c8e449..da51d195ff 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -98,6 +98,14 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : function template info
+ // r4 : number of arguments (on the stack, not including receiver)
+ Register registers[] = {r3, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments (on the stack, not including receiver)
@@ -202,9 +210,10 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // kTargetContext
- r3, // kApiFunctionAddress
- r4, // kArgc
+ r3, // kApiFunctionAddress
+ r4, // kArgc
+ r5, // kCallData
+ r2, // kHolder
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 69fca2b15e..f12b5a3021 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -16,6 +16,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/macro-assembler.h"
#include "src/objects/smi.h"
#include "src/register-configuration.h"
@@ -142,11 +143,6 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
void TurboAssembler::Jump(Register target, Condition cond) { b(cond, target); }
-void MacroAssembler::JumpToJSEntry(Register target) {
- Move(ip, target);
- Jump(ip);
-}
-
void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
Label skip;
@@ -1522,6 +1518,20 @@ void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
CmpP(obj, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit,
+ Label* on_in_range) {
+ if (lower_limit != 0) {
+ Register scratch = r0;
+ LoadRR(scratch, value);
+ slgfi(scratch, Operand(lower_limit));
+ CmpLogicalP(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ CmpLogicalP(value, Operand(higher_limit));
+ }
+ ble(on_in_range);
+}
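
JumpIfIsInRange relies on the standard unsigned-subtract trick: after subtracting lower_limit (the slgfi), a single unsigned comparison against higher_limit - lower_limit covers both ends of the range, because values below lower_limit wrap around to huge unsigned numbers. The same check in portable C++:

    #include <cassert>

    static bool IsInRange(unsigned value, unsigned lower_limit,
                          unsigned higher_limit) {
      // value < lower_limit wraps on subtraction, so one unsigned
      // comparison rejects both too-small and too-large values.
      return value - lower_limit <= higher_limit - lower_limit;
    }

    int main() {
      assert(IsInRange(5, 3, 9));
      assert(!IsInRange(2, 3, 9));   // below: 2 - 3 wraps to UINT_MAX
      assert(!IsInRange(10, 3, 9));  // above
    }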
+
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DoubleRegister double_input,
Register scratch,
@@ -2014,58 +2024,6 @@ void TurboAssembler::CheckPageFlag(
}
}
-////////////////////////////////////////////////////////////////////////////////
-//
-// New MacroAssembler Interfaces added for S390
-//
-////////////////////////////////////////////////////////////////////////////////
-// Primarily used for loading constants
-// This should really move to be in macro-assembler as it
-// is really a pseudo instruction
-// Some usages of this intend for a FIXED_SEQUENCE to be used
-// @TODO - break this dependency so we can optimize mov() in general
-// and only use the generic version when we require a fixed sequence
-void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
- Representation r, Register scratch) {
- DCHECK(!r.IsDouble());
- if (r.IsInteger8()) {
- LoadB(dst, mem);
- } else if (r.IsUInteger8()) {
- LoadlB(dst, mem);
- } else if (r.IsInteger16()) {
- LoadHalfWordP(dst, mem, scratch);
- } else if (r.IsUInteger16()) {
- LoadHalfWordP(dst, mem, scratch);
-#if V8_TARGET_ARCH_S390X
- } else if (r.IsInteger32()) {
- LoadW(dst, mem, scratch);
-#endif
- } else {
- LoadP(dst, mem, scratch);
- }
-}
-
-void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
- Representation r, Register scratch) {
- DCHECK(!r.IsDouble());
- if (r.IsInteger8() || r.IsUInteger8()) {
- StoreByte(src, mem, scratch);
- } else if (r.IsInteger16() || r.IsUInteger16()) {
- StoreHalfWord(src, mem, scratch);
-#if V8_TARGET_ARCH_S390X
- } else if (r.IsInteger32()) {
- StoreW(src, mem, scratch);
-#endif
- } else {
- if (r.IsHeapObject()) {
- AssertNotSmi(src);
- } else if (r.IsSmi()) {
- AssertSmi(src);
- }
- StoreP(src, mem, scratch);
- }
-}
-
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 243ed278a1..680f217cbb 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -1074,6 +1074,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bne(if_not_equal);
}
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range);
+
// Try to convert a double to a signed 32-bit integer.
// CR_EQ in cr7 is set and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
@@ -1238,8 +1243,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
static int CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
- void JumpToJSEntry(Register target);
-
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
@@ -1265,11 +1268,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void PushSafepointRegisters();
void PopSafepointRegisters();
- void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
- Register scratch = no_reg);
- void StoreRepresentation(Register src, const MemOperand& mem,
- Representation r, Register scratch = no_reg);
-
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Helper functions for generating invokes.
@@ -1283,6 +1281,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
// -----------------------------------------------------------------------------
diff --git a/deps/v8/src/s390/register-s390.h b/deps/v8/src/s390/register-s390.h
index e0114342e0..06537bcb06 100644
--- a/deps/v8/src/s390/register-s390.h
+++ b/deps/v8/src/s390/register-s390.h
@@ -246,8 +246,8 @@ C_REGISTERS(DECLARE_C_REGISTER)
#undef DECLARE_C_REGISTER
// Define {RegisterName} methods for the register types.
-DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS);
-DEFINE_REGISTER_NAMES(DoubleRegister, DOUBLE_REGISTERS);
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(DoubleRegister, DOUBLE_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = r2;
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 04b2a7748c..588d1b6e40 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -4499,30 +4499,15 @@ EVALUATE(LLILL) {
return 0;
}
-EVALUATE(TMLH) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
-}
-
-EVALUATE(TMLL) {
- DCHECK_OPCODE(TMLL);
- DECODE_RI_A_INSTRUCTION(instr, r1, i2);
- uint32_t mask = i2 & 0x0000FFFF;
- uint32_t r1_val = get_low_register<uint32_t>(r1);
- r1_val = r1_val & 0x0000FFFF; // uses only the last 16bits
-
+inline static int TestUnderMask(uint16_t val, uint16_t mask) {
// Test if all selected bits are zeros or mask is zero
- if (0 == (mask & r1_val)) {
- condition_reg_ = 0x8;
- return length; // Done!
+ if (0 == (mask & val)) {
+ return 0x8;
}
- DCHECK_NE(mask, 0);
- // Test if all selected bits are one
- if (mask == (mask & r1_val)) {
- condition_reg_ = 0x1;
- return length; // Done!
+ // Test if all selected bits are one (mask is known non-zero here)
+ if (mask == (mask & val)) {
+ return 0x1;
}
// Now we know selected bits mixed zeros and ones
@@ -4530,31 +4515,47 @@ EVALUATE(TMLL) {
#if defined(__GNUC__)
int leadingZeros = __builtin_clz(mask);
mask = 0x80000000u >> leadingZeros;
- if (mask & r1_val) {
+ if (mask & val) {
// leftmost bit is one
- condition_reg_ = 0x2;
+ return 0x2;
} else {
// leftmost bit is zero
- condition_reg_ = 0x4;
+ return 0x4;
}
- return length; // Done!
#else
for (int i = 15; i >= 0; i--) {
if (mask & (1 << i)) {
- if (r1_val & (1 << i)) {
+ if (val & (1 << i)) {
// leftmost bit is one
- condition_reg_ = 0x2;
+ return 0x2;
} else {
// leftmost bit is zero
- condition_reg_ = 0x4;
+ return 0x4;
}
- return length; // Done!
}
}
#endif
UNREACHABLE();
}
+EVALUATE(TMLH) {
+ DCHECK_OPCODE(TMLH);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ uint32_t value = get_low_register<uint32_t>(r1) >> 16;
+ uint32_t mask = i2 & 0x0000FFFF;
+ condition_reg_ = TestUnderMask(value, mask);
+ return length; // DONE
+}
+
+EVALUATE(TMLL) {
+ DCHECK_OPCODE(TMLL);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ uint32_t value = get_low_register<uint32_t>(r1) & 0x0000FFFF;
+ uint32_t mask = i2 & 0x0000FFFF;
+ condition_reg_ = TestUnderMask(value, mask);
+ return length; // DONE
+}
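
The TMLH/TMLL refactor funnels both instructions through one TestUnderMask helper that maps a 16-bit value/mask pair to an s390 condition code: 0x8 when every selected bit is zero (or the mask is empty), 0x1 when every selected bit is one, and 0x2 or 0x4 for mixed results depending on the leftmost selected bit. A standalone restatement of that table for illustration only, using the portable loop instead of __builtin_clz:

    #include <cassert>
    #include <cstdint>

    static int TestUnderMask(uint16_t val, uint16_t mask) {
      if ((mask & val) == 0) return 0x8;     // all selected bits zero, or mask 0
      if ((mask & val) == mask) return 0x1;  // all selected bits one
      // Mixed zeros and ones: result depends on the leftmost selected bit.
      for (int i = 15; i >= 0; i--) {
        if (mask & (1 << i)) return (val & (1 << i)) ? 0x2 : 0x4;
      }
      return 0;  // unreachable: mask is non-zero on this path
    }

    int main() {
      assert(TestUnderMask(0x0000, 0x00FF) == 0x8);
      assert(TestUnderMask(0x00FF, 0x00FF) == 0x1);
      assert(TestUnderMask(0x0081, 0x00FF) == 0x2);  // leftmost selected bit set
      assert(TestUnderMask(0x0001, 0x00FF) == 0x4);  // leftmost selected bit clear
    }

TMLH feeds the high halfword (the register value shifted right by 16) into this helper; TMLL feeds the low one.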
+
EVALUATE(TMHH) {
UNIMPLEMENTED();
USE(instr);
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index f8003f115d..919c932b15 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
+#include "src/utils.h"
#include "src/v8memory.h"
#include "src/zone/zone-chunk-list.h"
#include "src/zone/zone.h"
@@ -143,7 +144,7 @@ class SafepointTable {
static void PrintBits(std::ostream& os, // NOLINT
uint8_t byte, int digits);
- DISALLOW_HEAP_ALLOCATION(no_allocation_);
+ DISALLOW_HEAP_ALLOCATION(no_allocation_)
Address instruction_start_;
uint32_t stack_slots_;
unsigned length_;
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index e158e4d92b..f55d5b57e5 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -1,5 +1,6 @@
set noparent
+delphick@chromium.org
jgruber@chromium.org
petermarshall@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index bdae825950..df728fcfc9 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -6,6 +6,7 @@
#include "src/counters.h"
#include "src/debug/debug.h"
+#include "src/heap/heap-inl.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
@@ -96,10 +97,7 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
return data.GetScriptData();
}
-bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip) {
+bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
PagedSpace* read_only_space = isolate()->heap()->read_only_space();
if (!read_only_space->Contains(obj)) return false;
@@ -117,42 +115,24 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj,
SerializerReference back_reference =
SerializerReference::BackReference(RO_SPACE, chunk_index, chunk_offset);
reference_map()->Add(reinterpret_cast<void*>(obj->ptr()), back_reference);
- CHECK(SerializeBackReference(obj, how_to_code, where_to_point, skip));
+ CHECK(SerializeBackReference(obj));
return true;
}
-void CodeSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+void CodeSerializer::SerializeObject(HeapObject obj) {
+ if (SerializeHotObject(obj)) return;
- if (SerializeRoot(obj, how_to_code, where_to_point, skip)) return;
+ if (SerializeRoot(obj)) return;
- if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
+ if (SerializeBackReference(obj)) return;
- if (SerializeReadOnlyObject(obj, how_to_code, where_to_point, skip)) return;
+ if (SerializeReadOnlyObject(obj)) return;
- FlushSkip(skip);
-
- if (obj->IsCode()) {
- Code code_object = Code::cast(obj);
- switch (code_object->kind()) {
- case Code::OPTIMIZED_FUNCTION: // No optimized code compiled yet.
- case Code::REGEXP: // No regexp literals initialized yet.
- case Code::NUMBER_OF_KINDS: // Pseudo enum value.
- case Code::BYTECODE_HANDLER: // No direct references to handlers.
- break; // hit UNREACHABLE below.
- case Code::STUB:
- case Code::BUILTIN:
- default:
- return SerializeCodeObject(code_object, how_to_code, where_to_point);
- }
- UNREACHABLE();
- }
+ CHECK(!obj->IsCode());
ReadOnlyRoots roots(isolate());
if (ElideObject(obj)) {
- return SerializeObject(roots.undefined_value(), how_to_code, where_to_point,
- skip);
+ return SerializeObject(roots.undefined_value());
}
if (obj->IsScript()) {
@@ -170,7 +150,7 @@ void CodeSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
// object graph.
FixedArray host_options = script_obj->host_defined_options();
script_obj->set_host_defined_options(roots.empty_fixed_array());
- SerializeGeneric(obj, how_to_code, where_to_point);
+ SerializeGeneric(obj);
script_obj->set_host_defined_options(host_options);
script_obj->set_context_data(context_data);
return;
@@ -195,7 +175,7 @@ void CodeSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
}
DCHECK(!sfi->HasDebugInfo());
- SerializeGeneric(obj, how_to_code, where_to_point);
+ SerializeGeneric(obj);
// Restore debug info
if (!debug_info.is_null()) {
@@ -221,15 +201,12 @@ void CodeSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
// We expect no instantiated function objects or contexts.
CHECK(!obj->IsJSFunction() && !obj->IsContext());
- SerializeGeneric(obj, how_to_code, where_to_point);
+ SerializeGeneric(obj);
}
-void CodeSerializer::SerializeGeneric(HeapObject heap_object,
- HowToCode how_to_code,
- WhereToPoint where_to_point) {
+void CodeSerializer::SerializeGeneric(HeapObject heap_object) {
// Object has not yet been serialized. Serialize it here.
- ObjectSerializer serializer(this, heap_object, &sink_, how_to_code,
- where_to_point);
+ ObjectSerializer serializer(this, heap_object, &sink_);
serializer.Serialize();
}
@@ -338,8 +315,6 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
SetMagicNumber();
SetHeaderValue(kVersionHashOffset, Version::Hash());
SetHeaderValue(kSourceHashOffset, cs->source_hash());
- SetHeaderValue(kCpuFeaturesOffset,
- static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
SetHeaderValue(kFlagHashOffset, FlagList::Hash());
SetHeaderValue(kNumReservationsOffset,
static_cast<uint32_t>(reservations.size()));
@@ -369,16 +344,12 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
if (magic_number != kMagicNumber) return MAGIC_NUMBER_MISMATCH;
uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
- uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
uint32_t c1 = GetHeaderValue(kChecksumPartAOffset);
uint32_t c2 = GetHeaderValue(kChecksumPartBOffset);
if (version_hash != Version::Hash()) return VERSION_MISMATCH;
if (source_hash != expected_source_hash) return SOURCE_MISMATCH;
- if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
- return CPU_FEATURES_MISMATCH;
- }
if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
uint32_t max_payload_length =
this->size_ -
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 446566602c..720995f163 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -58,23 +58,15 @@ class CodeSerializer : public Serializer {
CodeSerializer(Isolate* isolate, uint32_t source_hash);
~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
- virtual void SerializeCodeObject(Code code_object, HowToCode how_to_code,
- WhereToPoint where_to_point) {
- UNREACHABLE();
- }
-
virtual bool ElideObject(Object obj) { return false; }
- void SerializeGeneric(HeapObject heap_object, HowToCode how_to_code,
- WhereToPoint where_to_point);
+ void SerializeGeneric(HeapObject heap_object);
private:
- void SerializeObject(HeapObject o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeObject(HeapObject o) override;
- bool SerializeReadOnlyObject(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
+ bool SerializeReadOnlyObject(HeapObject obj);
- DISALLOW_HEAP_ALLOCATION(no_gc_);
+ DISALLOW_HEAP_ALLOCATION(no_gc_)
uint32_t source_hash_;
DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
@@ -87,7 +79,6 @@ class SerializedCodeData : public SerializedData {
MAGIC_NUMBER_MISMATCH = 1,
VERSION_MISMATCH = 2,
SOURCE_MISMATCH = 3,
- CPU_FEATURES_MISMATCH = 4,
FLAGS_MISMATCH = 5,
CHECKSUM_MISMATCH = 6,
INVALID_HEADER = 7,
@@ -98,19 +89,17 @@ class SerializedCodeData : public SerializedData {
// [0] magic number and (internally provided) external reference count
// [1] version hash
// [2] source hash
- // [3] cpu features
- // [4] flag hash
- // [5] number of reservation size entries
- // [6] payload length
- // [7] payload checksum part A
- // [8] payload checksum part B
+ // [3] flag hash
+ // [4] number of reservation size entries
+ // [5] payload length
+ // [6] payload checksum part A
+ // [7] payload checksum part B
// ... reservations
// ... code stub keys
// ... serialized payload
static const uint32_t kVersionHashOffset = kMagicNumberOffset + kUInt32Size;
static const uint32_t kSourceHashOffset = kVersionHashOffset + kUInt32Size;
- static const uint32_t kCpuFeaturesOffset = kSourceHashOffset + kUInt32Size;
- static const uint32_t kFlagHashOffset = kCpuFeaturesOffset + kUInt32Size;
+ static const uint32_t kFlagHashOffset = kSourceHashOffset + kUInt32Size;
static const uint32_t kNumReservationsOffset = kFlagHashOffset + kUInt32Size;
static const uint32_t kPayloadLengthOffset =
kNumReservationsOffset + kUInt32Size;
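
With the cpu-features word gone, every later header field shifts down one uint32 slot, which is exactly what the chained kXxxOffset constants encode. A quick recomputation of the resulting layout, assuming kMagicNumberOffset is 0 and kUInt32Size is 4 (both defined elsewhere in SerializedData):

    #include <cstdio>

    int main() {
      const int kUInt32Size = 4;
      const int kMagicNumberOffset = 0;  // assumed for illustration
      const int kVersionHashOffset = kMagicNumberOffset + kUInt32Size;       // 4
      const int kSourceHashOffset = kVersionHashOffset + kUInt32Size;        // 8
      const int kFlagHashOffset = kSourceHashOffset + kUInt32Size;           // 12
      const int kNumReservationsOffset = kFlagHashOffset + kUInt32Size;      // 16
      const int kPayloadLengthOffset = kNumReservationsOffset + kUInt32Size; // 20
      std::printf("flags@%d reservations@%d payload_len@%d\n", kFlagHashOffset,
                  kNumReservationsOffset, kPayloadLengthOffset);
    }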
diff --git a/deps/v8/src/snapshot/deserializer-allocator.cc b/deps/v8/src/snapshot/deserializer-allocator.cc
index 09f8f678c3..7862865d43 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/deserializer-allocator.cc
@@ -5,15 +5,10 @@
#include "src/snapshot/deserializer-allocator.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/snapshot/deserializer.h"
-#include "src/snapshot/startup-deserializer.h"
namespace v8 {
namespace internal {
-DeserializerAllocator::DeserializerAllocator(Deserializer* deserializer)
- : deserializer_(deserializer) {}
-
// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
@@ -27,10 +22,10 @@ DeserializerAllocator::DeserializerAllocator(Deserializer* deserializer)
// reference large objects by index.
Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
if (space == LO_SPACE) {
- AlwaysAllocateScope scope(isolate());
+ AlwaysAllocateScope scope(heap_);
// Note that we currently do not support deserialization of large code
// objects.
- LargeObjectSpace* lo_space = isolate()->heap()->lo_space();
+ LargeObjectSpace* lo_space = heap_->lo_space();
AllocationResult result = lo_space->AllocateRaw(size);
HeapObject obj = result.ToObjectChecked();
deserialized_large_objects_.push_back(obj);
@@ -65,11 +60,10 @@ Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
// If one of the following assertions fails, then we are deserializing an
// aligned object when the filler maps have not been deserialized yet.
// We require filler maps as padding to align the object.
- Heap* heap = isolate()->heap();
- DCHECK(ReadOnlyRoots(heap).free_space_map()->IsMap());
- DCHECK(ReadOnlyRoots(heap).one_pointer_filler_map()->IsMap());
- DCHECK(ReadOnlyRoots(heap).two_pointer_filler_map()->IsMap());
- obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
+ DCHECK(ReadOnlyRoots(heap_).free_space_map()->IsMap());
+ DCHECK(ReadOnlyRoots(heap_).one_pointer_filler_map()->IsMap());
+ DCHECK(ReadOnlyRoots(heap_).two_pointer_filler_map()->IsMap());
+ obj = heap_->AlignWithFiller(obj, size, reserved, next_alignment_);
address = obj->address();
next_alignment_ = kWordAligned;
return address;
@@ -135,7 +129,7 @@ bool DeserializerAllocator::ReserveSpace() {
}
#endif // DEBUG
DCHECK(allocated_maps_.empty());
- if (!isolate()->heap()->ReserveSpace(reservations_, &allocated_maps_)) {
+ if (!heap_->ReserveSpace(reservations_, &allocated_maps_)) {
return false;
}
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
@@ -158,13 +152,9 @@ bool DeserializerAllocator::ReservationsAreFullyUsed() const {
}
void DeserializerAllocator::RegisterDeserializedObjectsForBlackAllocation() {
- isolate()->heap()->RegisterDeserializedObjectsForBlackAllocation(
+ heap_->RegisterDeserializedObjectsForBlackAllocation(
reservations_, deserialized_large_objects_, allocated_maps_);
}
-Isolate* DeserializerAllocator::isolate() const {
- return deserializer_->isolate();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/deserializer-allocator.h b/deps/v8/src/snapshot/deserializer-allocator.h
index eb06c2689a..b44248aff1 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.h
+++ b/deps/v8/src/snapshot/deserializer-allocator.h
@@ -18,7 +18,9 @@ class StartupDeserializer;
class DeserializerAllocator final {
public:
- explicit DeserializerAllocator(Deserializer* deserializer);
+ DeserializerAllocator() = default;
+
+ void Initialize(Heap* heap) { heap_ = heap; }
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
@@ -65,8 +67,6 @@ class DeserializerAllocator final {
void RegisterDeserializedObjectsForBlackAllocation();
private:
- Isolate* isolate() const;
-
// Raw allocation without considering alignment.
Address AllocateRaw(AllocationSpace space, int size);
@@ -97,8 +97,7 @@ class DeserializerAllocator final {
// back-references.
std::vector<HeapObject> deserialized_large_objects_;
- // The current deserializer.
- Deserializer* const deserializer_;
+ Heap* heap_;
DISALLOW_COPY_AND_ASSIGN(DeserializerAllocator);
};
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index abb31c5326..2569ca18c4 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -5,10 +5,13 @@
#include "src/snapshot/deserializer.h"
#include "src/assembler-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/log.h"
+#include "src/objects-body-descriptors-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
#include "src/objects/hash-table.h"
@@ -18,69 +21,33 @@
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/string.h"
+#include "src/roots.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-// This is like a MaybeObjectSlot, except it doesn't enforce alignment.
-// Most slots used below are aligned, but when writing into Code objects,
-// they might not be, hence the use of UnalignedSlot and UnalignedCopy.
-class UnalignedSlot {
- public:
- explicit UnalignedSlot(ObjectSlot slot) : ptr_(slot.address()) {}
- explicit UnalignedSlot(Address address) : ptr_(address) {}
- explicit UnalignedSlot(MaybeObject* slot)
- : ptr_(reinterpret_cast<Address>(slot)) {}
- explicit UnalignedSlot(Object* slot)
- : ptr_(reinterpret_cast<Address>(slot)) {}
-
- inline bool operator<(const UnalignedSlot& other) const {
- return ptr_ < other.ptr_;
- }
- inline bool operator==(const UnalignedSlot& other) const {
- return ptr_ == other.ptr_;
- }
-
- inline void Advance(int bytes = kPointerSize) { ptr_ += bytes; }
-
- MaybeObject Read() {
- Address result;
- memcpy(&result, reinterpret_cast<void*>(ptr_), sizeof(result));
- return MaybeObject(result);
- }
- MaybeObject ReadPrevious() {
- Address result;
- memcpy(&result, reinterpret_cast<void*>(ptr_ - kPointerSize),
- sizeof(result));
- return MaybeObject(result);
- }
- inline void Write(Address value) {
- memcpy(reinterpret_cast<void*>(ptr_), &value, sizeof(value));
- }
- MaybeObjectSlot Slot() { return MaybeObjectSlot(ptr_); }
-
- Address address() { return ptr_; }
-
- private:
- Address ptr_;
-};
-
-void Deserializer::UnalignedCopy(UnalignedSlot dest, MaybeObject value) {
+template <typename TSlot>
+TSlot Deserializer::Write(TSlot dest, MaybeObject value) {
DCHECK(!allocator()->next_reference_is_weak());
- dest.Write(value.ptr());
+ dest.store(value);
+ return dest + 1;
}
-void Deserializer::UnalignedCopy(UnalignedSlot dest, Address value) {
+template <typename TSlot>
+TSlot Deserializer::WriteAddress(TSlot dest, Address value) {
DCHECK(!allocator()->next_reference_is_weak());
- dest.Write(value);
+ memcpy(dest.ToVoidPtr(), &value, kSystemPointerSize);
+ STATIC_ASSERT(IsAligned(kSystemPointerSize, TSlot::kSlotDataSize));
+ return dest + (kSystemPointerSize / TSlot::kSlotDataSize);
}
void Deserializer::Initialize(Isolate* isolate) {
DCHECK_NULL(isolate_);
DCHECK_NOT_NULL(isolate);
isolate_ = isolate;
+ allocator()->Initialize(isolate->heap());
DCHECK_NULL(external_reference_table_);
external_reference_table_ = isolate->external_reference_table();
#ifdef DEBUG
@@ -97,7 +64,8 @@ void Deserializer::Initialize(Isolate* isolate) {
void Deserializer::Rehash() {
DCHECK(can_rehash() || deserializing_user_code());
- for (HeapObject item : to_rehash_) item->RehashBasedOnMap(isolate());
+ for (HeapObject item : to_rehash_)
+ item->RehashBasedOnMap(ReadOnlyRoots(isolate()));
}
Deserializer::~Deserializer() {
@@ -115,12 +83,10 @@ Deserializer::~Deserializer() {
// process. It is also called on the body of each function.
void Deserializer::VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) {
- // The space must be new space. Any other space would cause ReadChunk to try
- // to update the remembered using nullptr as the address.
- // TODO(ishell): this will not work once we actually compress pointers.
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
- ReadData(UnalignedSlot(start.address()), UnalignedSlot(end.address()),
- NEW_SPACE, kNullAddress);
+ // We are reading to a location outside of the JS heap, so pass NEW_SPACE to
+ // avoid triggering write barriers.
+ ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end), NEW_SPACE,
+ kNullAddress);
}
void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
@@ -143,10 +109,11 @@ void Deserializer::DeserializeDeferredObjects() {
DCHECK_LE(space, kNumberOfSpaces);
DCHECK_EQ(code - space, kNewObject);
HeapObject object = GetBackReferencedObject(space);
- int size = source_.GetInt() << kPointerSizeLog2;
+ int size = source_.GetInt() << kTaggedSizeLog2;
Address obj_address = object->address();
- UnalignedSlot start(obj_address + kPointerSize);
- UnalignedSlot end(obj_address + size);
+ // The object's map is already initialized; now read the rest.
+ MaybeObjectSlot start(obj_address + kTaggedSize);
+ MaybeObjectSlot end(obj_address + size);
bool filled = ReadData(start, end, space, obj_address);
CHECK(filled);
DCHECK(CanBeDeferred(object));
@@ -323,11 +290,13 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
bytecode_array->set_interrupt_budget(
interpreter::Interpreter::InterruptBudget());
bytecode_array->set_osr_loop_nesting_level(0);
- } else if (obj->IsDescriptorArray()) {
- // Reset the marking state of the descriptor array.
+ }
+#ifdef DEBUG
+ if (obj->IsDescriptorArray()) {
DescriptorArray descriptor_array = DescriptorArray::cast(obj);
- descriptor_array->set_raw_number_of_marked_descriptors(0);
+ DCHECK_EQ(0, descriptor_array->raw_number_of_marked_descriptors());
}
+#endif
// Check alignment.
DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
@@ -379,12 +348,18 @@ HeapObject Deserializer::GetBackReferencedObject(int space) {
return obj;
}
-// This routine writes the new object into the pointer provided.
-// The reason for this strange interface is that otherwise the object is
-// written very late, which means the FreeSpace map is not set up by the
-// time we need to use it to mark the space at the end of a page free.
-void Deserializer::ReadObject(int space_number, UnalignedSlot write_back,
- HeapObjectReferenceType reference_type) {
+HeapObject Deserializer::ReadObject() {
+ MaybeObject object;
+ // We are reading to a location outside of the JS heap, so pass NEW_SPACE to
+ // avoid triggering write barriers.
+ bool filled =
+ ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
+ NEW_SPACE, kNullAddress);
+ CHECK(filled);
+ return object.GetHeapObjectAssumeStrong();
+}
+
+HeapObject Deserializer::ReadObject(int space_number) {
const int size = source_.GetInt() << kObjectAlignmentBits;
Address address =
@@ -392,18 +367,14 @@ void Deserializer::ReadObject(int space_number, UnalignedSlot write_back,
HeapObject obj = HeapObject::FromAddress(address);
isolate_->heap()->OnAllocationEvent(obj, size);
- UnalignedSlot current(address);
- UnalignedSlot limit(address + size);
+ MaybeObjectSlot current(address);
+ MaybeObjectSlot limit(address + size);
if (ReadData(current, limit, space_number, address)) {
// Only post process if object content has not been deferred.
obj = PostProcessNewObject(obj, space_number);
}
- MaybeObject write_back_obj = reference_type == HeapObjectReferenceType::STRONG
- ? HeapObjectReference::Strong(obj)
- : HeapObjectReference::Weak(obj);
- UnalignedCopy(write_back, write_back_obj);
#ifdef DEBUG
if (obj->IsCode()) {
DCHECK(space_number == CODE_SPACE || space_number == CODE_LO_SPACE);
@@ -411,6 +382,108 @@ void Deserializer::ReadObject(int space_number, UnalignedSlot write_back,
DCHECK(space_number != CODE_SPACE && space_number != CODE_LO_SPACE);
}
#endif // DEBUG
+ return obj;
+}
+
+void Deserializer::ReadCodeObjectBody(int space_number,
+ Address code_object_address) {
+  // At this point the code object is already allocated, its map field is
+  // initialized, and its raw data fields and code stream have been read.
+  // Now we read the rest of the code header's fields.
+ MaybeObjectSlot current(code_object_address + HeapObject::kHeaderSize);
+ MaybeObjectSlot limit(code_object_address + Code::kDataStart);
+ bool filled = ReadData(current, limit, space_number, code_object_address);
+ CHECK(filled);
+
+  // Now iterate the RelocInfos in the same order the serializer visited them
+  // and deserialize the respective data into each one.
+ Code code = Code::cast(HeapObject::FromAddress(code_object_address));
+ RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
+ for (; !it.done(); it.next()) {
+ RelocInfo rinfo = *it.rinfo();
+ rinfo.Visit(this);
+ }
+}
+
+void Deserializer::VisitCodeTarget(Code host, RelocInfo* rinfo) {
+ HeapObject object = ReadObject();
+ rinfo->set_target_address(Code::cast(object)->raw_instruction_start());
+}
+
+void Deserializer::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
+ HeapObject object = ReadObject();
+ // Embedded object reference must be a strong one.
+ rinfo->set_target_object(isolate_->heap(), object);
+}
+
+void Deserializer::VisitRuntimeEntry(Code host, RelocInfo* rinfo) {
+ // We no longer serialize code that contains runtime entries.
+ UNREACHABLE();
+}
+
+void Deserializer::VisitExternalReference(Code host, RelocInfo* rinfo) {
+ byte data = source_.Get();
+ CHECK_EQ(data, kExternalReference);
+
+ Address address = ReadExternalReferenceCase();
+
+ if (rinfo->IsCodedSpecially()) {
+ Address location_of_branch_data = rinfo->pc();
+ Assembler::deserialization_set_special_target_at(location_of_branch_data,
+ host, address);
+ } else {
+ WriteUnalignedValue(rinfo->target_address_address(), address);
+ }
+}
+
+void Deserializer::VisitInternalReference(Code host, RelocInfo* rinfo) {
+ byte data = source_.Get();
+ CHECK_EQ(data, kInternalReference);
+
+ // Internal reference target is encoded as an offset from code entry.
+ int target_offset = source_.GetInt();
+ DCHECK_LT(static_cast<unsigned>(target_offset),
+ static_cast<unsigned>(host->raw_instruction_size()));
+ Address target = host->entry() + target_offset;
+ Assembler::deserialization_set_target_internal_reference_at(
+ rinfo->pc(), target, rinfo->rmode());
+}
+
+void Deserializer::VisitOffHeapTarget(Code host, RelocInfo* rinfo) {
+ DCHECK(FLAG_embedded_builtins);
+ byte data = source_.Get();
+ CHECK_EQ(data, kOffHeapTarget);
+
+ int builtin_index = source_.GetInt();
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+
+ CHECK_NOT_NULL(isolate_->embedded_blob());
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address address = d.InstructionStartOfBuiltin(builtin_index);
+ CHECK_NE(kNullAddress, address);
+
+ // TODO(ishell): implement RelocInfo::set_target_off_heap_target()
+ if (RelocInfo::OffHeapTargetIsCodedSpecially()) {
+ Address location_of_branch_data = rinfo->pc();
+ Assembler::deserialization_set_special_target_at(location_of_branch_data,
+ host, address);
+ } else {
+ WriteUnalignedValue(rinfo->target_address_address(), address);
+ }
+}
+
+template <typename TSlot>
+TSlot Deserializer::ReadRepeatedObject(TSlot current, int repeat_count) {
+ CHECK_LE(2, repeat_count);
+
+ HeapObject heap_object = ReadObject();
+ DCHECK(!Heap::InYoungGeneration(heap_object));
+ for (int i = 0; i < repeat_count; i++) {
+ // Repeated values are not subject to the write barrier so we don't need
+ // to trigger it.
+ current = Write(current, MaybeObject::FromObject(heap_object));
+ }
+ return current;
}
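
A hedged sketch of the repeat-count decoding that feeds ReadRepeatedObject above. The CHECK_LE(2, repeat_count) implies the encoding is biased so the smallest encodable count is 2 (a single copy needs no repeat bytecode); the exact DecodeFixedRepeatCount/DecodeVariableRepeatCount formulas are not part of this diff, so the bias below is an assumption.

#include <cassert>
#include <cstdio>

// kMinRepeat is an assumed bias, not a V8 constant.
constexpr int kMinRepeat = 2;

int DecodeRepeatCountSketch(int encoded) {
  int repeats = encoded + kMinRepeat;
  assert(repeats >= 2);  // mirrors the CHECK_LE(2, repeat_count) above
  return repeats;
}

int main() {
  std::printf("encoded 0 decodes to %d repeats\n", DecodeRepeatCountSketch(0));
  return 0;
}
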
static void NoExternalReferencesCallback() {
@@ -421,8 +494,9 @@ static void NoExternalReferencesCallback() {
CHECK_WITH_MSG(false, "No external references provided via API");
}
-bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
- int source_space, Address current_object_address) {
+template <typename TSlot>
+bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
+ Address current_object_address) {
Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
@@ -433,34 +507,31 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
while (current < limit) {
byte data = source_.Get();
switch (data) {
-#define CASE_STATEMENT(where, how, within, space_number) \
- case where + how + within + space_number: \
- STATIC_ASSERT((where & ~kWhereMask) == 0); \
- STATIC_ASSERT((how & ~kHowToCodeMask) == 0); \
- STATIC_ASSERT((within & ~kWhereToPointMask) == 0); \
+#define CASE_STATEMENT(bytecode, space_number) \
+ case bytecode + space_number: \
STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
-#define CASE_BODY(where, how, within, space_number_if_any) \
- current = ReadDataCase<where, how, within, space_number_if_any>( \
+#define CASE_BODY(bytecode, space_number_if_any) \
+ current = ReadDataCase<TSlot, bytecode, space_number_if_any>( \
isolate, current, current_object_address, data, write_barrier_needed); \
break;
// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with fall-through cases
// and one body.
-#define ALL_SPACES(where, how, within) \
- CASE_STATEMENT(where, how, within, NEW_SPACE) \
- CASE_BODY(where, how, within, NEW_SPACE) \
- CASE_STATEMENT(where, how, within, OLD_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(where, how, within, CODE_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(where, how, within, MAP_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(where, how, within, LO_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(where, how, within, RO_SPACE) \
- CASE_BODY(where, how, within, kAnyOldSpace)
+#define ALL_SPACES(bytecode) \
+ CASE_STATEMENT(bytecode, NEW_SPACE) \
+ CASE_BODY(bytecode, NEW_SPACE) \
+ CASE_STATEMENT(bytecode, OLD_SPACE) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, CODE_SPACE) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, MAP_SPACE) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, LO_SPACE) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, RO_SPACE) \
+ CASE_BODY(bytecode, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
case byte_code: \
@@ -474,130 +545,45 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
FOUR_CASES(byte_code + 8) \
FOUR_CASES(byte_code + 12)
-#define SINGLE_CASE(where, how, within, space) \
- CASE_STATEMENT(where, how, within, space) \
- CASE_BODY(where, how, within, space)
+#define SINGLE_CASE(bytecode, space) \
+ CASE_STATEMENT(bytecode, space) \
+ CASE_BODY(bytecode, space)
// Deserialize a new object and write a pointer to it to the current
// object.
- ALL_SPACES(kNewObject, kPlain, kStartOfObject)
- // Deserialize a new code object and write a pointer to its first
- // instruction to the current code object.
- ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
+ ALL_SPACES(kNewObject)
// Find a recently deserialized object using its offset from the current
// allocation point and write a pointer to it to the current object.
- ALL_SPACES(kBackref, kPlain, kStartOfObject)
- ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if V8_CODE_EMBEDS_OBJECT_POINTER
- // Deserialize a new object from pointer found in code and write
- // a pointer to it to the current object. Required only for MIPS, PPC, ARM
- // or S390 with embedded constant pool, and omitted on the other
- // architectures because it is fully unrolled and would cause bloat.
- ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
- // Find a recently deserialized code object using its offset from the
- // current allocation point and write a pointer to it to the current
- // object. Required only for MIPS, PPC, ARM or S390 with embedded
- // constant pool.
- ALL_SPACES(kBackref, kFromCode, kStartOfObject)
- ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
-#endif
- // Find a recently deserialized code object using its offset from the
- // current allocation point and write a pointer to its first instruction
- // to the current code object or the instruction pointer in a function
- // object.
- ALL_SPACES(kBackref, kFromCode, kInnerPointer)
- ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
+ ALL_SPACES(kBackref)
// Find an object in the roots array and write a pointer to it to the
// current object.
- SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
-#if V8_CODE_EMBEDS_OBJECT_POINTER
- // Find an object in the roots array and write a pointer to it to in code.
- SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
-#endif
+ SINGLE_CASE(kRootArray, RO_SPACE)
// Find an object in the partial snapshots cache and write a pointer to it
// to the current object.
- SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
- SINGLE_CASE(kPartialSnapshotCache, kFromCode, kStartOfObject, 0)
- SINGLE_CASE(kPartialSnapshotCache, kFromCode, kInnerPointer, 0)
+ SINGLE_CASE(kPartialSnapshotCache, RO_SPACE)
// Find an object in the partial snapshots cache and write a pointer to it
// to the current object.
- SINGLE_CASE(kReadOnlyObjectCache, kPlain, kStartOfObject, 0)
- SINGLE_CASE(kReadOnlyObjectCache, kFromCode, kStartOfObject, 0)
- SINGLE_CASE(kReadOnlyObjectCache, kFromCode, kInnerPointer, 0)
+ SINGLE_CASE(kReadOnlyObjectCache, RO_SPACE)
// Find an object in the attached references and write a pointer to it to
// the current object.
- SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
- SINGLE_CASE(kAttachedReference, kFromCode, kStartOfObject, 0)
- SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
+ SINGLE_CASE(kAttachedReference, RO_SPACE)
#undef CASE_STATEMENT
#undef CASE_BODY
#undef ALL_SPACES
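
To illustrate the flattened dispatch above: with HowToCode and WhereToPoint gone, a data byte in the kNewObject or kBackref range is simply the base bytecode plus a space number in its low bits. A small sketch using the constant values listed later in this patch (kNewObject = 0x00, kBackref = 0x08, kSpaceMask = 7):

#include <cstdio>

// Values as listed later in serializer-common.h.
constexpr int kNewObject = 0x00;  // occupies 0x00..0x05
constexpr int kBackref = 0x08;    // occupies 0x08..0x0d
constexpr int kSpaceMask = 7;

int main() {
  for (int data : {0x00, 0x03, 0x08, 0x0d}) {
    int base = data & ~kSpaceMask;  // recovers kNewObject or kBackref
    std::printf("data 0x%02x -> bytecode 0x%02x, space %d\n", data, base,
                data & kSpaceMask);
  }
  return 0;
}
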
- case kSkip: {
- int size = source_.GetInt();
- current.Advance(size);
- break;
- }
-
// Find an external reference and write a pointer to it to the current
// object.
- case kExternalReference + kPlain + kStartOfObject:
- current =
- ReadExternalReferenceCase(kPlain, current, current_object_address);
- break;
- // Find an external reference and write a pointer to it in the current
- // code object.
- case kExternalReference + kFromCode + kStartOfObject:
- current = ReadExternalReferenceCase(kFromCode, current,
- current_object_address);
- break;
-
- case kInternalReferenceEncoded:
- case kInternalReference: {
- // Internal reference address is not encoded via skip, but by offset
- // from code entry.
- int pc_offset = source_.GetInt();
- int target_offset = source_.GetInt();
- Code code = Code::cast(HeapObject::FromAddress(current_object_address));
- DCHECK(0 <= pc_offset && pc_offset <= code->raw_instruction_size());
- DCHECK(0 <= target_offset &&
- target_offset <= code->raw_instruction_size());
- Address pc = code->entry() + pc_offset;
- Address target = code->entry() + target_offset;
- Assembler::deserialization_set_target_internal_reference_at(
- pc, target,
- data == kInternalReference ? RelocInfo::INTERNAL_REFERENCE
- : RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ case kExternalReference: {
+ Address address = ReadExternalReferenceCase();
+ current = WriteAddress(current, address);
break;
}
+ case kInternalReference:
case kOffHeapTarget: {
- DCHECK(FLAG_embedded_builtins);
- int skip = source_.GetInt();
- int builtin_index = source_.GetInt();
- DCHECK(Builtins::IsBuiltinId(builtin_index));
-
- current.Advance(skip);
-
- CHECK_NOT_NULL(isolate->embedded_blob());
- EmbeddedData d = EmbeddedData::FromBlob();
- Address address = d.InstructionStartOfBuiltin(builtin_index);
- CHECK_NE(kNullAddress, address);
-
- if (RelocInfo::OffHeapTargetIsCodedSpecially()) {
- Address location_of_branch_data = current.address();
- int skip = Assembler::deserialization_special_target_size(
- location_of_branch_data);
- Assembler::deserialization_set_special_target_at(
- location_of_branch_data,
- Code::cast(HeapObject::FromAddress(current_object_address)),
- address);
- current.Advance(skip);
- } else {
- UnalignedCopy(current, address);
- current.Advance();
- }
+ // These bytecodes are expected only during RelocInfo iteration.
+ UNREACHABLE();
break;
}
@@ -612,7 +598,7 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
case kDeferred: {
// Deferred can only occur right after the heap object header.
- DCHECK_EQ(current.address(), current_object_address + kPointerSize);
+ DCHECK_EQ(current.address(), current_object_address + kTaggedSize);
HeapObject obj = HeapObject::FromAddress(current_object_address);
// If the deferred object is a map, its instance type may be used
// during deserialization. Initialize it with a temporary value.
@@ -629,30 +615,33 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
// Deserialize raw data of variable length.
case kVariableRawData: {
int size_in_bytes = source_.GetInt();
- byte* raw_data_out = reinterpret_cast<byte*>(current.address());
- source_.CopyRaw(raw_data_out, size_in_bytes);
- current.Advance(size_in_bytes);
+ DCHECK(IsAligned(size_in_bytes, kTaggedSize));
+ source_.CopyRaw(current.ToVoidPtr(), size_in_bytes);
+ current = TSlot(current.address() + size_in_bytes);
break;
}
// Deserialize raw code directly into the body of the code object.
- // Do not move current.
case kVariableRawCode: {
+ // VariableRawCode can only occur right after the heap object header.
+ DCHECK_EQ(current.address(), current_object_address + kTaggedSize);
int size_in_bytes = source_.GetInt();
+ DCHECK(IsAligned(size_in_bytes, kTaggedSize));
source_.CopyRaw(
- reinterpret_cast<byte*>(current_object_address + Code::kDataStart),
+ reinterpret_cast<void*>(current_object_address + Code::kDataStart),
size_in_bytes);
+ // Deserialize tagged fields in the code object header and reloc infos.
+ ReadCodeObjectBody(source_space, current_object_address);
+ // Set current to the code object end.
+ current = TSlot(current.address() + Code::kDataStart -
+ HeapObject::kHeaderSize + size_in_bytes);
+ CHECK_EQ(current, limit);
break;
}
case kVariableRepeat: {
- int repeats = source_.GetInt();
- MaybeObject object = current.ReadPrevious();
- DCHECK(!Heap::InNewSpace(object));
- for (int i = 0; i < repeats; i++) {
- UnalignedCopy(current, object);
- current.Advance();
- }
+ int repeats = DecodeVariableRepeatCount(source_.GetInt());
+ current = ReadRepeatedObject(current, repeats);
break;
}
@@ -668,8 +657,6 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
}
case kApiReference: {
- int skip = source_.GetInt();
- current.Advance(skip);
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
Address address;
if (isolate->api_external_references()) {
@@ -681,14 +668,12 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
} else {
address = reinterpret_cast<Address>(NoExternalReferencesCallback);
}
- UnalignedCopy(current, address);
- current.Advance();
+ current = WriteAddress(current, address);
break;
}
case kClearedWeakReference:
- UnalignedCopy(current, HeapObjectReference::ClearedValue(isolate_));
- current.Advance();
+ current = Write(current, HeapObjectReference::ClearedValue(isolate_));
break;
case kWeakPrefix:
@@ -711,32 +696,17 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
STATIC_ASSERT(kNumberOfRootArrayConstants <=
static_cast<int>(RootIndex::kLastImmortalImmovableRoot));
STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
- SIXTEEN_CASES(kRootArrayConstantsWithSkip)
- SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
- int skip = source_.GetInt();
- current.Advance(skip);
- V8_FALLTHROUGH;
- }
-
SIXTEEN_CASES(kRootArrayConstants)
SIXTEEN_CASES(kRootArrayConstants + 16) {
int id = data & kRootArrayConstantsMask;
RootIndex root_index = static_cast<RootIndex>(id);
MaybeObject object = MaybeObject::FromObject(isolate->root(root_index));
- DCHECK(!Heap::InNewSpace(object));
- UnalignedCopy(current, object);
- current.Advance();
+ DCHECK(!Heap::InYoungGeneration(object));
+ current = Write(current, object);
break;
}
STATIC_ASSERT(kNumberOfHotObjects == 8);
- FOUR_CASES(kHotObjectWithSkip)
- FOUR_CASES(kHotObjectWithSkip + 4) {
- int skip = source_.GetInt();
- current.Advance(skip);
- V8_FALLTHROUGH;
- }
-
FOUR_CASES(kHotObject)
FOUR_CASES(kHotObject + 4) {
int index = data & kHotObjectMask;
@@ -745,14 +715,17 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
if (allocator()->GetAndClearNextReferenceIsWeak()) {
hot_maybe_object = MaybeObject::MakeWeak(hot_maybe_object);
}
-
- UnalignedCopy(current, hot_maybe_object);
- if (write_barrier_needed && Heap::InNewSpace(hot_object)) {
+      // Don't update the current pointer here as it may be needed for the
+      // write barrier.
+ Write(current, hot_maybe_object);
+ if (write_barrier_needed && Heap::InYoungGeneration(hot_object)) {
HeapObject current_object =
HeapObject::FromAddress(current_object_address);
- GenerationalBarrier(current_object, current.Slot(), hot_maybe_object);
+ GenerationalBarrier(current_object,
+ MaybeObjectSlot(current.address()),
+ hot_maybe_object);
}
- current.Advance();
+ ++current;
break;
}
@@ -760,22 +733,16 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
STATIC_ASSERT(kNumberOfFixedRawData == 32);
SIXTEEN_CASES(kFixedRawData)
SIXTEEN_CASES(kFixedRawData + 16) {
- byte* raw_data_out = reinterpret_cast<byte*>(current.address());
- int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
- source_.CopyRaw(raw_data_out, size_in_bytes);
- current.Advance(size_in_bytes);
+ int size_in_tagged = data - kFixedRawDataStart;
+ source_.CopyRaw(current.ToVoidPtr(), size_in_tagged * kTaggedSize);
+ current += size_in_tagged;
break;
}
STATIC_ASSERT(kNumberOfFixedRepeat == 16);
SIXTEEN_CASES(kFixedRepeat) {
- int repeats = data - kFixedRepeatStart;
- MaybeObject object = current.ReadPrevious();
- DCHECK(!Heap::InNewSpace(object));
- for (int i = 0; i < repeats; i++) {
- UnalignedCopy(current, object);
- current.Advance();
- }
+ int repeats = DecodeFixedRepeatCount(data);
+ current = ReadRepeatedObject(current, repeats);
break;
}
@@ -796,116 +763,68 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
return true;
}
-UnalignedSlot Deserializer::ReadExternalReferenceCase(
- HowToCode how, UnalignedSlot current, Address current_object_address) {
- int skip = source_.GetInt();
- current.Advance(skip);
+Address Deserializer::ReadExternalReferenceCase() {
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
- Address address = external_reference_table_->address(reference_id);
-
- if (how == kFromCode) {
- Address location_of_branch_data = current.address();
- int skip =
- Assembler::deserialization_special_target_size(location_of_branch_data);
- Assembler::deserialization_set_special_target_at(
- location_of_branch_data,
- Code::cast(HeapObject::FromAddress(current_object_address)), address);
- current.Advance(skip);
- } else {
- UnalignedCopy(current, address);
- current.Advance();
- }
- return current;
+ return external_reference_table_->address(reference_id);
}
-template <int where, int how, int within, int space_number_if_any>
-UnalignedSlot Deserializer::ReadDataCase(Isolate* isolate,
- UnalignedSlot current,
- Address current_object_address,
- byte data, bool write_barrier_needed) {
+template <typename TSlot, SerializerDeserializer::Bytecode bytecode,
+ int space_number_if_any>
+TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
+ Address current_object_address, byte data,
+ bool write_barrier_needed) {
bool emit_write_barrier = false;
- bool current_was_incremented = false;
int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
: space_number_if_any;
- HeapObjectReferenceType reference_type = HeapObjectReferenceType::STRONG;
- if (where == kNewObject && how == kPlain && within == kStartOfObject) {
- if (allocator()->GetAndClearNextReferenceIsWeak()) {
- reference_type = HeapObjectReferenceType::WEAK;
- }
- ReadObject(space_number, current, reference_type);
+ HeapObject heap_object;
+ HeapObjectReferenceType reference_type =
+ allocator()->GetAndClearNextReferenceIsWeak()
+ ? HeapObjectReferenceType::WEAK
+ : HeapObjectReferenceType::STRONG;
+
+ if (bytecode == kNewObject) {
+ heap_object = ReadObject(space_number);
emit_write_barrier = (space_number == NEW_SPACE);
+ } else if (bytecode == kBackref) {
+ heap_object = GetBackReferencedObject(space_number);
+ emit_write_barrier = (space_number == NEW_SPACE);
+ } else if (bytecode == kRootArray) {
+ int id = source_.GetInt();
+ RootIndex root_index = static_cast<RootIndex>(id);
+ heap_object = HeapObject::cast(isolate->root(root_index));
+ emit_write_barrier = Heap::InYoungGeneration(heap_object);
+ hot_objects_.Add(heap_object);
+ } else if (bytecode == kReadOnlyObjectCache) {
+ int cache_index = source_.GetInt();
+ heap_object = HeapObject::cast(
+ isolate->heap()->read_only_heap()->read_only_object_cache()->at(
+ cache_index));
+ DCHECK(!Heap::InYoungGeneration(heap_object));
+ emit_write_barrier = false;
+ } else if (bytecode == kPartialSnapshotCache) {
+ int cache_index = source_.GetInt();
+ heap_object =
+ HeapObject::cast(isolate->partial_snapshot_cache()->at(cache_index));
+ emit_write_barrier = Heap::InYoungGeneration(heap_object);
} else {
- Object new_object; /* May not be a real Object pointer. */
- if (where == kNewObject) {
- ReadObject(space_number, UnalignedSlot(&new_object),
- HeapObjectReferenceType::STRONG);
- } else if (where == kBackref) {
- emit_write_barrier = (space_number == NEW_SPACE);
- new_object = GetBackReferencedObject(data & kSpaceMask);
- } else if (where == kBackrefWithSkip) {
- int skip = source_.GetInt();
- current.Advance(skip);
- emit_write_barrier = (space_number == NEW_SPACE);
- new_object = GetBackReferencedObject(data & kSpaceMask);
- } else if (where == kRootArray) {
- int id = source_.GetInt();
- RootIndex root_index = static_cast<RootIndex>(id);
- new_object = isolate->root(root_index);
- emit_write_barrier = Heap::InNewSpace(new_object);
- hot_objects_.Add(HeapObject::cast(new_object));
- } else if (where == kReadOnlyObjectCache) {
- int cache_index = source_.GetInt();
- new_object = isolate->read_only_object_cache()->at(cache_index);
- DCHECK(!Heap::InNewSpace(new_object));
- emit_write_barrier = false;
- } else if (where == kPartialSnapshotCache) {
- int cache_index = source_.GetInt();
- new_object = isolate->partial_snapshot_cache()->at(cache_index);
- emit_write_barrier = Heap::InNewSpace(new_object);
- } else {
- DCHECK_EQ(where, kAttachedReference);
- int index = source_.GetInt();
- new_object = *attached_objects_[index];
- emit_write_barrier = Heap::InNewSpace(new_object);
- }
- if (within == kInnerPointer) {
- DCHECK_EQ(how, kFromCode);
- if (new_object->IsCode()) {
- new_object = Object(Code::cast(new_object)->raw_instruction_start());
- } else {
- Cell cell = Cell::cast(new_object);
- new_object = Object(cell->ValueAddress());
- }
- }
- if (how == kFromCode) {
- DCHECK(!allocator()->next_reference_is_weak());
- Address location_of_branch_data = current.address();
- int skip = Assembler::deserialization_special_target_size(
- location_of_branch_data);
- Assembler::deserialization_set_special_target_at(
- location_of_branch_data,
- Code::cast(HeapObject::FromAddress(current_object_address)),
- new_object->ptr());
- current.Advance(skip);
- current_was_incremented = true;
- } else {
- MaybeObject new_maybe_object = MaybeObject::FromObject(new_object);
- if (allocator()->GetAndClearNextReferenceIsWeak()) {
- new_maybe_object = MaybeObject::MakeWeak(new_maybe_object);
- }
- UnalignedCopy(current, new_maybe_object);
- }
+ DCHECK_EQ(bytecode, kAttachedReference);
+ int index = source_.GetInt();
+ heap_object = *attached_objects_[index];
+ emit_write_barrier = Heap::InYoungGeneration(heap_object);
}
+ HeapObjectReference heap_object_ref =
+ reference_type == HeapObjectReferenceType::STRONG
+ ? HeapObjectReference::Strong(heap_object)
+ : HeapObjectReference::Weak(heap_object);
+  // Don't update the current pointer here as it may still be needed for the
+  // write barrier.
+ Write(current, heap_object_ref);
if (emit_write_barrier && write_barrier_needed) {
- HeapObject object = HeapObject::FromAddress(current_object_address);
- SLOW_DCHECK(isolate->heap()->Contains(object));
- GenerationalBarrier(object, current.Slot(), current.Read());
- }
- if (!current_was_incremented) {
- current.Advance();
+ HeapObject host_object = HeapObject::FromAddress(current_object_address);
+ SLOW_DCHECK(isolate->heap()->Contains(host_object));
+ GenerationalBarrier(host_object, MaybeObjectSlot(current.address()),
+ heap_object_ref);
}
-
- return current;
+ return current + 1;
}
} // namespace internal
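
The templated ReadData above lets one byte-stream loop fill both on-heap tagged slots (MaybeObjectSlot) and off-heap full-word slots (FullMaybeObjectSlot). A self-contained stand-in showing the shape of such a slot-parameterized writer; the SlotSketch types are illustrative, not V8's:

#include <cstdint>
#include <cstdio>

// Illustrative slot types: one compressed (4-byte) tagged slot and one
// full-word slot. Each advances by its own element width.
template <typename T>
struct SlotSketch {
  T* p;
  SlotSketch operator+(int n) const { return {p + n}; }
  void store(T v) const { *p = v; }
};
using TaggedSlot = SlotSketch<uint32_t>;
using FullSlot = SlotSketch<uint64_t>;

// A single templated writer serves both widths, like Write() above.
template <typename TSlot, typename V>
TSlot WriteSketch(TSlot dest, V value) {
  dest.store(value);
  return dest + 1;
}

int main() {
  uint32_t tagged[2] = {0, 0};
  uint64_t full[2] = {0, 0};
  WriteSketch(TaggedSlot{tagged}, uint32_t{0x1234});
  WriteSketch(FullSlot{full}, uint64_t{0x5678});
  std::printf("tagged[0]=0x%x full[0]=0x%llx\n", tagged[0],
              static_cast<unsigned long long>(full[0]));
  return 0;
}
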
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 70b48bf0ef..c4e6d216d7 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -22,7 +22,6 @@ namespace internal {
class HeapObject;
class Object;
-class UnalignedSlot;
// Used for platforms with embedded constant pools to trigger deserialization
// of objects found in code.
@@ -49,7 +48,6 @@ class Deserializer : public SerializerDeserializer {
source_(data->Payload()),
magic_number_(data->GetMagicNumber()),
external_reference_table_(nullptr),
- allocator_(this),
deserializing_user_code_(deserializing_user_code),
can_rehash_(false) {
allocator()->DecodeReservation(data->Reservations());
@@ -113,31 +111,45 @@ class Deserializer : public SerializerDeserializer {
void Synchronize(VisitorSynchronization::SyncTag tag) override;
- void UnalignedCopy(UnalignedSlot dest, MaybeObject value);
- void UnalignedCopy(UnalignedSlot dest, Address value);
+ template <typename TSlot>
+ inline TSlot Write(TSlot dest, MaybeObject value);
+
+ template <typename TSlot>
+ inline TSlot WriteAddress(TSlot dest, Address value);
// Fills in some heap data in an area from start to end (non-inclusive). The
// space id is used for the write barrier. The object_address is the address
// of the object we are writing into, or nullptr if we are not writing into an
// object, i.e. if we are writing a series of tagged values that are not on
// the heap. Return false if the object content has been deferred.
- bool ReadData(UnalignedSlot start, UnalignedSlot end, int space,
- Address object_address);
+ template <typename TSlot>
+ bool ReadData(TSlot start, TSlot end, int space, Address object_address);
// A helper function for ReadData, templatized on the bytecode for efficiency.
// Returns the new value of {current}.
- template <int where, int how, int within, int space_number_if_any>
- inline UnalignedSlot ReadDataCase(Isolate* isolate, UnalignedSlot current,
- Address current_object_address, byte data,
- bool write_barrier_needed);
+ template <typename TSlot, Bytecode bytecode, int space_number_if_any>
+ inline TSlot ReadDataCase(Isolate* isolate, TSlot current,
+ Address current_object_address, byte data,
+ bool write_barrier_needed);
// A helper function for ReadData for reading external references.
- // Returns the new value of {current}.
- inline UnalignedSlot ReadExternalReferenceCase(
- HowToCode how, UnalignedSlot current, Address current_object_address);
+ inline Address ReadExternalReferenceCase();
+
+ HeapObject ReadObject();
+ HeapObject ReadObject(int space_number);
+ void ReadCodeObjectBody(int space_number, Address code_object_address);
- void ReadObject(int space_number, UnalignedSlot write_back,
- HeapObjectReferenceType reference_type);
+ public:
+ void VisitCodeTarget(Code host, RelocInfo* rinfo);
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo);
+ void VisitRuntimeEntry(Code host, RelocInfo* rinfo);
+ void VisitExternalReference(Code host, RelocInfo* rinfo);
+ void VisitInternalReference(Code host, RelocInfo* rinfo);
+ void VisitOffHeapTarget(Code host, RelocInfo* rinfo);
+
+ private:
+ template <typename TSlot>
+ TSlot ReadRepeatedObject(TSlot current, int repeat_count);
// Special handling for serialized code like hooking up internalized strings.
HeapObject PostProcessNewObject(HeapObject obj, int space);
@@ -189,7 +201,7 @@ class StringTableInsertionKey : public StringTableKey {
uint32_t ComputeHashField(String string);
String string_;
- DISALLOW_HEAP_ALLOCATION(no_gc);
+ DISALLOW_HEAP_ALLOCATION(no_gc)
};
} // namespace internal
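
The Visit* hooks declared above replace the old skip-based code bytecodes: the deserializer now walks a code object's relocation entries, and each entry pulls its own payload from the stream. A schematic stand-in of that control flow; the Mode enum and messages are placeholders, not V8 types:

#include <cstdio>
#include <vector>

// Stand-in relocation modes; not V8's RelocInfo.
enum class Mode { kCodeTarget, kEmbeddedPointer, kExternalReference };

void VisitAllSketch(const std::vector<Mode>& reloc_modes) {
  for (Mode m : reloc_modes) {  // mirrors: for (; !it.done(); it.next())
    switch (m) {                // mirrors: rinfo.Visit(this)
      case Mode::kCodeTarget:
        std::puts("VisitCodeTarget: read object, patch target address");
        break;
      case Mode::kEmbeddedPointer:
        std::puts("VisitEmbeddedPointer: read object, set strong target");
        break;
      case Mode::kExternalReference:
        std::puts("VisitExternalReference: look up address by id");
        break;
    }
  }
}

int main() {
  VisitAllSketch({Mode::kCodeTarget, Mode::kExternalReference});
  return 0;
}
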
diff --git a/deps/v8/src/snapshot/embedded-data.cc b/deps/v8/src/snapshot/embedded-data.cc
index f5dded1bd1..0488c2f2c7 100644
--- a/deps/v8/src/snapshot/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded-data.cc
@@ -232,6 +232,13 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
// between two builtins with int3's (on x64/ia32).
ZapCode(reinterpret_cast<Address>(blob), blob_size);
+ // Hash relevant parts of the Isolate's heap and store the result.
+ {
+ STATIC_ASSERT(IsolateHashSize() == kSizetSize);
+ const size_t hash = isolate->HashIsolateForEmbeddedBlob();
+ std::memcpy(blob + IsolateHashOffset(), &hash, IsolateHashSize());
+ }
+
// Write the metadata tables.
DCHECK_EQ(MetadataSize(), sizeof(metadata[0]) * metadata.size());
std::memcpy(blob + MetadataOffset(), metadata.data(), MetadataSize());
@@ -254,12 +261,14 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
FinalizeEmbeddedCodeTargets(isolate, &d);
// Hash the blob and store the result.
- STATIC_ASSERT(HashSize() == kSizetSize);
- const size_t hash = d.CreateHash();
- std::memcpy(blob + HashOffset(), &hash, HashSize());
+ {
+ STATIC_ASSERT(EmbeddedBlobHashSize() == kSizetSize);
+ const size_t hash = d.CreateEmbeddedBlobHash();
+ std::memcpy(blob + EmbeddedBlobHashOffset(), &hash, EmbeddedBlobHashSize());
- DCHECK_EQ(hash, d.CreateHash());
- DCHECK_EQ(hash, d.Hash());
+ DCHECK_EQ(hash, d.CreateEmbeddedBlobHash());
+ DCHECK_EQ(hash, d.EmbeddedBlobHash());
+ }
if (FLAG_serialization_statistics) d.PrintStatistics();
@@ -281,10 +290,10 @@ uint32_t EmbeddedData::InstructionSizeOfBuiltin(int i) const {
return metadata[i].instructions_length;
}
-size_t EmbeddedData::CreateHash() const {
- STATIC_ASSERT(HashOffset() == 0);
- STATIC_ASSERT(HashSize() == kSizetSize);
- return base::hash_range(data_ + HashSize(), data_ + size_);
+size_t EmbeddedData::CreateEmbeddedBlobHash() const {
+ STATIC_ASSERT(EmbeddedBlobHashOffset() == 0);
+ STATIC_ASSERT(EmbeddedBlobHashSize() == kSizetSize);
+ return base::hash_range(data_ + EmbeddedBlobHashSize(), data_ + size_);
}
void EmbeddedData::PrintStatistics() const {
@@ -311,7 +320,8 @@ void EmbeddedData::PrintStatistics() const {
const int k90th = embedded_count * 0.90;
const int k99th = embedded_count * 0.99;
- const int metadata_size = static_cast<int>(HashSize() + MetadataSize());
+ const int metadata_size = static_cast<int>(
+ EmbeddedBlobHashSize() + IsolateHashSize() + MetadataSize());
PrintF("EmbeddedData:\n");
PrintF(" Total size: %d\n",
diff --git a/deps/v8/src/snapshot/embedded-data.h b/deps/v8/src/snapshot/embedded-data.h
index 6e28071525..5c5653e2ca 100644
--- a/deps/v8/src/snapshot/embedded-data.h
+++ b/deps/v8/src/snapshot/embedded-data.h
@@ -71,9 +71,13 @@ class EmbeddedData final {
return (size == 0) ? 0 : PadAndAlign(size);
}
- size_t CreateHash() const;
- size_t Hash() const {
- return *reinterpret_cast<const size_t*>(data_ + HashOffset());
+ size_t CreateEmbeddedBlobHash() const;
+ size_t EmbeddedBlobHash() const {
+ return *reinterpret_cast<const size_t*>(data_ + EmbeddedBlobHashOffset());
+ }
+
+ size_t IsolateHash() const {
+ return *reinterpret_cast<const size_t*>(data_ + IsolateHashOffset());
}
struct Metadata {
@@ -88,15 +92,20 @@ class EmbeddedData final {
// The layout of the blob is as follows:
//
// [0] hash of the remaining blob
- // [1] metadata of instruction stream 0
+ // [1] hash of embedded-blob-relevant heap objects
+ // [2] metadata of instruction stream 0
// ... metadata
// ... instruction streams
static constexpr uint32_t kTableSize = Builtins::builtin_count;
- static constexpr uint32_t HashOffset() { return 0; }
- static constexpr uint32_t HashSize() { return kSizetSize; }
+ static constexpr uint32_t EmbeddedBlobHashOffset() { return 0; }
+ static constexpr uint32_t EmbeddedBlobHashSize() { return kSizetSize; }
+ static constexpr uint32_t IsolateHashOffset() {
+ return EmbeddedBlobHashOffset() + EmbeddedBlobHashSize();
+ }
+ static constexpr uint32_t IsolateHashSize() { return kSizetSize; }
static constexpr uint32_t MetadataOffset() {
- return HashOffset() + HashSize();
+ return IsolateHashOffset() + IsolateHashSize();
}
static constexpr uint32_t MetadataSize() {
return sizeof(struct Metadata) * kTableSize;
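
A worked example of the new blob layout constants above, assuming kSizetSize is 8 (64-bit size_t): the isolate hash slots in between the embedded-blob hash and the metadata table.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kSizetSize = 8;  // assumes a 64-bit size_t

constexpr uint32_t EmbeddedBlobHashOffset() { return 0; }
constexpr uint32_t EmbeddedBlobHashSize() { return kSizetSize; }
constexpr uint32_t IsolateHashOffset() {
  return EmbeddedBlobHashOffset() + EmbeddedBlobHashSize();
}
constexpr uint32_t IsolateHashSize() { return kSizetSize; }
constexpr uint32_t MetadataOffset() {
  return IsolateHashOffset() + IsolateHashSize();
}

int main() {
  // Prints: blob hash @ 0, isolate hash @ 8, metadata @ 16
  std::printf("blob hash @ %u, isolate hash @ %u, metadata @ %u\n",
              EmbeddedBlobHashOffset(), IsolateHashOffset(), MetadataOffset());
  return 0;
}
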
diff --git a/deps/v8/src/snapshot/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded-file-writer.cc
index 2a08d05170..38073ec012 100644
--- a/deps/v8/src/snapshot/embedded-file-writer.cc
+++ b/deps/v8/src/snapshot/embedded-file-writer.cc
@@ -175,7 +175,9 @@ void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
fprintf(fp_, ".balign 32\n");
}
-void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {}
+void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {
+ fprintf(fp_, ".balign 8\n");
+}
void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
fprintf(fp_, "// %s\n", string);
@@ -185,7 +187,9 @@ void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
fprintf(fp_, "_%s:\n", name);
}
-void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid, int line) {
+void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid,
+ const char* filename,
+ int line) {
fprintf(fp_, ".loc %d %d\n", fileid, line);
}
@@ -262,7 +266,9 @@ void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
fprintf(fp_, ".align 5\n");
}
-void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {}
+void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {
+ fprintf(fp_, ".align 3\n");
+}
void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
fprintf(fp_, "// %s\n", string);
@@ -273,8 +279,10 @@ void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
fprintf(fp_, "%s:\n", name);
}
-void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid, int line) {
- fprintf(fp_, ".loc %d %d\n", fileid, line);
+void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid,
+ const char* filename,
+ int line) {
+ fprintf(fp_, ".xline %d, \"%s\"\n", line, filename);
}
void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
@@ -299,7 +307,9 @@ void PlatformDependentEmbeddedFileWriter::FilePrologue() {}
void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
int fileid, const char* filename) {
- fprintf(fp_, ".file %d \"%s\"\n", fileid, filename);
+  // The file name cannot be declared with an identifier on AIX.
+  // We use the SourceInfo method to emit debug info in
+  // .xline <line-number> <file-name> format instead.
}
void PlatformDependentEmbeddedFileWriter::FileEpilogue() {}
@@ -354,7 +364,9 @@ void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
fprintf(fp_, "ALIGN 4\n");
}
-void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {}
+void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {
+ fprintf(fp_, "ALIGN 4\n");
+}
void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
fprintf(fp_, "; %s\n", string);
@@ -365,7 +377,9 @@ void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
DirectiveAsString(kByte));
}
-void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid, int line) {
+void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid,
+ const char* filename,
+ int line) {
// TODO(mvstanton): output source information for MSVC.
// Its syntax is #line <line> "<filename>"
}
@@ -471,7 +485,9 @@ void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
fprintf(fp_, "%s%s\n", SYMBOL_PREFIX, name);
}
-void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid, int line) {
+void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid,
+ const char* filename,
+ int line) {
// TODO(mvstanton): output source information for MSVC.
// Its syntax is #line <line> "<filename>"
}
@@ -576,7 +592,9 @@ void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
fprintf(fp_, "%s%s:\n", SYMBOL_PREFIX, name);
}
-void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid, int line) {
+void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid,
+ const char* filename,
+ int line) {
fprintf(fp_, ".loc %d %d\n", fileid, line);
}
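
For illustration, a free-standing analogue of the AIX SourceInfo path above: with no usable .file directive, each source position is emitted as a single .xline directive carrying both the line number and the file name. The function below is a sketch, not the writer's actual member function:

#include <cstdio>

// Assumed free-standing analogue of the AIX SourceInfo override above;
// fp/filename/line are ordinary parameters, not V8 state.
void EmitSourceInfoAIX(std::FILE* fp, const char* filename, int line) {
  std::fprintf(fp, ".xline %d, \"%s\"\n", line, filename);
}

int main() {
  EmitSourceInfoAIX(stdout, "src/api.cc", 42);  // .xline 42, "src/api.cc"
  return 0;
}
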
diff --git a/deps/v8/src/snapshot/embedded-file-writer.h b/deps/v8/src/snapshot/embedded-file-writer.h
index 8323c4d53b..04da018d14 100644
--- a/deps/v8/src/snapshot/embedded-file-writer.h
+++ b/deps/v8/src/snapshot/embedded-file-writer.h
@@ -43,7 +43,7 @@ class PlatformDependentEmbeddedFileWriter final {
void DeclareLabel(const char* name);
- void SourceInfo(int fileid, int line);
+ void SourceInfo(int fileid, const char* filename, int line);
void DeclareFunctionBegin(const char* name);
void DeclareFunctionEnd(const char* name);
@@ -237,6 +237,8 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
if (i == next_offset) {
// Write source directive.
w->SourceInfo(positions.source_position().ExternalFileId(),
+ GetExternallyCompiledFilename(
+ positions.source_position().ExternalFileId()),
positions.source_position().ExternalLine());
positions.Advance();
next_offset = static_cast<uint32_t>(
@@ -298,16 +300,13 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
#define V8_COMPILER_IS_MSVC
#endif
-#if defined(V8_COMPILER_IS_MSVC) || defined(V8_OS_AIX)
+#if defined(V8_COMPILER_IS_MSVC)
// Windows MASM doesn't have an .octa directive, use QWORDs instead.
// Note: MASM *really* does not like large data streams. It takes over 5
// minutes to assemble the ~350K lines of embedded.S produced when using
// BYTE directives in a debug build. QWORD produces roughly 120KLOC and
// reduces assembly time to ~40 seconds. Still terrible, but much better
// than before. See also: https://crbug.com/v8/8475.
-
- // GCC MASM on Aix doesn't have an .octa directive, use .llong instead.
-
static constexpr DataDirective kByteChunkDirective = kQuad;
static constexpr int kByteChunkSize = 8;
@@ -316,21 +315,35 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
const uint64_t* quad_ptr = reinterpret_cast<const uint64_t*>(data);
return current_line_length + w->HexLiteral(*quad_ptr);
}
+
+#elif defined(V8_OS_AIX)
+  // PPC uses fixed-width 4-byte instructions, so emit .long chunks
+  // to prevent any unnecessary padding.
+ static constexpr DataDirective kByteChunkDirective = kLong;
+ static constexpr int kByteChunkSize = 4;
+
+ static int WriteByteChunk(PlatformDependentEmbeddedFileWriter* w,
+ int current_line_length, const uint8_t* data) {
+ const uint32_t* long_ptr = reinterpret_cast<const uint32_t*>(data);
+ return current_line_length + w->HexLiteral(*long_ptr);
+ }
+
#else // defined(V8_COMPILER_IS_MSVC) || defined(V8_OS_AIX)
static constexpr DataDirective kByteChunkDirective = kOcta;
static constexpr int kByteChunkSize = 16;
static int WriteByteChunk(PlatformDependentEmbeddedFileWriter* w,
int current_line_length, const uint8_t* data) {
- const uint64_t* quad_ptr1 = reinterpret_cast<const uint64_t*>(data);
- const uint64_t* quad_ptr2 = reinterpret_cast<const uint64_t*>(data + 8);
+ const size_t size = kInt64Size;
+ uint64_t part1, part2;
+ // Use memcpy for the reads since {data} is not guaranteed to be aligned.
#ifdef V8_TARGET_BIG_ENDIAN
- uint64_t part1 = *quad_ptr1;
- uint64_t part2 = *quad_ptr2;
+ memcpy(&part1, data, size);
+ memcpy(&part2, data + size, size);
#else
- uint64_t part1 = *quad_ptr2;
- uint64_t part2 = *quad_ptr1;
+ memcpy(&part1, data + size, size);
+ memcpy(&part2, data, size);
#endif // V8_TARGET_BIG_ENDIAN
if (part1 != 0) {
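
The memcpy change above avoids undefined behavior: dereferencing a const uint64_t* obtained by casting a byte pointer is UB on alignment-strict targets, whereas memcpy from a possibly unaligned pointer is well-defined and compiles to a plain load on x64. A minimal sketch of the pattern:

#include <cstdint>
#include <cstdio>
#include <cstring>

uint64_t LoadUnalignedU64(const uint8_t* data) {
  uint64_t value;
  std::memcpy(&value, data, sizeof(value));  // safe for any alignment
  return value;
}

int main() {
  uint8_t buffer[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
  std::printf("0x%llx\n",
              static_cast<unsigned long long>(LoadUnalignedU64(buffer + 1)));
  return 0;
}
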
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 4d6e736223..44b7088380 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -5,8 +5,10 @@
#include "src/snapshot/object-deserializer.h"
#include "src/assembler-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/objects/allocation-site-inl.h"
#include "src/objects/slots.h"
#include "src/snapshot/code-serializer.h"
@@ -59,8 +61,8 @@ void ObjectDeserializer::FlushICache() {
for (Code code : new_code_objects()) {
// Record all references to embedded objects in the new code object.
WriteBarrierForCode(code);
- Assembler::FlushICache(code->raw_instruction_start(),
- code->raw_instruction_size());
+ FlushInstructionCache(code->raw_instruction_start(),
+ code->raw_instruction_size());
}
}
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index 769b46e468..4dd25980e8 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -5,6 +5,7 @@
#include "src/snapshot/partial-deserializer.h"
#include "src/api-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/objects/slots.h"
#include "src/snapshot/snapshot.h"
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index a3b4f04161..c71d05b385 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -68,24 +68,21 @@ void PartialSerializer::Serialize(Context* o, bool include_global_proxy) {
Pad();
}
-void PartialSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
+void PartialSerializer::SerializeObject(HeapObject obj) {
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
- if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+ if (SerializeHotObject(obj)) return;
- if (SerializeRoot(obj, how_to_code, where_to_point, skip)) return;
+ if (SerializeRoot(obj)) return;
- if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
+ if (SerializeBackReference(obj)) return;
- if (startup_serializer_->SerializeUsingReadOnlyObjectCache(
- &sink_, obj, how_to_code, where_to_point, skip)) {
+ if (startup_serializer_->SerializeUsingReadOnlyObjectCache(&sink_, obj)) {
return;
}
if (ShouldBeInThePartialSnapshotCache(obj)) {
- startup_serializer_->SerializeUsingPartialSnapshotCache(
- &sink_, obj, how_to_code, where_to_point, skip);
+ startup_serializer_->SerializeUsingPartialSnapshotCache(&sink_, obj);
return;
}
@@ -101,12 +98,10 @@ void PartialSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
// We should not end up at another native context.
DCHECK_IMPLIES(obj != context_, !obj->IsNativeContext());
- FlushSkip(skip);
-
// Clear literal boilerplates and feedback.
if (obj->IsFeedbackVector()) FeedbackVector::cast(obj)->ClearSlots(isolate());
- if (SerializeJSObjectWithEmbedderFields(obj, how_to_code, where_to_point)) {
+ if (SerializeJSObjectWithEmbedderFields(obj)) {
return;
}
@@ -121,7 +116,7 @@ void PartialSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
CheckRehashability(obj);
// Object has not yet been serialized. Serialize it here.
- ObjectSerializer serializer(this, obj, &sink_, how_to_code, where_to_point);
+ ObjectSerializer serializer(this, obj, &sink_);
serializer.Serialize();
}
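
The trimmed SerializeObject above tries the cheapest encodings first and falls back to a full ObjectSerializer only when no short reference form applies. A schematic of that dispatch order, with placeholder predicates standing in for the Serialize* helpers:

#include <cstdio>

// Placeholder predicates; in V8 each helper either emits a short reference
// bytecode and returns true, or declines.
bool TryHotObject() { return false; }
bool TryRoot() { return false; }
bool TryBackReference() { return false; }
bool TryReadOnlyObjectCache() { return false; }
bool TryPartialSnapshotCache() { return true; }

void SerializeObjectSketch() {
  // Cheapest encodings first; full serialization is the last resort.
  if (TryHotObject()) return;
  if (TryRoot()) return;
  if (TryBackReference()) return;
  if (TryReadOnlyObjectCache()) return;
  if (TryPartialSnapshotCache()) return;
  std::puts("fall through to a full ObjectSerializer");
}

int main() {
  SerializeObjectSketch();  // stops at the partial-snapshot-cache stub
  return 0;
}
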
@@ -133,7 +128,7 @@ bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject o) {
DCHECK(!o->IsScript());
return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
- o->IsTemplateInfo() ||
+ o->IsTemplateInfo() || o->IsClassPositions() ||
o->map() == ReadOnlyRoots(startup_serializer_->isolate())
.fixed_cow_array_map();
}
@@ -142,8 +137,7 @@ namespace {
bool DataIsEmpty(const StartupData& data) { return data.raw_size == 0; }
} // anonymous namespace
-bool PartialSerializer::SerializeJSObjectWithEmbedderFields(
- Object obj, HowToCode how_to_code, WhereToPoint where_to_point) {
+bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
if (!obj->IsJSObject()) return false;
JSObject js_obj = JSObject::cast(obj);
int embedder_fields_count = js_obj->GetEmbedderFieldCount();
@@ -194,8 +188,7 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(
// 3) Serialize the object. References from embedder fields to heap objects or
// smis are serialized regularly.
- ObjectSerializer(this, js_obj, &sink_, how_to_code, where_to_point)
- .Serialize();
+ ObjectSerializer(this, js_obj, &sink_).Serialize();
// 4) Obtain back reference for the serialized object.
SerializerReference reference =
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index dca0588a90..55bc5c8aee 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -27,13 +27,11 @@ class PartialSerializer : public Serializer {
bool can_be_rehashed() const { return can_be_rehashed_; }
private:
- void SerializeObject(HeapObject o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeObject(HeapObject o) override;
bool ShouldBeInThePartialSnapshotCache(HeapObject o);
- bool SerializeJSObjectWithEmbedderFields(Object obj, HowToCode how_to_code,
- WhereToPoint where_to_point);
+ bool SerializeJSObjectWithEmbedderFields(Object obj);
void CheckRehashability(HeapObject obj);
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index 57b1f1dbcb..b1b22cc70b 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -6,6 +6,7 @@
#include "src/api.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/heap/read-only-heap.h"
#include "src/objects/slots.h"
#include "src/snapshot/snapshot.h"
#include "src/v8threads.h"
@@ -25,19 +26,24 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
// No active handles.
DCHECK(isolate->handle_scope_implementer()->blocks()->empty());
// Partial snapshot cache is not yet populated.
- DCHECK(isolate->read_only_object_cache()->empty());
+ DCHECK(isolate->heap()->read_only_heap()->read_only_object_cache()->empty());
DCHECK(isolate->partial_snapshot_cache()->empty());
// Builtins are not yet created.
DCHECK(!isolate->builtins()->is_initialized());
{
DisallowHeapAllocation no_gc;
+ ReadOnlyRoots roots(isolate);
- ReadOnlyRoots(isolate).Iterate(this);
- isolate->heap()->read_only_space()->RepairFreeListsAfterDeserialization();
+ roots.Iterate(this);
+ isolate->heap()
+ ->read_only_heap()
+ ->read_only_space()
+ ->RepairFreeListsAfterDeserialization();
// Deserialize the Read-only Object Cache.
- std::vector<Object>* cache = isolate->read_only_object_cache();
+ std::vector<Object>* cache =
+ isolate->heap()->read_only_heap()->read_only_object_cache();
for (size_t i = 0;; ++i) {
// Extend the array ready to get a value when deserializing.
if (cache->size() <= i) cache->push_back(Smi::kZero);
@@ -45,15 +51,15 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
// cache and eventually terminates the cache with undefined.
VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
FullObjectSlot(&cache->at(i)));
- if (cache->at(i)->IsUndefined(isolate)) break;
+ if (cache->at(i)->IsUndefined(roots)) break;
}
DeserializeDeferredObjects();
}
-}
-void ReadOnlyDeserializer::RehashHeap() {
- DCHECK(FLAG_rehash_snapshot && can_rehash());
- Rehash();
+ if (FLAG_rehash_snapshot && can_rehash()) {
+ isolate_->heap()->InitializeHashSeed();
+ Rehash();
+ }
}
} // namespace internal
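
A sketch of the cache-population loop above, with an int sentinel standing in for the undefined value: the deserializer keeps appending placeholder slots (Smi::kZero above), lets the root visitor overwrite each one, and stops at the terminator.

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const int kUndefined = -1;                      // stand-in terminator
  const int stream[] = {10, 20, 30, kUndefined};  // stand-in snapshot data
  std::vector<int> cache;
  for (std::size_t i = 0;; ++i) {
    if (cache.size() <= i) cache.push_back(0);  // extend, like Smi::kZero
    cache[i] = stream[i];                       // VisitRootPointer analogue
    if (cache[i] == kUndefined) break;          // undefined terminates
  }
  std::printf("cache entries incl. terminator: %zu\n", cache.size());
  return 0;
}
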
diff --git a/deps/v8/src/snapshot/read-only-deserializer.h b/deps/v8/src/snapshot/read-only-deserializer.h
index 25b6c29802..08443766c2 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.h
+++ b/deps/v8/src/snapshot/read-only-deserializer.h
@@ -20,12 +20,6 @@ class ReadOnlyDeserializer final : public Deserializer {
// Deserialize the snapshot into an empty heap.
void DeserializeInto(Isolate* isolate);
-
- private:
- friend class StartupDeserializer;
-
- // Rehash after deserializing.
- void RehashHeap();
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index ba20ec8d64..7fa38f78ea 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/code-tracer.h"
#include "src/global-handles.h"
+#include "src/heap/heap-inl.h" // For InReadOnlySpace.
#include "src/objects-inl.h"
#include "src/objects/slots.h"
#include "src/snapshot/startup-serializer.h"
@@ -24,26 +25,20 @@ ReadOnlySerializer::~ReadOnlySerializer() {
OutputStatistics("ReadOnlySerializer");
}
-void ReadOnlySerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip) {
- CHECK(isolate()->heap()->read_only_space()->Contains(obj));
+void ReadOnlySerializer::SerializeObject(HeapObject obj) {
+ CHECK(isolate()->heap()->InReadOnlySpace(obj));
CHECK_IMPLIES(obj->IsString(), obj->IsInternalizedString());
- if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- if (IsRootAndHasBeenSerialized(obj) &&
- SerializeRoot(obj, how_to_code, where_to_point, skip)) {
+ if (SerializeHotObject(obj)) return;
+ if (IsRootAndHasBeenSerialized(obj) && SerializeRoot(obj)) {
return;
}
- if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
-
- FlushSkip(skip);
+ if (SerializeBackReference(obj)) return;
CheckRehashability(obj);
// Object has not yet been serialized. Serialize it here.
- ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
- where_to_point);
+ ObjectSerializer object_serializer(this, obj, &sink_);
object_serializer.Serialize();
}
@@ -84,18 +79,15 @@ bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
}
bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
- SnapshotByteSink* sink, HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- if (!isolate()->heap()->read_only_space()->Contains(obj)) return false;
+ SnapshotByteSink* sink, HeapObject obj) {
+ if (!isolate()->heap()->InReadOnlySpace(obj)) return false;
// Get the cache index and serialize it into the read-only snapshot if
// necessary.
int cache_index = SerializeInObjectCache(obj);
// Writing out the cache entry into the calling serializer's sink.
- FlushSkip(sink, skip);
- sink->Put(kReadOnlyObjectCache + how_to_code + where_to_point,
- "ReadOnlyObjectCache");
+ sink->Put(kReadOnlyObjectCache, "ReadOnlyObjectCache");
sink->PutInt(cache_index, "read_only_object_cache_index");
return true;
diff --git a/deps/v8/src/snapshot/read-only-serializer.h b/deps/v8/src/snapshot/read-only-serializer.h
index 23259f4cc2..06aaa91df7 100644
--- a/deps/v8/src/snapshot/read-only-serializer.h
+++ b/deps/v8/src/snapshot/read-only-serializer.h
@@ -28,13 +28,11 @@ class ReadOnlySerializer : public RootsSerializer {
// read-only object cache if not already present and emit a
// ReadOnlyObjectCache bytecode into |sink|. Returns whether this was
// successful.
- bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink, HeapObject obj,
- HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
+ bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink,
+ HeapObject obj);
private:
- void SerializeObject(HeapObject o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeObject(HeapObject o) override;
bool MustBeDeferred(HeapObject object) override;
DISALLOW_COPY_AND_ASSIGN(ReadOnlySerializer);
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
index 40f1d60345..a54e76316f 100644
--- a/deps/v8/src/snapshot/references.h
+++ b/deps/v8/src/snapshot/references.h
@@ -186,7 +186,7 @@ class SerializerReferenceMap
static uint32_t Hash(uintptr_t key) { return static_cast<uint32_t>(key); }
- DISALLOW_HEAP_ALLOCATION(no_allocation_);
+ DISALLOW_HEAP_ALLOCATION(no_allocation_)
int attached_reference_index_;
DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
};
diff --git a/deps/v8/src/snapshot/roots-serializer.cc b/deps/v8/src/snapshot/roots-serializer.cc
index 14e0e46c51..e634c45eff 100644
--- a/deps/v8/src/snapshot/roots-serializer.cc
+++ b/deps/v8/src/snapshot/roots-serializer.cc
@@ -28,7 +28,7 @@ int RootsSerializer::SerializeInObjectCache(HeapObject heap_object) {
if (!object_cache_index_map_.LookupOrInsert(heap_object, &index)) {
// This object is not part of the object cache yet. Add it to the cache so
// we can refer to it via cache index from the delegating snapshot.
- SerializeObject(heap_object, kPlain, kStartOfObject, 0);
+ SerializeObject(heap_object);
}
return index;
}
@@ -40,7 +40,7 @@ void RootsSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
void RootsSerializer::VisitRootPointers(Root root, const char* description,
FullObjectSlot start,
FullObjectSlot end) {
- RootsTable& roots_table = isolate()->heap()->roots_table();
+ RootsTable& roots_table = isolate()->roots_table();
if (start ==
roots_table.begin() + static_cast<int>(first_root_to_be_serialized_)) {
// Serializing the root list needs special handling:
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index ec24c7831d..a373683886 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -123,162 +123,180 @@ class SerializerDeserializer : public RootVisitor {
void RestoreExternalReferenceRedirectors(
const std::vector<CallHandlerInfo>& call_handler_infos);
-#define UNUSED_SERIALIZER_BYTE_CODES(V) \
- V(0x0e) \
- V(0x2e) \
- V(0x3e) \
- V(0x3f) \
- V(0x4e) \
- V(0x58) \
- V(0x59) \
- V(0x5a) \
- V(0x5b) \
- V(0x5c) \
- V(0x5d) \
- V(0x5e) \
- V(0x5f) \
- V(0x67) \
- V(0x6e) \
- V(0x76) \
- V(0x79) \
- V(0x7a) \
- V(0x7b) \
- V(0x7c)
-
- // ---------- byte code range 0x00..0x7f ----------
- // Byte codes in this range represent Where, HowToCode and WhereToPoint.
- // Where the pointed-to object can be found:
+// clang-format off
+#define UNUSED_SERIALIZER_BYTE_CODES(V) \
+ V(0x06) V(0x07) V(0x0e) V(0x0f) \
+ /* Free range 0x26..0x2f */ \
+ V(0x26) V(0x27) \
+ V(0x28) V(0x29) V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
+ /* Free range 0x30..0x3f */ \
+ V(0x30) V(0x31) V(0x32) V(0x33) V(0x34) V(0x35) V(0x36) V(0x37) \
+ V(0x38) V(0x39) V(0x3a) V(0x3b) V(0x3c) V(0x3d) V(0x3e) V(0x3f) \
+  /* Free range 0x98..0x9f */ \
+ V(0x98) V(0x99) V(0x9a) V(0x9b) V(0x9c) V(0x9d) V(0x9e) V(0x9f) \
+ /* Free range 0xa0..0xaf */ \
+ V(0xa0) V(0xa1) V(0xa2) V(0xa3) V(0xa4) V(0xa5) V(0xa6) V(0xa7) \
+ V(0xa8) V(0xa9) V(0xaa) V(0xab) V(0xac) V(0xad) V(0xae) V(0xaf) \
+ /* Free range 0xb0..0xbf */ \
+ V(0xb0) V(0xb1) V(0xb2) V(0xb3) V(0xb4) V(0xb5) V(0xb6) V(0xb7) \
+ V(0xb8) V(0xb9) V(0xba) V(0xbb) V(0xbc) V(0xbd) V(0xbe) V(0xbf) \
+ /* Free range 0xc0..0xcf */ \
+ V(0xc0) V(0xc1) V(0xc2) V(0xc3) V(0xc4) V(0xc5) V(0xc6) V(0xc7) \
+ V(0xc8) V(0xc9) V(0xca) V(0xcb) V(0xcc) V(0xcd) V(0xce) V(0xcf) \
+ /* Free range 0xd0..0xdf */ \
+ V(0xd0) V(0xd1) V(0xd2) V(0xd3) V(0xd4) V(0xd5) V(0xd6) V(0xd7) \
+ V(0xd8) V(0xd9) V(0xda) V(0xdb) V(0xdc) V(0xdd) V(0xde) V(0xdf) \
+ /* Free range 0xe0..0xef */ \
+ V(0xe0) V(0xe1) V(0xe2) V(0xe3) V(0xe4) V(0xe5) V(0xe6) V(0xe7) \
+ V(0xe8) V(0xe9) V(0xea) V(0xeb) V(0xec) V(0xed) V(0xee) V(0xef) \
+ /* Free range 0xf0..0xff */ \
+ V(0xf0) V(0xf1) V(0xf2) V(0xf3) V(0xf4) V(0xf5) V(0xf6) V(0xf7) \
+ V(0xf8) V(0xf9) V(0xfa) V(0xfb) V(0xfc) V(0xfd) V(0xfe) V(0xff)
+ // clang-format on
+
// The static assert below will trigger when the number of preallocated spaces
- // changed. If that happens, update the bytecode ranges in the comments below.
+ // changed. If that happens, update the kNewObject and kBackref bytecode
+ // ranges in the comments below.
STATIC_ASSERT(6 == kNumberOfSpaces);
- enum Where {
- // 0x00..0x05 Allocate new object, in specified space.
- kNewObject = 0x00,
- // 0x08..0x0d Reference to previous object from space.
- kBackref = 0x08,
- // 0x10..0x15 Reference to previous object from space after skip.
- kBackrefWithSkip = 0x10,
-
- // 0x06 Object in the partial snapshot cache.
- kPartialSnapshotCache = 0x06,
- // 0x07 External reference referenced by id.
- kExternalReference = 0x07,
-
- // 0x16 Root array item.
- kRootArray = 0x16,
- // 0x17 Object provided in the attached list.
- kAttachedReference = 0x17,
- // 0x18 Object in the read-only object cache.
- kReadOnlyObjectCache = 0x18,
-
- // 0x0f Misc, see below (incl. 0x2f, 0x4f, 0x6f).
- // 0x18..0x1f Misc, see below (incl. 0x38..0x3f, 0x58..0x5f, 0x78..0x7f).
- };
-
- static const int kWhereMask = 0x1f;
static const int kSpaceMask = 7;
STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
- // How to code the pointer to the object.
- enum HowToCode {
- // Straight pointer.
- kPlain = 0,
- // A pointer inlined in code. What this means depends on the architecture.
- kFromCode = 0x20
- };
-
- static const int kHowToCodeMask = 0x20;
-
- // Where to point within the object.
- enum WhereToPoint {
- // Points to start of object
- kStartOfObject = 0,
- // Points to instruction in code object or payload of cell.
- kInnerPointer = 0x40
- };
-
- static const int kWhereToPointMask = 0x40;
-
- // ---------- Misc ----------
- // Skip.
- static const int kSkip = 0x0f;
- // Do nothing, used for padding.
- static const int kNop = 0x2f;
- // Move to next reserved chunk.
- static const int kNextChunk = 0x4f;
- // Deferring object content.
- static const int kDeferred = 0x6f;
- // Alignment prefixes 0x19..0x1b
- static const int kAlignmentPrefix = 0x19;
- // A tag emitted at strategic points in the snapshot to delineate sections.
- // If the deserializer does not find these at the expected moments then it
- // is an indication that the snapshot and the VM do not fit together.
- // Examine the build process for architecture, version or configuration
- // mismatches.
- static const int kSynchronize = 0x1c;
- // Repeats of variable length.
- static const int kVariableRepeat = 0x1d;
- // Raw data of variable length.
-
- // Used for embedder-allocated backing stores for TypedArrays.
- static const int kOffHeapBackingStore = 0x1e;
-
- // Used for embedder-provided serialization data for embedder fields.
- static const int kEmbedderFieldsData = 0x1f;
-
- static const int kVariableRawCode = 0x39;
- static const int kVariableRawData = 0x3a;
-
- static const int kInternalReference = 0x3b;
- static const int kInternalReferenceEncoded = 0x3c;
-
- // Used to encode external references provided through the API.
- static const int kApiReference = 0x3d;
-
- // In-place weak references
- static const int kClearedWeakReference = 0x7d;
- static const int kWeakPrefix = 0x7e;
-
- // Encodes an off-heap instruction stream target.
- static const int kOffHeapTarget = 0x7f;
-
- // ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
static const int kNumberOfRootArrayConstants = 0x20;
- // 0x80..0x9f
- static const int kRootArrayConstants = 0x80;
- // 0xa0..0xbf
- static const int kRootArrayConstantsWithSkip = 0xa0;
static const int kRootArrayConstantsMask = 0x1f;
// 32 common raw data lengths.
static const int kNumberOfFixedRawData = 0x20;
- // 0xc0..0xdf
- static const int kFixedRawData = 0xc0;
- static const int kOnePointerRawData = kFixedRawData;
- static const int kFixedRawDataStart = kFixedRawData - 1;
// 16 repeats lengths.
static const int kNumberOfFixedRepeat = 0x10;
- // 0xe0..0xef
- static const int kFixedRepeat = 0xe0;
- static const int kFixedRepeatStart = kFixedRepeat - 1;
// 8 hot (recently seen or back-referenced) objects with optional skip.
static const int kNumberOfHotObjects = 8;
STATIC_ASSERT(kNumberOfHotObjects == HotObjectsList::kSize);
- // 0xf0..0xf7
- static const int kHotObject = 0xf0;
- // 0xf8..0xff
- static const int kHotObjectWithSkip = 0xf8;
static const int kHotObjectMask = 0x07;
- // ---------- special values ----------
+ enum Bytecode {
+ //
+ // ---------- byte code range 0x00..0x0f ----------
+ //
+
+ // 0x00..0x05 Allocate new object, in specified space.
+ kNewObject = 0x00,
+ // 0x08..0x0d Reference to previous object from specified space.
+ kBackref = 0x08,
+
+ //
+ // ---------- byte code range 0x10..0x25 ----------
+ //
+
+ // Object in the partial snapshot cache.
+ kPartialSnapshotCache = 0x10,
+ // Root array item.
+ kRootArray,
+ // Object provided in the attached list.
+ kAttachedReference,
+ // Object in the read-only object cache.
+ kReadOnlyObjectCache,
+ // Do nothing, used for padding.
+ kNop,
+ // Move to next reserved chunk.
+ kNextChunk,
+ // Deferring object content.
+ kDeferred,
+ // 3 alignment prefixes 0x17..0x19
+ kAlignmentPrefix = 0x17,
+ // A tag emitted at strategic points in the snapshot to delineate sections.
+ // If the deserializer does not find these at the expected moments then it
+ // is an indication that the snapshot and the VM do not fit together.
+ // Examine the build process for architecture, version or configuration
+ // mismatches.
+ kSynchronize = 0x1a,
+ // Repeats of variable length.
+ kVariableRepeat,
+ // Used for embedder-allocated backing stores for TypedArrays.
+ kOffHeapBackingStore,
+ // Used for embedder-provided serialization data for embedder fields.
+ kEmbedderFieldsData,
+ // Raw data of variable length.
+ kVariableRawCode,
+ kVariableRawData,
+ // Used to encode external references provided through the API.
+ kApiReference,
+ // External reference referenced by id.
+ kExternalReference,
+ // Internal reference of a code object in the code stream.
+ kInternalReference,
+ // In-place weak references.
+ kClearedWeakReference,
+ kWeakPrefix,
+ // Encodes an off-heap instruction stream target.
+ kOffHeapTarget,
+
+ //
+ // ---------- byte code range 0x40..0x7f ----------
+ //
+
+ // 0x40..0x5f
+ kRootArrayConstants = 0x40,
+
+ // 0x60..0x7f
+ kFixedRawData = 0x60,
+ kOnePointerRawData = kFixedRawData,
+ kFixedRawDataStart = kFixedRawData - 1,
+
+ //
+ // ---------- byte code range 0x80..0x9f ----------
+ //
+
+ // 0x80..0x8f
+ kFixedRepeat = 0x80,
+
+ // 0x90..0x97
+ kHotObject = 0x90,
+ };
+
+ //
+ // Some other constants.
+ //
static const int kAnyOldSpace = -1;
// Sentinel after a new object to indicate that double alignment is needed.
static const int kDoubleAlignmentSentinel = 0;
+ // Repeat count encoding helpers.
+ static const int kFirstEncodableRepeatCount = 2;
+ static const int kLastEncodableFixedRepeatCount =
+ kFirstEncodableRepeatCount + kNumberOfFixedRepeat - 1;
+ static const int kFirstEncodableVariableRepeatCount =
+ kLastEncodableFixedRepeatCount + 1;
+
+ // Encodes repeat count into a fixed repeat bytecode.
+ static int EncodeFixedRepeat(int repeat_count) {
+ DCHECK(IsInRange(repeat_count, kFirstEncodableRepeatCount,
+ kLastEncodableFixedRepeatCount));
+ return kFixedRepeat + repeat_count - kFirstEncodableRepeatCount;
+ }
+
+ // Decodes repeat count from a fixed repeat bytecode.
+ static int DecodeFixedRepeatCount(int bytecode) {
+ DCHECK(IsInRange(bytecode, kFixedRepeat + 0,
+ kFixedRepeat + kNumberOfFixedRepeat));
+ return bytecode - kFixedRepeat + kFirstEncodableRepeatCount;
+ }
+
+ // Encodes repeat count into a serialized variable repeat count value.
+ static int EncodeVariableRepeatCount(int repeat_count) {
+ DCHECK_LE(kFirstEncodableVariableRepeatCount, repeat_count);
+ return repeat_count - kFirstEncodableVariableRepeatCount;
+ }
+
+ // Decodes repeat count from a serialized variable repeat count value.
+ static int DecodeVariableRepeatCount(int value) {
+ DCHECK_LE(0, value);
+ return value + kFirstEncodableVariableRepeatCount;
+ }
+
// ---------- member variable ----------
HotObjectsList hot_objects_;
};
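As a sanity check on the new repeat encoding, here is a minimal standalone
sketch (not V8 code; the constants mirror the header above) showing how repeat
counts 2..17 map onto the sixteen one-byte kFixedRepeat codes 0x80..0x8f and
round-trip through the helpers:

    #include <cassert>

    // Constants as declared in SerializerDeserializer above.
    constexpr int kFixedRepeat = 0x80;
    constexpr int kNumberOfFixedRepeat = 0x10;
    constexpr int kFirstEncodableRepeatCount = 2;
    constexpr int kLastEncodableFixedRepeatCount =
        kFirstEncodableRepeatCount + kNumberOfFixedRepeat - 1;  // 17
    constexpr int kFirstEncodableVariableRepeatCount =
        kLastEncodableFixedRepeatCount + 1;  // 18

    int EncodeFixedRepeat(int repeat_count) {
      return kFixedRepeat + repeat_count - kFirstEncodableRepeatCount;
    }
    int DecodeFixedRepeatCount(int bytecode) {
      return bytecode - kFixedRepeat + kFirstEncodableRepeatCount;
    }

    int main() {
      assert(EncodeFixedRepeat(2) == 0x80);   // Smallest encodable repeat.
      assert(EncodeFixedRepeat(17) == 0x8f);  // Largest fixed repeat.
      for (int n = 2; n <= 17; n++) {
        assert(DecodeFixedRepeatCount(EncodeFixedRepeat(n)) == n);
      }
      // Larger counts go through kVariableRepeat, rebased so the smallest
      // variable count (18) serializes as 0.
      assert(18 - kFirstEncodableVariableRepeatCount == 0);
      return 0;
    }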
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 50394ab843..e5b371ef28 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -5,7 +5,7 @@
#include "src/snapshot/serializer.h"
#include "src/assembler-inl.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h" // For Space::identity().
#include "src/interpreter/interpreter.h"
#include "src/objects/code.h"
#include "src/objects/js-array-buffer-inl.h"
@@ -91,7 +91,7 @@ void Serializer::SerializeDeferredObjects() {
while (!deferred_objects_.empty()) {
HeapObject obj = deferred_objects_.back();
deferred_objects_.pop_back();
- ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject);
+ ObjectSerializer obj_serializer(this, obj, &sink_);
obj_serializer.SerializeDeferred();
}
sink_.Put(kSynchronize, "Finished with deferred objects");
@@ -110,7 +110,7 @@ void Serializer::SerializeRootObject(Object object) {
if (object->IsSmi()) {
PutSmi(Smi::cast(object));
} else {
- SerializeObject(HeapObject::cast(object), kPlain, kStartOfObject, 0);
+ SerializeObject(HeapObject::cast(object));
}
}
@@ -123,21 +123,18 @@ void Serializer::PrintStack() {
}
#endif // DEBUG
-bool Serializer::SerializeRoot(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
+bool Serializer::SerializeRoot(HeapObject obj) {
RootIndex root_index;
// Derived serializers are responsible for determining if the root has
// actually been serialized before calling this.
if (root_index_map()->Lookup(obj, &root_index)) {
- PutRoot(root_index, obj, how_to_code, where_to_point, skip);
+ PutRoot(root_index, obj);
return true;
}
return false;
}
-bool Serializer::SerializeHotObject(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
+bool Serializer::SerializeHotObject(HeapObject obj) {
// Encode a reference to a hot object by its index in the working set.
int index = hot_objects_.Find(obj);
if (index == HotObjectsList::kNotFound) return false;
@@ -147,17 +144,12 @@ bool Serializer::SerializeHotObject(HeapObject obj, HowToCode how_to_code,
obj->ShortPrint();
PrintF("\n");
}
- if (skip != 0) {
- sink_.Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
- sink_.PutInt(skip, "HotObjectSkipDistance");
- } else {
- sink_.Put(kHotObject + index, "HotObject");
- }
+ // TODO(ishell): remove kHotObjectWithSkip
+ sink_.Put(kHotObject + index, "HotObject");
return true;
}
-bool Serializer::SerializeBackReference(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
+bool Serializer::SerializeBackReference(HeapObject obj) {
SerializerReference reference =
reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()));
if (!reference.is_valid()) return false;
@@ -166,12 +158,11 @@ bool Serializer::SerializeBackReference(HeapObject obj, HowToCode how_to_code,
// offset from the start of the deserialized objects or as an offset
// backwards from the current allocation pointer.
if (reference.is_attached_reference()) {
- FlushSkip(skip);
if (FLAG_trace_serializer) {
PrintF(" Encoding attached reference %d\n",
reference.attached_reference_index());
}
- PutAttachedReference(reference, how_to_code, where_to_point);
+ PutAttachedReference(reference);
} else {
DCHECK(reference.is_back_reference());
if (FLAG_trace_serializer) {
@@ -182,13 +173,7 @@ bool Serializer::SerializeBackReference(HeapObject obj, HowToCode how_to_code,
PutAlignmentPrefix(obj);
AllocationSpace space = reference.space();
- if (skip == 0) {
- sink_.Put(kBackref + how_to_code + where_to_point + space, "BackRef");
- } else {
- sink_.Put(kBackrefWithSkip + how_to_code + where_to_point + space,
- "BackRefWithSkip");
- sink_.PutInt(skip, "BackRefSkipDistance");
- }
+ sink_.Put(kBackref + space, "BackRef");
PutBackReference(obj, reference);
}
return true;
@@ -199,10 +184,7 @@ bool Serializer::ObjectIsBytecodeHandler(HeapObject obj) const {
return (Code::cast(obj)->kind() == Code::BYTECODE_HANDLER);
}
-void Serializer::PutRoot(RootIndex root, HeapObject object,
- SerializerDeserializer::HowToCode how_to_code,
- SerializerDeserializer::WhereToPoint where_to_point,
- int skip) {
+void Serializer::PutRoot(RootIndex root, HeapObject object) {
int root_index = static_cast<int>(root);
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
@@ -215,17 +197,12 @@ void Serializer::PutRoot(RootIndex root, HeapObject object,
STATIC_ASSERT(static_cast<int>(RootIndex::kArgumentsMarker) ==
kNumberOfRootArrayConstants - 1);
- if (how_to_code == kPlain && where_to_point == kStartOfObject &&
- root_index < kNumberOfRootArrayConstants && !Heap::InNewSpace(object)) {
- if (skip == 0) {
- sink_.Put(kRootArrayConstants + root_index, "RootConstant");
- } else {
- sink_.Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
- sink_.PutInt(skip, "SkipInPutRoot");
- }
+ // TODO(ulan): Check that it works with young large objects.
+ if (root_index < kNumberOfRootArrayConstants &&
+ !Heap::InYoungGeneration(object)) {
+ sink_.Put(kRootArrayConstants + root_index, "RootConstant");
} else {
- FlushSkip(skip);
- sink_.Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
+ sink_.Put(kRootArray, "RootSerialization");
sink_.PutInt(root_index, "root_index");
hot_objects_.Add(object);
}
@@ -260,14 +237,9 @@ void Serializer::PutBackReference(HeapObject object,
hot_objects_.Add(object);
}
-void Serializer::PutAttachedReference(SerializerReference reference,
- HowToCode how_to_code,
- WhereToPoint where_to_point) {
+void Serializer::PutAttachedReference(SerializerReference reference) {
DCHECK(reference.is_attached_reference());
- DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
- (how_to_code == kFromCode && where_to_point == kStartOfObject) ||
- (how_to_code == kFromCode && where_to_point == kInnerPointer));
- sink_.Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
+ sink_.Put(kAttachedReference, "AttachedRef");
sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}
@@ -287,6 +259,15 @@ void Serializer::PutNextChunk(int space) {
sink_.Put(space, "NextChunkSpace");
}
+void Serializer::PutRepeat(int repeat_count) {
+ if (repeat_count <= kLastEncodableFixedRepeatCount) {
+ sink_.Put(EncodeFixedRepeat(repeat_count), "FixedRepeat");
+ } else {
+ sink_.Put(kVariableRepeat, "VariableRepeat");
+ sink_.PutInt(EncodeVariableRepeatCount(repeat_count), "repeat count");
+ }
+}
+
void Serializer::Pad(int padding_offset) {
// The non-branching GetInt will read up to 3 bytes too far, so we need
// to pad the snapshot to make sure we don't read over the end.
@@ -327,21 +308,20 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
SerializerReference back_reference;
if (space == LO_SPACE) {
- sink_->Put(kNewObject + reference_representation_ + space,
- "NewLargeObject");
+ sink_->Put(kNewObject + space, "NewLargeObject");
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
CHECK(!object_->IsCode());
back_reference = serializer_->allocator()->AllocateLargeObject(size);
} else if (space == MAP_SPACE) {
DCHECK_EQ(Map::kSize, size);
back_reference = serializer_->allocator()->AllocateMap();
- sink_->Put(kNewObject + reference_representation_ + space, "NewMap");
+ sink_->Put(kNewObject + space, "NewMap");
// This is redundant, but we include it anyway.
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
} else {
int fill = serializer_->PutAlignmentPrefix(object_);
back_reference = serializer_->allocator()->Allocate(space, size + fill);
- sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
+ sink_->Put(kNewObject + space, "NewObject");
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
}
@@ -356,7 +336,7 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
back_reference);
// Serialize the map (first word of the object).
- serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
+ serializer_->SerializeObject(map);
}
int32_t Serializer::ObjectSerializer::SerializeBackingStore(
@@ -501,6 +481,7 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// Output the rest of the imaginary string.
int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
+ DCHECK(IsAligned(bytes_to_output, kTaggedSize));
// Output raw data header. Do not bother with common raw length cases here.
sink_->Put(kVariableRawData, "RawDataForString");
@@ -546,7 +527,7 @@ class UnlinkWeakNextScope {
private:
HeapObject object_;
Object next_;
- DISALLOW_HEAP_ALLOCATION(no_gc_);
+ DISALLOW_HEAP_ALLOCATION(no_gc_)
};
void Serializer::ObjectSerializer::Serialize() {
@@ -652,8 +633,6 @@ void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
OutputCode(size);
// Then iterate references via reloc info.
object_->IterateBody(map, size, this);
- // Finally skip to the end.
- serializer_->FlushSkip(SkipTo(object_->address() + size));
} else {
// For other objects, iterate references first.
object_->IterateBody(map, size, this);
@@ -691,80 +670,67 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
while (current < end &&
(*current)->GetHeapObject(&current_contents, &reference_type)) {
RootIndex root_index;
+ // Compute repeat count and write repeat prefix if applicable.
// Repeats are not subject to the write barrier so we can only use
// immortal immovable root members. They are never in new space.
- if (current != start &&
+ MaybeObjectSlot repeat_end = current + 1;
+ if (repeat_end < end &&
serializer_->root_index_map()->Lookup(current_contents,
&root_index) &&
RootsTable::IsImmortalImmovable(root_index) &&
- *current == *(current - 1)) {
+ *current == *repeat_end) {
DCHECK_EQ(reference_type, HeapObjectReferenceType::STRONG);
- DCHECK(!Heap::InNewSpace(current_contents));
- int repeat_count = 1;
- while (current + repeat_count < end - 1 &&
- *(current + repeat_count) == *current) {
- repeat_count++;
+ DCHECK(!Heap::InYoungGeneration(current_contents));
+ while (repeat_end < end && *repeat_end == *current) {
+ repeat_end++;
}
- current += repeat_count;
+ int repeat_count = static_cast<int>(repeat_end - current);
+ current = repeat_end;
bytes_processed_so_far_ += repeat_count * kTaggedSize;
- if (repeat_count > kNumberOfFixedRepeat) {
- sink_->Put(kVariableRepeat, "VariableRepeat");
- sink_->PutInt(repeat_count, "repeat count");
- } else {
- sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
- }
+ serializer_->PutRepeat(repeat_count);
} else {
- if (reference_type == HeapObjectReferenceType::WEAK) {
- sink_->Put(kWeakPrefix, "WeakReference");
- }
- serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
- 0);
bytes_processed_so_far_ += kTaggedSize;
++current;
}
+ // Now write the object itself.
+ if (reference_type == HeapObjectReferenceType::WEAK) {
+ sink_->Put(kWeakPrefix, "WeakReference");
+ }
+ serializer_->SerializeObject(current_contents);
}
}
}
void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
RelocInfo* rinfo) {
- int skip = SkipTo(rinfo->target_address_address());
- HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
Object object = rinfo->target_object();
- serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
- kStartOfObject, skip);
+ serializer_->SerializeObject(HeapObject::cast(object));
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitExternalReference(Foreign host,
Address* p) {
- int skip = SkipTo(reinterpret_cast<Address>(p));
Address target = *p;
auto encoded_reference = serializer_->EncodeExternalReference(target);
if (encoded_reference.is_from_api()) {
sink_->Put(kApiReference, "ApiRef");
} else {
- sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
+ sink_->Put(kExternalReference, "ExternalRef");
}
- sink_->PutInt(skip, "SkipB4ExternalRef");
sink_->PutInt(encoded_reference.index(), "reference index");
bytes_processed_so_far_ += kSystemPointerSize;
}
void Serializer::ObjectSerializer::VisitExternalReference(Code host,
RelocInfo* rinfo) {
- int skip = SkipTo(rinfo->target_address_address());
Address target = rinfo->target_external_reference();
auto encoded_reference = serializer_->EncodeExternalReference(target);
if (encoded_reference.is_from_api()) {
DCHECK(!rinfo->IsCodedSpecially());
sink_->Put(kApiReference, "ApiRef");
} else {
- HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
- sink_->Put(kExternalReference + how_to_code + kStartOfObject,
- "ExternalRef");
+ sink_->Put(kExternalReference, "ExternalRef");
}
- sink_->PutInt(skip, "SkipB4ExternalRef");
DCHECK_NE(target, kNullAddress); // Code does not reference null.
sink_->PutInt(encoded_reference.index(), "reference index");
bytes_processed_so_far_ += rinfo->target_address_size();
@@ -772,38 +738,18 @@ void Serializer::ObjectSerializer::VisitExternalReference(Code host,
void Serializer::ObjectSerializer::VisitInternalReference(Code host,
RelocInfo* rinfo) {
- // We do not use skip from last patched pc to find the pc to patch, since
- // target_address_address may not return addresses in ascending order when
- // used for internal references. External references may be stored at the
- // end of the code in the constant pool, whereas internal references are
- // inline. That would cause the skip to be negative. Instead, we store the
- // offset from code entry.
Address entry = Code::cast(object_)->entry();
- DCHECK_GE(rinfo->target_internal_reference_address(), entry);
- uintptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
- DCHECK_LE(pc_offset, Code::cast(object_)->raw_instruction_size());
DCHECK_GE(rinfo->target_internal_reference(), entry);
uintptr_t target_offset = rinfo->target_internal_reference() - entry;
DCHECK_LE(target_offset, Code::cast(object_)->raw_instruction_size());
- sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
- ? kInternalReference
- : kInternalReferenceEncoded,
- "InternalRef");
- sink_->PutInt(pc_offset, "internal ref address");
+ sink_->Put(kInternalReference, "InternalRef");
sink_->PutInt(target_offset, "internal ref value");
}
void Serializer::ObjectSerializer::VisitRuntimeEntry(Code host,
RelocInfo* rinfo) {
- int skip = SkipTo(rinfo->target_address_address());
- HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
- Address target = rinfo->target_address();
- auto encoded_reference = serializer_->EncodeExternalReference(target);
- DCHECK(!encoded_reference.is_from_api());
- sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
- sink_->PutInt(skip, "SkipB4ExternalRef");
- sink_->PutInt(encoded_reference.index(), "reference index");
- bytes_processed_so_far_ += rinfo->target_address_size();
+ // We no longer serialize code that contains runtime entries.
+ UNREACHABLE();
}
void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
@@ -817,53 +763,46 @@ void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
Code target = InstructionStream::TryLookupCode(serializer_->isolate(), addr);
CHECK(Builtins::IsIsolateIndependentBuiltin(target));
- int skip = SkipTo(rinfo->target_address_address());
sink_->Put(kOffHeapTarget, "OffHeapTarget");
- sink_->PutInt(skip, "SkipB4OffHeapTarget");
sink_->PutInt(target->builtin_index(), "builtin index");
bytes_processed_so_far_ += rinfo->target_address_size();
}
-namespace {
-
-class CompareRelocInfo {
- public:
- bool operator()(RelocInfo x, RelocInfo y) {
- // Everything that does not use target_address_address will compare equal.
- Address x_num = 0;
- Address y_num = 0;
- if (x.HasTargetAddressAddress()) x_num = x.target_address_address();
- if (y.HasTargetAddressAddress()) y_num = y.target_address_address();
- return x_num > y_num;
- }
-};
-
-} // namespace
-
-void Serializer::ObjectSerializer::VisitRelocInfo(RelocIterator* it) {
- std::priority_queue<RelocInfo, std::vector<RelocInfo>, CompareRelocInfo>
- reloc_queue;
- for (; !it->done(); it->next()) {
- reloc_queue.push(*it->rinfo());
- }
- while (!reloc_queue.empty()) {
- RelocInfo rinfo = reloc_queue.top();
- reloc_queue.pop();
- rinfo.Visit(this);
- }
-}
-
void Serializer::ObjectSerializer::VisitCodeTarget(Code host,
RelocInfo* rinfo) {
#ifdef V8_TARGET_ARCH_ARM
DCHECK(!RelocInfo::IsRelativeCodeTarget(rinfo->rmode()));
#endif
- int skip = SkipTo(rinfo->target_address_address());
Code object = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
+ serializer_->SerializeObject(object);
bytes_processed_so_far_ += rinfo->target_address_size();
}
+namespace {
+
+// Similar to OutputRawData, but substitutes the given field with the given
+// value instead of reading it from the object.
+void OutputRawWithCustomField(SnapshotByteSink* sink, Address object_start,
+ int written_so_far, int bytes_to_write,
+ int field_offset, int field_size,
+ const byte* field_value) {
+ int offset = field_offset - written_so_far;
+ if (0 <= offset && offset < bytes_to_write) {
+ DCHECK_GE(bytes_to_write, offset + field_size);
+ sink->PutRaw(reinterpret_cast<byte*>(object_start + written_so_far), offset,
+ "Bytes");
+ sink->PutRaw(field_value, field_size, "Bytes");
+ written_so_far += offset + field_size;
+ bytes_to_write -= offset + field_size;
+ sink->PutRaw(reinterpret_cast<byte*>(object_start + written_so_far),
+ bytes_to_write, "Bytes");
+ } else {
+ sink->PutRaw(reinterpret_cast<byte*>(object_start + written_so_far),
+ bytes_to_write, "Bytes");
+ }
+}
+} // anonymous namespace
+
void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
Address object_start = object_->address();
int base = bytes_processed_so_far_;
@@ -888,21 +827,21 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
reinterpret_cast<void*>(object_start + base), bytes_to_output);
#endif // MEMORY_SANITIZER
if (object_->IsBytecodeArray()) {
- // The code age byte can be changed concurrently by GC.
- const int bytes_to_age_byte = BytecodeArray::kBytecodeAgeOffset - base;
- if (0 <= bytes_to_age_byte && bytes_to_age_byte < bytes_to_output) {
- sink_->PutRaw(reinterpret_cast<byte*>(object_start + base),
- bytes_to_age_byte, "Bytes");
- byte bytecode_age = BytecodeArray::kNoAgeBytecodeAge;
- sink_->PutRaw(&bytecode_age, 1, "Bytes");
- const int bytes_written = bytes_to_age_byte + 1;
- sink_->PutRaw(
- reinterpret_cast<byte*>(object_start + base + bytes_written),
- bytes_to_output - bytes_written, "Bytes");
- } else {
- sink_->PutRaw(reinterpret_cast<byte*>(object_start + base),
- bytes_to_output, "Bytes");
- }
+ // The bytecode age field can be changed by GC concurrently.
+ byte field_value = BytecodeArray::kNoAgeBytecodeAge;
+ OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
+ BytecodeArray::kBytecodeAgeOffset,
+ sizeof(field_value), &field_value);
+ } else if (object_->IsDescriptorArray()) {
+ // The number of marked descriptors field can be changed by GC
+ // concurrently.
+ byte field_value[2];
+ field_value[0] = 0;
+ field_value[1] = 0;
+ OutputRawWithCustomField(
+ sink_, object_start, base, bytes_to_output,
+ DescriptorArray::kRawNumberOfMarkedDescriptorsOffset,
+ sizeof(field_value), field_value);
} else {
sink_->PutRaw(reinterpret_cast<byte*>(object_start + base),
bytes_to_output, "Bytes");
@@ -910,18 +849,6 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
}
}
-int Serializer::ObjectSerializer::SkipTo(Address to) {
- Address object_start = object_->address();
- int up_to_offset = static_cast<int>(to - object_start);
- int to_skip = up_to_offset - bytes_processed_so_far_;
- bytes_processed_so_far_ += to_skip;
- // This assert will fail if the reloc info gives us the target_address_address
- // locations in a non-ascending order. We make sure this doesn't happen by
- // sorting the relocation info.
- DCHECK_GE(to_skip, 0);
- return to_skip;
-}
-
void Serializer::ObjectSerializer::OutputCode(int size) {
DCHECK_EQ(kTaggedSize, bytes_processed_so_far_);
Code on_heap_code = Code::cast(object_);
@@ -950,6 +877,7 @@ void Serializer::ObjectSerializer::OutputCode(int size) {
Address start = off_heap_code->address() + Code::kDataStart;
int bytes_to_output = size - Code::kDataStart;
+ DCHECK(IsAligned(bytes_to_output, kTaggedSize));
sink_->Put(kVariableRawCode, "VariableRawCode");
sink_->PutInt(bytes_to_output, "length");
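The field-substitution logic in OutputRawWithCustomField above can be
exercised in isolation; in this sketch the sink is a plain std::vector<byte>
rather than a SnapshotByteSink (an assumption made for the example only):

    #include <cassert>
    #include <cstring>
    #include <vector>

    using byte = unsigned char;

    // Emit bytes_to_write bytes of the object starting at written_so_far,
    // substituting field_size bytes at field_offset with field_value.
    void OutputRawWithCustomField(std::vector<byte>* sink, const byte* object,
                                  int written_so_far, int bytes_to_write,
                                  int field_offset, int field_size,
                                  const byte* field_value) {
      int offset = field_offset - written_so_far;
      if (0 <= offset && offset < bytes_to_write) {
        sink->insert(sink->end(), object + written_so_far,
                     object + written_so_far + offset);
        sink->insert(sink->end(), field_value, field_value + field_size);
        written_so_far += offset + field_size;
        bytes_to_write -= offset + field_size;
      }
      sink->insert(sink->end(), object + written_so_far,
                   object + written_so_far + bytes_to_write);
    }

    int main() {
      byte object[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      byte zero = 0;
      std::vector<byte> sink;
      // Substitute the byte at offset 3 (e.g. a GC-mutable age field).
      OutputRawWithCustomField(&sink, object, 0, 8, 3, 1, &zero);
      byte expected[8] = {1, 2, 3, 0, 5, 6, 7, 8};
      assert(sink.size() == 8);
      assert(std::memcmp(sink.data(), expected, 8) == 0);
      return 0;
    }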
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 9f37d6ffd9..9f7c0ac90c 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -191,8 +191,7 @@ class Serializer : public SerializerDeserializer {
};
void SerializeDeferredObjects();
- virtual void SerializeObject(HeapObject o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) = 0;
+ virtual void SerializeObject(HeapObject o) = 0;
virtual bool MustBeDeferred(HeapObject object);
@@ -200,40 +199,27 @@ class Serializer : public SerializerDeserializer {
FullObjectSlot start, FullObjectSlot end) override;
void SerializeRootObject(Object object);
- void PutRoot(RootIndex root_index, HeapObject object, HowToCode how,
- WhereToPoint where, int skip);
+ void PutRoot(RootIndex root_index, HeapObject object);
void PutSmi(Smi smi);
void PutBackReference(HeapObject object, SerializerReference reference);
- void PutAttachedReference(SerializerReference reference,
- HowToCode how_to_code, WhereToPoint where_to_point);
+ void PutAttachedReference(SerializerReference reference);
// Emit alignment prefix if necessary, return required padding space in bytes.
int PutAlignmentPrefix(HeapObject object);
void PutNextChunk(int space);
+ void PutRepeat(int repeat_count);
// Returns true if the object was successfully serialized as a root.
- bool SerializeRoot(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
+ bool SerializeRoot(HeapObject obj);
// Returns true if the object was successfully serialized as hot object.
- bool SerializeHotObject(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
+ bool SerializeHotObject(HeapObject obj);
// Returns true if the object was successfully serialized as back reference.
- bool SerializeBackReference(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
+ bool SerializeBackReference(HeapObject obj);
// Returns true if the given heap object is a bytecode handler code object.
bool ObjectIsBytecodeHandler(HeapObject obj) const;
- static inline void FlushSkip(SnapshotByteSink* sink, int skip) {
- if (skip != 0) {
- sink->Put(kSkip, "SkipFromSerializeObject");
- sink->PutInt(skip, "SkipDistanceFromSerializeObject");
- }
- }
-
- inline void FlushSkip(int skip) { FlushSkip(&sink_, skip); }
-
ExternalReferenceEncoder::Value EncodeExternalReference(Address addr) {
return external_reference_encoder_.Encode(addr);
}
@@ -303,12 +289,10 @@ class RelocInfoIterator;
class Serializer::ObjectSerializer : public ObjectVisitor {
public:
ObjectSerializer(Serializer* serializer, HeapObject obj,
- SnapshotByteSink* sink, HowToCode how_to_code,
- WhereToPoint where_to_point)
+ SnapshotByteSink* sink)
: serializer_(serializer),
object_(obj),
sink_(sink),
- reference_representation_(how_to_code + where_to_point),
bytes_processed_so_far_(0) {
#ifdef DEBUG
serializer_->PushStack(obj);
@@ -334,8 +318,6 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void VisitCodeTarget(Code host, RelocInfo* target) override;
void VisitRuntimeEntry(Code host, RelocInfo* reloc) override;
void VisitOffHeapTarget(Code host, RelocInfo* target) override;
- // Relocation info needs to be visited sorted by target_address_address.
- void VisitRelocInfo(RelocIterator* it) override;
private:
void SerializePrologue(AllocationSpace space, int size, Map map);
@@ -345,7 +327,6 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void SerializeContent(Map map, int size);
void OutputRawData(Address up_to);
void OutputCode(int size);
- int SkipTo(Address to);
int32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
void SerializeJSTypedArray();
void SerializeJSArrayBuffer();
@@ -355,7 +336,6 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
Serializer* serializer_;
HeapObject object_;
SnapshotByteSink* sink_;
- int reference_representation_;
int bytes_processed_so_far_;
};
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 8ee14aac9d..8d65235902 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -9,6 +9,7 @@
#include "src/base/platform/platform.h"
#include "src/counters.h"
#include "src/snapshot/partial-deserializer.h"
+#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/startup-deserializer.h"
#include "src/version.h"
@@ -44,10 +45,12 @@ bool Snapshot::Initialize(Isolate* isolate) {
SnapshotData startup_snapshot_data(startup_data);
Vector<const byte> read_only_data = ExtractReadOnlyData(blob);
SnapshotData read_only_snapshot_data(read_only_data);
- StartupDeserializer deserializer(&startup_snapshot_data,
- &read_only_snapshot_data);
- deserializer.SetRehashability(ExtractRehashability(blob));
- bool success = isolate->Init(&deserializer);
+ StartupDeserializer startup_deserializer(&startup_snapshot_data);
+ ReadOnlyDeserializer read_only_deserializer(&read_only_snapshot_data);
+ startup_deserializer.SetRehashability(ExtractRehashability(blob));
+ read_only_deserializer.SetRehashability(ExtractRehashability(blob));
+ bool success =
+ isolate->InitWithSnapshot(&read_only_deserializer, &startup_deserializer);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = startup_data.length();
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 04e575fed6..f5b35b174e 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -38,7 +38,7 @@ class SnapshotByteSource final {
void Advance(int by) { position_ += by; }
- void CopyRaw(byte* to, int number_of_bytes) {
+ void CopyRaw(void* to, int number_of_bytes) {
memcpy(to, data_ + position_, number_of_bytes);
position_ += number_of_bytes;
}
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 582c105ccf..65def345ce 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -7,8 +7,8 @@
#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/heap/heap-inl.h"
-#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/snapshot.h"
+#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -16,10 +16,6 @@ namespace internal {
void StartupDeserializer::DeserializeInto(Isolate* isolate) {
Initialize(isolate);
- ReadOnlyDeserializer read_only_deserializer(read_only_data_);
- read_only_deserializer.SetRehashability(can_rehash());
- read_only_deserializer.DeserializeInto(isolate);
-
if (!allocator()->ReserveSpace()) {
V8::FatalProcessOutOfMemory(isolate, "StartupDeserializer");
}
@@ -63,8 +59,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
LogNewMapEvents();
if (FLAG_rehash_snapshot && can_rehash()) {
- isolate->heap()->InitializeHashSeed();
- read_only_deserializer.RehashHeap();
+ // Hash seed was initialized in ReadOnlyDeserializer.
Rehash();
}
}
@@ -77,7 +72,7 @@ void StartupDeserializer::FlushICache() {
DCHECK(!deserializing_user_code());
// The entire isolate is newly deserialized. Simply flush all code pages.
for (Page* p : *isolate()->heap()->code_space()) {
- Assembler::FlushICache(p->area_start(), p->area_end() - p->area_start());
+ FlushInstructionCache(p->area_start(), p->area_end() - p->area_start());
}
}
diff --git a/deps/v8/src/snapshot/startup-deserializer.h b/deps/v8/src/snapshot/startup-deserializer.h
index cfe89f01e2..375e93054e 100644
--- a/deps/v8/src/snapshot/startup-deserializer.h
+++ b/deps/v8/src/snapshot/startup-deserializer.h
@@ -14,9 +14,8 @@ namespace internal {
// Initializes an isolate with context-independent data from a given snapshot.
class StartupDeserializer final : public Deserializer {
public:
- StartupDeserializer(const SnapshotData* startup_data,
- const SnapshotData* read_only_data)
- : Deserializer(startup_data, false), read_only_data_(read_only_data) {}
+ explicit StartupDeserializer(const SnapshotData* startup_data)
+ : Deserializer(startup_data, false) {}
// Deserialize the snapshot into an empty heap.
void DeserializeInto(Isolate* isolate);
@@ -24,8 +23,6 @@ class StartupDeserializer final : public Deserializer {
private:
void FlushICache();
void LogNewMapEvents();
-
- const SnapshotData* read_only_data_;
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 2e64423ea5..79b6fe5b72 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -7,7 +7,9 @@
#include "src/api.h"
#include "src/code-tracer.h"
#include "src/contexts.h"
+#include "src/deoptimizer.h"
#include "src/global-handles.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/slots.h"
@@ -30,42 +32,53 @@ StartupSerializer::~StartupSerializer() {
OutputStatistics("StartupSerializer");
}
+#ifdef DEBUG
namespace {
-// Due to how we currently create the embedded blob, we may encounter both
-// off-heap trampolines and old, outdated full Code objects during
-// serialization. This ensures that we only serialize the canonical version of
-// each builtin.
-// See also CreateOffHeapTrampolines().
-HeapObject MaybeCanonicalizeBuiltin(Isolate* isolate, HeapObject obj) {
- if (!obj->IsCode()) return obj;
+bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
+ if (!obj->IsCode()) return false;
- const int builtin_index = Code::cast(obj)->builtin_index();
- if (!Builtins::IsBuiltinId(builtin_index)) return obj;
+ Code code = Code::cast(obj);
- return isolate->builtins()->builtin(builtin_index);
+ // TODO(v8:8768): Deopt entry code should not be serialized.
+ if (code->kind() == Code::STUB && isolate->deoptimizer_data() != nullptr) {
+ if (isolate->deoptimizer_data()->IsDeoptEntryCode(code)) return false;
+ }
+
+ if (code->kind() == Code::REGEXP) return false;
+ if (!code->is_builtin()) return true;
+ if (!FLAG_embedded_builtins) return false;
+ if (code->is_off_heap_trampoline()) return false;
+
+ // An on-heap builtin. We only expect this for the interpreter entry
+ // trampoline copy stored on the root list and transitively called builtins.
+ // See Heap::interpreter_entry_trampoline_for_profiling.
+
+ switch (code->builtin_index()) {
+ case Builtins::kAbort:
+ case Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
+ case Builtins::kInterpreterEntryTrampoline:
+ case Builtins::kRecordWrite:
+ return false;
+ default:
+ return true;
+ }
+
+ UNREACHABLE();
}
} // namespace
+#endif // DEBUG
-void StartupSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
+void StartupSerializer::SerializeObject(HeapObject obj) {
DCHECK(!obj->IsJSFunction());
+ DCHECK(!IsUnexpectedCodeObject(isolate(), obj));
- // TODO(jgruber): Remove canonicalization once off-heap trampoline creation
- // moves to Isolate::Init().
- obj = MaybeCanonicalizeBuiltin(isolate(), obj);
-
- if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- if (IsRootAndHasBeenSerialized(obj) &&
- SerializeRoot(obj, how_to_code, where_to_point, skip))
- return;
- if (SerializeUsingReadOnlyObjectCache(&sink_, obj, how_to_code,
- where_to_point, skip))
- return;
- if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
+ if (SerializeHotObject(obj)) return;
+ if (IsRootAndHasBeenSerialized(obj) && SerializeRoot(obj)) return;
+ if (SerializeUsingReadOnlyObjectCache(&sink_, obj)) return;
+ if (SerializeBackReference(obj)) return;
- FlushSkip(skip);
bool use_simulator = false;
#ifdef USE_SIMULATOR
use_simulator = true;
@@ -98,9 +111,8 @@ void StartupSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
CheckRehashability(obj);
// Object has not yet been serialized. Serialize it here.
- DCHECK(!isolate()->heap()->read_only_space()->Contains(obj));
- ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
- where_to_point);
+ DCHECK(!isolate()->heap()->InReadOnlySpace(obj));
+ ObjectSerializer object_serializer(this, obj, &sink_);
object_serializer.Serialize();
}
@@ -143,20 +155,14 @@ SerializedHandleChecker::SerializedHandleChecker(Isolate* isolate,
}
bool StartupSerializer::SerializeUsingReadOnlyObjectCache(
- SnapshotByteSink* sink, HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- return read_only_serializer_->SerializeUsingReadOnlyObjectCache(
- sink, obj, how_to_code, where_to_point, skip);
+ SnapshotByteSink* sink, HeapObject obj) {
+ return read_only_serializer_->SerializeUsingReadOnlyObjectCache(sink, obj);
}
void StartupSerializer::SerializeUsingPartialSnapshotCache(
- SnapshotByteSink* sink, HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- FlushSkip(sink, skip);
-
+ SnapshotByteSink* sink, HeapObject obj) {
int cache_index = SerializeInObjectCache(obj);
- sink->Put(kPartialSnapshotCache + how_to_code + where_to_point,
- "PartialSnapshotCache");
+ sink->Put(kPartialSnapshotCache, "PartialSnapshotCache");
sink->PutInt(cache_index, "partial_snapshot_cache_index");
}
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 31c3081103..13638eae5e 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -33,20 +33,16 @@ class StartupSerializer : public RootsSerializer {
// read-only object cache if not already present and emits a
// ReadOnlyObjectCache bytecode into |sink|. Returns whether this was
// successful.
- bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink, HeapObject obj,
- HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
+ bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink,
+ HeapObject obj);
// Adds |obj| to the partial snapshot object cache if not already present and
// emits a PartialSnapshotCache bytecode into |sink|.
void SerializeUsingPartialSnapshotCache(SnapshotByteSink* sink,
- HeapObject obj, HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip);
+ HeapObject obj);
private:
- void SerializeObject(HeapObject o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeObject(HeapObject o) override;
ReadOnlySerializer* read_only_serializer_;
std::vector<AccessorInfo> accessor_infos_;
diff --git a/deps/v8/src/source-position-table.cc b/deps/v8/src/source-position-table.cc
index 22ab4dfe11..11925627d6 100644
--- a/deps/v8/src/source-position-table.cc
+++ b/deps/v8/src/source-position-table.cc
@@ -48,11 +48,12 @@ void SubtractFromEntry(PositionTableEntry& value,
// Helper: Encode an integer.
template <typename T>
void EncodeInt(std::vector<byte>& bytes, T value) {
+ typedef typename std::make_unsigned<T>::type unsigned_type;
// Zig-zag encoding.
static const int kShift = sizeof(T) * kBitsPerByte - 1;
- value = ((value << 1) ^ (value >> kShift));
+ value = ((static_cast<unsigned_type>(value) << 1) ^ (value >> kShift));
DCHECK_GE(value, 0);
- auto encoded = static_cast<typename std::make_unsigned<T>::type>(value);
+ unsigned_type encoded = static_cast<unsigned_type>(value);
bool more;
do {
more = encoded > ValueBits::kMax;
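The unsigned_type cast matters because left-shifting a negative value is
undefined behavior; a standalone sketch of the zig-zag step (illustrative
values, not V8 code) shows the mapping it computes:

    #include <cassert>
    #include <cstdint>
    #include <type_traits>

    // Zig-zag encoding maps signed to unsigned so small magnitudes stay
    // small: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
    template <typename T>
    typename std::make_unsigned<T>::type ZigZag(T value) {
      using U = typename std::make_unsigned<T>::type;
      constexpr int kShift = sizeof(T) * 8 - 1;
      // The left shift happens in the unsigned domain (no UB); the right
      // shift smears the sign bit, assuming arithmetic shift as V8 does.
      return (static_cast<U>(value) << 1) ^ static_cast<U>(value >> kShift);
    }

    int main() {
      assert(ZigZag<int32_t>(0) == 0u);
      assert(ZigZag<int32_t>(-1) == 1u);
      assert(ZigZag<int32_t>(1) == 2u);
      assert(ZigZag<int32_t>(-2) == 3u);
      assert(ZigZag<int32_t>(2) == 4u);
      return 0;
    }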
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/source-position-table.h
index 8f676dc0f3..d59172d87f 100644
--- a/deps/v8/src/source-position-table.h
+++ b/deps/v8/src/source-position-table.h
@@ -44,11 +44,11 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
Handle<ByteArray> ToSourcePositionTable(Isolate* isolate);
OwnedVector<byte> ToSourcePositionTableVector();
+ inline bool Omit() const { return mode_ == OMIT_SOURCE_POSITIONS; }
+
private:
void AddEntry(const PositionTableEntry& entry);
- inline bool Omit() const { return mode_ == OMIT_SOURCE_POSITIONS; }
-
RecordingMode mode_;
std::vector<byte> bytes_;
#ifdef ENABLE_SLOW_DCHECKS
@@ -119,7 +119,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
int index_ = 0;
PositionTableEntry current_;
IterationFilter filter_;
- DISALLOW_HEAP_ALLOCATION(no_gc);
+ DISALLOW_HEAP_ALLOCATION(no_gc)
};
} // namespace internal
diff --git a/deps/v8/src/string-builder-inl.h b/deps/v8/src/string-builder-inl.h
index 0c3f83b2d4..8442bbd455 100644
--- a/deps/v8/src/string-builder-inl.h
+++ b/deps/v8/src/string-builder-inl.h
@@ -65,6 +65,7 @@ class ReplacementStringBuilder {
ReplacementStringBuilder(Heap* heap, Handle<String> subject,
int estimated_part_count);
+ // Caution: Callers must ensure the builder has enough capacity.
static inline void AddSubjectSlice(FixedArrayBuilder* builder, int from,
int to) {
DCHECK_GE(from, 0);
@@ -82,9 +83,8 @@ class ReplacementStringBuilder {
}
}
- void EnsureCapacity(int elements);
-
void AddSubjectSlice(int from, int to) {
+ EnsureCapacity(2); // Subject slices are encoded with up to two smis.
AddSubjectSlice(&array_builder_, from, to);
IncrementCharacterCount(to - from);
}
@@ -103,7 +103,8 @@ class ReplacementStringBuilder {
}
private:
- void AddElement(Object element);
+ void AddElement(Handle<Object> element);
+ void EnsureCapacity(int elements);
Heap* heap_;
FixedArrayBuilder array_builder_;
@@ -206,7 +207,7 @@ class IncrementalStringBuilder {
private:
DestChar* start_;
DestChar* cursor_;
- DISALLOW_HEAP_ALLOCATION(no_gc_);
+ DISALLOW_HEAP_ALLOCATION(no_gc_)
};
template <typename DestChar>
diff --git a/deps/v8/src/string-builder.cc b/deps/v8/src/string-builder.cc
index 57571a11a1..0c48681bf7 100644
--- a/deps/v8/src/string-builder.cc
+++ b/deps/v8/src/string-builder.cc
@@ -88,7 +88,7 @@ int StringBuilderConcatLength(int special_length, FixedArray fixed_array,
String element = String::cast(elt);
int element_length = element->length();
increment = element_length;
- if (*one_byte && !element->HasOnlyOneByteChars()) {
+ if (*one_byte && !element->IsOneByteRepresentation()) {
*one_byte = false;
}
} else {
@@ -141,7 +141,6 @@ void FixedArrayBuilder::EnsureCapacity(Isolate* isolate, int elements) {
void FixedArrayBuilder::Add(Object value) {
DCHECK(!value->IsSmi());
- DCHECK(length_ < capacity());
array_->set(length_, value);
length_++;
has_non_smi_elements_ = true;
@@ -149,7 +148,6 @@ void FixedArrayBuilder::Add(Object value) {
void FixedArrayBuilder::Add(Smi value) {
DCHECK(value->IsSmi());
- DCHECK(length_ < capacity());
array_->set(length_, value);
length_++;
}
@@ -166,7 +164,7 @@ ReplacementStringBuilder::ReplacementStringBuilder(Heap* heap,
Handle<String> subject,
int estimated_part_count)
: heap_(heap),
- array_builder_(heap->isolate(), estimated_part_count),
+ array_builder_(Isolate::FromHeap(heap), estimated_part_count),
subject_(subject),
character_count_(0),
is_one_byte_(subject->IsOneByteRepresentation()) {
@@ -176,13 +174,13 @@ ReplacementStringBuilder::ReplacementStringBuilder(Heap* heap,
}
void ReplacementStringBuilder::EnsureCapacity(int elements) {
- array_builder_.EnsureCapacity(heap_->isolate(), elements);
+ array_builder_.EnsureCapacity(Isolate::FromHeap(heap_), elements);
}
void ReplacementStringBuilder::AddString(Handle<String> string) {
int length = string->length();
DCHECK_GT(length, 0);
- AddElement(*string);
+ AddElement(string);
if (!string->IsOneByteRepresentation()) {
is_one_byte_ = false;
}
@@ -190,7 +188,7 @@ void ReplacementStringBuilder::AddString(Handle<String> string) {
}
MaybeHandle<String> ReplacementStringBuilder::ToString() {
- Isolate* isolate = heap_->isolate();
+ Isolate* isolate = Isolate::FromHeap(heap_);
if (array_builder_.length() == 0) {
return isolate->factory()->empty_string();
}
@@ -223,10 +221,11 @@ MaybeHandle<String> ReplacementStringBuilder::ToString() {
return joined_string;
}
-void ReplacementStringBuilder::AddElement(Object element) {
+void ReplacementStringBuilder::AddElement(Handle<Object> element) {
DCHECK(element->IsSmi() || element->IsString());
- DCHECK(array_builder_.capacity() > array_builder_.length());
- array_builder_.Add(element);
+ EnsureCapacity(1);
+ DisallowHeapAllocation no_gc;
+ array_builder_.Add(*element);
}
IncrementalStringBuilder::IncrementalStringBuilder(Isolate* isolate)
diff --git a/deps/v8/src/string-case.cc b/deps/v8/src/string-case.cc
index f1a7f9e979..e9004a37ee 100644
--- a/deps/v8/src/string-case.cc
+++ b/deps/v8/src/string-case.cc
@@ -12,6 +12,11 @@
namespace v8 {
namespace internal {
+// FastAsciiConvert tries to do character processing on a word_t basis if
+// source and destination strings are properly aligned. Natural alignment of
+ // string data depends on kTaggedSize, so we define word_t via Tagged_t.
+using word_t = std::make_unsigned<Tagged_t>::type;
+
#ifdef DEBUG
bool CheckFastAsciiConvert(char* dst, const char* src, int length, bool changed,
bool is_to_lower) {
@@ -31,8 +36,8 @@ bool CheckFastAsciiConvert(char* dst, const char* src, int length, bool changed,
}
#endif
-const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
-const uintptr_t kAsciiMask = kOneInEveryByte << 7;
+const word_t kOneInEveryByte = static_cast<word_t>(kUintptrAllBitsSet) / 0xFF;
+const word_t kAsciiMask = kOneInEveryByte << 7;
// Given a word and two range boundaries returns a word with high bit
// set in every byte iff the corresponding input byte was strictly in
@@ -41,14 +46,14 @@ const uintptr_t kAsciiMask = kOneInEveryByte << 7;
// boundaries are statically known.
// Requires: all bytes in the input word and the boundaries must be
// ASCII (less than 0x7F).
-static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
+static inline word_t AsciiRangeMask(word_t w, char m, char n) {
// Use strict inequalities since in edge cases the function could be
// further simplified.
DCHECK(0 < m && m < n);
// Has high bit set in every w byte less than n.
- uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
+ word_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
// Has high bit set in every w byte greater than m.
- uintptr_t tmp2 = w + kOneInEveryByte * (0x7F - m);
+ word_t tmp2 = w + kOneInEveryByte * (0x7F - m);
return (tmp1 & tmp2 & (kOneInEveryByte * 0x80));
}
@@ -70,34 +75,34 @@ int FastAsciiConvert(char* dst, const char* src, int length,
const char* const limit = src + length;
// dst is newly allocated and always aligned.
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(dst), sizeof(uintptr_t)));
+ DCHECK(IsAligned(reinterpret_cast<Address>(dst), sizeof(word_t)));
// Only attempt processing one word at a time if src is also aligned.
- if (IsAligned(reinterpret_cast<intptr_t>(src), sizeof(uintptr_t))) {
+ if (IsAligned(reinterpret_cast<Address>(src), sizeof(word_t))) {
// Process the prefix of the input that requires no conversion one aligned
// (machine) word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+ while (src <= limit - sizeof(word_t)) {
+ const word_t w = *reinterpret_cast<const word_t*>(src);
if ((w & kAsciiMask) != 0) return static_cast<int>(src - saved_src);
if (AsciiRangeMask(w, lo, hi) != 0) {
changed = true;
break;
}
- *reinterpret_cast<uintptr_t*>(dst) = w;
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
+ *reinterpret_cast<word_t*>(dst) = w;
+ src += sizeof(word_t);
+ dst += sizeof(word_t);
}
// Process the remainder of the input performing conversion when
// required one word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+ while (src <= limit - sizeof(word_t)) {
+ const word_t w = *reinterpret_cast<const word_t*>(src);
if ((w & kAsciiMask) != 0) return static_cast<int>(src - saved_src);
- uintptr_t m = AsciiRangeMask(w, lo, hi);
+ word_t m = AsciiRangeMask(w, lo, hi);
// The mask has high (7th) bit set in every byte that needs
// conversion and we know that the distance between cases is
// 1 << 5.
- *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
+ *reinterpret_cast<word_t*>(dst) = w ^ (m >> 2);
+ src += sizeof(word_t);
+ dst += sizeof(word_t);
}
}
// Process the last few bytes of the input (or the whole input if
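To see why `w ^ (m >> 2)` performs the case conversion: AsciiRangeMask leaves
0x80 in exactly the bytes that need converting, and shifting that bit right by
two yields 0x20, the ASCII distance between cases. A minimal sketch with
word_t pinned to uint64_t (an assumption for the example; above it is derived
from Tagged_t):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    using word_t = uint64_t;
    constexpr word_t kOneInEveryByte = ~word_t{0} / 0xFF;  // 0x0101...01

    // High bit set in every byte of w strictly between m and n; all bytes
    // and both boundaries must be ASCII.
    word_t AsciiRangeMask(word_t w, char m, char n) {
      word_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
      word_t tmp2 = w + kOneInEveryByte * (0x7F - m);
      return tmp1 & tmp2 & (kOneInEveryByte * 0x80);
    }

    int main() {
      char buf[8];
      std::memcpy(buf, "Hello, W", 8);
      word_t w;
      std::memcpy(&w, buf, 8);
      word_t m = AsciiRangeMask(w, 'A' - 1, 'Z' + 1);  // Marks 'H' and 'W'.
      w ^= (m >> 2);  // 0x80 per marked byte becomes the 0x20 case bit.
      std::memcpy(buf, &w, 8);
      assert(std::memcmp(buf, "hello, w", 8) == 0);
      return 0;
    }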
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index ea73139813..945a113704 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -11,6 +11,7 @@
#include "src/objects-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/prototype.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
@@ -189,17 +190,18 @@ void StringStream::PrintObject(Object o) {
if (o->IsHeapObject() && object_print_mode_ == kPrintObjectVerbose) {
// TODO(delphick): Consider whether we can get the isolate without using
// TLS.
+ Isolate* isolate = Isolate::Current();
DebugObjectCache* debug_object_cache =
- Isolate::Current()->string_stream_debug_object_cache();
+ isolate->string_stream_debug_object_cache();
for (size_t i = 0; i < debug_object_cache->size(); i++) {
- if ((*debug_object_cache)[i] == o) {
+ if (*(*debug_object_cache)[i] == o) {
Add("#%d#", static_cast<int>(i));
return;
}
}
if (debug_object_cache->size() < kMentionedObjectCacheMaxSize) {
Add("#%d#", static_cast<int>(debug_object_cache->size()));
- debug_object_cache->push_back(HeapObject::cast(o));
+ debug_object_cache->push_back(handle(HeapObject::cast(o), isolate));
} else {
Add("@%p", o);
}
@@ -363,7 +365,7 @@ void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
isolate->string_stream_debug_object_cache();
Add("==== Key ============================================\n\n");
for (size_t i = 0; i < debug_object_cache->size(); i++) {
- HeapObject printee = (*debug_object_cache)[i];
+ HeapObject printee = *(*debug_object_cache)[i];
Add(" #%d# %p: ", static_cast<int>(i),
reinterpret_cast<void*>(printee->ptr()));
printee->ShortPrint(this);
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index dea31e0b3d..de4fd9e61d 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -6,6 +6,7 @@
#define V8_STRING_STREAM_H_
#include "src/allocation.h"
+#include "src/base/small-vector.h"
#include "src/handles.h"
#include "src/objects/heap-object.h"
#include "src/vector.h"
@@ -56,6 +57,34 @@ class FixedStringAllocator final : public StringAllocator {
DISALLOW_COPY_AND_ASSIGN(FixedStringAllocator);
};
+template <std::size_t kInlineSize>
+class SmallStringOptimizedAllocator final : public StringAllocator {
+ public:
+ typedef base::SmallVector<char, kInlineSize> SmallVector;
+
+ explicit SmallStringOptimizedAllocator(SmallVector* vector) V8_NOEXCEPT
+ : vector_(vector) {}
+
+ char* allocate(unsigned bytes) override {
+ vector_->resize_no_init(bytes);
+ return vector_->data();
+ }
+
+ char* grow(unsigned* bytes) override {
+ unsigned new_bytes = *bytes * 2;
+ // Check for overflow.
+ if (new_bytes <= *bytes) {
+ return vector_->data();
+ }
+ vector_->resize_no_init(new_bytes);
+ *bytes = new_bytes;
+ return vector_->data();
+ }
+
+ private:
+ SmallVector* vector_;
+};
+
class StringStream final {
class FmtElm final {
public:
@@ -105,8 +134,8 @@ class StringStream final {
public:
enum ObjectPrintMode { kPrintObjectConcise, kPrintObjectVerbose };
- StringStream(StringAllocator* allocator,
- ObjectPrintMode object_print_mode = kPrintObjectVerbose)
+ explicit StringStream(StringAllocator* allocator,
+ ObjectPrintMode object_print_mode = kPrintObjectVerbose)
: allocator_(allocator),
object_print_mode_(object_print_mode),
capacity_(kInitialCapacity),
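The grow() contract above is worth spelling out: on unsigned overflow the
allocator keeps the old buffer and simply stops growing. A standalone sketch
of that pattern, with std::vector standing in for base::SmallVector (an
assumption for the example):

    #include <cassert>
    #include <vector>

    // Double the backing store; on unsigned overflow leave *bytes unchanged
    // so the caller sees that no additional space became available.
    char* Grow(std::vector<char>* backing, unsigned* bytes) {
      unsigned new_bytes = *bytes * 2;
      if (new_bytes <= *bytes) return backing->data();  // 2x wrapped around.
      backing->resize(new_bytes);
      *bytes = new_bytes;
      return backing->data();
    }

    int main() {
      std::vector<char> backing(16);
      unsigned bytes = 16;
      Grow(&backing, &bytes);
      assert(bytes == 32 && backing.size() == 32);
      // Near UINT_MAX the doubled size wraps and is caught by the check.
      unsigned huge = 0x80000000u;
      assert(huge * 2u == 0u);
      return 0;
    }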
diff --git a/deps/v8/src/thread-id.cc b/deps/v8/src/thread-id.cc
index 3b89f16ef6..a0585b3a41 100644
--- a/deps/v8/src/thread-id.cc
+++ b/deps/v8/src/thread-id.cc
@@ -9,12 +9,12 @@
namespace v8 {
namespace internal {
-base::Atomic32 ThreadId::highest_thread_id_ = 0;
-
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(base::Thread::LocalStorageKey, GetThreadIdKey,
- base::Thread::CreateThreadLocalKey());
+ base::Thread::CreateThreadLocalKey())
+
+std::atomic<int> next_thread_id{1};
} // namespace
@@ -26,10 +26,12 @@ ThreadId ThreadId::TryGetCurrent() {
// static
int ThreadId::GetCurrentThreadId() {
- int thread_id = base::Thread::GetThreadLocalInt(*GetThreadIdKey());
+ auto key = *GetThreadIdKey();
+ int thread_id = base::Thread::GetThreadLocalInt(key);
if (thread_id == 0) {
- thread_id = AllocateThreadId();
- base::Thread::SetThreadLocalInt(*GetThreadIdKey(), thread_id);
+ thread_id = next_thread_id.fetch_add(1);
+ CHECK_LE(1, thread_id);
+ base::Thread::SetThreadLocalInt(key, thread_id);
}
return thread_id;
}
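A minimal standalone sketch of the new allocation scheme: a process-wide
std::atomic counter starting at 1, cached per thread so each thread pays the
fetch_add exactly once (plain thread_local here instead of V8's explicit
thread-local-storage key):

    #include <atomic>
    #include <cassert>
    #include <thread>

    std::atomic<int> next_thread_id{1};

    int GetCurrentThreadId() {
      thread_local int thread_id = 0;  // 0 means "not yet assigned".
      if (thread_id == 0) thread_id = next_thread_id.fetch_add(1);
      return thread_id;
    }

    int main() {
      int main_id = GetCurrentThreadId();
      assert(main_id == GetCurrentThreadId());  // Stable within a thread.
      int other_id = 0;
      std::thread t([&] { other_id = GetCurrentThreadId(); });
      t.join();
      assert(other_id != main_id);  // Unique across threads.
      return 0;
    }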
diff --git a/deps/v8/src/thread-id.h b/deps/v8/src/thread-id.h
index 437109b839..3eef7d5876 100644
--- a/deps/v8/src/thread-id.h
+++ b/deps/v8/src/thread-id.h
@@ -5,7 +5,9 @@
#ifndef V8_THREAD_ID_H_
#define V8_THREAD_ID_H_
-#include "src/base/atomicops.h"
+#include <atomic>
+
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
@@ -14,10 +16,12 @@ namespace internal {
class ThreadId {
public:
// Creates an invalid ThreadId.
- ThreadId() { base::Relaxed_Store(&id_, kInvalidId); }
+ ThreadId() : ThreadId(kInvalidId) {}
+
+ ThreadId(const ThreadId& other) V8_NOEXCEPT : ThreadId(other.ToInteger()) {}
ThreadId& operator=(const ThreadId& other) V8_NOEXCEPT {
- base::Relaxed_Store(&id_, base::Relaxed_Load(&other.id_));
+ id_.store(other.ToInteger(), std::memory_order_relaxed);
return *this;
}
@@ -34,37 +38,28 @@ class ThreadId {
// Compares ThreadIds for equality.
V8_INLINE bool Equals(const ThreadId& other) const {
- return base::Relaxed_Load(&id_) == base::Relaxed_Load(&other.id_);
+ return ToInteger() == other.ToInteger();
}
// Checks whether this ThreadId refers to any thread.
- V8_INLINE bool IsValid() const {
- return base::Relaxed_Load(&id_) != kInvalidId;
- }
+ V8_INLINE bool IsValid() const { return ToInteger() != kInvalidId; }
// Converts ThreadId to an integer representation
// (required for public API: V8::V8::GetCurrentThreadId).
- int ToInteger() const { return static_cast<int>(base::Relaxed_Load(&id_)); }
+ int ToInteger() const { return id_.load(std::memory_order_relaxed); }
// Converts ThreadId to an integer representation
// (required for public API: V8::V8::TerminateExecution).
static ThreadId FromInteger(int id) { return ThreadId(id); }
private:
- static const int kInvalidId = -1;
+ static constexpr int kInvalidId = -1;
- explicit ThreadId(int id) { base::Relaxed_Store(&id_, id); }
-
- static int AllocateThreadId() {
- int new_id = base::Relaxed_AtomicIncrement(&highest_thread_id_, 1);
- return new_id;
- }
+ explicit ThreadId(int id) { id_.store(id, std::memory_order_relaxed); }
static int GetCurrentThreadId();
- base::Atomic32 id_;
-
- static base::Atomic32 highest_thread_id_;
+ std::atomic<int> id_;
};
} // namespace internal
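[Editor's sketch] One reason ThreadId needs the hand-written copy constructor and assignment seen above: std::atomic<T> is neither copy-constructible nor copy-assignable, so a copyable wrapper must route copies through load()/store(). A minimal sketch of that shape (names are illustrative, not V8's):

#include <atomic>

class CopyableAtomicInt {
 public:
  CopyableAtomicInt() = default;
  explicit CopyableAtomicInt(int v) { v_.store(v, std::memory_order_relaxed); }
  CopyableAtomicInt(const CopyableAtomicInt& other)
      : CopyableAtomicInt(other.value()) {}
  CopyableAtomicInt& operator=(const CopyableAtomicInt& other) {
    v_.store(other.value(), std::memory_order_relaxed);
    return *this;
  }
  int value() const { return v_.load(std::memory_order_relaxed); }

 private:
  std::atomic<int> v_{0};
};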
diff --git a/deps/v8/src/thread-local-top.cc b/deps/v8/src/thread-local-top.cc
new file mode 100644
index 0000000000..10cd6ea3a9
--- /dev/null
+++ b/deps/v8/src/thread-local-top.cc
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/thread-local-top.h"
+#include "src/isolate.h"
+#include "src/simulator.h"
+#include "src/trap-handler/trap-handler.h"
+
+namespace v8 {
+namespace internal {
+
+void ThreadLocalTop::Initialize(Isolate* isolate) {
+ *this = ThreadLocalTop();
+ isolate_ = isolate;
+#ifdef USE_SIMULATOR
+ simulator_ = Simulator::current(isolate);
+#endif
+ thread_id_ = ThreadId::Current();
+ thread_in_wasm_flag_address_ = reinterpret_cast<Address>(
+ trap_handler::GetThreadInWasmThreadLocalAddress());
+}
+
+void ThreadLocalTop::Free() {
+ // Match unmatched PushPromise calls.
+ while (promise_on_stack_) isolate_->PopPromise();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/thread-local-top.h b/deps/v8/src/thread-local-top.h
new file mode 100644
index 0000000000..dd99221537
--- /dev/null
+++ b/deps/v8/src/thread-local-top.h
@@ -0,0 +1,122 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_THREAD_LOCAL_TOP_H_
+#define V8_THREAD_LOCAL_TOP_H_
+
+#include "src/contexts.h"
+#include "src/globals.h"
+#include "src/thread-id.h"
+
+namespace v8 {
+
+class TryCatch;
+
+namespace internal {
+
+class ExternalCallbackScope;
+class Isolate;
+class PromiseOnStack;
+class Simulator;
+
+class ThreadLocalTop {
+ public:
+ // TODO(all): This is not particularly beautiful. We should probably
+ // refactor this to really consist of just Addresses and 32-bit
+ // integer fields.
+ static constexpr uint32_t kSizeInBytes = 23 * kSystemPointerSize;
+
+ // Does early low-level initialization that does not depend on the
+ // isolate being present.
+ ThreadLocalTop() = default;
+
+ // Initialize the thread data.
+ void Initialize(Isolate*);
+
+ // The top C++ try catch handler or nullptr if none are registered.
+ //
+ // This field is not guaranteed to hold an address that can be
+ // used for comparison with addresses into the JS stack. If such
+ // an address is needed, use try_catch_handler_address.
+ v8::TryCatch* try_catch_handler_ = nullptr;
+
+ // Get the address of the top C++ try catch handler or nullptr if
+ // none are registered.
+ //
+ // This method always returns an address that can be compared to
+ // pointers into the JavaScript stack. When running on actual
+ // hardware, try_catch_handler_address and TryCatchHandler return
+ // the same pointer. When running on a simulator with a separate JS
+ // stack, try_catch_handler_address returns a JS stack address that
+ // corresponds to the place on the JS stack where the C++ handler
+ // would have been if the stack were not separate.
+ Address try_catch_handler_address() {
+ return reinterpret_cast<Address>(
+ v8::TryCatch::JSStackComparableAddress(try_catch_handler_));
+ }
+
+ void Free();
+
+ Isolate* isolate_ = nullptr;
+ // The context where the current execution method is created and which is
+ // used for variable lookups.
+ // TODO(3770): This field is read/written from generated code, so it would
+ // be cleaner to make it an "Address raw_context_", and construct a Context
+ // object in the getter. Same for {pending_handler_context_} below. In the
+ // meantime, assert that the memory layout is the same.
+ STATIC_ASSERT(sizeof(Context) == kSystemPointerSize);
+ Context context_;
+ ThreadId thread_id_ = ThreadId::Invalid();
+ Object pending_exception_;
+
+ // Communication channel between Isolate::FindHandler and the CEntry.
+ Context pending_handler_context_;
+ Address pending_handler_entrypoint_ = kNullAddress;
+ Address pending_handler_constant_pool_ = kNullAddress;
+ Address pending_handler_fp_ = kNullAddress;
+ Address pending_handler_sp_ = kNullAddress;
+
+ // Communication channel between Isolate::Throw and message consumers.
+ Object pending_message_obj_;
+ bool rethrowing_message_ = false;
+
+ // Use a separate value for scheduled exceptions to preserve the
+ // invariants that hold about pending_exception. We may want to
+ // unify them later.
+ bool external_caught_exception_ = false;
+ Object scheduled_exception_;
+
+ // Stack.
+ // The frame pointer of the top c entry frame.
+ Address c_entry_fp_ = kNullAddress;
+ // Try-blocks are chained through the stack.
+ Address handler_ = kNullAddress;
+ // C function that was called at c entry.
+ Address c_function_ = kNullAddress;
+
+ // Throwing an exception may cause a Promise rejection. For this purpose
+ // we keep track of a stack of nested promises and the corresponding
+ // try-catch handlers.
+ PromiseOnStack* promise_on_stack_ = nullptr;
+
+ // Simulator field is always present to get predictable layout.
+ Simulator* simulator_ = nullptr;
+
+ // The stack pointer of the bottom JS entry frame.
+ Address js_entry_sp_ = kNullAddress;
+ // The external callback we're currently in.
+ ExternalCallbackScope* external_callback_scope_ = nullptr;
+ StateTag current_vm_state_ = EXTERNAL;
+
+ // Call back function to report unsafe JS accesses.
+ v8::FailedAccessCheckCallback failed_access_check_callback_ = nullptr;
+
+ // Address of the thread-local "thread in wasm" flag.
+ Address thread_in_wasm_flag_address_ = kNullAddress;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_THREAD_LOCAL_TOP_H_
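[Editor's sketch] kSizeInBytes above pins ThreadLocalTop to a known size because generated code reaches into the struct by raw offset. A sketch of that kind of layout guard on hypothetical types (not V8's): a static_assert turns silent layout drift into a compile error.

#include <cstdint>

struct FrameData {
  void* isolate;
  void* context;
  std::uintptr_t c_entry_fp;
};

// Adding, removing, or reordering a member breaks this assert instead of
// silently breaking every offset-based access in generated code.
static_assert(sizeof(FrameData) == 3 * sizeof(void*),
              "FrameData layout changed; update generated-code offsets");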
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index 4a7eecb16c..fc4e298a10 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -87,6 +87,7 @@ namespace torque {
AST_STATEMENT_NODE_KIND_LIST(V) \
AST_DECLARATION_NODE_KIND_LIST(V) \
AST_CALLABLE_NODE_KIND_LIST(V) \
+ V(Identifier) \
V(LabelBlock)
struct AstNode {
@@ -194,21 +195,29 @@ class Ast {
static const char* const kThisParameterName = "this";
+// An Identifier is a string with a SourcePosition attached.
+struct Identifier : AstNode {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(Identifier)
+ Identifier(SourcePosition pos, std::string identifier)
+ : AstNode(kKind, pos), value(std::move(identifier)) {}
+ std::string value;
+};
+
struct IdentifierExpression : LocationExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(IdentifierExpression)
IdentifierExpression(SourcePosition pos,
std::vector<std::string> namespace_qualification,
- std::string name, std::vector<TypeExpression*> args = {})
+ Identifier* name, std::vector<TypeExpression*> args = {})
: LocationExpression(kKind, pos),
namespace_qualification(std::move(namespace_qualification)),
- name(std::move(name)),
+ name(name),
generic_arguments(std::move(args)) {}
- IdentifierExpression(SourcePosition pos, std::string name,
+ IdentifierExpression(SourcePosition pos, Identifier* name,
std::vector<TypeExpression*> args = {})
- : IdentifierExpression(pos, {}, std::move(name), std::move(args)) {}
- bool IsThis() const { return name == kThisParameterName; }
+ : IdentifierExpression(pos, {}, name, std::move(args)) {}
+ bool IsThis() const { return name->value == kThisParameterName; }
std::vector<std::string> namespace_qualification;
- std::string name;
+ Identifier* name;
std::vector<TypeExpression*> generic_arguments;
};
@@ -410,7 +419,7 @@ struct NewExpression : Expression {
};
struct ParameterList {
- std::vector<std::string> names;
+ std::vector<Identifier*> names;
std::vector<TypeExpression*> types;
size_t implicit_count;
bool has_varargs;
@@ -532,16 +541,16 @@ struct TailCallStatement : Statement {
struct VarDeclarationStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(VarDeclarationStatement)
VarDeclarationStatement(
- SourcePosition pos, bool const_qualified, std::string name,
+ SourcePosition pos, bool const_qualified, Identifier* name,
base::Optional<TypeExpression*> type,
base::Optional<Expression*> initializer = base::nullopt)
: Statement(kKind, pos),
const_qualified(const_qualified),
- name(std::move(name)),
+ name(name),
type(type),
initializer(initializer) {}
bool const_qualified;
- std::string name;
+ Identifier* name;
base::Optional<TypeExpression*> type;
base::Optional<Expression*> initializer;
};
@@ -657,17 +666,17 @@ struct BlockStatement : Statement {
struct TypeDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TypeDeclaration)
- TypeDeclaration(SourcePosition pos, std::string name, bool transient,
+ TypeDeclaration(SourcePosition pos, Identifier* name, bool transient,
base::Optional<std::string> extends,
base::Optional<std::string> generates,
base::Optional<std::string> constexpr_generates)
: Declaration(kKind, pos),
- name(std::move(name)),
+ name(name),
transient(transient),
extends(std::move(extends)),
generates(std::move(generates)),
constexpr_generates(std::move(constexpr_generates)) {}
- std::string name;
+ Identifier* name;
bool transient;
base::Optional<std::string> extends;
base::Optional<std::string> generates;
@@ -676,15 +685,15 @@ struct TypeDeclaration : Declaration {
struct TypeAliasDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TypeAliasDeclaration)
- TypeAliasDeclaration(SourcePosition pos, std::string name,
+ TypeAliasDeclaration(SourcePosition pos, Identifier* name,
TypeExpression* type)
- : Declaration(kKind, pos), name(std::move(name)), type(type) {}
- std::string name;
+ : Declaration(kKind, pos), name(name), type(type) {}
+ Identifier* name;
TypeExpression* type;
};
struct NameAndTypeExpression {
- std::string name;
+ Identifier* name;
TypeExpression* type;
};
@@ -694,6 +703,7 @@ struct StructFieldExpression {
struct ClassFieldExpression {
NameAndTypeExpression name_and_type;
+ base::Optional<std::string> index;
bool weak;
};
@@ -815,13 +825,13 @@ struct ExternalRuntimeDeclaration : CallableNode {
struct ConstDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ConstDeclaration)
- ConstDeclaration(SourcePosition pos, std::string name, TypeExpression* type,
+ ConstDeclaration(SourcePosition pos, Identifier* name, TypeExpression* type,
Expression* expression)
: Declaration(kKind, pos),
- name(std::move(name)),
+ name(name),
type(type),
expression(expression) {}
- std::string name;
+ Identifier* name;
TypeExpression* type;
Expression* expression;
};
@@ -838,14 +848,14 @@ struct StandardDeclaration : Declaration {
struct GenericDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(GenericDeclaration)
GenericDeclaration(SourcePosition pos, CallableNode* callable,
- std::vector<std::string> generic_parameters,
+ std::vector<Identifier*> generic_parameters,
base::Optional<Statement*> body = base::nullopt)
: Declaration(kKind, pos),
callable(callable),
generic_parameters(std::move(generic_parameters)),
body(body) {}
CallableNode* callable;
- std::vector<std::string> generic_parameters;
+ std::vector<Identifier*> generic_parameters;
base::Optional<Statement*> body;
};
@@ -872,47 +882,50 @@ struct SpecializationDeclaration : Declaration {
struct ExternConstDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternConstDeclaration)
- ExternConstDeclaration(SourcePosition pos, std::string name,
+ ExternConstDeclaration(SourcePosition pos, Identifier* name,
TypeExpression* type, std::string literal)
: Declaration(kKind, pos),
- name(std::move(name)),
+ name(name),
type(type),
literal(std::move(literal)) {}
- std::string name;
+ Identifier* name;
TypeExpression* type;
std::string literal;
};
struct StructDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StructDeclaration)
- StructDeclaration(SourcePosition pos, std::string name,
+ StructDeclaration(SourcePosition pos, Identifier* name,
std::vector<Declaration*> methods,
std::vector<StructFieldExpression> fields)
: Declaration(kKind, pos),
- name(std::move(name)),
+ name(name),
methods(std::move(methods)),
fields(std::move(fields)) {}
- std::string name;
+ Identifier* name;
std::vector<Declaration*> methods;
std::vector<StructFieldExpression> fields;
};
struct ClassDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ClassDeclaration)
- ClassDeclaration(SourcePosition pos, std::string name, bool transient,
- std::string super, base::Optional<std::string> generates,
+ ClassDeclaration(SourcePosition pos, Identifier* name, bool is_extern,
+ bool transient, base::Optional<std::string> super,
+ base::Optional<std::string> generates,
std::vector<Declaration*> methods,
std::vector<ClassFieldExpression> fields)
: Declaration(kKind, pos),
- name(std::move(name)),
+ name(name),
+ is_extern(is_extern),
transient(transient),
super(std::move(super)),
generates(std::move(generates)),
methods(std::move(methods)),
fields(std::move(fields)) {}
- std::string name;
+ Identifier* name;
+ bool is_extern;
bool transient;
- std::string super;
+ base::Optional<std::string> super;
base::Optional<std::string> generates;
std::vector<Declaration*> methods;
std::vector<ClassFieldExpression> fields;
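[Editor's sketch] The recurring ast.h change above swaps std::string names for Identifier* so that every name keeps its own SourcePosition. Reduced to a sketch with stand-in types (Torque's real SourcePosition also records the source file):

#include <string>
#include <utility>

struct SourcePos {
  int line = 0;
  int column = 0;
};

struct Identifier {
  SourcePos pos;
  std::string value;
  Identifier(SourcePos p, std::string v) : pos(p), value(std::move(v)) {}
};

// Consumers still compare by value, but can now point at the exact token:
inline bool IsThis(const Identifier& name) { return name.value == "this"; }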
diff --git a/deps/v8/src/torque/contextual.h b/deps/v8/src/torque/contextual.h
index 8fb02e1072..f854e9ae75 100644
--- a/deps/v8/src/torque/contextual.h
+++ b/deps/v8/src/torque/contextual.h
@@ -54,7 +54,7 @@ class ContextualVariable {
static_assert(std::is_base_of<ContextualVariable, Derived>::value,
"Curiously Recurring Template Pattern");
- DISALLOW_NEW_AND_DELETE();
+ DISALLOW_NEW_AND_DELETE()
DISALLOW_COPY_AND_ASSIGN(Scope);
};
@@ -72,7 +72,7 @@ class ContextualVariable {
// Usage: DECLARE_CONTEXTUAL_VARIABLE(VarName, VarType)
#define DECLARE_CONTEXTUAL_VARIABLE(VarName, ...) \
struct VarName \
- : v8::internal::torque::ContextualVariable<VarName, __VA_ARGS__> {};
+ : v8::internal::torque::ContextualVariable<VarName, __VA_ARGS__> {}
#define DEFINE_CONTEXTUAL_VARIABLE(VarName) \
template <> \
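[Editor's sketch] The semicolon shuffling in this file follows the usual macro convention: the macro body ends without a semicolon so each invocation supplies exactly one, avoiding stray empty declarations that stricter warning levels (e.g. -Wextra-semi) reject. The convention on a made-up macro:

#define DECLARE_COUNTER(Name) \
  struct Name { int value = 0; }

DECLARE_COUNTER(Requests);   // the single ';' comes from the call site
DECLARE_COUNTER(Responses);  // works unchanged inside classes, too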
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index 68bb170863..93715691d3 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -4,6 +4,7 @@
#include "src/torque/csa-generator.h"
+#include "src/globals.h"
#include "src/torque/type-oracle.h"
#include "src/torque/utils.h"
@@ -54,8 +55,20 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
return stack;
}
+void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
+ const std::string& file = SourceFileMap::GetSource(pos.source);
+ if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
+ // Lines in Torque SourcePositions are zero-based, while the
+ // CodeStubAssembler and downstream systems are one-based.
+ out_ << " ca_.SetSourcePosition(\"" << file << "\", "
+ << (pos.start.line + 1) << ");\n";
+ previous_position_ = pos;
+ }
+}
+
void CSAGenerator::EmitInstruction(const Instruction& instruction,
Stack<std::string>* stack) {
+ EmitSourcePosition(instruction->pos);
switch (instruction.kind()) {
#define ENUM_ITEM(T) \
case InstructionKind::k##T: \
@@ -119,7 +132,7 @@ void CSAGenerator::EmitInstruction(
out_ << results[0] << " = ";
}
out_ << instruction.constant->ExternalAssemblerName() << "(state_)."
- << instruction.constant->constant_name() << "()";
+ << instruction.constant->name()->value << "()";
if (type->IsStructType()) {
out_ << ".Flatten();\n";
} else {
@@ -179,30 +192,19 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
}
}
- if (instruction.intrinsic->ExternalName() == "%RawObjectCast") {
+ if (instruction.intrinsic->ExternalName() == "%RawDownCast") {
if (parameter_types.size() != 1) {
- ReportError("%RawObjectCast must take a single parameter");
+ ReportError("%RawDownCast must take a single parameter");
+ }
+ if (!return_type->IsSubtypeOf(parameter_types[0])) {
+ ReportError("%RawDownCast error: ", *return_type, " is not a subtype of ",
+ *parameter_types[0]);
}
if (return_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
if (return_type->GetGeneratedTNodeTypeName() !=
parameter_types[0]->GetGeneratedTNodeTypeName()) {
out_ << "TORQUE_CAST";
}
- } else {
- std::stringstream s;
- s << "%RawObjectCast must cast to subtype of Tagged (" << *return_type
- << " is not)";
- ReportError(s.str());
- }
- } else if (instruction.intrinsic->ExternalName() == "%RawPointerCast") {
- if (parameter_types.size() != 1) {
- ReportError("%RawPointerCast must take a single parameter");
- }
- if (!return_type->IsSubtypeOf(TypeOracle::GetRawPtrType())) {
- std::stringstream s;
- s << "%RawObjectCast must cast to subtype of RawPtr (" << *return_type
- << " is not)";
- ReportError(s.str());
}
} else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
if (parameter_types.size() != 1 || !parameter_types[0]->IsConstexpr()) {
@@ -234,9 +236,30 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
s << "%FromConstexpr does not support return type " << *return_type;
ReportError(s.str());
}
+ } else if (instruction.intrinsic->ExternalName() ==
+ "%GetAllocationBaseSize") {
+ if (instruction.specialization_types.size() != 1) {
+ ReportError(
+ "incorrect number of specialization classes for "
+ "%GetAllocationBaseSize (should be one)");
+ }
+ const ClassType* class_type =
+ ClassType::cast(instruction.specialization_types[0]);
+ // Special case classes that may not always have a fixed size (e.g.
+ // JSObjects). Their size must be fetched from the map.
+ if (class_type != TypeOracle::GetJSObjectType()) {
+ out_ << "CodeStubAssembler(state_).IntPtrConstant((";
+ args[0] = std::to_string(class_type->size());
+ } else {
+ out_ << "CodeStubAssembler(state_).TimesTaggedSize(CodeStubAssembler("
+ "state_).LoadMapInstanceSizeInWords(";
+ }
} else if (instruction.intrinsic->ExternalName() == "%Allocate") {
out_ << "ca_.UncheckedCast<" << return_type->GetGeneratedTNodeTypeName()
<< ">(CodeStubAssembler(state_).Allocate";
+ } else if (instruction.intrinsic->ExternalName() ==
+ "%AllocateInternalClass") {
+ out_ << "CodeStubAssembler(state_).AllocateUninitializedFixedArray";
} else {
ReportError("no built in intrinsic with name " +
instruction.intrinsic->ExternalName());
@@ -245,11 +268,21 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
out_ << "(";
PrintCommaSeparatedList(out_, args);
if (instruction.intrinsic->ExternalName() == "%Allocate") out_ << ")";
+ if (instruction.intrinsic->ExternalName() == "%GetAllocationBaseSize")
+ out_ << "))";
if (return_type->IsStructType()) {
out_ << ").Flatten();\n";
} else {
out_ << ");\n";
}
+ if (instruction.intrinsic->ExternalName() == "%Allocate") {
+ out_ << " CodeStubAssembler(state_).InitializeFieldsWithRoot("
+ << results[0] << ", ";
+ out_ << "CodeStubAssembler(state_).IntPtrConstant("
+ << std::to_string(ClassType::cast(return_type)->size()) << "), ";
+ PrintCommaSeparatedList(out_, args);
+ out_ << ", RootIndex::kUndefinedValue);\n";
+ }
}
void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
@@ -580,7 +613,7 @@ void CSAGenerator::EmitInstruction(const BranchInstruction& instruction,
void CSAGenerator::EmitInstruction(
const ConstexprBranchInstruction& instruction, Stack<std::string>* stack) {
- out_ << " if (" << instruction.condition << ") {\n";
+ out_ << " if ((" << instruction.condition << ")) {\n";
out_ << " ca_.Goto(&" << BlockName(instruction.if_true);
for (const std::string& value : *stack) {
out_ << ", " << value;
@@ -647,7 +680,7 @@ void CSAGenerator::EmitInstruction(const AbortInstruction& instruction,
StringLiteralQuote(SourceFileMap::GetSource(instruction.pos.source));
out_ << " CodeStubAssembler(state_).FailAssert("
<< StringLiteralQuote(instruction.message) << ", " << file << ", "
- << instruction.pos.line + 1 << ");\n";
+ << instruction.pos.start.line + 1 << ");\n";
break;
}
}
@@ -666,17 +699,28 @@ void CSAGenerator::EmitInstruction(
const Field& field =
instruction.class_type->LookupField(instruction.field_name);
std::string result_name = FreshNodeName();
- std::string type_string =
- field.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())
- ? "MachineType::TaggedSigned()"
- : "MachineType::AnyTagged()";
- out_ << field.name_and_type.type->GetGeneratedTypeName() << " " << result_name
- << " = "
- << "ca_.UncheckedCast<"
- << field.name_and_type.type->GetGeneratedTNodeTypeName()
- << ">(CodeStubAssembler(state_).LoadObjectField("
- << stack->Top() + ", " + std::to_string(field.offset) + ", "
- << type_string + "));\n";
+
+ size_t field_size;
+ std::string size_string;
+ std::string machine_type;
+ std::tie(field_size, size_string, machine_type) =
+ field.GetFieldSizeInformation();
+
+ if (instruction.class_type->IsExtern()) {
+ out_ << field.name_and_type.type->GetGeneratedTypeName() << " "
+ << result_name << " = ca_.UncheckedCast<"
+ << field.name_and_type.type->GetGeneratedTNodeTypeName()
+ << ">(CodeStubAssembler(state_).LoadObjectField(" << stack->Top()
+ << ", " << field.aggregate->GetGeneratedTNodeTypeName() << "::k"
+ << CamelifyString(field.name_and_type.name) << "Offset, "
+ << machine_type + "));\n";
+ } else {
+ out_ << field.name_and_type.type->GetGeneratedTypeName() << " "
+ << result_name << " = ca_.UncheckedCast<"
+ << field.name_and_type.type->GetGeneratedTNodeTypeName()
+ << ">(CodeStubAssembler(state_).UnsafeLoadFixedArrayElement("
+ << stack->Top() << ", " << (field.offset / kTaggedSize) << "));\n";
+ }
stack->Poke(stack->AboveTop() - 1, result_name);
}
@@ -687,12 +731,32 @@ void CSAGenerator::EmitInstruction(
stack->Push(value);
const Field& field =
instruction.class_type->LookupField(instruction.field_name);
- if (field.offset == 0) {
- out_ << " CodeStubAssembler(state_).StoreMap(" + object + ", " + value +
- ");\n";
+ if (instruction.class_type->IsExtern()) {
+ if (field.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ if (field.offset == 0) {
+ out_ << " CodeStubAssembler(state_).StoreMap(" << object << ", "
+ << value << ");\n";
+ } else {
+ out_ << " CodeStubAssembler(state_).StoreObjectField(" << object
+ << ", " << field.offset << ", " << value << ");\n";
+ }
+ } else {
+ size_t field_size;
+ std::string size_string;
+ std::string machine_type;
+ std::tie(field_size, size_string, machine_type) =
+ field.GetFieldSizeInformation();
+ if (field.offset == 0) {
+ ReportError("the first field in a class object must be a map");
+ }
+ out_ << " CodeStubAssembler(state_).StoreObjectFieldNoWriteBarrier("
+ << object << ", " << field.offset << ", " << value << ", "
+ << machine_type << ".representation());\n";
+ }
} else {
- out_ << " CodeStubAssembler(state_).StoreObjectField(" + object + ", " +
- std::to_string(field.offset) + ", " + value + ");\n";
+ out_ << " CodeStubAssembler(state_).UnsafeStoreFixedArrayElement("
+ << object << ", " << (field.offset / kTaggedSize) << ", " << value
+ << ");\n";
}
}
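[Editor's sketch] EmitSourcePosition above writes a SetSourcePosition call only when the position actually changes, keeping the generated CSA output compact. The deduplication pattern in a self-contained sketch (stand-in types, not Torque's):

#include <iostream>
#include <string>

struct Pos {
  std::string file;
  int line = -1;  // -1 marks "no position emitted yet"
  bool SameLine(const Pos& other) const {
    return file == other.file && line == other.line;
  }
};

class Emitter {
 public:
  void EmitAt(const Pos& pos, const std::string& stmt) {
    if (!previous_.SameLine(pos)) {
      // Zero-based lines in, one-based lines out, as in the hunk above.
      std::cout << "// at " << pos.file << ":" << (pos.line + 1) << "\n";
      previous_ = pos;
    }
    std::cout << stmt << "\n";
  }

 private:
  Pos previous_;  // starts invalid, so the first position always prints
};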
diff --git a/deps/v8/src/torque/csa-generator.h b/deps/v8/src/torque/csa-generator.h
index e3fbacdecf..5790d9434c 100644
--- a/deps/v8/src/torque/csa-generator.h
+++ b/deps/v8/src/torque/csa-generator.h
@@ -18,7 +18,10 @@ class CSAGenerator {
public:
CSAGenerator(const ControlFlowGraph& cfg, std::ostream& out,
base::Optional<Builtin::Kind> linkage = base::nullopt)
- : cfg_(cfg), out_(out), linkage_(linkage) {}
+ : cfg_(cfg),
+ out_(out),
+ linkage_(linkage),
+ previous_position_(SourcePosition::Invalid()) {}
base::Optional<Stack<std::string>> EmitGraph(Stack<std::string> parameters);
static constexpr const char* ARGUMENTS_VARIABLE_STRING = "arguments";
@@ -31,6 +34,9 @@ class CSAGenerator {
std::ostream& out_;
size_t fresh_id_ = 0;
base::Optional<Builtin::Kind> linkage_;
+ SourcePosition previous_position_;
+
+ void EmitSourcePosition(SourcePosition pos, bool always_emit = false);
std::string PreCallableExceptionPreparation(
base::Optional<Block*> catch_block);
diff --git a/deps/v8/src/torque/declarable.cc b/deps/v8/src/torque/declarable.cc
index 89501e5682..a918cdcb27 100644
--- a/deps/v8/src/torque/declarable.cc
+++ b/deps/v8/src/torque/declarable.cc
@@ -6,12 +6,13 @@
#include <iostream>
#include "src/torque/declarable.h"
+#include "src/torque/global-context.h"
namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(CurrentScope);
+DEFINE_CONTEXTUAL_VARIABLE(CurrentScope)
std::ostream& operator<<(std::ostream& os, const QualifiedName& name) {
for (const std::string& qualifier : name.namespace_qualification) {
@@ -55,7 +56,9 @@ std::ostream& operator<<(std::ostream& os, const RuntimeFunction& b) {
std::ostream& operator<<(std::ostream& os, const Generic& g) {
os << "generic " << g.name() << "<";
- PrintCommaSeparatedList(os, g.declaration()->generic_parameters);
+ PrintCommaSeparatedList(
+ os, g.declaration()->generic_parameters,
+ [](const Identifier* identifier) { return identifier->value; });
os << ">";
return os;
@@ -63,7 +66,7 @@ std::ostream& operator<<(std::ostream& os, const Generic& g) {
base::Optional<const Type*> Generic::InferTypeArgument(
size_t i, const TypeVector& arguments) {
- const std::string type_name = declaration()->generic_parameters[i];
+ const std::string type_name = declaration()->generic_parameters[i]->value;
const std::vector<TypeExpression*>& parameters =
declaration()->callable->signature->parameters.types;
size_t j = declaration()->callable->signature->parameters.implicit_count;
@@ -95,6 +98,12 @@ base::Optional<TypeVector> Generic::InferSpecializationTypes(
return result;
}
+bool Namespace::IsDefaultNamespace() const {
+ return this == GlobalContext::GetDefaultNamespace();
+}
+
+bool Namespace::IsTestNamespace() const { return name() == kTestNamespaceName; }
+
} // namespace torque
} // namespace internal
} // namespace v8
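[Editor's sketch] The lambda handed to PrintCommaSeparatedList above is a projection: the list stores Identifier* but only each name's string should print. The general shape of such a utility (V8 has its own; this sketch just shows the idea):

#include <iostream>
#include <vector>

template <typename T, typename Projection>
void PrintCommaSeparated(std::ostream& os, const std::vector<T>& list,
                         Projection&& project) {
  const char* separator = "";
  for (const T& element : list) {
    os << separator << project(element);
    separator = ", ";
  }
}

// Hypothetical usage mirroring the hunk, given some Identifier type with a
// string `value` member:
//   PrintCommaSeparated(std::cout, generic_parameters,
//                       [](const Identifier* id) { return id->value; });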
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index a262022409..d8b4260dd4 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -106,7 +106,7 @@ class Declarable {
class Scope : public Declarable {
public:
- DECLARE_DECLARABLE_BOILERPLATE(Scope, scope);
+ DECLARE_DECLARABLE_BOILERPLATE(Scope, scope)
explicit Scope(Declarable::Kind kind) : Declarable(kind) {}
std::vector<Declarable*> LookupShallow(const QualifiedName& name) {
@@ -151,13 +151,15 @@ class Scope : public Declarable {
class Namespace : public Scope {
public:
- DECLARE_DECLARABLE_BOILERPLATE(Namespace, namespace);
+ DECLARE_DECLARABLE_BOILERPLATE(Namespace, namespace)
explicit Namespace(const std::string& name)
: Scope(Declarable::kNamespace), name_(name) {}
const std::string& name() const { return name_; }
std::string ExternalName() const {
return CamelifyString(name()) + "BuiltinsFromDSLAssembler";
}
+ bool IsDefaultNamespace() const;
+ bool IsTestNamespace() const;
std::ostream& source_stream() { return source_stream_; }
std::ostream& header_stream() { return header_stream_; }
std::string source() { return source_stream_.str(); }
@@ -181,8 +183,8 @@ inline Namespace* CurrentNamespace() {
class Value : public Declarable {
public:
- DECLARE_DECLARABLE_BOILERPLATE(Value, value);
- const std::string& name() const { return name_; }
+ DECLARE_DECLARABLE_BOILERPLATE(Value, value)
+ const Identifier* name() const { return name_; }
virtual bool IsConst() const { return true; }
VisitResult value() const { return *value_; }
const Type* type() const { return type_; }
@@ -193,20 +195,19 @@ class Value : public Declarable {
}
protected:
- Value(Kind kind, const Type* type, const std::string& name)
+ Value(Kind kind, const Type* type, Identifier* name)
: Declarable(kind), type_(type), name_(name) {}
private:
const Type* type_;
- std::string name_;
+ Identifier* name_;
base::Optional<VisitResult> value_;
};
class NamespaceConstant : public Value {
public:
- DECLARE_DECLARABLE_BOILERPLATE(NamespaceConstant, constant);
+ DECLARE_DECLARABLE_BOILERPLATE(NamespaceConstant, constant)
- const std::string& constant_name() const { return constant_name_; }
Expression* body() { return body_; }
std::string ExternalAssemblerName() const {
return Namespace::cast(ParentScope())->ExternalName();
@@ -214,31 +215,29 @@ class NamespaceConstant : public Value {
private:
friend class Declarations;
- explicit NamespaceConstant(std::string constant_name, const Type* type,
+ explicit NamespaceConstant(Identifier* constant_name, const Type* type,
Expression* body)
: Value(Declarable::kNamespaceConstant, type, constant_name),
- constant_name_(std::move(constant_name)),
body_(body) {}
- std::string constant_name_;
Expression* body_;
};
class ExternConstant : public Value {
public:
- DECLARE_DECLARABLE_BOILERPLATE(ExternConstant, constant);
+ DECLARE_DECLARABLE_BOILERPLATE(ExternConstant, constant)
private:
friend class Declarations;
- explicit ExternConstant(std::string name, const Type* type, std::string value)
- : Value(Declarable::kExternConstant, type, std::move(name)) {
+ explicit ExternConstant(Identifier* name, const Type* type, std::string value)
+ : Value(Declarable::kExternConstant, type, name) {
set_value(VisitResult(type, std::move(value)));
}
};
class Callable : public Scope {
public:
- DECLARE_DECLARABLE_BOILERPLATE(Callable, callable);
+ DECLARE_DECLARABLE_BOILERPLATE(Callable, callable)
const std::string& ExternalName() const { return external_name_; }
const std::string& ReadableName() const { return readable_name_; }
const Signature& signature() const { return signature_; }
@@ -254,7 +253,7 @@ class Callable : public Scope {
base::Optional<Statement*> body() const { return body_; }
bool IsExternal() const { return !body_.has_value(); }
virtual bool ShouldBeInlined() const { return false; }
- bool IsConstructor() const { return readable_name_ == kConstructMethodName; }
+ virtual bool ShouldGenerateExternalCode() const { return !ShouldBeInlined(); }
protected:
Callable(Declarable::Kind kind, std::string external_name,
@@ -282,7 +281,7 @@ class Callable : public Scope {
class Macro : public Callable {
public:
- DECLARE_DECLARABLE_BOILERPLATE(Macro, macro);
+ DECLARE_DECLARABLE_BOILERPLATE(Macro, macro)
bool ShouldBeInlined() const override {
for (const LabelDeclaration& label : signature().labels) {
for (const Type* type : label.types) {
@@ -323,7 +322,7 @@ class Macro : public Callable {
class Method : public Macro {
public:
- DECLARE_DECLARABLE_BOILERPLATE(Method, Method);
+ DECLARE_DECLARABLE_BOILERPLATE(Method, Method)
bool ShouldBeInlined() const override {
return Macro::ShouldBeInlined() ||
signature()
@@ -347,7 +346,7 @@ class Method : public Macro {
class Builtin : public Callable {
public:
enum Kind { kStub, kFixedArgsJavaScript, kVarArgsJavaScript };
- DECLARE_DECLARABLE_BOILERPLATE(Builtin, builtin);
+ DECLARE_DECLARABLE_BOILERPLATE(Builtin, builtin)
Kind kind() const { return kind_; }
bool IsStub() const { return kind_ == kStub; }
bool IsVarArgsJavaScript() const { return kind_ == kVarArgsJavaScript; }
@@ -367,7 +366,7 @@ class Builtin : public Callable {
class RuntimeFunction : public Callable {
public:
- DECLARE_DECLARABLE_BOILERPLATE(RuntimeFunction, runtime);
+ DECLARE_DECLARABLE_BOILERPLATE(RuntimeFunction, runtime)
private:
friend class Declarations;
@@ -379,7 +378,7 @@ class RuntimeFunction : public Callable {
class Intrinsic : public Callable {
public:
- DECLARE_DECLARABLE_BOILERPLATE(Intrinsic, intrinsic);
+ DECLARE_DECLARABLE_BOILERPLATE(Intrinsic, intrinsic)
private:
friend class Declarations;
@@ -394,10 +393,10 @@ class Intrinsic : public Callable {
class Generic : public Declarable {
public:
- DECLARE_DECLARABLE_BOILERPLATE(Generic, generic);
+ DECLARE_DECLARABLE_BOILERPLATE(Generic, generic)
GenericDeclaration* declaration() const { return declaration_; }
- const std::vector<std::string> generic_parameters() const {
+ const std::vector<Identifier*> generic_parameters() const {
return declaration()->generic_parameters;
}
const std::string& name() const { return name_; }
@@ -438,20 +437,27 @@ struct SpecializationKey {
class TypeAlias : public Declarable {
public:
- DECLARE_DECLARABLE_BOILERPLATE(TypeAlias, type_alias);
+ DECLARE_DECLARABLE_BOILERPLATE(TypeAlias, type_alias)
const Type* type() const { return type_; }
bool IsRedeclaration() const { return redeclaration_; }
+ SourcePosition GetDeclarationPosition() const {
+ return declaration_position_;
+ }
private:
friend class Declarations;
- explicit TypeAlias(const Type* type, bool redeclaration)
+ explicit TypeAlias(
+ const Type* type, bool redeclaration,
+ SourcePosition declaration_position = SourcePosition::Invalid())
: Declarable(Declarable::kTypeAlias),
type_(type),
- redeclaration_(redeclaration) {}
+ redeclaration_(redeclaration),
+ declaration_position_(declaration_position) {}
const Type* type_;
bool redeclaration_;
+ const SourcePosition declaration_position_;
};
std::ostream& operator<<(std::ostream& os, const Callable& m);
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index cccb8ce1d2..8e3b6b8edf 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "src/torque/declaration-visitor.h"
+
+#include "src/globals.h"
#include "src/torque/ast.h"
namespace v8 {
@@ -73,10 +75,21 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
}
}
+ for (size_t i = 0; i < signature.types().size(); ++i) {
+ if (const StructType* type =
+ StructType::DynamicCast(signature.types()[i])) {
+ std::stringstream stream;
+ stream << "builtin '" << decl->name << "' uses the struct '"
+ << type->name() << "' as argument '"
+ << signature.parameter_names[i] << "'. This is not supported.";
+ ReportError(stream.str());
+ }
+ }
+
if (const StructType* struct_type =
StructType::DynamicCast(signature.return_type)) {
std::stringstream stream;
- stream << "builtins (in this case" << decl->name
+ stream << "builtins (in this case " << decl->name
<< ") cannot return structs (in this case " << struct_type->name()
<< ")";
ReportError(stream.str());
@@ -232,12 +245,6 @@ void DeclarationVisitor::Visit(ExternConstDeclaration* decl) {
void DeclarationVisitor::DeclareMethods(
AggregateType* container_type, const std::vector<Declaration*>& methods) {
// Declare the class' methods
- IdentifierExpression* constructor_this = MakeNode<IdentifierExpression>(
- std::vector<std::string>{}, kThisParameterName);
- AggregateType* constructor_this_type =
- container_type->IsStructType()
- ? container_type
- : ClassType::cast(container_type)->struct_type();
for (auto declaration : methods) {
CurrentSourcePosition::Scope pos_scope(declaration->pos);
StandardDeclaration* standard_declaration =
@@ -248,211 +255,69 @@ void DeclarationVisitor::DeclareMethods(
Signature signature = MakeSignature(method->signature.get());
signature.parameter_names.insert(
signature.parameter_names.begin() + signature.implicit_count,
- kThisParameterName);
+ MakeNode<Identifier>(kThisParameterName));
Statement* body = *(standard_declaration->body);
std::string method_name(method->name);
- if (method->name == kConstructMethodName) {
- signature.parameter_types.types.insert(
- signature.parameter_types.types.begin() + signature.implicit_count,
- constructor_this_type);
- // Constructor
- if (!signature.return_type->IsVoid()) {
- ReportError("constructors musn't have a return type");
- }
- if (signature.labels.size() != 0) {
- ReportError("constructors musn't have labels");
- }
- method_name = kConstructMethodName;
- Declarations::CreateMethod(constructor_this_type, method_name, signature,
- false, body);
- } else {
signature.parameter_types.types.insert(
signature.parameter_types.types.begin() + signature.implicit_count,
container_type);
Declarations::CreateMethod(container_type, method_name, signature, false,
body);
- }
- }
-
- if (container_type->Constructors().size() != 0) return;
-
- // Generate default constructor.
- Signature constructor_signature;
- constructor_signature.parameter_types.var_args = false;
- constructor_signature.return_type = TypeOracle::GetVoidType();
- std::vector<const AggregateType*> hierarchy = container_type->GetHierarchy();
-
- std::vector<Statement*> statements;
- std::vector<Statement*> initializer_statements;
-
- size_t parameter_number = 0;
- constructor_signature.parameter_names.push_back(kThisParameterName);
- constructor_signature.parameter_types.types.push_back(constructor_this_type);
- std::vector<Expression*> super_arguments;
- for (auto current_type : hierarchy) {
- for (auto& f : current_type->fields()) {
- std::string parameter_name("p" + std::to_string(parameter_number++));
- constructor_signature.parameter_names.push_back(parameter_name);
- constructor_signature.parameter_types.types.push_back(
- f.name_and_type.type);
- IdentifierExpression* value = MakeNode<IdentifierExpression>(
- std::vector<std::string>{}, parameter_name);
- if (container_type != current_type) {
- super_arguments.push_back(MakeNode<IdentifierExpression>(
- std::vector<std::string>{}, parameter_name));
- } else {
- LocationExpression* location = MakeNode<FieldAccessExpression>(
- constructor_this, f.name_and_type.name);
- Statement* statement = MakeNode<ExpressionStatement>(
- MakeNode<AssignmentExpression>(location, base::nullopt, value));
- initializer_statements.push_back(statement);
- }
- }
}
-
- if (hierarchy.size() > 1) {
- IdentifierExpression* super_identifier = MakeNode<IdentifierExpression>(
- std::vector<std::string>{}, kSuperMethodName);
- Statement* statement =
- MakeNode<ExpressionStatement>(MakeNode<CallMethodExpression>(
- constructor_this, super_identifier, super_arguments,
- std::vector<std::string>{}));
- statements.push_back(statement);
- }
-
- for (auto s : initializer_statements) {
- statements.push_back(s);
- }
-
- Statement* constructor_body = MakeNode<BlockStatement>(false, statements);
-
- Declarations::CreateMethod(constructor_this_type, kConstructMethodName,
- constructor_signature, false, constructor_body);
}
void DeclarationVisitor::Visit(StructDeclaration* decl) {
- std::vector<Field> fields;
- size_t offset = 0;
- for (auto& field : decl->fields) {
- const Type* field_type = Declarations::GetType(field.name_and_type.type);
- fields.push_back({field.name_and_type.type->pos,
- {field.name_and_type.name, field_type},
- offset,
- false});
- offset += LoweredSlotCount(field_type);
- }
- StructType* struct_type = Declarations::DeclareStruct(decl->name, fields);
- DeclareMethods(struct_type, decl->methods);
+ StructType* struct_type = Declarations::DeclareStruct(decl->name);
+ struct_declarations_.push_back(
+ std::make_tuple(CurrentScope::Get(), decl, struct_type));
}
void DeclarationVisitor::Visit(ClassDeclaration* decl) {
- // Compute the offset of the class' first member. If the class extends
- // another class, it's the size of the extended class, otherwise zero.
- size_t first_field_offset = 0;
- const Type* super_type = Declarations::LookupType(decl->super);
- if (super_type != TypeOracle::GetTaggedType()) {
- const ClassType* super_class = ClassType::DynamicCast(super_type);
- if (!super_class) {
- ReportError("class \"", decl->name,
- "\" must extend either Tagged or an already declared class");
+ ClassType* new_class;
+ if (decl->is_extern) {
+ if (!decl->super) {
+ ReportError("Extern class must extend another type.");
}
- first_field_offset = super_class->size();
- }
-
- // The generates clause must create a TNode<>
- std::string generates = decl->name;
- if (decl->generates) {
- if (generates.length() < 7 || generates.substr(0, 6) != "TNode<" ||
- generates.substr(generates.length() - 1, 1) != ">") {
- ReportError("generated type \"", generates,
- "\" should be of the form \"TNode<...>\"");
+ // Compute the offset of the class' first member. If the class extends
+ // another class, it's the size of the extended class, otherwise zero.
+ const Type* super_type = Declarations::LookupType(*decl->super);
+ if (super_type != TypeOracle::GetTaggedType()) {
+ const ClassType* super_class = ClassType::DynamicCast(super_type);
+ if (!super_class) {
+ ReportError(
+ "class \"", decl->name->value,
+ "\" must extend either Tagged or an already declared class");
+ }
}
- generates = generates.substr(6, generates.length() - 7);
- }
- std::vector<Field> fields;
- size_t class_offset = first_field_offset;
- bool seen_strong = false;
- bool seen_weak = false;
- for (ClassFieldExpression& field : decl->fields) {
- const Type* field_type = Declarations::GetType(field.name_and_type.type);
- if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- if (field.weak) {
- seen_weak = true;
- } else {
- if (seen_weak) {
- ReportError("cannot declare strong field \"",
- field.name_and_type.name,
- "\" after weak Tagged references");
- }
- seen_strong = true;
+ // The generates clause must create a TNode<>
+ std::string generates = decl->name->value;
+ if (decl->generates) {
+ generates = *decl->generates;
+ if (generates.length() < 7 || generates.substr(0, 6) != "TNode<" ||
+ generates.substr(generates.length() - 1, 1) != ">") {
+ ReportError("generated type \"", generates,
+ "\" should be of the form \"TNode<...>\"");
}
- } else {
- if (seen_strong || seen_weak) {
- ReportError("cannot declare scalar field \"", field.name_and_type.name,
- "\" after strong or weak Tagged references");
- }
- }
- if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- ReportError(
- "field \"", field.name_and_type.name, "\" of class \"", decl->name,
- "\" must be a subtype of Tagged (other types not yet supported)");
+ generates = generates.substr(6, generates.length() - 7);
}
- fields.push_back({field.name_and_type.type->pos,
- {field.name_and_type.name, field_type},
- class_offset,
- field.weak});
- class_offset += kTaggedSize;
- }
-
- auto new_class = Declarations::DeclareClass(
- super_type, decl->name, decl->transient, generates, fields, class_offset);
- DeclareMethods(new_class, decl->methods);
- // For each field, construct AST snippits that implement a CSA accessor
- // function and define a corresponding '.field' operator. The
- // implementation iterator will turn the snippits into code.
- for (auto& field : new_class->fields()) {
- IdentifierExpression* parameter =
- MakeNode<IdentifierExpression>(std::string{"o"});
-
- // Load accessor
- std::string camel_field_name = CamelifyString(field.name_and_type.name);
- std::string load_macro_name = "Load" + new_class->name() + camel_field_name;
- std::string load_operator_name = "." + field.name_and_type.name;
- Signature load_signature;
- load_signature.parameter_names.push_back("o");
- load_signature.parameter_types.types.push_back(new_class);
- load_signature.parameter_types.var_args = false;
- load_signature.return_type = field.name_and_type.type;
- Statement* load_body =
- MakeNode<ReturnStatement>(MakeNode<LoadObjectFieldExpression>(
- parameter, field.name_and_type.name));
- Declarations::DeclareMacro(load_macro_name, base::nullopt, load_signature,
- false, load_body, load_operator_name);
-
- // Store accessor
- IdentifierExpression* value = MakeNode<IdentifierExpression>(
- std::vector<std::string>{}, std::string{"v"});
- std::string store_macro_name =
- "Store" + new_class->name() + camel_field_name;
- std::string store_operator_name = "." + field.name_and_type.name + "=";
- Signature store_signature;
- store_signature.parameter_names.push_back("o");
- store_signature.parameter_names.push_back("v");
- store_signature.parameter_types.types.push_back(new_class);
- store_signature.parameter_types.types.push_back(field.name_and_type.type);
- store_signature.parameter_types.var_args = false;
- // TODO(danno): Store macros probably should return their value argument
- store_signature.return_type = TypeOracle::GetVoidType();
- Statement* store_body =
- MakeNode<ExpressionStatement>(MakeNode<StoreObjectFieldExpression>(
- parameter, field.name_and_type.name, value));
- Declarations::DeclareMacro(store_macro_name, base::nullopt, store_signature,
- false, store_body, store_operator_name);
+ new_class = Declarations::DeclareClass(
+ super_type, decl->name, decl->is_extern, decl->transient, generates);
+ } else {
+ if (decl->super) {
+ ReportError("Only extern classes can inherit.");
+ }
+ if (decl->generates) {
+ ReportError("Only extern classes can specify a generated type.");
+ }
+ new_class = Declarations::DeclareClass(TypeOracle::GetTaggedType(),
+ decl->name, decl->is_extern,
+ decl->transient, "FixedArray");
}
-
- GlobalContext::RegisterClass(decl->name, new_class);
+ GlobalContext::RegisterClass(decl->name->value, new_class);
+ class_declarations_.push_back(
+ std::make_tuple(CurrentScope::Get(), decl, new_class));
}
void DeclarationVisitor::Visit(CppIncludeDeclaration* decl) {
@@ -477,7 +342,12 @@ void DeclarationVisitor::Visit(TypeDeclaration* decl) {
if (decl->transient) {
ReportError("cannot declare a transient type that is also constexpr");
}
- std::string constexpr_name = CONSTEXPR_TYPE_PREFIX + decl->name;
+ // DeclareAbstractType expects an Identifier*. A new one is created from the
+ // declaration, and the SourcePosition copied from the original name.
+ Identifier* constexpr_name =
+ MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + decl->name->value);
+ constexpr_name->pos = decl->name->pos;
+
base::Optional<std::string> constexpr_extends;
if (decl->extends)
constexpr_extends = CONSTEXPR_TYPE_PREFIX + *decl->extends;
@@ -500,7 +370,7 @@ void DeclarationVisitor::DeclareSpecializedTypes(const SpecializationKey& key) {
}
for (auto type : key.specialized_types) {
- std::string generic_type_name =
+ Identifier* generic_type_name =
key.generic->declaration()->generic_parameters[i++];
Declarations::DeclareType(generic_type_name, type, true);
}
@@ -587,6 +457,158 @@ Callable* DeclarationVisitor::Specialize(
return callable;
}
+void DeclarationVisitor::FinalizeStructFieldsAndMethods(
+ StructType* struct_type, StructDeclaration* struct_declaration) {
+ size_t offset = 0;
+ for (auto& field : struct_declaration->fields) {
+ const Type* field_type = Declarations::GetType(field.name_and_type.type);
+ struct_type->RegisterField({field.name_and_type.type->pos,
+ struct_type,
+ base::nullopt,
+ {field.name_and_type.name->value, field_type},
+ offset,
+ false});
+ offset += LoweredSlotCount(field_type);
+ }
+ CurrentSourcePosition::Scope position_activator(struct_declaration->pos);
+ DeclareMethods(struct_type, struct_declaration->methods);
+}
+
+void DeclarationVisitor::FinalizeClassFieldsAndMethods(
+ ClassType* class_type, ClassDeclaration* class_declaration) {
+ const ClassType* super_class = class_type->GetSuperClass();
+ size_t class_offset = super_class ? super_class->size() : 0;
+ bool seen_indexed_field = false;
+ for (ClassFieldExpression& field_expression : class_declaration->fields) {
+ CurrentSourcePosition::Scope position_activator(
+ field_expression.name_and_type.type->pos);
+ const Type* field_type =
+ Declarations::GetType(field_expression.name_and_type.type);
+ if (!class_declaration->is_extern) {
+ if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ ReportError("non-extern classes do not support untagged fields");
+ }
+ if (field_expression.weak) {
+ ReportError("non-extern classes do not support weak fields");
+ }
+ }
+ if (field_expression.index) {
+ if (seen_indexed_field ||
+ (super_class && super_class->HasIndexedField())) {
+ ReportError(
+ "only one indexable field is currently supported per class");
+ }
+ seen_indexed_field = true;
+ const Field* index_field =
+ &(class_type->LookupField(*field_expression.index));
+ class_type->RegisterField(
+ {field_expression.name_and_type.type->pos,
+ class_type,
+ index_field,
+ {field_expression.name_and_type.name->value, field_type},
+ class_offset,
+ field_expression.weak});
+ } else {
+ if (seen_indexed_field) {
+ ReportError("cannot declare non-indexable field \"",
+ field_expression.name_and_type.name,
+ "\" after an indexable field "
+ "declaration");
+ }
+ const Field& field = class_type->RegisterField(
+ {field_expression.name_and_type.type->pos,
+ class_type,
+ base::nullopt,
+ {field_expression.name_and_type.name->value, field_type},
+ class_offset,
+ field_expression.weak});
+ size_t field_size;
+ std::string size_string;
+ std::string machine_type;
+ std::tie(field_size, size_string, machine_type) =
+ field.GetFieldSizeInformation();
+ // Our allocations don't support alignments beyond kTaggedSize.
+ size_t alignment = std::min(size_t{kTaggedSize}, field_size);
+ if (class_offset % alignment != 0) {
+ ReportError("field ", field_expression.name_and_type.name,
+ " at offset ", class_offset, " is not ", alignment,
+ "-byte aligned.");
+ }
+ class_offset += field_size;
+ }
+ }
+ class_type->SetSize(class_offset);
+
+ // For each field, construct AST snippets that implement a CSA accessor
+ // function and define a corresponding '.field' operator. The
+ // implementation visitor will turn the snippets into code.
+ for (auto& field : class_type->fields()) {
+ if (field.index) continue;
+ CurrentSourcePosition::Scope position_activator(field.pos);
+ IdentifierExpression* parameter =
+ MakeNode<IdentifierExpression>(MakeNode<Identifier>(std::string{"o"}));
+
+ // Load accessor
+ std::string camel_field_name = CamelifyString(field.name_and_type.name);
+ std::string load_macro_name =
+ "Load" + class_type->name() + camel_field_name;
+ std::string load_operator_name = "." + field.name_and_type.name;
+ Signature load_signature;
+ load_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
+ load_signature.parameter_types.types.push_back(class_type);
+ load_signature.parameter_types.var_args = false;
+ load_signature.return_type = field.name_and_type.type;
+ Statement* load_body =
+ MakeNode<ReturnStatement>(MakeNode<LoadObjectFieldExpression>(
+ parameter, field.name_and_type.name));
+ Declarations::DeclareMacro(load_macro_name, base::nullopt, load_signature,
+ false, load_body, load_operator_name);
+
+ // Store accessor
+ IdentifierExpression* value = MakeNode<IdentifierExpression>(
+ std::vector<std::string>{}, MakeNode<Identifier>(std::string{"v"}));
+ std::string store_macro_name =
+ "Store" + class_type->name() + camel_field_name;
+ std::string store_operator_name = "." + field.name_and_type.name + "=";
+ Signature store_signature;
+ store_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
+ store_signature.parameter_names.push_back(MakeNode<Identifier>("v"));
+ store_signature.parameter_types.types.push_back(class_type);
+ store_signature.parameter_types.types.push_back(field.name_and_type.type);
+ store_signature.parameter_types.var_args = false;
+ // TODO(danno): Store macros probably should return their value argument
+ store_signature.return_type = TypeOracle::GetVoidType();
+ Statement* store_body =
+ MakeNode<ExpressionStatement>(MakeNode<StoreObjectFieldExpression>(
+ parameter, field.name_and_type.name, value));
+ Declarations::DeclareMacro(store_macro_name, base::nullopt, store_signature,
+ false, store_body, store_operator_name);
+ }
+
+ DeclareMethods(class_type, class_declaration->methods);
+}
+
+void DeclarationVisitor::FinalizeStructsAndClasses() {
+ for (auto current_struct_info : struct_declarations_) {
+ Scope* scope;
+ StructDeclaration* struct_declaration;
+ StructType* struct_type;
+ std::tie(scope, struct_declaration, struct_type) = current_struct_info;
+ CurrentScope::Scope scope_activator(scope);
+ FinalizeStructFieldsAndMethods(struct_type, struct_declaration);
+ }
+
+ for (auto current_class_info : class_declarations_) {
+ Scope* scope;
+ ClassDeclaration* class_declaration;
+ ClassType* class_type;
+ std::tie(scope, class_declaration, class_type) = current_class_info;
+ CurrentScope::Scope scope_activator(scope);
+ CurrentSourcePosition::Scope position_activator(class_declaration->pos);
+ FinalizeClassFieldsAndMethods(class_type, class_declaration);
+ }
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
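[Editor's sketch] The new struct_declarations_/class_declarations_ vectors split declaration into two phases: pass one registers every name, and pass two (FinalizeStructsAndClasses) revisits each saved declaration to resolve field types and methods, so declarations may reference types that appear later in the source. A toy sketch of the scheme (illustrative types only):

#include <deque>
#include <string>
#include <utility>
#include <vector>

struct Decl { std::string name; };
struct Type { std::string name; bool finalized = false; };

class Visitor {
 public:
  Type* Visit(Decl* decl) {
    // Phase 1: register the name only; the body may mention types that
    // have not been visited yet, so it is deferred.
    types_.push_back(Type{decl->name, false});
    pending_.emplace_back(decl, &types_.back());  // deque keeps pointers stable
    return &types_.back();
  }

  void FinalizeAll() {
    // Phase 2: every name is now known, so deferred bodies can be resolved.
    for (auto& entry : pending_) entry.second->finalized = true;
    pending_.clear();
  }

 private:
  std::deque<Type> types_;
  std::vector<std::pair<Decl*, Type*>> pending_;
};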
diff --git a/deps/v8/src/torque/declaration-visitor.h b/deps/v8/src/torque/declaration-visitor.h
index a492a277fd..855dd4f048 100644
--- a/deps/v8/src/torque/declaration-visitor.h
+++ b/deps/v8/src/torque/declaration-visitor.h
@@ -52,7 +52,7 @@ class DeclarationVisitor : public FileVisitor {
void Visit(TypeAliasDeclaration* decl) {
const Type* type = Declarations::GetType(decl->type);
- type->AddAlias(decl->name);
+ type->AddAlias(decl->name->value);
Declarations::DeclareType(decl->name, type, true);
}
@@ -93,8 +93,20 @@ class DeclarationVisitor : public FileVisitor {
base::Optional<const CallableNodeSignature*> signature,
base::Optional<Statement*> body);
+ void FinalizeStructsAndClasses();
+
private:
void DeclareSpecializedTypes(const SpecializationKey& key);
+
+ void FinalizeStructFieldsAndMethods(StructType* struct_type,
+ StructDeclaration* struct_declaration);
+ void FinalizeClassFieldsAndMethods(ClassType* class_type,
+ ClassDeclaration* class_declaration);
+
+ std::vector<std::tuple<Scope*, StructDeclaration*, StructType*>>
+ struct_declarations_;
+ std::vector<std::tuple<Scope*, ClassDeclaration*, ClassType*>>
+ class_declarations_;
};
} // namespace torque
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index 6cf7d0c4a4..b4b414ba5c 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -5,6 +5,7 @@
#include "src/torque/declarations.h"
#include "src/torque/declarable.h"
#include "src/torque/global-context.h"
+#include "src/torque/server-data.h"
#include "src/torque/type-oracle.h"
namespace v8 {
@@ -59,10 +60,14 @@ std::vector<Declarable*> Declarations::LookupGlobalScope(
return d;
}
-const Type* Declarations::LookupType(const QualifiedName& name) {
+const TypeAlias* Declarations::LookupTypeAlias(const QualifiedName& name) {
TypeAlias* declaration =
EnsureUnique(FilterDeclarables<TypeAlias>(Lookup(name)), name, "type");
- return declaration->type();
+ return declaration;
+}
+
+const Type* Declarations::LookupType(const QualifiedName& name) {
+ return LookupTypeAlias(name)->type();
}
const Type* Declarations::LookupType(std::string name) {
@@ -79,7 +84,13 @@ const Type* Declarations::GetType(TypeExpression* type_expression) {
if (auto* basic = BasicTypeExpression::DynamicCast(type_expression)) {
std::string name =
(basic->is_constexpr ? CONSTEXPR_TYPE_PREFIX : "") + basic->name;
- return LookupType(QualifiedName{basic->namespace_qualification, name});
+ const TypeAlias* alias =
+ LookupTypeAlias(QualifiedName{basic->namespace_qualification, name});
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::AddDefinition(type_expression->pos,
+ alias->GetDeclarationPosition());
+ }
+ return alias->type();
} else if (auto* union_type = UnionTypeExpression::cast(type_expression)) {
return TypeOracle::GetUnionType(GetType(union_type->a),
GetType(union_type->b));
@@ -147,70 +158,42 @@ Namespace* Declarations::DeclareNamespace(const std::string& name) {
}
const AbstractType* Declarations::DeclareAbstractType(
- const std::string& name, bool transient, const std::string& generated,
+ const Identifier* name, bool transient, std::string generated,
base::Optional<const AbstractType*> non_constexpr_version,
const base::Optional<std::string>& parent) {
- CheckAlreadyDeclared<TypeAlias>(name, "type");
+ CheckAlreadyDeclared<TypeAlias>(name->value, "type");
const Type* parent_type = nullptr;
if (parent) {
parent_type = LookupType(QualifiedName{*parent});
}
+ if (generated == "" && parent) {
+ generated = parent_type->GetGeneratedTNodeTypeName();
+ }
const AbstractType* type = TypeOracle::GetAbstractType(
- parent_type, name, transient, generated, non_constexpr_version);
+ parent_type, name->value, transient, generated, non_constexpr_version);
DeclareType(name, type, false);
return type;
}
-void Declarations::DeclareType(const std::string& name, const Type* type,
+void Declarations::DeclareType(const Identifier* name, const Type* type,
bool redeclaration) {
- CheckAlreadyDeclared<TypeAlias>(name, "type");
- Declare(name, std::unique_ptr<TypeAlias>(new TypeAlias(type, redeclaration)));
+ CheckAlreadyDeclared<TypeAlias>(name->value, "type");
+ Declare(name->value, std::unique_ptr<TypeAlias>(
+ new TypeAlias(type, redeclaration, name->pos)));
}
-StructType* Declarations::DeclareStruct(const std::string& name,
- const std::vector<Field>& fields) {
- StructType* new_type = TypeOracle::GetStructType(name, fields);
+StructType* Declarations::DeclareStruct(const Identifier* name) {
+ StructType* new_type = TypeOracle::GetStructType(name->value);
DeclareType(name, new_type, false);
return new_type;
}
ClassType* Declarations::DeclareClass(const Type* super_type,
- const std::string& name, bool transient,
- const std::string& generates,
- std::vector<Field> fields, size_t size) {
- std::vector<Field> this_struct_fields;
- size_t struct_offset = 0;
- const StructType* super_struct_type = nullptr;
- // In order to ensure "atomicity" of object allocation, a class'
- // constructors operate on a per-class internal struct rather than the class
- // directly until the constructor has successfully completed and all class
- // members are available. Create the appropriate struct type for use in the
- // class' constructors, including a '_super' field in the struct that
- // contains the values constructed by calls to super constructors.
- if (const ClassType* super_class = ClassType::DynamicCast(super_type)) {
- super_struct_type = super_class->struct_type();
- this_struct_fields.push_back(
- {CurrentSourcePosition::Get(),
- {kConstructorStructSuperFieldName, super_struct_type},
- struct_offset,
- false});
- struct_offset += LoweredSlotCount(super_struct_type);
- }
- for (auto& field : fields) {
- const Type* field_type = field.name_and_type.type;
- this_struct_fields.push_back({field.pos,
- {field.name_and_type.name, field_type},
- struct_offset,
- false});
- struct_offset += LoweredSlotCount(field_type);
- }
- StructType* this_struct_type = DeclareStruct(
- kClassConstructorThisStructPrefix + name, this_struct_fields);
-
- ClassType* new_type =
- TypeOracle::GetClassType(super_type, name, transient, generates,
- std::move(fields), this_struct_type, size);
- this_struct_type->SetDerivedFrom(new_type);
+ const Identifier* name, bool is_extern,
+ bool transient,
+ const std::string& generates) {
+ ClassType* new_type = TypeOracle::GetClassType(
+ super_type, name->value, is_extern, transient, generates);
DeclareType(name, new_type, false);
return new_type;
}
@@ -304,18 +287,19 @@ RuntimeFunction* Declarations::DeclareRuntimeFunction(
new RuntimeFunction(name, signature, transitioning))));
}
-void Declarations::DeclareExternConstant(const std::string& name,
- const Type* type, std::string value) {
- CheckAlreadyDeclared<Value>(name, "constant");
+void Declarations::DeclareExternConstant(Identifier* name, const Type* type,
+ std::string value) {
+ CheckAlreadyDeclared<Value>(name->value, "constant");
ExternConstant* result = new ExternConstant(name, type, value);
- Declare(name, std::unique_ptr<Declarable>(result));
+ Declare(name->value, std::unique_ptr<Declarable>(result));
}
-NamespaceConstant* Declarations::DeclareNamespaceConstant(
- const std::string& name, const Type* type, Expression* body) {
- CheckAlreadyDeclared<Value>(name, "constant");
+NamespaceConstant* Declarations::DeclareNamespaceConstant(Identifier* name,
+ const Type* type,
+ Expression* body) {
+ CheckAlreadyDeclared<Value>(name->value, "constant");
NamespaceConstant* result = new NamespaceConstant(name, type, body);
- Declare(name, std::unique_ptr<Declarable>(result));
+ Declare(name->value, std::unique_ptr<Declarable>(result));
return result;
}
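
The hunks above swap plain std::string names for Identifier* throughout the declaration API, so every declared type and constant carries the SourcePosition of its name alongside its value, and LookupTypeAlias exposes the alias itself so that callers such as GetType can read GetDeclarationPosition() and hand it to the language server. A minimal self-contained sketch of that pattern, with stand-in types (Identifier, TypeAlias, and the flat map are simplified models, not the real Torque declaration machinery):

    // Simplified sketch of the Identifier-carrying declaration pattern;
    // the names mirror the Torque classes but the types are stand-ins.
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    struct SourcePosition { int line, column; };

    struct Identifier {
      std::string value;   // the declared name
      SourcePosition pos;  // where the name appears in the .tq source
    };

    class TypeAlias {
     public:
      TypeAlias(std::string type, SourcePosition pos)
          : type_(std::move(type)), pos_(pos) {}
      const std::string& type() const { return type_; }
      SourcePosition GetDeclarationPosition() const { return pos_; }

     private:
      std::string type_;
      SourcePosition pos_;
    };

    std::map<std::string, std::unique_ptr<TypeAlias>> declarations;

    // Like Declarations::DeclareType above: the Identifier supplies both
    // the lookup key and the position that later feeds goto-definition.
    void DeclareType(const Identifier& name, std::string type) {
      declarations[name.value] =
          std::make_unique<TypeAlias>(std::move(type), name.pos);
    }

    int main() {
      DeclareType(Identifier{"Smi", {12, 3}}, "TNode<Smi>");
      SourcePosition def = declarations.at("Smi")->GetDeclarationPosition();
      std::cout << "Smi declared at " << def.line << ":" << def.column << "\n";
    }
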
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index efc01e8138..1c01e26894 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -54,6 +54,7 @@ class Declarations {
static std::vector<Declarable*> LookupGlobalScope(const std::string& name);
+ static const TypeAlias* LookupTypeAlias(const QualifiedName& name);
static const Type* LookupType(const QualifiedName& name);
static const Type* LookupType(std::string name);
static const Type* LookupGlobalType(const std::string& name);
@@ -74,19 +75,18 @@ class Declarations {
static Namespace* DeclareNamespace(const std::string& name);
static const AbstractType* DeclareAbstractType(
- const std::string& name, bool transient, const std::string& generated,
+ const Identifier* name, bool transient, std::string generated,
base::Optional<const AbstractType*> non_constexpr_version,
const base::Optional<std::string>& parent = {});
- static void DeclareType(const std::string& name, const Type* type,
+ static void DeclareType(const Identifier* name, const Type* type,
bool redeclaration);
- static StructType* DeclareStruct(const std::string& name,
- const std::vector<Field>& fields);
+ static StructType* DeclareStruct(const Identifier* name);
- static ClassType* DeclareClass(const Type* super, const std::string& name,
- bool transient, const std::string& generates,
- std::vector<Field> fields, size_t size);
+ static ClassType* DeclareClass(const Type* super, const Identifier* name,
+ bool is_extern, bool transient,
+ const std::string& generates);
static Macro* CreateMacro(std::string external_name,
std::string readable_name,
@@ -121,9 +121,9 @@ class Declarations {
const Signature& signature,
bool transitioning);
- static void DeclareExternConstant(const std::string& name, const Type* type,
+ static void DeclareExternConstant(Identifier* name, const Type* type,
std::string value);
- static NamespaceConstant* DeclareNamespaceConstant(const std::string& name,
+ static NamespaceConstant* DeclareNamespaceConstant(Identifier* name,
const Type* type,
Expression* body);
diff --git a/deps/v8/src/torque/earley-parser.cc b/deps/v8/src/torque/earley-parser.cc
index 69ecf3c580..ff12d4a449 100644
--- a/deps/v8/src/torque/earley-parser.cc
+++ b/deps/v8/src/torque/earley-parser.cc
@@ -17,18 +17,27 @@ namespace torque {
namespace {
-void UpdateSourcePosition(InputPosition from, InputPosition to,
- SourcePosition* pos) {
- while (from != to) {
- if (*from == '\n') {
- pos->line += 1;
- pos->column = 0;
- } else {
- pos->column += 1;
+struct LineAndColumnTracker {
+ LineAndColumn previous{0, 0};
+ LineAndColumn current{0, 0};
+
+ void Advance(InputPosition from, InputPosition to) {
+ previous = current;
+ while (from != to) {
+ if (*from == '\n') {
+ current.line += 1;
+ current.column = 0;
+ } else {
+ current.column += 1;
+ }
+ ++from;
}
- ++from;
}
-}
+
+ SourcePosition ToSourcePosition() {
+ return {CurrentSourceFile::Get(), previous, current};
+ }
+};
} // namespace
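
LineAndColumnTracker replaces the old single-point UpdateSourcePosition: previous and current bracket the characters consumed by the most recent Advance, so ToSourcePosition can describe a token as a start/end range rather than a single coordinate. A standalone sketch of the same walk, using raw char pointers in place of Torque's InputPosition:

    // Self-contained copy of the LineAndColumnTracker logic from the hunk
    // above; only the surrounding types are simplified.
    #include <iostream>
    #include <string>

    struct LineAndColumn { int line = 0, column = 0; };

    struct LineAndColumnTracker {
      LineAndColumn previous{0, 0};
      LineAndColumn current{0, 0};

      void Advance(const char* from, const char* to) {
        previous = current;  // the new span starts where the last one ended
        while (from != to) {
          if (*from == '\n') {
            current.line += 1;
            current.column = 0;
          } else {
            current.column += 1;
          }
          ++from;
        }
      }
    };

    int main() {
      std::string input = "let x\n= 1;";
      LineAndColumnTracker t;
      t.Advance(input.data(), input.data() + 3);  // consume "let"
      std::cout << "token spans " << t.previous.line << ":" << t.previous.column
                << " to " << t.current.line << ":" << t.current.column << "\n";
      t.Advance(input.data() + 3, input.data() + input.size());  // cross '\n'
      std::cout << "now at " << t.current.line << ":" << t.current.column << "\n";
    }
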
@@ -107,14 +116,18 @@ LexerResult Lexer::RunLexer(const std::string& input) {
InputPosition const end = begin + input.size();
InputPosition pos = begin;
InputPosition token_start = pos;
- CurrentSourcePosition::Scope scope(
- SourcePosition{CurrentSourceFile::Get(), 0, 0});
+ LineAndColumnTracker line_column_tracker;
+
match_whitespace_(&pos);
+ line_column_tracker.Advance(token_start, pos);
while (pos != end) {
- UpdateSourcePosition(token_start, pos, &CurrentSourcePosition::Get());
token_start = pos;
Symbol* symbol = MatchToken(&pos, end);
+ InputPosition token_end = pos;
+ line_column_tracker.Advance(token_start, token_end);
if (!symbol) {
+ CurrentSourcePosition::Scope pos_scope(
+ line_column_tracker.ToSourcePosition());
ReportError("Lexer Error: unknown token " +
StringLiteralQuote(std::string(
token_start, token_start + std::min<ptrdiff_t>(
@@ -122,12 +135,15 @@ LexerResult Lexer::RunLexer(const std::string& input) {
}
result.token_symbols.push_back(symbol);
result.token_contents.push_back(
- {token_start, pos, CurrentSourcePosition::Get()});
+ {token_start, pos, line_column_tracker.ToSourcePosition()});
match_whitespace_(&pos);
+ line_column_tracker.Advance(token_end, pos);
}
- UpdateSourcePosition(token_start, pos, &CurrentSourcePosition::Get());
+
// Add an additional token position to simplify corner cases.
- result.token_contents.push_back({pos, pos, CurrentSourcePosition::Get()});
+ line_column_tracker.Advance(token_start, pos);
+ result.token_contents.push_back(
+ {pos, pos, line_column_tracker.ToSourcePosition()});
return result;
}
@@ -176,7 +192,7 @@ const Item* RunEarleyAlgorithm(
// Worklist for items at the next position.
std::vector<Item> future_items;
CurrentSourcePosition::Scope source_position(
- SourcePosition{CurrentSourceFile::Get(), 0, 0});
+ SourcePosition{CurrentSourceFile::Get(), {0, 0}, {0, 0}});
std::vector<const Item*> completed_items;
std::unordered_map<std::pair<size_t, Symbol*>, std::set<const Item*>,
base::hash<std::pair<size_t, Symbol*>>>
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index 8efe0c704d..7a2718c059 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -39,6 +39,51 @@ class ParseResultHolderBase {
const TypeId type_id_;
};
+enum class ParseResultHolderBase::TypeId {
+ kStdString,
+ kBool,
+ kStdVectorOfString,
+ kExpressionPtr,
+ kIdentifierPtr,
+ kLocationExpressionPtr,
+ kStatementPtr,
+ kDeclarationPtr,
+ kTypeExpressionPtr,
+ kOptionalTypeExpressionPtr,
+ kLabelBlockPtr,
+ kOptionalLabelBlockPtr,
+ kNameAndTypeExpression,
+ kClassFieldExpression,
+ kStructFieldExpression,
+ kStdVectorOfNameAndTypeExpression,
+ kStdVectorOfClassFieldExpression,
+ kStdVectorOfStructFieldExpression,
+ kIncrementDecrementOperator,
+ kOptionalStdString,
+ kStdVectorOfStatementPtr,
+ kStdVectorOfDeclarationPtr,
+ kStdVectorOfExpressionPtr,
+ kExpressionWithSource,
+ kParameterList,
+ kRangeExpression,
+ kOptionalRangeExpression,
+ kTypeList,
+ kOptionalTypeList,
+ kLabelAndTypes,
+ kStdVectorOfLabelAndTypes,
+ kStdVectorOfLabelBlockPtr,
+ kOptionalStatementPtr,
+ kOptionalExpressionPtr,
+ kTypeswitchCase,
+ kStdVectorOfTypeswitchCase,
+ kStdVectorOfIdentifierPtr,
+
+ kJsonValue,
+ kJsonMember,
+ kStdVectorOfJsonValue,
+ kStdVectorOfJsonMember,
+};
+
using ParseResultTypeId = ParseResultHolderBase::TypeId;
template <class T>
@@ -71,13 +116,17 @@ class ParseResult {
explicit ParseResult(T x) : value_(new ParseResultHolder<T>(std::move(x))) {}
template <class T>
- const T& Cast() const {
+ const T& Cast() const& {
return value_->Cast<T>();
}
template <class T>
- T& Cast() {
+ T& Cast() & {
return value_->Cast<T>();
}
+ template <class T>
+ T&& Cast() && {
+ return std::move(value_->Cast<T>());
+ }
private:
std::unique_ptr<ParseResultHolderBase> value_;
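
The ref-qualified Cast() overloads above let a ParseResult that is about to expire hand its payload out by move instead of by copy. A minimal demonstration of how the three overloads resolve (Holder is a stand-in for ParseResultHolder<T>, holding a std::string directly):

    #include <iostream>
    #include <string>
    #include <utility>

    class Holder {
     public:
      explicit Holder(std::string v) : value_(std::move(v)) {}

      const std::string& Cast() const& { return value_; }    // const lvalues
      std::string& Cast() & { return value_; }               // mutable lvalues
      std::string&& Cast() && { return std::move(value_); }  // temporaries

     private:
      std::string value_;
    };

    Holder MakeResult() { return Holder{"parsed token"}; }

    int main() {
      Holder h{"lvalue"};
      std::string copied = h.Cast();            // binds Cast() &, copies
      std::string moved = MakeResult().Cast();  // binds Cast() &&, moves
      std::cout << copied << " / " << moved << "\n";
    }
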
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index 949362ca1c..f791f9a9f2 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -17,12 +17,15 @@ namespace torque {
class GlobalContext : public ContextualClass<GlobalContext> {
public:
- explicit GlobalContext(Ast ast) : verbose_(false), ast_(std::move(ast)) {
+ explicit GlobalContext(Ast ast)
+ : verbose_(false),
+ collect_language_server_data_(false),
+ ast_(std::move(ast)) {
CurrentScope::Scope current_scope(nullptr);
CurrentSourcePosition::Scope current_source_position(
- SourcePosition{CurrentSourceFile::Get(), -1, -1});
+ SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
default_namespace_ =
- RegisterDeclarable(base::make_unique<Namespace>("base"));
+ RegisterDeclarable(base::make_unique<Namespace>(kBaseNamespaceName));
}
static Namespace* GetDefaultNamespace() { return Get().default_namespace_; }
template <class T>
@@ -46,12 +49,11 @@ class GlobalContext : public ContextualClass<GlobalContext> {
return result;
}
- static void RegisterClass(const std::string& name,
- const ClassType* new_class) {
+ static void RegisterClass(const std::string& name, ClassType* new_class) {
Get().classes_[name] = new_class;
}
- static const std::map<std::string, const ClassType*>& GetClasses() {
+ static const std::map<std::string, ClassType*>& GetClasses() {
return Get().classes_;
}
@@ -64,15 +66,22 @@ class GlobalContext : public ContextualClass<GlobalContext> {
static void SetVerbose() { Get().verbose_ = true; }
static bool verbose() { return Get().verbose_; }
+ static void SetCollectLanguageServerData() {
+ Get().collect_language_server_data_ = true;
+ }
+ static bool collect_language_server_data() {
+ return Get().collect_language_server_data_;
+ }
static Ast* ast() { return &Get().ast_; }
private:
bool verbose_;
+ bool collect_language_server_data_;
Namespace* default_namespace_;
Ast ast_;
std::vector<std::unique_ptr<Declarable>> declarables_;
std::vector<std::string> cpp_includes_;
- std::map<std::string, const ClassType*> classes_;
+ std::map<std::string, ClassType*> classes_;
};
template <class T>
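
collect_language_server_data_ follows the same shape as verbose_: an opt-in, process-wide flag that tooling sets once before compilation, so position bookkeeping costs nothing in a normal build. A condensed sketch of the gating (the singleton below only loosely models ContextualClass):

    #include <iostream>

    class GlobalContext {
     public:
      static void SetCollectLanguageServerData() {
        Get().collect_language_server_data_ = true;
      }
      static bool collect_language_server_data() {
        return Get().collect_language_server_data_;
      }

     private:
      static GlobalContext& Get() {
        static GlobalContext instance;
        return instance;
      }
      bool collect_language_server_data_ = false;
    };

    void CompileReference(const char* name) {
      if (GlobalContext::collect_language_server_data()) {
        // Only a language-server run pays for definition tracking.
        std::cout << "recording definition for " << name << "\n";
      }
    }

    int main() {
      CompileReference("foo");                        // silent
      GlobalContext::SetCollectLanguageServerData();  // tooling opts in
      CompileReference("bar");                        // records
    }
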
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 5b52a31cf3..e519f9ca7c 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -4,10 +4,12 @@
#include <algorithm>
+#include "src/globals.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/implementation-visitor.h"
#include "src/torque/parameter-difference.h"
+#include "src/torque/server-data.h"
namespace v8 {
namespace internal {
@@ -74,6 +76,8 @@ void ImplementationVisitor::BeginNamespaceFile(Namespace* nspace) {
if (nspace != GlobalContext::GetDefaultNamespace()) {
header << "#include \"src/code-stub-assembler.h\"\n";
}
+ header << "#include \"src/utils.h\"\n";
+ header << "#include \"torque-generated/class-definitions-from-dsl.h\"\n";
header << "\n";
header << "namespace v8 {\n"
@@ -103,8 +107,8 @@ void ImplementationVisitor::EndNamespaceFile(Namespace* nspace) {
header << " private:\n"
<< " compiler::CodeAssemblerState* const state_;\n"
- << " compiler::CodeAssembler ca_;"
- << "}; \n\n";
+ << " compiler::CodeAssembler ca_;\n"
+ << "};\n\n";
header << "} // namespace internal\n"
<< "} // namespace v8\n"
<< "\n";
@@ -113,7 +117,7 @@ void ImplementationVisitor::EndNamespaceFile(Namespace* nspace) {
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(), {}};
- const std::string& name = decl->name();
+ const std::string& name = decl->name()->value;
BindingsManagersScope bindings_managers_scope;
@@ -147,6 +151,32 @@ void ImplementationVisitor::Visit(NamespaceConstant* decl) {
void ImplementationVisitor::Visit(TypeAlias* alias) {
if (alias->IsRedeclaration()) return;
+ const ClassType* class_type = ClassType::DynamicCast(alias->type());
+ if (class_type && class_type->IsExtern()) {
+ // Classes that are in the default namespace are defined in the C++
+ // world and all of their fields and methods are declared explicitly.
+ // Internal classes (e.g. ones used for testing that are not in the default

+ // namespace) need to be defined by Torque.
+ // TODO(danno): This is a pretty cheesy hack for now. There should be a more
+ // robust mechanism for this, e.g. declaring classes 'extern' or something.
+ if (class_type->nspace()->IsTestNamespace()) {
+ std::string class_name{
+ class_type->GetSuperClass()->GetGeneratedTNodeTypeName()};
+ header_out() << " class " << class_type->name() << " : public "
+ << class_name << " {\n";
+ header_out() << " public:\n";
+ header_out() << " DEFINE_FIELD_OFFSET_CONSTANTS(" << class_name
+ << "::kSize, TORQUE_GENERATED_"
+ << CapifyStringWithUnderscores(class_type->name())
+ << "_FIELDS)\n";
+ header_out() << " };\n";
+ } else if (!class_type->nspace()->IsDefaultNamespace()) {
+ ReportError(
+ "extern classes are currently only supported in the default and test "
+ "namespaces");
+ }
+ return;
+ }
const StructType* struct_type = StructType::DynamicCast(alias->type());
if (!struct_type) return;
const std::string& name = struct_type->name();
@@ -195,10 +225,6 @@ VisitResult ImplementationVisitor::InlineMacro(
const Type* return_type = macro->signature().return_type;
bool can_return = return_type != TypeOracle::GetNeverType();
- CurrentConstructorInfo::Scope current_constructor;
- if (macro->IsConstructor())
- CurrentConstructorInfo::Get() = ConstructorInfo{0};
-
BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
BlockBindings<LocalLabel> label_bindings(&LabelBindingsManager::Get());
DCHECK_EQ(macro->signature().parameter_names.size(),
@@ -222,7 +248,7 @@ VisitResult ImplementationVisitor::InlineMacro(
size_t i = 0;
for (auto arg : arguments) {
if (this_reference && i == signature.implicit_count) i++;
- const std::string& name = macro->parameter_names()[i++];
+ const Identifier* name = macro->parameter_names()[i++];
parameter_bindings.Add(name, LocalValue{true, arg});
}
@@ -295,11 +321,6 @@ VisitResult ImplementationVisitor::InlineMacro(
}
void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
- // Do not generate code for inlined macros.
- if (macro->ShouldBeInlined()) {
- return;
- }
-
CurrentCallable::Scope current_callable(macro);
const Signature& signature = macro->signature();
const Type* return_type = macro->signature().return_type;
@@ -307,10 +328,6 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
bool has_return_value =
can_return && return_type != TypeOracle::GetVoidType();
- // Struct methods should never generate code, they should always be inlined
- DCHECK(!macro->IsMethod() ||
- Method::cast(macro)->aggregate_type()->IsClassType());
-
header_out() << " ";
GenerateMacroFunctionDeclaration(header_out(), "", macro);
header_out() << ";\n";
@@ -327,19 +344,28 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
base::Optional<LocationReference> this_reference;
if (Method* method = Method::DynamicCast(macro)) {
const Type* this_type = method->aggregate_type();
- DCHECK(this_type->IsClassType());
- lowered_parameter_types.Push(this_type);
- lowered_parameters.Push(ExternalParameterName(kThisParameterName));
- VisitResult this_result =
- VisitResult(this_type, lowered_parameters.TopRange(1));
- // Mark the this as a temporary to prevent assignment to it.
+ LowerParameter(this_type, ExternalParameterName(kThisParameterName),
+ &lowered_parameters);
+ StackRange range = lowered_parameter_types.PushMany(LowerType(this_type));
+ VisitResult this_result = VisitResult(this_type, range);
+ // For classes, mark 'this' as a temporary to prevent assignment to it.
+ // Note that using a VariableAccess for non-class types is technically
+ // incorrect because changes to the 'this' variable do not get reflected
+ // to the caller. Therefore struct methods should always be inlined and a
+ // C++ version should never be generated, since it would be incorrect.
+ // However, in order to be able to type- and semantics-check even unused
+ // struct methods, set the this_reference to be the local variable copy of
+ // the passed-in this, which allows the visitor to at least find and report
+ // errors.
this_reference =
- LocationReference::Temporary(this_result, "this parameter");
+ (this_type->IsClassType())
+ ? LocationReference::Temporary(this_result, "this parameter")
+ : LocationReference::VariableAccess(this_result);
}
for (size_t i = 0; i < macro->signature().parameter_names.size(); ++i) {
if (this_reference && i == macro->signature().implicit_count) continue;
- const std::string& name = macro->parameter_names()[i];
+ const std::string& name = macro->parameter_names()[i]->value;
std::string external_name = ExternalParameterName(name);
const Type* type = macro->signature().types()[i];
@@ -378,8 +404,9 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
assembler().Bind(label_block);
std::vector<std::string> label_parameter_variables;
for (size_t i = 0; i < label_info.types.size(); ++i) {
- label_parameter_variables.push_back(
- ExternalLabelParameterName(label_info.name, i));
+ LowerLabelParameter(label_info.types[i],
+ ExternalLabelParameterName(label_info.name, i),
+ &label_parameter_variables);
}
assembler().Emit(GotoExternalInstruction{ExternalLabelName(label_info.name),
label_parameter_variables});
@@ -419,7 +446,7 @@ std::string AddParameter(size_t i, Builtin* builtin,
Stack<std::string>* parameters,
Stack<const Type*>* parameter_types,
BlockBindings<LocalValue>* parameter_bindings) {
- const std::string& name = builtin->signature().parameter_names[i];
+ const Identifier* name = builtin->signature().parameter_names[i];
const Type* type = builtin->signature().types()[i];
std::string external_name = "parameter" + std::to_string(i);
parameters->Push(external_name);
@@ -482,7 +509,7 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
if (i < first) continue;
- const std::string& parameter_name = signature.parameter_names[i];
+ const std::string& parameter_name = signature.parameter_names[i]->value;
const Type* type = signature.types()[i];
std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
&parameter_bindings);
@@ -512,11 +539,6 @@ const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
const Type* ImplementationVisitor::Visit(
VarDeclarationStatement* stmt, BlockBindings<LocalValue>* block_bindings) {
- if (!stmt->const_qualified && !stmt->type) {
- ReportError(
- "variable declaration is missing type. Only 'const' bindings can "
- "infer the type.");
- }
// const qualified variables are required to be initialized properly.
if (stmt->const_qualified && !stmt->initializer) {
ReportError("local constant \"", stmt->name, "\" is not initialized.");
@@ -546,7 +568,7 @@ const Type* ImplementationVisitor::Visit(
TypeVector lowered_types = LowerType(*type);
for (const Type* type : lowered_types) {
assembler().Emit(PushUninitializedInstruction{TypeOracle::GetTopType(
- "unitialized variable '" + stmt->name + "' of type " +
+ "unitialized variable '" + stmt->name->value + "' of type " +
type->ToString() + " originally defined at " +
PositionAsString(stmt->pos),
type)});
@@ -1096,7 +1118,7 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
element_result = element_scope.Yield(result);
}
Binding<LocalValue> element_var_binding{&ValueBindingsManager::Get(),
- stmt->var_declaration->name,
+ stmt->var_declaration->name->value,
LocalValue{true, element_result}};
Visit(stmt->body);
}
@@ -1219,6 +1241,61 @@ VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
return VisitResult{Visit(expr->statement), assembler().TopRange(0)};
}
+InitializerResults ImplementationVisitor::VisitInitializerResults(
+ const std::vector<Expression*>& expressions) {
+ InitializerResults result;
+ for (auto e : expressions) {
+ result.results.push_back(Visit(e));
+ }
+ return result;
+}
+
+size_t ImplementationVisitor::InitializeAggregateHelper(
+ const AggregateType* aggregate_type, VisitResult allocate_result,
+ const InitializerResults& initializer_results) {
+ const ClassType* current_class = ClassType::DynamicCast(aggregate_type);
+ size_t current = 0;
+ if (current_class) {
+ const ClassType* super = current_class->GetSuperClass();
+ if (super) {
+ current = InitializeAggregateHelper(super, allocate_result,
+ initializer_results);
+ }
+ }
+
+ for (auto f : aggregate_type->fields()) {
+ if (current == initializer_results.results.size()) {
+ ReportError("insufficient number of initializers for ",
+ aggregate_type->name());
+ }
+ VisitResult current_value = initializer_results.results[current];
+ if (aggregate_type->IsClassType()) {
+ allocate_result.SetType(aggregate_type);
+ GenerateCopy(allocate_result);
+ GenerateImplicitConvert(f.name_and_type.type, current_value);
+ assembler().Emit(StoreObjectFieldInstruction(
+ ClassType::cast(aggregate_type), f.name_and_type.name));
+ } else {
+ LocationReference struct_field_ref = LocationReference::VariableAccess(
+ ProjectStructField(allocate_result, f.name_and_type.name));
+ GenerateAssignToLocation(struct_field_ref, current_value);
+ }
+ ++current;
+ }
+ return current;
+}
+
+void ImplementationVisitor::InitializeAggregate(
+ const AggregateType* aggregate_type, VisitResult allocate_result,
+ const InitializerResults& initializer_results) {
+ size_t consumed_initializers = InitializeAggregateHelper(
+ aggregate_type, allocate_result, initializer_results);
+ if (consumed_initializers != initializer_results.results.size()) {
+ ReportError("more initializers than fields present in ",
+ aggregate_type->name());
+ }
+}
+
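
InitializeAggregateHelper consumes the initializer list superclass-first, so the leading expressions fill base-class fields before the derived class's own, and the top-level InitializeAggregate rejects any leftovers. A self-contained model of that ordering with toy types:

    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct ClassType {
      std::string name;
      const ClassType* super = nullptr;
      std::vector<std::string> fields;
    };

    // Mirrors InitializeAggregateHelper: recurse into the super chain first,
    // return how many initializers have been consumed so far.
    size_t InitializeHelper(const ClassType& type,
                            const std::vector<int>& initializers) {
      size_t current = 0;
      if (type.super) current = InitializeHelper(*type.super, initializers);
      for (const std::string& field : type.fields) {
        if (current == initializers.size())
          throw std::runtime_error("insufficient number of initializers for " +
                                   type.name);
        std::cout << type.name << "." << field << " = "
                  << initializers[current++] << "\n";
      }
      return current;
    }

    void Initialize(const ClassType& type, const std::vector<int>& inits) {
      if (InitializeHelper(type, inits) != inits.size())
        throw std::runtime_error("more initializers than fields present in " +
                                 type.name);
    }

    int main() {
      ClassType base{"Base", nullptr, {"map", "length"}};
      ClassType derived{"Derived", &base, {"elements"}};
      Initialize(derived, {1, 2, 3});  // Base.map, Base.length, Derived.elements
    }
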
VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
StackScope stack_scope(this);
const Type* type = Declarations::GetType(expr->type);
@@ -1228,68 +1305,44 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
"\" is not");
}
- // In order to ensure "atomicity" of object allocation, a class' constructors
- // operate on a per-class internal struct rather than the class directly until
- // the constructor has successfully completed and all class members are
- // available. Create the appropriate unitialized struct and pass it to the
- // matching class constructor with the arguments that were passed to new{}
- StructType* class_this_struct = class_type->struct_type();
- VisitResult unitialized_struct = TemporaryUninitializedStruct(
- class_this_struct,
- "it's not set in the constructor for class " + class_type->name());
- Arguments constructor_arguments;
- for (auto p : expr->parameters) {
- constructor_arguments.parameters.push_back(Visit(p));
- }
- LocationReference unitialized_struct_ref =
- LocationReference::VariableAccess(unitialized_struct);
- Callable* callable =
- LookupConstructor(unitialized_struct_ref, constructor_arguments, {});
- GenerateCall(callable, unitialized_struct_ref, constructor_arguments,
- {class_type}, false);
- VisitResult new_struct_result = unitialized_struct;
+ if (!class_type->AllowInstantiation()) {
+ // Classes that are only used for testing should never be instantiated.
+ ReportError(*class_type,
+ " cannot be allocated with new (it's used for testing)");
+ }
+
+ InitializerResults initializer_results =
+ VisitInitializerResults(expr->parameters);
// Output the code to generate an uninitialized object of the class size in the
// GC heap.
- Arguments allocate_arguments;
- allocate_arguments.parameters.push_back(VisitResult(
- TypeOracle::GetConstInt31Type(), std::to_string(class_type->size())));
- VisitResult allocate_result =
- GenerateCall("%Allocate", allocate_arguments, {class_type}, false);
- DCHECK(allocate_result.IsOnStack());
-
- // Fill in the fields of the newly allocated class by copying the values
- // from the struct that was built by the constructor. So that the generaeted
- // code is a bit more readable, assign the values from the first class
- // member to the last, in order. To do this, first build a list of fields
- // to assign to in reverse order by visiting the class heirarchy.
- std::vector<std::pair<const Field*, VisitResult>> store_pairs;
- const ClassType* current_class = class_type;
- while (current_class != nullptr) {
- auto& fields = current_class->fields();
- for (auto i = fields.rbegin(); i != fields.rend(); ++i) {
- store_pairs.push_back(std::make_pair(
- &*i, ProjectStructField(new_struct_result, i->name_and_type.name)));
- }
- current_class = current_class->GetSuperClass();
- if (current_class) {
- new_struct_result = ProjectStructField(new_struct_result,
- kConstructorStructSuperFieldName);
- }
- }
-
- // Now that the reversed list of fields and the assignment VisitResults are
- // available, emit the copies in reverse order of the reversed list to
- // produce the class field assignments in the expected order.
- for (auto i = store_pairs.rbegin(); i != store_pairs.rend(); ++i) {
- assembler().Emit(
- PeekInstruction(allocate_result.stack_range().begin(), class_type));
- assembler().Emit(PeekInstruction(i->second.stack_range().begin(),
- i->first->name_and_type.type));
- assembler().Emit(
- StoreObjectFieldInstruction(class_type, i->first->name_and_type.name));
+ VisitResult allocate_result;
+ if (class_type->IsExtern()) {
+ if (initializer_results.results.size() == 0) {
+ ReportError(
+ "external classes initializers must have a map as first parameter");
+ }
+ VisitResult object_map = initializer_results.results[0];
+ Arguments size_arguments;
+ size_arguments.parameters.push_back(object_map);
+ VisitResult object_size = GenerateCall("%GetAllocationBaseSize",
+ size_arguments, {class_type}, false);
+ Arguments allocate_arguments;
+ allocate_arguments.parameters.push_back(object_size);
+ allocate_result =
+ GenerateCall("%Allocate", allocate_arguments, {class_type}, false);
+ DCHECK(allocate_result.IsOnStack());
+ } else {
+ Arguments allocate_arguments;
+ allocate_arguments.parameters.push_back(
+ VisitResult(TypeOracle::GetConstexprIntPtrType(),
+ std::to_string(class_type->size() / kTaggedSize)));
+ allocate_result = GenerateCall("%AllocateInternalClass", allocate_arguments,
+ {class_type}, false);
}
+ InitializeAggregate(class_type, allocate_result, initializer_results);
+
return stack_scope.Yield(allocate_result);
}
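
Allocation therefore has two paths now: extern classes take their map as the first initializer and size themselves from it at runtime, while Torque-internal classes pass their statically known size, in tagged slots, to %AllocateInternalClass. A condensed model of the split (MapInstanceSize and the constants are illustrative stand-ins, not V8's real size computation):

    #include <cstddef>
    #include <iostream>
    #include <stdexcept>
    #include <vector>

    constexpr size_t kTaggedSize = 8;

    struct ClassType {
      bool is_extern;
      size_t static_size;  // bytes; only meaningful for internal classes
    };

    // Stand-in for %GetAllocationBaseSize: in V8 the map describes the
    // instance size of the object it belongs to.
    size_t MapInstanceSize(int map) { return 16 + 8 * static_cast<size_t>(map); }

    size_t AllocationArgument(const ClassType& type,
                              const std::vector<int>& initializers) {
      if (type.is_extern) {
        if (initializers.empty())
          throw std::runtime_error(
              "external class initializers must have a map as the first "
              "parameter");
        return MapInstanceSize(initializers[0]);  // runtime size from the map
      }
      return type.static_size / kTaggedSize;  // static size, in tagged slots
    }

    int main() {
      std::cout << AllocationArgument({true, 0}, {2}) << " bytes (extern)\n";
      std::cout << AllocationArgument({false, 32}, {}) << " slots (internal)\n";
    }
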
@@ -1400,14 +1453,14 @@ void ImplementationVisitor::GenerateFunctionDeclaration(
DCHECK_EQ(signature.types().size(), parameter_names.size());
auto type_iterator = signature.types().begin();
bool first = true;
- for (const std::string& name : parameter_names) {
+ for (const Identifier* name : parameter_names) {
if (!first) {
o << ", ";
}
const Type* parameter_type = *type_iterator;
const std::string& generated_type_name =
parameter_type->GetGeneratedTypeName();
- o << generated_type_name << " " << ExternalParameterName(name);
+ o << generated_type_name << " " << ExternalParameterName(name->value);
type_iterator++;
first = false;
}
@@ -1419,9 +1472,14 @@ void ImplementationVisitor::GenerateFunctionDeclaration(
o << "compiler::CodeAssemblerLabel* " << ExternalLabelName(label_info.name);
size_t i = 0;
for (const Type* type : label_info.types) {
- std::string generated_type_name("compiler::TypedCodeAssemblerVariable<");
- generated_type_name += type->GetGeneratedTNodeTypeName();
- generated_type_name += ">*";
+ std::string generated_type_name;
+ if (type->IsStructType()) {
+ generated_type_name = "\n#error no structs allowed in labels\n";
+ } else {
+ generated_type_name = "compiler::TypedCodeAssemblerVariable<";
+ generated_type_name += type->GetGeneratedTNodeTypeName();
+ generated_type_name += ">*";
+ }
o << ", ";
o << generated_type_name << " "
<< ExternalLabelParameterName(label_info.name, i);
@@ -1522,16 +1580,9 @@ Callable* ImplementationVisitor::LookupCallable(
const Signature& signature = overload_signatures[i];
bool try_bool_context = labels.size() == 0 &&
signature.return_type == TypeOracle::GetNeverType();
- base::Optional<Binding<LocalLabel>*> true_label;
- base::Optional<Binding<LocalLabel>*> false_label;
- if (try_bool_context) {
- true_label = TryLookupLabel(kTrueLabelName);
- false_label = TryLookupLabel(kFalseLabelName);
- }
- if (IsCompatibleSignature(signature, parameter_types, labels) ||
- (true_label && false_label &&
- IsCompatibleSignature(signature, parameter_types,
- {*true_label, *false_label}))) {
+ if (IsCompatibleSignature(signature, parameter_types, labels.size()) ||
+ (try_bool_context &&
+ IsCompatibleSignature(signature, parameter_types, 2))) {
candidates.push_back(i);
}
}
@@ -1642,20 +1693,18 @@ VisitResult ImplementationVisitor::Visit(StructExpression* decl) {
s << decl->name << " is not a struct but used like one ";
ReportError(s.str());
}
+
+ InitializerResults initialization_results =
+ ImplementationVisitor::VisitInitializerResults(decl->expressions);
+
const StructType* struct_type = StructType::cast(raw_type);
// Push uninitialized 'this'
- VisitResult uninitialized_struct = TemporaryUninitializedStruct(
- struct_type,
- "it's not set in the constructor for struct " + struct_type->name());
- Arguments constructor_arguments;
- for (auto p : decl->expressions) {
- constructor_arguments.parameters.push_back(Visit(p));
- }
- LocationReference this_ref =
- LocationReference::VariableAccess(uninitialized_struct);
- Callable* callable = LookupConstructor(this_ref, constructor_arguments, {});
- GenerateCall(callable, this_ref, constructor_arguments, {}, false);
- return stack_scope.Yield(uninitialized_struct);
+ VisitResult result = TemporaryUninitializedStruct(
+ struct_type, "it's not initialized in the struct " + struct_type->name());
+
+ InitializeAggregate(struct_type, result, initialization_results);
+
+ return stack_scope.Yield(result);
}
LocationReference ImplementationVisitor::GetLocationReference(
@@ -1687,29 +1736,48 @@ LocationReference ImplementationVisitor::GetLocationReference(
ProjectStructField(reference.temporary(), expr->field),
reference.temporary_description());
}
- return LocationReference::FieldAccess(GenerateFetchFromLocation(reference),
- expr->field);
+ VisitResult object_result = GenerateFetchFromLocation(reference);
+ if (const ClassType* class_type =
+ ClassType::DynamicCast(object_result.type())) {
+ if (class_type->HasField(expr->field)) {
+ const Field& field = (class_type->LookupField(expr->field));
+ if (field.index) {
+ return LocationReference::IndexedFieldAccess(object_result,
+ expr->field);
+ }
+ }
+ }
+ return LocationReference::FieldAccess(object_result, expr->field);
}
LocationReference ImplementationVisitor::GetLocationReference(
ElementAccessExpression* expr) {
- VisitResult array = Visit(expr->array);
+ LocationReference reference = GetLocationReference(expr->array);
VisitResult index = Visit(expr->index);
- return LocationReference::ArrayAccess(array, index);
+ if (reference.IsIndexedFieldAccess()) {
+ return LocationReference::IndexedFieldIndexedAccess(reference, index);
+ } else {
+ return LocationReference::ArrayAccess(GenerateFetchFromLocation(reference),
+ index);
+ }
}
LocationReference ImplementationVisitor::GetLocationReference(
IdentifierExpression* expr) {
if (expr->namespace_qualification.empty()) {
if (base::Optional<Binding<LocalValue>*> value =
- TryLookupLocalValue(expr->name)) {
+ TryLookupLocalValue(expr->name->value)) {
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::AddDefinition(expr->name->pos,
+ (*value)->declaration_position());
+ }
if (expr->generic_arguments.size() != 0) {
ReportError("cannot have generic parameters on local name ",
expr->name);
}
if ((*value)->is_const) {
- return LocationReference::Temporary((*value)->value,
- "constant value " + expr->name);
+ return LocationReference::Temporary(
+ (*value)->value, "constant value " + expr->name->value);
}
return LocationReference::VariableAccess((*value)->value);
}
@@ -1718,10 +1786,14 @@ LocationReference ImplementationVisitor::GetLocationReference(
if (expr->IsThis()) {
ReportError("\"this\" cannot be qualified");
}
- QualifiedName name = QualifiedName(expr->namespace_qualification, expr->name);
+ QualifiedName name =
+ QualifiedName(expr->namespace_qualification, expr->name->value);
if (base::Optional<Builtin*> builtin = Declarations::TryLookupBuiltin(name)) {
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::AddDefinition(expr->name->pos, (*builtin)->pos());
+ }
return LocationReference::Temporary(GetBuiltinCode(*builtin),
- "builtin " + expr->name);
+ "builtin " + expr->name->value);
}
if (expr->generic_arguments.size() != 0) {
Generic* generic = Declarations::LookupUniqueGeneric(name);
@@ -1730,31 +1802,34 @@ LocationReference ImplementationVisitor::GetLocationReference(
if (Builtin* builtin = Builtin::DynamicCast(specialization)) {
DCHECK(!builtin->IsExternal());
return LocationReference::Temporary(GetBuiltinCode(builtin),
- "builtin " + expr->name);
+ "builtin " + expr->name->value);
} else {
ReportError("cannot create function pointer for non-builtin ",
generic->name());
}
}
Value* value = Declarations::LookupValue(name);
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::AddDefinition(expr->name->pos, value->name()->pos);
+ }
if (auto* constant = NamespaceConstant::DynamicCast(value)) {
if (constant->type()->IsConstexpr()) {
return LocationReference::Temporary(
VisitResult(constant->type(), constant->ExternalAssemblerName() +
"(state_)." +
- constant->constant_name() + "()"),
- "namespace constant " + expr->name);
+ constant->name()->value + "()"),
+ "namespace constant " + expr->name->value);
}
assembler().Emit(NamespaceConstantInstruction{constant});
StackRange stack_range =
assembler().TopRange(LoweredSlotCount(constant->type()));
return LocationReference::Temporary(
VisitResult(constant->type(), stack_range),
- "namespace constant " + expr->name);
+ "namespace constant " + expr->name->value);
}
ExternConstant* constant = ExternConstant::cast(value);
return LocationReference::Temporary(constant->value(),
- "extern value " + expr->name);
+ "extern value " + expr->name->value);
}
VisitResult ImplementationVisitor::GenerateFetchFromLocation(
@@ -1764,6 +1839,10 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
} else if (reference.IsVariableAccess()) {
return GenerateCopy(reference.variable());
} else {
+ if (reference.IsIndexedFieldAccess()) {
+ ReportError(
+ "fetching a value directly from an indexed field isn't allowed");
+ }
DCHECK(reference.IsCallAccess());
return GenerateCall(reference.eval_function(),
Arguments{reference.call_arguments(), {}});
@@ -1782,6 +1861,8 @@ void ImplementationVisitor::GenerateAssignToLocation(
GenerateImplicitConvert(variable.type(), assignment_value);
assembler().Poke(variable.stack_range(), converted_value.stack_range(),
variable.type());
+ } else if (reference.IsIndexedFieldAccess()) {
+ ReportError("assigning a value directly to an indexed field isn't allowed");
} else {
DCHECK(reference.IsTemporary());
ReportError("cannot assign to temporary ",
@@ -1815,7 +1896,7 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
ParameterTypes types{type->parameter_types(), false};
Signature sig;
sig.parameter_types = types;
- if (!IsCompatibleSignature(sig, parameter_types, {})) {
+ if (!IsCompatibleSignature(sig, parameter_types, 0)) {
std::stringstream stream;
stream << "parameters do not match function pointer signature. Expected: ("
<< type->parameter_types() << ") but got: (" << parameter_types
@@ -1865,10 +1946,19 @@ VisitResult ImplementationVisitor::GenerateCall(
// return but have a True and False label
if (arguments.labels.size() == 0 &&
callable->signature().labels.size() == 2) {
- Binding<LocalLabel>* true_label = LookupLabel(kTrueLabelName);
- arguments.labels.push_back(true_label);
- Binding<LocalLabel>* false_label = LookupLabel(kFalseLabelName);
- arguments.labels.push_back(false_label);
+ base::Optional<Binding<LocalLabel>*> true_label =
+ TryLookupLabel(kTrueLabelName);
+ base::Optional<Binding<LocalLabel>*> false_label =
+ TryLookupLabel(kFalseLabelName);
+ if (!true_label || !false_label) {
+ ReportError(
+ callable->ReadableName(),
+ " does not return a value, but has to be called in a branching "
+ "context (e.g., conditional or if-condition). You can fix this by "
+ "adding \"? true : false\".");
+ }
+ arguments.labels.push_back(*true_label);
+ arguments.labels.push_back(*false_label);
}
const Type* return_type = callable->signature().return_type;
@@ -1879,7 +1969,8 @@ VisitResult ImplementationVisitor::GenerateCall(
size_t current = 0;
for (; current < callable->signature().implicit_count; ++current) {
- std::string implicit_name = callable->signature().parameter_names[current];
+ std::string implicit_name =
+ callable->signature().parameter_names[current]->value;
base::Optional<Binding<LocalValue>*> val =
TryLookupLocalValue(implicit_name);
if (!val) {
@@ -2083,8 +2174,8 @@ VisitResult ImplementationVisitor::GenerateCall(
result << ")";
return VisitResult(return_type, result.str());
} else {
- assembler().Emit(
- CallIntrinsicInstruction{intrinsic, constexpr_arguments});
+ assembler().Emit(CallIntrinsicInstruction{intrinsic, specialization_types,
+ constexpr_arguments});
size_t return_slot_count =
LoweredSlotCount(intrinsic->signature().return_type);
return VisitResult(return_type, assembler().TopRange(return_slot_count));
@@ -2108,8 +2199,8 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
bool is_tailcall) {
StackScope scope(this);
Arguments arguments;
- QualifiedName name =
- QualifiedName(expr->callee->namespace_qualification, expr->callee->name);
+ QualifiedName name = QualifiedName(expr->callee->namespace_qualification,
+ expr->callee->name->value);
TypeVector specialization_types =
GetTypeVector(expr->callee->generic_arguments);
bool has_template_arguments = !specialization_types.empty();
@@ -2121,6 +2212,12 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
return scope.Yield(
GeneratePointerCall(expr->callee, arguments, is_tailcall));
} else {
+ if (GlobalContext::collect_language_server_data()) {
+ Callable* callable = LookupCallable(name, Declarations::Lookup(name),
+ arguments, specialization_types);
+ LanguageServerData::AddDefinition(expr->callee->name->pos,
+ callable->pos());
+ }
return scope.Yield(
GenerateCall(name, arguments, specialization_types, is_tailcall));
}
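
Each collect_language_server_data() block above records a pair of positions: where a name is used and where it was defined. A hypothetical miniature of the LanguageServerData side, whose real home is src/torque/server-data.h (the container shape here is an assumption):

    #include <iostream>
    #include <map>
    #include <tuple>

    struct SourcePosition {
      int line, column;
      bool operator<(const SourcePosition& o) const {
        return std::tie(line, column) < std::tie(o.line, o.column);
      }
    };

    class LanguageServerData {
     public:
      static void AddDefinition(SourcePosition use, SourcePosition definition) {
        definitions()[use] = definition;
      }
      static const SourcePosition* FindDefinition(SourcePosition use) {
        auto it = definitions().find(use);
        return it == definitions().end() ? nullptr : &it->second;
      }

     private:
      static std::map<SourcePosition, SourcePosition>& definitions() {
        static std::map<SourcePosition, SourcePosition> map;
        return map;
      }
    };

    int main() {
      LanguageServerData::AddDefinition({120, 8}, {42, 2});  // use -> definition
      if (const SourcePosition* def =
              LanguageServerData::FindDefinition({120, 8})) {
        std::cout << "defined at " << def->line << ":" << def->column << "\n";
      }
    }
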
@@ -2129,7 +2226,7 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
StackScope scope(this);
Arguments arguments;
- std::string method_name = expr->method->name;
+ std::string method_name = expr->method->name->value;
TypeVector specialization_types =
GetTypeVector(expr->method->generic_arguments);
LocationReference target = GetLocationReference(expr->target);
@@ -2142,33 +2239,6 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
if (!target_type) {
ReportError("target of method call not a struct or class type");
}
- if (method_name == kConstructMethodName || method_name == kSuperMethodName) {
- if (CurrentConstructorInfo::Get()) {
- ConstructorInfo& info = *CurrentConstructorInfo::Get();
- if (method_name == kSuperMethodName) {
- if (info.super_calls != 0) {
- ReportError("\"super\" can only be called once from a constructor");
- }
- ++info.super_calls;
- DCHECK(target_type->IsStructType());
- base::Optional<const ClassType*> derived_from =
- StructType::cast(target_type)->GetDerivedFrom();
- if (!derived_from) {
- ReportError("\"super\" can only be called from class constructors");
- }
- if ((*derived_from)->GetSuperClass() == nullptr) {
- ReportError(
- "\"super\" can only be called in constructors for derived "
- "classes");
- }
- } else {
- ReportError("cannot call a constructor from a constructor");
- }
- } else {
- ReportError(
- "cannot call a constructor or \"super\" from a non-constructor");
- }
- }
for (Expression* arg : expr->arguments) {
arguments.parameters.push_back(Visit(arg));
}
@@ -2177,19 +2247,7 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
DCHECK_EQ(expr->method->namespace_qualification.size(), 0);
QualifiedName qualified_name = QualifiedName(method_name);
Callable* callable = nullptr;
- if (method_name == kConstructMethodName) {
- callable = LookupConstructor(target, arguments, {});
- } else if (method_name == kSuperMethodName) {
- LocationReference super_this =
- LocationReference::VariableAccess(ProjectStructField(
- target.GetVisitResult(), kConstructorStructSuperFieldName));
- callable = LookupConstructor(super_this, arguments, {});
- VisitResult super_result =
- GenerateCall(callable, super_this, arguments, {}, false);
- return scope.Yield(super_result);
- } else {
- callable = LookupMethod(method_name, target, arguments, {});
- }
+ callable = LookupMethod(method_name, target, arguments, {});
return scope.Yield(GenerateCall(callable, target, arguments, {}, false));
}
@@ -2322,6 +2380,21 @@ StackRange ImplementationVisitor::LowerParameter(
}
}
+void ImplementationVisitor::LowerLabelParameter(
+ const Type* type, const std::string& parameter_name,
+ std::vector<std::string>* lowered_parameters) {
+ if (const StructType* struct_type = StructType::DynamicCast(type)) {
+ for (auto& field : struct_type->fields()) {
+ LowerLabelParameter(
+ field.name_and_type.type,
+ "&((*" + parameter_name + ")." + field.name_and_type.name + ")",
+ lowered_parameters);
+ }
+ } else {
+ lowered_parameters->push_back(parameter_name);
+ }
+}
+
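
LowerLabelParameter flattens a struct-typed label parameter into one output variable per scalar leaf, synthesizing a C++ lvalue expression for each nested field. The recursion extracted into a standalone toy (a Type with a non-empty fields vector models a StructType):

    #include <iostream>
    #include <string>
    #include <vector>

    struct Type;
    struct Field { std::string name; const Type* type; };
    struct Type { std::vector<Field> fields; };  // empty fields => scalar

    void LowerLabelParameter(const Type& type, const std::string& parameter_name,
                             std::vector<std::string>* lowered) {
      if (type.fields.empty()) {
        lowered->push_back(parameter_name);  // scalar: keep the name as-is
        return;
      }
      for (const Field& field : type.fields) {
        LowerLabelParameter(*field.type,
                            "&((*" + parameter_name + ")." + field.name + ")",
                            lowered);
      }
    }

    int main() {
      Type scalar;
      Type pair{{{"first", &scalar}, {"second", &scalar}}};
      std::vector<std::string> lowered;
      LowerLabelParameter(pair, "label_Found_parameter_0", &lowered);
      for (const std::string& p : lowered) std::cout << p << "\n";
      // &((*label_Found_parameter_0).first)
      // &((*label_Found_parameter_0).second)
    }
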
std::string ImplementationVisitor::ExternalLabelName(
const std::string& label_name) {
return "label_" + label_name;
@@ -2337,22 +2410,17 @@ std::string ImplementationVisitor::ExternalParameterName(
return std::string("p_") + name;
}
-DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::ValueBindingsManager);
-DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::LabelBindingsManager);
-DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentCallable);
-DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentReturnValue);
-DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentConstructorInfo);
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::ValueBindingsManager)
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::LabelBindingsManager)
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentCallable)
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentReturnValue)
bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
- const std::vector<Binding<LocalLabel>*>& labels) {
+ size_t label_count) {
auto i = sig.parameter_types.types.begin() + sig.implicit_count;
if ((sig.parameter_types.types.size() - sig.implicit_count) > types.size())
return false;
- // TODO(danno): The test below is actually insufficient. The labels'
- // parameters must be checked too. ideally, the named part of
- // LabelDeclarationVector would be factored out so that the label count and
- // parameter types could be passed separately.
- if (sig.labels.size() != labels.size()) return false;
+ if (sig.labels.size() != label_count) return false;
for (auto current : types) {
if (i == sig.parameter_types.types.end()) {
if (!sig.parameter_types.var_args) return false;
@@ -2389,6 +2457,7 @@ void ImplementationVisitor::GenerateCatchBlock(
}
void ImplementationVisitor::VisitAllDeclarables() {
+ CurrentCallable::Scope current_callable(nullptr);
const std::vector<std::unique_ptr<Declarable>>& all_declarables =
GlobalContext::AllDeclarables();
// This has to be an index-based loop because all_declarables can be extended
@@ -2399,7 +2468,6 @@ void ImplementationVisitor::VisitAllDeclarables() {
}
void ImplementationVisitor::Visit(Declarable* declarable) {
- CurrentConstructorInfo::Scope current_constructor(base::nullopt);
CurrentScope::Scope current_scope(declarable->ParentScope());
CurrentSourcePosition::Scope current_source_position(declarable->pos());
switch (declarable->kind()) {
@@ -2458,7 +2526,7 @@ void ImplementationVisitor::GenerateBuiltinDefinitions(std::string& file_name) {
int index = 0;
for (const auto& parameter : builtin->parameter_names()) {
if (index >= firstParameterIndex) {
- new_contents_stream << ", k" << CamelifyString(parameter);
+ new_contents_stream << ", k" << CamelifyString(parameter->value);
}
index++;
}
@@ -2474,7 +2542,7 @@ void ImplementationVisitor::GenerateBuiltinDefinitions(std::string& file_name) {
Declarations::FindSomeInternalBuiltinWithType(type);
if (!example_builtin) {
CurrentSourcePosition::Scope current_source_position(
- SourcePosition{CurrentSourceFile::Get(), -1, -1});
+ SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
ReportError("unable to find any builtin with type \"", *type, "\"");
}
new_contents_stream << " V(" << type->function_pointer_type_id() << ","
@@ -2489,6 +2557,79 @@ void ImplementationVisitor::GenerateBuiltinDefinitions(std::string& file_name) {
ReplaceFileContentsIfDifferent(file_name, new_contents);
}
+namespace {
+
+enum class FieldSectionType {
+ kNoSection = 0,
+ kWeakSection,
+ kStrongSection,
+ kScalarSection
+};
+
+void PossiblyStartTagged(FieldSectionType* section,
+ std::set<FieldSectionType>* completed_sections,
+ std::stringstream* o) {
+ if (completed_sections->count(FieldSectionType::kWeakSection) == 0 &&
+ completed_sections->count(FieldSectionType::kStrongSection) == 0 &&
+ *section != FieldSectionType::kWeakSection &&
+ *section != FieldSectionType::kStrongSection) {
+ *o << "V(kStartOfPointerFieldsOffset, 0) \\\n";
+ }
+}
+
+void PossiblyEndTagged(FieldSectionType* section,
+ std::set<FieldSectionType>* completed_sections,
+ std::stringstream* o) {
+ if (completed_sections->count(FieldSectionType::kWeakSection) != 0 &&
+ completed_sections->count(FieldSectionType::kStrongSection) != 0) {
+ *o << "V(kEndOfTaggedFieldsOffset, 0) \\\n";
+ }
+}
+
+void ProcessFieldInSection(FieldSectionType* section,
+ std::set<FieldSectionType>* completed_sections,
+ FieldSectionType field_section,
+ std::stringstream* o) {
+ if (*section != FieldSectionType::kNoSection) {
+ if (*section != field_section) {
+ if (completed_sections->count(field_section) != 0) {
+ ReportError("reopening of weak, strong or scalar field section");
+ }
+ completed_sections->insert(*section);
+ if (*section == FieldSectionType::kWeakSection) {
+ *o << "V(kEndOfWeakFieldsOffset, 0) \\\n";
+ PossiblyEndTagged(section, completed_sections, o);
+ } else if (*section == FieldSectionType::kStrongSection) {
+ *o << "V(kEndOfStrongFieldsOffset, 0) \\\n";
+ PossiblyEndTagged(section, completed_sections, o);
+ }
+ }
+ }
+ if (*section != field_section) {
+ if (field_section == FieldSectionType::kWeakSection) {
+ PossiblyStartTagged(section, completed_sections, o);
+ *o << "V(kStartOfWeakFieldsOffset, 0) \\\n";
+ } else if (field_section == FieldSectionType::kStrongSection) {
+ PossiblyStartTagged(section, completed_sections, o);
+ *o << "V(kStartOfStrongFieldsOffset, 0) \\\n";
+ }
+ }
+ *section = field_section;
+}
+
+void CompleteFieldSection(FieldSectionType* section,
+ std::set<FieldSectionType>* completed_sections,
+ FieldSectionType field_section,
+ std::stringstream* o) {
+ if (completed_sections->count(field_section) == 0) {
+ ProcessFieldInSection(section, completed_sections, field_section, o);
+ ProcessFieldInSection(section, completed_sections,
+ FieldSectionType::kNoSection, o);
+ }
+}
+
+} // namespace
+
void ImplementationVisitor::GenerateClassDefinitions(std::string& file_name) {
std::stringstream new_contents_stream;
new_contents_stream << "#ifndef V8_CLASS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
@@ -2496,32 +2637,55 @@ void ImplementationVisitor::GenerateClassDefinitions(std::string& file_name) {
"\n\n";
for (auto i : GlobalContext::GetClasses()) {
+ ClassType* type = i.second;
+ if (!type->IsExtern()) continue;
+
// TODO(danno): Ideally (and we've got several core V8 devs' feedback
// supporting this), Torque should generate the constants for the offsets
// directly and not go through the existing layer of macros, which actually
// currently just serves to additionally obfuscate where these values come
// from.
new_contents_stream << "#define ";
- new_contents_stream << CapifyStringWithUnderscores(i.first)
+ new_contents_stream << "TORQUE_GENERATED_"
+ << CapifyStringWithUnderscores(i.first)
<< "_FIELDS(V) \\\n";
- const ClassType* type = i.second;
std::vector<Field> fields = type->fields();
- new_contents_stream << "V(kStartOfStrongFieldsOffset, 0) \\\n";
- for (auto f : fields) {
- if (!f.is_weak) {
- new_contents_stream << "V(k" << CamelifyString(f.name_and_type.name)
- << "Offset, kTaggedSize) \\\n";
- }
- }
- new_contents_stream << "V(kEndOfStrongFieldsOffset, 0) \\\n";
- new_contents_stream << "V(kStartOfWeakFieldsOffset, 0) \\\n";
+ FieldSectionType section = FieldSectionType::kNoSection;
+ std::set<FieldSectionType> completed_sections;
for (auto f : fields) {
- if (f.is_weak) {
- new_contents_stream << "V(k" << CamelifyString(f.name_and_type.name)
- << "Offset, kTaggedSize) \\\n";
+ CurrentSourcePosition::Scope scope(f.pos);
+ if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ if (f.is_weak) {
+ ProcessFieldInSection(&section, &completed_sections,
+ FieldSectionType::kWeakSection,
+ &new_contents_stream);
+ } else {
+ ProcessFieldInSection(&section, &completed_sections,
+ FieldSectionType::kStrongSection,
+ &new_contents_stream);
+ }
+ } else {
+ ProcessFieldInSection(&section, &completed_sections,
+ FieldSectionType::kScalarSection,
+ &new_contents_stream);
}
- }
- new_contents_stream << "V(kEndOfWeakFieldsOffset, 0) \\\n";
+ size_t field_size;
+ std::string size_string;
+ std::string machine_type;
+ std::tie(field_size, size_string, machine_type) =
+ f.GetFieldSizeInformation();
+ new_contents_stream << "V(k" << CamelifyString(f.name_and_type.name)
+ << "Offset, " << size_string << ") \\\n";
+ }
+
+ ProcessFieldInSection(&section, &completed_sections,
+ FieldSectionType::kNoSection, &new_contents_stream);
+ CompleteFieldSection(&section, &completed_sections,
+ FieldSectionType::kWeakSection, &new_contents_stream);
+ CompleteFieldSection(&section, &completed_sections,
+ FieldSectionType::kStrongSection,
+ &new_contents_stream);
+
new_contents_stream << "V(kSize, 0) \\\n";
new_contents_stream << "\n";
}
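
For a hypothetical extern class with two strong pointer fields and no weak or scalar fields, the section state machine above would emit a macro along these lines (the class and field names are invented; the marker ordering follows ProcessFieldInSection and CompleteFieldSection):

    #define TORQUE_GENERATED_EXAMPLE_CLASS_FIELDS(V) \
      V(kStartOfPointerFieldsOffset, 0)              \
      V(kStartOfStrongFieldsOffset, 0)               \
      V(kGetOffset, kTaggedSize)                     \
      V(kSetOffset, kTaggedSize)                     \
      V(kEndOfStrongFieldsOffset, 0)                 \
      V(kStartOfWeakFieldsOffset, 0)                 \
      V(kEndOfWeakFieldsOffset, 0)                   \
      V(kEndOfTaggedFieldsOffset, 0)                 \
      V(kSize, 0)
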
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 1cbccf3142..8926ec3f17 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -54,6 +54,26 @@ class LocationReference {
result.eval_function_ = "." + fieldname;
result.assign_function_ = "." + fieldname + "=";
result.call_arguments_ = {object};
+ result.index_field_ = base::nullopt;
+ return result;
+ }
+ static LocationReference IndexedFieldIndexedAccess(
+ const LocationReference& indexed_field, VisitResult index) {
+ LocationReference result;
+ DCHECK(indexed_field.IsIndexedFieldAccess());
+ std::string fieldname = *indexed_field.index_field_;
+ result.eval_function_ = "." + fieldname + "[]";
+ result.assign_function_ = "." + fieldname + "[]=";
+ result.call_arguments_ = indexed_field.call_arguments_;
+ result.call_arguments_.push_back(index);
+ result.index_field_ = fieldname;
+ return result;
+ }
+ static LocationReference IndexedFieldAccess(VisitResult object,
+ std::string fieldname) {
+ LocationReference result;
+ result.call_arguments_ = {object};
+ result.index_field_ = fieldname;
return result;
}
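
Note that IndexedFieldAccess deliberately carries no eval or assign function; that absence is what makes GenerateFetchFromLocation and GenerateAssignToLocation reject direct loads and stores through it. Only once an index is applied does IndexedFieldIndexedAccess synthesize the ".field[]" accessor pair. A toy version showing how the two factories compose:

    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    struct LocationReference {
      std::optional<std::string> eval_function;
      std::optional<std::string> assign_function;
      std::vector<std::string> call_arguments;
      std::optional<std::string> index_field;

      static LocationReference IndexedFieldAccess(std::string object,
                                                  std::string fieldname) {
        LocationReference result;
        result.call_arguments = {std::move(object)};
        result.index_field = std::move(fieldname);  // no eval/assign yet
        return result;
      }

      static LocationReference IndexedFieldIndexedAccess(
          const LocationReference& field, std::string index) {
        LocationReference result;
        const std::string& name = *field.index_field;
        result.eval_function = "." + name + "[]";
        result.assign_function = "." + name + "[]=";
        result.call_arguments = field.call_arguments;
        result.call_arguments.push_back(std::move(index));
        result.index_field = name;
        return result;
      }
    };

    int main() {
      auto field = LocationReference::IndexedFieldAccess("obj", "elements");
      auto element = LocationReference::IndexedFieldIndexedAccess(field, "i");
      std::cout << *element.eval_function << " / " << *element.assign_function
                << "\n";  // .elements[] / .elements[]=
    }
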
@@ -82,6 +102,13 @@ class LocationReference {
return *temporary_description_;
}
+ bool IsArrayField() const { return index_field_.has_value(); }
+ bool IsIndexedFieldAccess() const {
+ return IsArrayField() && !IsCallAccess();
+ }
+ bool IsIndexedFieldIndexedAccess() const {
+ return IsArrayField() && IsCallAccess();
+ }
bool IsCallAccess() const {
bool is_call_access = eval_function_.has_value();
DCHECK_EQ(is_call_access, assign_function_.has_value());
@@ -107,10 +134,15 @@ class LocationReference {
base::Optional<std::string> eval_function_;
base::Optional<std::string> assign_function_;
VisitResultVector call_arguments_;
+ base::Optional<std::string> index_field_;
LocationReference() = default;
};
+struct InitializerResults {
+ std::vector<VisitResult> results;
+};
+
template <class T>
class Binding;
@@ -138,6 +170,11 @@ class Binding : public T {
previous_binding_(this) {
std::swap(previous_binding_, manager_->current_bindings_[name]);
}
+ template <class... Args>
+ Binding(BindingsManager<T>* manager, const Identifier* name, Args&&... args)
+ : Binding(manager, name->value, std::forward<Args>(args)...) {
+ declaration_position_ = name->pos;
+ }
~Binding() { manager_->current_bindings_[name_] = previous_binding_; }
const std::string& name() const { return name_; }
@@ -156,18 +193,17 @@ class BlockBindings {
public:
explicit BlockBindings(BindingsManager<T>* manager) : manager_(manager) {}
void Add(std::string name, T value) {
- for (const auto& binding : bindings_) {
- if (binding->name() == name) {
- ReportError(
- "redeclaration of name \"", name,
- "\" in the same block is illegal, previous declaration at: ",
- binding->declaration_position());
- }
- }
+ ReportErrorIfAlreadyBound(name);
bindings_.push_back(base::make_unique<Binding<T>>(manager_, std::move(name),
std::move(value)));
}
+ void Add(const Identifier* name, T value) {
+ ReportErrorIfAlreadyBound(name->value);
+ bindings_.push_back(
+ base::make_unique<Binding<T>>(manager_, name, std::move(value)));
+ }
+
std::vector<Binding<T>*> bindings() const {
std::vector<Binding<T>*> result;
result.reserve(bindings_.size());
@@ -178,6 +214,17 @@ class BlockBindings {
}
private:
+ void ReportErrorIfAlreadyBound(const std::string& name) {
+ for (const auto& binding : bindings_) {
+ if (binding->name() == name) {
+ ReportError(
+ "redeclaration of name \"", name,
+ "\" in the same block is illegal, previous declaration at: ",
+ binding->declaration_position());
+ }
+ }
+ }
+
BindingsManager<T>* manager_;
std::vector<std::unique_ptr<Binding<T>>> bindings_;
};
@@ -201,8 +248,9 @@ struct Arguments {
std::vector<Binding<LocalLabel>*> labels;
};
+// Determine if a callable should be considered as an overload.
bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
- const std::vector<Binding<LocalLabel>*>& labels);
+ size_t label_count);
class ImplementationVisitor : public FileVisitor {
public:
@@ -212,6 +260,17 @@ class ImplementationVisitor : public FileVisitor {
VisitResult Visit(Expression* expr);
const Type* Visit(Statement* stmt);
+ InitializerResults VisitInitializerResults(
+ const std::vector<Expression*>& expressions);
+
+ size_t InitializeAggregateHelper(
+ const AggregateType* aggregate_type, VisitResult allocate_result,
+ const InitializerResults& initializer_results);
+
+ void InitializeAggregate(const AggregateType* aggregate_type,
+ VisitResult allocate_result,
+ const InitializerResults& initializer_results);
+
VisitResult TemporaryUninitializedStruct(const StructType* struct_type,
const std::string& reason);
VisitResult Visit(StructExpression* decl);
@@ -251,8 +310,8 @@ class ImplementationVisitor : public FileVisitor {
VisitResult Visit(CallExpression* expr, bool is_tail = false);
VisitResult Visit(CallMethodExpression* expr);
VisitResult Visit(IntrinsicCallExpression* intrinsic);
- VisitResult Visit(LoadObjectFieldExpression* intrinsic);
- VisitResult Visit(StoreObjectFieldExpression* intrinsic);
+ VisitResult Visit(LoadObjectFieldExpression* expr);
+ VisitResult Visit(StoreObjectFieldExpression* expr);
const Type* Visit(TailCallStatement* stmt);
VisitResult Visit(ConditionalExpression* expr);
@@ -290,18 +349,12 @@ class ImplementationVisitor : public FileVisitor {
void GenerateImplementation(const std::string& dir, Namespace* nspace);
- struct ConstructorInfo {
- int super_calls;
- };
-
DECLARE_CONTEXTUAL_VARIABLE(ValueBindingsManager,
BindingsManager<LocalValue>);
DECLARE_CONTEXTUAL_VARIABLE(LabelBindingsManager,
BindingsManager<LocalLabel>);
DECLARE_CONTEXTUAL_VARIABLE(CurrentCallable, Callable*);
DECLARE_CONTEXTUAL_VARIABLE(CurrentReturnValue, base::Optional<VisitResult>);
- DECLARE_CONTEXTUAL_VARIABLE(CurrentConstructorInfo,
- base::Optional<ConstructorInfo>);
// A BindingsManagersScope has to be active for local bindings to be created.
// Shadowing an existing BindingsManagersScope by creating a new one hides all
@@ -413,13 +466,6 @@ class ImplementationVisitor : public FileVisitor {
const Arguments& arguments,
const TypeVector& specialization_types);
- Method* LookupConstructor(LocationReference target,
- const Arguments& arguments,
- const TypeVector& specialization_types) {
- return LookupMethod(kConstructMethodName, target, arguments,
- specialization_types);
- }
-
const Type* GetCommonType(const Type* left, const Type* right);
VisitResult GenerateCopy(const VisitResult& to_copy);
@@ -478,15 +524,30 @@ class ImplementationVisitor : public FileVisitor {
StackRange LowerParameter(const Type* type, const std::string& parameter_name,
Stack<std::string>* lowered_parameters);
+ void LowerLabelParameter(const Type* type, const std::string& parameter_name,
+ std::vector<std::string>* lowered_parameters);
+
std::string ExternalLabelName(const std::string& label_name);
std::string ExternalLabelParameterName(const std::string& label_name,
size_t i);
std::string ExternalParameterName(const std::string& name);
- std::ostream& source_out() { return CurrentNamespace()->source_stream(); }
-
- std::ostream& header_out() { return CurrentNamespace()->header_stream(); }
-
+ std::ostream& source_out() {
+ Callable* callable = CurrentCallable::Get();
+ if (!callable || callable->ShouldGenerateExternalCode()) {
+ return CurrentNamespace()->source_stream();
+ } else {
+ return null_stream_;
+ }
+ }
+ std::ostream& header_out() {
+ Callable* callable = CurrentCallable::Get();
+ if (!callable || callable->ShouldGenerateExternalCode()) {
+ return CurrentNamespace()->header_stream();
+ } else {
+ return null_stream_;
+ }
+ }
CfgAssembler& assembler() { return *assembler_; }
void SetReturnValue(VisitResult return_value) {
@@ -503,6 +564,7 @@ class ImplementationVisitor : public FileVisitor {
}
base::Optional<CfgAssembler> assembler_;
+ NullOStream null_stream_;
};
} // namespace torque
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
index 1bf38aaa94..544a841bf2 100644
--- a/deps/v8/src/torque/instructions.h
+++ b/deps/v8/src/torque/instructions.h
@@ -207,10 +207,7 @@ struct LoadObjectFieldInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
LoadObjectFieldInstruction(const ClassType* class_type,
std::string field_name)
- : class_type(class_type) {
- // The normal way to write this triggers a bug in Clang on Windows.
- this->field_name = std::move(field_name);
- }
+ : class_type(class_type), field_name(std::move(field_name)) {}
const ClassType* class_type;
std::string field_name;
};
@@ -219,10 +216,7 @@ struct StoreObjectFieldInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
StoreObjectFieldInstruction(const ClassType* class_type,
std::string field_name)
- : class_type(class_type) {
- // The normal way to write this triggers a bug in Clang on Windows.
- this->field_name = std::move(field_name);
- }
+ : class_type(class_type), field_name(std::move(field_name)) {}
const ClassType* class_type;
std::string field_name;
};
@@ -230,10 +224,14 @@ struct StoreObjectFieldInstruction : InstructionBase {
struct CallIntrinsicInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
CallIntrinsicInstruction(Intrinsic* intrinsic,
+ TypeVector specialization_types,
std::vector<std::string> constexpr_arguments)
- : intrinsic(intrinsic), constexpr_arguments(constexpr_arguments) {}
+ : intrinsic(intrinsic),
+ specialization_types(std::move(specialization_types)),
+ constexpr_arguments(constexpr_arguments) {}
Intrinsic* intrinsic;
+ TypeVector specialization_types;
std::vector<std::string> constexpr_arguments;
};
@@ -395,10 +393,8 @@ struct ReturnInstruction : InstructionBase {
struct PrintConstantStringInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
- explicit PrintConstantStringInstruction(std::string message) {
- // The normal way to write this triggers a bug in Clang on Windows.
- this->message = std::move(message);
- }
+ explicit PrintConstantStringInstruction(std::string message)
+ : message(std::move(message)) {}
std::string message;
};
@@ -407,10 +403,8 @@ struct AbortInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
enum class Kind { kDebugBreak, kUnreachable, kAssertionFailure };
bool IsBlockTerminator() const override { return kind != Kind::kDebugBreak; }
- explicit AbortInstruction(Kind kind, std::string message = "") : kind(kind) {
- // The normal way to write this triggers a bug in Clang on Windows.
- this->message = std::move(message);
- }
+ explicit AbortInstruction(Kind kind, std::string message = "")
+ : kind(kind), message(std::move(message)) {}
Kind kind;
std::string message;
diff --git a/deps/v8/src/torque/ls/globals.h b/deps/v8/src/torque/ls/globals.h
new file mode 100644
index 0000000000..df6589c146
--- /dev/null
+++ b/deps/v8/src/torque/ls/globals.h
@@ -0,0 +1,58 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_LS_GLOBALS_H_
+#define V8_TORQUE_LS_GLOBALS_H_
+
+#include <fstream>
+#include "src/torque/contextual.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+// When the language server is run by VS Code, stdout cannot be seen, as it
+// is used as the communication channel. For debugging purposes, a simple
+// Logger class is added that allows writing diagnostics to a file
+// configurable via a command-line flag.
+class Logger : public ContextualClass<Logger> {
+ public:
+ Logger() : enabled_(false) {}
+ ~Logger() {
+ if (enabled_) logfile_.close();
+ }
+
+ static void Enable(std::string path) {
+ Get().enabled_ = true;
+ Get().logfile_.open(path);
+ }
+
+ template <class... Args>
+ static void Log(Args&&... args) {
+ if (Enabled()) {
+ USE((Stream() << std::forward<Args>(args))...);
+ Flush();
+ }
+ }
+
+ private:
+ static bool Enabled() { return Get().enabled_; }
+ static std::ofstream& Stream() {
+ CHECK(Get().enabled_);
+ return Get().logfile_;
+ }
+ static void Flush() { Get().logfile_.flush(); }
+
+ private:
+ bool enabled_;
+ std::ofstream logfile_;
+};
+
+DECLARE_CONTEXTUAL_VARIABLE(TorqueFileList, std::vector<std::string>);
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_LS_GLOBALS_H_
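For orientation, a minimal usage sketch of this contextual Logger, assuming an
active Logger::Scope (as set up in torque-language-server.cc below) and a
hypothetical log path:

    Logger::Scope log_scope;               // make a Logger instance current
    Logger::Enable("/tmp/torque-ls.log");  // hypothetical path
    Logger::Log("[info] processed ", 3, " files\n");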
diff --git a/deps/v8/src/torque/ls/json-parser.cc b/deps/v8/src/torque/ls/json-parser.cc
new file mode 100644
index 0000000000..0de66a9285
--- /dev/null
+++ b/deps/v8/src/torque/ls/json-parser.cc
@@ -0,0 +1,195 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/ls/json-parser.h"
+
+#include <cctype>
+#include "src/torque/earley-parser.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<ls::JsonValue>::id =
+ ParseResultTypeId::kJsonValue;
+
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::pair<std::string, ls::JsonValue>>::id =
+ ParseResultTypeId::kJsonMember;
+
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<ls::JsonValue>>::id =
+ ParseResultTypeId::kStdVectorOfJsonValue;
+
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<std::pair<std::string, ls::JsonValue>>>::id =
+ ParseResultTypeId::kStdVectorOfJsonMember;
+
+namespace ls {
+
+using JsonMember = std::pair<std::string, JsonValue>;
+
+template <bool value>
+base::Optional<ParseResult> MakeBoolLiteral(
+ ParseResultIterator* child_results) {
+ return ParseResult{JsonValue::From(value)};
+}
+
+base::Optional<ParseResult> MakeNullLiteral(
+ ParseResultIterator* child_results) {
+ JsonValue result;
+ result.tag = JsonValue::IS_NULL;
+ return ParseResult{std::move(result)};
+}
+
+base::Optional<ParseResult> MakeNumberLiteral(
+ ParseResultIterator* child_results) {
+ auto number = child_results->NextAs<std::string>();
+ double d = std::stod(number);
+ return ParseResult{JsonValue::From(d)};
+}
+
+base::Optional<ParseResult> MakeStringLiteral(
+ ParseResultIterator* child_results) {
+ std::string literal = child_results->NextAs<std::string>();
+ return ParseResult{JsonValue::From(StringLiteralUnquote(literal))};
+}
+
+base::Optional<ParseResult> MakeArray(ParseResultIterator* child_results) {
+ JsonArray array = child_results->NextAs<JsonArray>();
+ return ParseResult{JsonValue::From(std::move(array))};
+}
+
+base::Optional<ParseResult> MakeMember(ParseResultIterator* child_results) {
+ JsonMember result;
+ std::string key = child_results->NextAs<std::string>();
+ result.first = StringLiteralUnquote(key);
+ result.second = child_results->NextAs<JsonValue>();
+ return ParseResult{std::move(result)};
+}
+
+base::Optional<ParseResult> MakeObject(ParseResultIterator* child_results) {
+ using MemberList = std::vector<JsonMember>;
+ MemberList members = child_results->NextAs<MemberList>();
+
+ JsonObject object;
+ for (auto& member : members) object.insert(std::move(member));
+
+ return ParseResult{JsonValue::From(std::move(object))};
+}
+
+class JsonGrammar : public Grammar {
+ static bool MatchWhitespace(InputPosition* pos) {
+ while (MatchChar(std::isspace, pos)) {
+ }
+ return true;
+ }
+
+ static bool MatchStringLiteral(InputPosition* pos) {
+ InputPosition current = *pos;
+ if (MatchString("\"", &current)) {
+ while (
+ (MatchString("\\", &current) && MatchAnyChar(&current)) ||
+ MatchChar([](char c) { return c != '"' && c != '\n'; }, &current)) {
+ }
+ if (MatchString("\"", &current)) {
+ *pos = current;
+ return true;
+ }
+ }
+ current = *pos;
+ if (MatchString("'", &current)) {
+ while (
+ (MatchString("\\", &current) && MatchAnyChar(&current)) ||
+ MatchChar([](char c) { return c != '\'' && c != '\n'; }, &current)) {
+ }
+ if (MatchString("'", &current)) {
+ *pos = current;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static bool MatchHexLiteral(InputPosition* pos) {
+ InputPosition current = *pos;
+ MatchString("-", &current);
+ if (MatchString("0x", &current) && MatchChar(std::isxdigit, &current)) {
+ while (MatchChar(std::isxdigit, &current)) {
+ }
+ *pos = current;
+ return true;
+ }
+ return false;
+ }
+
+ static bool MatchDecimalLiteral(InputPosition* pos) {
+ InputPosition current = *pos;
+ bool found_digit = false;
+ MatchString("-", &current);
+ while (MatchChar(std::isdigit, &current)) found_digit = true;
+ MatchString(".", &current);
+ while (MatchChar(std::isdigit, &current)) found_digit = true;
+ if (!found_digit) return false;
+ *pos = current;
+ if ((MatchString("e", &current) || MatchString("E", &current)) &&
+ (MatchString("+", &current) || MatchString("-", &current) || true) &&
+ MatchChar(std::isdigit, &current)) {
+ while (MatchChar(std::isdigit, &current)) {
+ }
+ *pos = current;
+ return true;
+ }
+ return true;
+ }
+
+ public:
+ JsonGrammar() : Grammar(&file) { SetWhitespace(MatchWhitespace); }
+
+ Symbol trueLiteral = {Rule({Token("true")})};
+ Symbol falseLiteral = {Rule({Token("false")})};
+ Symbol nullLiteral = {Rule({Token("null")})};
+
+ Symbol decimalLiteral = {
+ Rule({Pattern(MatchDecimalLiteral)}, YieldMatchedInput),
+ Rule({Pattern(MatchHexLiteral)}, YieldMatchedInput)};
+
+ Symbol stringLiteral = {
+ Rule({Pattern(MatchStringLiteral)}, YieldMatchedInput)};
+
+ Symbol* elementList = List<JsonValue>(&value, Token(","));
+ Symbol array = {Rule({Token("["), elementList, Token("]")})};
+
+ Symbol member = {Rule({&stringLiteral, Token(":"), &value}, MakeMember)};
+ Symbol* memberList = List<JsonMember>(&member, Token(","));
+ Symbol object = {Rule({Token("{"), memberList, Token("}")})};
+
+ Symbol value = {Rule({&trueLiteral}, MakeBoolLiteral<true>),
+ Rule({&falseLiteral}, MakeBoolLiteral<false>),
+ Rule({&nullLiteral}, MakeNullLiteral),
+ Rule({&decimalLiteral}, MakeNumberLiteral),
+ Rule({&stringLiteral}, MakeStringLiteral),
+ Rule({&object}, MakeObject),
+ Rule({&array}, MakeArray)};
+
+ Symbol file = {Rule({&value})};
+};
+
+JsonValue ParseJson(const std::string& input) {
+ // Torque needs a CurrentSourceFile scope during parsing.
+ // As JSON lives in memory only, an unknown file scope is created.
+ SourceFileMap::Scope source_map_scope;
+ CurrentSourceFile::Scope unknown_file(SourceFileMap::AddSource("<json>"));
+
+ return (*JsonGrammar().Parse(input)).Cast<JsonValue>();
+}
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/ls/json-parser.h b/deps/v8/src/torque/ls/json-parser.h
new file mode 100644
index 0000000000..38562113cc
--- /dev/null
+++ b/deps/v8/src/torque/ls/json-parser.h
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_LS_JSON_PARSER_H_
+#define V8_TORQUE_LS_JSON_PARSER_H_
+
+#include "src/base/macros.h"
+#include "src/torque/ls/json.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+namespace ls {
+
+V8_EXPORT_PRIVATE JsonValue ParseJson(const std::string& input);
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_LS_JSON_PARSER_H_
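A small usage sketch, assuming the declaration above; ParseJson installs its
own SourceFileMap scope, so the caller needs no extra context:

    using namespace v8::internal::torque::ls;
    JsonValue v = ParseJson("{\"id\": 1, \"ok\": true}");
    CHECK(v.IsObject());
    double id = v.ToObject().at("id").ToNumber();  // 1
    bool ok = v.ToObject().at("ok").ToBool();      // true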
diff --git a/deps/v8/src/torque/ls/json.cc b/deps/v8/src/torque/ls/json.cc
new file mode 100644
index 0000000000..013c0d652e
--- /dev/null
+++ b/deps/v8/src/torque/ls/json.cc
@@ -0,0 +1,69 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/ls/json.h"
+
+#include <iostream>
+#include <sstream>
+#include "src/torque/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+namespace ls {
+
+namespace {
+
+void SerializeToString(std::stringstream& str, const JsonValue& value) {
+ switch (value.tag) {
+ case JsonValue::NUMBER:
+ str << value.ToNumber();
+ break;
+ case JsonValue::STRING:
+ str << StringLiteralQuote(value.ToString());
+ break;
+ case JsonValue::IS_NULL:
+ str << "null";
+ break;
+ case JsonValue::BOOL:
+ str << (value.ToBool() ? "true" : "false");
+ break;
+ case JsonValue::OBJECT: {
+ str << "{";
+ size_t i = 0;
+ for (const auto& pair : value.ToObject()) {
+ str << "\"" << pair.first << "\":";
+ SerializeToString(str, pair.second);
+ if (++i < value.ToObject().size()) str << ",";
+ }
+ str << "}";
+ break;
+ }
+ case JsonValue::ARRAY: {
+ str << "[";
+ size_t i = 0;
+ for (const auto& element : value.ToArray()) {
+ SerializeToString(str, element);
+ if (++i < value.ToArray().size()) str << ",";
+ }
+ str << "]";
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+} // namespace
+
+std::string SerializeToString(const JsonValue& value) {
+ std::stringstream result;
+ SerializeToString(result, value);
+ return result.str();
+}
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/ls/json.h b/deps/v8/src/torque/ls/json.h
new file mode 100644
index 0000000000..e208fa7f21
--- /dev/null
+++ b/deps/v8/src/torque/ls/json.h
@@ -0,0 +1,123 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_LS_JSON_H_
+#define V8_TORQUE_LS_JSON_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "src/base/logging.h"
+#include "src/base/template-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+namespace ls {
+
+struct JsonValue;
+
+using JsonObject = std::map<std::string, JsonValue>;
+using JsonArray = std::vector<JsonValue>;
+
+struct JsonValue {
+ public:
+ enum { OBJECT, ARRAY, STRING, NUMBER, BOOL, IS_NULL } tag;
+
+ static JsonValue From(double number) {
+ JsonValue result;
+ result.tag = JsonValue::NUMBER;
+ result.number_ = number;
+ return result;
+ }
+
+ static JsonValue From(JsonObject object) {
+ JsonValue result;
+ result.tag = JsonValue::OBJECT;
+ result.object_ = base::make_unique<JsonObject>(std::move(object));
+ return result;
+ }
+
+ static JsonValue From(bool b) {
+ JsonValue result;
+ result.tag = JsonValue::BOOL;
+ result.flag_ = b;
+ return result;
+ }
+
+ static JsonValue From(const std::string& string) {
+ JsonValue result;
+ result.tag = JsonValue::STRING;
+ result.string_ = string;
+ return result;
+ }
+
+ static JsonValue From(JsonArray array) {
+ JsonValue result;
+ result.tag = JsonValue::ARRAY;
+ result.array_ = base::make_unique<JsonArray>(std::move(array));
+ return result;
+ }
+
+ static JsonValue JsonNull() {
+ JsonValue result;
+ result.tag = JsonValue::IS_NULL;
+ return result;
+ }
+
+ bool IsNumber() const { return tag == NUMBER; }
+ double ToNumber() const {
+ CHECK(IsNumber());
+ return number_;
+ }
+
+ bool IsBool() const { return tag == BOOL; }
+ bool ToBool() const {
+ CHECK(IsBool());
+ return flag_;
+ }
+
+ bool IsString() const { return tag == STRING; }
+ const std::string& ToString() const {
+ CHECK(IsString());
+ return string_;
+ }
+
+ bool IsObject() const { return object_ && tag == OBJECT; }
+ const JsonObject& ToObject() const {
+ CHECK(IsObject());
+ return *object_;
+ }
+ JsonObject& ToObject() {
+ CHECK(IsObject());
+ return *object_;
+ }
+
+ bool IsArray() const { return array_ && tag == ARRAY; }
+ const JsonArray& ToArray() const {
+ CHECK(IsArray());
+ return *array_;
+ }
+ JsonArray& ToArray() {
+ CHECK(IsArray());
+ return *array_;
+ }
+
+ private:
+ double number_ = 0;
+ bool flag_ = false;
+ std::string string_;
+ std::unique_ptr<JsonObject> object_;
+ std::unique_ptr<JsonArray> array_;
+};
+
+std::string SerializeToString(const JsonValue& value);
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_LS_JSON_H_
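As a rough illustration of how the From() factories and SerializeToString
compose (the object contents are hypothetical):

    JsonObject obj;
    // Note: wrap in std::string; a raw string literal would convert to bool
    // and select the From(bool) overload instead.
    obj["name"] = JsonValue::From(std::string("torque"));
    obj["count"] = JsonValue::From(2.0);
    std::string wire = SerializeToString(JsonValue::From(std::move(obj)));
    // wire == "{\"count\":2,\"name\":\"torque\"}" (std::map orders by key)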
diff --git a/deps/v8/src/torque/ls/message-handler.cc b/deps/v8/src/torque/ls/message-handler.cc
new file mode 100644
index 0000000000..d76365b5d5
--- /dev/null
+++ b/deps/v8/src/torque/ls/message-handler.cc
@@ -0,0 +1,224 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include "src/torque/ls/message-handler.h"
+
+#include "src/torque/ls/globals.h"
+#include "src/torque/ls/json-parser.h"
+#include "src/torque/ls/message-pipe.h"
+#include "src/torque/ls/message.h"
+#include "src/torque/server-data.h"
+#include "src/torque/source-positions.h"
+#include "src/torque/torque-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+DEFINE_CONTEXTUAL_VARIABLE(Logger)
+DEFINE_CONTEXTUAL_VARIABLE(TorqueFileList)
+
+namespace ls {
+
+static const char kContentLength[] = "Content-Length: ";
+static const size_t kContentLengthSize = sizeof(kContentLength) - 1;
+
+JsonValue ReadMessage() {
+ std::string line;
+ std::getline(std::cin, line);
+
+ if (line.rfind(kContentLength) != 0) {
+ // Invalid message, we just crash.
+ Logger::Log("[fatal] Did not find Content-Length ...\n");
+ v8::base::OS::Abort();
+ }
+
+ const int content_length = std::atoi(line.substr(kContentLengthSize).c_str());
+ std::getline(std::cin, line);
+ std::string content(content_length, ' ');
+ std::cin.read(&content[0], content_length);
+
+ Logger::Log("[incoming] ", content, "\n\n");
+
+ return ParseJson(content);
+}
+
+void WriteMessage(JsonValue& message) {
+ std::string content = SerializeToString(message);
+
+ Logger::Log("[outgoing] ", content, "\n\n");
+
+ std::cout << kContentLength << content.size() << "\r\n\r\n";
+ std::cout << content << std::flush;
+}
+
+namespace {
+
+void RecompileTorque() {
+ Logger::Log("[info] Start compilation run ...\n");
+
+ LanguageServerData::Get() = LanguageServerData();
+ SourceFileMap::Get() = SourceFileMap();
+
+ TorqueCompilerOptions options;
+ options.output_directory = "";
+ options.verbose = false;
+ options.collect_language_server_data = true;
+ options.abort_on_lint_errors = false;
+ CompileTorque(TorqueFileList::Get(), options);
+
+ Logger::Log("[info] Finished compilation run ...\n");
+}
+
+void HandleInitializeRequest(InitializeRequest request, MessageWriter writer) {
+ InitializeResponse response;
+ response.set_id(request.id());
+ response.result().capabilities().textDocumentSync();
+ response.result().capabilities().set_definitionProvider(true);
+
+ // TODO(szuend): Register for document synchronization here,
+ // so we work with the content that the client
+ // provides, rather than reading directly from files.
+ // TODO(szuend): Check that the client actually supports dynamic
+ // "workspace/didChangeWatchedFiles" capability.
+ // TODO(szuend): Check if client supports "LocationLink". This will
+ // influence the result of "goto definition".
+ writer(response.GetJsonValue());
+}
+
+void HandleInitializedNotification(MessageWriter writer) {
+ RegistrationRequest request;
+ // TODO(szuend): The language server needs a "global" request id counter.
+ request.set_id(2000);
+ request.set_method("client/registerCapability");
+
+ Registration reg = request.params().add_registrations();
+ auto options =
+ reg.registerOptions<DidChangeWatchedFilesRegistrationOptions>();
+ FileSystemWatcher watcher = options.add_watchers();
+ watcher.set_globPattern("**/*.tq");
+ watcher.set_kind(FileSystemWatcher::WatchKind::kAll);
+
+ reg.set_id("did-change-id");
+ reg.set_method("workspace/didChangeWatchedFiles");
+
+ writer(request.GetJsonValue());
+}
+
+void HandleTorqueFileListNotification(TorqueFileListNotification notification) {
+ CHECK_EQ(notification.params().object()["files"].tag, JsonValue::ARRAY);
+
+ std::vector<std::string>& files = TorqueFileList::Get();
+ Logger::Log("[info] Initial file list:\n");
+ for (const auto& file_json :
+ notification.params().object()["files"].ToArray()) {
+ CHECK(file_json.IsString());
+
+ // We only consider file URIs (there shouldn't be anything else).
+ // Internally we store the URI instead of the path, eliminating the need
+ // to encode it again.
+ if (auto maybe_path = FileUriDecode(file_json.ToString())) {
+ files.push_back(file_json.ToString());
+ Logger::Log(" ", *maybe_path, "\n");
+ }
+ }
+
+ // The Torque compiler expects to see certain files first,
+ // so we need to order them accordingly.
+ std::sort(files.begin(), files.end(),
+ [](const std::string& a, const std::string& b) {
+ if (a.find("base.tq") != std::string::npos) return true;
+ if (b.find("base.tq") != std::string::npos) return false;
+
+ if (a.find("array.tq") != std::string::npos) return true;
+ if (b.find("array.tq") != std::string::npos) return false;
+
+ return false;
+ });
+
+ RecompileTorque();
+}
+
+void HandleGotoDefinitionRequest(GotoDefinitionRequest request,
+ MessageWriter writer) {
+ GotoDefinitionResponse response;
+ response.set_id(request.id());
+
+ SourceId id =
+ SourceFileMap::GetSourceId(request.params().textDocument().uri());
+
+ // Unknown source files cause an empty response, which corresponds to
+ // the definition not being found.
+ if (!id.IsValid()) {
+ response.SetNull("result");
+ writer(response.GetJsonValue());
+ return;
+ }
+
+ LineAndColumn pos{request.params().position().line(),
+ request.params().position().character()};
+
+ if (auto maybe_definition = LanguageServerData::FindDefinition(id, pos)) {
+ SourcePosition definition = *maybe_definition;
+
+ std::string definition_file = SourceFileMap::GetSource(definition.source);
+ response.result().set_uri(definition_file);
+
+ Range range = response.result().range();
+ range.start().set_line(definition.start.line);
+ range.start().set_character(definition.start.column);
+ range.end().set_line(definition.end.line);
+ range.end().set_character(definition.end.column);
+ } else {
+ response.SetNull("result");
+ }
+
+ writer(response.GetJsonValue());
+}
+
+void HandleChangeWatchedFilesNotification(
+ DidChangeWatchedFilesNotification notification) {
+ // TODO(szuend): Implement updates to the TorqueFile list when create/delete
+ // notifications are received. Currently we simply re-compile.
+ RecompileTorque();
+}
+
+} // namespace
+
+void HandleMessage(JsonValue& raw_message, MessageWriter writer) {
+ Request<bool> request(raw_message);
+
+ // We ignore responses for now. They are matched to requests
+ // by id and don't have a method set.
+ // TODO(szuend): Implement proper response handling for requests
+ // that originate from the server.
+ if (!request.has_method()) {
+ Logger::Log("[info] Unhandled response with id ", request.id(), "\n\n");
+ return;
+ }
+
+ const std::string method = request.method();
+ if (method == "initialize") {
+ HandleInitializeRequest(InitializeRequest(request.GetJsonValue()), writer);
+ } else if (method == "initialized") {
+ HandleInitializedNotification(writer);
+ } else if (method == "torque/fileList") {
+ HandleTorqueFileListNotification(
+ TorqueFileListNotification(request.GetJsonValue()));
+ } else if (method == "textDocument/definition") {
+ HandleGotoDefinitionRequest(GotoDefinitionRequest(request.GetJsonValue()),
+ writer);
+ } else if (method == "workspace/didChangeWatchedFiles") {
+ HandleChangeWatchedFilesNotification(
+ DidChangeWatchedFilesNotification(request.GetJsonValue()));
+ } else {
+ Logger::Log("[error] Message of type ", method, " is not handled!\n\n");
+ }
+}
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
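As a concrete illustration of the framing that ReadMessage and WriteMessage
implement (the standard LSP base protocol), a hypothetical request looks like
this on the wire; the header and separator lines end in "\r\n", and
Content-Length counts only the bytes of the JSON body:

    Content-Length: 46

    {"jsonrpc":"2.0","id":1,"method":"initialize"}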
diff --git a/deps/v8/src/torque/ls/message-handler.h b/deps/v8/src/torque/ls/message-handler.h
new file mode 100644
index 0000000000..2f0f83f1b4
--- /dev/null
+++ b/deps/v8/src/torque/ls/message-handler.h
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_LS_MESSAGE_HANDLER_H_
+#define V8_TORQUE_LS_MESSAGE_HANDLER_H_
+
+#include "src/base/macros.h"
+#include "src/torque/ls/json.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+namespace ls {
+
+// The message handler might send responses or follow-up requests.
+// To allow unit testing, the "sending" function is configurable.
+using MessageWriter = void (*)(JsonValue& message);
+
+V8_EXPORT_PRIVATE void HandleMessage(JsonValue& raw_message, MessageWriter);
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_LS_MESSAGE_HANDLER_H_
diff --git a/deps/v8/src/torque/ls/message-macros.h b/deps/v8/src/torque/ls/message-macros.h
new file mode 100644
index 0000000000..128e720be9
--- /dev/null
+++ b/deps/v8/src/torque/ls/message-macros.h
@@ -0,0 +1,59 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_LS_MESSAGE_MACROS_H_
+#define V8_TORQUE_LS_MESSAGE_MACROS_H_
+
+namespace v8 {
+namespace internal {
+namespace torque {
+namespace ls {
+
+#define JSON_STRING_ACCESSORS(name) \
+ inline const std::string& name() const { \
+ return object().at(#name).ToString(); \
+ } \
+ inline void set_##name(const std::string& str) { \
+ object()[#name] = JsonValue::From(str); \
+ } \
+ inline bool has_##name() const { \
+ return object().find(#name) != object().end(); \
+ }
+
+#define JSON_BOOL_ACCESSORS(name) \
+ inline bool name() const { return object().at(#name).ToBool(); } \
+ inline void set_##name(bool b) { object()[#name] = JsonValue::From(b); }
+
+#define JSON_INT_ACCESSORS(name) \
+ inline int name() const { return object().at(#name).ToNumber(); } \
+ inline void set_##name(int n) { \
+ object()[#name] = JsonValue::From(static_cast<double>(n)); \
+ }
+
+#define JSON_OBJECT_ACCESSORS(type, name) \
+ inline type name() { return GetObject<type>(#name); }
+
+#define JSON_DYNAMIC_OBJECT_ACCESSORS(name) \
+ template <class T> \
+ inline T name() { \
+ return GetObject<T>(#name); \
+ }
+
+#define JSON_ARRAY_OBJECT_ACCESSORS(type, name) \
+ inline type add_##name() { \
+ JsonObject& new_element = AddObjectElementToArrayProperty(#name); \
+ return type(new_element); \
+ } \
+ inline std::size_t name##_size() { return GetArrayProperty(#name).size(); } \
+ inline type name(size_t idx) { \
+ CHECK(idx < name##_size()); \
+ return type(GetArrayProperty(#name)[idx].ToObject()); \
+ }
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_LS_MESSAGE_MACROS_H_
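To make the macro machinery concrete, JSON_STRING_ACCESSORS(uri) expands
roughly to the following members inside a class deriving from
BaseJsonAccessor (see message.h below):

    inline const std::string& uri() const {
      return object().at("uri").ToString();
    }
    inline void set_uri(const std::string& str) {
      object()["uri"] = JsonValue::From(str);
    }
    inline bool has_uri() const {
      return object().find("uri") != object().end();
    }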
diff --git a/deps/v8/src/torque/ls/message-pipe.h b/deps/v8/src/torque/ls/message-pipe.h
new file mode 100644
index 0000000000..981fed4b2f
--- /dev/null
+++ b/deps/v8/src/torque/ls/message-pipe.h
@@ -0,0 +1,24 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_LS_MESSAGE_PIPE_H_
+#define V8_TORQUE_LS_MESSAGE_PIPE_H_
+
+#include <memory>
+#include "src/torque/ls/json.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+namespace ls {
+
+JsonValue ReadMessage();
+void WriteMessage(JsonValue& message);
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_LS_MESSAGE_PIPE_H_
diff --git a/deps/v8/src/torque/ls/message.h b/deps/v8/src/torque/ls/message.h
new file mode 100644
index 0000000000..65c7ce1b9e
--- /dev/null
+++ b/deps/v8/src/torque/ls/message.h
@@ -0,0 +1,291 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_LS_MESSAGE_H_
+#define V8_TORQUE_LS_MESSAGE_H_
+
+#include "src/base/logging.h"
+#include "src/torque/ls/json.h"
+#include "src/torque/ls/message-macros.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+namespace ls {
+
+// Base class for Messages and Objects that are backed by either a
+// JsonValue or a reference to a JsonObject.
+// Helper methods are used by macros to implement typed accessors.
+class BaseJsonAccessor {
+ public:
+ template <class T>
+ T GetObject(const std::string& property) {
+ return T(GetObjectProperty(property));
+ }
+
+ bool HasProperty(const std::string& property) const {
+ return object().count(property) > 0;
+ }
+
+ void SetNull(const std::string& property) {
+ object()[property] = JsonValue::JsonNull();
+ }
+
+ bool IsNull(const std::string& property) const {
+ return HasProperty(property) &&
+ object().at(property).tag == JsonValue::IS_NULL;
+ }
+
+ protected:
+ virtual const JsonObject& object() const = 0;
+ virtual JsonObject& object() = 0;
+
+ JsonObject& GetObjectProperty(const std::string& property) {
+ if (!object()[property].IsObject()) {
+ object()[property] = JsonValue::From(JsonObject{});
+ }
+ return object()[property].ToObject();
+ }
+
+ JsonArray& GetArrayProperty(const std::string& property) {
+ if (!object()[property].IsArray()) {
+ object()[property] = JsonValue::From(JsonArray{});
+ }
+ return object()[property].ToArray();
+ }
+
+ JsonObject& AddObjectElementToArrayProperty(const std::string& property) {
+ JsonArray& array = GetArrayProperty(property);
+ array.push_back(JsonValue::From(JsonObject{}));
+
+ return array.back().ToObject();
+ }
+};
+
+// Base class for Requests, Responses and Notifications.
+// In contrast to NestedJsonAccessor, a Message owns the backing JsonValue of
+// the whole object tree; i.e. value_ serves as the root.
+class Message : public BaseJsonAccessor {
+ public:
+ Message() {
+ value_ = JsonValue::From(JsonObject{});
+ set_jsonrpc("2.0");
+ }
+ explicit Message(JsonValue& value) : value_(std::move(value)) {
+ CHECK(value_.tag == JsonValue::OBJECT);
+ }
+
+ JsonValue& GetJsonValue() { return value_; }
+
+ JSON_STRING_ACCESSORS(jsonrpc)
+
+ protected:
+ const JsonObject& object() const { return value_.ToObject(); }
+ JsonObject& object() { return value_.ToObject(); }
+
+ private:
+ JsonValue value_;
+};
+
+// Base class for complex types that might be part of a Message.
+// Instead of creating these directly, use the accessors on the
+// root Message or a parent object.
+class NestedJsonAccessor : public BaseJsonAccessor {
+ public:
+ explicit NestedJsonAccessor(JsonObject& object) : object_(object) {}
+
+ const JsonObject& object() const { return object_; }
+ JsonObject& object() { return object_; }
+
+ private:
+ JsonObject& object_;
+};
+
+class ResponseError : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_INT_ACCESSORS(code)
+ JSON_STRING_ACCESSORS(message)
+};
+
+class InitializeParams : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_INT_ACCESSORS(processId)
+ JSON_STRING_ACCESSORS(rootPath)
+ JSON_STRING_ACCESSORS(rootUri)
+ JSON_STRING_ACCESSORS(trace)
+};
+
+class FileListParams : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ // TODO(szuend): Implement a read accessor for string
+ // arrays. "files" is managed directly.
+};
+
+class FileSystemWatcher : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_STRING_ACCESSORS(globPattern)
+ JSON_INT_ACCESSORS(kind)
+
+ enum WatchKind {
+ kCreate = 1,
+ kChange = 2,
+ kDelete = 4,
+
+ kAll = kCreate | kChange | kDelete,
+ };
+};
+
+class DidChangeWatchedFilesRegistrationOptions : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_ARRAY_OBJECT_ACCESSORS(FileSystemWatcher, watchers)
+};
+
+class FileEvent : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_STRING_ACCESSORS(uri)
+ JSON_INT_ACCESSORS(type)
+};
+
+class DidChangeWatchedFilesParams : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_ARRAY_OBJECT_ACCESSORS(FileEvent, changes)
+};
+
+class SaveOptions : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_BOOL_ACCESSORS(includeText)
+};
+
+class TextDocumentSyncOptions : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_BOOL_ACCESSORS(openClose)
+ JSON_INT_ACCESSORS(change)
+ JSON_BOOL_ACCESSORS(willSave)
+ JSON_BOOL_ACCESSORS(willSaveWaitUntil)
+ JSON_OBJECT_ACCESSORS(SaveOptions, save)
+};
+
+class ServerCapabilities : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_OBJECT_ACCESSORS(TextDocumentSyncOptions, textDocumentSync)
+ JSON_BOOL_ACCESSORS(definitionProvider)
+};
+
+class InitializeResult : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_OBJECT_ACCESSORS(ServerCapabilities, capabilities)
+};
+
+class Registration : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_STRING_ACCESSORS(id)
+ JSON_STRING_ACCESSORS(method)
+ JSON_DYNAMIC_OBJECT_ACCESSORS(registerOptions)
+};
+
+class RegistrationParams : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_ARRAY_OBJECT_ACCESSORS(Registration, registrations)
+};
+
+class JsonPosition : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_INT_ACCESSORS(line)
+ JSON_INT_ACCESSORS(character)
+};
+
+class Range : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_OBJECT_ACCESSORS(JsonPosition, start)
+ JSON_OBJECT_ACCESSORS(JsonPosition, end)
+};
+
+class Location : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_STRING_ACCESSORS(uri)
+ JSON_OBJECT_ACCESSORS(Range, range)
+};
+
+class TextDocumentIdentifier : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_STRING_ACCESSORS(uri)
+};
+
+class TextDocumentPositionParams : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_OBJECT_ACCESSORS(TextDocumentIdentifier, textDocument)
+ JSON_OBJECT_ACCESSORS(JsonPosition, position)
+};
+
+template <class T>
+class Request : public Message {
+ public:
+ explicit Request(JsonValue& value) : Message(value) {}
+ Request() : Message() {}
+
+ JSON_INT_ACCESSORS(id)
+ JSON_STRING_ACCESSORS(method)
+ JSON_OBJECT_ACCESSORS(T, params)
+};
+using InitializeRequest = Request<InitializeParams>;
+using RegistrationRequest = Request<RegistrationParams>;
+using TorqueFileListNotification = Request<FileListParams>;
+using GotoDefinitionRequest = Request<TextDocumentPositionParams>;
+using DidChangeWatchedFilesNotification = Request<DidChangeWatchedFilesParams>;
+
+template <class T>
+class Response : public Message {
+ public:
+ explicit Response(JsonValue& value) : Message(value) {}
+ Response() : Message() {}
+
+ JSON_INT_ACCESSORS(id)
+ JSON_OBJECT_ACCESSORS(ResponseError, error)
+ JSON_OBJECT_ACCESSORS(T, result)
+};
+using InitializeResponse = Response<InitializeResult>;
+using GotoDefinitionResponse = Response<Location>;
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_LS_MESSAGE_H_
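A sketch of how the typed wrappers compose when building a response; every
accessor writes through to the single JsonValue owned by the Message, so the
result is immediately serializable (the URI below is hypothetical):

    GotoDefinitionResponse response;
    response.set_id(7);
    response.result().set_uri("file:///src/builtins/base.tq");
    Range range = response.result().range();
    range.start().set_line(3);
    range.start().set_character(0);
    range.end().set_line(3);
    range.end().set_character(12);
    WriteMessage(response.GetJsonValue());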
diff --git a/deps/v8/src/torque/ls/torque-language-server.cc b/deps/v8/src/torque/ls/torque-language-server.cc
new file mode 100644
index 0000000000..e8b16f641c
--- /dev/null
+++ b/deps/v8/src/torque/ls/torque-language-server.cc
@@ -0,0 +1,52 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fstream>
+#include <iostream>
+#include <sstream>
+
+#include "src/torque/ls/globals.h"
+#include "src/torque/ls/message-handler.h"
+#include "src/torque/ls/message-pipe.h"
+#include "src/torque/server-data.h"
+#include "src/torque/source-positions.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+namespace ls {
+
+int WrappedMain(int argc, const char** argv) {
+ Logger::Scope log_scope;
+ TorqueFileList::Scope files_scope;
+ LanguageServerData::Scope server_data_scope;
+ SourceFileMap::Scope source_file_map_scope;
+
+ for (int i = 1; i < argc; ++i) {
+ if (!strcmp("-l", argv[i])) {
+ Logger::Enable(argv[++i]);
+ break;
+ }
+ }
+
+ while (true) {
+ auto message = ReadMessage();
+
+ // TODO(szuend): We should probably offload the actual message handling
+ // (even the parsing) to a background thread, so we can
+ // keep receiving messages. We might also receive
+ // $/cancelRequests or content updates that require restarts.
+ HandleMessage(message, &WriteMessage);
+ }
+ return 0;
+}
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+int main(int argc, const char** argv) {
+ return v8::internal::torque::ls::WrappedMain(argc, argv);
+}
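Given the flag parsing above, the server would presumably be launched by the
editor (or by hand, for debugging) along these lines, with a hypothetical log
path:

    torque-language-server -l /tmp/torque-ls.log

It then blocks on stdin in the WrappedMain loop, handling one framed message
at a time.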
diff --git a/deps/v8/src/torque/server-data.cc b/deps/v8/src/torque/server-data.cc
new file mode 100644
index 0000000000..2dc92a4960
--- /dev/null
+++ b/deps/v8/src/torque/server-data.cc
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/server-data.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+DEFINE_CONTEXTUAL_VARIABLE(LanguageServerData)
+
+void LanguageServerData::AddDefinition(SourcePosition token,
+ SourcePosition definition) {
+ Get().definitions_map_[token.source].emplace_back(token, definition);
+}
+
+base::Optional<SourcePosition> LanguageServerData::FindDefinition(
+ SourceId source, LineAndColumn pos) {
+ for (const DefinitionMapping& mapping : Get().definitions_map_.at(source)) {
+ SourcePosition current = mapping.first;
+ if (current.Contains(pos)) return mapping.second;
+ }
+
+ return base::nullopt;
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/server-data.h b/deps/v8/src/torque/server-data.h
new file mode 100644
index 0000000000..1377fae3d9
--- /dev/null
+++ b/deps/v8/src/torque/server-data.h
@@ -0,0 +1,46 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_SERVER_DATA_H_
+#define V8_TORQUE_SERVER_DATA_H_
+
+#include <map>
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/base/optional.h"
+#include "src/torque/source-positions.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+// The definition of the token in the first element can be found at the second.
+using DefinitionMapping = std::pair<SourcePosition, SourcePosition>;
+// TODO(szuend): Support overlapping source positions when we start adding them.
+using Definitions = std::vector<DefinitionMapping>;
+using DefinitionsMap = std::map<SourceId, Definitions>;
+
+// This contextual class holds all the necessary data to answer incoming
+// LSP requests. It is reset for each compilation step and all information
+// is calculated eagerly during compilation.
+class LanguageServerData : public ContextualClass<LanguageServerData> {
+ public:
+ LanguageServerData() = default;
+
+ V8_EXPORT_PRIVATE static void AddDefinition(SourcePosition token,
+ SourcePosition definition);
+
+ V8_EXPORT_PRIVATE static base::Optional<SourcePosition> FindDefinition(
+ SourceId source, LineAndColumn pos);
+
+ private:
+ DefinitionsMap definitions_map_;
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_SERVER_DATA_H_
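A minimal sketch of the intended flow, assuming active contextual scopes and a
hypothetical source file; AddDefinition is called during compilation, and
FindDefinition answers later goto-definition requests:

    LanguageServerData::Scope data_scope;
    SourceFileMap::Scope file_map_scope;
    SourceId id = SourceFileMap::AddSource("base.tq");  // hypothetical
    SourcePosition token{id, {4, 10}, {4, 16}};
    SourcePosition definition{id, {1, 0}, {1, 6}};
    LanguageServerData::AddDefinition(token, definition);
    // Any hit inside the token's range resolves to the definition:
    base::Optional<SourcePosition> def =
        LanguageServerData::FindDefinition(id, {4, 12});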
diff --git a/deps/v8/src/torque/source-positions.h b/deps/v8/src/torque/source-positions.h
index 7b6f7a32ca..88d5aa2400 100644
--- a/deps/v8/src/torque/source-positions.h
+++ b/deps/v8/src/torque/source-positions.h
@@ -13,26 +13,61 @@ namespace v8 {
namespace internal {
namespace torque {
+struct SourcePosition;
+
class SourceId {
+ public:
+ static SourceId Invalid() { return SourceId(-1); }
+ bool IsValid() const { return id_ != -1; }
+ bool operator==(const SourceId& s) const { return id_ == s.id_; }
+ bool operator<(const SourceId& s) const { return id_ < s.id_; }
+
private:
explicit SourceId(int id) : id_(id) {}
int id_;
+ friend struct SourcePosition;
friend class SourceFileMap;
};
-struct SourcePosition {
- SourceId source;
+struct LineAndColumn {
int line;
int column;
+
+ static LineAndColumn Invalid() { return {-1, -1}; }
};
-DECLARE_CONTEXTUAL_VARIABLE(CurrentSourceFile, SourceId)
-DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition)
+struct SourcePosition {
+ SourceId source;
+ LineAndColumn start;
+ LineAndColumn end;
+
+ static SourcePosition Invalid() {
+ SourcePosition pos{SourceId::Invalid(), LineAndColumn::Invalid(),
+ LineAndColumn::Invalid()};
+ return pos;
+ }
+
+ bool CompareStartIgnoreColumn(const SourcePosition& pos) const {
+ return start.line == pos.start.line && source == pos.source;
+ }
+
+ bool Contains(LineAndColumn pos) const {
+ if (pos.line < start.line || pos.line > end.line) return false;
+
+ if (pos.line == start.line && pos.column < start.column) return false;
+ if (pos.line == end.line && pos.column >= end.column) return false;
+ return true;
+ }
+};
+
+DECLARE_CONTEXTUAL_VARIABLE(CurrentSourceFile, SourceId);
+DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition);
class SourceFileMap : public ContextualClass<SourceFileMap> {
public:
SourceFileMap() = default;
static const std::string& GetSource(SourceId source) {
+ CHECK(source.IsValid());
return Get().sources_[source.id_];
}
@@ -41,13 +76,23 @@ class SourceFileMap : public ContextualClass<SourceFileMap> {
return SourceId(static_cast<int>(Get().sources_.size()) - 1);
}
+ static SourceId GetSourceId(const std::string& path) {
+ for (size_t i = 0; i < Get().sources_.size(); ++i) {
+ if (Get().sources_[i] == path) {
+ return SourceId(static_cast<int>(i));
+ }
+ }
+ return SourceId::Invalid();
+ }
+
private:
std::vector<std::string> sources_;
};
inline std::string PositionAsString(SourcePosition pos) {
return SourceFileMap::GetSource(pos.source) + ":" +
- std::to_string(pos.line + 1) + ":" + std::to_string(pos.column + 1);
+ std::to_string(pos.start.line + 1) + ":" +
+ std::to_string(pos.start.column + 1);
}
inline std::ostream& operator<<(std::ostream& out, SourcePosition pos) {
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
new file mode 100644
index 0000000000..2d1b4688b2
--- /dev/null
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -0,0 +1,97 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/torque-compiler.h"
+
+#include <fstream>
+#include "src/torque/declarable.h"
+#include "src/torque/declaration-visitor.h"
+#include "src/torque/global-context.h"
+#include "src/torque/implementation-visitor.h"
+#include "src/torque/torque-parser.h"
+#include "src/torque/type-oracle.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+namespace {
+
+base::Optional<std::string> ReadFile(const std::string& path) {
+ std::ifstream file_stream(path);
+ if (!file_stream.good()) return base::nullopt;
+
+ return std::string{std::istreambuf_iterator<char>(file_stream),
+ std::istreambuf_iterator<char>()};
+}
+
+void ReadAndParseTorqueFile(const std::string& path) {
+ SourceId source_id = SourceFileMap::AddSource(path);
+ CurrentSourceFile::Scope source_id_scope(source_id);
+
+ // path might be either a normal file path or an encoded URI.
+ auto maybe_content = ReadFile(path);
+ if (!maybe_content) {
+ if (auto maybe_path = FileUriDecode(path)) {
+ maybe_content = ReadFile(*maybe_path);
+ }
+ }
+
+ if (!maybe_content) {
+ ReportErrorWithoutPosition("Cannot open file path/uri: ", path);
+ }
+
+ ParseTorque(*maybe_content);
+}
+
+} // namespace
+
+void CompileTorque(std::vector<std::string> files,
+ TorqueCompilerOptions options) {
+ CurrentSourceFile::Scope unknown_source_file_scope(SourceId::Invalid());
+ CurrentAst::Scope ast_scope_;
+ LintErrorStatus::Scope lint_error_status_scope_;
+
+ for (const auto& path : files) ReadAndParseTorqueFile(path);
+
+ GlobalContext::Scope global_context(std::move(CurrentAst::Get()));
+ if (options.verbose) GlobalContext::SetVerbose();
+ if (options.collect_language_server_data) {
+ GlobalContext::SetCollectLanguageServerData();
+ }
+ TypeOracle::Scope type_oracle;
+
+ DeclarationVisitor declaration_visitor;
+
+ declaration_visitor.Visit(GlobalContext::Get().ast());
+ declaration_visitor.FinalizeStructsAndClasses();
+
+ ImplementationVisitor implementation_visitor;
+ for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
+ implementation_visitor.BeginNamespaceFile(n);
+ }
+
+ implementation_visitor.VisitAllDeclarables();
+
+ std::string output_directory = options.output_directory;
+ if (output_directory.length() != 0) {
+ std::string output_header_path = output_directory;
+ output_header_path += "/builtin-definitions-from-dsl.h";
+ implementation_visitor.GenerateBuiltinDefinitions(output_header_path);
+
+ output_header_path = output_directory + "/class-definitions-from-dsl.h";
+ implementation_visitor.GenerateClassDefinitions(output_header_path);
+
+ for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
+ implementation_visitor.EndNamespaceFile(n);
+ implementation_visitor.GenerateImplementation(output_directory, n);
+ }
+ }
+
+ if (LintErrorStatus::HasLintErrors()) std::abort();
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/torque-compiler.h b/deps/v8/src/torque/torque-compiler.h
new file mode 100644
index 0000000000..ee8cf19b1a
--- /dev/null
+++ b/deps/v8/src/torque/torque-compiler.h
@@ -0,0 +1,33 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_TORQUE_COMPILER_H_
+#define V8_TORQUE_TORQUE_COMPILER_H_
+
+#include "src/torque/ast.h"
+#include "src/torque/contextual.h"
+#include "src/torque/source-positions.h"
+#include "src/torque/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+struct TorqueCompilerOptions {
+ std::string output_directory;
+ bool verbose;
+ bool collect_language_server_data;
+ bool abort_on_lint_errors;
+
+ static TorqueCompilerOptions Default() { return {"", false, false, false}; }
+};
+
+void CompileTorque(std::vector<std::string> files,
+ TorqueCompilerOptions = TorqueCompilerOptions::Default());
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_TORQUE_COMPILER_H_
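A usage sketch of this embedder-facing entry point, with hypothetical paths:

    TorqueCompilerOptions options = TorqueCompilerOptions::Default();
    options.output_directory = "gen/torque-generated";
    options.collect_language_server_data = true;
    CompileTorque({"src/builtins/base.tq", "src/builtins/array.tq"}, options);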
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 6f00686cef..0ea8ebaec2 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -12,10 +12,10 @@ namespace v8 {
namespace internal {
namespace torque {
-DEFINE_CONTEXTUAL_VARIABLE(CurrentAst);
+DEFINE_CONTEXTUAL_VARIABLE(CurrentAst)
using TypeList = std::vector<TypeExpression*>;
-using GenericParameters = std::vector<std::string>;
+using GenericParameters = std::vector<Identifier*>;
struct ExpressionWithSource {
Expression* expression;
@@ -29,43 +29,6 @@ struct TypeswitchCase {
Statement* block;
};
-enum class ParseResultHolderBase::TypeId {
- kStdString,
- kBool,
- kStdVectorOfString,
- kExpressionPtr,
- kLocationExpressionPtr,
- kStatementPtr,
- kDeclarationPtr,
- kTypeExpressionPtr,
- kLabelBlockPtr,
- kOptionalLabelBlockPtr,
- kNameAndTypeExpression,
- kClassFieldExpression,
- kStructFieldExpression,
- kStdVectorOfNameAndTypeExpression,
- kStdVectorOfClassFieldExpression,
- kStdVectorOfStructFieldExpression,
- kIncrementDecrementOperator,
- kOptionalStdString,
- kStdVectorOfStatementPtr,
- kStdVectorOfDeclarationPtr,
- kStdVectorOfExpressionPtr,
- kExpressionWithSource,
- kParameterList,
- kRangeExpression,
- kOptionalRangeExpression,
- kTypeList,
- kOptionalTypeList,
- kLabelAndTypes,
- kStdVectorOfLabelAndTypes,
- kStdVectorOfLabelBlockPtr,
- kOptionalStatementPtr,
- kOptionalExpressionPtr,
- kTypeswitchCase,
- kStdVectorOfTypeswitchCase
-};
-
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<std::string>::id =
ParseResultTypeId::kStdString;
@@ -84,6 +47,10 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<TypeExpression*>::id =
ParseResultTypeId::kTypeExpressionPtr;
template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<TypeExpression*>>::id =
+ ParseResultTypeId::kOptionalTypeExpressionPtr;
+template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<LabelBlock*>::id =
ParseResultTypeId::kLabelBlockPtr;
template <>
@@ -94,6 +61,9 @@ template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<Expression*>::id =
ParseResultTypeId::kExpressionPtr;
template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<Identifier*>::id =
+ ParseResultTypeId::kIdentifierPtr;
+template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<LocationExpression*>::id =
ParseResultTypeId::kLocationExpressionPtr;
@@ -192,6 +162,10 @@ template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<std::vector<TypeswitchCase>>::id =
ParseResultTypeId::kStdVectorOfTypeswitchCase;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<Identifier*>>::id =
+ ParseResultTypeId::kStdVectorOfIdentifierPtr;
namespace {
@@ -203,9 +177,10 @@ base::Optional<ParseResult> AddGlobalDeclaration(
}
void LintGenericParameters(const GenericParameters& parameters) {
- for (const std::string& parameter : parameters) {
- if (!IsUpperCamelCase(parameter)) {
- NamingConventionError("Generic parameter", parameter, "UpperCamelCase");
+ for (const Identifier* parameter : parameters) {
+ if (!IsUpperCamelCase(parameter->value)) {
+ NamingConventionError("Generic parameter", parameter->value,
+ "UpperCamelCase");
}
}
}
@@ -238,7 +213,7 @@ Expression* MakeCall(IdentifierExpression* callee,
if (id->generic_arguments.size() != 0) {
ReportError("An otherwise label cannot have generic parameters");
}
- labels.push_back(id->name);
+ labels.push_back(id->name->value);
continue;
}
}
@@ -268,7 +243,8 @@ Expression* MakeCall(const std::string& callee,
const std::vector<TypeExpression*>& generic_arguments,
const std::vector<Expression*>& arguments,
const std::vector<Statement*>& otherwise) {
- return MakeCall(MakeNode<IdentifierExpression>(callee, generic_arguments),
+ return MakeCall(MakeNode<IdentifierExpression>(MakeNode<Identifier>(callee),
+ generic_arguments),
base::nullopt, arguments, otherwise);
}
@@ -277,18 +253,7 @@ base::Optional<ParseResult> MakeCall(ParseResultIterator* child_results) {
auto args = child_results->NextAs<std::vector<Expression*>>();
auto otherwise = child_results->NextAs<std::vector<Statement*>>();
IdentifierExpression* target = IdentifierExpression::cast(callee);
- if (target->name == kSuperMethodName) {
- if (target->namespace_qualification.size() != 0) {
- ReportError(
- "\"super\" invocation cannot be used with namespace qualification");
- }
- target = MakeNode<IdentifierExpression>(kSuperMethodName);
- return ParseResult{
- MakeCall(target, MakeNode<IdentifierExpression>(kThisParameterName),
- args, otherwise)};
- } else {
return ParseResult{MakeCall(target, base::nullopt, args, otherwise)};
- }
}
base::Optional<ParseResult> MakeMethodCall(ParseResultIterator* child_results) {
@@ -296,8 +261,9 @@ base::Optional<ParseResult> MakeMethodCall(ParseResultIterator* child_results) {
auto callee = child_results->NextAs<std::string>();
auto args = child_results->NextAs<std::vector<Expression*>>();
auto otherwise = child_results->NextAs<std::vector<Statement*>>();
- return ParseResult{MakeCall(MakeNode<IdentifierExpression>(callee), this_arg,
- args, otherwise)};
+ return ParseResult{
+ MakeCall(MakeNode<IdentifierExpression>(MakeNode<Identifier>(callee)),
+ this_arg, args, otherwise)};
}
base::Optional<ParseResult> MakeNew(ParseResultIterator* child_results) {
@@ -346,8 +312,9 @@ base::Optional<ParseResult> MakeParameterListFromTypes(
result.has_varargs = has_varargs;
result.implicit_count = implicit_params.size();
for (NameAndTypeExpression& implicit_param : implicit_params) {
- if (!IsLowerCamelCase(implicit_param.name)) {
- NamingConventionError("Parameter", implicit_param.name, "lowerCamelCase");
+ if (!IsLowerCamelCase(implicit_param.name->value)) {
+ NamingConventionError("Parameter", implicit_param.name->value,
+ "lowerCamelCase");
}
result.names.push_back(implicit_param.name);
result.types.push_back(implicit_param.type);
@@ -371,19 +338,19 @@ base::Optional<ParseResult> MakeParameterListFromNameAndTypeList(
}
ParameterList result;
for (NameAndTypeExpression& pair : implicit_params) {
- if (!IsLowerCamelCase(pair.name)) {
- NamingConventionError("Parameter", pair.name, "lowerCamelCase");
+ if (!IsLowerCamelCase(pair.name->value)) {
+ NamingConventionError("Parameter", pair.name->value, "lowerCamelCase");
}
result.names.push_back(std::move(pair.name));
result.types.push_back(pair.type);
}
for (NameAndTypeExpression& pair : explicit_params) {
- if (!IsLowerCamelCase(pair.name)) {
- NamingConventionError("Parameter", pair.name, "lowerCamelCase");
+ if (!IsLowerCamelCase(pair.name->value)) {
+ NamingConventionError("Parameter", pair.name->value, "lowerCamelCase");
}
- result.names.push_back(std::move(pair.name));
+ result.names.push_back(pair.name);
result.types.push_back(pair.type);
}
result.implicit_count = implicit_params.size();
@@ -518,49 +485,48 @@ base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
base::Optional<ParseResult> MakeConstDeclaration(
ParseResultIterator* child_results) {
- auto name = child_results->NextAs<std::string>();
- if (!IsValidNamespaceConstName(name)) {
- NamingConventionError("Constant", name, "kUpperCamelCase");
+ auto name = child_results->NextAs<Identifier*>();
+ if (!IsValidNamespaceConstName(name->value)) {
+ NamingConventionError("Constant", name->value, "kUpperCamelCase");
}
auto type = child_results->NextAs<TypeExpression*>();
auto expression = child_results->NextAs<Expression*>();
- Declaration* result =
- MakeNode<ConstDeclaration>(std::move(name), type, expression);
+ Declaration* result = MakeNode<ConstDeclaration>(name, type, expression);
return ParseResult{result};
}
base::Optional<ParseResult> MakeExternConstDeclaration(
ParseResultIterator* child_results) {
- auto name = child_results->NextAs<std::string>();
+ auto name = child_results->NextAs<Identifier*>();
auto type = child_results->NextAs<TypeExpression*>();
auto literal = child_results->NextAs<std::string>();
- Declaration* result = MakeNode<ExternConstDeclaration>(std::move(name), type,
- std::move(literal));
+ Declaration* result =
+ MakeNode<ExternConstDeclaration>(name, type, std::move(literal));
return ParseResult{result};
}
base::Optional<ParseResult> MakeTypeAliasDeclaration(
ParseResultIterator* child_results) {
- auto name = child_results->NextAs<std::string>();
+ auto name = child_results->NextAs<Identifier*>();
auto type = child_results->NextAs<TypeExpression*>();
- Declaration* result = MakeNode<TypeAliasDeclaration>(std::move(name), type);
+ Declaration* result = MakeNode<TypeAliasDeclaration>(name, type);
return ParseResult{result};
}
base::Optional<ParseResult> MakeTypeDeclaration(
ParseResultIterator* child_results) {
auto transient = child_results->NextAs<bool>();
- auto name = child_results->NextAs<std::string>();
- if (!IsValidTypeName(name)) {
- NamingConventionError("Type", name, "UpperCamelCase");
+ auto name = child_results->NextAs<Identifier*>();
+ if (!IsValidTypeName(name->value)) {
+ NamingConventionError("Type", name->value, "UpperCamelCase");
}
auto extends = child_results->NextAs<base::Optional<std::string>>();
auto generates = child_results->NextAs<base::Optional<std::string>>();
auto constexpr_generates =
child_results->NextAs<base::Optional<std::string>>();
Declaration* result = MakeNode<TypeDeclaration>(
- std::move(name), transient, std::move(extends), std::move(generates),
+ name, transient, std::move(extends), std::move(generates),
std::move(constexpr_generates));
return ParseResult{result};
}
@@ -570,7 +536,7 @@ base::Optional<ParseResult> MakeMethodDeclaration(
auto transitioning = child_results->NextAs<bool>();
auto operator_name = child_results->NextAs<base::Optional<std::string>>();
auto name = child_results->NextAs<std::string>();
- if (name != kConstructMethodName && !IsUpperCamelCase(name)) {
+ if (!IsUpperCamelCase(name)) {
NamingConventionError("Method", name, "UpperCamelCase");
}
@@ -586,17 +552,18 @@ base::Optional<ParseResult> MakeMethodDeclaration(
base::Optional<ParseResult> MakeClassDeclaration(
ParseResultIterator* child_results) {
+ auto is_extern = child_results->NextAs<bool>();
auto transient = child_results->NextAs<bool>();
- auto name = child_results->NextAs<std::string>();
- if (!IsValidTypeName(name)) {
- NamingConventionError("Type", name, "UpperCamelCase");
+ auto name = child_results->NextAs<Identifier*>();
+ if (!IsValidTypeName(name->value)) {
+ NamingConventionError("Type", name->value, "UpperCamelCase");
}
- auto extends = child_results->NextAs<std::string>();
+ auto extends = child_results->NextAs<base::Optional<std::string>>();
auto generates = child_results->NextAs<base::Optional<std::string>>();
auto methods = child_results->NextAs<std::vector<Declaration*>>();
auto fields = child_results->NextAs<std::vector<ClassFieldExpression>>();
Declaration* result = MakeNode<ClassDeclaration>(
- std::move(name), transient, std::move(extends), std::move(generates),
+ name, is_extern, transient, std::move(extends), std::move(generates),
std::move(methods), fields);
return ParseResult{result};
}
@@ -631,11 +598,11 @@ base::Optional<ParseResult> MakeSpecializationDeclaration(
base::Optional<ParseResult> MakeStructDeclaration(
ParseResultIterator* child_results) {
- auto name = child_results->NextAs<std::string>();
+ auto name = child_results->NextAs<Identifier*>();
auto methods = child_results->NextAs<std::vector<Declaration*>>();
auto fields = child_results->NextAs<std::vector<StructFieldExpression>>();
- Declaration* result = MakeNode<StructDeclaration>(
- std::move(name), std::move(methods), std::move(fields));
+ Declaration* result =
+ MakeNode<StructDeclaration>(name, std::move(methods), std::move(fields));
return ParseResult{result};
}
@@ -782,13 +749,14 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
{
CurrentSourcePosition::Scope current_source_position(expression->pos);
current_block->statements.push_back(MakeNode<VarDeclarationStatement>(
- true, "_value", base::nullopt, expression));
+ true, MakeNode<Identifier>("_value"), base::nullopt, expression));
}
TypeExpression* accumulated_types;
for (size_t i = 0; i < cases.size(); ++i) {
CurrentSourcePosition::Scope current_source_position(cases[i].pos);
- Expression* value = MakeNode<IdentifierExpression>("_value");
+ Expression* value =
+ MakeNode<IdentifierExpression>(MakeNode<Identifier>("_value"));
if (i >= 1) {
value =
MakeNode<AssumeTypeImpossibleExpression>(accumulated_types, value);
@@ -798,15 +766,16 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
value = MakeCall("Cast", std::vector<TypeExpression*>{cases[i].type},
std::vector<Expression*>{value},
std::vector<Statement*>{MakeNode<ExpressionStatement>(
- MakeNode<IdentifierExpression>("_NextCase"))});
+ MakeNode<IdentifierExpression>(
+ MakeNode<Identifier>("_NextCase")))});
case_block = MakeNode<BlockStatement>();
} else {
case_block = current_block;
}
std::string name = "_case_value";
if (cases[i].name) name = *cases[i].name;
- case_block->statements.push_back(
- MakeNode<VarDeclarationStatement>(true, name, cases[i].type, value));
+ case_block->statements.push_back(MakeNode<VarDeclarationStatement>(
+ true, MakeNode<Identifier>(name), cases[i].type, value));
case_block->statements.push_back(cases[i].block);
if (i < cases.size() - 1) {
BlockStatement* next_block = MakeNode<BlockStatement>();
@@ -861,17 +830,20 @@ base::Optional<ParseResult> MakeVarDeclarationStatement(
auto kind = child_results->NextAs<std::string>();
bool const_qualified = kind == "const";
if (!const_qualified) DCHECK_EQ("let", kind);
- auto name = child_results->NextAs<std::string>();
- if (!IsLowerCamelCase(name)) {
- NamingConventionError("Variable", name, "lowerCamelCase");
+ auto name = child_results->NextAs<Identifier*>();
+ if (!IsLowerCamelCase(name->value)) {
+ NamingConventionError("Variable", name->value, "lowerCamelCase");
}
- auto type = child_results->NextAs<TypeExpression*>();
+ auto type = child_results->NextAs<base::Optional<TypeExpression*>>();
base::Optional<Expression*> initializer;
if (child_results->HasNext())
initializer = child_results->NextAs<Expression*>();
- Statement* result = MakeNode<VarDeclarationStatement>(
- const_qualified, std::move(name), type, initializer);
+ if (!initializer && !type) {
+ ReportError("Declaration is missing a type.");
+ }
+ Statement* result = MakeNode<VarDeclarationStatement>(const_qualified, name,
+ type, initializer);
return ParseResult{result};
}
@@ -971,7 +943,7 @@ base::Optional<ParseResult> MakeCatchBlock(ParseResultIterator* child_results) {
NamingConventionError("Exception", variable, "lowerCamelCase");
}
ParameterList parameters;
- parameters.names.push_back(variable);
+ parameters.names.push_back(MakeNode<Identifier>(variable));
parameters.types.push_back(MakeNode<BasicTypeExpression>(
std::vector<std::string>{}, false, "Object"));
parameters.has_varargs = false;
@@ -995,16 +967,21 @@ base::Optional<ParseResult> MakeExpressionWithSource(
ExpressionWithSource{e, child_results->matched_input().ToString()}};
}
+base::Optional<ParseResult> MakeIdentifier(ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ Identifier* result = MakeNode<Identifier>(std::move(name));
+ return ParseResult{result};
+}
+
base::Optional<ParseResult> MakeIdentifierExpression(
ParseResultIterator* child_results) {
auto namespace_qualification =
child_results->NextAs<std::vector<std::string>>();
- auto name = child_results->NextAs<std::string>();
+ auto name = child_results->NextAs<Identifier*>();
auto generic_arguments =
child_results->NextAs<std::vector<TypeExpression*>>();
LocationExpression* result = MakeNode<IdentifierExpression>(
- std::move(namespace_qualification), std::move(name),
- std::move(generic_arguments));
+ std::move(namespace_qualification), name, std::move(generic_arguments));
return ParseResult{result};
}
@@ -1117,23 +1094,24 @@ base::Optional<ParseResult> MakeLabelAndTypes(
base::Optional<ParseResult> MakeNameAndType(
ParseResultIterator* child_results) {
- auto name = child_results->NextAs<std::string>();
+ auto name = child_results->NextAs<Identifier*>();
auto type = child_results->NextAs<TypeExpression*>();
- return ParseResult{NameAndTypeExpression{std::move(name), type}};
+ return ParseResult{NameAndTypeExpression{name, type}};
}
base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
auto weak = child_results->NextAs<bool>();
- auto name = child_results->NextAs<std::string>();
+ auto name = child_results->NextAs<Identifier*>();
+ auto index = child_results->NextAs<base::Optional<std::string>>();
auto type = child_results->NextAs<TypeExpression*>();
- return ParseResult{ClassFieldExpression{{std::move(name), type}, weak}};
+ return ParseResult{ClassFieldExpression{{name, type}, index, weak}};
}
base::Optional<ParseResult> MakeStructField(
ParseResultIterator* child_results) {
- auto name = child_results->NextAs<std::string>();
+ auto name = child_results->NextAs<Identifier*>();
auto type = child_results->NextAs<TypeExpression*>();
- return ParseResult{StructFieldExpression{{std::move(name), type}}};
+ return ParseResult{StructFieldExpression{{name, type}}};
}
base::Optional<ParseResult> ExtractAssignmentOperator(
@@ -1236,6 +1214,9 @@ struct TorqueGrammar : Grammar {
// Result: std::string
Symbol identifier = {Rule({Pattern(MatchIdentifier)}, YieldMatchedInput)};
+ // Result: Identifier*
+ Symbol name = {Rule({&identifier}, MakeIdentifier)};
+
// Result: std::string
Symbol intrinsicName = {
Rule({Pattern(MatchIntrinsicName)}, YieldMatchedInput)};
@@ -1272,8 +1253,8 @@ struct TorqueGrammar : Grammar {
// Result: GenericParameters
Symbol genericParameters = {
Rule({Token("<"),
- List<std::string>(
- Sequence({&identifier, Token(":"), Token("type")}), Token(",")),
+ List<Identifier*>(Sequence({&name, Token(":"), Token("type")}),
+ Token(",")),
Token(">")})};
// Result: TypeList
@@ -1319,15 +1300,18 @@ struct TorqueGrammar : Grammar {
NonemptyList<Statement*>(&atomarStatement, Token(","))}))};
// Result: NameAndTypeExpression
- Symbol nameAndType = {
- Rule({&identifier, Token(":"), &type}, MakeNameAndType)};
+ Symbol nameAndType = {Rule({&name, Token(":"), &type}, MakeNameAndType)};
+
+ Symbol* optionalArraySpecifier = {
+ Optional<std::string>(Sequence({Token("["), &identifier, Token("]")}))};
Symbol classField = {
- Rule({CheckIf(Token("weak")), &identifier, Token(":"), &type, Token(";")},
+ Rule({CheckIf(Token("weak")), &name, optionalArraySpecifier, Token(":"),
+ &type, Token(";")},
MakeClassField)};
Symbol structField = {
- Rule({&identifier, Token(":"), &type, Token(";")}, MakeStructField)};
+ Rule({&name, Token(":"), &type, Token(";")}, MakeStructField)};
// Result: ParameterList
Symbol parameterListNoVararg = {
@@ -1374,10 +1358,9 @@ struct TorqueGrammar : Grammar {
// Result: LocationExpression*
Symbol identifierExpression = {
- Rule(
- {List<std::string>(Sequence({&identifier, Token("::")})), &identifier,
- TryOrDefault<TypeList>(&genericSpecializationTypeList)},
- MakeIdentifierExpression),
+ Rule({List<std::string>(Sequence({&identifier, Token("::")})), &name,
+ TryOrDefault<TypeList>(&genericSpecializationTypeList)},
+ MakeIdentifierExpression),
};
// Result: LocationExpression*
@@ -1522,14 +1505,17 @@ struct TorqueGrammar : Grammar {
Optional<Expression*>(expression), Token("]")},
MakeRangeExpression)};
+ Symbol* optionalTypeSpecifier =
+ Optional<TypeExpression*>(Sequence({Token(":"), &type}));
+
// Result: Statement*
Symbol varDeclaration = {
- Rule({OneOf({"let", "const"}), &identifier, Token(":"), &type},
+ Rule({OneOf({"let", "const"}), &name, optionalTypeSpecifier},
MakeVarDeclarationStatement)};
// Result: Statement*
Symbol varDeclarationWithInitialization = {
- Rule({OneOf({"let", "const"}), &identifier, Token(":"), &type, Token("="),
+ Rule({OneOf({"let", "const"}), &name, optionalTypeSpecifier, Token("="),
expression},
MakeVarDeclarationStatement)};
@@ -1602,24 +1588,24 @@ struct TorqueGrammar : Grammar {
// Result: Declaration*
Symbol declaration = {
- Rule({Token("const"), &identifier, Token(":"), &type, Token("="),
- expression, Token(";")},
+ Rule({Token("const"), &name, Token(":"), &type, Token("="), expression,
+ Token(";")},
MakeConstDeclaration),
- Rule({Token("const"), &identifier, Token(":"), &type, Token("generates"),
+ Rule({Token("const"), &name, Token(":"), &type, Token("generates"),
&externalString, Token(";")},
MakeExternConstDeclaration),
- Rule({CheckIf(Token("transient")), Token("class"), &identifier,
- Sequence({Token("extends"), &identifier}),
+ Rule({CheckIf(Token("extern")), CheckIf(Token("transient")),
+ Token("class"), &name,
+ Optional<std::string>(Sequence({Token("extends"), &identifier})),
Optional<std::string>(
Sequence({Token("generates"), &externalString})),
Token("{"), List<Declaration*>(&method),
List<ClassFieldExpression>(&classField), Token("}")},
MakeClassDeclaration),
- Rule({Token("struct"), &identifier, Token("{"),
- List<Declaration*>(&method),
+ Rule({Token("struct"), &name, Token("{"), List<Declaration*>(&method),
List<StructFieldExpression>(&structField), Token("}")},
MakeStructDeclaration),
- Rule({CheckIf(Token("transient")), Token("type"), &identifier,
+ Rule({CheckIf(Token("transient")), Token("type"), &name,
Optional<std::string>(Sequence({Token("extends"), &identifier})),
Optional<std::string>(
Sequence({Token("generates"), &externalString})),
@@ -1627,7 +1613,7 @@ struct TorqueGrammar : Grammar {
Sequence({Token("constexpr"), &externalString})),
Token(";")},
MakeTypeDeclaration),
- Rule({Token("type"), &identifier, Token("="), &type, Token(";")},
+ Rule({Token("type"), &name, Token("="), &type, Token(";")},
MakeTypeAliasDeclaration),
Rule({Token("intrinsic"), &intrinsicName,
TryOrDefault<GenericParameters>(&genericParameters),
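
Note: the parser changes above consistently replace raw std::string names with Identifier* AST nodes, so that a name carries its own source position into later compiler phases instead of inheriting the position of the enclosing statement. A minimal standalone sketch of the idea, using hypothetical types rather than V8's actual AST classes:

    #include <iostream>
    #include <string>

    struct SourcePosition { int line; int column; };

    // Wrapping the name in a node keeps the position of the name itself,
    // which is what NamingConventionError and friends want to report.
    struct Identifier {
      SourcePosition pos;
      std::string value;
    };

    void NamingConventionError(const std::string& kind, const Identifier* name,
                               const std::string& convention) {
      std::cerr << name->pos.line << ":" << name->pos.column << ": " << kind
                << " \"" << name->value << "\" should be " << convention << "\n";
    }

    int main() {
      Identifier parameter{{12, 8}, "Foo_bar"};
      NamingConventionError("Generic parameter", &parameter, "UpperCamelCase");
    }
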
diff --git a/deps/v8/src/torque/torque.cc b/deps/v8/src/torque/torque.cc
index 05dabdccca..25872732d5 100644
--- a/deps/v8/src/torque/torque.cc
+++ b/deps/v8/src/torque/torque.cc
@@ -2,17 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <fstream>
-#include <iostream>
-
-#include "src/torque/declarable.h"
-#include "src/torque/declaration-visitor.h"
-#include "src/torque/global-context.h"
-#include "src/torque/implementation-visitor.h"
-#include "src/torque/torque-parser.h"
-#include "src/torque/type-oracle.h"
-#include "src/torque/types.h"
-#include "src/torque/utils.h"
+#include "src/torque/torque-compiler.h"
namespace v8 {
namespace internal {
@@ -21,11 +11,7 @@ namespace torque {
int WrappedMain(int argc, const char** argv) {
std::string output_directory;
bool verbose = false;
- SourceFileMap::Scope source_file_map_scope;
- CurrentSourceFile::Scope unknown_sourcefile_scope(
- SourceFileMap::AddSource("<unknown>"));
- CurrentAst::Scope ast_scope;
- LintErrorStatus::Scope lint_error_status_scope;
+ std::vector<std::string> files;
for (int i = 1; i < argc; ++i) {
// Check for options
@@ -38,46 +24,19 @@ int WrappedMain(int argc, const char** argv) {
continue;
}
- // Otherwise it's a .tq
- // file, parse it and
- // remember the syntax tree
- std::string path = argv[i];
- SourceId source_id = SourceFileMap::AddSource(path);
- CurrentSourceFile::Scope source_id_scope(source_id);
- std::ifstream file_stream(path);
- std::string file_content = {std::istreambuf_iterator<char>(file_stream),
- std::istreambuf_iterator<char>()};
- ParseTorque(file_content);
+ // Otherwise it's a .tq file. Remember it for compilation.
+ files.emplace_back(argv[i]);
}
- GlobalContext::Scope global_context(std::move(CurrentAst::Get()));
- if (verbose) GlobalContext::SetVerbose();
- TypeOracle::Scope type_oracle;
-
- if (output_directory.length() != 0) {
- DeclarationVisitor().Visit(GlobalContext::Get().ast());
-
- ImplementationVisitor visitor;
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- visitor.BeginNamespaceFile(n);
- }
-
- visitor.VisitAllDeclarables();
-
- std::string output_header_path = output_directory;
- output_header_path += "/builtin-definitions-from-dsl.h";
- visitor.GenerateBuiltinDefinitions(output_header_path);
-
- output_header_path = output_directory + "/class-definitions-from-dsl.h";
- visitor.GenerateClassDefinitions(output_header_path);
+ SourceFileMap::Scope source_file_map_scope;
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- visitor.EndNamespaceFile(n);
- visitor.GenerateImplementation(output_directory, n);
- }
- }
+ TorqueCompilerOptions options;
+ options.output_directory = output_directory;
+ options.verbose = verbose;
+ options.collect_language_server_data = false;
+ options.abort_on_lint_errors = true;
- if (LintErrorStatus::HasLintErrors()) std::abort();
+ CompileTorque(files, options);
return 0;
}
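
Note: the rewritten driver reduces torque.cc to argument parsing plus a single call into the new torque-compiler.h API. A usage sketch of that entry point, mirroring the hunk above; it assumes the V8 source tree's include paths and that both names live in v8::internal::torque, as the surrounding namespaces suggest:

    #include <string>
    #include <vector>
    #include "src/torque/torque-compiler.h"

    void CompileAllTorqueFiles(const std::vector<std::string>& tq_files,
                               const std::string& output_directory) {
      v8::internal::torque::TorqueCompilerOptions options;
      options.output_directory = output_directory;
      options.verbose = false;
      options.collect_language_server_data = false;  // language-server only
      options.abort_on_lint_errors = true;           // match the CLI behavior above
      v8::internal::torque::CompileTorque(tq_files, options);
    }
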
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index ee1b5cee1c..9a8e4ee95d 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -28,20 +28,17 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return result;
}
- static StructType* GetStructType(const std::string& name,
- const std::vector<Field>& fields) {
- StructType* result = new StructType(CurrentNamespace(), name, fields);
+ static StructType* GetStructType(const std::string& name) {
+ StructType* result = new StructType(CurrentNamespace(), name);
Get().struct_types_.push_back(std::unique_ptr<StructType>(result));
return result;
}
static ClassType* GetClassType(const Type* parent, const std::string& name,
- bool transient, const std::string& generates,
- const std::vector<Field>& fields,
- StructType* this_struct, size_t size) {
- ClassType* result =
- new ClassType(parent, CurrentNamespace(), name, transient, generates,
- fields, this_struct, size);
+ bool is_extern, bool transient,
+ const std::string& generates) {
+ ClassType* result = new ClassType(parent, CurrentNamespace(), name,
+ is_extern, transient, generates);
Get().struct_types_.push_back(std::unique_ptr<ClassType>(result));
return result;
}
@@ -99,6 +96,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(CONSTEXPR_BOOL_TYPE_STRING);
}
+ static const Type* GetConstexprIntPtrType() {
+ return Get().GetBuiltinType(CONSTEXPR_INTPTR_TYPE_STRING);
+ }
+
static const Type* GetVoidType() {
return Get().GetBuiltinType(VOID_TYPE_STRING);
}
@@ -107,10 +108,18 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(RAWPTR_TYPE_STRING);
}
+ static const Type* GetMapType() {
+ return Get().GetBuiltinType(MAP_TYPE_STRING);
+ }
+
static const Type* GetObjectType() {
return Get().GetBuiltinType(OBJECT_TYPE_STRING);
}
+ static const Type* GetJSObjectType() {
+ return Get().GetBuiltinType(JSOBJECT_TYPE_STRING);
+ }
+
static const Type* GetTaggedType() {
return Get().GetBuiltinType(TAGGED_TYPE_STRING);
}
@@ -143,6 +152,30 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(INT32_TYPE_STRING);
}
+ static const Type* GetUint32Type() {
+ return Get().GetBuiltinType(UINT32_TYPE_STRING);
+ }
+
+ static const Type* GetInt16Type() {
+ return Get().GetBuiltinType(INT16_TYPE_STRING);
+ }
+
+ static const Type* GetUint16Type() {
+ return Get().GetBuiltinType(UINT16_TYPE_STRING);
+ }
+
+ static const Type* GetInt8Type() {
+ return Get().GetBuiltinType(INT8_TYPE_STRING);
+ }
+
+ static const Type* GetUint8Type() {
+ return Get().GetBuiltinType(UINT8_TYPE_STRING);
+ }
+
+ static const Type* GetFloat64Type() {
+ return Get().GetBuiltinType(FLOAT64_TYPE_STRING);
+ }
+
static const Type* GetNeverType() {
return Get().GetBuiltinType(NEVER_TYPE_STRING);
}
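
Note: each of these accessors follows the same shape, a static wrapper that fetches a pre-registered builtin type by its canonical name string. A standalone sketch of that lookup pattern, using a plain map instead of V8's ContextualClass machinery:

    #include <cassert>
    #include <map>
    #include <string>

    struct Type { std::string name; };

    class TypeRegistry {
     public:
      void Register(const std::string& name) { types_[name] = Type{name}; }

      // Builtin types must have been registered during bootstrapping.
      const Type* GetBuiltinType(const std::string& name) {
        auto it = types_.find(name);
        assert(it != types_.end() && "builtin type must be pre-registered");
        return &it->second;
      }

      // Mirrors accessors such as GetUint32Type() above.
      const Type* GetUint32Type() { return GetBuiltinType("uint32"); }

     private:
      std::map<std::string, Type> types_;
    };

    int main() {
      TypeRegistry registry;
      registry.Register("uint32");
      assert(registry.GetUint32Type()->name == "uint32");
    }
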
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 86a2020d21..6b15b9f6f2 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -4,6 +4,7 @@
#include <iostream>
+#include "src/globals.h"
#include "src/torque/declarable.h"
#include "src/torque/type-oracle.h"
#include "src/torque/types.h"
@@ -74,11 +75,27 @@ bool Type::IsAbstractName(const std::string& name) const {
return AbstractType::cast(this)->name() == name;
}
-std::string AbstractType::GetGeneratedTNodeTypeName() const {
- return generated_type_;
+std::string Type::GetGeneratedTypeName() const {
+ std::string result = GetGeneratedTypeNameImpl();
+ if (result.empty() || result == "compiler::TNode<>") {
+ ReportError("Generated type is required for type '", ToString(),
+ "'. Use 'generates' clause in definition.");
+ }
+ return result;
}
-std::string ClassType::GetGeneratedTNodeTypeName() const { return generates_; }
+std::string Type::GetGeneratedTNodeTypeName() const {
+ std::string result = GetGeneratedTNodeTypeNameImpl();
+ if (result.empty()) {
+ ReportError("Generated TNode type is required for type '", ToString(),
+ "'. Use 'generates' clause in definition.");
+ }
+ return result;
+}
+
+std::string AbstractType::GetGeneratedTNodeTypeNameImpl() const {
+ return generated_type_;
+}
std::string BuiltinPointerType::ToExplicitString() const {
std::stringstream result;
@@ -125,7 +142,7 @@ std::string UnionType::MangledName() const {
return result.str();
}
-std::string UnionType::GetGeneratedTNodeTypeName() const {
+std::string UnionType::GetGeneratedTNodeTypeNameImpl() const {
if (types_.size() <= 3) {
std::set<std::string> members;
for (const Type* t : types_) {
@@ -227,6 +244,18 @@ std::vector<const AggregateType*> AggregateType::GetHierarchy() {
return hierarchy;
}
+bool AggregateType::HasField(const std::string& name) const {
+ for (auto& field : fields_) {
+ if (field.name_and_type.name == name) return true;
+ }
+ if (parent() != nullptr) {
+ if (auto parent_class = ClassType::DynamicCast(parent())) {
+ return parent_class->HasField(name);
+ }
+ }
+ return false;
+}
+
const Field& AggregateType::LookupField(const std::string& name) const {
for (auto& field : fields_) {
if (field.name_and_type.name == name) return field;
@@ -236,10 +265,10 @@ const Field& AggregateType::LookupField(const std::string& name) const {
return parent_class->LookupField(name);
}
}
- ReportError("no field ", name, "found");
+ ReportError("no field ", name, " found");
}
-std::string StructType::GetGeneratedTypeName() const {
+std::string StructType::GetGeneratedTypeNameImpl() const {
return nspace()->ExternalName() + "::" + name();
}
@@ -250,10 +279,6 @@ std::vector<Method*> AggregateType::Methods(const std::string& name) const {
return result;
}
-std::vector<Method*> AggregateType::Constructors() const {
- return Methods(kConstructMethodName);
-}
-
std::string StructType::ToExplicitString() const {
std::stringstream result;
result << "struct " << name() << "{";
@@ -262,6 +287,45 @@ std::string StructType::ToExplicitString() const {
return result.str();
}
+ClassType::ClassType(const Type* parent, Namespace* nspace,
+ const std::string& name, bool is_extern, bool transient,
+ const std::string& generates)
+ : AggregateType(Kind::kClassType, parent, nspace, name),
+ is_extern_(is_extern),
+ transient_(transient),
+ size_(0),
+ has_indexed_field_(false),
+ generates_(generates) {
+ CheckForDuplicateFields();
+ if (parent) {
+ if (const ClassType* super_class = ClassType::DynamicCast(parent)) {
+ if (super_class->HasIndexedField()) {
+ has_indexed_field_ = true;
+ }
+ }
+ }
+}
+
+bool ClassType::HasIndexedField() const {
+ if (has_indexed_field_) return true;
+ const ClassType* super_class = GetSuperClass();
+ if (super_class) return super_class->HasIndexedField();
+ return false;
+}
+
+std::string ClassType::GetGeneratedTNodeTypeNameImpl() const {
+ if (!IsExtern()) return generates_;
+ std::string prefix = nspace()->IsDefaultNamespace()
+ ? std::string{}
+ : (nspace()->ExternalName() + "::");
+ return prefix + generates_;
+}
+
+std::string ClassType::GetGeneratedTypeNameImpl() const {
+ return IsConstexpr() ? GetGeneratedTNodeTypeName()
+ : "compiler::TNode<" + GetGeneratedTNodeTypeName() + ">";
+}
+
std::string ClassType::ToExplicitString() const {
std::stringstream result;
result << "class " << name() << "{";
@@ -270,6 +334,10 @@ std::string ClassType::ToExplicitString() const {
return result.str();
}
+bool ClassType::AllowInstantiation() const {
+ return !IsExtern() || nspace()->IsDefaultNamespace();
+}
+
void PrintSignature(std::ostream& os, const Signature& sig, bool with_names) {
os << "(";
for (size_t i = 0; i < sig.parameter_types.types.size(); ++i) {
@@ -339,8 +407,8 @@ std::ostream& operator<<(std::ostream& os, const ParameterTypes& p) {
bool Signature::HasSameTypesAs(const Signature& other,
ParameterMode mode) const {
- auto compare_types = GetTypes();
- auto other_compare_types = other.GetTypes();
+ auto compare_types = types();
+ auto other_compare_types = other.types();
if (mode == ParameterMode::kIgnoreImplicit) {
compare_types = GetExplicitTypes();
other_compare_types = other.GetExplicitTypes();
@@ -372,8 +440,7 @@ bool operator<(const Type& a, const Type& b) {
return a.MangledName() < b.MangledName();
}
-VisitResult ProjectStructField(const StructType* original_struct,
- VisitResult structure,
+VisitResult ProjectStructField(VisitResult structure,
const std::string& fieldname) {
BottomOffset begin = structure.stack_range().begin();
@@ -388,38 +455,8 @@ VisitResult ProjectStructField(const StructType* original_struct,
begin = end;
}
- if (fields.size() > 0 &&
- fields[0].name_and_type.name == kConstructorStructSuperFieldName) {
- structure = ProjectStructField(original_struct, structure,
- kConstructorStructSuperFieldName);
- return ProjectStructField(original_struct, structure, fieldname);
- } else {
- base::Optional<const ClassType*> class_type =
- original_struct->GetDerivedFrom();
- if (original_struct == type) {
- if (class_type) {
- ReportError("class '", (*class_type)->name(),
- "' doesn't contain a field '", fieldname, "'");
- } else {
- ReportError("struct '", original_struct->name(),
- "' doesn't contain a field '", fieldname, "'");
- }
- } else {
- DCHECK(class_type);
- ReportError(
- "class '", (*class_type)->name(),
- "' or one of its derived-from classes doesn't contain a field '",
- fieldname, "'");
- }
- }
-}
-
-VisitResult ProjectStructField(VisitResult structure,
- const std::string& fieldname) {
- DCHECK(structure.IsOnStack());
- DCHECK(structure.type()->IsStructType());
- const StructType* type = StructType::cast(structure.type());
- return ProjectStructField(type, structure, fieldname);
+ ReportError("struct '", type->name(), "' doesn't contain a field '",
+ fieldname, "'");
}
namespace {
@@ -469,6 +506,64 @@ VisitResult VisitResult::NeverResult() {
return result;
}
+std::tuple<size_t, std::string, std::string> Field::GetFieldSizeInformation()
+ const {
+ std::string size_string = "#no size";
+ std::string machine_type = "#no machine type";
+ const Type* field_type = this->name_and_type.type;
+ size_t field_size = 0;
+ if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ field_size = kTaggedSize;
+ size_string = "kTaggedSize";
+ machine_type = field_type->IsSubtypeOf(TypeOracle::GetSmiType())
+ ? "MachineType::TaggedSigned()"
+ : "MachineType::AnyTagged()";
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetRawPtrType())) {
+ field_size = kSystemPointerSize;
+ size_string = "kSystemPointerSize";
+ machine_type = "MachineType::Pointer()";
+ } else if (field_type == TypeOracle::GetInt32Type()) {
+ field_size = kInt32Size;
+ size_string = "kInt32Size";
+ machine_type = "MachineType::Int32()";
+ } else if (field_type == TypeOracle::GetUint32Type()) {
+ field_size = kInt32Size;
+ size_string = "kInt32Size";
+ machine_type = "MachineType::Uint32()";
+ } else if (field_type == TypeOracle::GetInt16Type()) {
+ field_size = kUInt16Size;
+ size_string = "kUInt16Size";
+ machine_type = "MachineType::Int16()";
+ } else if (field_type == TypeOracle::GetUint16Type()) {
+ field_size = kUInt16Size;
+ size_string = "kUInt16Size";
+ machine_type = "MachineType::Uint16()";
+ } else if (field_type == TypeOracle::GetInt8Type()) {
+ field_size = kUInt8Size;
+ size_string = "kUInt8Size";
+ machine_type = "MachineType::Int8()";
+ } else if (field_type == TypeOracle::GetUint8Type()) {
+ field_size = kUInt8Size;
+ size_string = "kUInt8Size";
+ machine_type = "MachineType::Uint8()";
+ } else if (field_type == TypeOracle::GetFloat64Type()) {
+ field_size = kDoubleSize;
+ size_string = "kDoubleSize";
+ machine_type = "MachineType::Float64()";
+ } else if (field_type == TypeOracle::GetIntPtrType()) {
+ field_size = kIntptrSize;
+ size_string = "kIntptrSize";
+ machine_type = "MachineType::IntPtr()";
+ } else if (field_type == TypeOracle::GetUIntPtrType()) {
+ field_size = kIntptrSize;
+ size_string = "kIntptrSize";
+ machine_type = "MachineType::IntPtr()";
+ } else {
+ ReportError("fields of type ", *field_type, " are not (yet) supported");
+ }
+ return std::make_tuple(field_size, size_string, machine_type);
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
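
Note: Field::GetFieldSizeInformation packs (byte size, size-constant name, MachineType expression) into a tuple; as the TODO added in types.h below notes, callers are expected to unpack it with std::tie. A small sketch of that calling convention, with a stub standing in for torque::Field:

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <tuple>

    // Stub returning the three components in the same order as the function
    // above: byte size, size-constant name, MachineType expression.
    struct FieldStub {
      std::tuple<size_t, std::string, std::string> GetFieldSizeInformation()
          const {
        return std::make_tuple(4, "kInt32Size", "MachineType::Int32()");
      }
    };

    int main() {
      FieldStub field;
      size_t field_size;
      std::string size_string, machine_type;
      std::tie(field_size, size_string, machine_type) =
          field.GetFieldSizeInformation();
      std::cout << size_string << " = " << field_size << ", " << machine_type
                << "\n";
    }
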
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index 6d189068f2..47e8a97b8e 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -22,11 +22,14 @@ namespace torque {
static const char* const CONSTEXPR_TYPE_PREFIX = "constexpr ";
static const char* const NEVER_TYPE_STRING = "never";
static const char* const CONSTEXPR_BOOL_TYPE_STRING = "constexpr bool";
+static const char* const CONSTEXPR_INTPTR_TYPE_STRING = "constexpr intptr";
static const char* const BOOL_TYPE_STRING = "bool";
static const char* const VOID_TYPE_STRING = "void";
static const char* const ARGUMENTS_TYPE_STRING = "constexpr Arguments";
static const char* const CONTEXT_TYPE_STRING = "Context";
+static const char* const MAP_TYPE_STRING = "Map";
static const char* const OBJECT_TYPE_STRING = "Object";
+static const char* const JSOBJECT_TYPE_STRING = "JSObject";
static const char* const SMI_TYPE_STRING = "Smi";
static const char* const TAGGED_TYPE_STRING = "Tagged";
static const char* const RAWPTR_TYPE_STRING = "RawPtr";
@@ -37,10 +40,18 @@ static const char* const BUILTIN_POINTER_TYPE_STRING = "BuiltinPtr";
static const char* const INTPTR_TYPE_STRING = "intptr";
static const char* const UINTPTR_TYPE_STRING = "uintptr";
static const char* const INT32_TYPE_STRING = "int32";
+static const char* const UINT32_TYPE_STRING = "uint32";
+static const char* const INT16_TYPE_STRING = "int16";
+static const char* const UINT16_TYPE_STRING = "uint16";
+static const char* const INT8_TYPE_STRING = "int8";
+static const char* const UINT8_TYPE_STRING = "uint8";
+static const char* const FLOAT64_TYPE_STRING = "float64";
static const char* const CONST_INT31_TYPE_STRING = "constexpr int31";
static const char* const CONST_INT32_TYPE_STRING = "constexpr int32";
static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
+class AggregateType;
+struct Identifier;
class Macro;
class Method;
class StructType;
@@ -110,8 +121,8 @@ class Type : public TypeBase {
return IsAbstractName(CONSTEXPR_BOOL_TYPE_STRING);
}
bool IsVoidOrNever() const { return IsVoid() || IsNever(); }
- virtual std::string GetGeneratedTypeName() const = 0;
- virtual std::string GetGeneratedTNodeTypeName() const = 0;
+ std::string GetGeneratedTypeName() const;
+ std::string GetGeneratedTNodeTypeName() const;
virtual bool IsConstexpr() const = 0;
virtual bool IsTransient() const { return false; }
virtual const Type* NonConstexprVersion() const = 0;
@@ -125,6 +136,8 @@ class Type : public TypeBase {
void set_parent(const Type* t) { parent_ = t; }
int Depth() const;
virtual std::string ToExplicitString() const = 0;
+ virtual std::string GetGeneratedTypeNameImpl() const = 0;
+ virtual std::string GetGeneratedTNodeTypeNameImpl() const = 0;
private:
bool IsAbstractName(const std::string& name) const;
@@ -152,7 +165,14 @@ struct NameAndType {
std::ostream& operator<<(std::ostream& os, const NameAndType& name_and_type);
struct Field {
+ // TODO(danno): This likely should be refactored, the handling of the types
+ // using the universal grab-bag utility with std::tie, as well as the
+ // reliance of string types is quite clunky.
+ std::tuple<size_t, std::string, std::string> GetFieldSizeInformation() const;
+
SourcePosition pos;
+ const AggregateType* aggregate;
+ base::Optional<const Field*> index;
NameAndType name_and_type;
size_t offset;
bool is_weak;
@@ -162,15 +182,15 @@ std::ostream& operator<<(std::ostream& os, const Field& name_and_type);
class TopType final : public Type {
public:
- DECLARE_TYPE_BOILERPLATE(TopType);
- virtual std::string MangledName() const { return "top"; }
- virtual std::string GetGeneratedTypeName() const { UNREACHABLE(); }
- virtual std::string GetGeneratedTNodeTypeName() const {
+ DECLARE_TYPE_BOILERPLATE(TopType)
+ std::string MangledName() const override { return "top"; }
+ std::string GetGeneratedTypeNameImpl() const override { UNREACHABLE(); }
+ std::string GetGeneratedTNodeTypeNameImpl() const override {
return source_type_->GetGeneratedTNodeTypeName();
}
- virtual bool IsConstexpr() const { return false; }
- virtual const Type* NonConstexprVersion() const { return nullptr; }
- virtual std::string ToExplicitString() const {
+ bool IsConstexpr() const override { return false; }
+ const Type* NonConstexprVersion() const override { return nullptr; }
+ std::string ToExplicitString() const override {
std::stringstream s;
s << "inaccessible " + source_type_->ToString();
return s.str();
@@ -191,7 +211,7 @@ class TopType final : public Type {
class AbstractType final : public Type {
public:
- DECLARE_TYPE_BOILERPLATE(AbstractType);
+ DECLARE_TYPE_BOILERPLATE(AbstractType)
const std::string& name() const { return name_; }
std::string ToExplicitString() const override { return name(); }
std::string MangledName() const override {
@@ -199,11 +219,11 @@ class AbstractType final : public Type {
std::replace(str.begin(), str.end(), ' ', '_');
return "AT" + str;
}
- std::string GetGeneratedTypeName() const override {
+ std::string GetGeneratedTypeNameImpl() const override {
return IsConstexpr() ? generated_type_
: "compiler::TNode<" + generated_type_ + ">";
}
- std::string GetGeneratedTNodeTypeName() const override;
+ std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsConstexpr() const override {
return name().substr(0, strlen(CONSTEXPR_TYPE_PREFIX)) ==
CONSTEXPR_TYPE_PREFIX;
@@ -238,13 +258,13 @@ class AbstractType final : public Type {
// For now, builtin pointers are restricted to Torque-defined builtins.
class BuiltinPointerType final : public Type {
public:
- DECLARE_TYPE_BOILERPLATE(BuiltinPointerType);
+ DECLARE_TYPE_BOILERPLATE(BuiltinPointerType)
std::string ToExplicitString() const override;
std::string MangledName() const override;
- std::string GetGeneratedTypeName() const override {
+ std::string GetGeneratedTypeNameImpl() const override {
return parent()->GetGeneratedTypeName();
}
- std::string GetGeneratedTNodeTypeName() const override {
+ std::string GetGeneratedTNodeTypeNameImpl() const override {
return parent()->GetGeneratedTNodeTypeName();
}
bool IsConstexpr() const override {
@@ -292,13 +312,13 @@ struct TypeLess {
class UnionType final : public Type {
public:
- DECLARE_TYPE_BOILERPLATE(UnionType);
+ DECLARE_TYPE_BOILERPLATE(UnionType)
std::string ToExplicitString() const override;
std::string MangledName() const override;
- std::string GetGeneratedTypeName() const override {
+ std::string GetGeneratedTypeNameImpl() const override {
return "compiler::TNode<" + GetGeneratedTNodeTypeName() + ">";
}
- std::string GetGeneratedTNodeTypeName() const override;
+ std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsConstexpr() const override {
DCHECK_EQ(false, parent()->IsConstexpr());
@@ -382,16 +402,18 @@ const Type* SubtractType(const Type* a, const Type* b);
class AggregateType : public Type {
public:
- DECLARE_TYPE_BOILERPLATE(AggregateType);
+ DECLARE_TYPE_BOILERPLATE(AggregateType)
std::string MangledName() const override { return name_; }
- std::string GetGeneratedTypeName() const override { UNREACHABLE(); };
- std::string GetGeneratedTNodeTypeName() const override { UNREACHABLE(); }
+ std::string GetGeneratedTypeNameImpl() const override { UNREACHABLE(); }
+ std::string GetGeneratedTNodeTypeNameImpl() const override { UNREACHABLE(); }
const Type* NonConstexprVersion() const override { return this; }
bool IsConstexpr() const override { return false; }
+ virtual bool HasIndexedField() const { return false; }
void SetFields(std::vector<Field> fields) { fields_ = std::move(fields); }
const std::vector<Field>& fields() const { return fields_; }
+ bool HasField(const std::string& name) const;
const Field& LookupField(const std::string& name) const;
const std::string& name() const { return name_; }
Namespace* nspace() const { return namespace_; }
@@ -400,8 +422,12 @@ class AggregateType : public Type {
return "_method_" + name_ + "_" + name;
}
+ virtual const Field& RegisterField(Field field) {
+ fields_.push_back(field);
+ return fields_.back();
+ }
+
void RegisterMethod(Method* method) { methods_.push_back(method); }
- std::vector<Method*> Constructors() const;
const std::vector<Method*>& Methods() const { return methods_; }
std::vector<Method*> Methods(const std::string& name) const;
@@ -409,8 +435,8 @@ class AggregateType : public Type {
protected:
AggregateType(Kind kind, const Type* parent, Namespace* nspace,
- const std::string& name, const std::vector<Field>& fields)
- : Type(kind, parent), namespace_(nspace), name_(name), fields_(fields) {}
+ const std::string& name)
+ : Type(kind, parent), namespace_(nspace), name_(name) {}
void CheckForDuplicateFields();
@@ -423,63 +449,52 @@ class AggregateType : public Type {
class StructType final : public AggregateType {
public:
- DECLARE_TYPE_BOILERPLATE(StructType);
+ DECLARE_TYPE_BOILERPLATE(StructType)
std::string ToExplicitString() const override;
- std::string GetGeneratedTypeName() const override;
-
- void SetDerivedFrom(const ClassType* derived_from) {
- derived_from_ = derived_from;
- }
- base::Optional<const ClassType*> GetDerivedFrom() const {
- return derived_from_;
- }
+ std::string GetGeneratedTypeNameImpl() const override;
private:
friend class TypeOracle;
- StructType(Namespace* nspace, const std::string& name,
- const std::vector<Field>& fields)
- : AggregateType(Kind::kStructType, nullptr, nspace, name, fields) {
+ StructType(Namespace* nspace, const std::string& name)
+ : AggregateType(Kind::kStructType, nullptr, nspace, name) {
CheckForDuplicateFields();
}
const std::string& GetStructName() const { return name(); }
-
- base::Optional<const ClassType*> derived_from_;
};
class ClassType final : public AggregateType {
public:
- DECLARE_TYPE_BOILERPLATE(ClassType);
+ DECLARE_TYPE_BOILERPLATE(ClassType)
std::string ToExplicitString() const override;
- std::string GetGeneratedTypeName() const override {
- return IsConstexpr() ? generates_ : "compiler::TNode<" + generates_ + ">";
- }
- std::string GetGeneratedTNodeTypeName() const override;
+ std::string GetGeneratedTypeNameImpl() const override;
+ std::string GetGeneratedTNodeTypeNameImpl() const override;
+ bool IsExtern() const { return is_extern_; }
bool IsTransient() const override { return transient_; }
+ bool HasIndexedField() const override;
size_t size() const { return size_; }
- StructType* struct_type() const { return this_struct_; }
const ClassType* GetSuperClass() const {
if (parent() == nullptr) return nullptr;
return parent()->IsClassType() ? ClassType::DynamicCast(parent()) : nullptr;
}
+ void SetSize(size_t size) { size_ = size; }
+ bool AllowInstantiation() const;
+ const Field& RegisterField(Field field) override {
+ if (field.index) {
+ has_indexed_field_ = true;
+ }
+ return AggregateType::RegisterField(field);
+ }
private:
friend class TypeOracle;
ClassType(const Type* parent, Namespace* nspace, const std::string& name,
- bool transient, const std::string& generates,
- const std::vector<Field>& fields, StructType* this_struct,
- size_t size)
- : AggregateType(Kind::kClassType, parent, nspace, name, fields),
- this_struct_(this_struct),
- transient_(transient),
- size_(size),
- generates_(generates) {
- CheckForDuplicateFields();
- }
+ bool is_extern, bool transient, const std::string& generates);
- StructType* this_struct_;
+ bool is_extern_;
bool transient_;
size_t size_;
+ bool has_indexed_field_;
const std::string generates_;
};
@@ -560,6 +575,8 @@ std::ostream& operator<<(std::ostream& os, const ParameterTypes& parameters);
enum class ParameterMode { kProcessImplicit, kIgnoreImplicit };
+typedef std::vector<Identifier*> NameVector;
+
struct Signature {
Signature(NameVector n, base::Optional<std::string> arguments_variable,
ParameterTypes p, size_t i, const Type* r, LabelDeclarationVector l)
@@ -580,7 +597,6 @@ struct Signature {
bool HasSameTypesAs(
const Signature& other,
ParameterMode mode = ParameterMode::kProcessImplicit) const;
- const TypeVector& GetTypes() const { return parameter_types.types; }
TypeVector GetImplicitTypes() const {
return TypeVector(parameter_types.types.begin(),
parameter_types.types.begin() + implicit_count);
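
Note: the Type hierarchy here moves to a non-virtual-interface shape: the public GetGeneratedTypeName()/GetGeneratedTNodeTypeName() are now plain methods that validate whatever the virtual *Impl() overrides return, so the empty-name diagnostic lives in exactly one place. A standalone sketch of the idiom with hypothetical classes:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    class Type {
     public:
      virtual ~Type() = default;

      // Non-virtual entry point: every caller gets the validation for free.
      std::string GetGeneratedTypeName() const {
        std::string result = GetGeneratedTypeNameImpl();
        if (result.empty()) {
          throw std::runtime_error(
              "generated type required; use a 'generates' clause");
        }
        return result;
      }

     protected:
      // Overrides supply the raw answer and skip the bookkeeping.
      virtual std::string GetGeneratedTypeNameImpl() const = 0;
    };

    class SmiType : public Type {
     protected:
      std::string GetGeneratedTypeNameImpl() const override {
        return "compiler::TNode<Smi>";
      }
    };

    int main() {
      SmiType t;
      std::cout << t.GetGeneratedTypeName() << "\n";
    }
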
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index b1fdb2b913..5328cacac6 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -73,14 +73,57 @@ std::string StringLiteralQuote(const std::string& s) {
return result.str();
}
+static const char kFileUriPrefix[] = "file://";
+static const int kFileUriPrefixLength = sizeof(kFileUriPrefix) - 1;
+
+static int HexCharToInt(unsigned char c) {
+ if (isdigit(c)) return c - '0';
+ if (isupper(c)) return c - 'A' + 10;
+ DCHECK(islower(c));
+ return c - 'a' + 10;
+}
+
+base::Optional<std::string> FileUriDecode(const std::string& uri) {
+ // Abort decoding of URIs that don't start with "file://".
+ if (uri.rfind(kFileUriPrefix) != 0) return base::nullopt;
+
+ const std::string path = uri.substr(kFileUriPrefixLength);
+ std::ostringstream decoded;
+
+ for (auto iter = path.begin(), end = path.end(); iter != end; ++iter) {
+ std::string::value_type c = (*iter);
+
+ // Normal characters are appended.
+ if (c != '%') {
+ decoded << c;
+ continue;
+ }
+
+ // If '%' is not followed by at least two hex digits, we abort.
+ if (std::distance(iter, end) <= 2) return base::nullopt;
+
+ unsigned char first = (*++iter);
+ unsigned char second = (*++iter);
+ if (!isxdigit(first) || !isxdigit(second)) return base::nullopt;
+
+ // An escaped hex value needs converting.
+ unsigned char value = HexCharToInt(first) * 16 + HexCharToInt(second);
+ decoded << value;
+ }
+
+ return decoded.str();
+}
+
std::string CurrentPositionAsString() {
return PositionAsString(CurrentSourcePosition::Get());
}
DEFINE_CONTEXTUAL_VARIABLE(LintErrorStatus)
-[[noreturn]] void ReportErrorString(const std::string& error) {
- std::cerr << CurrentPositionAsString() << ": Torque error: " << error << "\n";
+[[noreturn]] void ReportErrorString(const std::string& error,
+ bool print_position) {
+ if (print_position) std::cerr << CurrentPositionAsString() << ": ";
+ std::cerr << ": Torque error: " << error << "\n";
v8::base::OS::Abort();
}
@@ -125,8 +168,9 @@ bool IsKeywordLikeName(const std::string& s) {
// naming convention and are those exempt from the normal type convention.
bool IsMachineType(const std::string& s) {
static const char* const machine_types[]{
- "void", "never", "int32", "uint32", "int64", "intptr", "uintptr",
- "float32", "float64", "bool", "string", "bint", "int31"};
+ "void", "never", "int8", "uint8", "int16", "uint16",
+ "int31", "uint31", "int32", "uint32", "int64", "intptr",
+ "uintptr", "float32", "float64", "bool", "string", "bint"};
return std::find(std::begin(machine_types), std::end(machine_types), s) !=
std::end(machine_types);
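
Note: a hedged usage sketch for FileUriDecode above; it assumes the V8 tree's include paths. Percent-escapes decode to bytes ("%20" becomes a space) and anything that does not start with "file://" is rejected with base::nullopt:

    #include <iostream>
    #include "src/torque/utils.h"

    int main() {
      using v8::internal::torque::FileUriDecode;

      auto path = FileUriDecode("file:///home/user/hello%20world.tq");
      if (path) std::cout << *path << "\n";  // /home/user/hello world.tq

      auto rejected = FileUriDecode("http://example.com");
      std::cout << (rejected ? "decoded" : "rejected") << "\n";  // rejected
    }
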
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index ca90a78522..347687fef0 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -5,22 +5,28 @@
#ifndef V8_TORQUE_UTILS_H_
#define V8_TORQUE_UTILS_H_
+#include <ostream>
+#include <streambuf>
#include <string>
#include <unordered_set>
#include <vector>
#include "src/base/functional.h"
+#include "src/base/optional.h"
#include "src/torque/contextual.h"
namespace v8 {
namespace internal {
namespace torque {
-typedef std::vector<std::string> NameVector;
-
std::string StringLiteralUnquote(const std::string& s);
std::string StringLiteralQuote(const std::string& s);
+// Decodes "file://" URIs into file paths which can then be used
+// with the standard stream API.
+V8_EXPORT_PRIVATE base::Optional<std::string> FileUriDecode(
+ const std::string& s);
+
class LintErrorStatus : public ContextualClass<LintErrorStatus> {
public:
LintErrorStatus() : has_lint_errors_(false) {}
@@ -45,12 +51,19 @@ bool IsSnakeCase(const std::string& s);
bool IsValidNamespaceConstName(const std::string& s);
bool IsValidTypeName(const std::string& s);
-[[noreturn]] void ReportErrorString(const std::string& error);
+[[noreturn]] void ReportErrorString(const std::string& error,
+ bool print_position);
template <class... Args>
[[noreturn]] void ReportError(Args&&... args) {
std::stringstream s;
USE((s << std::forward<Args>(args))...);
- ReportErrorString(s.str());
+ ReportErrorString(s.str(), true);
+}
+template <class... Args>
+[[noreturn]] void ReportErrorWithoutPosition(Args&&... args) {
+ std::stringstream s;
+ USE((s << std::forward<Args>(args))...);
+ ReportErrorString(s.str(), false);
}
std::string CapifyStringWithUnderscores(const std::string& camellified_string);
@@ -268,12 +281,8 @@ class ToString {
std::stringstream s_;
};
-constexpr int kTaggedSize = sizeof(void*);
-
-static const char* const kConstructMethodName = "constructor";
-static const char* const kSuperMethodName = "super";
-static const char* const kConstructorStructSuperFieldName = "_super";
-static const char* const kClassConstructorThisStructPrefix = "_ThisStruct";
+static const char* const kBaseNamespaceName = "base";
+static const char* const kTestNamespaceName = "test";
// Erase elements of a container that has a constant-time erase function, like
// std::set or std::list. Calling this on std::vector would have quadratic
@@ -289,6 +298,25 @@ void EraseIf(Container* container, F f) {
}
}
+class NullStreambuf : public std::streambuf {
+ public:
+ virtual int overflow(int c) {
+ setp(buffer_, buffer_ + sizeof(buffer_));
+ return (c == traits_type::eof()) ? '\0' : c;
+ }
+
+ private:
+ char buffer_[64];
+};
+
+class NullOStream : public std::ostream {
+ public:
+ NullOStream() : std::ostream(&buffer_) {}
+
+ private:
+ NullStreambuf buffer_;
+};
+
} // namespace torque
} // namespace internal
} // namespace v8
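
Note: NullOStream above is a sink: writes land in NullStreambuf's small scratch buffer and are discarded on overflow. A short usage sketch, assuming the two classes from the hunk are in scope; this is handy when a code path insists on an output stream whose contents are not wanted:

    #include <ostream>
    // NullStreambuf / NullOStream as defined in utils.h above.

    void Demo() {
      NullOStream devnull;
      devnull << "discarded " << 42 << "\n";  // no visible output
    }
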
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index 2f530f9279..3d4375e384 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -270,27 +270,6 @@ enum CategoryGroupEnabledFlags {
INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
phase, category_group, name, id, timestamp, flags, ##__VA_ARGS__)
-// Enter and leave a context based on the current scope.
-#define INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
- struct INTERNAL_TRACE_EVENT_UID(ScopedContext) { \
- public: \
- INTERNAL_TRACE_EVENT_UID(ScopedContext)(uint64_t cid) : cid_(cid) { \
- TRACE_EVENT_ENTER_CONTEXT(category_group, name, cid_); \
- } \
- ~INTERNAL_TRACE_EVENT_UID(ScopedContext)() { \
- TRACE_EVENT_LEAVE_CONTEXT(category_group, name, cid_); \
- } \
- \
- private: \
- /* Local class friendly DISALLOW_COPY_AND_ASSIGN */ \
- INTERNAL_TRACE_EVENT_UID(ScopedContext) \
- (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \
- void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \
- uint64_t cid_; \
- }; \
- INTERNAL_TRACE_EVENT_UID(ScopedContext) \
- INTERNAL_TRACE_EVENT_UID(scoped_context)(context);
-
#define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)
@@ -646,9 +625,11 @@ class ScopedTracer {
ScopedTracer() : p_data_(nullptr) {}
~ScopedTracer() {
- if (p_data_ && *data_.category_group_enabled)
+ if (p_data_ && base::Relaxed_Load(reinterpret_cast<const base::Atomic8*>(
+ data_.category_group_enabled))) {
TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
data_.category_group_enabled, data_.name, data_.event_handle);
+ }
}
void Initialize(const uint8_t* category_group_enabled, const char* name,
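
Note: the ScopedTracer fix reads the category-enabled byte through base::Relaxed_Load instead of a plain dereference, since another thread may flip tracing on or off concurrently. A standalone sketch of the same pattern using std::atomic as a stand-in:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint8_t> category_enabled{0};

    bool ShouldEmitTraceEvent() {
      // Relaxed ordering is enough: we only need an eventually visible
      // on/off flag, not ordering against other memory operations.
      return category_enabled.load(std::memory_order_relaxed) != 0;
    }
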
diff --git a/deps/v8/src/tracing/traced-value.cc b/deps/v8/src/tracing/traced-value.cc
index d019b3b5b4..eeb41c4974 100644
--- a/deps/v8/src/tracing/traced-value.cc
+++ b/deps/v8/src/tracing/traced-value.cc
@@ -6,6 +6,7 @@
#include "src/base/platform/platform.h"
#include "src/conversions.h"
+#include "src/vector.h"
namespace v8 {
namespace tracing {
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 3dbc1602fa..bdf16d7eef 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -690,5 +690,23 @@ void TransitionArray::Sort() {
DCHECK(IsSortedNoDuplicates());
}
+bool TransitionsAccessor::HasIntegrityLevelTransitionTo(
+ Map to, Symbol* out_symbol, PropertyAttributes* out_integrity_level) {
+ ReadOnlyRoots roots(isolate_);
+ if (SearchSpecial(roots.frozen_symbol()) == to) {
+ if (out_integrity_level) *out_integrity_level = FROZEN;
+ if (out_symbol) *out_symbol = roots.frozen_symbol();
+ } else if (SearchSpecial(roots.sealed_symbol()) == to) {
+ if (out_integrity_level) *out_integrity_level = SEALED;
+ if (out_symbol) *out_symbol = roots.sealed_symbol();
+ } else if (SearchSpecial(roots.nonextensible_symbol()) == to) {
+ if (out_integrity_level) *out_integrity_level = NONE;
+ if (out_symbol) *out_symbol = roots.nonextensible_symbol();
+ } else {
+ return false;
+ }
+ return true;
+}
+
} // namespace internal
} // namespace v8
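
Note: HasIntegrityLevelTransitionTo reports whether `to` is reached via one of the three integrity-level transitions (frozen, sealed, nonextensible), and both out-parameters default to nullptr per the transitions.h hunk below. An illustrative fragment only; `accessor`, `target`, and default-constructible handle types are assumed from the surrounding V8 code:

    // Ask for full details...
    Symbol transition_symbol;
    PropertyAttributes integrity_level;
    if (accessor.HasIntegrityLevelTransitionTo(target, &transition_symbol,
                                               &integrity_level)) {
      // target is reached via a freeze/seal/preventExtensions transition.
    }

    // ...or just for the boolean answer, relying on the default arguments.
    bool any = accessor.HasIntegrityLevelTransitionTo(target);
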
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 58268907e3..55bc14ce5a 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -86,6 +86,10 @@ class TransitionsAccessor {
static bool IsMatchingMap(Map target, Name name, PropertyKind kind,
PropertyAttributes attributes);
+ bool HasIntegrityLevelTransitionTo(
+ Map to, Symbol* out_symbol = nullptr,
+ PropertyAttributes* out_integrity_level = nullptr);
+
// ===== ITERATION =====
typedef void (*TraverseCallback)(Map map, void* data);
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index 565289e18b..1efbc398d0 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -13,7 +13,7 @@
// should be as self-contained as possible to make it easy to audit the code.
//
// 2. Any changes must be reviewed by someone from the crash reporting
-// or security team. Se OWNERS for suggested reviewers.
+// or security team. See OWNERS for suggested reviewers.
//
// For more information, see https://goo.gl/yMeyUY.
//
@@ -33,10 +33,10 @@
namespace {
size_t gNextCodeObject = 0;
-#ifdef DEBUG
-constexpr bool kEnableDebug = true;
+#ifdef ENABLE_SLOW_DCHECKS
+constexpr bool kEnableSlowChecks = true;
#else
-constexpr bool kEnableDebug = false;
+constexpr bool kEnableSlowChecks = false;
#endif
}
@@ -143,7 +143,7 @@ int RegisterHandlerData(
MetadataLock lock;
- if (kEnableDebug) {
+ if (kEnableSlowChecks) {
VerifyCodeRangeIsDisjoint(data);
}
@@ -196,7 +196,7 @@ int RegisterHandlerData(
if (i <= int_max) {
gCodeObjects[i].code_info = data;
- if (kEnableDebug) {
+ if (kEnableSlowChecks) {
ValidateCodeObjects();
}
@@ -224,7 +224,7 @@ void ReleaseHandlerData(int index) {
gCodeObjects[index].next_free = gNextCodeObject;
gNextCodeObject = index;
- if (kEnableDebug) {
+ if (kEnableSlowChecks) {
ValidateCodeObjects();
}
}
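
Note: renaming kEnableDebug to kEnableSlowChecks narrows the expensive validation to ENABLE_SLOW_DCHECKS builds while keeping it behind an if on a constexpr bool rather than an #ifdef, so the checking code always compiles and cannot rot. A standalone sketch of the pattern:

    #ifdef ENABLE_SLOW_DCHECKS
    constexpr bool kEnableSlowChecks = true;
    #else
    constexpr bool kEnableSlowChecks = false;
    #endif

    void ValidateCodeObjects() {
      // Expensive invariant walk over the registered code objects.
    }

    void ReleaseHandlerData() {
      // ... actual release work ...
      if (kEnableSlowChecks) {
        ValidateCodeObjects();  // optimized away when the flag is false
      }
    }
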
diff --git a/deps/v8/src/turbo-assembler.cc b/deps/v8/src/turbo-assembler.cc
index 65e05aa45c..0a95775c2d 100644
--- a/deps/v8/src/turbo-assembler.cc
+++ b/deps/v8/src/turbo-assembler.cc
@@ -47,7 +47,7 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
LoadRootRelative(destination,
RootRegisterOffsetForBuiltinIndex(maybe_builtin_index_));
} else {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
+ CHECK(isolate()->IsGeneratingEmbeddedBuiltins());
// Ensure the given object is in the builtins constants table and fetch its
// index.
BuiltinsConstantsTableBuilder* builder =
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index 0140858115..c96d78438a 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -56,6 +56,53 @@ template <class T, int s> int Mapping<T, s>::CalculateValue(uchar c, uchar n,
}
}
+// Decodes UTF-8 bytes incrementally, allowing the decoding of bytes as they
+// stream in. This **must** be followed by a call to ValueOfIncrementalFinish
+// when the stream is complete, to ensure incomplete sequences are handled.
+uchar Utf8::ValueOfIncremental(const byte** cursor, State* state,
+ Utf8IncrementalBuffer* buffer) {
+ DCHECK_NOT_NULL(buffer);
+ State old_state = *state;
+ byte next = **cursor;
+ *cursor += 1;
+
+ if (V8_LIKELY(next <= kMaxOneByteChar && old_state == State::kAccept)) {
+ DCHECK_EQ(0u, *buffer);
+ return static_cast<uchar>(next);
+ }
+
+ // So we're at the lead byte of a 2/3/4 sequence, or we're at a continuation
+ // char in that sequence.
+ Utf8DfaDecoder::Decode(next, state, buffer);
+
+ switch (*state) {
+ case State::kAccept: {
+ uchar t = *buffer;
+ *buffer = 0;
+ return t;
+ }
+
+ case State::kReject:
+ *state = State::kAccept;
+ *buffer = 0;
+
+ // If we hit a bad byte, we need to determine if we were trying to start
+ // a sequence or continue one. If we were trying to start a sequence,
+ // that means it's just an invalid lead byte and we need to continue to
+ // the next (which we already did above). If we were already in a
+ // sequence, we need to reprocess this same byte after resetting to the
+ // initial state.
+ if (old_state != State::kAccept) {
+ // We were trying to continue a sequence, so let's reprocess this byte
+ // next time.
+ *cursor -= 1;
+ }
+ return kBadChar;
+
+ default:
+ return kIncomplete;
+ }
+}
unsigned Utf8::EncodeOneByte(char* str, uint8_t c) {
static const int kMask = ~(1 << 6);
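
Note: the incremental decoder now advances a byte cursor itself instead of taking an index out-parameter. A hedged driver sketch shaped after the rewritten Utf8::CalculateValue loop in the unicode.cc hunk below; Emit() and the input pointers are placeholders, and the flush step assumes the usual convention that ValueOfIncrementalFinish returns 0 (an empty-buffer sentinel) when no partial sequence was pending:

    Utf8::State state = Utf8::State::kAccept;
    Utf8::Utf8IncrementalBuffer buffer = 0;
    const byte* cursor = data;
    const byte* end = data + length;
    while (cursor < end) {
      uchar c = Utf8::ValueOfIncremental(&cursor, &state, &buffer);
      if (c != Utf8::kIncomplete) Emit(c);  // may be kBadChar on invalid input
    }
    // Mandatory flush: a trailing unfinished sequence becomes a bad char.
    uchar tail = Utf8::ValueOfIncrementalFinish(&state);
    if (tail != 0) Emit(tail);
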
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 4d7896ec37..c7818dbaa0 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -203,62 +203,17 @@ uchar Utf8::CalculateValue(const byte* str, size_t max_length, size_t* cursor) {
Utf8IncrementalBuffer buffer = 0;
uchar t;
- size_t i = 0;
+ const byte* start = str;
+ const byte* end = str + max_length;
+
do {
- t = ValueOfIncremental(str[i], &i, &state, &buffer);
- } while (i < max_length && t == kIncomplete);
+ t = ValueOfIncremental(&str, &state, &buffer);
+ } while (str < end && t == kIncomplete);
- *cursor += i;
+ *cursor += str - start;
return (state == State::kAccept) ? t : kBadChar;
}
-// Decodes UTF-8 bytes incrementally, allowing the decoding of bytes as they
-// stream in. This **must** be followed by a call to ValueOfIncrementalFinish
-// when the stream is complete, to ensure incomplete sequences are handled.
-uchar Utf8::ValueOfIncremental(byte next, size_t* cursor, State* state,
- Utf8IncrementalBuffer* buffer) {
- DCHECK_NOT_NULL(buffer);
- State old_state = *state;
- *cursor += 1;
-
- if (V8_LIKELY(next <= kMaxOneByteChar && old_state == State::kAccept)) {
- DCHECK_EQ(0u, *buffer);
- return static_cast<uchar>(next);
- }
-
- // So we're at the lead byte of a 2/3/4 sequence, or we're at a continuation
- // char in that sequence.
- Utf8DfaDecoder::Decode(next, state, buffer);
-
- switch (*state) {
- case State::kAccept: {
- uchar t = *buffer;
- *buffer = 0;
- return t;
- }
-
- case State::kReject:
- *state = State::kAccept;
- *buffer = 0;
-
- // If we hit a bad byte, we need to determine if we were trying to start
- // a sequence or continue one. If we were trying to start a sequence,
- // that means it's just an invalid lead byte and we need to continue to
- // the next (which we already did above). If we were already in a
- // sequence, we need to reprocess this same byte after resetting to the
- // initial state.
- if (old_state != State::kAccept) {
- // We were trying to continue a sequence, so let's reprocess this byte
- // next time.
- *cursor -= 1;
- }
- return kBadChar;
-
- default:
- return kIncomplete;
- }
-}
-
// Finishes the incremental decoding, ensuring that if an unfinished sequence
// is left that it is replaced by a replacement char.
uchar Utf8::ValueOfIncrementalFinish(State* state) {
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 68e69324f9..1bebfe3e8a 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -163,8 +163,8 @@ class V8_EXPORT_PRIVATE Utf8 {
static inline uchar ValueOf(const byte* str, size_t length, size_t* cursor);
typedef uint32_t Utf8IncrementalBuffer;
- static uchar ValueOfIncremental(byte next_byte, size_t* cursor, State* state,
- Utf8IncrementalBuffer* buffer);
+ static inline uchar ValueOfIncremental(const byte** cursor, State* state,
+ Utf8IncrementalBuffer* buffer);
static uchar ValueOfIncrementalFinish(State* state);
// Excludes non-characters from the set of valid code points.
diff --git a/deps/v8/src/unoptimized-compilation-info.cc b/deps/v8/src/unoptimized-compilation-info.cc
index 1211af3715..b1804ea51f 100644
--- a/deps/v8/src/unoptimized-compilation-info.cc
+++ b/deps/v8/src/unoptimized-compilation-info.cc
@@ -33,6 +33,9 @@ UnoptimizedCompilationInfo::UnoptimizedCompilationInfo(Zone* zone,
if (parse_info->is_native()) MarkAsNative();
if (parse_info->collect_type_profile()) MarkAsCollectTypeProfile();
if (parse_info->might_always_opt()) MarkAsMightAlwaysOpt();
+ if (parse_info->collect_source_positions()) {
+ MarkAsForceCollectSourcePositions();
+ }
}
DeclarationScope* UnoptimizedCompilationInfo::scope() const {
@@ -50,8 +53,20 @@ int UnoptimizedCompilationInfo::num_parameters_including_this() const {
SourcePositionTableBuilder::RecordingMode
UnoptimizedCompilationInfo::SourcePositionRecordingMode() const {
- return is_native() ? SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS
- : SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS;
+ if (is_native()) {
+ DCHECK(!collect_source_positions());
+ return SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS;
+ }
+
+ if (collect_source_positions()) {
+ return SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS;
+ }
+
+ // Always collect source positions for functions that cannot be lazily
+ // compiled, e.g. class member initializer functions.
+ return !literal_->AllowsLazyCompilation()
+ ? SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS
+ : SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS;
}
} // namespace internal
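In summary, the rewritten SourcePositionRecordingMode() resolves in this priority order (a restatement of the code above, not additional patch content):

    // 1. is_native()                        -> OMIT_SOURCE_POSITIONS
    //    (forced collection is asserted off for native functions)
    // 2. collect_source_positions()         -> RECORD_SOURCE_POSITIONS
    // 3. !literal_->AllowsLazyCompilation() -> RECORD_SOURCE_POSITIONS
    // 4. otherwise                          -> OMIT_SOURCE_POSITIONS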
diff --git a/deps/v8/src/unoptimized-compilation-info.h b/deps/v8/src/unoptimized-compilation-info.h
index a70dc88651..7d68092f96 100644
--- a/deps/v8/src/unoptimized-compilation-info.h
+++ b/deps/v8/src/unoptimized-compilation-info.h
@@ -46,6 +46,11 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
void MarkAsCollectTypeProfile() { SetFlag(kCollectTypeProfile); }
bool collect_type_profile() const { return GetFlag(kCollectTypeProfile); }
+ void MarkAsForceCollectSourcePositions() { SetFlag(kCollectSourcePositions); }
+ bool collect_source_positions() const {
+ return GetFlag(kCollectSourcePositions);
+ }
+
void MarkAsMightAlwaysOpt() { SetFlag(kMightAlwaysOpt); }
bool might_always_opt() const { return GetFlag(kMightAlwaysOpt); }
@@ -102,6 +107,7 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
kIsNative = 1 << 1,
kCollectTypeProfile = 1 << 2,
kMightAlwaysOpt = 1 << 3,
+ kCollectSourcePositions = 1 << 4,
};
void SetFlag(Flag flag) { flags_ |= flag; }
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 7ba6ba487d..a6b3502eec 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -52,11 +52,6 @@ inline char HexCharOfValue(int value) {
inline int BoolToInt(bool b) { return b ? 1 : 0; }
-// Same as strcmp, but can handle NULL arguments.
-inline bool CStringEquals(const char* s1, const char* s2) {
- return (s1 == s2) || (s1 != nullptr && s2 != nullptr && strcmp(s1, s2) == 0);
-}
-
// Checks if value is in range [lower_limit, higher_limit] using a single
// branch.
template <typename T, typename U>
@@ -220,14 +215,6 @@ T Nabs(T a) {
return a < 0 ? a : -a;
}
-// Floor(-0.0) == 0.0
-inline double Floor(double x) {
-#if V8_CC_MSVC
- if (x == 0) return x; // Fix for issue 3477.
-#endif
- return std::floor(x);
-}
-
inline double Modulo(double x, double y) {
#if defined(V8_OS_WIN)
// Workaround MS fmod bugs. ECMA-262 says:
@@ -253,35 +240,6 @@ inline double Modulo(double x, double y) {
#endif
}
-inline double Pow(double x, double y) {
- if (y == 0.0) return 1.0;
- if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
- return std::numeric_limits<double>::quiet_NaN();
- }
-#if (defined(__MINGW64_VERSION_MAJOR) && \
- (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
- defined(V8_OS_AIX)
- // MinGW64 and AIX have a custom implementation for pow. This handles certain
- // special cases that are different.
- if ((x == 0.0 || std::isinf(x)) && y != 0.0 && std::isfinite(y)) {
- double f;
- double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
- /* retain sign if odd integer exponent */
- return ((std::modf(y, &f) == 0.0) && (static_cast<int64_t>(y) & 1))
- ? copysign(result, x)
- : result;
- }
-
- if (x == 2.0) {
- int y_int = static_cast<int>(y);
- if (y == y_int) {
- return std::ldexp(1.0, y_int);
- }
- }
-#endif
- return std::pow(x, y);
-}
-
template <typename T>
T SaturateAdd(T a, T b) {
if (std::is_signed<T>::value) {
@@ -614,36 +572,6 @@ class SetOncePointer {
T* pointer_ = nullptr;
};
-
-template <typename T, int kSize>
-class EmbeddedVector : public Vector<T> {
- public:
- EmbeddedVector() : Vector<T>(buffer_, kSize) { }
-
- explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
- for (int i = 0; i < kSize; ++i) {
- buffer_[i] = initial_value;
- }
- }
-
- // When copying, make underlying Vector to reference our buffer.
- EmbeddedVector(const EmbeddedVector& rhs) V8_NOEXCEPT : Vector<T>(rhs) {
- MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
- this->set_start(buffer_);
- }
-
- EmbeddedVector& operator=(const EmbeddedVector& rhs) V8_NOEXCEPT {
- if (this == &rhs) return *this;
- Vector<T>::operator=(rhs);
- MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
- this->set_start(buffer_);
- return *this;
- }
-
- private:
- T buffer_[kSize];
-};
-
// Compare 8bit/16bit chars to 8bit/16bit chars.
template <typename lchar, typename rchar>
inline int CompareCharsUnsigned(const lchar* lhs, const rchar* rhs,
@@ -1065,34 +993,30 @@ bool StringToArrayIndex(Stream* stream, uint32_t* index);
// return an address significantly above the actual current stack position.
V8_NOINLINE uintptr_t GetCurrentStackPosition();
-template <typename V>
-static inline V ByteReverse(V value) {
- size_t size_of_v = sizeof(value);
- switch (size_of_v) {
- case 2:
+static inline uint16_t ByteReverse16(uint16_t value) {
#if V8_HAS_BUILTIN_BSWAP16
- return static_cast<V>(__builtin_bswap16(static_cast<uint16_t>(value)));
+ return __builtin_bswap16(value);
#else
return value << 8 | ((value >> 8) & 0x00FF);
#endif
- case 4:
+}
+
+static inline uint32_t ByteReverse32(uint32_t value) {
#if V8_HAS_BUILTIN_BSWAP32
- return static_cast<V>(__builtin_bswap32(static_cast<uint32_t>(value)));
+ return __builtin_bswap32(value);
#else
- {
- size_t bits_of_v = size_of_v * kBitsPerByte;
- return value << (bits_of_v - 8) |
- ((value << (bits_of_v - 24)) & 0x00FF0000) |
- ((value >> (bits_of_v - 24)) & 0x0000FF00) |
- ((value >> (bits_of_v - 8)) & 0x00000FF);
- }
+ return value << 24 |
+ ((value << 8) & 0x00FF0000) |
+ ((value >> 8) & 0x0000FF00) |
+ ((value >> 24) & 0x000000FF);
#endif
- case 8:
+}
+
+static inline uint64_t ByteReverse64(uint64_t value) {
#if V8_HAS_BUILTIN_BSWAP64
- return static_cast<V>(__builtin_bswap64(static_cast<uint64_t>(value)));
+ return __builtin_bswap64(value);
#else
- {
- size_t bits_of_v = size_of_v * kBitsPerByte;
+ size_t bits_of_v = sizeof(value) * kBitsPerByte;
return value << (bits_of_v - 8) |
((value << (bits_of_v - 24)) & 0x00FF000000000000) |
((value << (bits_of_v - 40)) & 0x0000FF0000000000) |
@@ -1101,8 +1025,21 @@ static inline V ByteReverse(V value) {
((value >> (bits_of_v - 40)) & 0x0000000000FF0000) |
((value >> (bits_of_v - 24)) & 0x000000000000FF00) |
((value >> (bits_of_v - 8)) & 0x00000000000000FF);
- }
#endif
+}
+
+template <typename V>
+static inline V ByteReverse(V value) {
+ size_t size_of_v = sizeof(value);
+ switch (size_of_v) {
+ case 1:
+ return value;
+ case 2:
+ return static_cast<V>(ByteReverse16(static_cast<uint16_t>(value)));
+ case 4:
+ return static_cast<V>(ByteReverse32(static_cast<uint32_t>(value)));
+ case 8:
+ return static_cast<V>(ByteReverse64(static_cast<uint64_t>(value)));
default:
UNREACHABLE();
}
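As an illustrative check of what the split helpers compute (values assumed, easily verified by hand):

    #include <cstdint>

    // Same fallback logic as ByteReverse16 above, restated standalone.
    constexpr uint16_t SwapBytes16(uint16_t v) {
      return static_cast<uint16_t>(v << 8 | v >> 8);
    }
    static_assert(SwapBytes16(0x1234) == 0x3412, "bytes reversed");
    // Likewise ByteReverse32(0x12345678) == 0x78563412 and
    // ByteReverse64(0x0102030405060708) == 0x0807060504030201.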
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index ee2a3ba8ce..318e846d61 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -21,7 +21,6 @@
#include "src/libsampler/sampler.h"
#include "src/objects-inl.h"
#include "src/profiler/heap-profiler.h"
-#include "src/reloc-info.h"
#include "src/runtime-profiler.h"
#include "src/simulator.h"
#include "src/snapshot/natives.h"
@@ -48,12 +47,11 @@ bool V8::Initialize() {
void V8::TearDown() {
+ wasm::WasmEngine::GlobalTearDown();
#if defined(USE_SIMULATOR)
Simulator::GlobalTearDown();
#endif
- wasm::WasmEngine::GlobalTearDown();
CallDescriptors::TearDown();
- Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
RegisteredExtension::UnregisterAll();
FlagList::ResetAllFlags(); // Frees memory held by string arguments.
@@ -80,6 +78,20 @@ void V8::InitializeOncePerProcessImpl() {
std::ios_base::trunc);
}
+ // Do not expose wasm in jitless mode.
+ //
+ // Even in interpreter-only mode, wasm currently still creates executable
+ // memory at runtime. Unexpose wasm until this changes.
+ // The correctness fuzzers are a special case: many of their test cases are
+ // built by fetching a random property from the global object, and thus
+ // the global object layout must not change between configs. That is why we
+ // continue exposing wasm on correctness fuzzers even in jitless mode.
+ // TODO(jgruber): Remove this once / if wasm can run without executable
+ // memory.
+ if (FLAG_jitless && !FLAG_abort_on_stack_or_string_length_overflow) {
+ FLAG_expose_wasm = false;
+ }
+
base::OS::Initialize(FLAG_hard_abort, FLAG_gc_fake_mmap);
if (FLAG_random_seed) SetRandomMmapSeed(FLAG_random_seed);
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/v8memory.h
index bf62e3b9e6..e927962296 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/v8memory.h
@@ -16,6 +16,8 @@ namespace internal {
// proposal (http://wg21.link/p0593r2) to make it defined behaviour though.
template <class T>
T& Memory(Address addr) {
+ // {addr} must be aligned.
+ DCHECK_EQ(0, addr & (alignof(T) - 1));
return *reinterpret_cast<T*>(addr);
}
template <class T>
@@ -26,23 +28,15 @@ T& Memory(byte* addr) {
template <typename V>
static inline V ReadUnalignedValue(Address p) {
ASSERT_TRIVIALLY_COPYABLE(V);
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
- return *reinterpret_cast<const V*>(p);
-#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
V r;
- memmove(&r, reinterpret_cast<void*>(p), sizeof(V));
+ memcpy(&r, reinterpret_cast<void*>(p), sizeof(V));
return r;
-#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
}
template <typename V>
static inline void WriteUnalignedValue(Address p, V value) {
ASSERT_TRIVIALLY_COPYABLE(V);
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
- *(reinterpret_cast<V*>(p)) = value;
-#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
- memmove(reinterpret_cast<void*>(p), &value, sizeof(V));
-#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
+ memcpy(reinterpret_cast<void*>(p), &value, sizeof(V));
}
static inline double ReadFloatValue(Address p) {
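The memcpy form above is the portable idiom for unaligned access; a standalone sketch under the same assumption (V is trivially copyable):

    #include <cstdint>
    #include <cstring>

    template <typename V>
    V ReadUnaligned(const uint8_t* p) {
      V result;
      // memcpy is defined for any alignment; compilers lower it to a single
      // load on targets that support unaligned access, so the old
      // reinterpret_cast fast path is unnecessary and the MIPS/ARM special
      // case can be dropped.
      std::memcpy(&result, p, sizeof(V));
      return result;
    }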
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 4075fe9d63..7873ac88f5 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -279,7 +279,7 @@ void ValueSerializer::WriteBigIntContents(BigInt bigint) {
void ValueSerializer::WriteRawBytes(const void* source, size_t length) {
uint8_t* dest;
- if (ReserveRawBytes(length).To(&dest)) {
+ if (ReserveRawBytes(length).To(&dest) && length > 0) {
memcpy(dest, source, length);
}
}
@@ -1697,7 +1697,9 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer(
should_initialize)) {
return MaybeHandle<JSArrayBuffer>();
}
- memcpy(array_buffer->backing_store(), position_, byte_length);
+ if (byte_length > 0) {
+ memcpy(array_buffer->backing_store(), position_, byte_length);
+ }
position_ += byte_length;
AddObjectWithID(id, array_buffer);
return array_buffer;
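The guards above exist because calling memcpy with a null pointer is undefined behaviour even when the length is zero, and a zero-length ArrayBuffer may have a null backing store. A minimal sketch of the pattern:

    #include <cstddef>
    #include <cstring>

    void CopyIfNonEmpty(void* dst, const void* src, size_t n) {
      // Skip the call entirely for empty payloads; passing nullptr to
      // memcpy is UB regardless of n.
      if (n > 0) std::memcpy(dst, src, n);
    }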
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index 75e4a51e91..8b67d7cde3 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -313,6 +313,35 @@ inline constexpr auto VectorOf(Container&& c)
return VectorOf(c.data(), c.size());
}
+template <typename T, int kSize>
+class EmbeddedVector : public Vector<T> {
+ public:
+ EmbeddedVector() : Vector<T>(buffer_, kSize) {}
+
+ explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
+ for (int i = 0; i < kSize; ++i) {
+ buffer_[i] = initial_value;
+ }
+ }
+
+ // When copying, make the underlying Vector reference our buffer.
+ EmbeddedVector(const EmbeddedVector& rhs) V8_NOEXCEPT : Vector<T>(rhs) {
+ MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ this->set_start(buffer_);
+ }
+
+ EmbeddedVector& operator=(const EmbeddedVector& rhs) V8_NOEXCEPT {
+ if (this == &rhs) return *this;
+ Vector<T>::operator=(rhs);
+ MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
+ this->set_start(buffer_);
+ return *this;
+ }
+
+ private:
+ T buffer_[kSize];
+};
+
} // namespace internal
} // namespace v8
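A minimal usage sketch for the relocated class (illustrative only, assuming the SNPrintF helper from src/utils.h, which writes into a Vector<char>):

    // EmbeddedVector pairs the Vector<T> interface with inline on-stack
    // storage, so no heap allocation is needed for small scratch buffers.
    EmbeddedVector<char, 64> buffer;
    int written = SNPrintF(buffer, "slot %d", 42);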
diff --git a/deps/v8/src/visitors.cc b/deps/v8/src/visitors.cc
index a877fc30ef..4bb5c00ed7 100644
--- a/deps/v8/src/visitors.cc
+++ b/deps/v8/src/visitors.cc
@@ -4,7 +4,7 @@
#include "src/visitors.h"
-#include "src/objects/code.h"
+#include "src/reloc-info.h"
namespace v8 {
namespace internal {
@@ -23,5 +23,11 @@ const char* RootVisitor::RootName(Root root) {
return nullptr;
}
+void ObjectVisitor::VisitRelocInfo(RelocIterator* it) {
+ for (; !it->done(); it->next()) {
+ it->rinfo()->Visit(this);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index a4de6ceed7..26f63ea302 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -377,9 +377,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
// ran out of scratch registers.
if (temps.CanAcquire()) {
src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
- offset_imm + kRegisterSize);
+ offset_imm + kSystemPointerSize);
} else {
- add(src_op.rm(), src_op.rm(), Operand(kRegisterSize));
+ add(src_op.rm(), src_op.rm(), Operand(kSystemPointerSize));
}
ldr(dst.high_gp(), src_op);
break;
@@ -450,9 +450,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
// ran out of scratch registers.
if (temps.CanAcquire()) {
dst_op = liftoff::GetMemOp(this, &temps, dst_addr, offset_reg,
- offset_imm + kRegisterSize);
+ offset_imm + kSystemPointerSize);
} else {
- add(dst_op.rm(), dst_op.rm(), Operand(kRegisterSize));
+ add(dst_op.rm(), dst_op.rm(), Operand(kSystemPointerSize));
}
str(src.high_gp(), dst_op);
break;
@@ -465,7 +465,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- int32_t offset = (caller_slot_idx + 1) * kRegisterSize;
+ int32_t offset = (caller_slot_idx + 1) * kSystemPointerSize;
MemOperand src(fp, offset);
switch (type) {
case kWasmI32:
@@ -473,7 +473,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
break;
case kWasmI64:
ldr(dst.low_gp(), src);
- ldr(dst.high_gp(), MemOperand(fp, offset + kRegisterSize));
+ ldr(dst.high_gp(), MemOperand(fp, offset + kSystemPointerSize));
break;
case kWasmF32:
vldr(liftoff::GetFloatRegister(dst.fp()), src);
@@ -1358,7 +1358,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
break;
case kWasmI64:
str(args->low_gp(), MemOperand(sp, arg_bytes));
- str(args->high_gp(), MemOperand(sp, arg_bytes + kRegisterSize));
+ str(args->high_gp(), MemOperand(sp, arg_bytes + kSystemPointerSize));
break;
case kWasmF32:
vstr(liftoff::GetFloatRegister(args->fp()), MemOperand(sp, arg_bytes));
@@ -1490,7 +1490,7 @@ void LiftoffStackSlots::Construct() {
UNREACHABLE();
}
break;
- case LiftoffAssembler::VarState::KIntConst: {
+ case LiftoffAssembler::VarState::kIntConst: {
DCHECK(src.type() == kWasmI32 || src.type() == kWasmI64);
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
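The kRegisterSize to kSystemPointerSize substitutions above address the two halves of a wasm i64, which on 32-bit ARM occupies two consecutive pointer-sized slots, low word first. An illustrative recombination, assuming that little-endian layout:

    #include <cstdint>

    // Low word lives at {offset}, high word at {offset + kSystemPointerSize}.
    uint64_t CombineI64Halves(uint32_t low, uint32_t high) {
      return (static_cast<uint64_t>(high) << 32) | low;
    }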
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index d85b9b268b..50e8e0db94 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -1025,7 +1025,7 @@ void LiftoffStackSlots::Construct() {
asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.type()),
poke_offset);
break;
- case LiftoffAssembler::VarState::KIntConst:
+ case LiftoffAssembler::VarState::kIntConst:
DCHECK(slot.src_.type() == kWasmI32 || slot.src_.type() == kWasmI64);
if (slot.src_.i32_const() == 0) {
Register zero_reg = slot.src_.type() == kWasmI32 ? wzr : xzr;
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 067c79be32..91e2139d44 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -1780,7 +1780,7 @@ void LiftoffStackSlots::Construct() {
liftoff::push(asm_, src.reg(), src.type());
}
break;
- case LiftoffAssembler::VarState::KIntConst:
+ case LiftoffAssembler::VarState::kIntConst:
// The high word is the sign extension of the low word.
asm_->push(Immediate(slot.half_ == kLowWord ? src.i32_const()
: src.i32_const() >> 31));
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index e7415e2079..fd380b36a6 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -103,7 +103,7 @@ class StackTransferRecipe {
case VarState::kRegister:
asm_->Spill(dst_index, src.reg(), src.type());
break;
- case VarState::KIntConst:
+ case VarState::kIntConst:
asm_->Spill(dst_index, src.constant());
break;
}
@@ -111,7 +111,7 @@ class StackTransferRecipe {
case VarState::kRegister:
LoadIntoRegister(dst.reg(), src, src_index);
break;
- case VarState::KIntConst:
+ case VarState::kIntConst:
DCHECK_EQ(dst, src);
break;
}
@@ -128,7 +128,7 @@ class StackTransferRecipe {
DCHECK_EQ(dst.reg_class(), src.reg_class());
if (dst != src.reg()) MoveRegister(dst, src.reg(), src.type());
break;
- case VarState::KIntConst:
+ case VarState::kIntConst:
LoadConstant(dst, src.constant());
break;
}
@@ -151,7 +151,7 @@ class StackTransferRecipe {
if (dst != src_half) MoveRegister(dst, src_half, kWasmI32);
break;
}
- case VarState::KIntConst:
+ case VarState::kIntConst:
int32_t value = src.i32_const();
// The high word is the sign extension of the low word.
if (half == kHighWord) value = value >> 31;
@@ -512,7 +512,7 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
case VarState::kRegister:
cache_state_.dec_used(slot.reg());
return slot.reg();
- case VarState::KIntConst: {
+ case VarState::kIntConst: {
RegClass rc =
kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
LiftoffRegister reg = GetUnusedRegister(rc, pinned);
@@ -566,7 +566,7 @@ void LiftoffAssembler::Spill(uint32_t index) {
Spill(index, slot.reg(), slot.type());
cache_state_.dec_used(slot.reg());
break;
- case VarState::KIntConst:
+ case VarState::kIntConst:
Spill(index, slot.constant());
break;
}
@@ -853,7 +853,7 @@ std::ostream& operator<<(std::ostream& os, VarState slot) {
return os << "s";
case VarState::kRegister:
return os << slot.reg();
- case VarState::KIntConst:
+ case VarState::kIntConst:
return os << "c" << slot.i32_const();
}
UNREACHABLE();
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 3ff60a42ab..7ac25bf252 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -40,7 +40,7 @@ class LiftoffAssembler : public TurboAssembler {
class VarState {
public:
- enum Location : uint8_t { kStack, kRegister, KIntConst };
+ enum Location : uint8_t { kStack, kRegister, kIntConst };
explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
explicit VarState(ValueType type, LiftoffRegister r)
@@ -48,7 +48,7 @@ class LiftoffAssembler : public TurboAssembler {
DCHECK_EQ(r.reg_class(), reg_class_for(type));
}
explicit VarState(ValueType type, int32_t i32_const)
- : loc_(KIntConst), type_(type), i32_const_(i32_const) {
+ : loc_(kIntConst), type_(type), i32_const_(i32_const) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
}
@@ -60,7 +60,7 @@ class LiftoffAssembler : public TurboAssembler {
return true;
case kRegister:
return reg_ == other.reg_;
- case KIntConst:
+ case kIntConst:
return i32_const_ == other.i32_const_;
}
UNREACHABLE();
@@ -70,19 +70,19 @@ class LiftoffAssembler : public TurboAssembler {
bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
bool is_reg() const { return loc_ == kRegister; }
- bool is_const() const { return loc_ == KIntConst; }
+ bool is_const() const { return loc_ == kIntConst; }
ValueType type() const { return type_; }
Location loc() const { return loc_; }
int32_t i32_const() const {
- DCHECK_EQ(loc_, KIntConst);
+ DCHECK_EQ(loc_, kIntConst);
return i32_const_;
}
WasmValue constant() const {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
- DCHECK_EQ(loc_, KIntConst);
+ DCHECK_EQ(loc_, kIntConst);
return type_ == kWasmI32 ? WasmValue(i32_const_)
: WasmValue(int64_t{i32_const_});
}
@@ -105,7 +105,7 @@ class LiftoffAssembler : public TurboAssembler {
union {
LiftoffRegister reg_; // used if loc_ == kRegister
- int32_t i32_const_; // used if loc_ == KIntConst
+ int32_t i32_const_; // used if loc_ == kIntConst
};
};
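The rename fixes the one enumerator that violated V8's kCamelCase constant naming. A minimal sketch of how the tagged union is used, per the constructors declared above (illustrative only):

    LiftoffAssembler::VarState slot(kWasmI32, 42);  // loc_ == kIntConst
    DCHECK(slot.is_const());
    DCHECK_EQ(42, slot.i32_const());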
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 8c5203479e..5ad9dc7315 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -32,7 +32,7 @@ namespace internal {
namespace wasm {
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
-constexpr auto KIntConst = LiftoffAssembler::VarState::KIntConst;
+constexpr auto kIntConst = LiftoffAssembler::VarState::kIntConst;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;
namespace {
@@ -174,11 +174,12 @@ class LiftoffCompiler {
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_) {}
- ~LiftoffCompiler() { BindUnboundLabels(nullptr); }
-
bool ok() const { return ok_; }
- void GetCode(CodeDesc* desc) { asm_.GetCode(nullptr, desc); }
+ void GetCode(CodeDesc* desc) {
+ asm_.GetCode(nullptr, desc, &safepoint_table_builder_,
+ Assembler::kNoHandlerTable);
+ }
OwnedVector<uint8_t> GetSourcePositionTable() {
return source_position_table_builder_.ToSourcePositionTableVector();
@@ -199,7 +200,7 @@ class LiftoffCompiler {
TRACE("unsupported: %s\n", reason);
decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
reason);
- BindUnboundLabels(decoder);
+ UnuseLabels(decoder);
}
bool DidAssemblerBailout(FullDecoder* decoder) {
@@ -225,23 +226,21 @@ class LiftoffCompiler {
return safepoint_table_builder_.GetCodeOffset();
}
- void BindUnboundLabels(FullDecoder* decoder) {
+ void UnuseLabels(FullDecoder* decoder) {
#ifdef DEBUG
- // Bind all labels now, otherwise their destructor will fire a DCHECK error
+ auto Unuse = [](Label* label) {
+ label->Unuse();
+ label->UnuseNear();
+ };
+ // Unuse all labels now, otherwise their destructor will fire a DCHECK error
// if they were referenced before.
uint32_t control_depth = decoder ? decoder->control_depth() : 0;
for (uint32_t i = 0; i < control_depth; ++i) {
Control* c = decoder->control_at(i);
- Label* label = c->label.get();
- if (!label->is_bound()) __ bind(label);
- if (c->else_state) {
- Label* else_label = c->else_state->label.get();
- if (!else_label->is_bound()) __ bind(else_label);
- }
- }
- for (auto& ool : out_of_line_code_) {
- if (!ool.label.get()->is_bound()) __ bind(ool.label.get());
+ Unuse(c->label.get());
+ if (c->else_state) Unuse(c->else_state->label.get());
}
+ for (auto& ool : out_of_line_code_) Unuse(ool.label.get());
#endif
}
@@ -445,13 +444,14 @@ class LiftoffCompiler {
__ GetTotalFrameSlotCount());
__ FinishCode();
safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCount());
+ __ MaybeEmitOutOfLineConstantPool();
// The previous calls may have also generated a bailout.
DidAssemblerBailout(decoder);
}
void OnFirstError(FullDecoder* decoder) {
ok_ = false;
- BindUnboundLabels(decoder);
+ UnuseLabels(decoder);
asm_.AbortCompilation();
}
@@ -1134,7 +1134,7 @@ class LiftoffCompiler {
case kRegister:
__ PushRegister(slot.type(), slot.reg());
break;
- case KIntConst:
+ case kIntConst:
__ cache_state()->stack_state.emplace_back(imm.type, slot.i32_const());
break;
case kStack: {
@@ -1178,7 +1178,7 @@ class LiftoffCompiler {
target_slot = source_slot;
if (is_tee) state.inc_used(target_slot.reg());
break;
- case KIntConst:
+ case kIntConst:
if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot = source_slot;
break;
@@ -1242,6 +1242,16 @@ class LiftoffCompiler {
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}
+ void GetTable(FullDecoder* decoder, const Value& index, Value* result,
+ TableIndexImmediate<validate>& imm) {
+ unsupported(decoder, "table_get");
+ }
+
+ void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
+ TableIndexImmediate<validate>& imm) {
+ unsupported(decoder, "table_set");
+ }
+
void Unreachable(FullDecoder* decoder) {
Label* unreachable_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapUnreachable);
@@ -1789,11 +1799,12 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize);
- __ LoadConstant(LiftoffRegister(tmp_const),
- WasmValue(static_cast<uint32_t>(sizeof(uint32_t))));
- // TODO(wasm): use a emit_i32_shli() instead of a multiply.
+ // Multiply {index} by 4 to represent kInt32Size items.
+ STATIC_ASSERT(kInt32Size == 4);
+ // TODO(wasm): use an emit_i32_shli() instead of two adds.
// (currently cannot use shl on ia32/x64 because it clobbers %rcx).
- __ emit_i32_mul(index, index, tmp_const);
+ __ emit_i32_add(index, index, index);
+ __ emit_i32_add(index, index, index);
__ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
pinned);
@@ -1805,20 +1816,28 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnequal, sig_mismatch_label,
LiftoffAssembler::kWasmIntPtr, scratch, tmp_const);
+ // At this point {index} has already been multiplied by 4.
DEBUG_CODE_COMMENT("Execute indirect call");
- if (kSystemPointerSize == 8) {
- // {index} has already been multiplied by 4. Multiply by another 2.
- __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(2));
- __ emit_i32_mul(index, index, tmp_const);
+ if (kTaggedSize != kInt32Size) {
+ DCHECK_EQ(kTaggedSize, kInt32Size * 2);
+ // Multiply {index} by another 2 to represent kTaggedSize items.
+ __ emit_i32_add(index, index, index);
}
+ // At this point {index} has already been multiplied by kTaggedSize.
// Load the instance from {instance->ift_instances[key]}
LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs);
- // {index} has already been multiplied by kSystemPointerSizeLog2.
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
__ LoadTaggedPointer(tmp_const, table, index,
ObjectAccess::ElementOffsetInTaggedFixedArray(0),
pinned);
+
+ if (kTaggedSize != kSystemPointerSize) {
+ DCHECK_EQ(kSystemPointerSize, kTaggedSize * 2);
+ // Multiply {index} by another 2 to represent kSystemPointerSize items.
+ __ emit_i32_add(index, index, index);
+ }
+ // At this point {index} has already been multiplied by kSystemPointerSize.
+
Register* explicit_instance = &tmp_const;
// Load the target from {instance->ift_targets[key]}
@@ -1845,6 +1864,16 @@ class LiftoffCompiler {
__ FinishCall(imm.sig, call_descriptor);
}
+ void ReturnCall(FullDecoder* decoder,
+ const CallFunctionImmediate<validate>& imm,
+ const Value args[]) {
+ unsupported(decoder, "return_call");
+ }
+ void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
+ const CallIndirectImmediate<validate>& imm,
+ const Value args[]) {
+ unsupported(decoder, "return_call_indirect");
+ }
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
unsupported(decoder, "simd");
@@ -1886,12 +1915,11 @@ class LiftoffCompiler {
const Value& src, const Value& size) {
unsupported(decoder, "memory.init");
}
- void MemoryDrop(FullDecoder* decoder,
- const MemoryDropImmediate<validate>& imm) {
- unsupported(decoder, "memory.drop");
+ void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
+ unsupported(decoder, "data.drop");
}
void MemoryCopy(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm, const Value& dst,
+ const MemoryCopyImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
unsupported(decoder, "memory.copy");
}
@@ -1904,11 +1932,10 @@ class LiftoffCompiler {
Vector<Value> args) {
unsupported(decoder, "table.init");
}
- void TableDrop(FullDecoder* decoder,
- const TableDropImmediate<validate>& imm) {
- unsupported(decoder, "table.drop");
+ void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
+ unsupported(decoder, "elem.drop");
}
- void TableCopy(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
unsupported(decoder, "table.copy");
}
@@ -1975,7 +2002,10 @@ WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
decoder.Decode();
liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
- if (decoder.failed()) return WasmCompilationResult{decoder.error()};
+ if (decoder.failed()) {
+ compiler->OnFirstError(&decoder);
+ return WasmCompilationResult{decoder.error()};
+ }
if (!compiler->ok()) {
// Liftoff compilation failed.
counters->liftoff_unsupported_functions()->Increment();
@@ -1998,7 +2028,7 @@ WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
result.source_positions = compiler->GetSourcePositionTable();
result.protected_instructions = compiler->GetProtectedInstructions();
result.frame_slot_count = compiler->GetTotalFrameSlotCount();
- result.safepoint_table_offset = compiler->GetSafepointTableOffset();
+ result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
DCHECK(result.succeeded());
return result;
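A standalone restatement of the shift-free scaling used in the indirect-call sequence above (illustrative only): adding a register to itself doubles it, so two adds scale by 4 without clobbering %rcx the way emit_i32_shl would on ia32/x64.

    int32_t ScaleByFour(int32_t index) {
      index += index;  // index * 2
      index += index;  // index * 4
      return index;
    }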
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index cb66406de4..4fecffb97d 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -1245,7 +1245,7 @@ inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
-}; // namespace liftoff
+} // namespace liftoff
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
@@ -1473,7 +1473,7 @@ void LiftoffStackSlots::Construct() {
liftoff::push(asm_, src.reg(), src.type());
}
break;
- case LiftoffAssembler::VarState::KIntConst: {
+ case LiftoffAssembler::VarState::kIntConst: {
// The high word is the sign extension of the low word.
asm_->li(kScratchReg,
Operand(slot.half_ == kLowWord ? src.i32_const()
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 6f9de8189c..3a963cefd6 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -1094,7 +1094,7 @@ inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
-}; // namespace liftoff
+} // namespace liftoff
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
@@ -1308,7 +1308,7 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kRegister:
liftoff::push(asm_, src.reg(), src.type());
break;
- case LiftoffAssembler::VarState::KIntConst: {
+ case LiftoffAssembler::VarState::kIntConst: {
asm_->li(kScratchReg, Operand(src.i32_const()));
asm_->push(kScratchReg);
break;
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 35a2e855f1..60924bfc1a 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -112,11 +112,11 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->pushq(reg.gp());
break;
case kWasmF32:
- assm->subp(rsp, Immediate(kSystemPointerSize));
+ assm->subq(rsp, Immediate(kSystemPointerSize));
assm->Movss(Operand(rsp, 0), reg.fp());
break;
case kWasmF64:
- assm->subp(rsp, Immediate(kSystemPointerSize));
+ assm->subq(rsp, Immediate(kSystemPointerSize));
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
default:
@@ -170,7 +170,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
if (RelocInfo::IsNone(rmode)) {
TurboAssembler::Set(reg.gp(), value.to_i64());
} else {
- movq(reg.gp(), value.to_i64(), rmode);
+ movq(reg.gp(), Immediate64(value.to_i64(), rmode));
}
break;
case kWasmF32:
@@ -187,7 +187,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
DCHECK_LE(offset, kMaxInt);
- movp(dst, liftoff::GetInstanceOperand());
+ movq(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
movl(dst, Operand(dst, offset));
@@ -199,16 +199,16 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
uint32_t offset) {
DCHECK_LE(offset, kMaxInt);
- movp(dst, liftoff::GetInstanceOperand());
+ movq(dst, liftoff::GetInstanceOperand());
LoadTaggedPointerField(dst, Operand(dst, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
- movp(liftoff::GetInstanceOperand(), instance);
+ movq(liftoff::GetInstanceOperand(), instance);
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- movp(dst, liftoff::GetInstanceOperand());
+ movq(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
@@ -698,9 +698,9 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
if (lhs.gp() != dst.gp()) {
- leap(dst.gp(), Operand(lhs.gp(), rhs.gp(), times_1, 0));
+ leaq(dst.gp(), Operand(lhs.gp(), rhs.gp(), times_1, 0));
} else {
- addp(dst.gp(), rhs.gp());
+ addq(dst.gp(), rhs.gp());
}
}
@@ -1412,7 +1412,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- cmpp(rsp, Operand(limit_address, 0));
+ cmpq(rsp, Operand(limit_address, 0));
j(below_equal, ool_code);
}
@@ -1435,7 +1435,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList fp_regs = regs & kFpCacheRegList;
unsigned num_fp_regs = fp_regs.GetNumRegsSet();
if (num_fp_regs) {
- subp(rsp, Immediate(num_fp_regs * kStackSlotSize));
+ subq(rsp, Immediate(num_fp_regs * kStackSlotSize));
unsigned offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
@@ -1456,7 +1456,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
fp_regs.clear(reg);
fp_offset += sizeof(double);
}
- if (fp_offset) addp(rsp, Immediate(fp_offset));
+ if (fp_offset) addq(rsp, Immediate(fp_offset));
LiftoffRegList gp_regs = regs & kGpCacheRegList;
while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetLastRegSet();
@@ -1476,7 +1476,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
- subp(rsp, Immediate(stack_bytes));
+ subq(rsp, Immediate(stack_bytes));
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
@@ -1486,7 +1486,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
DCHECK_LE(arg_bytes, stack_bytes);
// Pass a pointer to the buffer with the arguments to the C function.
- movp(arg_reg_1, rsp);
+ movq(arg_reg_1, rsp);
constexpr int kNumCCallArgs = 1;
@@ -1510,7 +1510,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_type);
}
- addp(rsp, Immediate(stack_bytes));
+ addq(rsp, Immediate(stack_bytes));
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
@@ -1538,12 +1538,12 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- subp(rsp, Immediate(size));
- movp(addr, rsp);
+ subq(rsp, Immediate(size));
+ movq(addr, rsp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- addp(rsp, Immediate(size));
+ addq(rsp, Immediate(size));
}
void LiftoffStackSlots::Construct() {
@@ -1567,7 +1567,7 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kRegister:
liftoff::push(asm_, src.reg(), src.type());
break;
- case LiftoffAssembler::VarState::KIntConst:
+ case LiftoffAssembler::VarState::kIntConst:
asm_->pushq(Immediate(src.i32_const()));
break;
}
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index c6bed6c2e4..bbcbf9f25e 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_COMPILATION_ENVIRONMENT_H_
#define V8_WASM_COMPILATION_ENVIRONMENT_H_
+#include <memory>
+
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -12,9 +14,13 @@
namespace v8 {
namespace internal {
+
+class Counters;
+
namespace wasm {
class NativeModule;
+class WasmCode;
class WasmError;
enum RuntimeExceptionSupport : bool {
@@ -97,10 +103,11 @@ enum class CompilationEvent : uint8_t {
// This is the PIMPL interface to that private class.
class CompilationState {
public:
- using callback_t = std::function<void(CompilationEvent, const WasmError*)>;
+ using callback_t = std::function<void(CompilationEvent)>;
+
~CompilationState();
- void CancelAndWait();
+ void AbortCompilation();
void SetError(uint32_t func_index, const WasmError& error);
@@ -112,12 +119,15 @@ class CompilationState {
bool failed() const;
+ void OnFinishedUnit(ExecutionTier, WasmCode*);
+
private:
friend class NativeModule;
friend class WasmCompilationUnit;
CompilationState() = delete;
- static std::unique_ptr<CompilationState> New(Isolate*, NativeModule*);
+ static std::unique_ptr<CompilationState> New(NativeModule*,
+ std::shared_ptr<Counters>);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 2c555bb413..f1071dc1b0 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -11,7 +11,9 @@
#include "src/base/compiler-specific.h"
#include "src/flags.h"
#include "src/signature.h"
+#include "src/utils.h"
#include "src/v8memory.h"
+#include "src/vector.h"
#include "src/wasm/wasm-result.h"
#include "src/zone/zone-containers.h"
@@ -341,7 +343,9 @@ class Decoder {
DCHECK_LT(pc, end_);
b = *pc;
TRACE_IF(trace, "%02x ", b);
- result = result | ((static_cast<IntType>(b) & 0x7f) << shift);
+ typedef typename std::make_unsigned<IntType>::type Unsigned;
+ result = result |
+ (static_cast<Unsigned>(static_cast<IntType>(b) & 0x7f) << shift);
}
if (!is_last_byte && (b & 0x80)) {
// Make sure that we only instantiate the template for valid byte indexes.
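The cast through the unsigned type matters because left-shifting bits into the sign position of a signed integer is undefined behaviour before C++20. A minimal sketch of the well-defined accumulation step (names assumed for illustration):

    #include <cstdint>
    #include <type_traits>

    template <typename IntType>
    IntType AccumulateLebByte(IntType result, uint8_t b, int shift) {
      using Unsigned = typename std::make_unsigned<IntType>::type;
      // Shift in the unsigned domain, then fold back into the result.
      return result | static_cast<IntType>(
                          static_cast<Unsigned>(b & 0x7f) << shift);
    }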
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 578a0ff5b7..f23fb81049 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -327,8 +327,9 @@ struct CallFunctionImmediate {
template <Decoder::ValidateFlag validate>
struct MemoryIndexImmediate {
- uint32_t index;
+ uint32_t index = 0;
uint32_t length = 1;
+ inline MemoryIndexImmediate() = default;
inline MemoryIndexImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u8<validate>(pc + 1, "memory index");
if (!VALIDATE(index == 0)) {
@@ -339,13 +340,11 @@ struct MemoryIndexImmediate {
template <Decoder::ValidateFlag validate>
struct TableIndexImmediate {
- uint32_t index;
+ uint32_t index = 0;
unsigned length = 1;
+ inline TableIndexImmediate() = default;
inline TableIndexImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u8<validate>(pc + 1, "table index");
- if (!VALIDATE(index == 0)) {
- decoder->errorf(pc + 1, "expected table index 0, found %u", index);
- }
}
};
@@ -462,56 +461,88 @@ struct Simd8x16ShuffleImmediate {
template <Decoder::ValidateFlag validate>
struct MemoryInitImmediate {
- MemoryIndexImmediate<validate> memory;
uint32_t data_segment_index = 0;
+ MemoryIndexImmediate<validate> memory;
unsigned length = 0;
- inline MemoryInitImmediate(Decoder* decoder, const byte* pc)
- : memory(decoder, pc + 1) {
- if (!VALIDATE(decoder->ok())) return;
+ inline MemoryInitImmediate(Decoder* decoder, const byte* pc) {
uint32_t len = 0;
- data_segment_index = decoder->read_i32v<validate>(
- pc + 2 + memory.length, &len, "data segment index");
- length = memory.length + len;
+ data_segment_index =
+ decoder->read_i32v<validate>(pc + 2, &len, "data segment index");
+ if (!VALIDATE(decoder->ok())) return;
+ memory = MemoryIndexImmediate<validate>(decoder, pc + 1 + len);
+ length = len + memory.length;
}
};
template <Decoder::ValidateFlag validate>
-struct MemoryDropImmediate {
+struct DataDropImmediate {
uint32_t index;
unsigned length;
- inline MemoryDropImmediate(Decoder* decoder, const byte* pc) {
+ inline DataDropImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_i32v<validate>(pc + 2, &length, "data segment index");
}
};
template <Decoder::ValidateFlag validate>
+struct MemoryCopyImmediate {
+ MemoryIndexImmediate<validate> memory_src;
+ MemoryIndexImmediate<validate> memory_dst;
+ unsigned length = 0;
+
+ inline MemoryCopyImmediate(Decoder* decoder, const byte* pc) {
+ memory_src = MemoryIndexImmediate<validate>(decoder, pc + 1);
+ if (!VALIDATE(decoder->ok())) return;
+ memory_dst =
+ MemoryIndexImmediate<validate>(decoder, pc + 1 + memory_src.length);
+ if (!VALIDATE(decoder->ok())) return;
+ length = memory_src.length + memory_dst.length;
+ }
+};
+
+template <Decoder::ValidateFlag validate>
struct TableInitImmediate {
- TableIndexImmediate<validate> table;
uint32_t elem_segment_index = 0;
+ TableIndexImmediate<validate> table;
unsigned length = 0;
- inline TableInitImmediate(Decoder* decoder, const byte* pc)
- : table(decoder, pc + 1) {
- if (!VALIDATE(decoder->ok())) return;
+ inline TableInitImmediate(Decoder* decoder, const byte* pc) {
uint32_t len = 0;
- elem_segment_index = decoder->read_i32v<validate>(
- pc + 2 + table.length, &len, "elem segment index");
- length = table.length + len;
+ elem_segment_index =
+ decoder->read_i32v<validate>(pc + 2, &len, "elem segment index");
+ if (!VALIDATE(decoder->ok())) return;
+ table = TableIndexImmediate<validate>(decoder, pc + 1 + len);
+ length = len + table.length;
}
};
template <Decoder::ValidateFlag validate>
-struct TableDropImmediate {
+struct ElemDropImmediate {
uint32_t index;
unsigned length;
- inline TableDropImmediate(Decoder* decoder, const byte* pc) {
+ inline ElemDropImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_i32v<validate>(pc + 2, &length, "elem segment index");
}
};
+template <Decoder::ValidateFlag validate>
+struct TableCopyImmediate {
+ TableIndexImmediate<validate> table_src;
+ TableIndexImmediate<validate> table_dst;
+ unsigned length = 0;
+
+ inline TableCopyImmediate(Decoder* decoder, const byte* pc) {
+ table_src = TableIndexImmediate<validate>(decoder, pc + 1);
+ if (!VALIDATE(decoder->ok())) return;
+ table_dst =
+ TableIndexImmediate<validate>(decoder, pc + 1 + table_src.length);
+ if (!VALIDATE(decoder->ok())) return;
+ length = table_src.length + table_dst.length;
+ }
+};
+
// An entry on the value stack.
struct ValueBase {
const byte* pc = nullptr;
@@ -644,6 +675,10 @@ struct ControlBase {
const LocalIndexImmediate<validate>& imm) \
F(GetGlobal, Value* result, const GlobalIndexImmediate<validate>& imm) \
F(SetGlobal, const Value& value, const GlobalIndexImmediate<validate>& imm) \
+ F(GetTable, const Value& index, Value* result, \
+ const TableIndexImmediate<validate>& imm) \
+ F(SetTable, const Value& index, const Value& value, \
+ const TableIndexImmediate<validate>& imm) \
F(Unreachable) \
F(Select, const Value& cond, const Value& fval, const Value& tval, \
Value* result) \
@@ -662,6 +697,10 @@ struct ControlBase {
F(CallIndirect, const Value& index, \
const CallIndirectImmediate<validate>& imm, const Value args[], \
Value returns[]) \
+ F(ReturnCall, const CallFunctionImmediate<validate>& imm, \
+ const Value args[]) \
+ F(ReturnCallIndirect, const Value& index, \
+ const CallIndirectImmediate<validate>& imm, const Value args[]) \
F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
const Vector<Value> inputs, Value* result) \
@@ -679,14 +718,14 @@ struct ControlBase {
const MemoryAccessImmediate<validate>& imm, Value* result) \
F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
const Value& src, const Value& size) \
- F(MemoryDrop, const MemoryDropImmediate<validate>& imm) \
- F(MemoryCopy, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
+ F(DataDrop, const DataDropImmediate<validate>& imm) \
+ F(MemoryCopy, const MemoryCopyImmediate<validate>& imm, const Value& dst, \
const Value& src, const Value& size) \
F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
const Value& value, const Value& size) \
F(TableInit, const TableInitImmediate<validate>& imm, Vector<Value> args) \
- F(TableDrop, const TableDropImmediate<validate>& imm) \
- F(TableCopy, const TableIndexImmediate<validate>& imm, Vector<Value> args)
+ F(ElemDrop, const ElemDropImmediate<validate>& imm) \
+ F(TableCopy, const TableCopyImmediate<validate>& imm, Vector<Value> args)
// Generic Wasm bytecode decoder with utilities for decoding immediates,
// lengths, etc.
@@ -762,6 +801,15 @@ class WasmDecoder : public Decoder {
}
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
+ case kLocalAnyFunc:
+ if (enabled.anyref) {
+ type = kWasmAnyFunc;
+ break;
+ }
+ decoder->error(decoder->pc() - 1,
+ "local type 'anyfunc' is not enabled with "
+ "--experimental-wasm-anyref");
+ return false;
case kLocalExceptRef:
if (enabled.eh) {
type = kWasmExceptRef;
@@ -820,6 +868,8 @@ class WasmDecoder : public Decoder {
case kExprMemoryGrow:
case kExprCallFunction:
case kExprCallIndirect:
+ case kExprReturnCall:
+ case kExprReturnCallIndirect:
// Add instance cache nodes to the assigned set.
// TODO(titzer): make this more clear.
assigned->Add(locals_count - 1);
@@ -874,6 +924,16 @@ class WasmDecoder : public Decoder {
return true;
}
+ inline bool CanReturnCall(FunctionSig* target_sig) {
+ if (target_sig == nullptr) return false;
+ size_t num_returns = sig_->return_count();
+ if (num_returns != target_sig->return_count()) return false;
+ for (size_t i = 0; i < num_returns; ++i) {
+ if (sig_->GetReturn(i) != target_sig->GetReturn(i)) return false;
+ }
+ return true;
+ }
+
inline bool Complete(const byte* pc, CallFunctionImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr &&
imm.index < module_->functions.size())) {
@@ -1024,26 +1084,27 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Validate(MemoryIndexImmediate<validate>& imm) {
+ inline bool Validate(const byte* pc, MemoryIndexImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr && module_->has_memory)) {
- errorf(pc_ + 1, "memory instruction with no memory");
+ errorf(pc + 1, "memory instruction with no memory");
return false;
}
return true;
}
inline bool Validate(MemoryInitImmediate<validate>& imm) {
- if (!Validate(imm.memory)) return false;
if (!VALIDATE(module_ != nullptr &&
imm.data_segment_index <
module_->num_declared_data_segments)) {
errorf(pc_ + 2, "invalid data segment index: %u", imm.data_segment_index);
return false;
}
+ if (!Validate(pc_ + imm.length - imm.memory.length - 1, imm.memory))
+ return false;
return true;
}
- inline bool Validate(MemoryDropImmediate<validate>& imm) {
+ inline bool Validate(DataDropImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr &&
imm.index < module_->num_declared_data_segments)) {
errorf(pc_ + 2, "invalid data segment index: %u", imm.index);
@@ -1052,26 +1113,33 @@ class WasmDecoder : public Decoder {
return true;
}
+ inline bool Validate(MemoryCopyImmediate<validate>& imm) {
+ if (!Validate(pc_ + 1, imm.memory_src)) return false;
+ if (!Validate(pc_ + 2, imm.memory_dst)) return false;
+ return true;
+ }
+
inline bool Validate(const byte* pc, TableIndexImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr && imm.index < module_->tables.size())) {
- errorf(pc_ + 1, "invalid table index: %u", imm.index);
+ errorf(pc, "invalid table index: %u", imm.index);
return false;
}
return true;
}
inline bool Validate(TableInitImmediate<validate>& imm) {
- if (!Validate(pc_ + 1, imm.table)) return false;
if (!VALIDATE(module_ != nullptr &&
imm.elem_segment_index < module_->elem_segments.size())) {
errorf(pc_ + 2, "invalid element segment index: %u",
imm.elem_segment_index);
return false;
}
+ if (!Validate(pc_ + imm.length - imm.table.length - 1, imm.table))
+ return false;
return true;
}
- inline bool Validate(TableDropImmediate<validate>& imm) {
+ inline bool Validate(ElemDropImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr &&
imm.index < module_->elem_segments.size())) {
errorf(pc_ + 2, "invalid element segment index: %u", imm.index);
@@ -1080,6 +1148,12 @@ class WasmDecoder : public Decoder {
return true;
}
+ inline bool Validate(TableCopyImmediate<validate>& imm) {
+ if (!Validate(pc_ + 1, imm.table_src)) return false;
+ if (!Validate(pc_ + 2, imm.table_dst)) return false;
+ return true;
+ }
+
static uint32_t OpcodeLength(Decoder* decoder, const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
switch (opcode) {
@@ -1096,17 +1170,23 @@ class WasmDecoder : public Decoder {
BranchDepthImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
- case kExprSetGlobal:
- case kExprGetGlobal: {
+ case kExprGetGlobal:
+ case kExprSetGlobal: {
GlobalIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
-
- case kExprCallFunction: {
+ case kExprGetTable:
+ case kExprSetTable: {
+ TableIndexImmediate<validate> imm(decoder, pc);
+ return 1 + imm.length;
+ }
+ case kExprCallFunction:
+ case kExprReturnCall: {
CallFunctionImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
- case kExprCallIndirect: {
+ case kExprCallIndirect:
+ case kExprReturnCallIndirect: {
CallIndirectImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
@@ -1182,11 +1262,14 @@ class WasmDecoder : public Decoder {
MemoryInitImmediate<validate> imm(decoder, pc);
return 2 + imm.length;
}
- case kExprMemoryDrop: {
- MemoryDropImmediate<validate> imm(decoder, pc);
+ case kExprDataDrop: {
+ DataDropImmediate<validate> imm(decoder, pc);
+ return 2 + imm.length;
+ }
+ case kExprMemoryCopy: {
+ MemoryCopyImmediate<validate> imm(decoder, pc);
return 2 + imm.length;
}
- case kExprMemoryCopy:
case kExprMemoryFill: {
MemoryIndexImmediate<validate> imm(decoder, pc + 1);
return 2 + imm.length;
@@ -1195,12 +1278,12 @@ class WasmDecoder : public Decoder {
TableInitImmediate<validate> imm(decoder, pc);
return 2 + imm.length;
}
- case kExprTableDrop: {
- TableDropImmediate<validate> imm(decoder, pc);
+ case kExprElemDrop: {
+ ElemDropImmediate<validate> imm(decoder, pc);
return 2 + imm.length;
}
case kExprTableCopy: {
- TableIndexImmediate<validate> imm(decoder, pc + 1);
+ TableCopyImmediate<validate> imm(decoder, pc);
return 2 + imm.length;
}
default:
@@ -1272,9 +1355,11 @@ class WasmDecoder : public Decoder {
switch (opcode) {
case kExprSelect:
return {3, 1};
+ case kExprSetTable:
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
return {2, 0};
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ case kExprGetTable:
case kExprTeeLocal:
case kExprMemoryGrow:
return {1, 1};
@@ -1286,7 +1371,6 @@ class WasmDecoder : public Decoder {
case kExprIf:
case kExprRethrow:
return {1, 0};
- case kExprCatch:
case kExprGetLocal:
case kExprGetGlobal:
case kExprI32Const:
@@ -1320,9 +1404,12 @@ class WasmDecoder : public Decoder {
case kExprEnd:
case kExprElse:
case kExprTry:
+ case kExprCatch:
case kExprBrOnExn:
case kExprNop:
case kExprReturn:
+ case kExprReturnCall:
+ case kExprReturnCallIndirect:
case kExprUnreachable:
return {0, 0};
case kNumericPrefix:
@@ -1893,7 +1980,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprRefNull: {
CHECK_PROTOTYPE_OPCODE(anyref);
- auto* value = Push(kWasmAnyRef);
+ auto* value = Push(kWasmNullRef);
CALL_INTERFACE_IF_REACHABLE(RefNull, value);
len = 1;
break;
@@ -1949,6 +2036,28 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm);
break;
}
+ case kExprGetTable: {
+ CHECK_PROTOTYPE_OPCODE(anyref);
+ TableIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ DCHECK_NOT_NULL(this->module_);
+ auto index = Pop(0, kWasmI32);
+ auto* result = Push(this->module_->tables[imm.index].type);
+ CALL_INTERFACE_IF_REACHABLE(GetTable, index, result, imm);
+ break;
+ }
+ case kExprSetTable: {
+ CHECK_PROTOTYPE_OPCODE(anyref);
+ TableIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ auto value = Pop(0, this->module_->tables[imm.index].type);
+ auto index = Pop(0, kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(SetTable, index, value, imm);
+ break;
+ }
+
case kExprI32LoadMem8S:
len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
break;
@@ -2061,6 +2170,39 @@ class WasmFullDecoder : public WasmDecoder<validate> {
returns);
break;
}
+ case kExprReturnCall: {
+ CHECK_PROTOTYPE_OPCODE(return_call);
+
+ CallFunctionImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ if (!this->CanReturnCall(imm.sig)) {
+ OPCODE_ERROR(opcode, "tail call return types mismatch");
+ break;
+ }
+
+ PopArgs(imm.sig);
+
+ CALL_INTERFACE_IF_REACHABLE(ReturnCall, imm, args_.data());
+ EndControl();
+ break;
+ }
+ case kExprReturnCallIndirect: {
+ CHECK_PROTOTYPE_OPCODE(return_call);
+ CallIndirectImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ if (!this->CanReturnCall(imm.sig)) {
+ OPCODE_ERROR(opcode, "tail call return types mismatch");
+ break;
+ }
+ auto index = Pop(0, kWasmI32);
+ PopArgs(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(ReturnCallIndirect, index, imm,
+ args_.data());
+ EndControl();
+ break;
+ }
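+ // Both tail-call opcodes validate like their non-tail counterparts, except
+ // that {CanReturnCall} additionally matches the callee's returns against
+ // the enclosing function's signature, and {EndControl} marks the rest of
+ // the block unreachable: a tail call never returns to its caller.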
case kNumericPrefix: {
++len;
byte numeric_index =
@@ -2453,15 +2595,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_REACHABLE(MemoryInit, imm, dst, src, size);
break;
}
- case kExprMemoryDrop: {
- MemoryDropImmediate<validate> imm(this, this->pc_);
+ case kExprDataDrop: {
+ DataDropImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- CALL_INTERFACE_IF_REACHABLE(MemoryDrop, imm);
+ CALL_INTERFACE_IF_REACHABLE(DataDrop, imm);
break;
}
case kExprMemoryCopy: {
- MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
+ MemoryCopyImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
auto size = Pop(2, sig->GetParam(2));
@@ -2472,7 +2614,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprMemoryFill: {
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
- if (!this->Validate(imm)) break;
+ if (!this->Validate(this->pc_ + 1, imm)) break;
len += imm.length;
auto size = Pop(2, sig->GetParam(2));
auto value = Pop(1, sig->GetParam(1));
@@ -2488,16 +2630,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_REACHABLE(TableInit, imm, VectorOf(args_));
break;
}
- case kExprTableDrop: {
- TableDropImmediate<validate> imm(this, this->pc_);
+ case kExprElemDrop: {
+ ElemDropImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- CALL_INTERFACE_IF_REACHABLE(TableDrop, imm);
+ CALL_INTERFACE_IF_REACHABLE(ElemDrop, imm);
break;
}
case kExprTableCopy: {
- TableIndexImmediate<validate> imm(this, this->pc_ + 1);
- if (!this->Validate(this->pc_ + 1, imm)) break;
+ TableCopyImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(imm)) break;
len += imm.length;
PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableCopy, imm, VectorOf(args_));
@@ -2554,9 +2696,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return stack_.data() + old_size;
}
+ V8_INLINE bool IsSubType(ValueType expected, ValueType actual) {
+ return (expected == actual) ||
+ (expected == kWasmAnyRef && actual == kWasmNullRef) ||
+ (expected == kWasmAnyRef && actual == kWasmAnyFunc) ||
+ (expected == kWasmAnyFunc && actual == kWasmNullRef);
+ }
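+ // The checks above encode the one-level reference-type lattice
+ // {nullref} <: {anyfunc} <: {anyref}. Note the asymmetry:
+ // IsSubType(expected, actual) asks whether {actual} may be used where
+ // {expected} is required.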
+
V8_INLINE Value Pop(int index, ValueType expected) {
auto val = Pop();
- if (!VALIDATE(val.type == expected || val.type == kWasmVar ||
+ if (!VALIDATE(IsSubType(expected, val.type) || val.type == kWasmVar ||
expected == kWasmVar)) {
this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
SafeOpcodeNameAt(this->pc_), index,
@@ -2603,7 +2752,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < merge->arity; ++i) {
Value& val = stack_values[i];
Value& old = (*merge)[i];
- if (val.type == old.type) continue;
+ if (IsSubType(old.type, val.type)) continue;
// If {val.type} is polymorphic, which results from unreachable, make
// it more specific by using the merge value's expected type.
// If it is not polymorphic, this is a type error.
@@ -2674,7 +2823,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < num_returns; ++i) {
auto& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
- if (val.type == expected_type) continue;
+ if (IsSubType(expected_type, val.type)) continue;
// If {val.type} is polymorphic, which results from unreachable,
// make it more specific by using the return's expected type.
// If it is not polymorphic, this is a type error.
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index c166683bee..deb4caee15 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -124,7 +124,7 @@ ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier(
WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine, int index,
ExecutionTier tier)
- : wasm_engine_(wasm_engine), func_index_(index), tier_(tier) {
+ : wasm_engine_(wasm_engine), func_index_(index), requested_tier_(tier) {
if (V8_UNLIKELY(FLAG_wasm_tier_mask_for_testing) && index < 32 &&
(FLAG_wasm_tier_mask_for_testing & (1 << index))) {
tier = ExecutionTier::kOptimized;
@@ -154,11 +154,11 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
if (FLAG_trace_wasm_compiler) {
PrintF("Compiling wasm function %d with %s\n\n", func_index_,
- GetExecutionTierAsString(tier_));
+ GetExecutionTierAsString(executed_tier_));
}
WasmCompilationResult result;
- switch (tier_) {
+ switch (executed_tier_) {
case ExecutionTier::kBaseline:
result =
liftoff_unit_->ExecuteCompilation(env, func_body, counters, detected);
@@ -193,20 +193,16 @@ WasmCode* WasmCompilationUnit::Publish(WasmCompilationResult result,
return nullptr;
}
- // The {tier} argument specifies the requested tier, which can differ from the
- // actually executed tier stored in {unit->tier()}.
DCHECK(result.succeeded());
- WasmCode::Tier code_tier = tier_ == ExecutionTier::kBaseline
+ WasmCode::Tier code_tier = executed_tier_ == ExecutionTier::kBaseline
? WasmCode::kLiftoff
: WasmCode::kTurbofan;
DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
+
WasmCode* code = native_module->AddCode(
func_index_, result.code_desc, result.frame_slot_count,
- result.safepoint_table_offset, result.handler_table_offset,
- std::move(result.protected_instructions),
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
std::move(result.source_positions), WasmCode::kFunction, code_tier);
- // TODO(clemensh): Merge this into {AddCode}?
- native_module->PublishCode(code);
return code;
}
@@ -214,7 +210,7 @@ void WasmCompilationUnit::SwitchTier(ExecutionTier new_tier) {
// This method is being called in the constructor, where neither
// {liftoff_unit_} nor {turbofan_unit_} is set, or to switch the tier from
// kLiftoff to kTurbofan, in which case {liftoff_unit_} is already set.
- tier_ = new_tier;
+ executed_tier_ = new_tier;
switch (new_tier) {
case ExecutionTier::kBaseline:
DCHECK(!turbofan_unit_);
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index 8f235a5d1c..c7d1d5e21d 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_FUNCTION_COMPILER_H_
#define V8_WASM_FUNCTION_COMPILER_H_
+#include "src/code-desc.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-body-decoder.h"
@@ -60,8 +61,7 @@ struct WasmCompilationResult {
CodeDesc code_desc;
std::unique_ptr<uint8_t[]> instr_buffer;
uint32_t frame_slot_count = 0;
- size_t safepoint_table_offset = 0;
- size_t handler_table_offset = 0;
+ uint32_t tagged_parameter_slots = 0;
OwnedVector<byte> source_positions;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions;
@@ -87,7 +87,8 @@ class WasmCompilationUnit final {
WasmCode* Publish(WasmCompilationResult, NativeModule*);
- ExecutionTier tier() const { return tier_; }
+ ExecutionTier requested_tier() const { return requested_tier_; }
+ ExecutionTier executed_tier() const { return executed_tier_; }
static void CompileWasmFunction(Isolate*, NativeModule*,
WasmFeatures* detected, const WasmFunction*,
@@ -99,8 +100,8 @@ class WasmCompilationUnit final {
WasmEngine* const wasm_engine_;
const int func_index_;
- ExecutionTier tier_;
- WasmCode* result_ = nullptr;
+ ExecutionTier requested_tier_;
+ ExecutionTier executed_tier_;
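+ // {requested_tier_} records what the caller asked for; {executed_tier_} is
+ // the tier that actually ran, which {SwitchTier} may upgrade, e.g. when
+ // Liftoff bails out and the unit falls back to TurboFan.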
// LiftoffCompilationUnit, set if {executed_tier_ == ExecutionTier::kBaseline}.
std::unique_ptr<LiftoffCompilationUnit> liftoff_unit_;
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index ac297662c8..b64dd0351f 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -287,6 +287,16 @@ class WasmGraphBuildingInterface {
BUILD(SetGlobal, imm.index, value.node);
}
+ void GetTable(FullDecoder* decoder, const Value& index, Value* result,
+ const TableIndexImmediate<validate>& imm) {
+ result->node = BUILD(GetTable, imm.index, index.node, decoder->position());
+ }
+
+ void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
+ const TableIndexImmediate<validate>& imm) {
+ BUILD(SetTable, imm.index, index.node, value.node, decoder->position());
+ }
+
void Unreachable(FullDecoder* decoder) {
BUILD(Unreachable, decoder->position());
}
@@ -395,12 +405,24 @@ class WasmGraphBuildingInterface {
DoCall(decoder, nullptr, imm.sig, imm.index, args, returns);
}
+ void ReturnCall(FullDecoder* decoder,
+ const CallFunctionImmediate<validate>& imm,
+ const Value args[]) {
+ UNIMPLEMENTED();
+ }
+
void CallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
DoCall(decoder, index.node, imm.sig, imm.sig_index, args, returns);
}
+ void ReturnCallIndirect(FullDecoder* decoder, const Value& index,
+ const CallIndirectImmediate<validate>& imm,
+ const Value args[]) {
+ UNIMPLEMENTED();
+ }
+
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
TFNode** inputs = GetNodes(args);
@@ -510,12 +532,11 @@ class WasmGraphBuildingInterface {
BUILD(MemoryInit, imm.data_segment_index, dst.node, src.node, size.node,
decoder->position());
}
- void MemoryDrop(FullDecoder* decoder,
- const MemoryDropImmediate<validate>& imm) {
- BUILD(MemoryDrop, imm.index, decoder->position());
+ void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
+ BUILD(DataDrop, imm.index, decoder->position());
}
void MemoryCopy(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm, const Value& dst,
+ const MemoryCopyImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
BUILD(MemoryCopy, dst.node, src.node, size.node, decoder->position());
}
@@ -529,14 +550,13 @@ class WasmGraphBuildingInterface {
BUILD(TableInit, imm.table.index, imm.elem_segment_index, args[0].node,
args[1].node, args[2].node, decoder->position());
}
- void TableDrop(FullDecoder* decoder,
- const TableDropImmediate<validate>& imm) {
- BUILD(TableDrop, imm.index, decoder->position());
+ void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
+ BUILD(ElemDrop, imm.index, decoder->position());
}
- void TableCopy(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
- BUILD(TableCopy, imm.index, args[0].node, args[1].node, args[2].node,
- decoder->position());
+ BUILD(TableCopy, imm.table_src.index, imm.table_dst.index, args[0].node,
+ args[1].node, args[2].node, decoder->position());
}
private:
@@ -644,6 +664,7 @@ class WasmGraphBuildingInterface {
case kWasmS128:
return builder_->S128Zero();
case kWasmAnyRef:
+ case kWasmAnyFunc:
case kWasmExceptRef:
return builder_->RefNull();
default:
@@ -793,10 +814,12 @@ class WasmGraphBuildingInterface {
result->effect = from->effect;
result->state = SsaEnv::kReached;
- result->locals =
- size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
- : nullptr;
- memcpy(result->locals, from->locals, size);
+ if (size > 0) {
+ result->locals = reinterpret_cast<TFNode**>(decoder->zone()->New(size));
+ memcpy(result->locals, from->locals, size);
+ } else {
+ result->locals = nullptr;
+ }
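+ // The explicit branch avoids calling memcpy with a null pointer, which is
+ // undefined behavior even when the size is zero.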
result->instance_cache = from->instance_cache;
return result;
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index 462e3a4b5a..ec230ff742 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -40,6 +40,10 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
PatchConstPool(); // force patching entries for partial const pool
}
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+ JumpToInstructionStream(builtin_target);
+}
+
void JumpTableAssembler::EmitJumpSlot(Address target) {
movq(kScratchRegister, static_cast<uint64_t>(target));
jmp(kScratchRegister);
@@ -57,6 +61,10 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
}
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+ JumpToInstructionStream(builtin_target);
+}
+
void JumpTableAssembler::EmitJumpSlot(Address target) {
jmp(target, RelocInfo::NONE);
}
@@ -81,6 +89,11 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
EmitJumpSlot(lazy_compile_target);
}
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+ JumpToInstructionStream(builtin_target);
+ CheckConstPool(true, false); // force emit of const pool
+}
+
void JumpTableAssembler::EmitJumpSlot(Address target) {
// Note that {Move32BitImmediate} emits [ldr, constant] for the relocation
// mode used below; we need this to allow concurrent patching of this slot.
@@ -103,6 +116,11 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
}
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+ JumpToInstructionStream(builtin_target);
+ CheckConstPool(true, false); // force emit of const pool
+}
+
void JumpTableAssembler::EmitJumpSlot(Address target) {
// TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is
// patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make
@@ -118,7 +136,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
-#elif V8_TARGET_ARCH_S390
+#elif V8_TARGET_ARCH_S390X
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
// Load function index to r7. 6 bytes
@@ -128,6 +146,10 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
b(r1); // 2 bytes
}
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+ JumpToInstructionStream(builtin_target);
+}
+
void JumpTableAssembler::EmitJumpSlot(Address target) {
mov(r1, Operand(target));
b(r1);
@@ -150,6 +172,10 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Jump(lazy_compile_target, RelocInfo::NONE);
}
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+ JumpToInstructionStream(builtin_target);
+}
+
void JumpTableAssembler::EmitJumpSlot(Address target) {
Jump(target, RelocInfo::NONE);
}
@@ -162,7 +188,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
// Load function index to register. max 5 instrs
@@ -173,6 +199,10 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
bctr();
}
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+ JumpToInstructionStream(builtin_target);
+}
+
void JumpTableAssembler::EmitJumpSlot(Address target) {
mov(r0, Operand(target));
mtctr(r0);
@@ -193,6 +223,10 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
UNIMPLEMENTED();
}
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+ UNIMPLEMENTED();
+}
+
void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }
void JumpTableAssembler::NopBytes(int bytes) {
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index 68fe596660..548639a1ba 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -27,7 +27,7 @@ namespace wasm {
//
// The above illustrates jump table lines {Li} containing slots {Si} with each
// line containing {n} slots and some padding {x} for alignment purposes.
-class JumpTableAssembler : public TurboAssembler {
+class JumpTableAssembler : public MacroAssembler {
public:
// Translate an offset into the continuous jump table to a jump table index.
static uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
@@ -55,6 +55,16 @@ class JumpTableAssembler : public TurboAssembler {
kJumpTableLineSize;
}
+ // Translate a stub slot index to an offset into the continuous jump table.
+ static uint32_t StubSlotIndexToOffset(uint32_t slot_index) {
+ return slot_index * kJumpTableStubSlotSize;
+ }
+
+ // Determine the size of a jump table containing only runtime stub slots.
+ static constexpr uint32_t SizeForNumberOfStubSlots(uint32_t slot_count) {
+ return slot_count * kJumpTableStubSlotSize;
+ }
+
static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
uint32_t func_index,
Address lazy_compile_target,
@@ -64,7 +74,19 @@ class JumpTableAssembler : public TurboAssembler {
jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
if (flush_i_cache) {
- Assembler::FlushICache(slot, kJumpTableSlotSize);
+ FlushInstructionCache(slot, kJumpTableSlotSize);
+ }
+ }
+
+ static void EmitRuntimeStubSlot(Address base, uint32_t slot_index,
+ Address builtin_target,
+ WasmCode::FlushICache flush_i_cache) {
+ Address slot = base + StubSlotIndexToOffset(slot_index);
+ JumpTableAssembler jtasm(slot);
+ jtasm.EmitRuntimeStubSlot(builtin_target);
+ jtasm.NopBytes(kJumpTableStubSlotSize - jtasm.pc_offset());
+ if (flush_i_cache) {
+ FlushInstructionCache(slot, kJumpTableStubSlotSize);
}
}
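+ // Illustrative call site (not part of this patch): a native module could
+ // emit one such slot per runtime stub when populating its stub table, e.g.
+ //   JumpTableAssembler::EmitRuntimeStubSlot(stub_table_base, stub_index,
+ //                                           builtin_target,
+ //                                           WasmCode::kFlushICache);
+ // where {stub_table_base} and {stub_index} are hypothetical names.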
@@ -76,14 +98,14 @@ class JumpTableAssembler : public TurboAssembler {
jtasm.EmitJumpSlot(new_target);
jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
if (flush_i_cache) {
- Assembler::FlushICache(slot, kJumpTableSlotSize);
+ FlushInstructionCache(slot, kJumpTableSlotSize);
}
}
private:
// Instantiate a {JumpTableAssembler} for patching.
explicit JumpTableAssembler(Address slot_addr, int size = 256)
- : TurboAssembler(nullptr, JumpTableAssemblerOptions(),
+ : MacroAssembler(nullptr, JumpTableAssemblerOptions(),
CodeObjectRequired::kNo,
ExternalAssemblerBuffer(
reinterpret_cast<uint8_t*>(slot_addr), size)) {}
@@ -94,36 +116,39 @@ class JumpTableAssembler : public TurboAssembler {
#if V8_TARGET_ARCH_X64
static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 18;
+ static constexpr int kJumpTableStubSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 10;
+ static constexpr int kJumpTableStubSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
static constexpr int kJumpTableLineSize = 5 * kInstrSize;
static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
+ static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
#elif V8_TARGET_ARCH_ARM64
static constexpr int kJumpTableLineSize = 3 * kInstrSize;
static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
+ static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#elif V8_TARGET_ARCH_S390X
- static constexpr int kJumpTableLineSize = 20;
+ static constexpr int kJumpTableLineSize = 128;
static constexpr int kJumpTableSlotSize = 20;
-#elif V8_TARGET_ARCH_S390
- static constexpr int kJumpTableLineSize = 14;
- static constexpr int kJumpTableSlotSize = 14;
+ static constexpr int kJumpTableStubSlotSize = 14;
#elif V8_TARGET_ARCH_PPC64
- static constexpr int kJumpTableLineSize = 48;
+ static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 48;
-#elif V8_TARGET_ARCH_PPC
- static constexpr int kJumpTableLineSize = 24;
- static constexpr int kJumpTableSlotSize = 24;
+ static constexpr int kJumpTableStubSlotSize = 7 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS
static constexpr int kJumpTableLineSize = 6 * kInstrSize;
static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kJumpTableStubSlotSize = 4 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kJumpTableLineSize = 8 * kInstrSize;
static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
+ static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#else
static constexpr int kJumpTableLineSize = 1;
static constexpr int kJumpTableSlotSize = 1;
+ static constexpr int kJumpTableStubSlotSize = 1;
#endif
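+ // A stub slot only holds an unconditional jump to an embedded builtin (via
+ // {JumpToInstructionStream}), so {kJumpTableStubSlotSize} can differ from
+ // {kJumpTableSlotSize}, which must also fit the lazy-compile sequence that
+ // loads a function index.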
static constexpr int kJumpTableSlotsPerLine =
@@ -142,6 +167,8 @@ class JumpTableAssembler : public TurboAssembler {
void EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target);
+ void EmitRuntimeStubSlot(Address builtin_target);
+
void EmitJumpSlot(Address target);
void NopBytes(int bytes);
diff --git a/deps/v8/src/wasm/local-decl-encoder.cc b/deps/v8/src/wasm/local-decl-encoder.cc
index a0c7c32b00..6b0d824768 100644
--- a/deps/v8/src/wasm/local-decl-encoder.cc
+++ b/deps/v8/src/wasm/local-decl-encoder.cc
@@ -16,7 +16,9 @@ void LocalDeclEncoder::Prepend(Zone* zone, const byte** start,
size_t size = (*end - *start);
byte* buffer = reinterpret_cast<byte*>(zone->New(Size() + size));
size_t pos = Emit(buffer);
- memcpy(buffer + pos, *start, size);
+ if (size > 0) {
+ memcpy(buffer + pos, *start, size);
+ }
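+ // Guarding the call avoids passing a potentially null pointer to memcpy,
+ // which is undefined behavior even for a zero-byte copy.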
pos += size;
*start = buffer;
*end = buffer + pos;
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 885a534198..3f8e8b3db5 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -6,6 +6,7 @@
#include "src/utils.h"
#include "src/v8memory.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index bdceb0b73b..09e4822408 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -12,6 +12,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
+#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
#include "src/identity-map.h"
#include "src/property-descriptor.h"
#include "src/task-utils.h"
@@ -53,6 +54,61 @@ namespace {
enum class CompileMode : uint8_t { kRegular, kTiering };
+// Background compile jobs hold a shared pointer to this token. The token is
+// used to notify them that they should stop; as soon as they observe this
+// (after finishing their current compilation unit), they exit.
+// This allows the NativeModule to be removed without having to synchronize
+// with background compile jobs.
+class BackgroundCompileToken {
+ public:
+ explicit BackgroundCompileToken(NativeModule* native_module)
+ : native_module_(native_module) {}
+
+ void Cancel() {
+ base::SharedMutexGuard<base::kExclusive> mutex_guard(&mutex_);
+ native_module_ = nullptr;
+ }
+
+ private:
+ friend class BackgroundCompileScope;
+ base::SharedMutex mutex_;
+ NativeModule* native_module_;
+
+ NativeModule* StartScope() {
+ mutex_.LockShared();
+ return native_module_;
+ }
+
+ void ExitScope() { mutex_.UnlockShared(); }
+};
+
+class CompilationStateImpl;
+
+// Keep these scopes short, as they hold the mutex of the token, which
+// serializes all these scopes. The mutex is also acquired from foreground
+// tasks, which should not be blocked for a long time.
+class BackgroundCompileScope {
+ public:
+ explicit BackgroundCompileScope(
+ const std::shared_ptr<BackgroundCompileToken>& token)
+ : token_(token.get()), native_module_(token->StartScope()) {}
+
+ ~BackgroundCompileScope() { token_->ExitScope(); }
+
+ bool cancelled() const { return native_module_ == nullptr; }
+
+ NativeModule* native_module() {
+ DCHECK(!cancelled());
+ return native_module_;
+ }
+
+ inline CompilationStateImpl* compilation_state();
+
+ private:
+ BackgroundCompileToken* const token_;
+ NativeModule* const native_module_;
+};
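+// Illustrative usage (not part of this patch): a background task wraps each
+// synchronized step in such a scope and stops once the token was cancelled:
+//   BackgroundCompileScope scope(token_);
+//   if (scope.cancelled()) return;
+//   scope.compilation_state()->OnFinishedUnit(tier, code);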
+
// The {CompilationStateImpl} keeps track of the compilation state of the
// owning NativeModule, i.e. which functions are left to be compiled.
// It contains a task manager to allow parallel and asynchronous background
@@ -60,17 +116,17 @@ enum class CompileMode : uint8_t { kRegular, kTiering };
// Its public interface {CompilationState} lives in compilation-environment.h.
class CompilationStateImpl {
public:
- CompilationStateImpl(internal::Isolate*, NativeModule*);
+ CompilationStateImpl(NativeModule*, std::shared_ptr<Counters> async_counters);
~CompilationStateImpl();
// Cancel all background compilation and wait for all tasks to finish. Call
// this before destructing this object.
- void CancelAndWait();
+ void AbortCompilation();
// Set the number of compilation units expected to be executed. Needs to be
// set before {AddCompilationUnits} is run, which triggers background
// compilation.
- void SetNumberOfFunctionsToCompile(size_t num_functions);
+ void SetNumberOfFunctionsToCompile(int num_functions);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run.
@@ -81,9 +137,6 @@ class CompilationStateImpl {
std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units);
std::unique_ptr<WasmCompilationUnit> GetNextCompilationUnit();
- std::unique_ptr<WasmCompilationUnit> GetNextExecutedUnit();
-
- bool HasCompilationUnitToFinish();
void OnFinishedUnit(ExecutionTier, WasmCode*);
@@ -91,18 +144,10 @@ class CompilationStateImpl {
void OnBackgroundTaskStopped(const WasmFeatures& detected);
void PublishDetectedFeatures(Isolate* isolate, const WasmFeatures& detected);
void RestartBackgroundCompileTask();
- void RestartBackgroundTasks(size_t max = std::numeric_limits<size_t>::max());
- // Only one foreground thread (finisher) is allowed to run at a time.
- // {SetFinisherIsRunning} returns whether the flag changed its state.
- bool SetFinisherIsRunning(bool value);
- void ScheduleFinisherTask();
-
- void Abort();
+ void RestartBackgroundTasks();
void SetError(uint32_t func_index, const WasmError& error);
- Isolate* isolate() const { return isolate_; }
-
bool failed() const {
return compile_error_.load(std::memory_order_relaxed) != nullptr;
}
@@ -158,70 +203,12 @@ class CompilationStateImpl {
: func_index(func_index), error(std::move(error)) {}
};
- class LogCodesTask : public CancelableTask {
- public:
- LogCodesTask(CancelableTaskManager* manager,
- CompilationStateImpl* compilation_state, Isolate* isolate)
- : CancelableTask(manager),
- compilation_state_(compilation_state),
- isolate_(isolate) {
- // This task should only be created if we should actually log code.
- DCHECK(WasmCode::ShouldBeLogged(isolate));
- }
-
- // Hold the compilation state {mutex_} when calling this method.
- void AddCode(WasmCode* code) { code_to_log_.push_back(code); }
-
- void RunInternal() override {
- // Remove this task from the {CompilationStateImpl}. The next compilation
- // that finishes will allocate and schedule a new task.
- {
- base::MutexGuard guard(&compilation_state_->mutex_);
- DCHECK_EQ(this, compilation_state_->log_codes_task_);
- compilation_state_->log_codes_task_ = nullptr;
- }
- // If by now we shouldn't log code any more, don't log it.
- if (!WasmCode::ShouldBeLogged(isolate_)) return;
- for (WasmCode* code : code_to_log_) {
- code->LogCode(isolate_);
- }
- }
+ void NotifyOnEvent(CompilationEvent event);
- private:
- CompilationStateImpl* const compilation_state_;
- Isolate* const isolate_;
- std::vector<WasmCode*> code_to_log_;
- };
-
- class FreeCallbacksTask : public CancelableTask {
- public:
- explicit FreeCallbacksTask(CompilationStateImpl* comp_state)
- : CancelableTask(&comp_state->foreground_task_manager_),
- compilation_state_(comp_state) {}
-
- void RunInternal() override { compilation_state_->callbacks_.clear(); }
-
- private:
- CompilationStateImpl* const compilation_state_;
- };
-
- void NotifyOnEvent(CompilationEvent event, const WasmError* error);
-
- std::vector<std::unique_ptr<WasmCompilationUnit>>& finish_units() {
- return baseline_compilation_finished() ? tiering_finish_units_
- : baseline_finish_units_;
- }
-
- // TODO(mstarzinger): Get rid of the Isolate field to make sure the
- // {CompilationStateImpl} can be shared across multiple Isolates.
- Isolate* const isolate_;
NativeModule* const native_module_;
+ const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
const CompileMode compile_mode_;
- // Store the value of {WasmCode::ShouldBeLogged()} at creation time of the
- // compilation state.
- // TODO(wasm): We might lose log events if logging is enabled while
- // compilation is running.
- bool const should_log_code_;
+ const std::shared_ptr<Counters> async_counters_;
// Compilation error, atomically updated, but at most once (nullptr -> error).
// Uses acquire-release semantics (acquire on load, release on update).
@@ -238,27 +225,19 @@ class CompilationStateImpl {
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_compilation_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_compilation_units_;
- bool finisher_is_running_ = false;
- size_t num_background_tasks_ = 0;
-
- std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_finish_units_;
- std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_finish_units_;
+ int num_background_tasks_ = 0;
// Features detected to be used in this module. Features can be detected
// as a module is being compiled.
WasmFeatures detected_features_ = kNoWasmFeatures;
- // The foreground task to log finished wasm code. Is {nullptr} if no such task
- // is currently scheduled.
- LogCodesTask* log_codes_task_ = nullptr;
-
// Abstraction over the storage of the wire bytes. Held in a shared_ptr so
// that background compilation jobs can keep the storage alive while
// compiling.
std::shared_ptr<WireBytesStorage> wire_bytes_storage_;
- size_t outstanding_baseline_units_ = 0;
- size_t outstanding_tiering_units_ = 0;
+ int outstanding_baseline_units_ = 0;
+ int outstanding_tiering_units_ = 0;
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
@@ -267,25 +246,9 @@ class CompilationStateImpl {
// the foreground thread.
std::vector<CompilationState::callback_t> callbacks_;
- // Remember whether {Abort()} was called. When set from the foreground this
- // ensures no more callbacks will be called afterwards. No guarantees when set
- // from the background. Only needs to be atomic so that it can be set from
- // foreground and background.
- std::atomic<bool> aborted_{false};
-
- CancelableTaskManager background_task_manager_;
- CancelableTaskManager foreground_task_manager_;
- std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
-
- const size_t max_background_tasks_ = 0;
+ const int max_background_tasks_ = 0;
};
-void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
- if (detected.threads) {
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmThreadOpcodes);
- }
-}
-
CompilationStateImpl* Impl(CompilationState* compilation_state) {
return reinterpret_cast<CompilationStateImpl*>(compilation_state);
}
@@ -293,6 +256,16 @@ const CompilationStateImpl* Impl(const CompilationState* compilation_state) {
return reinterpret_cast<const CompilationStateImpl*>(compilation_state);
}
+CompilationStateImpl* BackgroundCompileScope::compilation_state() {
+ return Impl(native_module()->compilation_state());
+}
+
+void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
+ if (detected.threads) {
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmThreadOpcodes);
+ }
+}
+
} // namespace
//////////////////////////////////////////////////////
@@ -300,7 +273,7 @@ const CompilationStateImpl* Impl(const CompilationState* compilation_state) {
CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); }
-void CompilationState::CancelAndWait() { Impl(this)->CancelAndWait(); }
+void CompilationState::AbortCompilation() { Impl(this)->AbortCompilation(); }
void CompilationState::SetError(uint32_t func_index, const WasmError& error) {
Impl(this)->SetError(func_index, error);
@@ -322,11 +295,15 @@ void CompilationState::AddCallback(CompilationState::callback_t callback) {
bool CompilationState::failed() const { return Impl(this)->failed(); }
+void CompilationState::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
+ Impl(this)->OnFinishedUnit(tier, code);
+}
+
// static
std::unique_ptr<CompilationState> CompilationState::New(
- Isolate* isolate, NativeModule* native_module) {
+ NativeModule* native_module, std::shared_ptr<Counters> async_counters) {
return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
- new CompilationStateImpl(isolate, native_module)));
+ new CompilationStateImpl(native_module, std::move(async_counters))));
}
// End of PIMPL implementation of {CompilationState}.
@@ -472,9 +449,7 @@ double MonotonicallyIncreasingTimeInMs() {
}
// Run by each compilation task and by the main thread (i.e. in both
-// foreground and background threads). The no_finisher_callback is called
-// within the result_mutex_ lock when no finishing task is running, i.e. when
-// the finisher_is_running_ flag is not set.
+// foreground and background threads).
bool FetchAndExecuteCompilationUnit(CompilationEnv* env,
NativeModule* native_module,
CompilationStateImpl* compilation_state,
@@ -486,14 +461,11 @@ bool FetchAndExecuteCompilationUnit(CompilationEnv* env,
compilation_state->GetNextCompilationUnit();
if (unit == nullptr) return false;
- // Get the tier before starting compilation, as compilation can switch tiers
- // if baseline bails out.
- ExecutionTier tier = unit->tier();
WasmCompilationResult result = unit->ExecuteCompilation(
env, compilation_state->GetWireBytesStorage(), counters, detected);
WasmCode* code = unit->Publish(std::move(result), native_module);
- compilation_state->OnFinishedUnit(tier, code);
+ compilation_state->OnFinishedUnit(unit->requested_tier(), code);
return true;
}
@@ -511,15 +483,6 @@ void InitializeCompilationUnits(NativeModule* native_module,
builder.Commit();
}
-void FinishCompilationUnits(CompilationStateImpl* compilation_state) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "FinishCompilationUnits");
- while (!compilation_state->failed()) {
- std::unique_ptr<WasmCompilationUnit> unit =
- compilation_state->GetNextExecutedUnit();
- if (unit == nullptr) break;
- }
-}
-
void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
// Data structures for the parallel compilation.
@@ -532,12 +495,6 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
// the background threads.
// 2) The background threads and the main thread pick one compilation unit at
// a time and execute the parallel phase of the compilation unit.
- // 3) After the parallel phase of all compilation units has started, the
- // main thread continues to finish all compilation units as long as
- // baseline-compilation units are left to be processed.
- // 4) If tier-up is enabled, the main thread restarts background tasks
- // that take care of compiling and finishing the top-tier compilation
- // units.
// Turn on the {CanonicalHandleScope} so that the background threads can
// use the node cache.
@@ -545,12 +502,9 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
- // Make sure that no foreground task is spawned for finishing
- // the compilation units. This foreground thread will be
- // responsible for finishing compilation.
- compilation_state->SetFinisherIsRunning(true);
- uint32_t num_wasm_functions =
- native_module->num_functions() - native_module->num_imported_functions();
+ DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions);
+ int num_wasm_functions =
+ static_cast<int>(native_module->module()->num_declared_functions);
compilation_state->SetNumberOfFunctionsToCompile(num_wasm_functions);
// 1) The main thread allocates a compilation unit for each wasm function
@@ -564,38 +518,19 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
// a time and execute the parallel phase of the compilation unit.
WasmFeatures detected_features;
CompilationEnv env = native_module->CreateCompilationEnv();
- while (FetchAndExecuteCompilationUnit(&env, native_module, compilation_state,
- &detected_features,
- isolate->counters()) &&
+ // TODO(wasm): This might already execute TurboFan units on the main thread
+ // while waiting for baseline compilation to finish, which can introduce
+ // additional delay.
+ // TODO(wasm): This is a busy-wait loop once all units have started executing
+ // in background threads. Replace by a semaphore / barrier.
+ while (!compilation_state->failed() &&
!compilation_state->baseline_compilation_finished()) {
- // TODO(clemensh): Refactor ownership of the AsyncCompileJob and remove
- // this.
- FinishCompilationUnits(compilation_state);
-
- if (compilation_state->failed()) break;
- }
-
- while (!compilation_state->failed()) {
- // 3) After the parallel phase of all compilation units has started, the
- // main thread continues to finish compilation units as long as
- // baseline compilation units are left to be processed. If compilation
- // already failed, all background tasks have already been canceled
- // in {FinishCompilationUnits}, and there are no units to finish.
- FinishCompilationUnits(compilation_state);
-
- if (compilation_state->baseline_compilation_finished()) break;
+ FetchAndExecuteCompilationUnit(&env, native_module, compilation_state,
+ &detected_features, isolate->counters());
}
// Publish features from the foreground and background tasks.
compilation_state->PublishDetectedFeatures(isolate, detected_features);
-
- // 4) If tiering-compilation is enabled, we need to set the finisher
- // to false, such that the background threads will spawn a foreground
- // thread to finish the top-tier compilation units.
- if (!compilation_state->failed() &&
- compilation_state->compile_mode() == CompileMode::kTiering) {
- compilation_state->SetFinisherIsRunning(false);
- }
}
void CompileSequentially(Isolate* isolate, NativeModule* native_module,
@@ -696,100 +631,96 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
}
}
-// The runnable task that finishes compilation in foreground (e.g. updating
-// the NativeModule, the code table, etc.).
-class FinishCompileTask : public CancelableTask {
+// The runnable task that performs compilations in the background.
+class BackgroundCompileTask : public CancelableTask {
public:
- explicit FinishCompileTask(CompilationStateImpl* compilation_state,
- CancelableTaskManager* task_manager)
- : CancelableTask(task_manager), compilation_state_(compilation_state) {}
+ explicit BackgroundCompileTask(CancelableTaskManager* manager,
+ std::shared_ptr<BackgroundCompileToken> token,
+ std::shared_ptr<Counters> async_counters)
+ : CancelableTask(manager),
+ token_(std::move(token)),
+ async_counters_(std::move(async_counters)) {}
void RunInternal() override {
- Isolate* isolate = compilation_state_->isolate();
- HandleScope scope(isolate);
- SaveContext saved_context(isolate);
- isolate->set_context(Context());
-
- TRACE_COMPILE("(4a) Finishing compilation units...\n");
- if (compilation_state_->failed()) {
- compilation_state_->SetFinisherIsRunning(false);
- return;
- }
+ TRACE_COMPILE("(3b) Compiling...\n");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "BackgroundCompileTask::RunInternal");
- // We execute for 1 ms and then reschedule the task, same as the GC.
- double deadline = MonotonicallyIncreasingTimeInMs() + 1.0;
- while (true) {
- compilation_state_->RestartBackgroundTasks();
+ double deadline = MonotonicallyIncreasingTimeInMs() + 50.0;
- std::unique_ptr<WasmCompilationUnit> unit =
- compilation_state_->GetNextExecutedUnit();
+ // These fields are initialized in a {BackgroundCompileScope} before
+ // starting compilation.
+ base::Optional<CompilationEnv> env;
+ std::shared_ptr<WireBytesStorage> wire_bytes;
+ std::shared_ptr<const WasmModule> module;
+ std::unique_ptr<WasmCompilationUnit> unit;
+ WasmFeatures detected_features = kNoWasmFeatures;
+ // Preparation (synchronized): Initialize the fields above and get the first
+ // compilation unit.
+ {
+ BackgroundCompileScope compile_scope(token_);
+ if (compile_scope.cancelled()) return;
+ env.emplace(compile_scope.native_module()->CreateCompilationEnv());
+ wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage();
+ module = compile_scope.native_module()->shared_module();
+ unit = compile_scope.compilation_state()->GetNextCompilationUnit();
if (unit == nullptr) {
- // It might happen that a background task just scheduled a unit to be
- // finished, but did not start a finisher task since the flag was still
- // set. Check for this case, and continue if there is more work.
- compilation_state_->SetFinisherIsRunning(false);
- if (compilation_state_->HasCompilationUnitToFinish() &&
- compilation_state_->SetFinisherIsRunning(true)) {
- continue;
- }
- break;
- }
-
- if (compilation_state_->failed()) break;
-
- if (deadline < MonotonicallyIncreasingTimeInMs()) {
- // We reached the deadline. We reschedule this task and return
- // immediately. Since we rescheduled this task already, we do not set
- // the FinisherIsRunning flag to false.
- compilation_state_->ScheduleFinisherTask();
+ compile_scope.compilation_state()->OnBackgroundTaskStopped(
+ detected_features);
return;
}
}
- }
- private:
- CompilationStateImpl* compilation_state_;
-};
+ bool compilation_failed = false;
+ while (true) {
+ // (asynchronous): Execute the compilation.
-// The runnable task that performs compilations in the background.
-class BackgroundCompileTask : public CancelableTask {
- public:
- explicit BackgroundCompileTask(CancelableTaskManager* task_manager,
- NativeModule* native_module,
- Counters* counters)
- : CancelableTask(task_manager),
- native_module_(native_module),
- counters_(counters) {}
+ WasmCompilationResult result = unit->ExecuteCompilation(
+ &env.value(), wire_bytes, async_counters_.get(), &detected_features);
- void RunInternal() override {
- TRACE_COMPILE("(3b) Compiling...\n");
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "BackgroundCompileTask::RunInternal");
- // The number of currently running background tasks is reduced in
- // {OnBackgroundTaskStopped}.
- CompilationEnv env = native_module_->CreateCompilationEnv();
- auto* compilation_state = Impl(native_module_->compilation_state());
- WasmFeatures detected_features = kNoWasmFeatures;
- double deadline = MonotonicallyIncreasingTimeInMs() + 50.0;
- while (!compilation_state->failed()) {
- if (!FetchAndExecuteCompilationUnit(&env, native_module_,
- compilation_state, &detected_features,
- counters_)) {
- break;
- }
- if (deadline < MonotonicallyIncreasingTimeInMs()) {
- compilation_state->ReportDetectedFeatures(detected_features);
- compilation_state->RestartBackgroundCompileTask();
- return;
+ // (synchronized): Publish the compilation result and get the next unit.
+ {
+ BackgroundCompileScope compile_scope(token_);
+ if (compile_scope.cancelled()) return;
+ WasmCode* code =
+ unit->Publish(std::move(result), compile_scope.native_module());
+ if (code == nullptr) {
+ // Compile error.
+ compile_scope.compilation_state()->OnBackgroundTaskStopped(
+ detected_features);
+ compilation_failed = true;
+ break;
+ }
+
+ // Successfully finished one unit.
+ compile_scope.compilation_state()->OnFinishedUnit(
+ unit->requested_tier(), code);
+ if (deadline < MonotonicallyIncreasingTimeInMs()) {
+ compile_scope.compilation_state()->ReportDetectedFeatures(
+ detected_features);
+ compile_scope.compilation_state()->RestartBackgroundCompileTask();
+ return;
+ }
+
+ // Get next unit.
+ unit = compile_scope.compilation_state()->GetNextCompilationUnit();
+ if (unit == nullptr) {
+ compile_scope.compilation_state()->OnBackgroundTaskStopped(
+ detected_features);
+ return;
+ }
}
}
- compilation_state->OnBackgroundTaskStopped(detected_features);
+ // We only get here if compilation failed. Other exits return directly.
+ DCHECK(compilation_failed);
+ USE(compilation_failed);
+ token_->Cancel();
}
private:
- NativeModule* const native_module_;
- Counters* const counters_;
+ std::shared_ptr<BackgroundCompileToken> token_;
+ std::shared_ptr<Counters> async_counters_;
};
} // namespace
@@ -818,7 +749,7 @@ std::unique_ptr<NativeModule> CompileToNativeModule(
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
// Create a new {NativeModule} first.
- auto native_module = isolate->wasm_engine()->code_manager()->NewNativeModule(
+ auto native_module = isolate->wasm_engine()->NewNativeModule(
isolate, enabled, code_size_estimate,
wasm::NativeModule::kCanAllocateMoreMemory, std::move(module));
native_module->SetWireBytes(std::move(wire_bytes_copy));
@@ -859,11 +790,9 @@ AsyncCompileJob::AsyncCompileJob(
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
- // The handle for the context must be deferred.
- DeferredHandleScope deferred(isolate);
- native_context_ = Handle<Context>(context->native_context(), isolate);
+ native_context_ =
+ isolate->global_handles()->Create(context->native_context());
DCHECK(native_context_->IsNativeContext());
- deferred_handles_.push_back(deferred.Detach());
}
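+ // Note: unlike the deferred handles used before, global handles must be
+ // destroyed explicitly; the matching Destroy() calls are in
+ // ~AsyncCompileJob().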
void AsyncCompileJob::Start() {
@@ -886,7 +815,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
bool ProcessSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset) override;
- bool ProcessCodeSectionHeader(size_t functions_count, uint32_t offset,
+ bool ProcessCodeSectionHeader(int functions_count, uint32_t offset,
std::shared_ptr<WireBytesStorage>) override;
bool ProcessFunctionBody(Vector<const uint8_t> bytes,
@@ -912,7 +841,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
ModuleDecoder decoder_;
AsyncCompileJob* job_;
std::unique_ptr<CompilationUnitBuilder> compilation_unit_builder_;
- uint32_t next_function_ = 0;
+ int num_functions_ = 0;
};
std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
@@ -927,7 +856,7 @@ AsyncCompileJob::~AsyncCompileJob() {
// If the runtime objects were not created yet, then initial compilation did
// not finish yet. In this case we can abort compilation.
if (native_module_ && module_object_.is_null()) {
- Impl(native_module_->compilation_state())->Abort();
+ Impl(native_module_->compilation_state())->AbortCompilation();
}
// Tell the streaming decoder that the AsyncCompileJob is not available
// anymore.
@@ -935,7 +864,10 @@ AsyncCompileJob::~AsyncCompileJob() {
// https://crbug.com/888170.
if (stream_) stream_->NotifyCompilationEnded();
CancelPendingForegroundTask();
- for (auto d : deferred_handles_) delete d;
+ isolate_->global_handles()->Destroy(native_context_.location());
+ if (!module_object_.is_null()) {
+ isolate_->global_handles()->Destroy(module_object_.location());
+ }
}
void AsyncCompileJob::CreateNativeModule(
@@ -957,7 +889,7 @@ void AsyncCompileJob::CreateNativeModule(
size_t code_size_estimate =
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
- native_module_ = isolate_->wasm_engine()->code_manager()->NewNativeModule(
+ native_module_ = isolate_->wasm_engine()->NewNativeModule(
isolate_, enabled_features_, code_size_estimate,
wasm::NativeModule::kCanAllocateMoreMemory, std::move(module));
native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
@@ -975,14 +907,10 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
size_t code_size_estimate =
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module);
- module_object_ = WasmModuleObject::New(isolate_, native_module_, script,
- code_size_estimate);
+ Handle<WasmModuleObject> module_object = WasmModuleObject::New(
+ isolate_, native_module_, script, code_size_estimate);
- {
- DeferredHandleScope deferred(isolate_);
- module_object_ = handle(*module_object_, isolate_);
- deferred_handles_.push_back(deferred.Detach());
- }
+ module_object_ = isolate_->global_handles()->Create(*module_object);
}
// This function assumes that it is executed in a HandleScope, and that a
@@ -1018,11 +946,13 @@ void AsyncCompileJob::FinishCompile() {
FinishModule();
}
-void AsyncCompileJob::AsyncCompileFailed(Handle<Object> error_reason) {
+void AsyncCompileJob::AsyncCompileFailed(const WasmError& error) {
+ ErrorThrower thrower(isolate_, "WebAssembly.compile()");
+ thrower.CompileFailed(error);
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
isolate_->wasm_engine()->RemoveCompileJob(this);
- resolver_->OnCompilationFailed(error_reason);
+ resolver_->OnCompilationFailed(thrower.Reify());
}
void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
@@ -1033,15 +963,13 @@ class AsyncCompileJob::CompilationStateCallback {
public:
explicit CompilationStateCallback(AsyncCompileJob* job) : job_(job) {}
- void operator()(CompilationEvent event, const WasmError* error) {
+ void operator()(CompilationEvent event) {
// This callback is only being called from a foreground task.
switch (event) {
case CompilationEvent::kFinishedBaselineCompilation:
DCHECK(!last_event_.has_value());
if (job_->DecrementAndCheckFinisherCount()) {
- SaveContext saved_context(job_->isolate());
- job_->isolate()->set_context(*job_->native_context_);
- job_->FinishCompile();
+ job_->DoSync<CompileFinished>();
}
break;
case CompilationEvent::kFinishedTopTierCompilation:
@@ -1049,29 +977,11 @@ class AsyncCompileJob::CompilationStateCallback {
// This callback should not react to top tier finished callbacks, since
// the job might already be gone then.
break;
- case CompilationEvent::kFailedCompilation:
+ case CompilationEvent::kFailedCompilation: {
DCHECK(!last_event_.has_value());
- DCHECK_NOT_NULL(error);
- // Tier-up compilation should not fail if baseline compilation
- // did not fail.
- DCHECK(!Impl(job_->native_module_->compilation_state())
- ->baseline_compilation_finished());
-
- {
- SaveContext saved_context(job_->isolate());
- job_->isolate()->set_context(*job_->native_context_);
- ErrorThrower thrower(job_->isolate(), "AsyncCompilation");
- thrower.CompileFailed(nullptr, *error);
- Handle<Object> error = thrower.Reify();
-
- DeferredHandleScope deferred(job_->isolate());
- error = handle(*error, job_->isolate());
- job_->deferred_handles_.push_back(deferred.Detach());
-
- job_->DoSync<CompileFailed, kUseExistingForegroundTask>(error);
- }
-
+ job_->DoSync<CompileFailed>();
break;
+ }
default:
UNREACHABLE();
}
@@ -1083,6 +993,8 @@ class AsyncCompileJob::CompilationStateCallback {
private:
AsyncCompileJob* job_;
#ifdef DEBUG
+ // This is modified by different threads, but they synchronize externally,
+ // so no explicit synchronization is (currently) needed here.
base::Optional<CompilationEvent> last_event_;
#endif
};
@@ -1096,8 +1008,7 @@ class AsyncCompileJob::CompileStep {
void Run(AsyncCompileJob* job, bool on_foreground) {
if (on_foreground) {
HandleScope scope(job->isolate_);
- SaveContext saved_context(job->isolate_);
- job->isolate_->set_context(*job->native_context_);
+ SaveAndSwitchContext saved_context(job->isolate_, *job->native_context_);
RunInForeground(job);
} else {
RunInBackground(job);
@@ -1255,10 +1166,8 @@ class AsyncCompileJob::DecodeFail : public CompileStep {
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(1b) Decoding failed.\n");
- ErrorThrower thrower(job->isolate_, "AsyncCompile");
- thrower.CompileFailed("Wasm decoding failed", error_);
// {job_} is deleted in AsyncCompileFailed, therefore the {return}.
- return job->AsyncCompileFailed(thrower.Reify());
+ return job->AsyncCompileFailed(error_);
}
};
@@ -1312,20 +1221,29 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
};
//==========================================================================
-// Step 4b (sync): Compilation failed. Reject Promise.
+// Step 3a (sync): Compilation failed.
//==========================================================================
class AsyncCompileJob::CompileFailed : public CompileStep {
- public:
- explicit CompileFailed(Handle<Object> error_reason)
- : error_reason_(error_reason) {}
-
+ private:
void RunInForeground(AsyncCompileJob* job) override {
- TRACE_COMPILE("(4b) Compilation Failed...\n");
- return job->AsyncCompileFailed(error_reason_);
+ TRACE_COMPILE("(3a) Compilation failed\n");
+
+ WasmError error =
+ Impl(job->native_module_->compilation_state())->GetCompileError();
+ // {job_} is deleted in AsyncCompileFailed, therefore the {return}.
+ return job->AsyncCompileFailed(error);
}
+};
+//==========================================================================
+// Step 3b (sync): Compilation finished.
+//==========================================================================
+class AsyncCompileJob::CompileFinished : public CompileStep {
private:
- Handle<Object> error_reason_;
+ void RunInForeground(AsyncCompileJob* job) override {
+ TRACE_COMPILE("(3b) Compilation finished\n");
+ job->FinishCompile();
+ }
};
void AsyncCompileJob::CompileWrappers() {
@@ -1340,21 +1258,7 @@ void AsyncCompileJob::CompileWrappers() {
void AsyncCompileJob::FinishModule() {
TRACE_COMPILE("(6) Finish module...\n");
AsyncCompileSucceeded(module_object_);
-
- size_t num_functions = native_module_->num_functions() -
- native_module_->num_imported_functions();
- auto* compilation_state = Impl(native_module_->compilation_state());
- if (compilation_state->compile_mode() == CompileMode::kRegular ||
- num_functions == 0) {
- // If we do not tier up, the async compile job is done here and
- // can be deleted.
- isolate_->wasm_engine()->RemoveCompileJob(this);
- return;
- }
- DCHECK_EQ(CompileMode::kTiering, compilation_state->compile_mode());
- if (compilation_state->baseline_compilation_finished()) {
- isolate_->wasm_engine()->RemoveCompileJob(this);
- }
+ isolate_->wasm_engine()->RemoveCompileJob(this);
}
AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
@@ -1372,7 +1276,7 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
// Check if there is already a CompiledModule, in which case we have to clean
// up the CompilationStateImpl as well.
if (job_->native_module_) {
- Impl(job_->native_module_->compilation_state())->Abort();
+ Impl(job_->native_module_->compilation_state())->AbortCompilation();
job_->DoSync<AsyncCompileJob::DecodeFail,
AsyncCompileJob::kUseExistingForegroundTask>(error);
@@ -1434,9 +1338,9 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
// Start the code section.
bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
- size_t functions_count, uint32_t offset,
+ int functions_count, uint32_t offset,
std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
- TRACE_STREAMING("Start the code section with %zu functions...\n",
+ TRACE_STREAMING("Start the code section with %d functions...\n",
functions_count);
if (!decoder_.CheckFunctionsCount(static_cast<uint32_t>(functions_count),
offset)) {
@@ -1464,14 +1368,14 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
// Process a function body.
bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
uint32_t offset) {
- TRACE_STREAMING("Process function body %d ...\n", next_function_);
+ TRACE_STREAMING("Process function body %d ...\n", num_functions_);
decoder_.DecodeFunctionBody(
- next_function_, static_cast<uint32_t>(bytes.length()), offset, false);
+ num_functions_, static_cast<uint32_t>(bytes.length()), offset, false);
- uint32_t index = next_function_ + decoder_.module()->num_imported_functions;
+ int index = num_functions_ + decoder_.module()->num_imported_functions;
compilation_unit_builder_->AddUnit(index);
- ++next_function_;
+ ++num_functions_;
// This method always succeeds. The return value is necessary to comply with
// the StreamingProcessor interface.
return true;
@@ -1499,8 +1403,7 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
// CreateNativeModule, PrepareRuntimeObjects and FinishCompile as this is a
// callback from the embedder.
HandleScope scope(job_->isolate_);
- SaveContext saved_context(job_->isolate_);
- job_->isolate_->set_context(*job_->native_context_);
+ SaveAndSwitchContext saved_context(job_->isolate_, *job_->native_context_);
bool needs_finish = job_->DecrementAndCheckFinisherCount();
if (job_->native_module_ == nullptr) {
@@ -1532,19 +1435,14 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
// DeserializeNativeModule and FinishCompile assume that they are executed in
// a HandleScope, and that a context is set on the isolate.
HandleScope scope(job_->isolate_);
- SaveContext saved_context(job_->isolate_);
- job_->isolate_->set_context(*job_->native_context_);
+ SaveAndSwitchContext saved_context(job_->isolate_, *job_->native_context_);
MaybeHandle<WasmModuleObject> result =
DeserializeNativeModule(job_->isolate_, module_bytes, wire_bytes);
if (result.is_null()) return false;
- job_->module_object_ = result.ToHandleChecked();
- {
- DeferredHandleScope deferred(job_->isolate_);
- job_->module_object_ = handle(*job_->module_object_, job_->isolate_);
- job_->deferred_handles_.push_back(deferred.Detach());
- }
+ job_->module_object_ =
+ job_->isolate_->global_handles()->Create(*result.ToHandleChecked());
job_->native_module_ = job_->module_object_->shared_native_module();
auto owned_wire_bytes = OwnedVector<uint8_t>::Of(wire_bytes);
job_->wire_bytes_ = ModuleWireBytes(owned_wire_bytes.as_vector());
@@ -1553,36 +1451,32 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
return true;
}
-CompilationStateImpl::CompilationStateImpl(internal::Isolate* isolate,
- NativeModule* native_module)
- : isolate_(isolate),
- native_module_(native_module),
+CompilationStateImpl::CompilationStateImpl(
+ NativeModule* native_module, std::shared_ptr<Counters> async_counters)
+ : native_module_(native_module),
+ background_compile_token_(
+ std::make_shared<BackgroundCompileToken>(native_module)),
compile_mode_(FLAG_wasm_tier_up &&
native_module->module()->origin == kWasmOrigin
? CompileMode::kTiering
: CompileMode::kRegular),
- should_log_code_(WasmCode::ShouldBeLogged(isolate)),
+ async_counters_(std::move(async_counters)),
max_background_tasks_(std::max(
1, std::min(FLAG_wasm_num_compilation_tasks,
- V8::GetCurrentPlatform()->NumberOfWorkerThreads()))) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- v8::Platform* platform = V8::GetCurrentPlatform();
- foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
-}
+ V8::GetCurrentPlatform()->NumberOfWorkerThreads()))) {}
CompilationStateImpl::~CompilationStateImpl() {
- DCHECK(background_task_manager_.canceled());
- DCHECK(foreground_task_manager_.canceled());
CompilationError* error = compile_error_.load(std::memory_order_acquire);
if (error != nullptr) delete error;
}
-void CompilationStateImpl::CancelAndWait() {
- background_task_manager_.CancelAndWait();
- foreground_task_manager_.CancelAndWait();
+void CompilationStateImpl::AbortCompilation() {
+ background_compile_token_->Cancel();
+ // No more callbacks after abort.
+ callbacks_.clear();
}
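The {BackgroundCompileToken} used here replaces the cancel-and-wait task managers: background tasks observe a shared token instead of being joined. A minimal sketch of the pattern, assuming a simple atomic flag (V8's real token additionally synchronizes with tasks that are mid-unit; all names below are illustrative):

    #include <atomic>
    #include <memory>

    // Illustrative stand-in for a shared cancellation token.
    class CancellationToken {
     public:
      void Cancel() { cancelled_.store(true, std::memory_order_relaxed); }
      bool IsCancelled() const {
        return cancelled_.load(std::memory_order_relaxed);
      }

     private:
      std::atomic<bool> cancelled_{false};
    };

    // Background tasks share ownership of the token and re-check it
    // between compilation units, so aborting never blocks the caller.
    void BackgroundLoop(std::shared_ptr<CancellationToken> token) {
      while (!token->IsCancelled()) {
        // ... compile one unit, then loop and re-check ...
        break;  // placeholder so this sketch terminates
      }
    }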
-void CompilationStateImpl::SetNumberOfFunctionsToCompile(size_t num_functions) {
+void CompilationStateImpl::SetNumberOfFunctionsToCompile(int num_functions) {
DCHECK(!failed());
base::MutexGuard guard(&mutex_);
outstanding_baseline_units_ = num_functions;
@@ -1604,7 +1498,8 @@ void CompilationStateImpl::AddCompilationUnits(
if (compile_mode_ == CompileMode::kTiering) {
DCHECK_EQ(baseline_units.size(), tiering_units.size());
- DCHECK_EQ(tiering_units.back()->tier(), ExecutionTier::kOptimized);
+ DCHECK_EQ(tiering_units.back()->requested_tier(),
+ ExecutionTier::kOptimized);
tiering_compilation_units_.insert(
tiering_compilation_units_.end(),
std::make_move_iterator(tiering_units.begin()),
@@ -1639,26 +1534,10 @@ CompilationStateImpl::GetNextCompilationUnit() {
return std::unique_ptr<WasmCompilationUnit>();
}
-std::unique_ptr<WasmCompilationUnit>
-CompilationStateImpl::GetNextExecutedUnit() {
- std::vector<std::unique_ptr<WasmCompilationUnit>>& units = finish_units();
- base::MutexGuard guard(&mutex_);
- if (units.empty()) return {};
- std::unique_ptr<WasmCompilationUnit> ret = std::move(units.back());
- units.pop_back();
- return ret;
-}
-
-bool CompilationStateImpl::HasCompilationUnitToFinish() {
- return !finish_units().empty();
-}
-
void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
// This mutex guarantees that events happen in the right order.
base::MutexGuard guard(&mutex_);
- if (failed()) return;
-
// If we are *not* compiling in tiering mode, then all units are counted as
// baseline units.
bool is_tiering_mode = compile_mode_ == CompileMode::kTiering;
@@ -1668,9 +1547,6 @@ void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
// tiering units.
DCHECK_IMPLIES(!is_tiering_mode, outstanding_tiering_units_ == 0);
- // Bitset of events to deliver.
- base::EnumSet<CompilationEvent> events;
-
if (is_tiering_unit) {
DCHECK_LT(0, outstanding_tiering_units_);
--outstanding_tiering_units_;
@@ -1678,56 +1554,30 @@ void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
// If baseline compilation has not finished yet, then also trigger
// {kFinishedBaselineCompilation}.
if (outstanding_baseline_units_ > 0) {
- events.Add(CompilationEvent::kFinishedBaselineCompilation);
+ NotifyOnEvent(CompilationEvent::kFinishedBaselineCompilation);
}
- events.Add(CompilationEvent::kFinishedTopTierCompilation);
+ NotifyOnEvent(CompilationEvent::kFinishedTopTierCompilation);
}
} else {
DCHECK_LT(0, outstanding_baseline_units_);
--outstanding_baseline_units_;
if (outstanding_baseline_units_ == 0) {
- events.Add(CompilationEvent::kFinishedBaselineCompilation);
+ NotifyOnEvent(CompilationEvent::kFinishedBaselineCompilation);
// If we are not tiering, then we also trigger the "top tier finished"
// event when baseline compilation is finished.
if (!is_tiering_mode) {
- events.Add(CompilationEvent::kFinishedTopTierCompilation);
+ NotifyOnEvent(CompilationEvent::kFinishedTopTierCompilation);
}
}
}
- if (!events.empty()) {
- auto notify_events = [this, events] {
- for (auto event : {CompilationEvent::kFinishedBaselineCompilation,
- CompilationEvent::kFinishedTopTierCompilation}) {
- if (!events.contains(event)) continue;
- NotifyOnEvent(event, nullptr);
- }
- };
- foreground_task_runner_->PostTask(
- MakeCancelableTask(&foreground_task_manager_, notify_events));
- }
-
- if (should_log_code_ && code != nullptr) {
- if (log_codes_task_ == nullptr) {
- auto new_task = base::make_unique<LogCodesTask>(&foreground_task_manager_,
- this, isolate_);
- log_codes_task_ = new_task.get();
- foreground_task_runner_->PostTask(std::move(new_task));
- }
- log_codes_task_->AddCode(code);
- }
+ if (code != nullptr) native_module_->engine()->LogCode(code);
}
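The bookkeeping above reduces to a small decision table. A simplified model of the counting logic, without the mutex and the code-logging hook (illustrative only):

    enum class Event { kFinishedBaseline, kFinishedTopTier };

    struct Progress {
      int outstanding_baseline;
      int outstanding_tiering;
      bool tiering_mode;

      template <typename Notify>
      void OnFinishedUnit(bool is_tiering_unit, Notify notify) {
        if (is_tiering_unit) {
          if (--outstanding_tiering == 0) {
            // All tiering units done; flush a pending baseline event too.
            if (outstanding_baseline > 0) notify(Event::kFinishedBaseline);
            notify(Event::kFinishedTopTier);
          }
        } else if (--outstanding_baseline == 0) {
          notify(Event::kFinishedBaseline);
          // Without tiering, baseline completion is also top-tier completion.
          if (!tiering_mode) notify(Event::kFinishedTopTier);
        }
      }
    };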
void CompilationStateImpl::RestartBackgroundCompileTask() {
- auto task = base::make_unique<BackgroundCompileTask>(
- &background_task_manager_, native_module_, isolate_->counters());
-
- // If --wasm-num-compilation-tasks=0 is passed, do only spawn foreground
- // tasks. This is used to make timing deterministic.
- if (FLAG_wasm_num_compilation_tasks == 0) {
- foreground_task_runner_->PostTask(std::move(task));
- return;
- }
+ auto task =
+ native_module_->engine()->NewBackgroundCompileTask<BackgroundCompileTask>(
+ background_compile_token_, async_counters_);
if (baseline_compilation_finished()) {
V8::GetCurrentPlatform()->CallLowPriorityTaskOnWorkerThread(
@@ -1761,8 +1611,8 @@ void CompilationStateImpl::PublishDetectedFeatures(
UpdateFeatureUseCounts(isolate, detected_features_);
}
-void CompilationStateImpl::RestartBackgroundTasks(size_t max) {
- size_t num_restart;
+void CompilationStateImpl::RestartBackgroundTasks() {
+ int num_restart;
{
base::MutexGuard guard(&mutex_);
// No need to restart tasks if compilation already failed.
@@ -1772,8 +1622,11 @@ void CompilationStateImpl::RestartBackgroundTasks(size_t max) {
if (num_background_tasks_ == max_background_tasks_) return;
size_t num_compilation_units =
baseline_compilation_units_.size() + tiering_compilation_units_.size();
- size_t stopped_tasks = max_background_tasks_ - num_background_tasks_;
- num_restart = std::min(max, std::min(num_compilation_units, stopped_tasks));
+ num_restart = max_background_tasks_ - num_background_tasks_;
+ DCHECK_LE(0, num_restart);
+ if (num_compilation_units < static_cast<size_t>(num_restart)) {
+ num_restart = static_cast<int>(num_compilation_units);
+ }
num_background_tasks_ += num_restart;
}
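The replacement drops the {max} parameter and always tops tasks up to the configured maximum, clamped by the number of queued compilation units. The arithmetic as a standalone sketch (hypothetical helper):

    #include <algorithm>
    #include <cstddef>

    int NumTasksToRestart(int max_tasks, int running_tasks,
                          size_t queued_units) {
      int free_slots = max_tasks - running_tasks;  // invariant: >= 0
      return static_cast<int>(
          std::min<size_t>(queued_units, static_cast<size_t>(free_slots)));
    }
    // NumTasksToRestart(8, 3, 2) == 2; NumTasksToRestart(8, 3, 40) == 5.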
@@ -1782,31 +1635,6 @@ void CompilationStateImpl::RestartBackgroundTasks(size_t max) {
}
}
-bool CompilationStateImpl::SetFinisherIsRunning(bool value) {
- base::MutexGuard guard(&mutex_);
- if (finisher_is_running_ == value) return false;
- finisher_is_running_ = value;
- return true;
-}
-
-void CompilationStateImpl::ScheduleFinisherTask() {
- foreground_task_runner_->PostTask(
- base::make_unique<FinishCompileTask>(this, &foreground_task_manager_));
-}
-
-void CompilationStateImpl::Abort() {
- SetError(0, WasmError{0, "Compilation aborted"});
- background_task_manager_.CancelAndWait();
- // No more callbacks after abort. Don't free the std::function objects here,
- // since this might clear references in the embedder, which is only allowed on
- // the main thread.
- aborted_.store(true);
- if (!callbacks_.empty()) {
- foreground_task_runner_->PostTask(
- base::make_unique<FreeCallbacksTask>(this));
- }
-}
-
void CompilationStateImpl::SetError(uint32_t func_index,
const WasmError& error) {
DCHECK(error.has_error());
@@ -1822,18 +1650,11 @@ void CompilationStateImpl::SetError(uint32_t func_index,
compile_error.release();
// Schedule a foreground task to call the callback and notify users about the
// compile error.
- foreground_task_runner_->PostTask(
- MakeCancelableTask(&foreground_task_manager_, [this] {
- WasmError error = GetCompileError();
- NotifyOnEvent(CompilationEvent::kFailedCompilation, &error);
- }));
-}
-
-void CompilationStateImpl::NotifyOnEvent(CompilationEvent event,
- const WasmError* error) {
- if (aborted_.load()) return;
- HandleScope scope(isolate_);
- for (auto& callback : callbacks_) callback(event, error);
+ NotifyOnEvent(CompilationEvent::kFailedCompilation);
+}
+
+void CompilationStateImpl::NotifyOnEvent(CompilationEvent event) {
+ for (auto& callback : callbacks_) callback(event);
// If no more events are expected after this one, clear the callbacks to free
// memory. We can safely do this here, as this method is only called from
// foreground tasks.
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 7f860ac036..04f0bd2042 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -92,7 +92,8 @@ class AsyncCompileJob {
class DecodeModule; // Step 1 (async)
class DecodeFail; // Step 1b (sync)
class PrepareAndStartCompile; // Step 2 (sync)
- class CompileFailed; // Step 4b (sync)
+ class CompileFailed; // Step 3a (sync)
+ class CompileFinished; // Step 3b (sync)
friend class AsyncStreamingProcessor;
@@ -100,6 +101,7 @@ class AsyncCompileJob {
// function should finish the asynchronous compilation, see the comment on
// {outstanding_finishers_}.
V8_WARN_UNUSED_RESULT bool DecrementAndCheckFinisherCount() {
+ DCHECK_LT(0, outstanding_finishers_.load());
return outstanding_finishers_.fetch_sub(1) == 1;
}
@@ -108,7 +110,7 @@ class AsyncCompileJob {
void FinishCompile();
- void AsyncCompileFailed(Handle<Object> error_reason);
+ void AsyncCompileFailed(const WasmError&);
void AsyncCompileSucceeded(Handle<WasmModuleObject> result);
@@ -159,7 +161,6 @@ class AsyncCompileJob {
Handle<Context> native_context_;
const std::shared_ptr<CompilationResultResolver> resolver_;
- std::vector<DeferredHandles*> deferred_handles_;
Handle<WasmModuleObject> module_object_;
std::shared_ptr<NativeModule> native_module_;
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 25a6633178..c60eeba44f 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -367,7 +367,7 @@ class ModuleDecoderImpl : public Decoder {
// Check if the section is out-of-order.
if (section_code < next_ordered_section_ &&
section_code < kFirstUnorderedSection) {
- errorf(pc(), "unexpected section: %s", SectionName(section_code));
+ errorf(pc(), "unexpected section <%s>", SectionName(section_code));
return;
}
@@ -446,18 +446,18 @@ class ModuleDecoderImpl : public Decoder {
if (enabled_features_.bulk_memory) {
DecodeDataCountSection();
} else {
- errorf(pc(), "unexpected section: %s", SectionName(section_code));
+ errorf(pc(), "unexpected section <%s>", SectionName(section_code));
}
break;
case kExceptionSectionCode:
if (enabled_features_.eh) {
DecodeExceptionSection();
} else {
- errorf(pc(), "unexpected section: %s", SectionName(section_code));
+ errorf(pc(), "unexpected section <%s>", SectionName(section_code));
}
break;
default:
- errorf(pc(), "unexpected section: %s", SectionName(section_code));
+ errorf(pc(), "unexpected section <%s>", SectionName(section_code));
return;
}
@@ -524,6 +524,7 @@ class ModuleDecoderImpl : public Decoder {
// ===== Imported table ==========================================
if (!AddTable(module_.get())) break;
import->index = static_cast<uint32_t>(module_->tables.size());
+ module_->num_imported_tables++;
module_->tables.emplace_back();
WasmTable* table = &module_->tables.back();
table->imported = true;
@@ -680,7 +681,8 @@ class ModuleDecoderImpl : public Decoder {
switch (exp->kind) {
case kExternalFunction: {
WasmFunction* func = nullptr;
- exp->index = consume_func_index(module_.get(), &func);
+ exp->index =
+ consume_func_index(module_.get(), &func, "export function index");
module_->num_exported_functions++;
if (func) func->exported = true;
break;
@@ -757,7 +759,8 @@ class ModuleDecoderImpl : public Decoder {
void DecodeStartSection() {
WasmFunction* func;
const byte* pos = pc_;
- module_->start_function_index = consume_func_index(module_.get(), &func);
+ module_->start_function_index =
+ consume_func_index(module_.get(), &func, "start function index");
if (func &&
(func->sig->parameter_count() > 0 || func->sig->return_count() > 0)) {
error(pos, "invalid start function: non-zero parameter or return count");
@@ -791,10 +794,16 @@ class ModuleDecoderImpl : public Decoder {
table_index);
break;
}
+ } else {
+ ValueType type = consume_reference_type();
+ if (type != kWasmAnyFunc) {
+ error(pc_ - 1, "invalid element segment type");
+ break;
+ }
}
uint32_t num_elem =
- consume_count("number of elements", kV8MaxWasmTableEntries);
+ consume_count("number of elements", max_table_init_entries());
if (is_active) {
module_->elem_segments.emplace_back(table_index, offset);
} else {
@@ -803,11 +812,9 @@ class ModuleDecoderImpl : public Decoder {
WasmElemSegment* init = &module_->elem_segments.back();
for (uint32_t j = 0; j < num_elem; j++) {
- WasmFunction* func = nullptr;
- uint32_t index = consume_func_index(module_.get(), &func);
- DCHECK_IMPLIES(ok(), func != nullptr);
- if (!ok()) break;
- DCHECK_EQ(index, func->func_index);
+ uint32_t index = is_active ? consume_element_func_index()
+ : consume_passive_element();
+ if (failed()) break;
init->entries.push_back(index);
}
}
@@ -1170,8 +1177,6 @@ class ModuleDecoderImpl : public Decoder {
}
}
- // Decodes a single data segment entry inside a module starting at {pc_}.
-
// Calculate individual global offsets and total size of globals table.
void CalculateGlobalOffsets(WasmModule* module) {
uint32_t untagged_offset = 0;
@@ -1267,8 +1272,9 @@ class ModuleDecoderImpl : public Decoder {
return count;
}
- uint32_t consume_func_index(WasmModule* module, WasmFunction** func) {
- return consume_index("function index", module->functions, func);
+ uint32_t consume_func_index(WasmModule* module, WasmFunction** func,
+ const char* name) {
+ return consume_index(name, module->functions, func);
}
uint32_t consume_global_index(WasmModule* module, WasmGlobal** global) {
@@ -1569,13 +1575,10 @@ class ModuleDecoderImpl : public Decoder {
flags = consume_u32v("flags");
if (failed()) return;
} else {
- flags = consume_u32v(name);
- if (failed()) return;
-
- if (flags != 0) {
- errorf(pos, "illegal %s %u != 0", name, flags);
- return;
- }
+ // Without the bulk memory proposal, we should still read the table index.
+ // This is the same as reading the `ActiveWithIndex` flag with the bulk
+ // memory proposal.
+ flags = SegmentFlags::kActiveWithIndex;
}
bool read_index;
@@ -1607,6 +1610,37 @@ class ModuleDecoderImpl : public Decoder {
*offset = consume_init_expr(module_.get(), kWasmI32);
}
}
+
+ uint32_t consume_element_func_index() {
+ WasmFunction* func = nullptr;
+ uint32_t index =
+ consume_func_index(module_.get(), &func, "element function index");
+ if (failed()) return index;
+ DCHECK_NE(func, nullptr);
+ DCHECK_EQ(index, func->func_index);
+ DCHECK_NE(index, WasmElemSegment::kNullIndex);
+ return index;
+ }
+
+ uint32_t consume_passive_element() {
+ uint32_t index = WasmElemSegment::kNullIndex;
+ uint8_t opcode = consume_u8("element opcode");
+ if (failed()) return index;
+ switch (opcode) {
+ case kExprRefNull:
+ index = WasmElemSegment::kNullIndex;
+ break;
+ case kExprRefFunc:
+ index = consume_element_func_index();
+ if (failed()) return index;
+ break;
+ default:
+ error("invalid opcode in element");
+ break;
+ }
+ expect_u8("end opcode", kExprEnd);
+ return index;
+ }
};
ModuleResult DecodeWasmModule(const WasmFeatures& enabled,
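The new {consume_passive_element} reads one tiny init expression per entry: either `ref.null end` or `ref.func <index> end`. A self-contained sketch of that wire format (the opcode values are the real wasm ones; the helper names and error-free LEB reader are illustrative):

    #include <cstdint>
    #include <optional>

    constexpr uint8_t kExprRefNull = 0xd0;
    constexpr uint8_t kExprRefFunc = 0xd2;
    constexpr uint8_t kExprEnd = 0x0b;

    // Minimal LEB128 u32 reader (no bounds or error checks; sketch only).
    uint32_t ReadU32Leb(const uint8_t*& p) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t b;
      do {
        b = *p++;
        result |= uint32_t(b & 0x7f) << shift;
        shift += 7;
      } while (b & 0x80);
      return result;
    }

    // Returns the function index for ref.func, nullopt for ref.null
    // (which the decoder maps to WasmElemSegment::kNullIndex).
    std::optional<uint32_t> DecodePassiveEntry(const uint8_t*& p) {
      uint8_t opcode = *p++;
      std::optional<uint32_t> index;
      if (opcode == kExprRefFunc) index = ReadU32Leb(p);
      ++p;  // the trailing kExprEnd byte
      return index;
    }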
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 04c0f3cf44..237f047db8 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -4,6 +4,7 @@
#include "src/wasm/module-instantiate.h"
#include "src/asmjs/asm-js.h"
+#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
#include "src/property-descriptor.h"
#include "src/utils.h"
#include "src/wasm/js-to-wasm-wrapper-cache-inl.h"
@@ -25,6 +26,32 @@ namespace {
byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
}
+
+uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
+ const WasmInitExpr& expr) {
+ switch (expr.kind) {
+ case WasmInitExpr::kI32Const:
+ return expr.val.i32_const;
+ case WasmInitExpr::kGlobalIndex: {
+ uint32_t offset =
+ instance->module()->globals[expr.val.global_index].offset;
+ auto raw_addr =
+ reinterpret_cast<Address>(
+ instance->untagged_globals_buffer()->backing_store()) +
+ offset;
+ return ReadLittleEndianValue<uint32_t>(raw_addr);
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Represents the initialized state of a table.
+struct TableInstance {
+ Handle<WasmTableObject> table_object; // WebAssembly.Table instance
+ Handle<FixedArray> js_functions; // JSFunctions exported
+ size_t table_size;
+};
} // namespace
// A helper class to simplify instantiating a module from a module object.
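The {kGlobalIndex} case of the hoisted {EvalUint32InitExpr} now reads through the instance's untagged-globals buffer instead of the builder's cached handle. What {ReadLittleEndianValue<uint32_t>} boils down to on a little-endian host (sketch; the real helper also byte-swaps on big-endian targets):

    #include <cstdint>
    #include <cstring>

    uint32_t ReadGlobalU32(const uint8_t* globals_backing_store,
                           uint32_t byte_offset) {
      uint32_t value;
      std::memcpy(&value, globals_backing_store + byte_offset, sizeof(value));
      return value;
    }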
@@ -42,13 +69,6 @@ class InstanceBuilder {
bool ExecuteStartFunction();
private:
- // Represents the initialized state of a table.
- struct TableInstance {
- Handle<WasmTableObject> table_object; // WebAssembly.Table instance
- Handle<FixedArray> js_wrappers; // JSFunctions exported
- size_t table_size;
- };
-
// A pre-evaluated value to use in import binding.
struct SanitizedImport {
Handle<String> module_name;
@@ -66,7 +86,6 @@ class InstanceBuilder {
Handle<JSArrayBuffer> untagged_globals_;
Handle<FixedArray> tagged_globals_;
std::vector<TableInstance> table_instances_;
- std::vector<Handle<JSFunction>> js_wrappers_;
std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
Handle<WasmExportedFunction> start_function_;
JSToWasmWrapperCache js_to_wasm_cache_;
@@ -109,8 +128,6 @@ class InstanceBuilder {
MaybeHandle<Object> LookupImportAsm(uint32_t index,
Handle<String> import_name);
- uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
-
// Load data segments into the memory.
void LoadDataSegments(Handle<WasmInstanceObject> instance);
@@ -170,7 +187,8 @@ class InstanceBuilder {
void InitGlobals();
// Allocate memory for a module instance as a new JSArrayBuffer.
- Handle<JSArrayBuffer> AllocateMemory(uint32_t num_pages);
+ Handle<JSArrayBuffer> AllocateMemory(uint32_t initial_pages,
+ uint32_t maximum_pages);
bool NeedsWrappers() const;
@@ -245,6 +263,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
auto initial_pages_counter = SELECT_WASM_COUNTER(
isolate_->counters(), module_->origin, wasm, min_mem_pages_count);
initial_pages_counter->AddSample(initial_pages);
+ if (module_->has_maximum_pages) {
+ DCHECK_EQ(kWasmOrigin, module_->origin);
+ auto max_pages_counter =
+ isolate_->counters()->wasm_wasm_max_mem_pages_count();
+ max_pages_counter->AddSample(module_->maximum_pages);
+ }
// Asm.js has memory_ already set at this point, so we don't want to
// overwrite it.
if (memory_.is_null()) {
@@ -263,7 +287,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// even when the size is zero to prevent null-dereference issues
// (e.g. https://crbug.com/769637).
// Allocate memory if the initial size is more than 0 pages.
- memory_ = AllocateMemory(initial_pages);
+ memory_ = AllocateMemory(initial_pages, module_->maximum_pages);
if (memory_.is_null()) {
// failed to allocate memory
DCHECK(isolate_->has_pending_exception() || thrower_->error());
@@ -367,9 +391,18 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Reserve the metadata for indirect function tables.
+ // Set up table storage space.
//--------------------------------------------------------------------------
int table_count = static_cast<int>(module_->tables.size());
+ Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
+ for (int i = module_->num_imported_tables; i < table_count; i++) {
+ const WasmTable& table = module_->tables[i];
+ Handle<WasmTableObject> table_obj = WasmTableObject::New(
+ isolate_, table.initial_size, table.maximum_size, nullptr);
+ tables->set(i, *table_obj);
+ }
+ instance->set_tables(*tables);
+
table_instances_.resize(table_count);
//--------------------------------------------------------------------------
@@ -427,7 +460,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
for (const WasmElemSegment& elem_segment : module_->elem_segments) {
if (!elem_segment.active) continue;
DCHECK(elem_segment.table_index < table_instances_.size());
- uint32_t base = EvalUint32InitExpr(elem_segment.offset);
+ uint32_t base = EvalUint32InitExpr(instance, elem_segment.offset);
size_t table_size = table_instances_[elem_segment.table_index].table_size;
if (!IsInBounds(base, elem_segment.entries.size(), table_size)) {
thrower_->LinkError("table initializer is out of bounds");
@@ -440,7 +473,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
for (const WasmDataSegment& seg : module_->data_segments) {
if (!seg.active) continue;
- uint32_t base = EvalUint32InitExpr(seg.dest_addr);
+ uint32_t base = EvalUint32InitExpr(instance, seg.dest_addr);
if (!IsInBounds(base, seg.source.length(), instance->memory_size())) {
thrower_->LinkError("data segment is out of bounds");
return {};
@@ -595,20 +628,6 @@ MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
return result;
}
-uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
- switch (expr.kind) {
- case WasmInitExpr::kI32Const:
- return expr.val.i32_const;
- case WasmInitExpr::kGlobalIndex: {
- uint32_t offset = module_->globals[expr.val.global_index].offset;
- return ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(raw_buffer_ptr(untagged_globals_, offset)));
- }
- default:
- UNREACHABLE();
- }
-}
-
// Load data segments into the memory.
void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
Vector<const uint8_t> wire_bytes =
@@ -619,7 +638,7 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
if (source_size == 0) continue;
// Passive segments are not copied during instantiation.
if (!segment.active) continue;
- uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
+ uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
DCHECK(IsInBounds(dest_offset, source_size, instance->memory_size()));
byte* dest = instance->memory_start() + dest_offset;
const byte* src = wire_bytes.start() + segment.source.offset();
@@ -818,10 +837,10 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
TableInstance& table_instance = table_instances_[table_index];
table_instance.table_object = Handle<WasmTableObject>::cast(value);
instance->set_table_object(*table_instance.table_object);
- table_instance.js_wrappers =
- Handle<FixedArray>(table_instance.table_object->functions(), isolate_);
+ table_instance.js_functions =
+ Handle<FixedArray>(table_instance.table_object->elements(), isolate_);
- int imported_table_size = table_instance.js_wrappers->length();
+ int imported_table_size = table_instance.js_functions->length();
if (imported_table_size < static_cast<int>(table.initial_size)) {
thrower_->LinkError("table import %d is smaller than initial %d, got %u",
import_index, table.initial_size, imported_table_size);
@@ -854,7 +873,7 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
for (int i = 0; i < imported_table_size; ++i) {
- Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
+ Handle<Object> val(table_instance.js_functions->get(i), isolate_);
// TODO(mtrofin): this is the same logic as WasmTableObject::Set:
// insert in the local table a wrapper from the other module, and add
// a reference to the owning instance of the other module.
@@ -1201,18 +1220,25 @@ void InstanceBuilder::InitGlobals() {
}
// Allocate memory for a module instance as a new JSArrayBuffer.
-Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
- if (num_pages > max_mem_pages()) {
+Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t initial_pages,
+ uint32_t maximum_pages) {
+ if (initial_pages > max_mem_pages()) {
thrower_->RangeError("Out of memory: wasm memory too large");
return Handle<JSArrayBuffer>::null();
}
const bool is_shared_memory = module_->has_shared_memory && enabled_.threads;
- SharedFlag shared_flag =
- is_shared_memory ? SharedFlag::kShared : SharedFlag::kNotShared;
Handle<JSArrayBuffer> mem_buffer;
- if (!NewArrayBuffer(isolate_, num_pages * kWasmPageSize, shared_flag)
- .ToHandle(&mem_buffer)) {
- thrower_->RangeError("Out of memory: wasm memory");
+ if (is_shared_memory) {
+ if (!NewSharedArrayBuffer(isolate_, initial_pages * kWasmPageSize,
+ maximum_pages * kWasmPageSize)
+ .ToHandle(&mem_buffer)) {
+ thrower_->RangeError("Out of memory: wasm shared memory");
+ }
+ } else {
+ if (!NewArrayBuffer(isolate_, initial_pages * kWasmPageSize)
+ .ToHandle(&mem_buffer)) {
+ thrower_->RangeError("Out of memory: wasm memory");
+ }
}
return mem_buffer;
}
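Note the split above: shared memories (threads proposal) must declare a maximum, so the shared path can size the full range up front while committing only the initial pages. The sizing arithmetic, with wasm's fixed 64 KiB page (sketch):

    #include <cstdint>

    constexpr uint64_t kWasmPageSize = 64 * 1024;

    uint64_t CommittedBytes(uint32_t initial_pages) {
      return initial_pages * kWasmPageSize;
    }
    uint64_t ReservedBytes(uint32_t maximum_pages) {
      return maximum_pages * kWasmPageSize;
    }
    // e.g. initial=2, maximum=16: commit 128 KiB, reserve 1 MiB.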
@@ -1220,7 +1246,7 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
bool InstanceBuilder::NeedsWrappers() const {
if (module_->num_exported_functions > 0) return true;
for (auto& table_instance : table_instances_) {
- if (!table_instance.js_wrappers.is_null()) return true;
+ if (!table_instance.js_functions.is_null()) return true;
}
for (auto& table : module_->tables) {
if (table.exported) return true;
@@ -1234,20 +1260,18 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<FixedArray> export_wrappers(module_object_->export_wrappers(),
isolate_);
if (NeedsWrappers()) {
- // Fill the table to cache the exported JSFunction wrappers.
- js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
- Handle<JSFunction>::null());
-
// If an imported WebAssembly function gets exported, the exported function
- // has to be identical to to imported function. Therefore we put all
- // imported WebAssembly functions into the js_wrappers_ list.
+ // has to be identical to the imported function. Therefore we cache all
+ // imported WebAssembly functions in the instance.
for (int index = 0, end = static_cast<int>(module_->import_table.size());
index < end; ++index) {
const WasmImport& import = module_->import_table[index];
if (import.kind == kExternalFunction) {
Handle<Object> value = sanitized_imports_[index].value;
if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
- js_wrappers_[import.index] = Handle<JSFunction>::cast(value);
+ WasmInstanceObject::SetWasmExportedFunction(
+ isolate_, instance, import.index,
+ Handle<WasmExportedFunction>::cast(value));
}
}
}
@@ -1298,9 +1322,12 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
switch (exp.kind) {
case kExternalFunction: {
// Wrap and export the code as a JSFunction.
+ // TODO(wasm): reduce duplication with LoadElemSegment() further below
const WasmFunction& function = module_->functions[exp.index];
- Handle<JSFunction> js_function = js_wrappers_[exp.index];
- if (js_function.is_null()) {
+ MaybeHandle<WasmExportedFunction> wasm_exported_function =
+ WasmInstanceObject::GetWasmExportedFunction(isolate_, instance,
+ exp.index);
+ if (wasm_exported_function.is_null()) {
// Wrap the exported code as a JSFunction.
Handle<Code> export_code =
export_wrappers->GetValueChecked<Code>(isolate_, export_index);
@@ -1314,12 +1341,14 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
isolate_, module_object_, func_name_ref)
.ToHandleChecked();
}
- js_function = WasmExportedFunction::New(
+ wasm_exported_function = WasmExportedFunction::New(
isolate_, instance, func_name, function.func_index,
static_cast<int>(function.sig->parameter_count()), export_code);
- js_wrappers_[exp.index] = js_function;
+ WasmInstanceObject::SetWasmExportedFunction(
+ isolate_, instance, exp.index,
+ wasm_exported_function.ToHandleChecked());
}
- desc.set_value(js_function);
+ desc.set_value(wasm_exported_function.ToHandleChecked());
export_index++;
break;
}
@@ -1332,7 +1361,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
: FLAG_wasm_max_table_size;
table_instance.table_object =
WasmTableObject::New(isolate_, table.initial_size, maximum,
- &table_instance.js_wrappers);
+ &table_instance.js_functions);
}
instance->set_table_object(*table_instance.table_object);
desc.set_value(table_instance.table_object);
@@ -1417,7 +1446,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
- isolate_, export_to, name, &desc, kThrowOnError);
+ isolate_, export_to, name, &desc, Just(kThrowOnError));
if (!status.IsJust()) {
DisallowHeapAllocation no_gc;
TruncatedUserString<> trunc_name(name->GetCharVector<uint8_t>(no_gc));
@@ -1440,72 +1469,107 @@ void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
size_t table_count = module_->tables.size();
for (size_t index = 0; index < table_count; ++index) {
const WasmTable& table = module_->tables[index];
- TableInstance& table_instance = table_instances_[index];
if (!instance->has_indirect_function_table() &&
table.type == kWasmAnyFunc) {
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
instance, table.initial_size);
- table_instance.table_size = table.initial_size;
+ table_instances_[index].table_size = table.initial_size;
}
}
}
-void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
- NativeModule* native_module = module_object_->native_module();
- for (auto& elem_segment : module_->elem_segments) {
- // Passive segments are not copied during instantiation.
- if (!elem_segment.active) continue;
+bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
+ const TableInstance& table_instance,
+ JSToWasmWrapperCache* js_to_wasm_cache,
+ const WasmElemSegment& elem_segment, uint32_t dst,
+ uint32_t src, size_t count) {
+ // TODO(wasm): Move this functionality into wasm-objects, since it is used
+ // both during instantiation and in the implementation of the table.init
+ // instruction.
+ if (!IsInBounds(dst, count, table_instance.table_size)) return false;
+ if (!IsInBounds(src, count, elem_segment.entries.size())) return false;
+
+ const WasmModule* module = instance->module();
+ for (uint32_t i = 0; i < count; ++i) {
+ uint32_t func_index = elem_segment.entries[src + i];
+ int entry_index = static_cast<int>(dst + i);
+
+ if (func_index == WasmElemSegment::kNullIndex) {
+ IndirectFunctionTableEntry(instance, entry_index).clear();
+ if (!table_instance.table_object.is_null()) {
+ WasmTableObject::Set(isolate, table_instance.table_object, entry_index,
+ Handle<JSFunction>::null());
+ }
+ continue;
+ }
- uint32_t base = EvalUint32InitExpr(elem_segment.offset);
- uint32_t num_entries = static_cast<uint32_t>(elem_segment.entries.size());
- uint32_t index = elem_segment.table_index;
- TableInstance& table_instance = table_instances_[index];
- DCHECK(IsInBounds(base, num_entries, table_instance.table_size));
- for (uint32_t i = 0; i < num_entries; ++i) {
- uint32_t func_index = elem_segment.entries[i];
- const WasmFunction* function = &module_->functions[func_index];
- int table_index = static_cast<int>(i + base);
+ const WasmFunction* function = &module->functions[func_index];
- // Update the local dispatch table first.
- uint32_t sig_id = module_->signature_ids[function->sig_index];
- IndirectFunctionTableEntry(instance, table_index)
- .Set(sig_id, instance, func_index);
+ // Update the local dispatch table first.
+ uint32_t sig_id = module->signature_ids[function->sig_index];
+ IndirectFunctionTableEntry(instance, entry_index)
+ .Set(sig_id, instance, func_index);
- if (!table_instance.table_object.is_null()) {
- // Update the table object's other dispatch tables.
- if (js_wrappers_[func_index].is_null()) {
- // No JSFunction entry yet exists for this function. Create one.
- // TODO(titzer): We compile JS->wasm wrappers for functions are
- // not exported but are in an exported table. This should be done
- // at module compile time and cached instead.
-
- Handle<Code> wrapper_code =
- js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
- isolate_, function->sig, function->imported);
- MaybeHandle<String> func_name;
- if (module_->origin == kAsmJsOrigin) {
- // For modules arising from asm.js, honor the names section.
- WireBytesRef func_name_ref = module_->LookupFunctionName(
- ModuleWireBytes(native_module->wire_bytes()), func_index);
- func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate_, module_object_, func_name_ref)
- .ToHandleChecked();
- }
- Handle<WasmExportedFunction> js_function = WasmExportedFunction::New(
- isolate_, instance, func_name, func_index,
- static_cast<int>(function->sig->parameter_count()), wrapper_code);
- js_wrappers_[func_index] = js_function;
+ if (!table_instance.table_object.is_null()) {
+ // Update the table object's other dispatch tables.
+ MaybeHandle<WasmExportedFunction> wasm_exported_function =
+ WasmInstanceObject::GetWasmExportedFunction(isolate, instance,
+ func_index);
+ if (wasm_exported_function.is_null()) {
+ // No JSFunction entry yet exists for this function. Create one.
+ // TODO(titzer): We compile JS->wasm wrappers for functions that are
+ // not exported but are in an exported table. This should be done
+ // at module compile time and cached instead.
+
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache->GetOrCompileJSToWasmWrapper(
+ isolate, function->sig, function->imported);
+ MaybeHandle<String> func_name;
+ if (module->origin == kAsmJsOrigin) {
+ // For modules arising from asm.js, honor the names section.
+ auto module_object =
+ Handle<WasmModuleObject>(instance->module_object(), isolate);
+ WireBytesRef func_name_ref = module->LookupFunctionName(
+ ModuleWireBytes(module_object->native_module()->wire_bytes()),
+ func_index);
+ func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, func_name_ref)
+ .ToHandleChecked();
}
- table_instance.js_wrappers->set(table_index, *js_wrappers_[func_index]);
- // UpdateDispatchTables() updates all other dispatch tables, since
- // we have not yet added the dispatch table we are currently building.
- WasmTableObject::UpdateDispatchTables(
- isolate_, table_instance.table_object, table_index, function->sig,
- instance, func_index);
+ wasm_exported_function = WasmExportedFunction::New(
+ isolate, instance, func_name, func_index,
+ static_cast<int>(function->sig->parameter_count()), wrapper_code);
+ WasmInstanceObject::SetWasmExportedFunction(
+ isolate, instance, func_index,
+ wasm_exported_function.ToHandleChecked());
}
+ table_instance.js_functions->set(
+ entry_index, *wasm_exported_function.ToHandleChecked());
+ // UpdateDispatchTables() updates all other dispatch tables, since
+ // we have not yet added the dispatch table we are currently building.
+ WasmTableObject::UpdateDispatchTables(
+ isolate, table_instance.table_object, entry_index, function->sig,
+ instance, func_index);
}
}
+ return true;
+}
+
+void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
+ for (auto& elem_segment : module_->elem_segments) {
+ // Passive segments are not copied during instantiation.
+ if (!elem_segment.active) continue;
+
+ uint32_t dst = EvalUint32InitExpr(instance, elem_segment.offset);
+ uint32_t src = 0;
+ size_t count = elem_segment.entries.size();
+
+ bool success = LoadElemSegmentImpl(
+ isolate_, instance, table_instances_[elem_segment.table_index],
+ &js_to_wasm_cache_, elem_segment, dst, src, count);
+ CHECK(success);
+ }
int table_count = static_cast<int>(module_->tables.size());
for (int index = 0; index < table_count; ++index) {
@@ -1530,6 +1594,26 @@ void InstanceBuilder::InitializeExceptions(
}
}
+bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t table_index, uint32_t segment_index, uint32_t dst,
+ uint32_t src, uint32_t count) {
+ JSToWasmWrapperCache js_to_wasm_cache;
+
+ Handle<WasmTableObject> table_object;
+ Handle<FixedArray> js_functions;
+ if (instance->has_table_object()) {
+ table_object = Handle<WasmTableObject>(instance->table_object(), isolate);
+ js_functions = Handle<FixedArray>(table_object->elements(), isolate);
+ }
+
+ TableInstance table_instance = {table_object, js_functions,
+ instance->indirect_function_table_size()};
+
+ auto& elem_segment = instance->module()->elem_segments[segment_index];
+ return LoadElemSegmentImpl(isolate, instance, table_instance,
+ &js_to_wasm_cache, elem_segment, dst, src, count);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/module-instantiate.h b/deps/v8/src/wasm/module-instantiate.h
index 15393969b9..9dd2cc2137 100644
--- a/deps/v8/src/wasm/module-instantiate.h
+++ b/deps/v8/src/wasm/module-instantiate.h
@@ -5,6 +5,9 @@
#ifndef V8_WASM_MODULE_INSTANTIATE_H_
#define V8_WASM_MODULE_INSTANTIATE_H_
+#include <stdint.h>
+#include "include/v8config.h"
+
namespace v8 {
namespace internal {
@@ -28,6 +31,10 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory);
+bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t table_index, uint32_t segment_index, uint32_t dst,
+ uint32_t src, uint32_t count) V8_WARN_UNUSED_RESULT;
+
} // namespace wasm
} // namespace internal
} // namespace v8
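Since the result is marked {V8_WARN_UNUSED_RESULT}, callers are expected to turn a false return into a trap. A hypothetical call site, mirroring the bulk-memory {table.init} semantics (sketch only, not an actual call site from this change):

    // A false return means the copy was out of bounds for either the
    // table (dst..dst+count) or the segment (src..src+count):
    // if (!wasm::LoadElemSegment(isolate, instance, table_index,
    //                            segment_index, dst, src, count)) {
    //   /* raise a wasm RuntimeError: table access out of bounds */
    // }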
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 1896178c48..4472eb82e7 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -123,15 +123,18 @@ namespace {
class TopTierCompiledCallback {
public:
- TopTierCompiledCallback(std::shared_ptr<NativeModule> native_module,
+ TopTierCompiledCallback(std::weak_ptr<NativeModule> native_module,
StreamingDecoder::ModuleCompiledCallback callback)
: native_module_(std::move(native_module)),
callback_(std::move(callback)) {}
- void operator()(CompilationEvent event, const WasmError* error) const {
+ void operator()(CompilationEvent event) const {
if (event != CompilationEvent::kFinishedTopTierCompilation) return;
- DCHECK_NULL(error);
- callback_(native_module_);
+ // If the native module is still alive, get back a shared ptr and call the
+ // callback.
+ if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
+ callback_(native_module);
+ }
#ifdef DEBUG
DCHECK(!called_);
called_ = true;
@@ -139,7 +142,7 @@ class TopTierCompiledCallback {
}
private:
- const std::shared_ptr<NativeModule> native_module_;
+ const std::weak_ptr<NativeModule> native_module_;
const StreamingDecoder::ModuleCompiledCallback callback_;
#ifdef DEBUG
mutable bool called_ = false;
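The switch from {shared_ptr} to {weak_ptr} means the pending callback no longer keeps the NativeModule alive. The core pattern in isolation (illustrative types):

    #include <memory>

    struct Payload { int value = 0; };

    void InvokeIfAlive(const std::weak_ptr<Payload>& weak,
                       void (*callback)(const Payload&)) {
      // lock() returns a non-null shared_ptr only if the object is still
      // alive, and keeps it alive for the duration of the call.
      if (std::shared_ptr<Payload> strong = weak.lock()) {
        callback(*strong);
      }  // otherwise: object already gone, callback silently skipped
    }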
@@ -377,17 +380,27 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
SectionBuffer* buf =
streaming->CreateNewBuffer(module_offset_, section_id_, value_,
buffer().SubVector(0, bytes_consumed_));
- if (!buf) return nullptr;
+ DCHECK_NOT_NULL(buf);
if (value_ == 0) {
if (section_id_ == SectionCode::kCodeSectionCode) {
- return streaming->Error("Code section cannot have size 0");
+ return streaming->Error("code section cannot have size 0");
}
+ // Process sections without payload as well, to enforce the section order
+ // and run feature checks specific to each individual section.
streaming->ProcessSection(buf);
if (!streaming->ok()) return nullptr;
// There is no payload, we go to the next section immediately.
return base::make_unique<DecodeSectionID>(streaming->module_offset_);
} else {
if (section_id_ == SectionCode::kCodeSectionCode) {
+ // Explicitly check for multiple code sections, as the module decoder never
+ // sees the code section and hence cannot track this section.
+ if (streaming->code_section_processed_) {
+ // TODO(mstarzinger): This error message (and others in this class) is
+ // different for non-streaming decoding. Bring them in sync and test.
+ return streaming->Error("code section can only appear once");
+ }
+ streaming->code_section_processed_ = true;
// We reached the code section. All functions of the code section are put
// into the same SectionBuffer.
return base::make_unique<DecodeNumberOfFunctions>(buf);
@@ -411,19 +424,21 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
// Copy the bytes we read into the section buffer.
Vector<uint8_t> payload_buf = section_buffer_->payload();
if (payload_buf.size() < bytes_consumed_) {
- return streaming->Error("Invalid code section length");
+ return streaming->Error("invalid code section length");
}
memcpy(payload_buf.start(), buffer().start(), bytes_consumed_);
// {value} is the number of functions.
if (value_ == 0) {
if (payload_buf.size() != bytes_consumed_) {
- return streaming->Error("not all code section bytes were consumed");
+ return streaming->Error("not all code section bytes were used");
}
return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
- streaming->StartCodeSection(value_, streaming->section_buffers_.back());
+ DCHECK_GE(kMaxInt, value_);
+ streaming->StartCodeSection(static_cast<int>(value_),
+ streaming->section_buffers_.back());
if (!streaming->ok()) return nullptr;
return base::make_unique<DecodeFunctionLength>(
section_buffer_, section_buffer_->payload_offset() + bytes_consumed_,
@@ -437,12 +452,12 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
// Copy the bytes we consumed into the section buffer.
Vector<uint8_t> fun_length_buffer = section_buffer_->bytes() + buffer_offset_;
if (fun_length_buffer.size() < bytes_consumed_) {
- return streaming->Error("Invalid code section length");
+ return streaming->Error("read past code section end");
}
memcpy(fun_length_buffer.start(), buffer().start(), bytes_consumed_);
// {value} is the length of the function.
- if (value_ == 0) return streaming->Error("Invalid function length (0)");
+ if (value_ == 0) return streaming->Error("invalid function length (0)");
if (buffer_offset_ + bytes_consumed_ + value_ > section_buffer_->length()) {
return streaming->Error("not enough code section bytes");
@@ -480,14 +495,8 @@ StreamingDecoder::StreamingDecoder(
StreamingDecoder::SectionBuffer* StreamingDecoder::CreateNewBuffer(
uint32_t module_offset, uint8_t section_id, size_t length,
Vector<const uint8_t> length_bytes) {
- // Check the order of sections. Unknown sections can appear at any position.
- if (section_id != kUnknownSectionCode) {
- if (section_id < next_section_id_) {
- Error("Unexpected section");
- return nullptr;
- }
- next_section_id_ = section_id + 1;
- }
+ // Section buffers are allocated in the same order they appear in the
+ // module; they will be processed and later concatenated in that same order.
section_buffers_.emplace_back(std::make_shared<SectionBuffer>(
module_offset, section_id, length, length_bytes));
return section_buffers_.back().get();
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index d4e3ff7d14..0d469a96b3 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -36,7 +36,7 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
// Process the start of the code section. Returns true if the processing
// finished successfully and the decoding should continue.
- virtual bool ProcessCodeSectionHeader(size_t num_functions, uint32_t offset,
+ virtual bool ProcessCodeSectionHeader(int num_functions, uint32_t offset,
std::shared_ptr<WireBytesStorage>) = 0;
// Process a function body. Returns true if the processing finished
@@ -227,7 +227,7 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
}
}
- void StartCodeSection(size_t num_functions,
+ void StartCodeSection(int num_functions,
std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
if (!ok()) return;
// The offset passed to {ProcessCodeSectionHeader} is an error offset and
@@ -261,9 +261,9 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
std::unique_ptr<StreamingProcessor> processor_;
std::unique_ptr<DecodingState> state_;
std::vector<std::shared_ptr<SectionBuffer>> section_buffers_;
+ bool code_section_processed_ = false;
uint32_t module_offset_ = 0;
size_t total_size_ = 0;
- uint8_t next_section_id_ = kFirstSectionInModule;
// Caching support.
ModuleCompiledCallback module_compiled_callback_ = nullptr;
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 02e9c79bd2..5cb24e7911 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -25,6 +25,7 @@ enum ValueType : uint8_t {
kWasmS128,
kWasmAnyRef,
kWasmAnyFunc,
+ kWasmNullRef,
kWasmExceptRef,
kWasmVar,
};
@@ -341,6 +342,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
return "anyref";
case kWasmAnyFunc:
return "anyfunc";
+ case kWasmNullRef:
+ return "nullref";
case kWasmExceptRef:
return "exn";
case kWasmS128:
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index f55508c7a6..0f9da37fa7 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -17,6 +17,8 @@
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
+#include "src/snapshot/embedded-data.h"
+#include "src/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
@@ -34,20 +36,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-namespace {
-
-// Binary predicate to perform lookups in {NativeModule::owned_code_} with a
-// given address into a code object. Use with {std::upper_bound} for example.
-struct WasmCodeUniquePtrComparator {
- bool operator()(Address pc, const std::unique_ptr<WasmCode>& code) const {
- DCHECK_NE(kNullAddress, pc);
- DCHECK_NOT_NULL(code);
- return pc < code->instruction_start();
- }
-};
-
-} // namespace
-
void DisjointAllocationPool::Merge(base::AddressRegion region) {
auto dest_it = regions_.begin();
auto dest_end = regions_.end();
@@ -148,6 +136,9 @@ void WasmCode::RegisterTrapHandlerData() {
bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }
bool WasmCode::ShouldBeLogged(Isolate* isolate) {
+ // The return value is cached in {WasmEngine::IsolateData::log_codes}. Be
+ // sure to call {WasmEngine::EnableCodeLogging} if this return value would
+ // change for any isolate. Otherwise we might lose code events.
return isolate->logger()->is_listening_to_code_events() ||
isolate->is_profiling();
}
@@ -190,19 +181,6 @@ void WasmCode::LogCode(Isolate* isolate) const {
}
}
-const char* WasmCode::GetRuntimeStubName() const {
- DCHECK_EQ(WasmCode::kRuntimeStub, kind());
-#define RETURN_NAME(Name) \
- if (native_module_->runtime_stub_table_[WasmCode::k##Name] == this) { \
- return #Name; \
- }
-#define RETURN_NAME_TRAP(Name) RETURN_NAME(ThrowWasm##Name)
- WASM_RUNTIME_STUB_LIST(RETURN_NAME, RETURN_NAME_TRAP)
-#undef RETURN_NAME_TRAP
-#undef RETURN_NAME
- return "<unknown>";
-}
-
void WasmCode::Validate() const {
#ifdef DEBUG
// We expect certain relocation info modes to never appear in {WasmCode}
@@ -217,6 +195,7 @@ void WasmCode::Validate() const {
WasmCode* code = native_module_->Lookup(target);
CHECK_NOT_NULL(code);
CHECK_EQ(WasmCode::kJumpTable, code->kind());
+ CHECK_EQ(native_module()->jump_table_, code);
CHECK(code->contains(target));
break;
}
@@ -224,8 +203,14 @@ void WasmCode::Validate() const {
Address target = it.rinfo()->wasm_stub_call_address();
WasmCode* code = native_module_->Lookup(target);
CHECK_NOT_NULL(code);
+#ifdef V8_EMBEDDED_BUILTINS
+ CHECK_EQ(WasmCode::kJumpTable, code->kind());
+ CHECK_EQ(native_module()->runtime_stub_table_, code);
+ CHECK(code->contains(target));
+#else
CHECK_EQ(WasmCode::kRuntimeStub, code->kind());
CHECK_EQ(target, code->instruction_start());
+#endif
break;
}
case RelocInfo::INTERNAL_REFERENCE:
@@ -380,17 +365,18 @@ WasmCode::~WasmCode() {
}
}
-NativeModule::NativeModule(Isolate* isolate, const WasmFeatures& enabled,
+NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
bool can_request_more, VirtualMemory code_space,
- WasmCodeManager* code_manager,
- std::shared_ptr<const WasmModule> module)
+ std::shared_ptr<const WasmModule> module,
+ std::shared_ptr<Counters> async_counters)
: enabled_features_(enabled),
module_(std::move(module)),
- compilation_state_(CompilationState::New(isolate, this)),
+ compilation_state_(
+ CompilationState::New(this, std::move(async_counters))),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
new WasmImportWrapperCache(this))),
free_code_space_(code_space.region()),
- code_manager_(code_manager),
+ engine_(engine),
can_request_more_memory_(can_request_more),
use_trap_handler_(trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
: kNoTrapHandler) {
@@ -403,7 +389,8 @@ NativeModule::NativeModule(Isolate* isolate, const WasmFeatures& enabled,
code_table_.reset(new WasmCode*[num_wasm_functions]);
memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
- jump_table_ = CreateEmptyJumpTable(num_wasm_functions);
+ jump_table_ = CreateEmptyJumpTable(
+ JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
}
}
@@ -411,12 +398,15 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
DCHECK_LE(num_functions(), max_functions);
WasmCode** new_table = new WasmCode*[max_functions];
memset(new_table, 0, max_functions * sizeof(*new_table));
- memcpy(new_table, code_table_.get(),
- module_->num_declared_functions * sizeof(*new_table));
+ if (module_->num_declared_functions > 0) {
+ memcpy(new_table, code_table_.get(),
+ module_->num_declared_functions * sizeof(*new_table));
+ }
code_table_.reset(new_table);
// Re-allocate jump table.
- jump_table_ = CreateEmptyJumpTable(max_functions);
+ jump_table_ = CreateEmptyJumpTable(
+ JumpTableAssembler::SizeForNumberOfSlots(max_functions));
}
void NativeModule::LogWasmCodes(Isolate* isolate) {
@@ -436,13 +426,15 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
WasmCode* NativeModule::AddOwnedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
- size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset, size_t code_comments_offset,
- size_t unpadded_binary_size,
+ uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset, size_t constant_pool_offset,
+ size_t code_comments_offset, size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
WasmCode::Tier tier) {
+ CHECK(!FLAG_jitless); // TODO(jgruber): Support wasm in jitless mode.
+
WasmCode* code;
{
// Both allocation and insertion in owned_code_ happen in the same critical
@@ -451,25 +443,13 @@ WasmCode* NativeModule::AddOwnedCode(
Vector<byte> executable_buffer = AllocateForCode(instructions.size());
// Ownership will be transferred to {owned_code_} below.
code = new WasmCode(
- this, index, executable_buffer, stack_slots, safepoint_table_offset,
- handler_table_offset, constant_pool_offset, code_comments_offset,
- unpadded_binary_size, std::move(protected_instructions),
- std::move(reloc_info), std::move(source_position_table), kind, tier);
-
- if (owned_code_.empty() ||
- code->instruction_start() > owned_code_.back()->instruction_start()) {
- // Common case.
- owned_code_.emplace_back(code);
- } else {
- // Slow but unlikely case.
- // TODO(mtrofin): We allocate in increasing address order, and
- // even if we end up with segmented memory, we may end up only with a few
- // large moves - if, for example, a new segment is below the current ones.
- auto insert_before = std::upper_bound(
- owned_code_.begin(), owned_code_.end(), code->instruction_start(),
- WasmCodeUniquePtrComparator{});
- owned_code_.emplace(insert_before, code);
- }
+ this, index, executable_buffer, stack_slots, tagged_parameter_slots,
+ safepoint_table_offset, handler_table_offset, constant_pool_offset,
+ code_comments_offset, unpadded_binary_size,
+ std::move(protected_instructions), std::move(reloc_info),
+ std::move(source_position_table), kind, tier);
+
+ owned_code_.emplace_back(code);
}
memcpy(reinterpret_cast<void*>(code->instruction_start()),
instructions.start(), instructions.size());
@@ -494,21 +474,53 @@ void NativeModule::SetLazyBuiltin(Handle<Code> code) {
i + module_->num_imported_functions, lazy_compile_target,
WasmCode::kNoFlushICache);
}
- Assembler::FlushICache(jump_table_->instructions().start(),
- jump_table_->instructions().size());
+ FlushInstructionCache(jump_table_->instructions().start(),
+ jump_table_->instructions().size());
}
+// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
+// is removed and embedded builtins are no longer optional.
void NativeModule::SetRuntimeStubs(Isolate* isolate) {
+ DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]); // Only called once.
+#ifdef V8_EMBEDDED_BUILTINS
+ WasmCode* jump_table =
+ CreateEmptyJumpTable(JumpTableAssembler::SizeForNumberOfStubSlots(
+ WasmCode::kRuntimeStubCount));
+ Address base = jump_table->instruction_start();
+ EmbeddedData embedded_data = EmbeddedData::FromBlob();
+#define RUNTIME_STUB(Name) {Builtins::k##Name, WasmCode::k##Name},
+#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
+ std::pair<Builtins::Name, WasmCode::RuntimeStubId> wasm_runtime_stubs[] = {
+ WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
+#undef RUNTIME_STUB
+#undef RUNTIME_STUB_TRAP
+ for (auto pair : wasm_runtime_stubs) {
+ CHECK(embedded_data.ContainsBuiltin(pair.first));
+ Address builtin = embedded_data.InstructionStartOfBuiltin(pair.first);
+ JumpTableAssembler::EmitRuntimeStubSlot(base, pair.second, builtin,
+ WasmCode::kNoFlushICache);
+ uint32_t slot_offset =
+ JumpTableAssembler::StubSlotIndexToOffset(pair.second);
+ runtime_stub_entries_[pair.second] = base + slot_offset;
+ }
+ FlushInstructionCache(jump_table->instructions().start(),
+ jump_table->instructions().size());
+ DCHECK_NULL(runtime_stub_table_);
+ runtime_stub_table_ = jump_table;
+#else // V8_EMBEDDED_BUILTINS
HandleScope scope(isolate);
- DCHECK_NULL(runtime_stub_table_[0]); // Only called once.
+ USE(runtime_stub_table_); // Actually unused, but avoids ifdef's in header.
#define COPY_BUILTIN(Name) \
- runtime_stub_table_[WasmCode::k##Name] = \
+ runtime_stub_entries_[WasmCode::k##Name] = \
AddAnonymousCode(isolate->builtins()->builtin_handle(Builtins::k##Name), \
- WasmCode::kRuntimeStub, #Name);
+ WasmCode::kRuntimeStub, #Name) \
+ ->instruction_start();
#define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name)
WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP)
#undef COPY_BUILTIN_TRAP
#undef COPY_BUILTIN
+#endif // V8_EMBEDDED_BUILTINS
+ DCHECK_NE(kNullAddress, runtime_stub_entries_[0]);
}
WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
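With embedded builtins, every runtime stub becomes one fixed-size slot in a single jump table, so a stub's entry address is pure arithmetic over its id. A sketch of the offset computation (the slot size below is hypothetical; the real value is per-architecture inside JumpTableAssembler):

    #include <cstdint>

    constexpr uint32_t kStubSlotSize = 16;  // hypothetical, arch-dependent

    uint32_t StubSlotIndexToOffset(uint32_t stub_id) {
      return stub_id * kStubSlotSize;
    }
    // runtime_stub_entries_[id] = jump_table_base + StubSlotIndexToOffset(id)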
@@ -519,24 +531,37 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
const size_t relocation_size =
code->is_off_heap_trampoline() ? 0 : code->relocation_size();
OwnedVector<byte> reloc_info = OwnedVector<byte>::New(relocation_size);
- memcpy(reloc_info.start(), code->relocation_start(), relocation_size);
+ if (relocation_size > 0) {
+ memcpy(reloc_info.start(), code->relocation_start(), relocation_size);
+ }
Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
code->GetIsolate());
OwnedVector<byte> source_pos =
OwnedVector<byte>::New(source_pos_table->length());
- source_pos_table->copy_out(0, source_pos.start(), source_pos_table->length());
+ if (source_pos_table->length() > 0) {
+ source_pos_table->copy_out(0, source_pos.start(),
+ source_pos_table->length());
+ }
Vector<const byte> instructions(
reinterpret_cast<byte*>(code->InstructionStart()),
static_cast<size_t>(code->InstructionSize()));
- int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
- int safepoint_table_offset =
- code->has_safepoint_info() ? code->safepoint_table_offset() : 0;
+ const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
+
+ // TODO(jgruber,v8:8758): Remove this translation. It exists only because
+  // Code objects contain real offsets but WasmCode expects an offset of 0 to
+ // mean 'empty'.
+ const int safepoint_table_offset =
+ code->has_safepoint_table() ? code->safepoint_table_offset() : 0;
+ const int handler_table_offset =
+ code->has_handler_table() ? code->handler_table_offset() : 0;
+
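[Note: the translation block above exists because the two representations disagree: a Code object stores a real offset even when a table is absent, while WasmCode reserves offset 0 for "no table". A one-line standalone sketch of the boundary rule:]

// Normalize at the Code -> WasmCode boundary: 0 means "table absent".
int NormalizeTableOffset(bool has_table, int real_offset) {
  return has_table ? real_offset : 0;
}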
WasmCode* ret =
AddOwnedCode(WasmCode::kAnonymousFuncIndex, // index
instructions, // instructions
stack_slots, // stack_slots
+ 0, // tagged_parameter_slots
safepoint_table_offset, // safepoint_table_offset
- code->handler_table_offset(), // handler_table_offset
+ handler_table_offset, // handler_table_offset
code->constant_pool_offset(), // constant_pool_offset
code->code_comments_offset(), // code_comments_offset
instructions.size(), // unpadded_binary_size
@@ -558,10 +583,9 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
if (RelocInfo::IsWasmStubCall(mode)) {
uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
- WasmCode* code =
- runtime_stub(static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
- it.rinfo()->set_wasm_stub_call_address(code->instruction_start(),
- SKIP_ICACHE_FLUSH);
+ Address entry = runtime_stub_entry(
+ static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
+ it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
} else {
it.rinfo()->apply(delta);
}
@@ -569,8 +593,8 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
// Flush the i-cache here instead of in AddOwnedCode, to include the changes
// made while iterating over the RelocInfo above.
- Assembler::FlushICache(ret->instructions().start(),
- ret->instructions().size());
+ FlushInstructionCache(ret->instructions().start(),
+ ret->instructions().size());
ret->MaybePrint(name);
ret->Validate();
return ret;
@@ -578,28 +602,38 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
WasmCode* NativeModule::AddCode(
uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
- size_t safepoint_table_offset, size_t handler_table_offset,
+ uint32_t tagged_parameter_slots,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> source_pos_table, WasmCode::Kind kind,
WasmCode::Tier tier) {
OwnedVector<byte> reloc_info = OwnedVector<byte>::New(desc.reloc_size);
- memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
- desc.reloc_size);
+ if (desc.reloc_size > 0) {
+ memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
+ desc.reloc_size);
+ }
+
+ // TODO(jgruber,v8:8758): Remove this translation. It exists only because
+ // CodeDesc contains real offsets but WasmCode expects an offset of 0 to mean
+ // 'empty'.
+ const int safepoint_table_offset =
+ desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset;
+ const int handler_table_offset =
+ desc.handler_table_size == 0 ? 0 : desc.handler_table_offset;
- WasmCode* ret = AddOwnedCode(
+ WasmCode* code = AddOwnedCode(
index, {desc.buffer, static_cast<size_t>(desc.instr_size)}, stack_slots,
- safepoint_table_offset, handler_table_offset, desc.constant_pool_offset(),
- desc.code_comments_offset(), desc.instr_size,
+ tagged_parameter_slots, safepoint_table_offset, handler_table_offset,
+ desc.constant_pool_offset, desc.code_comments_offset, desc.instr_size,
std::move(protected_instructions), std::move(reloc_info),
std::move(source_pos_table), kind, tier);
// Apply the relocation delta by iterating over the RelocInfo.
- intptr_t delta = ret->instructions().start() - desc.buffer;
+ intptr_t delta = code->instructions().start() - desc.buffer;
int mode_mask = RelocInfo::kApplyMask |
RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
- for (RelocIterator it(ret->instructions(), ret->reloc_info(),
- ret->constant_pool(), mode_mask);
+ for (RelocIterator it(code->instructions(), code->reloc_info(),
+ code->constant_pool(), mode_mask);
!it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (RelocInfo::IsWasmCall(mode)) {
@@ -609,10 +643,9 @@ WasmCode* NativeModule::AddCode(
} else if (RelocInfo::IsWasmStubCall(mode)) {
uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
- WasmCode* code =
- runtime_stub(static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
- it.rinfo()->set_wasm_stub_call_address(code->instruction_start(),
- SKIP_ICACHE_FLUSH);
+ Address entry = runtime_stub_entry(
+ static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
+ it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
} else {
it.rinfo()->apply(delta);
}
@@ -620,27 +653,39 @@ WasmCode* NativeModule::AddCode(
// Flush the i-cache here instead of in AddOwnedCode, to include the changes
// made while iterating over the RelocInfo above.
- Assembler::FlushICache(ret->instructions().start(),
- ret->instructions().size());
- ret->MaybePrint();
- ret->Validate();
- return ret;
+ FlushInstructionCache(code->instructions().start(),
+ code->instructions().size());
+ code->MaybePrint();
+ code->Validate();
+
+ if (!code->protected_instructions_.is_empty()) {
+ code->RegisterTrapHandlerData();
+ }
+
+ base::MutexGuard lock(&allocation_mutex_);
+ // Skip publishing code if there is an active redirection to the interpreter
+ // for the given function index, in order to preserve the redirection.
+ if (!code->IsAnonymous() && !has_interpreter_redirection(code->index())) {
+ InstallCode(code);
+ }
+
+ return code;
}
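[Note: AddCode now folds in what the deleted PublishCode used to do: register trap-handler data, then, under the allocation mutex, install the code unless the function is currently redirected to the interpreter. A standalone sketch (simplified types, std::mutex standing in for base::Mutex) of that publish-unless-redirected step:]

#include <mutex>
#include <unordered_map>
#include <unordered_set>

struct Code { int index; };

struct Module {
  std::mutex allocation_mutex;
  std::unordered_map<int, Code*> code_table;
  std::unordered_set<int> interpreter_redirections;

  void Publish(Code* code) {
    std::lock_guard<std::mutex> lock(allocation_mutex);
    // Preserve an active redirection: never overwrite the interpreter entry.
    if (interpreter_redirections.count(code->index) == 0) {
      code_table[code->index] = code;
    }
  }
};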
WasmCode* NativeModule::AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
- size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset, size_t code_comments_offset,
- size_t unpadded_binary_size,
+ uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset, size_t constant_pool_offset,
+ size_t code_comments_offset, size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Tier tier) {
- WasmCode* code =
- AddOwnedCode(index, instructions, stack_slots, safepoint_table_offset,
- handler_table_offset, constant_pool_offset,
- code_comments_offset, unpadded_binary_size,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_position_table), WasmCode::kFunction, tier);
+ WasmCode* code = AddOwnedCode(
+ index, instructions, stack_slots, tagged_parameter_slots,
+ safepoint_table_offset, handler_table_offset, constant_pool_offset,
+ code_comments_offset, unpadded_binary_size,
+ std::move(protected_instructions), std::move(reloc_info),
+ std::move(source_position_table), WasmCode::kFunction, tier);
if (!code->protected_instructions_.is_empty()) {
code->RegisterTrapHandlerData();
@@ -652,18 +697,6 @@ WasmCode* NativeModule::AddDeserializedCode(
return code;
}
-void NativeModule::PublishCode(WasmCode* code) {
- base::MutexGuard lock(&allocation_mutex_);
- // Skip publishing code if there is an active redirection to the interpreter
- // for the given function index, in order to preserve the redirection.
- if (has_interpreter_redirection(code->index())) return;
-
- if (!code->protected_instructions_.is_empty()) {
- code->RegisterTrapHandlerData();
- }
- InstallCode(code);
-}
-
void NativeModule::PublishInterpreterEntry(WasmCode* code,
uint32_t func_index) {
code->index_ = func_index;
@@ -680,17 +713,17 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
return result;
}
-WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
+WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
// Only call this if we really need a jump table.
- DCHECK_LT(0, num_wasm_functions);
- OwnedVector<byte> instructions = OwnedVector<byte>::New(
- JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
+ DCHECK_LT(0, jump_table_size);
+ OwnedVector<byte> instructions = OwnedVector<byte>::New(jump_table_size);
memset(instructions.start(), 0, instructions.size());
return AddOwnedCode(WasmCode::kAnonymousFuncIndex, // index
instructions.as_vector(), // instructions
0, // stack_slots
- instructions.size(), // safepoint_table_offset
- instructions.size(), // handler_table_offset
+ 0, // tagged_parameter_slots
+ 0, // safepoint_table_offset
+ 0, // handler_table_offset
instructions.size(), // constant_pool_offset
instructions.size(), // code_comments_offset
instructions.size(), // unpadded_binary_size
@@ -733,14 +766,15 @@ Vector<byte> NativeModule::AllocateForCode(size_t size) {
Address hint = owned_code_space_.empty() ? kNullAddress
: owned_code_space_.back().end();
- VirtualMemory new_mem =
- code_manager_->TryAllocate(size, reinterpret_cast<void*>(hint));
+ VirtualMemory new_mem = engine_->code_manager()->TryAllocate(
+ size, reinterpret_cast<void*>(hint));
if (!new_mem.IsReserved()) {
V8::FatalProcessOutOfMemory(nullptr,
"NativeModule::AllocateForCode reservation");
UNREACHABLE();
}
- code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
+ engine_->code_manager()->AssignRanges(new_mem.address(), new_mem.end(),
+ this);
free_code_space_.Merge(new_mem.region());
owned_code_space_.emplace_back(std::move(new_mem));
@@ -773,7 +807,7 @@ Vector<byte> NativeModule::AllocateForCode(size_t size) {
Address start = std::max(commit_start, vmem.address());
Address end = std::min(commit_end, vmem.end());
size_t commit_size = static_cast<size_t>(end - start);
- if (!code_manager_->Commit(start, commit_size)) {
+ if (!engine_->code_manager()->Commit(start, commit_size)) {
V8::FatalProcessOutOfMemory(nullptr,
"NativeModule::AllocateForCode commit");
UNREACHABLE();
@@ -785,7 +819,8 @@ Vector<byte> NativeModule::AllocateForCode(size_t size) {
if (commit_start >= commit_end) break;
}
#else
- if (!code_manager_->Commit(commit_start, commit_end - commit_start)) {
+ if (!engine_->code_manager()->Commit(commit_start,
+ commit_end - commit_start)) {
V8::FatalProcessOutOfMemory(nullptr,
"NativeModule::AllocateForCode commit");
UNREACHABLE();
@@ -829,13 +864,38 @@ void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
WasmCode* NativeModule::Lookup(Address pc) const {
base::MutexGuard lock(&allocation_mutex_);
if (owned_code_.empty()) return nullptr;
- auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
- WasmCodeUniquePtrComparator());
- if (iter == owned_code_.begin()) return nullptr;
- --iter;
- WasmCode* candidate = iter->get();
- DCHECK_NOT_NULL(candidate);
- return candidate->contains(pc) ? candidate : nullptr;
+ // First update the sorted portion counter.
+ if (owned_code_sorted_portion_ == 0) ++owned_code_sorted_portion_;
+ while (owned_code_sorted_portion_ < owned_code_.size() &&
+ owned_code_[owned_code_sorted_portion_ - 1]->instruction_start() <=
+ owned_code_[owned_code_sorted_portion_]->instruction_start()) {
+ ++owned_code_sorted_portion_;
+ }
+ // Execute at most two rounds: First check whether the {pc} is within the
+ // sorted portion of {owned_code_}. If it's not, then sort the whole vector
+ // and retry.
+ while (true) {
+ auto iter =
+ std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
+ [](Address pc, const std::unique_ptr<WasmCode>& code) {
+ DCHECK_NE(kNullAddress, pc);
+ DCHECK_NOT_NULL(code);
+ return pc < code->instruction_start();
+ });
+ if (iter != owned_code_.begin()) {
+ --iter;
+ WasmCode* candidate = iter->get();
+ DCHECK_NOT_NULL(candidate);
+ if (candidate->contains(pc)) return candidate;
+ }
+ if (owned_code_sorted_portion_ == owned_code_.size()) return nullptr;
+ std::sort(owned_code_.begin(), owned_code_.end(),
+ [](const std::unique_ptr<WasmCode>& code1,
+ const std::unique_ptr<WasmCode>& code2) {
+ return code1->instruction_start() < code2->instruction_start();
+ });
+ owned_code_sorted_portion_ = owned_code_.size();
+ }
}
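[Note: the rewritten Lookup above implements an append-then-sort-on-demand scheme: AddOwnedCode appends without keeping order, a counter tracks how far the sorted prefix extends, and a miss triggers one full sort before the retry. A standalone simplified model follows; it searches only the known-sorted prefix, a conservative variant of the same idea.]

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Code { std::uintptr_t start, end; };  // [start, end) address range

class CodeIndex {
 public:
  void Add(Code c) { codes_.push_back(c); }  // cheap append, maybe unsorted

  const Code* Lookup(std::uintptr_t pc) {
    // At most two rounds: search the sorted prefix, then sort and retry.
    for (int round = 0; round < 2; ++round) {
      auto it = std::upper_bound(
          codes_.begin(), codes_.begin() + sorted_, pc,
          [](std::uintptr_t p, const Code& c) { return p < c.start; });
      if (it != codes_.begin() && pc < (it - 1)->end) return &*(it - 1);
      if (sorted_ == codes_.size()) return nullptr;
      std::sort(codes_.begin(), codes_.end(),
                [](const Code& a, const Code& b) { return a.start < b.start; });
      sorted_ = codes_.size();
    }
    return nullptr;
  }

 private:
  std::vector<Code> codes_;
  std::size_t sorted_ = 0;  // codes_[0, sorted_) is known to be sorted
};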
Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
@@ -875,12 +935,24 @@ void NativeModule::DisableTrapHandler() {
// recycled.
}
+const char* NativeModule::GetRuntimeStubName(Address runtime_stub_entry) const {
+#define RETURN_NAME(Name) \
+ if (runtime_stub_entries_[WasmCode::k##Name] == runtime_stub_entry) { \
+ return #Name; \
+ }
+#define RETURN_NAME_TRAP(Name) RETURN_NAME(ThrowWasm##Name)
+ WASM_RUNTIME_STUB_LIST(RETURN_NAME, RETURN_NAME_TRAP)
+#undef RETURN_NAME_TRAP
+#undef RETURN_NAME
+ return "<unknown>";
+}
+
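[Note: GetRuntimeStubName moved from WasmCode to NativeModule because stubs are now bare addresses rather than code objects, so the name is recovered by scanning the fixed-size entry table. A standalone sketch with hypothetical stub names (the real list comes from WASM_RUNTIME_STUB_LIST):]

#include <cstdint>

constexpr int kStubCount = 3;  // assumption: small model
const char* const kStubNames[kStubCount] = {  // hypothetical names
    "StubA", "StubB", "StubC"};

const char* StubName(const std::uintptr_t (&entries)[kStubCount],
                     std::uintptr_t entry) {
  for (int i = 0; i < kStubCount; ++i) {
    if (entries[i] == entry) return kStubNames[i];
  }
  return "<unknown>";
}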
NativeModule::~NativeModule() {
TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
// Cancel all background compilation before resetting any field of the
// NativeModule or freeing anything.
- compilation_state_->CancelAndWait();
- code_manager_->FreeNativeModule(this);
+ compilation_state_->AbortCompilation();
+ engine_->FreeNativeModule(this);
}
WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
@@ -931,13 +1003,6 @@ void WasmCodeManager::AssignRanges(Address start, Address end,
lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
}
-void WasmCodeManager::AssignRangesAndAddModule(Address start, Address end,
- NativeModule* native_module) {
- base::MutexGuard lock(&native_modules_mutex_);
- lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
- native_modules_.emplace(native_module);
-}
-
VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
DCHECK_GT(size, 0);
@@ -966,36 +1031,11 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
return mem;
}
-void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const {
- base::MutexGuard lock(&native_modules_mutex_);
- for (NativeModule* native_module : native_modules_) {
- int code_size =
- static_cast<int>(native_module->committed_code_space_.load() / MB);
- isolate->counters()->wasm_module_code_size_mb()->AddSample(code_size);
- }
-}
-
void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
remaining_uncommitted_code_space_.store(limit);
critical_uncommitted_code_space_.store(limit / 2);
}
-namespace {
-
-void ModuleSamplingCallback(v8::Isolate* v8_isolate, v8::GCType type,
- v8::GCCallbackFlags flags, void* data) {
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- isolate->wasm_engine()->code_manager()->SampleModuleSizes(isolate);
-}
-
-} // namespace
-
-// static
-void WasmCodeManager::InstallSamplingGCCallback(Isolate* isolate) {
- isolate->heap()->AddGCEpilogueCallback(ModuleSamplingCallback,
- v8::kGCTypeMarkSweepCompact, nullptr);
-}
-
// static
size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
constexpr size_t kCodeSizeMultiplier = 4;
@@ -1031,8 +1071,9 @@ size_t WasmCodeManager::EstimateNativeModuleNonCodeSize(
}
std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
- Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
- bool can_request_more, std::shared_ptr<const WasmModule> module) {
+ WasmEngine* engine, Isolate* isolate, const WasmFeatures& enabled,
+ size_t code_size_estimate, bool can_request_more,
+ std::shared_ptr<const WasmModule> module) {
DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
if (remaining_uncommitted_code_space_.load() <
critical_uncommitted_code_space_.load()) {
@@ -1065,12 +1106,13 @@ std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
Address start = code_space.address();
size_t size = code_space.size();
Address end = code_space.end();
- std::unique_ptr<NativeModule> ret(new NativeModule(
- isolate, enabled, can_request_more, std::move(code_space),
- isolate->wasm_engine()->code_manager(), std::move(module)));
+ std::unique_ptr<NativeModule> ret(
+ new NativeModule(engine, enabled, can_request_more, std::move(code_space),
+ std::move(module), isolate->async_counters()));
TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
size);
- AssignRangesAndAddModule(start, end, ret.get());
+ base::MutexGuard lock(&native_modules_mutex_);
+ lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
return ret;
}
@@ -1125,8 +1167,6 @@ bool NativeModule::SetExecutable(bool executable) {
void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
base::MutexGuard lock(&native_modules_mutex_);
- DCHECK_EQ(1, native_modules_.count(native_module));
- native_modules_.erase(native_module);
TRACE_HEAP("Freeing NativeModule %p\n", native_module);
for (auto& code_space : native_module->owned_code_space_) {
DCHECK(code_space.IsReserved());
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 4247350ceb..272dab0b03 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -23,8 +23,9 @@
namespace v8 {
namespace internal {
-struct CodeDesc;
class Code;
+class CodeDesc;
+class Isolate;
namespace wasm {
@@ -121,6 +122,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
size_t code_comments_offset() const { return code_comments_offset_; }
size_t unpadded_binary_size() const { return unpadded_binary_size_; }
uint32_t stack_slots() const { return stack_slots_; }
+ uint32_t tagged_parameter_slots() const { return tagged_parameter_slots_; }
bool is_liftoff() const { return tier_ == kLiftoff; }
bool contains(Address pc) const {
return reinterpret_cast<Address>(instructions_.start()) <= pc &&
@@ -132,8 +134,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
return protected_instructions_.as_vector();
}
- const char* GetRuntimeStubName() const;
-
void Validate() const;
void Print(const char* name = nullptr) const;
void MaybePrint(const char* name = nullptr) const;
@@ -155,9 +155,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
WasmCode(NativeModule* native_module, uint32_t index,
Vector<byte> instructions, uint32_t stack_slots,
- size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset, size_t code_comments_offset,
- size_t unpadded_binary_size,
+ uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset, size_t constant_pool_offset,
+ size_t code_comments_offset, size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
@@ -170,6 +170,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
kind_(kind),
constant_pool_offset_(constant_pool_offset),
stack_slots_(stack_slots),
+ tagged_parameter_slots_(tagged_parameter_slots),
safepoint_table_offset_(safepoint_table_offset),
handler_table_offset_(handler_table_offset),
code_comments_offset_(code_comments_offset),
@@ -200,6 +201,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
Kind kind_;
size_t constant_pool_offset_ = 0;
uint32_t stack_slots_ = 0;
+ // Number of tagged parameters passed to this function via the stack. This
+ // value is used by the stack walker (e.g. GC) to find references.
+ uint32_t tagged_parameter_slots_ = 0;
  // We care about safepoint data for wasm-to-js functions, since there may be
  // stack/register tagged values for large number conversions.
@@ -227,8 +231,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
// code below, i.e. it can be called concurrently from background threads.
+ // {AddCode} also makes the code available to the system by entering it into
+ // the code table and patching the jump table.
WasmCode* AddCode(uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
- size_t safepoint_table_offset, size_t handler_table_offset,
+ uint32_t tagged_parameter_slots,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> source_position_table,
@@ -236,9 +242,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
- size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset, size_t code_comments_offset,
- size_t unpadded_binary_size,
+ uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset, size_t constant_pool_offset,
+ size_t code_comments_offset, size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
@@ -252,17 +258,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
// jump table will be populated with that copy.
void SetLazyBuiltin(Handle<Code> code);
- // Initializes all runtime stubs by copying them over from the JS-allocated
- // heap into this native module. It must be called exactly once per native
- // module before adding other WasmCode so that runtime stub ids can be
- // resolved during relocation.
+ // Initializes all runtime stubs by setting up entry addresses in the runtime
+ // stub table. It must be called exactly once per native module before adding
+ // other WasmCode so that runtime stub ids can be resolved during relocation.
void SetRuntimeStubs(Isolate* isolate);
- // Makes the code available to the system (by entering it into the code table
- // and patching the jump table). Callers have to take care not to race with
- // threads executing the old code.
- void PublishCode(WasmCode* code);
-
// Switch a function to an interpreter entry wrapper. When adding interpreter
// wrappers, we do not insert them in the code_table, however, we let them
// self-identify as the {index} function.
@@ -280,11 +280,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
bool has_code(uint32_t index) const { return code(index) != nullptr; }
- WasmCode* runtime_stub(WasmCode::RuntimeStubId index) const {
+ Address runtime_stub_entry(WasmCode::RuntimeStubId index) const {
DCHECK_LT(index, WasmCode::kRuntimeStubCount);
- WasmCode* code = runtime_stub_table_[index];
- DCHECK_NOT_NULL(code);
- return code;
+ Address entry_address = runtime_stub_entries_[index];
+ DCHECK_NE(kNullAddress, entry_address);
+ return entry_address;
}
Address jump_table_start() const {
@@ -326,8 +326,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
CompilationState* compilation_state() { return compilation_state_.get(); }
- // Create a {CompilationEnv} object for compilation. Only valid as long as
- // this {NativeModule} is alive.
+ // Create a {CompilationEnv} object for compilation. The caller has to ensure
+ // that the {WasmModule} pointer stays valid while the {CompilationEnv} is
+ // being used.
CompilationEnv CreateCompilationEnv() const;
uint32_t num_functions() const {
@@ -341,7 +342,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); }
+ std::shared_ptr<const WasmModule> shared_module() const { return module_; }
size_t committed_code_space() const { return committed_code_space_.load(); }
+ WasmEngine* engine() const { return engine_; }
void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);
@@ -355,15 +358,17 @@ class V8_EXPORT_PRIVATE NativeModule final {
const WasmFeatures& enabled_features() const { return enabled_features_; }
+ const char* GetRuntimeStubName(Address runtime_stub_entry) const;
+
private:
friend class WasmCode;
friend class WasmCodeManager;
friend class NativeModuleModificationScope;
- NativeModule(Isolate* isolate, const WasmFeatures& enabled_features,
+ NativeModule(WasmEngine* engine, const WasmFeatures& enabled_features,
bool can_request_more, VirtualMemory code_space,
- WasmCodeManager* code_manager,
- std::shared_ptr<const WasmModule> module);
+ std::shared_ptr<const WasmModule> module,
+ std::shared_ptr<Counters> async_counters);
WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind,
const char* name = nullptr);
@@ -375,7 +380,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
// code is obtained (CodeDesc vs, as a point in time, Code), the kind,
// whether it has an index or is anonymous, etc.
WasmCode* AddOwnedCode(uint32_t index, Vector<const byte> instructions,
- uint32_t stack_slots, size_t safepoint_table_offset,
+ uint32_t stack_slots, uint32_t tagged_parameter_slots,
+ size_t safepoint_table_offset,
size_t handler_table_offset,
size_t constant_pool_offset,
size_t code_comments_offset,
@@ -385,7 +391,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
OwnedVector<const byte> source_position_table,
WasmCode::Kind, WasmCode::Tier);
- WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);
+ WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size);
// Hold the {allocation_mutex_} when calling this method.
void InstallCode(WasmCode* code);
@@ -423,15 +429,19 @@ class V8_EXPORT_PRIVATE NativeModule final {
// to be consistent across asynchronous compilations later.
const WasmFeatures enabled_features_;
- // TODO(clemensh): Make this a unique_ptr (requires refactoring
- // AsyncCompileJob).
+ // The decoded module, stored in a shared_ptr such that background compile
+ // tasks can keep this alive.
std::shared_ptr<const WasmModule> module_;
// Wire bytes, held in a shared_ptr so they can be kept alive by the
// {WireBytesStorage}, held by background compile tasks.
std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
- WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};
+ // Contains entry points for runtime stub calls via {WASM_STUB_CALL}.
+ Address runtime_stub_entries_[WasmCode::kRuntimeStubCount] = {kNullAddress};
+
+ // Jump table used for runtime stubs (i.e. trampolines to embedded builtins).
+ WasmCode* runtime_stub_table_ = nullptr;
// Jump table used to easily redirect wasm function calls.
WasmCode* jump_table_ = nullptr;
@@ -450,9 +460,14 @@ class V8_EXPORT_PRIVATE NativeModule final {
//////////////////////////////////////////////////////////////////////////////
// Protected by {allocation_mutex_}:
- // Holds all allocated code objects, is maintained to be in ascending order
- // according to the codes instruction start address to allow lookups.
- std::vector<std::unique_ptr<WasmCode>> owned_code_;
+ // Holds all allocated code objects. Mutable because it might get sorted in
+ // {Lookup()}.
+ mutable std::vector<std::unique_ptr<WasmCode>> owned_code_;
+
+ // Keep track of the portion of {owned_code_} that is sorted.
+ // Entries [0, owned_code_sorted_portion_) are known to be sorted.
+ // Mutable because it might get modified in {Lookup()}.
+ mutable size_t owned_code_sorted_portion_ = 0;
std::unique_ptr<WasmCode* []> code_table_;
@@ -467,7 +482,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
- WasmCodeManager* const code_manager_;
+ WasmEngine* const engine_;
std::atomic<size_t> committed_code_space_{0};
int modification_scope_depth_ = 0;
bool can_request_more_memory_;
@@ -482,36 +497,24 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
size_t max_committed);
- // Create a new NativeModule. The caller is responsible for its
- // lifetime. The native module will be given some memory for code,
- // which will be page size aligned. The size of the initial memory
- // is determined with a heuristic based on the total size of wasm
- // code. The native module may later request more memory.
- // TODO(titzer): isolate is only required here for CompilationState.
- std::unique_ptr<NativeModule> NewNativeModule(
- Isolate* isolate, const WasmFeatures& enabled_features,
- size_t code_size_estimate, bool can_request_more,
- std::shared_ptr<const WasmModule> module);
NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const;
size_t remaining_uncommitted_code_space() const;
- // Add a sample of all module sizes.
- void SampleModuleSizes(Isolate* isolate) const;
-
void SetMaxCommittedMemoryForTesting(size_t limit);
- // TODO(v8:7424): For now we sample module sizes in a GC callback. This will
- // bias samples towards apps with high memory pressure. We should switch to
- // using sampling based on regular intervals independent of the GC.
- static void InstallSamplingGCCallback(Isolate* isolate);
-
static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);
private:
friend class NativeModule;
+ friend class WasmEngine;
+
+ std::unique_ptr<NativeModule> NewNativeModule(
+ WasmEngine* engine, Isolate* isolate,
+ const WasmFeatures& enabled_features, size_t code_size_estimate,
+ bool can_request_more, std::shared_ptr<const WasmModule> module);
V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
void* hint = nullptr);
@@ -521,8 +524,8 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// There's no separate Uncommit.
void FreeNativeModule(NativeModule*);
+
void AssignRanges(Address start, Address end, NativeModule*);
- void AssignRangesAndAddModule(Address start, Address end, NativeModule*);
WasmMemoryTracker* const memory_tracker_;
std::atomic<size_t> remaining_uncommitted_code_space_;
@@ -537,7 +540,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// Protected by {native_modules_mutex_}:
std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
- std::unordered_set<NativeModule*> native_modules_;
// End of fields protected by {native_modules_mutex_}.
//////////////////////////////////////////////////////////////////////////////
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 668b08eba9..ba42cef4f5 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -71,8 +71,8 @@ enum SectionCode : int8_t {
kElementSectionCode = 9, // Elements section
kCodeSectionCode = 10, // Function code
kDataSectionCode = 11, // Data segments
- kExceptionSectionCode = 12, // Exception section
- kDataCountSectionCode = 13, // Number of data segments
+ kDataCountSectionCode = 12, // Number of data segments
+ kExceptionSectionCode = 13, // Exception section
// The following sections are custom sections, and are identified using a
// string rather than an integer. Their enumeration values are not guaranteed
@@ -83,7 +83,7 @@ enum SectionCode : int8_t {
// Helper values
kFirstSectionInModule = kTypeSectionCode,
kLastKnownModuleSection = kSourceMappingURLSectionCode,
- kFirstUnorderedSection = kExceptionSectionCode,
+ kFirstUnorderedSection = kDataCountSectionCode,
};
// Binary encoding of name section kinds.
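[Note: the swap above tracks the spec-side encoding of the bulk-memory proposal: the DataCount section takes code 12 and the Exception section moves to 13, and kFirstUnorderedSection follows DataCount down to 12. In sketch form:]

#include <cstdint>

enum SectionCode : int8_t {
  kDataSectionCode = 11,       // Data segments
  kDataCountSectionCode = 12,  // was 13 before this change
  kExceptionSectionCode = 13,  // was 12 before this change
  kFirstUnorderedSection = kDataCountSectionCode,
};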
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 98619b5c14..8420e63eb6 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -215,16 +215,20 @@ class InterpreterHandle {
WasmOpcodes::TrapReasonToMessageId(thread->GetTrapReason());
Handle<Object> exception =
isolate_->factory()->NewWasmRuntimeError(message_id);
- isolate_->Throw(*exception);
- // Handle this exception. Return without trying to read back the
- // return value.
- auto result = thread->HandleException(isolate_);
- return result == WasmInterpreter::Thread::HANDLED;
- } break;
+ auto result = thread->RaiseException(isolate_, exception);
+ if (result == WasmInterpreter::Thread::HANDLED) break;
+      // If no local handler was found, we fall through to {STOPPED}.
+ DCHECK_EQ(WasmInterpreter::State::STOPPED, thread->state());
+ V8_FALLTHROUGH;
+ }
case WasmInterpreter::State::STOPPED:
- // An exception happened, and the current activation was unwound.
+ // An exception happened, and the current activation was unwound
+ // without hitting a local exception handler. All that remains to be
+ // done is finish the activation and let the exception propagate.
DCHECK_EQ(thread->ActivationFrameBase(activation_id),
thread->GetFrameCount());
+ DCHECK(isolate_->has_pending_exception());
+ FinishActivation(frame_pointer, activation_id);
return false;
// RUNNING should never occur here.
case WasmInterpreter::State::RUNNING:
@@ -379,26 +383,6 @@ class InterpreterHandle {
return thread->GetFrame(frame_range.first + idx);
}
- void Unwind(Address frame_pointer) {
- // Find the current activation.
- DCHECK_EQ(1, activations_.count(frame_pointer));
- // Activations must be properly stacked:
- DCHECK_EQ(activations_.size() - 1, activations_[frame_pointer]);
- uint32_t activation_id = static_cast<uint32_t>(activations_.size() - 1);
-
- // Unwind the frames of the current activation if not already unwound.
- WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
- if (static_cast<uint32_t>(thread->GetFrameCount()) >
- thread->ActivationFrameBase(activation_id)) {
- using ExceptionResult = WasmInterpreter::Thread::ExceptionHandlingResult;
- ExceptionResult result = thread->HandleException(isolate_);
- // TODO(wasm): Handle exceptions caught in wasm land.
- CHECK_EQ(ExceptionResult::UNWOUND, result);
- }
-
- FinishActivation(frame_pointer, activation_id);
- }
-
uint64_t NumInterpretedCalls() {
DCHECK_EQ(1, interpreter()->GetThreadCount());
return interpreter()->GetThread(0)->NumInterpretedCalls();
@@ -483,38 +467,6 @@ class InterpreterHandle {
}
return local_scope_object;
}
-
- Handle<JSArray> GetScopeDetails(Address frame_pointer, int frame_index,
- Handle<WasmDebugInfo> debug_info) {
- auto frame = GetInterpretedFrame(frame_pointer, frame_index);
-
- Handle<FixedArray> global_scope =
- isolate_->factory()->NewFixedArray(ScopeIterator::kScopeDetailsSize);
- global_scope->set(ScopeIterator::kScopeDetailsTypeIndex,
- Smi::FromInt(ScopeIterator::ScopeTypeGlobal));
- Handle<JSObject> global_scope_object =
- GetGlobalScopeObject(frame.get(), debug_info);
- global_scope->set(ScopeIterator::kScopeDetailsObjectIndex,
- *global_scope_object);
-
- Handle<FixedArray> local_scope =
- isolate_->factory()->NewFixedArray(ScopeIterator::kScopeDetailsSize);
- local_scope->set(ScopeIterator::kScopeDetailsTypeIndex,
- Smi::FromInt(ScopeIterator::ScopeTypeLocal));
- Handle<JSObject> local_scope_object =
- GetLocalScopeObject(frame.get(), debug_info);
- local_scope->set(ScopeIterator::kScopeDetailsObjectIndex,
- *local_scope_object);
-
- Handle<JSArray> global_jsarr =
- isolate_->factory()->NewJSArrayWithElements(global_scope);
- Handle<JSArray> local_jsarr =
- isolate_->factory()->NewJSArrayWithElements(local_scope);
- Handle<FixedArray> all_scopes = isolate_->factory()->NewFixedArray(2);
- all_scopes->set(0, *global_jsarr);
- all_scopes->set(1, *local_jsarr);
- return isolate_->factory()->NewJSArrayWithElements(all_scopes);
- }
};
} // namespace
@@ -664,23 +616,12 @@ wasm::WasmInterpreter::FramePtr WasmDebugInfo::GetInterpretedFrame(
return GetInterpreterHandle(*this)->GetInterpretedFrame(frame_pointer, idx);
}
-void WasmDebugInfo::Unwind(Address frame_pointer) {
- return GetInterpreterHandle(*this)->Unwind(frame_pointer);
-}
-
uint64_t WasmDebugInfo::NumInterpretedCalls() {
auto* handle = GetInterpreterHandleOrNull(*this);
return handle ? handle->NumInterpretedCalls() : 0;
}
// static
-Handle<JSObject> WasmDebugInfo::GetScopeDetails(
- Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
- auto* interp_handle = GetInterpreterHandle(*debug_info);
- return interp_handle->GetScopeDetails(frame_pointer, frame_index, debug_info);
-}
-
-// static
Handle<JSObject> WasmDebugInfo::GetGlobalScopeObject(
Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
auto* interp_handle = GetInterpreterHandle(*debug_info);
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index d948157a12..8a21252ddf 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -6,6 +6,7 @@
#include "src/code-tracer.h"
#include "src/compilation-statistics.h"
+#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
@@ -21,14 +22,100 @@ namespace v8 {
namespace internal {
namespace wasm {
+namespace {
+class LogCodesTask : public Task {
+ public:
+ LogCodesTask(base::Mutex* mutex, LogCodesTask** task_slot, Isolate* isolate)
+ : mutex_(mutex), task_slot_(task_slot), isolate_(isolate) {
+ DCHECK_NOT_NULL(task_slot);
+ DCHECK_NOT_NULL(isolate);
+ }
+
+ ~LogCodesTask() {
+ // If the platform deletes this task before executing it, we also deregister
+ // it to avoid use-after-free from still-running background threads.
+ if (!cancelled()) DeregisterTask();
+ }
+
+ // Hold the {mutex_} when calling this method.
+ void AddCode(WasmCode* code) { code_to_log_.push_back(code); }
+
+ void Run() override {
+ if (cancelled()) return;
+ DeregisterTask();
+    // If code logging was disabled in the meantime, do not log the code.
+ if (!WasmCode::ShouldBeLogged(isolate_)) return;
+ for (WasmCode* code : code_to_log_) {
+ code->LogCode(isolate_);
+ }
+ }
+
+ void Cancel() {
+ // Cancel will only be called on Isolate shutdown, which happens on the
+    // Isolate's foreground thread, so no synchronization is needed.
+ isolate_ = nullptr;
+ }
+
+ bool cancelled() const { return isolate_ == nullptr; }
+
+ void DeregisterTask() {
+ // The task will only be deregistered from the foreground thread (executing
+ // this task or calling its destructor), thus we do not need synchronization
+ // on this field access.
+ if (task_slot_ == nullptr) return; // already deregistered.
+ // Remove this task from the {IsolateInfo} in the engine. The next
+ // logging request will allocate and schedule a new task.
+ base::MutexGuard guard(mutex_);
+ DCHECK_EQ(this, *task_slot_);
+ *task_slot_ = nullptr;
+ task_slot_ = nullptr;
+ }
+
+ private:
+ // The mutex of the WasmEngine.
+ base::Mutex* const mutex_;
+ // The slot in the WasmEngine where this LogCodesTask is stored. This is
+ // cleared by this task before execution or on task destruction.
+ LogCodesTask** task_slot_;
+ Isolate* isolate_;
+ std::vector<WasmCode*> code_to_log_;
+};
+} // namespace
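[Note: the LogCodesTask above implements a one-pending-task-per-isolate pattern: background threads append code to the currently scheduled task under the engine mutex, and the task clears its slot both before running and when the platform destroys it unrun. A standalone sketch of the slot discipline (std::mutex as a stand-in, int payload instead of WasmCode*):]

#include <mutex>
#include <vector>

struct LogTask {
  std::mutex* mu;    // the engine-wide mutex
  LogTask** slot;    // where the engine stores the pending task
  std::vector<int> payload;

  ~LogTask() { Deregister(); }  // covers "deleted before executing"

  void Run() {
    Deregister();  // the next logging request schedules a fresh task
    // ... process payload ...
  }

  void Deregister() {
    if (slot == nullptr) return;  // already deregistered
    std::lock_guard<std::mutex> lock(*mu);
    *slot = nullptr;  // the engine slot no longer points at us
    slot = nullptr;
  }
};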
+
+struct WasmEngine::IsolateInfo {
+ explicit IsolateInfo(Isolate* isolate)
+ : log_codes(WasmCode::ShouldBeLogged(isolate)) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::Platform* platform = V8::GetCurrentPlatform();
+ foreground_task_runner = platform->GetForegroundTaskRunner(v8_isolate);
+ }
+
+  // All native modules that are being used by this Isolate (this set
+  // currently only grows, never shrinks).
+ std::set<NativeModule*> native_modules;
+
+ // Caches whether code needs to be logged on this isolate.
+ bool log_codes;
+
+ // The currently scheduled LogCodesTask.
+ LogCodesTask* log_codes_task = nullptr;
+
+ // The foreground task runner of the isolate (can be called from background).
+ std::shared_ptr<v8::TaskRunner> foreground_task_runner;
+};
+
WasmEngine::WasmEngine()
: code_manager_(&memory_tracker_, FLAG_wasm_max_code_space * MB) {}
WasmEngine::~WasmEngine() {
+ // Synchronize on all background compile tasks.
+ background_compile_task_manager_.CancelAndWait();
// All AsyncCompileJobs have been canceled.
- DCHECK(jobs_.empty());
+ DCHECK(async_compile_jobs_.empty());
// All Isolates have been deregistered.
DCHECK(isolates_.empty());
+  // All NativeModules have been destroyed.
+ DCHECK(isolates_per_native_module_.empty());
}
bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
@@ -94,7 +181,7 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
isolate->counters(), allocator());
if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result.error());
+ thrower->CompileFailed(result.error());
return {};
}
@@ -141,7 +228,7 @@ MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
void WasmEngine::AsyncInstantiate(
Isolate* isolate, std::unique_ptr<InstantiationResultResolver> resolver,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports) {
- ErrorThrower thrower(isolate, "WebAssembly Instantiation");
+ ErrorThrower thrower(isolate, "WebAssembly.instantiate()");
  // Instantiate a TryCatch so that caught exceptions won't propagate out.
// They will still be set as pending exceptions on the isolate.
// TODO(clemensh): Avoid TryCatch, use Execution::TryCall internally to invoke
@@ -244,16 +331,24 @@ std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
}
Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
- Isolate* isolate, std::shared_ptr<NativeModule> shared_module) {
- ModuleWireBytes wire_bytes(shared_module->wire_bytes());
- const WasmModule* module = shared_module->module();
+ Isolate* isolate, std::shared_ptr<NativeModule> shared_native_module) {
+ NativeModule* native_module = shared_native_module.get();
+ ModuleWireBytes wire_bytes(native_module->wire_bytes());
+ const WasmModule* module = native_module->module();
Handle<Script> script =
CreateWasmScript(isolate, wire_bytes, module->source_map_url);
- size_t code_size = shared_module->committed_code_space();
+ size_t code_size = native_module->committed_code_space();
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
- isolate, std::move(shared_module), script, code_size);
- CompileJsToWasmWrappers(isolate, module_object->native_module()->module(),
+ isolate, std::move(shared_native_module), script, code_size);
+ CompileJsToWasmWrappers(isolate, native_module->module(),
handle(module_object->export_wrappers(), isolate));
+ {
+ base::MutexGuard lock(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
+ isolates_[isolate]->native_modules.insert(native_module);
+ DCHECK_EQ(1, isolates_per_native_module_.count(native_module));
+ isolates_per_native_module_[native_module].insert(isolate);
+ }
return module_object;
}
@@ -287,39 +382,46 @@ AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
AsyncCompileJob* job =
new AsyncCompileJob(isolate, enabled, std::move(bytes_copy), length,
context, std::move(resolver));
- // Pass ownership to the unique_ptr in {jobs_}.
+ // Pass ownership to the unique_ptr in {async_compile_jobs_}.
base::MutexGuard guard(&mutex_);
- jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
+ async_compile_jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
return job;
}
std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
AsyncCompileJob* job) {
base::MutexGuard guard(&mutex_);
- auto item = jobs_.find(job);
- DCHECK(item != jobs_.end());
+ auto item = async_compile_jobs_.find(job);
+ DCHECK(item != async_compile_jobs_.end());
std::unique_ptr<AsyncCompileJob> result = std::move(item->second);
- jobs_.erase(item);
+ async_compile_jobs_.erase(item);
return result;
}
bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
base::MutexGuard guard(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
- for (auto& entry : jobs_) {
+ for (auto& entry : async_compile_jobs_) {
if (entry.first->isolate() == isolate) return true;
}
return false;
}
void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
- base::MutexGuard guard(&mutex_);
- DCHECK_EQ(1, isolates_.count(isolate));
- for (auto it = jobs_.begin(); it != jobs_.end();) {
- if (it->first->isolate() == isolate) {
- it = jobs_.erase(it);
- } else {
- ++it;
+  // Under the mutex, collect all jobs to delete. Then delete them without
+  // holding the mutex, so that deletion can reenter the WasmEngine.
+ std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
+ {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
+ for (auto it = async_compile_jobs_.begin();
+ it != async_compile_jobs_.end();) {
+ if (it->first->isolate() != isolate) {
+ ++it;
+ continue;
+ }
+ jobs_to_delete.push_back(std::move(it->second));
+ it = async_compile_jobs_.erase(it);
}
}
}
@@ -327,19 +429,98 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
void WasmEngine::AddIsolate(Isolate* isolate) {
base::MutexGuard guard(&mutex_);
DCHECK_EQ(0, isolates_.count(isolate));
- isolates_.insert(isolate);
+ isolates_.emplace(isolate, base::make_unique<IsolateInfo>(isolate));
+
+ // Install sampling GC callback.
+ // TODO(v8:7424): For now we sample module sizes in a GC callback. This will
+ // bias samples towards apps with high memory pressure. We should switch to
+ // using sampling based on regular intervals independent of the GC.
+ auto callback = [](v8::Isolate* v8_isolate, v8::GCType type,
+ v8::GCCallbackFlags flags, void* data) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ WasmEngine* engine = isolate->wasm_engine();
+ base::MutexGuard lock(&engine->mutex_);
+ DCHECK_EQ(1, engine->isolates_.count(isolate));
+ for (NativeModule* native_module :
+ engine->isolates_[isolate]->native_modules) {
+ int code_size =
+ static_cast<int>(native_module->committed_code_space() / MB);
+ isolate->counters()->wasm_module_code_size_mb()->AddSample(code_size);
+ }
+ };
+ isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
+ nullptr);
}
void WasmEngine::RemoveIsolate(Isolate* isolate) {
base::MutexGuard guard(&mutex_);
+ auto it = isolates_.find(isolate);
+ DCHECK_NE(isolates_.end(), it);
+ for (NativeModule* native_module : it->second->native_modules) {
+ DCHECK_EQ(1, isolates_per_native_module_[native_module].count(isolate));
+ isolates_per_native_module_[native_module].erase(isolate);
+ }
+ if (auto* task = it->second->log_codes_task) task->Cancel();
+ isolates_.erase(it);
+}
+
+void WasmEngine::LogCode(WasmCode* code) {
+ base::MutexGuard guard(&mutex_);
+ NativeModule* native_module = code->native_module();
+ DCHECK_EQ(1, isolates_per_native_module_.count(native_module));
+ for (Isolate* isolate : isolates_per_native_module_[native_module]) {
+ DCHECK_EQ(1, isolates_.count(isolate));
+ IsolateInfo* info = isolates_[isolate].get();
+    if (!info->log_codes) continue;
+ if (info->log_codes_task == nullptr) {
+ auto new_task = base::make_unique<LogCodesTask>(
+ &mutex_, &info->log_codes_task, isolate);
+ info->log_codes_task = new_task.get();
+ info->foreground_task_runner->PostTask(std::move(new_task));
+ }
+ info->log_codes_task->AddCode(code);
+ }
+}
+
+void WasmEngine::EnableCodeLogging(Isolate* isolate) {
+ base::MutexGuard guard(&mutex_);
+ auto it = isolates_.find(isolate);
+ DCHECK_NE(isolates_.end(), it);
+ it->second->log_codes = true;
+}
+
+std::unique_ptr<NativeModule> WasmEngine::NewNativeModule(
+ Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
+ bool can_request_more, std::shared_ptr<const WasmModule> module) {
+ std::unique_ptr<NativeModule> native_module =
+ code_manager_.NewNativeModule(this, isolate, enabled, code_size_estimate,
+ can_request_more, std::move(module));
+ base::MutexGuard lock(&mutex_);
+ isolates_per_native_module_[native_module.get()].insert(isolate);
DCHECK_EQ(1, isolates_.count(isolate));
- isolates_.erase(isolate);
+ isolates_[isolate]->native_modules.insert(native_module.get());
+ return native_module;
+}
+
+void WasmEngine::FreeNativeModule(NativeModule* native_module) {
+ {
+ base::MutexGuard guard(&mutex_);
+ auto it = isolates_per_native_module_.find(native_module);
+ DCHECK_NE(isolates_per_native_module_.end(), it);
+ for (Isolate* isolate : it->second) {
+ DCHECK_EQ(1, isolates_.count(isolate));
+ DCHECK_EQ(1, isolates_[isolate]->native_modules.count(native_module));
+ isolates_[isolate]->native_modules.erase(native_module);
+ }
+ isolates_per_native_module_.erase(it);
+ }
+ code_manager_.FreeNativeModule(native_module);
}
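[Note: NewNativeModule and FreeNativeModule above maintain a two-way mapping under the engine mutex: each IsolateInfo lists the native modules an isolate uses, and isolates_per_native_module_ lists the isolates per module, so LogCode can fan out to every interested isolate and teardown can erase both directions. A standalone sketch with integer ids standing in for the real pointers:]

#include <set>
#include <unordered_map>

struct Registry {
  std::unordered_map<int, std::set<int>> modules_per_isolate;
  std::unordered_map<int, std::set<int>> isolates_per_module;

  void Register(int isolate, int module) {
    modules_per_isolate[isolate].insert(module);
    isolates_per_module[module].insert(isolate);
  }

  void FreeModule(int module) {
    for (int isolate : isolates_per_module[module]) {
      modules_per_isolate[isolate].erase(module);
    }
    isolates_per_module.erase(module);  // erase both directions
  }
};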
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<WasmEngine>,
- GetSharedWasmEngine);
+ GetSharedWasmEngine)
} // namespace
@@ -367,6 +548,12 @@ uint32_t max_mem_pages() {
return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
}
+// {max_table_init_entries} is declared in wasm-limits.h.
+uint32_t max_table_init_entries() {
+ return std::min(uint32_t{kV8MaxWasmTableInitEntries},
+ FLAG_wasm_max_table_size);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 4aa9331268..01c353e3c9 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -8,6 +8,7 @@
#include <memory>
#include <unordered_set>
+#include "src/cancelable-task.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-tier.h"
@@ -146,6 +147,35 @@ class V8_EXPORT_PRIVATE WasmEngine {
void AddIsolate(Isolate* isolate);
void RemoveIsolate(Isolate* isolate);
+ template <typename T, typename... Args>
+ std::unique_ptr<T> NewBackgroundCompileTask(Args&&... args) {
+ return base::make_unique<T>(&background_compile_task_manager_,
+ std::forward<Args>(args)...);
+ }
+
+ // Trigger code logging for this WasmCode in all Isolates which have access to
+ // the NativeModule containing this code. This method can be called from
+ // background threads.
+ void LogCode(WasmCode*);
+
+ // Enable code logging for the given Isolate. Initially, code logging is
+ // enabled if {WasmCode::ShouldBeLogged(Isolate*)} returns true during
+ // {AddIsolate}.
+ void EnableCodeLogging(Isolate*);
+
+ // Create a new NativeModule. The caller is responsible for its
+ // lifetime. The native module will be given some memory for code,
+ // which will be page size aligned. The size of the initial memory
+ // is determined with a heuristic based on the total size of wasm
+ // code. The native module may later request more memory.
+ // TODO(titzer): isolate is only required here for CompilationState.
+ std::unique_ptr<NativeModule> NewNativeModule(
+ Isolate* isolate, const WasmFeatures& enabled_features,
+ size_t code_size_estimate, bool can_request_more,
+ std::shared_ptr<const WasmModule> module);
+
+ void FreeNativeModule(NativeModule*);
+
// Call on process start and exit.
static void InitializeOncePerProcess();
static void GlobalTearDown();
@@ -155,6 +185,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
static std::shared_ptr<WasmEngine> GetWasmEngine();
private:
+ struct IsolateInfo;
+
AsyncCompileJob* CreateAsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
std::unique_ptr<byte[]> bytes_copy, size_t length,
@@ -165,6 +197,10 @@ class V8_EXPORT_PRIVATE WasmEngine {
WasmCodeManager code_manager_;
AccountingAllocator allocator_;
+ // Task manager managing all background compile jobs. Before shut down of the
+ // engine, they must all be finished because they access the allocator.
+ CancelableTaskManager background_compile_task_manager_;
+
// This mutex protects all information which is mutated concurrently or
// fields that are initialized lazily on the first access.
base::Mutex mutex_;
@@ -174,13 +210,19 @@ class V8_EXPORT_PRIVATE WasmEngine {
// We use an AsyncCompileJob as the key for itself so that we can delete the
// job from the map when it is finished.
- std::unordered_map<AsyncCompileJob*, std::unique_ptr<AsyncCompileJob>> jobs_;
+ std::unordered_map<AsyncCompileJob*, std::unique_ptr<AsyncCompileJob>>
+ async_compile_jobs_;
std::unique_ptr<CompilationStatistics> compilation_stats_;
std::unique_ptr<CodeTracer> code_tracer_;
- // Set of isolates which use this WasmEngine. Used for cross-isolate GCs.
- std::unordered_set<Isolate*> isolates_;
+ // Set of isolates which use this WasmEngine.
+ std::unordered_map<Isolate*, std::unique_ptr<IsolateInfo>> isolates_;
+
+ // Maps each NativeModule to the set of Isolates that have access to that
+ // NativeModule. The isolate sets currently only grow, they never shrink.
+ std::unordered_map<NativeModule*, std::unordered_set<Isolate*>>
+ isolates_per_native_module_;
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 9fc3b707c4..0dcd3edf70 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -10,6 +10,7 @@
#include "include/v8config.h"
#include "src/base/bits.h"
+#include "src/base/ieee754.h"
#include "src/memcopy.h"
#include "src/utils.h"
#include "src/v8memory.h"
@@ -245,7 +246,7 @@ uint32_t word32_ror_wrapper(Address data) {
void float64_pow_wrapper(Address data) {
double x = ReadUnalignedValue<double>(data);
double y = ReadUnalignedValue<double>(data + sizeof(x));
- WriteUnalignedValue<double>(data, Pow(x, y));
+ WriteUnalignedValue<double>(data, base::ieee754::pow(x, y));
}
void memory_copy_wrapper(Address dst, Address src, uint32_t size) {
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 711c747d8e..d32ac3a788 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -23,6 +23,8 @@
SEPARATOR \
V(bigint, "JS BigInt support", false) \
SEPARATOR \
- V(bulk_memory, "bulk memory opcodes", false)
+ V(bulk_memory, "bulk memory opcodes", false) \
+ SEPARATOR \
+ V(return_call, "return call opcodes", false)
#endif // V8_WASM_WASM_FEATURE_FLAGS_H_
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 8e75ad233f..aca754095a 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -638,6 +638,8 @@ const char* OpcodeName(uint32_t val) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}
+constexpr uint32_t kCatchInArity = 1;
+
} // namespace
class SideTable;
@@ -754,6 +756,11 @@ class SideTable : public ZoneObject {
// bytecodes with their target, as well as determining whether the current
// bytecodes are within the true or false block of an else.
ZoneVector<Control> control_stack(&control_transfer_zone);
+ // It also maintains a stack of all nested {try} blocks to resolve local
+ // handler targets for potentially throwing operations. These exceptional
+ // control transfers are treated just like other branches in the resulting
+ // map. This stack contains indices into the above control stack.
+ ZoneVector<size_t> exception_stack(zone);
uint32_t stack_height = 0;
uint32_t func_arity =
static_cast<uint32_t>(code->function->sig->return_count());
@@ -770,6 +777,7 @@ class SideTable : public ZoneObject {
for (BytecodeIterator i(code->orig_start, code->orig_end, &code->locals);
i.has_next(); i.next()) {
WasmOpcode opcode = i.current();
+ uint32_t exceptional_stack_height = 0;
if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
bool unreachable = control_stack.back().unreachable;
if (unreachable) {
@@ -784,9 +792,20 @@ class SideTable : public ZoneObject {
DCHECK_GE(stack_height, stack_effect.first);
DCHECK_GE(kMaxUInt32, static_cast<uint64_t>(stack_height) -
stack_effect.first + stack_effect.second);
+ exceptional_stack_height = stack_height - stack_effect.first;
stack_height = stack_height - stack_effect.first + stack_effect.second;
if (stack_height > max_stack_height_) max_stack_height_ = stack_height;
}
+ if (!exception_stack.empty() && WasmOpcodes::IsThrowingOpcode(opcode)) {
+ // Record exceptional control flow from potentially throwing opcodes to
+ // the local handler if one is present. The stack height at the throw
+ // point is assumed to have popped all operands and not pushed any yet.
+ DCHECK_GE(control_stack.size() - 1, exception_stack.back());
+ const Control* c = &control_stack[exception_stack.back()];
+ if (!unreachable) c->else_label->Ref(i.pc(), exceptional_stack_height);
+ TRACE("handler @%u: %s -> try @%u\n", i.pc_offset(), OpcodeName(opcode),
+ static_cast<uint32_t>(c->pc - code->start));
+ }
switch (opcode) {
case kExprBlock:
case kExprLoop: {
@@ -839,6 +858,41 @@ class SideTable : public ZoneObject {
stack_height = c->end_label->target_stack_height;
break;
}
+ case kExprTry: {
+ BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
+ i.pc());
+ if (imm.type == kWasmVar) {
+ imm.sig = module->signatures[imm.sig_index];
+ }
+ TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
+ imm.in_arity(), imm.out_arity());
+ CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
+ imm.out_arity());
+ CLabel* catch_label =
+ CLabel::New(&control_transfer_zone, stack_height, kCatchInArity);
+ control_stack.emplace_back(i.pc(), end_label, catch_label,
+ imm.out_arity());
+ exception_stack.push_back(control_stack.size() - 1);
+ copy_unreachable();
+ break;
+ }
+ case kExprCatch: {
+ DCHECK_EQ(control_stack.size() - 1, exception_stack.back());
+ Control* c = &control_stack.back();
+ exception_stack.pop_back();
+ copy_unreachable();
+ TRACE("control @%u: Catch\n", i.pc_offset());
+ if (!control_parent().unreachable) {
+ c->end_label->Ref(i.pc(), stack_height);
+ }
+ DCHECK_NOT_NULL(c->else_label);
+ c->else_label->Bind(i.pc() + 1);
+ c->else_label->Finish(&map_, code->orig_start);
+ c->else_label = nullptr;
+ DCHECK_GE(stack_height, c->end_label->target_stack_height);
+ stack_height = c->end_label->target_stack_height + kCatchInArity;
+ break;
+ }
case kExprEnd: {
Control* c = &control_stack.back();
TRACE("control @%u: End\n", i.pc_offset());
@@ -894,6 +948,11 @@ class SideTable : public ZoneObject {
DCHECK_EQ(func_arity, stack_height);
}
+ bool HasEntryAt(pc_t from) {
+ auto result = map_.find(from);
+ return result != map_.end();
+ }
+
ControlTransferEntry& Lookup(pc_t from) {
auto result = map_.find(from);
DCHECK(result != map_.end());
@@ -1023,7 +1082,9 @@ struct ExternalCallResult {
// The function was executed and returned normally.
EXTERNAL_RETURNED,
// The function was executed, threw an exception, and the stack was unwound.
- EXTERNAL_UNWOUND
+ EXTERNAL_UNWOUND,
+ // The function was executed and threw an exception that was locally caught.
+ EXTERNAL_CAUGHT
};
Type type;
// If type is INTERNAL, this field holds the function to call internally.
@@ -1206,25 +1267,53 @@ class ThreadImpl {
return activations_[id].fp;
}
+ WasmInterpreter::Thread::ExceptionHandlingResult RaiseException(
+ Isolate* isolate, Handle<Object> exception) {
+ DCHECK_EQ(WasmInterpreter::TRAPPED, state_);
+ isolate->Throw(*exception); // Will check that none is pending.
+ if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
+ DCHECK_EQ(WasmInterpreter::STOPPED, state_);
+ return WasmInterpreter::Thread::UNWOUND;
+ }
+ state_ = WasmInterpreter::PAUSED;
+ return WasmInterpreter::Thread::HANDLED;
+ }
+
+ private:
// Handle a thrown exception. Returns whether the exception was handled inside
// the current activation. Unwinds the interpreted stack accordingly.
WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
Isolate* isolate) {
DCHECK(isolate->has_pending_exception());
- // TODO(wasm): Add wasm exception handling (would return HANDLED).
- USE(isolate->pending_exception());
- TRACE("----- UNWIND -----\n");
DCHECK_LT(0, activations_.size());
Activation& act = activations_.back();
- DCHECK_LE(act.fp, frames_.size());
- frames_.resize(act.fp);
- DCHECK_LE(act.sp, StackHeight());
- sp_ = stack_.get() + act.sp;
+ while (frames_.size() > act.fp) {
+ Frame& frame = frames_.back();
+ InterpreterCode* code = frame.code;
+ if (code->side_table->HasEntryAt(frame.pc)) {
+ TRACE("----- HANDLE -----\n");
+ // TODO(mstarzinger): Push a reference to the pending exception instead
+ // of a bogus {int32_t(0)} value here once the interpreter supports it.
+ USE(isolate->pending_exception());
+ Push(WasmValue(int32_t{0}));
+ isolate->clear_pending_exception();
+ frame.pc += JumpToHandlerDelta(code, frame.pc);
+ TRACE(" => handler #%zu (#%u @%zu)\n", frames_.size() - 1,
+ code->function->func_index, frame.pc);
+ return WasmInterpreter::Thread::HANDLED;
+ }
+ TRACE(" => drop frame #%zu (#%u @%zu)\n", frames_.size() - 1,
+ code->function->func_index, frame.pc);
+ sp_ = stack_.get() + frame.sp;
+ frames_.pop_back();
+ }
+ TRACE("----- UNWIND -----\n");
+ DCHECK_EQ(act.fp, frames_.size());
+ DCHECK_EQ(act.sp, StackHeight());
state_ = WasmInterpreter::STOPPED;
return WasmInterpreter::Thread::UNWOUND;
}
- private:
// Entries on the stack of functions being evaluated.
struct Frame {
InterpreterCode* code;
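
Note: the rewritten HandleException no longer drops the whole activation at once; it walks frames innermost-first, and a frame whose committed pc has a side-table entry hosts a local handler. A simplified model of that loop (stand-in types, not V8's):

    #include <cstddef>
    #include <vector>

    struct FrameSketch {
      bool has_local_handler;  // i.e. the side table has an entry at frame.pc
    };

    enum class HandlingResult { HANDLED, UNWOUND };

    HandlingResult HandleExceptionSketch(std::vector<FrameSketch>& frames,
                                         size_t activation_fp) {
      while (frames.size() > activation_fp) {
        if (frames.back().has_local_handler) {
          // Transfer control to the handler; execution resumes in-frame.
          return HandlingResult::HANDLED;
        }
        frames.pop_back();  // No handler here: drop the frame, keep looking.
      }
      // No handler anywhere in this activation: the exception stays pending
      // and escapes to whoever entered the interpreter.
      return HandlingResult::UNWOUND;
    }
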
@@ -1237,13 +1326,6 @@ class ThreadImpl {
sp_t llimit() { return plimit() + code->locals.type_list.size(); }
};
- struct Block {
- pc_t pc;
- sp_t sp;
- size_t fp;
- uint32_t arity;
- };
-
friend class InterpretedFrameImpl;
CodeMap* codemap_;
@@ -1272,17 +1354,24 @@ class ThreadImpl {
CommitPc(pc);
}
+ // Check if there is room for a function's activation.
+ void EnsureStackSpaceForCall(InterpreterCode* code) {
+ EnsureStackSpace(code->side_table->max_stack_height_ +
+ code->locals.type_list.size());
+ DCHECK_GE(StackHeight(), code->function->sig->parameter_count());
+ }
+
// Push a frame with arguments already on the stack.
void PushFrame(InterpreterCode* code) {
DCHECK_NOT_NULL(code);
DCHECK_NOT_NULL(code->side_table);
- EnsureStackSpace(code->side_table->max_stack_height_ +
- code->locals.type_list.size());
+ EnsureStackSpaceForCall(code);
++num_interpreted_calls_;
size_t arity = code->function->sig->parameter_count();
// The parameters will overlap the arguments already on the stack.
DCHECK_GE(StackHeight(), arity);
+
frames_.push_back({code, 0, StackHeight() - arity});
frames_.back().pc = InitLocals(code);
TRACE(" => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
@@ -1322,10 +1411,26 @@ class ThreadImpl {
return false;
}
+ void ReloadFromFrameOnException(Decoder* decoder, InterpreterCode** code,
+ pc_t* pc, pc_t* limit) {
+ Frame* top = &frames_.back();
+ *code = top->code;
+ *pc = top->pc;
+ *limit = top->code->end - top->code->start;
+ decoder->Reset(top->code->start, top->code->end);
+ }
+
int LookupTargetDelta(InterpreterCode* code, pc_t pc) {
return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
}
+ int JumpToHandlerDelta(InterpreterCode* code, pc_t pc) {
+ ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
+ DoStackTransfer(sp_ - (control_transfer_entry.sp_diff + kCatchInArity),
+ control_transfer_entry.target_arity);
+ return control_transfer_entry.pc_diff;
+ }
+
int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
DoStackTransfer(sp_ - control_transfer_entry.sp_diff,
@@ -1386,6 +1491,41 @@ class ThreadImpl {
return true;
}
+ // Returns true if the tail call was successful, false if the stack check
+ // failed.
+ bool DoReturnCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
+ pc_t* limit) V8_WARN_UNUSED_RESULT {
+ DCHECK_NOT_NULL(target);
+ DCHECK_NOT_NULL(target->side_table);
+ EnsureStackSpaceForCall(target);
+
+ ++num_interpreted_calls_;
+
+ Frame* top = &frames_.back();
+
+ // Drop everything except current parameters.
+ WasmValue* sp_dest = stack_.get() + top->sp;
+ size_t arity = target->function->sig->parameter_count();
+
+ DoStackTransfer(sp_dest, arity);
+
+ *limit = target->end - target->start;
+ decoder->Reset(target->start, target->end);
+
+ // Rebuild current frame to look like a call to callee.
+ top->code = target;
+ top->pc = 0;
+ top->sp = StackHeight() - arity;
+ top->pc = InitLocals(target);
+
+ *pc = top->pc;
+
+ TRACE(" => ReturnCall #%zu (#%u @%zu)\n", frames_.size() - 1,
+ target->function->func_index, top->pc);
+
+ return true;
+ }
+
// Copies {arity} values on the top of the stack down the stack to {dest},
// dropping the values in-between.
void DoStackTransfer(WasmValue* dest, size_t arity) {
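
Note: DoReturnCall above is the heart of the new return_call support. Rather than pushing a frame, it slides the callee's arguments down to the caller's frame base and rebuilds that frame in place, so chains of tail calls run in constant stack space. The frame-reuse idea in isolation (stand-in types, not V8's):

    #include <cstddef>
    #include <vector>

    struct Value {};
    struct Function { size_t param_count; };
    struct Frame { const Function* fn; size_t sp; };  // sp = frame base

    void DoReturnCallSketch(std::vector<Value>& stack, Frame& top,
                            const Function& target) {
      // The top param_count values are the callee's arguments; move them to
      // the caller's frame base, dropping the caller's locals and temporaries.
      size_t args_begin = stack.size() - target.param_count;
      for (size_t i = 0; i < target.param_count; ++i) {
        stack[top.sp + i] = stack[args_begin + i];
      }
      stack.resize(top.sp + target.param_count);
      // Rebuild the frame as if it were a call to target. The caller's frame
      // is gone, so returning from target returns to the caller's caller.
      top.fn = &target;
    }
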
@@ -1522,17 +1662,42 @@ class ThreadImpl {
return false;
}
+ template <typename type, typename op_type, typename func>
+ op_type ExecuteAtomicBinopBE(type val, Address addr, func op) {
+ type old_val;
+ type new_val;
+ old_val = ReadUnalignedValue<type>(addr);
+ do {
+ new_val =
+ ByteReverse(static_cast<type>(op(ByteReverse<type>(old_val), val)));
+ } while (!(std::atomic_compare_exchange_strong(
+ reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val)));
+ return static_cast<op_type>(ByteReverse<type>(old_val));
+ }
+
+ template <typename type>
+ type AdjustByteOrder(type param) {
+#if V8_TARGET_BIG_ENDIAN
+ return ByteReverse(param);
+#else
+ return param;
+#endif
+ }
+
bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
InterpreterCode* code, pc_t pc, int& len) {
+#if V8_TARGET_BIG_ENDIAN
+ constexpr bool kBigEndian = true;
+#else
+ constexpr bool kBigEndian = false;
+#endif
WasmValue result;
switch (opcode) {
-// Disabling on Mips as 32 bit atomics are not correctly laid out for load/store
-// on big endian and 64 bit atomics fail to compile.
-#if !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
-#define ATOMIC_BINOP_CASE(name, type, op_type, operation) \
+#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op) \
case kExpr##name: { \
type val; \
Address addr; \
+ op_type result; \
if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
&val)) { \
return false; \
@@ -1540,74 +1705,95 @@ class ThreadImpl {
static_assert(sizeof(std::atomic<type>) == sizeof(type), \
"Size mismatch for types std::atomic<" #type \
">, and " #type); \
- result = WasmValue(static_cast<op_type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), val))); \
- Push(result); \
+ if (kBigEndian) { \
+ auto oplambda = [](type a, type b) { return a op b; }; \
+ result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda); \
+ } else { \
+ result = static_cast<op_type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
+ } \
+ Push(WasmValue(result)); \
break; \
}
- ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange);
- ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t,
- atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add, +);
+ ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add, +);
+ ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add,
+ +);
+ ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub, -);
+ ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub, -);
+ ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub,
+ -);
+ ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and, &);
+ ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and, &);
+ ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t,
+ atomic_fetch_and, &);
+ ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or, |);
+ ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or, |);
+ ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or, |);
+ ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor, ^);
+ ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor, ^);
+ ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor,
+ ^);
+ ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange,
+ =);
+ ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t, atomic_exchange,
+ =);
ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
- atomic_exchange);
- ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add);
- ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub);
- ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t, atomic_fetch_and);
- ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or);
- ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor);
- ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange);
- ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t,
- atomic_exchange);
+ atomic_exchange, =);
+ ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add, +);
+ ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add, +);
+ ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add,
+ +);
+ ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add,
+ +);
+ ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub, -);
+ ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub, -);
+ ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub,
+ -);
+ ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub,
+ -);
+ ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and, &);
+ ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and, &);
+ ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t,
+ atomic_fetch_and, &);
+ ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t,
+ atomic_fetch_and, &);
+ ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or, |);
+ ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or, |);
+ ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or, |);
+ ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or, |);
+ ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor, ^);
+ ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor, ^);
+ ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor,
+ ^);
+ ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor,
+ ^);
+ ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange,
+ =);
+ ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t, atomic_exchange,
+ =);
ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
- atomic_exchange);
+ atomic_exchange, =);
ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
- atomic_exchange);
+ atomic_exchange, =);
#undef ATOMIC_BINOP_CASE
#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
case kExpr##name: { \
- type val; \
- type val2; \
+ type old_val; \
+ type new_val; \
Address addr; \
if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
- &val, &val2)) { \
+ &old_val, &new_val)) { \
return false; \
} \
static_assert(sizeof(std::atomic<type>) == sizeof(type), \
"Size mismatch for types std::atomic<" #type \
">, and " #type); \
+ old_val = AdjustByteOrder<type>(old_val); \
+ new_val = AdjustByteOrder<type>(new_val); \
std::atomic_compare_exchange_strong( \
- reinterpret_cast<std::atomic<type>*>(addr), &val, val2); \
- Push(WasmValue(static_cast<op_type>(val))); \
+ reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val); \
+ Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val)))); \
break; \
}
ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
@@ -1634,8 +1820,8 @@ class ThreadImpl {
static_assert(sizeof(std::atomic<type>) == sizeof(type), \
"Size mismatch for types std::atomic<" #type \
">, and " #type); \
- result = WasmValue(static_cast<op_type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr)))); \
+ result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
Push(result); \
break; \
}
@@ -1658,7 +1844,8 @@ class ThreadImpl {
static_assert(sizeof(std::atomic<type>) == sizeof(type), \
"Size mismatch for types std::atomic<" #type \
">, and " #type); \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), val); \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), \
+ AdjustByteOrder<type>(val)); \
break; \
}
ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
@@ -1669,7 +1856,6 @@ class ThreadImpl {
ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
#undef ATOMIC_STORE_CASE
-#endif // !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
default:
UNREACHABLE();
return false;
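
Note: ExecuteAtomicBinopBE exists because wasm memory stays little-endian even on big-endian hosts, so a read-modify-write must swap the loaded bytes, apply the operator in host order, swap back, and publish via a compare-exchange loop. The same pattern as a self-contained sketch (ByteReverse32 is an assumed helper; V8 has its own ByteReverse template):

    #include <atomic>
    #include <cstdint>

    uint32_t ByteReverse32(uint32_t v) {
      return (v >> 24) | ((v >> 8) & 0xff00u) | ((v << 8) & 0xff0000u) |
             (v << 24);
    }

    // fetch_add over a little-endian memory cell on a big-endian CPU.
    uint32_t AtomicFetchAddLE(std::atomic<uint32_t>* cell, uint32_t val) {
      uint32_t old_val = cell->load();
      uint32_t new_val;
      do {
        new_val = ByteReverse32(ByteReverse32(old_val) + val);
      } while (!cell->compare_exchange_strong(old_val, new_val));
      return ByteReverse32(old_val);  // previous value, in host byte order
    }
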
@@ -1900,15 +2086,18 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
- SHIFT_CASE(I32x4Shl, i32x4, int4, 4, a << imm.shift)
+ SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
+ static_cast<uint32_t>(a) << imm.shift)
SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
static_cast<uint32_t>(a) >> imm.shift)
- SHIFT_CASE(I16x8Shl, i16x8, int8, 8, a << imm.shift)
+ SHIFT_CASE(I16x8Shl, i16x8, int8, 8,
+ static_cast<uint16_t>(a) << imm.shift)
SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> imm.shift)
SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
static_cast<uint16_t>(a) >> imm.shift)
- SHIFT_CASE(I8x16Shl, i8x16, int16, 16, a << imm.shift)
+ SHIFT_CASE(I8x16Shl, i8x16, int16, 16,
+ static_cast<uint8_t>(a) << imm.shift)
SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> imm.shift)
SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
static_cast<uint8_t>(a) >> imm.shift)
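
Note: the extra static_casts in the Shl cases are undefined-behavior fixes, not functional changes. Left-shifting a negative signed value is UB in C++ (and shifting into the sign bit was UB before C++20), while unsigned shifts are fully defined and wrap modulo 2^N, which matches the wasm semantics. A short illustration (not V8 code):

    #include <cstdint>

    int32_t ShlLane(int32_t a, int shift) {
      // 'a << shift' is UB for negative 'a'; the unsigned shift is defined
      // for any 0 <= shift < 32 and produces the two's-complement result.
      return static_cast<int32_t>(static_cast<uint32_t>(a) << shift);
    }
    // ShlLane(-1, 1) == -2 on every conforming compiler, with no UB.
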
@@ -2080,6 +2269,87 @@ class ThreadImpl {
return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
}
+ void EncodeI32ExceptionValue(Handle<FixedArray> encoded_values,
+ uint32_t* encoded_index, uint32_t value) {
+ encoded_values->set((*encoded_index)++, Smi::FromInt(value >> 16));
+ encoded_values->set((*encoded_index)++, Smi::FromInt(value & 0xffff));
+ }
+
+ void EncodeI64ExceptionValue(Handle<FixedArray> encoded_values,
+ uint32_t* encoded_index, uint64_t value) {
+ EncodeI32ExceptionValue(encoded_values, encoded_index,
+ static_cast<uint32_t>(value >> 32));
+ EncodeI32ExceptionValue(encoded_values, encoded_index,
+ static_cast<uint32_t>(value));
+ }
+
+ // Allocate, initialize and throw a new exception. The exception values
+ // are popped off the operand stack. Returns true if the exception is
+ // handled locally by the interpreter, false otherwise (interpreter exits).
+ bool DoThrowException(const WasmException* exception,
+ uint32_t index) V8_WARN_UNUSED_RESULT {
+ Isolate* isolate = instance_object_->GetIsolate();
+ Handle<WasmExceptionTag> exception_tag(
+ WasmExceptionTag::cast(
+ instance_object_->exceptions_table()->get(index)),
+ isolate);
+ uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
+ Handle<Object> exception_object =
+ WasmExceptionPackage::New(isolate, exception_tag, encoded_size);
+ Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
+ WasmExceptionPackage::GetExceptionValues(isolate, exception_object));
+ // Encode the exception values on the operand stack into the exception
+ // package allocated above. This encoding has to be in sync with other
+ // backends so that exceptions can be passed between them.
+ const wasm::WasmExceptionSig* sig = exception->sig;
+ uint32_t encoded_index = 0;
+ for (size_t i = 0; i < sig->parameter_count(); ++i) {
+ WasmValue value = sp_[i - sig->parameter_count()];
+ switch (sig->GetParam(i)) {
+ case wasm::kWasmI32: {
+ uint32_t u32 = value.to_u32();
+ EncodeI32ExceptionValue(encoded_values, &encoded_index, u32);
+ break;
+ }
+ case wasm::kWasmF32: {
+ uint32_t f32 = value.to_f32_boxed().get_bits();
+ EncodeI32ExceptionValue(encoded_values, &encoded_index, f32);
+ break;
+ }
+ case wasm::kWasmI64: {
+ uint64_t u64 = value.to_u64();
+ EncodeI64ExceptionValue(encoded_values, &encoded_index, u64);
+ break;
+ }
+ case wasm::kWasmF64: {
+ uint64_t f64 = value.to_f64_boxed().get_bits();
+ EncodeI64ExceptionValue(encoded_values, &encoded_index, f64);
+ break;
+ }
+ case wasm::kWasmAnyRef:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ DCHECK_EQ(encoded_size, encoded_index);
+ PopN(static_cast<int>(sig->parameter_count()));
+ // Now that the exception is ready, set it as pending.
+ isolate->Throw(*exception_object);
+ return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
+ }
+
+ // Throw a given existing exception. Returns true if the exception is being
+ // handled locally by the interpreter, false otherwise (interpreter exits).
+ bool DoRethrowException(WasmValue* exception) {
+ Isolate* isolate = instance_object_->GetIsolate();
+ // TODO(mstarzinger): Use the passed {exception} here once reference types
+ // as values on the operand stack are supported by the interpreter.
+ isolate->ReThrow(*isolate->factory()->undefined_value());
+ return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
+ }
+
void Execute(InterpreterCode* code, pc_t pc, int max) {
DCHECK_NOT_NULL(code->side_table);
DCHECK(!frames_.empty());
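
Note: the encoding helpers above split each 32-bit word into two 16-bit halves stored as Smis, keeping the exception package pointer-free and byte-for-byte compatible with what compiled code produces; an i64 becomes four Smis, most significant half first. The round trip, sketched with a plain vector standing in for the FixedArray:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void EncodeI32(std::vector<uint16_t>& out, uint32_t value) {
      out.push_back(static_cast<uint16_t>(value >> 16));     // high half
      out.push_back(static_cast<uint16_t>(value & 0xffff));  // low half
    }

    uint32_t DecodeI32(const std::vector<uint16_t>& in, size_t* index) {
      uint32_t hi = in[(*index)++];
      uint32_t lo = in[(*index)++];
      return (hi << 16) | lo;
    }
    // An i64 is two i32 encodings back to back: four 16-bit slots total.
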
@@ -2152,13 +2422,9 @@ class ThreadImpl {
switch (orig) {
case kExprNop:
break;
- case kExprBlock: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
- &decoder, code->at(pc));
- len = 1 + imm.length;
- break;
- }
- case kExprLoop: {
+ case kExprBlock:
+ case kExprLoop:
+ case kExprTry: {
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
&decoder, code->at(pc));
len = 1 + imm.length;
@@ -2179,11 +2445,28 @@ class ThreadImpl {
}
break;
}
- case kExprElse: {
+ case kExprElse:
+ case kExprCatch: {
len = LookupTargetDelta(code, pc);
TRACE(" end => @%zu\n", pc + len);
break;
}
+ case kExprThrow: {
+ ExceptionIndexImmediate<Decoder::kNoValidate> imm(&decoder,
+ code->at(pc));
+ CommitPc(pc); // Needed for local unwinding.
+ const WasmException* exception = &module()->exceptions[imm.index];
+ if (!DoThrowException(exception, imm.index)) return;
+ ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
+ continue; // Do not bump pc.
+ }
+ case kExprRethrow: {
+ WasmValue ex = Pop();
+ CommitPc(pc); // Needed for local unwinding.
+ if (!DoRethrowException(&ex)) return;
+ ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
+ continue; // Do not bump pc.
+ }
case kExprSelect: {
WasmValue cond = Pop();
WasmValue fval = Pop();
@@ -2231,7 +2514,7 @@ class ThreadImpl {
size_t arity = code->function->sig->return_count();
if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
PAUSE_IF_BREAK_FLAG(AfterReturn);
- continue;
+ continue; // Do not bump pc.
}
case kExprUnreachable: {
return DoTrap(kTrapUnreachable, pc);
@@ -2299,8 +2582,7 @@ class ThreadImpl {
switch (result.type) {
case ExternalCallResult::INTERNAL:
// The import is a function of this instance. Call it directly.
- target = result.interpreter_code;
- DCHECK(!target->function->imported);
+ DCHECK(!result.interpreter_code->function->imported);
break;
case ExternalCallResult::INVALID_FUNC:
case ExternalCallResult::SIGNATURE_MISMATCH:
@@ -2312,6 +2594,9 @@ class ThreadImpl {
break;
case ExternalCallResult::EXTERNAL_UNWOUND:
return;
+ case ExternalCallResult::EXTERNAL_CAUGHT:
+ ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
+ continue; // Do not bump pc.
}
if (result.type != ExternalCallResult::INTERNAL) break;
}
@@ -2319,8 +2604,9 @@ class ThreadImpl {
if (!DoCall(&decoder, target, &pc, &limit)) return;
code = target;
PAUSE_IF_BREAK_FLAG(AfterCall);
- continue; // don't bump pc
+ continue; // Do not bump pc.
} break;
+
case kExprCallIndirect: {
CallIndirectImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
@@ -2337,7 +2623,7 @@ class ThreadImpl {
return;
code = result.interpreter_code;
PAUSE_IF_BREAK_FLAG(AfterCall);
- continue; // don't bump pc
+ continue; // Do not bump pc.
case ExternalCallResult::INVALID_FUNC:
return DoTrap(kTrapFuncInvalid, pc);
case ExternalCallResult::SIGNATURE_MISMATCH:
@@ -2348,8 +2634,96 @@ class ThreadImpl {
break;
case ExternalCallResult::EXTERNAL_UNWOUND:
return;
+ case ExternalCallResult::EXTERNAL_CAUGHT:
+ ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
+ continue; // Do not bump pc.
+ }
+ } break;
+
+ case kExprReturnCall: {
+ CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
+ code->at(pc));
+ InterpreterCode* target = codemap()->GetCode(imm.index);
+
+ if (!target->function->imported) {
+ // Enter the found internal function directly.
+ if (!DoReturnCall(&decoder, target, &pc, &limit)) return;
+ code = target;
+ PAUSE_IF_BREAK_FLAG(AfterCall);
+
+ continue; // Do not bump pc.
+ }
+ // Function is imported.
+ CommitPc(pc);
+ ExternalCallResult result =
+ CallImportedFunction(target->function->func_index);
+ switch (result.type) {
+ case ExternalCallResult::INTERNAL:
+ // Cannot import internal functions.
+ case ExternalCallResult::INVALID_FUNC:
+ case ExternalCallResult::SIGNATURE_MISMATCH:
+ // Direct calls are checked statically.
+ UNREACHABLE();
+ case ExternalCallResult::EXTERNAL_RETURNED:
+ len = 1 + imm.length;
+ break;
+ case ExternalCallResult::EXTERNAL_UNWOUND:
+ return;
+ case ExternalCallResult::EXTERNAL_CAUGHT:
+ ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
+ continue;
+ }
+ size_t arity = code->function->sig->return_count();
+ if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
+ PAUSE_IF_BREAK_FLAG(AfterReturn);
+ continue;
+ } break;
+
+ case kExprReturnCallIndirect: {
+ CallIndirectImmediate<Decoder::kNoValidate> imm(&decoder,
+ code->at(pc));
+ uint32_t entry_index = Pop().to<uint32_t>();
+ // Assume only one table for now.
+ DCHECK_LE(module()->tables.size(), 1u);
+ CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
+
+ // TODO(wasm): Calling functions needs some refactoring to avoid
+ // multi-exit code like this.
+ ExternalCallResult result =
+ CallIndirectFunction(0, entry_index, imm.sig_index);
+ switch (result.type) {
+ case ExternalCallResult::INTERNAL: {
+ InterpreterCode* target = result.interpreter_code;
+
+ DCHECK(!target->function->imported);
+
+ // The function belongs to this instance. Enter it directly.
+ if (!DoReturnCall(&decoder, target, &pc, &limit)) return;
+ code = result.interpreter_code;
+ PAUSE_IF_BREAK_FLAG(AfterCall);
+ continue; // Do not bump pc.
+ }
+ case ExternalCallResult::INVALID_FUNC:
+ return DoTrap(kTrapFuncInvalid, pc);
+ case ExternalCallResult::SIGNATURE_MISMATCH:
+ return DoTrap(kTrapFuncSigMismatch, pc);
+ case ExternalCallResult::EXTERNAL_RETURNED: {
+ len = 1 + imm.length;
+
+ size_t arity = code->function->sig->return_count();
+ if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
+ PAUSE_IF_BREAK_FLAG(AfterCall);
+ break;
+ }
+ case ExternalCallResult::EXTERNAL_UNWOUND:
+ return;
+
+ case ExternalCallResult::EXTERNAL_CAUGHT:
+ ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
+ break;
}
} break;
+
case kExprGetGlobal: {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
@@ -2604,9 +2978,9 @@ class ThreadImpl {
if (pc == limit) {
// Fell off end of code; do an implicit return.
TRACE("@%-3zu: ImplicitReturn\n", pc);
- if (!DoReturn(&decoder, &code, &pc, &limit,
- code->function->sig->return_count()))
- return;
+ size_t arity = code->function->sig->return_count();
+ DCHECK_EQ(StackHeight() - arity, frames_.back().llimit());
+ if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
PAUSE_IF_BREAK_FLAG(AfterReturn);
}
#undef PAUSE_IF_BREAK_FLAG
@@ -2648,7 +3022,9 @@ class ThreadImpl {
for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
DCHECK_NE(kWasmStmt, val->type());
}
- memcpy(sp_, vals, arity * sizeof(*sp_));
+ if (arity > 0) {
+ memcpy(sp_, vals, arity * sizeof(*sp_));
+ }
sp_ += arity;
}
@@ -2659,7 +3035,9 @@ class ThreadImpl {
base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
std::unique_ptr<WasmValue[]> new_stack(new WasmValue[new_size]);
- memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
+ if (old_size > 0) {
+ memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
+ }
sp_ = new_stack.get() + (sp_ - stack_.get());
stack_ = std::move(new_stack);
stack_limit_ = stack_.get() + new_size;
@@ -2707,21 +3085,24 @@ class ThreadImpl {
}
ExternalCallResult TryHandleException(Isolate* isolate) {
+ DCHECK(isolate->has_pending_exception()); // Assume exceptional return.
if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
return {ExternalCallResult::EXTERNAL_UNWOUND};
}
- return {ExternalCallResult::EXTERNAL_RETURNED};
+ return {ExternalCallResult::EXTERNAL_CAUGHT};
}
ExternalCallResult CallExternalWasmFunction(Isolate* isolate,
Handle<Object> object_ref,
const WasmCode* code,
FunctionSig* sig) {
+ int num_args = static_cast<int>(sig->parameter_count());
wasm::WasmFeatures enabled_features =
wasm::WasmFeaturesFromIsolate(isolate);
if (code->kind() == WasmCode::kWasmToJsWrapper &&
!IsJSCompatibleSignature(sig, enabled_features.bigint)) {
+ sp_ -= num_args; // Pop arguments before throwing.
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kWasmTrapTypeError));
return TryHandleException(isolate);
@@ -2736,7 +3117,6 @@ class ThreadImpl {
// Copy the arguments to one buffer.
// TODO(clemensh): Introduce a helper for all argument buffer
// con-/destruction.
- int num_args = static_cast<int>(sig->parameter_count());
std::vector<uint8_t> arg_buffer(num_args * 8);
size_t offset = 0;
WasmValue* wasm_args = sp_ - num_args;
@@ -2797,6 +3177,9 @@ class ThreadImpl {
TRACE(" => External wasm function returned%s\n",
maybe_retval.is_null() ? " with exception" : "");
+ // Pop arguments off the stack.
+ sp_ -= num_args;
+
if (maybe_retval.is_null()) {
// JSEntry may throw a stack overflow before we actually get to wasm code
// or back to the interpreter, meaning the thread-in-wasm flag won't be
@@ -2809,8 +3192,6 @@ class ThreadImpl {
trap_handler::ClearThreadInWasm();
- // Pop arguments off the stack.
- sp_ -= num_args;
// Push return values.
if (sig->return_count() > 0) {
// TODO(wasm): Handle multiple returns.
@@ -2883,6 +3264,9 @@ class ThreadImpl {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
}
+ if (code->function->imported) {
+ return CallImportedFunction(code->function->func_index);
+ }
return {ExternalCallResult::INTERNAL, code};
}
@@ -3030,8 +3414,9 @@ WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) {
void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); }
WasmInterpreter::Thread::ExceptionHandlingResult
-WasmInterpreter::Thread::HandleException(Isolate* isolate) {
- return ToImpl(this)->HandleException(isolate);
+WasmInterpreter::Thread::RaiseException(Isolate* isolate,
+ Handle<Object> exception) {
+ return ToImpl(this)->RaiseException(isolate, exception);
}
pc_t WasmInterpreter::Thread::GetBreakpointPc() {
return ToImpl(this)->GetBreakpointPc();
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index 31021a5d4e..1de6a491b6 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -85,13 +85,14 @@ struct InterpretedFrameDeleter {
class V8_EXPORT_PRIVATE WasmInterpreter {
public:
// State machine for a Thread:
- // +---------Run()/Step()--------+
- // V |
- // STOPPED ---Run()--> RUNNING ------Pause()-----+-> PAUSED
- // ^ | | | | /
- // +- HandleException -+ | | +--- Breakpoint ---+
- // | |
- // | +---------- Trap --------------> TRAPPED
+ // +----------------------------------------------------------+
+ // | +--------Run()/Step()---------+ |
+ // V V | |
+ // STOPPED ---Run()--> RUNNING ------Pause()-----+-> PAUSED <--+
+ // ^ | | | | / |
+ // +--- Exception ---+ | | +--- Breakpoint ---+ RaiseException() <--+
+ // | | |
+ // | +---------- Trap --------------> TRAPPED --------+
// +----------- Finish -------------> FINISHED
enum State { STOPPED, RUNNING, PAUSED, FINISHED, TRAPPED };
@@ -121,9 +122,12 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
State Step() { return Run(1); }
void Pause();
void Reset();
- // Handle the pending exception in the passed isolate. Unwind the stack
- // accordingly. Return whether the exception was handled inside wasm.
- ExceptionHandlingResult HandleException(Isolate* isolate);
+
+ // Raise an exception in the current activation and unwind the stack
+ // accordingly. Return whether the exception was handled inside wasm:
+ // - HANDLED: Activation at handler position and in {PAUSED} state.
+ // - UNWOUND: Frames unwound, exception pending, and in {STOPPED} state.
+ ExceptionHandlingResult RaiseException(Isolate*, Handle<Object> exception);
// Stack inspection and modification.
pc_t GetBreakpointPc();
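
Note: callers that previously forwarded a pending exception through HandleException now raise it explicitly on a TRAPPED thread and branch on the two outcomes documented above. A usage sketch (not copied from V8's call sites; the surrounding setup is assumed):

    #include "src/wasm/wasm-interpreter.h"

    void ThrowIntoWasm(WasmInterpreter::Thread* thread, Isolate* isolate,
                       Handle<Object> exception) {
      // Precondition: the thread is in the TRAPPED state.
      WasmInterpreter::Thread::ExceptionHandlingResult result =
          thread->RaiseException(isolate, exception);
      if (result == WasmInterpreter::Thread::HANDLED) {
        // PAUSED at the local catch handler; resume execution from there.
        thread->Step();
      } else {
        // UNWOUND: the activation's frames are gone, the thread is STOPPED,
        // and the exception is left pending on the isolate.
      }
    }
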
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 4ad7d49076..302002b7c9 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -19,7 +19,9 @@
#include "src/objects/js-promise-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
+#include "src/task-utils.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/v8.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
@@ -69,15 +71,24 @@ class WasmStreaming::WasmStreamingImpl {
void SetClient(std::shared_ptr<Client> client) {
// There are no other event notifications so just pass client to decoder.
- // Wrap the client with a callback here so we can also wrap the result.
+ // Wrap the client with a callback that delivers the notification from
+ // a new foreground task.
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
+ v8::Platform* platform = i::V8::GetCurrentPlatform();
+ std::shared_ptr<TaskRunner> foreground_task_runner =
+ platform->GetForegroundTaskRunner(isolate_);
streaming_decoder_->SetModuleCompiledCallback(
- [client](const std::shared_ptr<i::wasm::NativeModule>& native_module) {
- client->OnModuleCompiled(Utils::Convert(native_module));
+ [client, i_isolate, foreground_task_runner](
+ const std::shared_ptr<i::wasm::NativeModule>& native_module) {
+ foreground_task_runner->PostTask(
+ i::MakeCancelableTask(i_isolate, [client, native_module] {
+ client->OnModuleCompiled(Utils::Convert(native_module));
+ }));
});
}
private:
- Isolate* isolate_ = nullptr;
+ Isolate* const isolate_;
std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
std::shared_ptr<internal::wasm::CompilationResultResolver> resolver_;
};
@@ -689,6 +700,11 @@ void WebAssemblyModuleCustomSections(
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
+ if (args[1]->IsUndefined()) {
+ thrower.TypeError("Argument 1 is required");
+ return;
+ }
+
i::MaybeHandle<i::Object> maybe_name =
i::Object::ToString(i_isolate, Utils::OpenHandle(*args[1]));
i::Handle<i::Object> name;
@@ -707,7 +723,7 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
i::MaybeHandle<i::Object> instance_object;
{
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
// TODO(ahaas): These checks on the module should not be necessary here. They
// are just a workaround for https://crbug.com/837417.
@@ -846,7 +862,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
i_isolate->CountUsage(
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
HandleScope scope(isolate);
@@ -1022,14 +1038,14 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
int64_t initial = 0;
if (!GetRequiredIntegerProperty(isolate, &thrower, context, descriptor,
v8_str(isolate, "initial"), &initial, 0,
- i::FLAG_wasm_max_table_size)) {
+ i::wasm::max_table_init_entries())) {
return;
}
// The descriptor's 'maximum'.
int64_t maximum = -1;
if (!GetOptionalIntegerProperty(isolate, &thrower, context, descriptor,
v8_str(isolate, "maximum"), &maximum, initial,
- i::wasm::kSpecMaxWasmTableSize)) {
+ i::wasm::max_table_init_entries())) {
return;
}
@@ -1076,41 +1092,40 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (enabled_features.threads) {
// Shared property of descriptor
Local<String> shared_key = v8_str(isolate, "shared");
- Maybe<bool> has_shared = descriptor->Has(context, shared_key);
- if (!has_shared.IsNothing() && has_shared.FromJust()) {
- v8::MaybeLocal<v8::Value> maybe = descriptor->Get(context, shared_key);
- v8::Local<v8::Value> value;
- if (maybe.ToLocal(&value)) {
- is_shared_memory = value->BooleanValue(isolate);
- }
+ v8::MaybeLocal<v8::Value> maybe_value =
+ descriptor->Get(context, shared_key);
+ v8::Local<v8::Value> value;
+ if (maybe_value.ToLocal(&value)) {
+ is_shared_memory = value->BooleanValue(isolate);
}
// Throw TypeError if shared is true, and the descriptor has no "maximum"
if (is_shared_memory && maximum == -1) {
thrower.TypeError(
"If shared is true, maximum property should be defined.");
+ return;
}
}
- i::SharedFlag shared_flag =
- is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared;
- i::Handle<i::JSArrayBuffer> buffer;
- size_t size = static_cast<size_t>(i::wasm::kWasmPageSize) *
- static_cast<size_t>(initial);
- if (!i::wasm::NewArrayBuffer(i_isolate, size, shared_flag)
- .ToHandle(&buffer)) {
+ i::Handle<i::JSObject> memory_obj;
+ if (!i::WasmMemoryObject::New(i_isolate, static_cast<uint32_t>(initial),
+ static_cast<uint32_t>(maximum),
+ is_shared_memory)
+ .ToHandle(&memory_obj)) {
thrower.RangeError("could not allocate memory");
return;
}
- if (buffer->is_shared()) {
+ if (is_shared_memory) {
+ i::Handle<i::JSArrayBuffer> buffer(
+ i::Handle<i::WasmMemoryObject>::cast(memory_obj)->array_buffer(),
+ i_isolate);
Maybe<bool> result =
buffer->SetIntegrityLevel(buffer, i::FROZEN, i::kDontThrow);
if (!result.FromJust()) {
thrower.TypeError(
"Status of setting SetIntegrityLevel of buffer is false.");
+ return;
}
}
- i::Handle<i::JSObject> memory_obj = i::WasmMemoryObject::New(
- i_isolate, buffer, static_cast<uint32_t>(maximum));
args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
}
@@ -1314,7 +1329,7 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::FixedArray> old_array(receiver->functions(), i_isolate);
+ i::Handle<i::FixedArray> old_array(receiver->elements(), i_isolate);
uint32_t old_size = static_cast<uint32_t>(old_array->length());
uint64_t max_size64 = receiver->maximum_length()->Number();
@@ -1342,7 +1357,7 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Object null = i::ReadOnlyRoots(i_isolate).null_value();
for (uint32_t i = old_size; i < new_size; ++i) new_array->set(i, null);
- receiver->set_functions(*new_array);
+ receiver->set_elements(*new_array);
}
// TODO(gdeepti): use weak links for instances
@@ -1358,7 +1373,7 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
- i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
+ i::Handle<i::FixedArray> array(receiver->elements(), i_isolate);
uint32_t index;
if (!EnforceUint32("Argument 0", args[0], context, &thrower, &index)) {
@@ -1398,7 +1413,7 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- if (index >= static_cast<uint64_t>(receiver->functions()->length())) {
+ if (index >= static_cast<uint64_t>(receiver->elements()->length())) {
thrower.RangeError("index out of bounds");
return;
}
@@ -1545,7 +1560,7 @@ void WebAssemblyGlobalSetValue(
return;
}
if (args[0]->IsUndefined()) {
- thrower.TypeError("Argument 0: must be a value");
+ thrower.TypeError("Argument 0 is required");
return;
}
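
Note: the streaming hunk above is a thread-safety fix; the compiled-module notification used to fire on whichever background thread finished compilation and is now bounced to the isolate's foreground task runner. The general pattern against the public v8::Platform API (LambdaTask and PostToForeground are illustrative; MakeCancelableTask is the V8-internal equivalent):

    #include <functional>
    #include <memory>
    #include <utility>
    #include "v8-platform.h"

    class LambdaTask : public v8::Task {
     public:
      explicit LambdaTask(std::function<void()> fn) : fn_(std::move(fn)) {}
      void Run() override { fn_(); }
     private:
      std::function<void()> fn_;
    };

    void PostToForeground(v8::Platform* platform, v8::Isolate* isolate,
                          std::function<void()> callback) {
      std::shared_ptr<v8::TaskRunner> runner =
          platform->GetForegroundTaskRunner(isolate);
      // Safe to call from any thread; the task itself runs on the isolate's
      // thread, where touching JS heap state is legal.
      runner->PostTask(std::make_unique<LambdaTask>(std::move(callback)));
    }
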
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index 0fed6e9628..c7c95aca26 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -38,13 +38,16 @@ constexpr size_t kV8MaxWasmFunctionMultiReturns = 1000;
constexpr size_t kV8MaxWasmFunctionReturns = 1;
// Don't use this limit directly, but use the value of FLAG_wasm_max_table_size.
constexpr size_t kV8MaxWasmTableSize = 10000000;
-constexpr size_t kV8MaxWasmTableEntries = 10000000;
+constexpr size_t kV8MaxWasmTableInitEntries = 10000000;
constexpr size_t kV8MaxWasmTables = 1;
constexpr size_t kV8MaxWasmMemories = 1;
static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages,
"v8 should not be more permissive than the spec");
-constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
+static_assert(kV8MaxWasmTableSize <= 4294967295, // 2^32 - 1
+ "v8 should not exceed WebAssembly's non-web embedding limits");
+static_assert(kV8MaxWasmTableInitEntries <= kV8MaxWasmTableSize,
+ "JS-API should not exceed v8's limit");
constexpr uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
@@ -55,6 +58,7 @@ constexpr uint64_t kWasmMaxHeapOffset =
// TODO(wasm): Make this size_t for wasm64. Currently the --wasm-max-mem-pages
// flag is only uint32_t.
uint32_t max_mem_pages();
+uint32_t max_table_init_entries();
inline uint64_t max_mem_bytes() {
return uint64_t{max_mem_pages()} * kWasmPageSize;
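
Note: max_table_init_entries() is declared here but defined elsewhere; by analogy with max_mem_pages() it presumably clamps the runtime flag to the compile-time constant. An assumed shape, not the actual definition:

    #include <algorithm>

    // Assumption: mirrors max_mem_pages(), clamping the --wasm-max-table-size
    // flag to kV8MaxWasmTableInitEntries.
    uint32_t max_table_init_entries() {
      return std::min(static_cast<uint32_t>(FLAG_wasm_max_table_size),
                      static_cast<uint32_t>(kV8MaxWasmTableInitEntries));
    }
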
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index b4aee28d78..78ea260f64 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -27,8 +27,19 @@ void AddAllocationStatusSample(Isolate* isolate,
static_cast<int>(status));
}
+size_t GetAllocationLength(uint32_t size, bool require_full_guard_regions) {
+ if (require_full_guard_regions) {
+ return RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize());
+ } else {
+ return RoundUp(
+ base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
+ kWasmPageSize);
+ }
+}
+
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
- size_t size, void** allocation_base,
+ size_t size, size_t max_size,
+ void** allocation_base,
size_t* allocation_length) {
using AllocationStatus = WasmMemoryTracker::AllocationStatus;
#if V8_TARGET_ARCH_64_BIT
@@ -42,6 +53,10 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
// require two GCs because the first GC maybe incremental and may have
// floating garbage.
static constexpr int kAllocationRetries = 2;
+ // TODO(7881): do not use static_cast<uint32_t>() here
+ uint32_t reservation_size =
+ static_cast<uint32_t>((max_size > size) ? max_size : size);
+ // TODO(8898): Cleanup the allocation retry flow
bool did_retry = false;
for (int trial = 0;; ++trial) {
// For guard regions, we always allocate the largest possible offset into
@@ -50,13 +65,8 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
//
// To protect against 32-bit integer overflow issues, we also protect the
// 2GiB before the valid part of the memory buffer.
- // TODO(7881): do not use static_cast<uint32_t>() here
*allocation_length =
- require_full_guard_regions
- ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
- : RoundUp(base::bits::RoundUpToPowerOfTwo32(
- static_cast<uint32_t>(size)),
- kWasmPageSize);
+ GetAllocationLength(reservation_size, require_full_guard_regions);
DCHECK_GE(*allocation_length, size);
DCHECK_GE(*allocation_length, kWasmPageSize);
@@ -67,6 +77,11 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
did_retry = true;
// After first and second GC: retry.
if (trial == kAllocationRetries) {
+ // Always reset reservation_size to initial size so that at least the
+ // initial size can be allocated if maximum size reservation is not
+ // possible.
+ reservation_size = static_cast<uint32_t>(size);
+
// If we fail to allocate guard regions and the fallback is enabled, then
// retry without full guard regions.
if (require_full_guard_regions && FLAG_wasm_trap_handler_fallback) {
@@ -157,7 +172,7 @@ WasmMemoryTracker::~WasmMemoryTracker() {
void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
Heap* heap, size_t size, void** allocation_base,
size_t* allocation_length) {
- return TryAllocateBackingStore(this, heap, size, allocation_base,
+ return TryAllocateBackingStore(this, heap, size, size, allocation_base,
allocation_length);
}
@@ -290,8 +305,10 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
return buffer;
}
-MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
- SharedFlag shared) {
+MaybeHandle<JSArrayBuffer> AllocateAndSetupArrayBuffer(Isolate* isolate,
+ size_t size,
+ size_t maximum_size,
+ SharedFlag shared) {
// Enforce flag-limited maximum allocation size.
if (size > max_mem_bytes()) return {};
@@ -302,7 +319,8 @@ MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
size_t allocation_length = 0;
void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
- &allocation_base, &allocation_length);
+ maximum_size, &allocation_base,
+ &allocation_length);
if (memory == nullptr) return {};
#if DEBUG
@@ -320,6 +338,18 @@ MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
return SetupArrayBuffer(isolate, memory, size, is_external, shared);
}
+MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
+ return AllocateAndSetupArrayBuffer(isolate, size, size,
+ SharedFlag::kNotShared);
+}
+
+MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(Isolate* isolate,
+ size_t initial_size,
+ size_t max_size) {
+ return AllocateAndSetupArrayBuffer(isolate, initial_size, max_size,
+ SharedFlag::kShared);
+}
+
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
bool free_memory) {
if (buffer->is_shared()) return; // Detaching shared buffers is impossible.
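
Note: the allocator now asks for address space covering the declared maximum up front (so growing a shared memory never needs to move the buffer) and only falls back to the initial size once the GC-retry budget is spent. The control flow, reduced to a self-contained sketch (TryReserve and CollectGarbage are placeholders for the page allocator and GC):

    #include <cstddef>
    #include <cstdlib>

    static void* TryReserve(size_t bytes) { return std::malloc(bytes); }
    static void CollectGarbage() {}

    void* AllocateBackingStoreSketch(size_t initial_size, size_t max_size) {
      size_t reservation = max_size > initial_size ? max_size : initial_size;
      constexpr int kRetries = 2;
      for (int trial = 0;; ++trial) {
        if (void* mem = TryReserve(reservation)) return mem;
        if (trial < kRetries) {
          CollectGarbage();  // free address space, then retry the same size
          continue;
        }
        if (reservation == initial_size) return nullptr;  // really out of room
        // Stop insisting on the maximum and restart the retry budget.
        reservation = initial_size;
        trial = -1;
      }
    }
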
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index 5fb4554cc2..09832146b7 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -142,8 +142,14 @@ class WasmMemoryTracker {
// Attempts to allocate an array buffer with guard regions suitable for trap
// handling. If address space is not available, it will return a buffer with
// mini-guards that will require bounds checks.
-MaybeHandle<JSArrayBuffer> NewArrayBuffer(
- Isolate*, size_t size, SharedFlag shared = SharedFlag::kNotShared);
+MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate*, size_t size);
+
+// Attempts to allocate a SharedArrayBuffer with guard regions suitable for
+// trap handling. If address space is not available, it will try to reserve
+// up to the maximum for that memory. If all else fails, it will return a
+// buffer with mini-guards of initial size.
+MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(Isolate*, size_t initial_size,
+ size_t max_size);
Handle<JSArrayBuffer> SetupArrayBuffer(
Isolate*, void* backing_store, size_t size, bool is_external,
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 131cda747c..e9b22a392c 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -9,6 +9,7 @@
#include "src/zone/zone-containers.h"
#include "src/v8memory.h"
+#include "src/vector.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
#include "src/wasm/wasm-opcodes.h"
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 75f6e98ca5..bd507e5c15 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -120,6 +120,10 @@ struct WasmElemSegment {
// Construct a passive segment, which has no table index or offset.
WasmElemSegment() : table_index(0), active(false) {}
+ // Used in the {entries} vector to represent a `ref.null` entry in a passive
+ // segment.
+ static const uint32_t kNullIndex = ~0u;
+
uint32_t table_index;
WasmInitExpr offset;
std::vector<uint32_t> entries;
@@ -169,6 +173,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t tagged_globals_buffer_size = 0;
uint32_t num_imported_mutable_globals = 0;
uint32_t num_imported_functions = 0;
+ uint32_t num_imported_tables = 0;
uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0;
uint32_t num_declared_data_segments = 0; // From the DataCount section.
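
Note: kNullIndex lets a passive element segment keep function indices and `ref.null` entries in one std::vector<uint32_t>; ~0u cannot collide with a real index since module function counts are nowhere near 2^32 - 1. Consuming such a segment, sketched with stand-in types:

    #include <cstdint>
    #include <vector>

    constexpr uint32_t kNullIndex = ~0u;  // mirrors WasmElemSegment::kNullIndex

    struct TableSketch { std::vector<const void*> slots; };

    void InitFromSegment(TableSketch& table,
                         const std::vector<uint32_t>& entries,
                         const std::vector<const void*>& functions) {
      table.slots.clear();
      for (uint32_t entry : entries) {
        table.slots.push_back(entry == kNullIndex ? nullptr
                                                  : functions[entry]);
      }
    }
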
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 9adcc94f12..9a25995203 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -8,12 +8,15 @@
#include "src/wasm/wasm-objects.h"
#include "src/contexts-inl.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-objects-inl.h"
#include "src/objects/managed.h"
#include "src/objects/oddball-inl.h"
+#include "src/objects/script-inl.h"
+#include "src/roots.h"
#include "src/v8memory.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
@@ -62,10 +65,10 @@ CAST_ACCESSOR(AsmWasmData)
#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
type holder::name() const { \
- return READ_PRIMITIVE_FIELD(this, type, offset); \
+ return READ_PRIMITIVE_FIELD(*this, type, offset); \
} \
void holder::set_##name(type value) { \
- WRITE_PRIMITIVE_FIELD(this, type, offset, value); \
+ WRITE_PRIMITIVE_FIELD(*this, type, offset, value); \
}
// WasmModuleObject
@@ -91,7 +94,7 @@ const wasm::WasmModule* WasmModuleObject::module() const {
return native_module()->module();
}
void WasmModuleObject::reset_breakpoint_infos() {
- WRITE_FIELD(this, kBreakPointInfosOffset,
+ WRITE_FIELD(*this, kBreakPointInfosOffset,
GetReadOnlyRoots().undefined_value());
}
bool WasmModuleObject::is_asm_js() {
@@ -102,7 +105,7 @@ bool WasmModuleObject::is_asm_js() {
}
// WasmTableObject
-ACCESSORS(WasmTableObject, functions, FixedArray, kFunctionsOffset)
+ACCESSORS(WasmTableObject, elements, FixedArray, kElementsOffset)
ACCESSORS(WasmTableObject, maximum_length, Object, kMaximumLengthOffset)
ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
@@ -222,6 +225,7 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
kDebugInfoOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, table_object, WasmTableObject,
kTableObjectOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, tables, FixedArray, kTablesOffset)
ACCESSORS(WasmInstanceObject, imported_function_refs, FixedArray,
kImportedFunctionRefsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray,
@@ -233,6 +237,8 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, exceptions_table, FixedArray,
ACCESSORS(WasmInstanceObject, undefined_value, Oddball, kUndefinedValueOffset)
ACCESSORS(WasmInstanceObject, null_value, Oddball, kNullValueOffset)
ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_exported_functions, FixedArray,
+ kWasmExportedFunctionsOffset)
inline bool WasmInstanceObject::has_indirect_function_table() {
return indirect_function_table_sig_ids() != nullptr;
@@ -295,7 +301,7 @@ OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
#undef WRITE_PRIMITIVE_FIELD
#undef PRIMITIVE_ACCESSORS
-uint32_t WasmTableObject::current_length() { return functions()->length(); }
+uint32_t WasmTableObject::current_length() { return elements()->length(); }
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 392ddd4ca8..80e4f0f110 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -16,9 +16,11 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/struct-inl.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/vector.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/module-instantiate.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
@@ -209,7 +211,7 @@ Handle<WasmModuleObject> WasmModuleObject::New(
// Create a new {NativeModule} first.
size_t code_size_estimate =
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(shared_module.get());
- auto native_module = isolate->wasm_engine()->code_manager()->NewNativeModule(
+ auto native_module = isolate->wasm_engine()->NewNativeModule(
isolate, enabled, code_size_estimate,
wasm::NativeModule::kCanAllocateMoreMemory, std::move(shared_module));
native_module->SetWireBytes(std::move(wire_bytes));
@@ -782,16 +784,19 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
auto table_obj = Handle<WasmTableObject>::cast(
isolate->factory()->NewJSObject(table_ctor));
- *js_functions = isolate->factory()->NewFixedArray(initial);
+ Handle<FixedArray> backing_store = isolate->factory()->NewFixedArray(initial);
Object null = ReadOnlyRoots(isolate).null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
- (*js_functions)->set(i, null);
+ backing_store->set(i, null);
}
- table_obj->set_functions(**js_functions);
+ table_obj->set_elements(*backing_store);
Handle<Object> max = isolate->factory()->NewNumberFromUint(maximum);
table_obj->set_maximum_length(*max);
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
+ if (js_functions != nullptr) {
+ *js_functions = backing_store;
+ }
return Handle<WasmTableObject>::cast(table_obj);
}
@@ -824,7 +829,7 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
Handle<FixedArray> dispatch_tables(this->dispatch_tables(), isolate);
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
- uint32_t old_size = functions()->length();
+ uint32_t old_size = elements()->length();
// Tables are stored in the instance object, no code patching is
// necessary. We simply have to grow the raw tables in each instance
@@ -845,7 +850,7 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
uint32_t table_index, Handle<JSFunction> function) {
- Handle<FixedArray> array(table->functions(), isolate);
+ Handle<FixedArray> array(table->elements(), isolate);
if (function.is_null()) {
ClearDispatchTables(isolate, table, table_index); // Degenerate case.
array->set(table_index, ReadOnlyRoots(isolate).null_value());
@@ -915,30 +920,7 @@ MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
// Blink's array buffers. The connection between the two is lost, which can
// lead to Blink not knowing about the other reference to the buffer and
// freeing it too early.
- if (!old_buffer->is_external() &&
- ((new_size < old_buffer->allocation_length()) || old_size == new_size)) {
- if (old_size != new_size) {
- DCHECK_NOT_NULL(old_buffer->backing_store());
- // If adjusting permissions fails, propagate error back to return
- // failure to grow.
- if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start,
- new_size, PageAllocator::kReadWrite)) {
- return {};
- }
- DCHECK_GE(new_size, old_size);
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(new_size - old_size);
- }
- // NOTE: We must allocate a new array buffer here because the spec
- // assumes that ArrayBuffers do not change size.
- void* backing_store = old_buffer->backing_store();
- bool is_external = old_buffer->is_external();
- // Disconnect buffer early so GC won't free it.
- i::wasm::DetachMemoryBuffer(isolate, old_buffer, false);
- Handle<JSArrayBuffer> new_buffer =
- wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external);
- return new_buffer;
- } else {
+ if (old_buffer->is_external() || new_size > old_buffer->allocation_length()) {
// We couldn't reuse the old backing store, so create a new one and copy the
// old contents in.
Handle<JSArrayBuffer> new_buffer;
@@ -960,6 +942,28 @@ MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
constexpr bool free_memory = true;
i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
return new_buffer;
+ } else {
+ if (old_size != new_size) {
+ DCHECK_NOT_NULL(old_buffer->backing_store());
+      // If adjusting permissions fails, propagate the error back to signal
+      // that the grow operation failed.
+ if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start,
+ new_size, PageAllocator::kReadWrite)) {
+ return {};
+ }
+ DCHECK_GE(new_size, old_size);
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(new_size - old_size);
+ }
+ // NOTE: We must allocate a new array buffer here because the spec
+ // assumes that ArrayBuffers do not change size.
+ void* backing_store = old_buffer->backing_store();
+ bool is_external = old_buffer->is_external();
+ // Disconnect buffer early so GC won't free it.
+ i::wasm::DetachMemoryBuffer(isolate, old_buffer, false);
+ Handle<JSArrayBuffer> new_buffer =
+ wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external);
+ return new_buffer;
}
}
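
The restructured branch above first handles the case where the old backing store cannot be reused (an external buffer, or a requested size beyond the up-front reservation), and only then falls through to in-place growth. A minimal standalone sketch of that decision logic, using illustrative names rather than V8 types:

#include <cstdint>
#include <cstdlib>
#include <cstring>

// Illustrative sketch, not V8 API: grow in place when the new size still
// fits the up-front reservation, otherwise allocate fresh and copy.
struct Buffer {
  uint8_t* data;
  size_t size;      // currently accessible bytes
  size_t reserved;  // bytes reserved up front (guard-region style)
};

bool GrowBuffer(Buffer* buf, size_t new_size) {
  if (new_size > buf->reserved) {
    // Cannot reuse the reservation: allocate a new store and copy, as in
    // the first branch above.
    uint8_t* fresh = static_cast<uint8_t*>(std::calloc(new_size, 1));
    if (fresh == nullptr) return false;
    std::memcpy(fresh, buf->data, buf->size);
    std::free(buf->data);
    buf->data = fresh;
    buf->reserved = new_size;
  }
  // In-place growth: only the accessible size changes. V8 additionally
  // flips page permissions and re-wraps the store in a new JSArrayBuffer,
  // since the spec assumes ArrayBuffers never change size.
  buf->size = new_size;
  return true;
}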
@@ -1009,6 +1013,28 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
return memory_obj;
}
+MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
+ uint32_t initial,
+ uint32_t maximum,
+ bool is_shared_memory) {
+ Handle<JSArrayBuffer> buffer;
+ size_t size = static_cast<size_t>(i::wasm::kWasmPageSize) *
+ static_cast<size_t>(initial);
+ if (is_shared_memory) {
+ size_t max_size = static_cast<size_t>(i::wasm::kWasmPageSize) *
+ static_cast<size_t>(maximum);
+ if (!i::wasm::NewSharedArrayBuffer(isolate, size, max_size)
+ .ToHandle(&buffer)) {
+ return {};
+ }
+ } else {
+ if (!i::wasm::NewArrayBuffer(isolate, size).ToHandle(&buffer)) {
+ return {};
+ }
+ }
+ return New(isolate, buffer, maximum);
+}
+
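
The new overload converts page counts to byte sizes before allocating; the casts matter because kWasmPageSize * initial could overflow uint32_t. A small sketch of that arithmetic (using the 64 KiB page size defined by Wasm):

#include <cstddef>
#include <cstdint>

// Page-to-byte math mirroring the overload above. Widening to size_t
// before the multiply avoids uint32_t overflow for large page counts.
constexpr size_t kWasmPageSize = 64 * 1024;

constexpr size_t PagesToBytes(uint32_t pages) {
  return static_cast<size_t>(kWasmPageSize) * static_cast<size_t>(pages);
}

static_assert(PagesToBytes(16) == 1024 * 1024, "16 pages == 1 MiB");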
bool WasmMemoryObject::has_full_guard_region(Isolate* isolate) {
const wasm::WasmMemoryTracker::AllocationData* allocation =
isolate->wasm_engine()->memory_tracker()->FindAllocationData(
@@ -1054,7 +1080,9 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
uint32_t pages) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory");
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
- if (!old_buffer->is_growable()) return -1;
+  // TODO(gdeepti): Remove the is_shared check once growing shared memory
+  // is supported.
+ if (!old_buffer->is_growable() || old_buffer->is_shared()) return -1;
// Checks for maximum memory size, compute new size.
uint32_t maximum_pages = wasm::max_mem_pages();
@@ -1435,14 +1463,17 @@ void CopyTableEntriesImpl(Handle<WasmInstanceObject> instance, uint32_t dst,
// static
bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
Handle<WasmInstanceObject> instance,
- uint32_t table_index, uint32_t dst,
- uint32_t src, uint32_t count) {
- CHECK_EQ(0, table_index); // TODO(titzer): multiple tables in TableCopy
- if (count == 0) return true; // no-op
+ uint32_t table_src_index,
+ uint32_t table_dst_index,
+ uint32_t dst, uint32_t src,
+ uint32_t count) {
+ // TODO(titzer): multiple tables in TableCopy
+ CHECK_EQ(0, table_src_index);
+ CHECK_EQ(0, table_dst_index);
auto max = instance->indirect_function_table_size();
if (!IsInBounds(dst, count, max)) return false;
if (!IsInBounds(src, count, max)) return false;
- if (dst == src) return true; // no-op
+ if (dst == src) return true; // no-op
if (!instance->has_table_object()) {
// No table object, only need to update this instance.
@@ -1464,7 +1495,7 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
}
// Copy the function entries.
- Handle<FixedArray> functions(table->functions(), isolate);
+ Handle<FixedArray> functions(table->elements(), isolate);
if (src < dst) {
for (uint32_t i = count; i > 0; i--) {
functions->set(dst + i - 1, functions->get(src + i - 1));
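
The copy direction above handles overlapping ranges: when the destination starts above the source, entries are copied from the top down so each slot is read before it is overwritten; the symmetric forward loop (assumed from the elided else branch) handles the other case. A self-contained sketch of the same memmove-style logic:

#include <cstddef>
#include <vector>

// Overlap-safe table copy: top-down when dst overlaps src from above,
// bottom-up otherwise.
void CopyEntries(std::vector<int>& table, size_t dst, size_t src,
                 size_t count) {
  if (dst == src) return;  // no-op
  if (src < dst) {
    for (size_t i = count; i > 0; i--) {
      table[dst + i - 1] = table[src + i - 1];
    }
  } else {
    for (size_t i = 0; i < count; i++) {
      table[dst + i] = table[src + i];
    }
  }
}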
@@ -1478,6 +1509,48 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
}
// static
+bool WasmInstanceObject::InitTableEntries(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t table_index,
+ uint32_t segment_index, uint32_t dst,
+ uint32_t src, uint32_t count) {
+ // Note that this implementation just calls through to module instantiation.
+ // This is intentional, so that the runtime only depends on the object
+ // methods, and not the module instantiation logic.
+ return wasm::LoadElemSegment(isolate, instance, table_index, segment_index,
+ dst, src, count);
+}
+
+MaybeHandle<WasmExportedFunction> WasmInstanceObject::GetWasmExportedFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int index) {
+ MaybeHandle<WasmExportedFunction> result;
+ if (instance->has_wasm_exported_functions()) {
+ Object val = instance->wasm_exported_functions()->get(index);
+ if (!val->IsUndefined(isolate)) {
+ result = Handle<WasmExportedFunction>(WasmExportedFunction::cast(val),
+ isolate);
+ }
+ }
+ return result;
+}
+
+void WasmInstanceObject::SetWasmExportedFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int index,
+ Handle<WasmExportedFunction> val) {
+ Handle<FixedArray> functions;
+ if (!instance->has_wasm_exported_functions()) {
+    // Lazily allocate the wasm exported functions.
+ functions = isolate->factory()->NewFixedArray(
+ static_cast<int>(instance->module()->functions.size()));
+ instance->set_wasm_exported_functions(*functions);
+ } else {
+ functions =
+ Handle<FixedArray>(instance->wasm_exported_functions(), isolate);
+ }
+ functions->set(index, *val);
+}
+
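
SetWasmExportedFunction materializes the exported-functions array only on the first write, and GetWasmExportedFunction treats a missing array as "no entry". An illustrative sketch of that lazy-cache shape, with plain C++ containers standing in for V8 handles:

#include <cstddef>
#include <optional>
#include <vector>

// Illustrative stand-in, not V8 types: the table is only allocated on the
// first Set, and Get reports "no entry" both for a never-allocated table
// and for an unfilled slot.
class ExportCache {
 public:
  explicit ExportCache(size_t function_count)
      : function_count_(function_count) {}

  std::optional<int> Get(size_t index) const {
    if (slots_.empty()) return std::nullopt;  // never allocated yet
    if (slots_[index] == kEmpty) return std::nullopt;
    return slots_[index];
  }

  void Set(size_t index, int value) {
    if (slots_.empty()) slots_.assign(function_count_, kEmpty);
    slots_[index] = value;
  }

 private:
  static constexpr int kEmpty = -1;  // plays the role of undefined
  size_t function_count_;
  std::vector<int> slots_;
};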
+// static
Handle<WasmExceptionObject> WasmExceptionObject::New(
Isolate* isolate, const wasm::FunctionSig* sig,
Handle<HeapObject> exception_tag) {
@@ -1517,6 +1590,106 @@ bool WasmExceptionObject::IsSignatureEqual(const wasm::FunctionSig* sig) {
return true;
}
+// static
+Handle<JSReceiver> WasmExceptionPackage::New(
+ Isolate* isolate, Handle<WasmExceptionTag> exception_tag, int size) {
+ Handle<Object> exception = isolate->factory()->NewWasmRuntimeError(
+ MessageTemplate::kWasmExceptionError);
+ CHECK(!Object::SetProperty(isolate, exception,
+ isolate->factory()->wasm_exception_tag_symbol(),
+ exception_tag, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
+ .is_null());
+ Handle<FixedArray> values = isolate->factory()->NewFixedArray(size);
+ CHECK(!Object::SetProperty(isolate, exception,
+ isolate->factory()->wasm_exception_values_symbol(),
+ values, StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
+ .is_null());
+ return Handle<JSReceiver>::cast(exception);
+}
+
+// static
+Handle<Object> WasmExceptionPackage::GetExceptionTag(
+ Isolate* isolate, Handle<Object> exception_object) {
+ if (exception_object->IsJSReceiver()) {
+ Handle<JSReceiver> exception = Handle<JSReceiver>::cast(exception_object);
+ Handle<Object> tag;
+ if (JSReceiver::GetProperty(isolate, exception,
+ isolate->factory()->wasm_exception_tag_symbol())
+ .ToHandle(&tag)) {
+ return tag;
+ }
+ }
+ return ReadOnlyRoots(isolate).undefined_value_handle();
+}
+
+// static
+Handle<Object> WasmExceptionPackage::GetExceptionValues(
+ Isolate* isolate, Handle<Object> exception_object) {
+ if (exception_object->IsJSReceiver()) {
+ Handle<JSReceiver> exception = Handle<JSReceiver>::cast(exception_object);
+ Handle<Object> values;
+ if (JSReceiver::GetProperty(
+ isolate, exception,
+ isolate->factory()->wasm_exception_values_symbol())
+ .ToHandle(&values)) {
+ DCHECK(values->IsFixedArray());
+ return values;
+ }
+ }
+ return ReadOnlyRoots(isolate).undefined_value_handle();
+}
+
+#ifdef DEBUG
+
+namespace {
+
+constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2;
+
+size_t ComputeEncodedElementSize(wasm::ValueType type) {
+ size_t byte_size =
+ static_cast<size_t>(wasm::ValueTypes::ElementSizeInBytes(type));
+ DCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0);
+ DCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement);
+ return byte_size / kBytesPerExceptionValuesArrayElement;
+}
+
+} // namespace
+
+#endif // DEBUG
+
+// static
+uint32_t WasmExceptionPackage::GetEncodedSize(
+ const wasm::WasmException* exception) {
+ const wasm::WasmExceptionSig* sig = exception->sig;
+ uint32_t encoded_size = 0;
+ for (size_t i = 0; i < sig->parameter_count(); ++i) {
+ switch (sig->GetParam(i)) {
+ case wasm::kWasmI32:
+ case wasm::kWasmF32:
+ DCHECK_EQ(2, ComputeEncodedElementSize(sig->GetParam(i)));
+ encoded_size += 2;
+ break;
+ case wasm::kWasmI64:
+ case wasm::kWasmF64:
+ DCHECK_EQ(4, ComputeEncodedElementSize(sig->GetParam(i)));
+ encoded_size += 4;
+ break;
+ case wasm::kWasmS128:
+ DCHECK_EQ(8, ComputeEncodedElementSize(sig->GetParam(i)));
+ encoded_size += 8;
+ break;
+ case wasm::kWasmAnyRef:
+ encoded_size += 1;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return encoded_size;
+}
+
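
Values in the exception package are packed two bytes per FixedArray slot (kBytesPerExceptionValuesArrayElement above), so a 4-byte i32 or f32 occupies 2 slots, an 8-byte i64 or f64 occupies 4, a 16-byte s128 occupies 8, and an anyref takes a single tagged slot. A compile-time check of that arithmetic:

#include <cstdint>

// Slot counts mirroring the switch in GetEncodedSize above.
enum class Val { I32, I64, F32, F64, S128, AnyRef };

constexpr uint32_t SlotsFor(Val v) {
  switch (v) {
    case Val::I32:
    case Val::F32:
      return 2;  // 4 bytes / 2 bytes per slot
    case Val::I64:
    case Val::F64:
      return 4;  // 8 bytes / 2 bytes per slot
    case Val::S128:
      return 8;  // 16 bytes / 2 bytes per slot
    case Val::AnyRef:
      return 1;  // one tagged slot
  }
  return 0;
}

template <typename... Vs>
constexpr uint32_t EncodedSize(Vs... vs) {
  return (0 + ... + SlotsFor(vs));  // C++17 fold expression
}

static_assert(EncodedSize(Val::I32, Val::I64, Val::F64, Val::AnyRef) == 11,
              "2 + 4 + 4 + 1 slots");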
bool WasmExportedFunction::IsWasmExportedFunction(Object object) {
if (!object->IsJSFunction()) return false;
JSFunction js_function = JSFunction::cast(object);
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 84aeb8972d..ba1c4abb4e 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -26,6 +26,7 @@ struct InterpretedFrameDeleter;
class NativeModule;
class SignatureMap;
class WasmCode;
+struct WasmException;
struct WasmFeatures;
class WasmInterpreter;
struct WasmModule;
@@ -36,8 +37,10 @@ class BreakPoint;
class JSArrayBuffer;
class SeqOneByteString;
class WasmDebugInfo;
+class WasmExceptionTag;
class WasmInstanceObject;
class WasmModuleObject;
+class WasmExportedFunction;
template <class CppType>
class Managed;
@@ -50,9 +53,9 @@ class Managed;
// The underlying storage in the instance is used by generated code to
// call functions indirectly at runtime.
// Each entry has the following fields:
-// - object = target instance, if a WASM function, tuple if imported
+// - object = target instance, if a Wasm function, tuple if imported
// - sig_id = signature id of function
-// - target = entrypoint to WASM code or import wrapper code
+// - target = entrypoint to Wasm code or import wrapper code
class IndirectFunctionTableEntry {
public:
inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int index);
@@ -211,7 +214,7 @@ class WasmModuleObject : public JSObject {
bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
// Get the source position from a given function index and byte offset,
- // for either asm.js or pure WASM modules.
+ // for either asm.js or pure Wasm modules.
static int GetSourcePosition(Handle<WasmModuleObject>, uint32_t func_index,
uint32_t byte_offset,
bool is_at_number_conversion);
@@ -243,7 +246,7 @@ class WasmModuleObject : public JSObject {
Handle<WasmModuleObject>,
int position);
- OBJECT_CONSTRUCTORS(WasmModuleObject, JSObject)
+ OBJECT_CONSTRUCTORS(WasmModuleObject, JSObject);
};
// Representation of a WebAssembly.Table JavaScript-level object.
@@ -251,14 +254,14 @@ class WasmTableObject : public JSObject {
public:
DECL_CAST(WasmTableObject)
- DECL_ACCESSORS(functions, FixedArray)
+ DECL_ACCESSORS(elements, FixedArray)
// TODO(titzer): introduce DECL_I64_ACCESSORS macro
DECL_ACCESSORS(maximum_length, Object)
DECL_ACCESSORS(dispatch_tables, FixedArray)
// Layout description.
#define WASM_TABLE_OBJECT_FIELDS(V) \
- V(kFunctionsOffset, kTaggedSize) \
+ V(kElementsOffset, kTaggedSize) \
V(kMaximumLengthOffset, kTaggedSize) \
V(kDispatchTablesOffset, kTaggedSize) \
V(kSize, 0)
@@ -288,7 +291,7 @@ class WasmTableObject : public JSObject {
static void ClearDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table, int index);
- OBJECT_CONSTRUCTORS(WasmTableObject, JSObject)
+ OBJECT_CONSTRUCTORS(WasmTableObject, JSObject);
};
// Representation of a WebAssembly.Memory JavaScript-level object.
@@ -323,9 +326,13 @@ class WasmMemoryObject : public JSObject {
V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, uint32_t maximum);
+ V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(
+ Isolate* isolate, uint32_t initial, uint32_t maximum,
+ bool is_shared_memory);
+
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
- OBJECT_CONSTRUCTORS(WasmMemoryObject, JSObject)
+ OBJECT_CONSTRUCTORS(WasmMemoryObject, JSObject);
};
// Representation of a WebAssembly.Global JavaScript-level object.
@@ -385,7 +392,7 @@ class WasmGlobalObject : public JSObject {
// not have a fixed address.
inline Address address() const;
- OBJECT_CONSTRUCTORS(WasmGlobalObject, JSObject)
+ OBJECT_CONSTRUCTORS(WasmGlobalObject, JSObject);
};
// Representation of a WebAssembly.Instance JavaScript-level object.
@@ -402,6 +409,7 @@ class WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(imported_mutable_globals_buffers, FixedArray)
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(table_object, WasmTableObject)
+ DECL_OPTIONAL_ACCESSORS(tables, FixedArray)
DECL_ACCESSORS(imported_function_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
@@ -409,6 +417,7 @@ class WasmInstanceObject : public JSObject {
DECL_ACCESSORS(undefined_value, Oddball)
DECL_ACCESSORS(null_value, Oddball)
DECL_ACCESSORS(centry_stub, Code)
+ DECL_OPTIONAL_ACCESSORS(wasm_exported_functions, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t)
@@ -427,6 +436,8 @@ class WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(dropped_data_segments, byte*)
DECL_PRIMITIVE_ACCESSORS(dropped_elem_segments, byte*)
+ // Clear uninitialized padding space. This ensures that the snapshot content
+  // is deterministic. Depending on the V8 build mode, there may be no padding.
V8_INLINE void clear_padding();
// Dispatched behavior.
@@ -445,6 +456,7 @@ class WasmInstanceObject : public JSObject {
V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \
V(kDebugInfoOffset, kTaggedSize) \
V(kTableObjectOffset, kTaggedSize) \
+ V(kTablesOffset, kTaggedSize) \
V(kImportedFunctionRefsOffset, kTaggedSize) \
V(kIndirectFunctionTableRefsOffset, kTaggedSize) \
V(kManagedNativeAllocationsOffset, kTaggedSize) \
@@ -452,6 +464,7 @@ class WasmInstanceObject : public JSObject {
V(kUndefinedValueOffset, kTaggedSize) \
V(kNullValueOffset, kTaggedSize) \
V(kCEntryStubOffset, kTaggedSize) \
+ V(kWasmExportedFunctionsOffset, kTaggedSize) \
V(kEndOfTaggedFieldsOffset, 0) \
/* Raw data. */ \
V(kIndirectFunctionTableSizeOffset, kUInt32Size) \
@@ -504,13 +517,30 @@ class WasmInstanceObject : public JSObject {
// Copies table entries. Returns {false} if the ranges are out-of-bounds.
static bool CopyTableEntries(Isolate* isolate,
Handle<WasmInstanceObject> instance,
- uint32_t table_index, uint32_t dst, uint32_t src,
+ uint32_t table_src_index,
+ uint32_t table_dst_index, uint32_t dst,
+ uint32_t src,
+ uint32_t count) V8_WARN_UNUSED_RESULT;
+
+  // Copies table entries from an element segment. Returns {false} if the
+  // ranges are out-of-bounds.
+ static bool InitTableEntries(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t table_index, uint32_t segment_index,
+ uint32_t dst, uint32_t src,
uint32_t count) V8_WARN_UNUSED_RESULT;
// Iterates all fields in the object except the untagged fields.
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject)
+ static MaybeHandle<WasmExportedFunction> GetWasmExportedFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int index);
+ static void SetWasmExportedFunction(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ int index,
+ Handle<WasmExportedFunction> val);
+
+ OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject);
private:
static void InitDataSegmentArrays(Handle<WasmInstanceObject>,
@@ -545,10 +575,29 @@ class WasmExceptionObject : public JSObject {
const wasm::FunctionSig* sig,
Handle<HeapObject> exception_tag);
- OBJECT_CONSTRUCTORS(WasmExceptionObject, JSObject)
+ OBJECT_CONSTRUCTORS(WasmExceptionObject, JSObject);
+};
+
+// A Wasm exception that has been thrown out of Wasm code.
+class WasmExceptionPackage : public JSReceiver {
+ public:
+ // TODO(mstarzinger): Ideally this interface would use {WasmExceptionPackage}
+  // instead of {JSReceiver} throughout. For now, however, a type-check implies
+  // a property lookup, which would require casts to be handlified.
+ static Handle<JSReceiver> New(Isolate* isolate,
+ Handle<WasmExceptionTag> exception_tag,
+ int encoded_size);
+
+  // The getters below return {undefined} if the given exception package
+  // does not carry the requested values (i.e. it is of a different type).
+ static Handle<Object> GetExceptionTag(Isolate*, Handle<Object> exception);
+ static Handle<Object> GetExceptionValues(Isolate*, Handle<Object> exception);
+
+ // Determines the size of the array holding all encoded exception values.
+ static uint32_t GetEncodedSize(const wasm::WasmException* exception);
};
-// A WASM function that is wrapped and exported to JavaScript.
+// A Wasm function that is wrapped and exported to JavaScript.
class WasmExportedFunction : public JSFunction {
public:
WasmInstanceObject instance();
@@ -567,7 +616,7 @@ class WasmExportedFunction : public JSFunction {
wasm::FunctionSig* sig();
DECL_CAST(WasmExportedFunction)
- OBJECT_CONSTRUCTORS(WasmExportedFunction, JSFunction)
+ OBJECT_CONSTRUCTORS(WasmExportedFunction, JSFunction);
};
// Information for a WasmExportedFunction which is referenced as the function
@@ -575,10 +624,10 @@ class WasmExportedFunction : public JSFunction {
// see the {SharedFunctionInfo::HasWasmExportedFunctionData} predicate.
class WasmExportedFunctionData : public Struct {
public:
- DECL_ACCESSORS(wrapper_code, Code);
+ DECL_ACCESSORS(wrapper_code, Code)
DECL_ACCESSORS(instance, WasmInstanceObject)
- DECL_INT_ACCESSORS(jump_table_offset);
- DECL_INT_ACCESSORS(function_index);
+ DECL_INT_ACCESSORS(jump_table_offset)
+ DECL_INT_ACCESSORS(function_index)
DECL_CAST(WasmExportedFunctionData)
@@ -598,15 +647,15 @@ class WasmExportedFunctionData : public Struct {
WASM_EXPORTED_FUNCTION_DATA_FIELDS)
#undef WASM_EXPORTED_FUNCTION_DATA_FIELDS
- OBJECT_CONSTRUCTORS(WasmExportedFunctionData, Struct)
+ OBJECT_CONSTRUCTORS(WasmExportedFunctionData, Struct);
};
class WasmDebugInfo : public Struct {
public:
NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
- DECL_ACCESSORS(interpreter_handle, Object); // Foreign or undefined
- DECL_ACCESSORS(interpreted_functions, FixedArray);
+ DECL_ACCESSORS(interpreter_handle, Object) // Foreign or undefined
+ DECL_ACCESSORS(interpreted_functions, FixedArray)
DECL_OPTIONAL_ACCESSORS(locals_names, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entries, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entry_map, Managed<wasm::SignatureMap>)
@@ -669,23 +718,16 @@ class WasmDebugInfo : public Struct {
std::unique_ptr<wasm::InterpretedFrame, wasm::InterpretedFrameDeleter>
GetInterpretedFrame(Address frame_pointer, int frame_index);
- // Unwind the interpreted stack belonging to the passed interpreter entry
- // frame.
- void Unwind(Address frame_pointer);
-
// Returns the number of calls / function frames executed in the interpreter.
uint64_t NumInterpretedCalls();
// Get scope details for a specific interpreted frame.
- // This returns a JSArray of length two: One entry for the global scope, one
- // for the local scope. Both elements are JSArrays of size
- // ScopeIterator::kScopeDetailsSize and layout as described in debug-scopes.h.
- // The global scope contains information about globals and the memory.
- // The local scope contains information about parameters, locals, and stack
- // values.
- static Handle<JSObject> GetScopeDetails(Handle<WasmDebugInfo>,
- Address frame_pointer,
- int frame_index);
+  // Both of these methods return a JSArray (for the global scope and local
+  // scope, respectively) of size {ScopeIterator::kScopeDetailsSize}, with the
+  // layout described in debug-scopes.h.
+ // - The global scope contains information about globals and the memory.
+ // - The local scope contains information about parameters, locals, and
+ // stack values.
static Handle<JSObject> GetGlobalScopeObject(Handle<WasmDebugInfo>,
Address frame_pointer,
int frame_index);
@@ -696,7 +738,7 @@ class WasmDebugInfo : public Struct {
static Handle<JSFunction> GetCWasmEntry(Handle<WasmDebugInfo>,
wasm::FunctionSig*);
- OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct)
+ OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct);
};
// Tags provide an object identity for each exception defined in a wasm module
@@ -710,22 +752,16 @@ class WasmExceptionTag : public Struct {
// Note that this index is only useful for debugging purposes and it is not
// unique across modules. The GC however does not allow objects without at
// least one field, hence this also serves as a padding field for now.
- DECL_INT_ACCESSORS(index);
+ DECL_INT_ACCESSORS(index)
DECL_CAST(WasmExceptionTag)
DECL_PRINTER(WasmExceptionTag)
DECL_VERIFIER(WasmExceptionTag)
-// Layout description.
-#define WASM_EXCEPTION_TAG_FIELDS(V) \
- V(kIndexOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, WASM_EXCEPTION_TAG_FIELDS)
-#undef WASM_EXCEPTION_TAG_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ TORQUE_GENERATED_WASM_EXCEPTION_TAG_FIELDS)
- OBJECT_CONSTRUCTORS(WasmExceptionTag, Struct)
+ OBJECT_CONSTRUCTORS(WasmExceptionTag, Struct);
};
class AsmWasmData : public Struct {
@@ -756,7 +792,7 @@ class AsmWasmData : public Struct {
DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, ASM_WASM_DATA_FIELDS)
#undef ASM_WASM_DATA_FIELDS
- OBJECT_CONSTRUCTORS(AsmWasmData, Struct)
+ OBJECT_CONSTRUCTORS(AsmWasmData, Struct);
};
#undef DECL_OPTIONAL_ACCESSORS
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index c8dfcf50e6..67e52a3a94 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -107,6 +107,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_FLOAT_OP(CopySign, "copysign")
CASE_REF_OP(Null, "null")
CASE_REF_OP(IsNull, "is_null")
+ CASE_REF_OP(Func, "func")
CASE_I32_OP(ConvertI64, "wrap/i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
@@ -137,6 +138,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(Return, "return")
CASE_OP(CallFunction, "call")
CASE_OP(CallIndirect, "call_indirect")
+ CASE_OP(ReturnCall, "return_call")
+ CASE_OP(ReturnCallIndirect, "return_call_indirect")
CASE_OP(Drop, "drop")
CASE_OP(Select, "select")
CASE_OP(GetLocal, "get_local")
@@ -144,6 +147,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(TeeLocal, "tee_local")
CASE_OP(GetGlobal, "get_global")
CASE_OP(SetGlobal, "set_global")
+ CASE_OP(GetTable, "get_table")
+ CASE_OP(SetTable, "set_table")
CASE_ALL_OP(Const, "const")
CASE_OP(MemorySize, "memory.size")
CASE_OP(MemoryGrow, "memory.grow")
@@ -198,11 +203,11 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
CASE_OP(MemoryInit, "memory.init")
- CASE_OP(MemoryDrop, "memory.drop")
+ CASE_OP(DataDrop, "data.drop")
CASE_OP(MemoryCopy, "memory.copy")
CASE_OP(MemoryFill, "memory.fill")
CASE_OP(TableInit, "table.init")
- CASE_OP(TableDrop, "table.drop")
+ CASE_OP(ElemDrop, "elem.drop")
CASE_OP(TableCopy, "table.copy")
// SIMD opcodes.
@@ -368,6 +373,19 @@ bool WasmOpcodes::IsAnyRefOpcode(WasmOpcode opcode) {
}
}
+bool WasmOpcodes::IsThrowingOpcode(WasmOpcode opcode) {
+ // TODO(8729): Trapping opcodes are not yet considered to be throwing.
+ switch (opcode) {
+ case kExprThrow:
+ case kExprRethrow:
+ case kExprCallFunction:
+ case kExprCallIndirect:
+ return true;
+ default:
+ return false;
+ }
+}
+
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index b4ed83474f..96e96f20b0 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -38,21 +38,26 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(Return, 0x0f, _)
// Constants, locals, globals, and calls.
-#define FOREACH_MISC_OPCODE(V) \
- V(CallFunction, 0x10, _) \
- V(CallIndirect, 0x11, _) \
- V(Drop, 0x1a, _) \
- V(Select, 0x1b, _) \
- V(GetLocal, 0x20, _) \
- V(SetLocal, 0x21, _) \
- V(TeeLocal, 0x22, _) \
- V(GetGlobal, 0x23, _) \
- V(SetGlobal, 0x24, _) \
- V(I32Const, 0x41, _) \
- V(I64Const, 0x42, _) \
- V(F32Const, 0x43, _) \
- V(F64Const, 0x44, _) \
- V(RefNull, 0xd0, _)
+#define FOREACH_MISC_OPCODE(V) \
+ V(CallFunction, 0x10, _) \
+ V(CallIndirect, 0x11, _) \
+ V(ReturnCall, 0x12, _) \
+ V(ReturnCallIndirect, 0x13, _) \
+ V(Drop, 0x1a, _) \
+ V(Select, 0x1b, _) \
+ V(GetLocal, 0x20, _) \
+ V(SetLocal, 0x21, _) \
+ V(TeeLocal, 0x22, _) \
+ V(GetGlobal, 0x23, _) \
+ V(SetGlobal, 0x24, _) \
+ V(GetTable, 0x25, _) \
+ V(SetTable, 0x26, _) \
+ V(I32Const, 0x41, _) \
+ V(I64Const, 0x42, _) \
+ V(F32Const, 0x43, _) \
+ V(F64Const, 0x44, _) \
+ V(RefNull, 0xd0, _) \
+ V(RefFunc, 0xd2, _)
// Load memory expressions.
#define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -410,11 +415,11 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I64SConvertSatF64, 0xfc06, l_d) \
V(I64UConvertSatF64, 0xfc07, l_d) \
V(MemoryInit, 0xfc08, v_iii) \
- V(MemoryDrop, 0xfc09, v_v) \
+ V(DataDrop, 0xfc09, v_v) \
V(MemoryCopy, 0xfc0a, v_iii) \
V(MemoryFill, 0xfc0b, v_iii) \
V(TableInit, 0xfc0c, v_iii) \
- V(TableDrop, 0xfc0d, v_v) \
+ V(ElemDrop, 0xfc0d, v_v) \
V(TableCopy, 0xfc0e, v_iii)
#define FOREACH_ATOMIC_OPCODE(V) \
@@ -586,6 +591,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static bool IsControlOpcode(WasmOpcode opcode);
static bool IsSignExtensionOpcode(WasmOpcode opcode);
static bool IsAnyRefOpcode(WasmOpcode opcode);
+ static bool IsThrowingOpcode(WasmOpcode opcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
static bool IsUnconditionalJump(WasmOpcode opcode);
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 824e838ae2..76de1ea303 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -9,7 +9,8 @@
#include <memory>
#include "src/base/compiler-specific.h"
-#include "src/utils.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
#include "src/globals.h"
@@ -122,12 +123,6 @@ class V8_EXPORT_PRIVATE ErrorThrower {
PRINTF_FORMAT(2, 3) void LinkError(const char* fmt, ...);
PRINTF_FORMAT(2, 3) void RuntimeError(const char* fmt, ...);
- void CompileFailed(const char* context, const WasmError& error) {
- DCHECK(error.has_error());
- CompileError("%s: %s @+%u", context, error.message().c_str(),
- error.offset());
- }
-
void CompileFailed(const WasmError& error) {
DCHECK(error.has_error());
CompileError("%s @+%u", error.message().c_str(), error.offset());
@@ -169,7 +164,7 @@ class V8_EXPORT_PRIVATE ErrorThrower {
// ErrorThrower should always be stack-allocated, since it constitutes a scope
// (things happen in the destructor).
- DISALLOW_NEW_AND_DELETE();
+ DISALLOW_NEW_AND_DELETE()
DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
};
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index a167b81cbd..aa27ba8035 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -9,6 +9,7 @@
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/ostreams.h"
+#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/serializer-common.h"
#include "src/utils.h"
@@ -135,12 +136,12 @@ void WriteVersion(Writer* writer) {
void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
DCHECK(rinfo->HasTargetAddressAddress());
- *(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
+ WriteUnalignedValue(rinfo->target_address_address(), tag);
#elif V8_TARGET_ARCH_ARM64
Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
if (instr->IsLdrLiteralX()) {
- Memory<Address>(rinfo->constant_pool_entry_address()) =
- static_cast<Address>(tag);
+ WriteUnalignedValue(rinfo->constant_pool_entry_address(),
+ static_cast<Address>(tag));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
instr->SetBranchImmTarget(
@@ -160,12 +161,11 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
- return *(reinterpret_cast<uint32_t*>(rinfo->target_address_address()));
+ return ReadUnalignedValue<uint32_t>(rinfo->target_address_address());
#elif V8_TARGET_ARCH_ARM64
Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
if (instr->IsLdrLiteralX()) {
- return static_cast<uint32_t>(
- Memory<Address>(rinfo->constant_pool_entry_address()));
+ return ReadUnalignedValue<uint32_t>(rinfo->constant_pool_entry_address());
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize);
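
The Memory<T>(addr) pattern removed throughout this commit dereferenced a possibly-misaligned address through a typed pointer, which is undefined behavior on strict-alignment targets. Read/WriteUnalignedValue avoid that; a sketch of their assumed memcpy-based semantics:

#include <cstdint>
#include <cstring>

using Address = uintptr_t;

// Assumed semantics of V8's helpers: route the access through memcpy so
// the compiler never sees a typed dereference of an unaligned address.
template <typename T>
T ReadUnalignedValue(Address addr) {
  T value;
  std::memcpy(&value, reinterpret_cast<const void*>(addr), sizeof(T));
  return value;
}

template <typename T>
void WriteUnalignedValue(Address addr, T value) {
  std::memcpy(reinterpret_cast<void*>(addr), &value, sizeof(T));
}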
@@ -195,6 +195,7 @@ constexpr size_t kCodeHeaderSize =
sizeof(size_t) + // offset of code comments
sizeof(size_t) + // unpadded binary size
sizeof(uint32_t) + // stack slots
+ sizeof(uint32_t) + // tagged parameter slots
sizeof(size_t) + // code size
sizeof(size_t) + // reloc size
sizeof(size_t) + // source positions size
@@ -241,15 +242,24 @@ class ExternalReferenceList {
std::end(tags_ordered_by_address_), addr_by_tag_less_than);
}
-#define COUNT_EXTERNAL_REFERENCE(name, desc) +1
- static constexpr uint32_t kNumExternalReferences =
+#define COUNT_EXTERNAL_REFERENCE(name, ...) +1
+ static constexpr uint32_t kNumExternalReferencesList =
EXTERNAL_REFERENCE_LIST(COUNT_EXTERNAL_REFERENCE);
+ static constexpr uint32_t kNumExternalReferencesIntrinsics =
+ FOR_EACH_INTRINSIC(COUNT_EXTERNAL_REFERENCE);
+ static constexpr uint32_t kNumExternalReferences =
+ kNumExternalReferencesList + kNumExternalReferencesIntrinsics;
#undef COUNT_EXTERNAL_REFERENCE
-#define EXT_REF_ADDR(name, desc) ExternalReference::name().address(),
Address external_reference_by_tag_[kNumExternalReferences] = {
- EXTERNAL_REFERENCE_LIST(EXT_REF_ADDR)};
+#define EXT_REF_ADDR(name, desc) ExternalReference::name().address(),
+ EXTERNAL_REFERENCE_LIST(EXT_REF_ADDR)
#undef EXT_REF_ADDR
+#define RUNTIME_ADDR(name, ...) \
+ ExternalReference::Create(Runtime::k##name).address(),
+ FOR_EACH_INTRINSIC(RUNTIME_ADDR)
+#undef RUNTIME_ADDR
+ };
uint32_t tags_ordered_by_address_[kNumExternalReferences];
DISALLOW_COPY_AND_ASSIGN(ExternalReferenceList);
};
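
The +1 counting trick above turns a higher-order macro list into an integer constant: each entry expands to the token "+1", and the concatenated expansion is an ordinary constant expression. A minimal reproduction (single-argument entries for brevity):

#include <cstdint>

#define MY_LIST(V) V(alpha) V(beta) V(gamma)

// Each list entry expands to "+1"; the whole expansion is "0 +1 +1 +1".
#define COUNT_ENTRY(name) +1
constexpr uint32_t kNumEntries = 0 MY_LIST(COUNT_ENTRY);
#undef COUNT_ENTRY

static_assert(kNumEntries == 3, "three entries in MY_LIST");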
@@ -289,9 +299,8 @@ NativeModuleSerializer::NativeModuleSerializer(
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
for (uint32_t i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
- Address addr =
- native_module_->runtime_stub(static_cast<WasmCode::RuntimeStubId>(i))
- ->instruction_start();
+ Address addr = native_module_->runtime_stub_entry(
+ static_cast<WasmCode::RuntimeStubId>(i));
wasm_stub_targets_lookup_.insert(std::make_pair(addr, i));
}
}
@@ -335,6 +344,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(code->code_comments_offset());
writer->Write(code->unpadded_binary_size());
writer->Write(code->stack_slots());
+ writer->Write(code->tagged_parameter_slots());
writer->Write(code->instructions().size());
writer->Write(code->reloc_info().size());
writer->Write(code->source_positions().size());
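
The new tagged_parameter_slots field must be written and read at the same position in the stream (with kCodeHeaderSize bumped accordingly, as above), because the format is a raw field-order contract between writer and reader. A distilled sketch of that symmetry; writing fields in one order and reading them back in another would silently corrupt every later field:

#include <cstdint>
#include <cstring>
#include <vector>

// Toy writer/reader pair illustrating the field-order contract: any field
// added to the write side must be read back at the same offset.
struct Writer {
  std::vector<uint8_t> buf;
  template <typename T>
  void Write(T value) {
    size_t pos = buf.size();
    buf.resize(pos + sizeof(T));
    std::memcpy(buf.data() + pos, &value, sizeof(T));
  }
};

struct Reader {
  const uint8_t* p;
  template <typename T>
  T Read() {
    T value;
    std::memcpy(&value, p, sizeof(T));
    p += sizeof(T);
    return value;
  }
};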
@@ -350,7 +360,8 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->WriteVector(code->reloc_info());
writer->WriteVector(code->source_positions());
writer->WriteVector(Vector<byte>::cast(code->protected_instructions()));
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || \
+ V8_TARGET_ARCH_PPC
// On platforms that don't support misaligned word stores, copy to an aligned
// buffer if necessary so we can relocate the serialized code.
std::unique_ptr<byte[]> aligned_buffer;
@@ -495,6 +506,7 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t code_comment_offset = reader->Read<size_t>();
size_t unpadded_binary_size = reader->Read<size_t>();
uint32_t stack_slot_count = reader->Read<uint32_t>();
+ uint32_t tagged_parameter_slots = reader->Read<uint32_t>();
size_t code_size = reader->Read<size_t>();
size_t reloc_size = reader->Read<size_t>();
size_t source_position_size = reader->Read<size_t>();
@@ -514,10 +526,11 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
reader->ReadVector(Vector<byte>::cast(protected_instructions.as_vector()));
WasmCode* code = native_module_->AddDeserializedCode(
- fn_index, code_buffer, stack_slot_count, safepoint_table_offset,
- handler_table_offset, constant_pool_offset, code_comment_offset,
- unpadded_binary_size, std::move(protected_instructions),
- std::move(reloc_info), std::move(source_pos), tier);
+ fn_index, code_buffer, stack_slot_count, tagged_parameter_slots,
+ safepoint_table_offset, handler_table_offset, constant_pool_offset,
+ code_comment_offset, unpadded_binary_size,
+ std::move(protected_instructions), std::move(reloc_info),
+ std::move(source_pos), tier);
// Relocate the code.
int mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
@@ -539,10 +552,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
case RelocInfo::WASM_STUB_CALL: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
DCHECK_LT(tag, WasmCode::kRuntimeStubCount);
- Address target =
- native_module_
- ->runtime_stub(static_cast<WasmCode::RuntimeStubId>(tag))
- ->instruction_start();
+ Address target = native_module_->runtime_stub_entry(
+ static_cast<WasmCode::RuntimeStubId>(tag));
iter.rinfo()->set_wasm_stub_call_address(target, SKIP_ICACHE_FLUSH);
break;
}
@@ -569,8 +580,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
code->Validate();
// Finally, flush the icache for that code.
- Assembler::FlushICache(code->instructions().start(),
- code->instructions().size());
+ FlushInstructionCache(code->instructions().start(),
+ code->instructions().size());
return true;
}
diff --git a/deps/v8/src/wasm/wasm-tier.h b/deps/v8/src/wasm/wasm-tier.h
index 6445608193..dbdaada428 100644
--- a/deps/v8/src/wasm/wasm-tier.h
+++ b/deps/v8/src/wasm/wasm-tier.h
@@ -5,12 +5,14 @@
#ifndef V8_WASM_WASM_TIER_H_
#define V8_WASM_WASM_TIER_H_
+#include <cstdint>
+
namespace v8 {
namespace internal {
namespace wasm {
// All the tiers of WASM execution.
-enum class ExecutionTier {
+enum class ExecutionTier : int8_t {
kInterpreter, // interpreter (used to provide debugging services).
kBaseline, // Liftoff.
kOptimized // TurboFan.
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 0dcd84da79..7b389c2456 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -24,27 +24,17 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
void Assembler::emitl(uint32_t x) {
- Memory<uint32_t>(pc_) = x;
+ WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint32_t);
}
-void Assembler::emitp(Address x, RelocInfo::Mode rmode) {
- Memory<uintptr_t>(pc_) = x;
- if (!RelocInfo::IsNone(rmode)) {
- RecordRelocInfo(rmode, x);
- }
- pc_ += sizeof(uintptr_t);
-}
-
-
void Assembler::emitq(uint64_t x) {
- Memory<uint64_t>(pc_) = x;
+ WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint64_t);
}
-
void Assembler::emitw(uint16_t x) {
- Memory<uint16_t>(pc_) = x;
+ WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint16_t);
}
@@ -61,16 +51,21 @@ void Assembler::emit(Immediate x) {
emitl(x.value_);
}
+void Assembler::emit(Immediate64 x) {
+ if (!RelocInfo::IsNone(x.rmode_)) {
+ RecordRelocInfo(x.rmode_);
+ }
+ emitq(static_cast<uint64_t>(x.value_));
+}
+
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
}
-
void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}
-
void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}
@@ -160,14 +155,14 @@ void Assembler::emit_optional_rex_32(Operand op) {
// byte 1 of 3-byte VEX
void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
LeadingOpcode m) {
- byte rxb = ~((reg.high_bit() << 2) | rm.high_bit()) << 5;
+ byte rxb = static_cast<byte>(~((reg.high_bit() << 2) | rm.high_bit())) << 5;
emit(rxb | m);
}
// byte 1 of 3-byte VEX
void Assembler::emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m) {
- byte rxb = ~((reg.high_bit() << 2) | rm.data().rex) << 5;
+ byte rxb = static_cast<byte>(~((reg.high_bit() << 2) | rm.data().rex)) << 5;
emit(rxb | m);
}
@@ -175,7 +170,7 @@ void Assembler::emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m) {
// byte 1 of 2-byte VEX
void Assembler::emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
SIMDPrefix pp) {
- byte rv = ~((reg.high_bit() << 4) | v.code()) << 3;
+ byte rv = static_cast<byte>(~((reg.high_bit() << 4) | v.code())) << 3;
emit(rv | l | pp);
}
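
The static_cast<byte> added in these VEX-prefix emitters fixes an integer-promotion pitfall: operator~ promotes the byte operand to int, so the subsequent left shift would act on a negative value. Casting the complement back to byte before shifting keeps the computation well-defined; a distilled example:

#include <cstdint>

using byte = uint8_t;

// high_bits is a small non-negative value (at most 3 bits here). Without
// the cast, ~high_bits is a negative int and the left shift is undefined
// behavior (pre-C++20); with it, the shift operates on a small positive
// value and the result truncates back into a byte.
byte EncodeRxb(int high_bits, int m) {
  byte rxb = static_cast<byte>(~high_bits) << 5;
  return rxb | static_cast<byte>(m);
}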
@@ -233,21 +228,21 @@ void Assembler::emit_vex_prefix(Register reg, Register vreg, Operand rm,
Address Assembler::target_address_at(Address pc, Address constant_pool) {
- return Memory<int32_t>(pc) + pc + 4;
+ return ReadUnalignedValue<int32_t>(pc) + pc + 4;
}
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
- Memory<int32_t>(pc) = static_cast<int32_t>(target - pc - 4);
+ WriteUnalignedValue(pc, static_cast<int32_t>(target - pc - 4));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, sizeof(int32_t));
+ FlushInstructionCache(pc, sizeof(int32_t));
}
}
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
- Memory<Address>(pc) = target;
+ WriteUnalignedValue(pc, target);
}
@@ -268,11 +263,11 @@ int Assembler::deserialization_special_target_size(
}
Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
- return GetCodeTarget(Memory<int32_t>(pc));
+ return GetCodeTarget(ReadUnalignedValue<int32_t>(pc));
}
Address Assembler::runtime_entry_at(Address pc) {
- return Memory<int32_t>(pc) + options().code_range_start;
+ return ReadUnalignedValue<int32_t>(pc) + options().code_range_start;
}
// -----------------------------------------------------------------------------
@@ -281,10 +276,11 @@ Address Assembler::runtime_entry_at(Address pc) {
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
- Memory<int32_t>(pc_) -= static_cast<int32_t>(delta);
+ WriteUnalignedValue(
+ pc_, ReadUnalignedValue<int32_t>(pc_) - static_cast<int32_t>(delta));
} else if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- Memory<Address>(pc_) += delta;
+ // Absolute code pointer inside code object moves with the code object.
+ WriteUnalignedValue(pc_, ReadUnalignedValue<Address>(pc_) + delta);
}
}
@@ -317,13 +313,13 @@ int RelocInfo::target_address_size() {
HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(Object(Memory<Address>(pc_)));
+ return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
- return Handle<HeapObject>::cast(Memory<Handle<Object>>(pc_));
+ return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
} else {
return origin->code_target_object_handle_at(pc_);
}
@@ -331,21 +327,21 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return Memory<Address>(pc_);
+ return ReadUnalignedValue<Address>(pc_);
}
void RelocInfo::set_target_external_reference(
Address target, ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- Memory<Address>(pc_) = target;
+ WriteUnalignedValue(pc_, target);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc_, sizeof(Address));
+ FlushInstructionCache(pc_, sizeof(Address));
}
}
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
- return Memory<Address>(pc_);
+ return ReadUnalignedValue<Address>(pc_);
}
@@ -358,9 +354,9 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory<Address>(pc_) = target->ptr();
+ WriteUnalignedValue(pc_, target->ptr());
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc_, sizeof(Address));
+ FlushInstructionCache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
@@ -383,13 +379,13 @@ void RelocInfo::set_target_runtime_entry(Address target,
Address RelocInfo::target_off_heap_target() {
DCHECK(IsOffHeapTarget(rmode_));
- return Memory<Address>(pc_);
+ return ReadUnalignedValue<Address>(pc_);
}
void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsOffHeapTarget(rmode_)) {
- Memory<Address>(pc_) = kNullAddress;
+ WriteUnalignedValue(pc_, kNullAddress);
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
Assembler::set_target_address_at(pc_, constant_pool_,
@@ -399,25 +395,6 @@ void RelocInfo::WipeOut() {
}
}
-template <typename ObjectVisitor>
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(host(), this);
- Assembler::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTargetMode(mode)) {
- visitor->VisitCodeTarget(host(), this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(host(), this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(host(), this);
- } else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(host(), this);
- } else if (RelocInfo::IsOffHeapTarget(mode)) {
- visitor->VisitOffHeapTarget(host(), this);
- }
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 5cf944a697..466e04b9f5 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -126,7 +126,7 @@ void CpuFeatures::PrintFeatures() {
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
- return Memory<uint32_t>(pc_);
+ return ReadUnalignedValue<uint32_t>(pc_);
}
// -----------------------------------------------------------------------------
@@ -200,7 +200,8 @@ class OperandBuilder {
int32_t disp_value = 0;
if (mode == 0x80 || is_baseless) {
// Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = *bit_cast<const int32_t*>(&operand.data().buf[disp_offset]);
+ disp_value = ReadUnalignedValue<int32_t>(
+ reinterpret_cast<Address>(&operand.data().buf[disp_offset]));
} else if (mode == 0x40) {
// Mode 1: Byte displacement.
disp_value = static_cast<signed char>(operand.data().buf[disp_offset]);
@@ -215,8 +216,8 @@ class OperandBuilder {
// Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
data_.len = disp_offset + 4;
- Memory<int32_t>(reinterpret_cast<Address>(&data_.buf[disp_offset])) =
- disp_value;
+ WriteUnalignedValue(reinterpret_cast<Address>(&data_.buf[disp_offset]),
+ disp_value);
} else if (disp_value != 0 || (base_reg == 0x05)) {
// Need 8 bits of displacement.
data_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
@@ -260,15 +261,15 @@ class OperandBuilder {
void set_disp32(int disp) {
DCHECK(data_.len == 1 || data_.len == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&data_.buf[data_.len]);
- *p = disp;
+ Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
+ WriteUnalignedValue(p, disp);
data_.len += sizeof(int32_t);
}
void set_disp64(int64_t disp) {
DCHECK_EQ(1, data_.len);
- int64_t* p = reinterpret_cast<int64_t*>(&data_.buf[data_.len]);
- *p = disp;
+ Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
+ WriteUnalignedValue(p, disp);
data_.len += sizeof(disp);
}
@@ -328,14 +329,14 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
case HeapObjectRequest::kHeapNumber: {
Handle<HeapNumber> object =
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
- Memory<Handle<Object>>(pc) = object;
+ WriteUnalignedValue(pc, object);
break;
}
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
Handle<String> allocated = str->AllocateStringConstant(isolate);
- Memory<Handle<Object>>(pc) = allocated;
+ WriteUnalignedValue(pc, allocated);
break;
}
}
@@ -372,11 +373,9 @@ bool ConstPool::TryRecordEntry(intptr_t data, RelocInfo::Mode mode) {
return AddSharedEntry(raw_data, offset);
}
-bool ConstPool::IsMoveRipRelative(byte* instr) {
- if ((*reinterpret_cast<uint32_t*>(instr) & kMoveRipRelativeMask) ==
- kMoveRipRelativeInstr)
- return true;
- return false;
+bool ConstPool::IsMoveRipRelative(Address instr) {
+ return (ReadUnalignedValue<uint32_t>(instr) & kMoveRipRelativeMask) ==
+ kMoveRipRelativeInstr;
}
void ConstPool::Clear() { entries_.clear(); }
@@ -397,13 +396,13 @@ void ConstPool::PatchEntries() {
DCHECK_LT(constant_entry_offset, it->second);
int32_t disp32 =
constant_entry_offset - (it->second + kRipRelativeDispSize);
- byte* disp_addr = assm_->addr_at(it->second);
+ Address disp_addr = assm_->addr_at(it->second);
// Check if the instruction is actually a rip-relative move.
DCHECK(IsMoveRipRelative(disp_addr - kMoveRipRelativeDispOffset));
// The displacement of the rip-relative move should be 0 before patching.
- DCHECK(*reinterpret_cast<uint32_t*>(disp_addr) == 0);
- *reinterpret_cast<int32_t*>(disp_addr) = disp32;
+ DCHECK(ReadUnalignedValue<uint32_t>(disp_addr) == 0);
+ WriteUnalignedValue(disp_addr, disp32);
}
}
Clear();
@@ -436,11 +435,13 @@ Assembler::Assembler(const AssemblerOptions& options,
}
}
-void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
PatchConstPool();
DCHECK(constpool_.IsEmpty());
- int code_comments_size = WriteCodeComments();
+ const int code_comments_size = WriteCodeComments();
// At this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info.
@@ -449,17 +450,25 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_start_;
- desc->buffer_size = buffer_->size();
- desc->instr_size = pc_offset();
- DCHECK_GT(desc->instr_size, 0); // Zero-size code objects upset the system.
- desc->reloc_size = static_cast<int>((buffer_start_ + desc->buffer_size) -
- reloc_info_writer.pos());
- desc->origin = this;
- desc->constant_pool_size = 0;
- desc->unwinding_info_size = 0;
- desc->unwinding_info = nullptr;
- desc->code_comments_size = code_comments_size;
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
}
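
The new GetCode computes the CodeDesc layout back to front: code comments sit at the end of the instruction stream, the (empty, on x64) constant pool directly before them, and absent handler or safepoint tables collapse onto the constant pool offset, yielding zero-size tables. A worked example with made-up sizes:

#include <cassert>

int main() {
  // Hypothetical sizes: 1000 bytes of instructions ending with 24 bytes
  // of code comments; x64 has no constant pool.
  const int instruction_size = 1000;
  const int code_comments_size = 24;
  const int kConstantPoolSize = 0;

  const int code_comments_offset = instruction_size - code_comments_size;
  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
  assert(code_comments_offset == 976);
  assert(constant_pool_offset == 976);

  // With neither a handler table nor a safepoint table, both offsets fall
  // back to the constant pool offset, i.e. zero-size tables.
  const int handler_table_offset = constant_pool_offset;
  const int safepoint_table_offset = handler_table_offset;
  assert(safepoint_table_offset == 976);
  return 0;
}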
void Assembler::FinalizeJumpOptimizationInfo() {
@@ -518,7 +527,7 @@ void Assembler::bind_to(Label* L, int pos) {
if (current >= 4 && long_at(current - 4) == 0) {
// Absolute address.
intptr_t imm64 = reinterpret_cast<intptr_t>(buffer_start_ + pos);
- *reinterpret_cast<intptr_t*>(addr_at(current - 4)) = imm64;
+ WriteUnalignedValue(addr_at(current - 4), imm64);
internal_reference_positions_.push_back(current - 4);
} else {
// Relative address, relative to point after address.
@@ -532,7 +541,7 @@ void Assembler::bind_to(Label* L, int pos) {
if (current >= 4 && long_at(current - 4) == 0) {
// Absolute address.
intptr_t imm64 = reinterpret_cast<intptr_t>(buffer_start_ + pos);
- *reinterpret_cast<intptr_t*>(addr_at(current - 4)) = imm64;
+ WriteUnalignedValue(addr_at(current - 4), imm64);
internal_reference_positions_.push_back(current - 4);
} else {
// Relative address, relative to point after address.
@@ -629,8 +638,8 @@ void Assembler::GrowBuffer() {
// Relocate internal references.
for (auto pos : internal_reference_positions_) {
- intptr_t* p = reinterpret_cast<intptr_t*>(buffer_start_ + pos);
- *p += pc_delta;
+ Address p = reinterpret_cast<Address>(buffer_start_ + pos);
+ WriteUnalignedValue(p, ReadUnalignedValue<intptr_t>(p) + pc_delta);
}
DCHECK(!buffer_overflow());
@@ -648,7 +657,8 @@ void Assembler::emit_operand(int code, Operand adr) {
// Recognize RIP relative addressing.
if (adr.data().buf[0] == 5) {
DCHECK_EQ(9u, length);
- Label* label = *bit_cast<Label* const*>(&adr.data().buf[1]);
+ Label* label = ReadUnalignedValue<Label*>(
+ reinterpret_cast<Address>(&adr.data().buf[1]));
if (label->is_bound()) {
int offset =
label->pos() - pc_offset() - sizeof(int32_t) + adr.data().addend;
@@ -1638,27 +1648,15 @@ void Assembler::emit_lea(Register dst, Operand src, int size) {
void Assembler::load_rax(Address value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- if (kSystemPointerSize == kInt64Size) {
- emit(0x48); // REX.W
- emit(0xA1);
- emitp(value, mode);
- } else {
- DCHECK_EQ(kSystemPointerSize, kInt32Size);
- emit(0xA1);
- emitp(value, mode);
- // In 64-bit mode, need to zero extend the operand to 8 bytes.
- // See 2.2.1.4 in Intel64 and IA32 Architectures Software
- // Developer's Manual Volume 2.
- emitl(0);
- }
+ emit(0x48); // REX.W
+ emit(0xA1);
+ emit(Immediate64(value, mode));
}
-
void Assembler::load_rax(ExternalReference ref) {
load_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
}
-
void Assembler::leave() {
EnsureSpace ensure_space(this);
emit(0xC9);
@@ -1783,55 +1781,35 @@ void Assembler::emit_mov(Operand dst, Immediate value, int size) {
emit(value);
}
-void Assembler::movp(Register dst, Address value, RelocInfo::Mode rmode) {
- if (constpool_.TryRecordEntry(value, rmode)) {
+void Assembler::emit_mov(Register dst, Immediate64 value, int size) {
+ DCHECK_EQ(size, kInt64Size);
+ if (constpool_.TryRecordEntry(value.value_, value.rmode_)) {
// Emit rip-relative move with offset = 0
Label label;
- emit_mov(dst, Operand(&label, 0), kSystemPointerSize);
+ emit_mov(dst, Operand(&label, 0), size);
bind(&label);
} else {
EnsureSpace ensure_space(this);
- emit_rex(dst, kSystemPointerSize);
+ emit_rex(dst, size);
emit(0xB8 | dst.low_bits());
- emitp(value, rmode);
+ emit(value);
}
}
-void Assembler::movp_heap_number(Register dst, double value) {
+void Assembler::movq_heap_number(Register dst, double value) {
EnsureSpace ensure_space(this);
- emit_rex(dst, kSystemPointerSize);
+ emit_rex(dst, kInt64Size);
emit(0xB8 | dst.low_bits());
RequestHeapObject(HeapObjectRequest(value));
- emitp(0, RelocInfo::EMBEDDED_OBJECT);
+ emit(Immediate64(kNullAddress, RelocInfo::EMBEDDED_OBJECT));
}
-void Assembler::movp_string(Register dst, const StringConstantBase* str) {
+void Assembler::movq_string(Register dst, const StringConstantBase* str) {
EnsureSpace ensure_space(this);
- emit_rex(dst, kSystemPointerSize);
+ emit_rex(dst, kInt64Size);
emit(0xB8 | dst.low_bits());
RequestHeapObject(HeapObjectRequest(str));
- emitp(0, RelocInfo::EMBEDDED_OBJECT);
-}
-
-void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
- if (constpool_.TryRecordEntry(value, rmode)) {
- // Emit rip-relative move with offset = 0
- Label label;
- emit_mov(dst, Operand(&label, 0), kInt64Size);
- bind(&label);
- } else {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- if (!RelocInfo::IsNone(rmode)) {
- RecordRelocInfo(rmode, value);
- }
- emitq(value);
- }
-}
-
-void Assembler::movq(Register dst, uint64_t value, RelocInfo::Mode rmode) {
- movq(dst, static_cast<int64_t>(value), rmode);
+ emit(Immediate64(kNullAddress, RelocInfo::EMBEDDED_OBJECT));
}
// Loads the ip-relative location of the src label into the target location
@@ -2311,22 +2289,11 @@ void Assembler::emit_xchg(Register dst, Operand src, int size) {
void Assembler::store_rax(Address dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- if (kSystemPointerSize == kInt64Size) {
- emit(0x48); // REX.W
- emit(0xA3);
- emitp(dst, mode);
- } else {
- DCHECK_EQ(kSystemPointerSize, kInt32Size);
- emit(0xA3);
- emitp(dst, mode);
- // In 64-bit mode, need to zero extend the operand to 8 bytes.
- // See 2.2.1.4 in Intel64 and IA32 Architectures Software
- // Developer's Manual Volume 2.
- emitl(0);
- }
+ emit(0x48); // REX.W
+ emit(0xA3);
+ emit(Immediate64(dst, mode));
}
-
void Assembler::store_rax(ExternalReference ref) {
store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
}
@@ -4956,8 +4923,8 @@ void Assembler::dq(Label* label) {
EnsureSpace ensure_space(this);
if (label->is_bound()) {
internal_reference_positions_.push_back(pc_offset());
- emitp(reinterpret_cast<Address>(buffer_start_) + label->pos(),
- RelocInfo::INTERNAL_REFERENCE);
+ emit(Immediate64(reinterpret_cast<Address>(buffer_start_) + label->pos(),
+ RelocInfo::INTERNAL_REFERENCE));
} else {
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    emitl(0);  // Zero in the first 32 bits marks it as a 64-bit absolute address.
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 4f16dc0fd3..a491bbfcee 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -51,6 +51,8 @@
namespace v8 {
namespace internal {
+class SafepointTableBuilder;
+
// Utility functions
enum Condition {
@@ -129,6 +131,21 @@ ASSERT_TRIVIALLY_COPYABLE(Immediate);
static_assert(sizeof(Immediate) <= kSystemPointerSize,
"Immediate must be small enough to pass it by value");
+class Immediate64 {
+ public:
+ explicit constexpr Immediate64(int64_t value) : value_(value) {}
+ explicit constexpr Immediate64(int64_t value, RelocInfo::Mode rmode)
+ : value_(value), rmode_(rmode) {}
+ explicit constexpr Immediate64(Address value, RelocInfo::Mode rmode)
+ : value_(static_cast<int64_t>(value)), rmode_(rmode) {}
+
+ private:
+ const int64_t value_;
+ const RelocInfo::Mode rmode_ = RelocInfo::NONE;
+
+ friend class Assembler;
+};
+
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -138,7 +155,7 @@ enum ScaleFactor : int8_t {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
- times_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4,
+ times_system_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4,
times_tagged_size = (kTaggedSize == 8) ? times_8 : times_4,
};
@@ -219,8 +236,7 @@ static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
V(xchg) \
V(xor)
-// Shift instructions on operands/registers with kSystemPointerSize, kInt32Size
-// and kInt64Size.
+// Shift instructions on operands/registers with kInt32Size and kInt64Size.
#define SHIFT_INSTRUCTION_LIST(V) \
V(rol, 0x0) \
V(ror, 0x1) \
@@ -268,7 +284,7 @@ class ConstPool {
bool AddSharedEntry(uint64_t data, int offset);
// Check if the instruction is a rip-relative move.
- bool IsMoveRipRelative(byte* instr);
+ bool IsMoveRipRelative(Address instr);
Assembler* assm_;
@@ -316,13 +332,23 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
std::unique_ptr<AssemblerBuffer> = {});
~Assembler() override = default;
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(Isolate* isolate, CodeDesc* desc);
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
void FinalizeJumpOptimizationInfo();
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
+
// Read/Modify the code target in the relative branch/call instruction at pc.
// On the x64 architecture, we use relative jumps with a 32-bit displacement
// to jump to other Code objects in the Code space in the heap.
@@ -400,16 +426,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
// - Instructions on operands/registers with pointer size use 'p'.
- STATIC_ASSERT(kSystemPointerSize == kInt64Size ||
- kSystemPointerSize == kInt32Size);
-
#define DECLARE_INSTRUCTION(instruction) \
template <class P1> \
- void instruction##p(P1 p1) { \
- emit_##instruction(p1, kSystemPointerSize); \
- } \
- \
- template <class P1> \
void instruction##_tagged(P1 p1) { \
STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
/* TODO(ishell): change to kTaggedSize */ \
@@ -427,11 +445,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
\
template <class P1, class P2> \
- void instruction##p(P1 p1, P2 p2) { \
- emit_##instruction(p1, p2, kSystemPointerSize); \
- } \
- \
- template <class P1, class P2> \
void instruction##_tagged(P1 p1, P2 p2) { \
STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
/* TODO(ishell): change to kTaggedSize */ \
@@ -450,19 +463,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
\
template <class P1, class P2, class P3> \
- void instruction##p(P1 p1, P2 p2, P3 p3) { \
- emit_##instruction(p1, p2, p3, kSystemPointerSize); \
- } \
- \
- template <class P1, class P2, class P3> \
- void instruction##_tagged(P1 p1, P2 p2, P3 p3) { \
- STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
- /* TODO(ishell): change to kTaggedSize */ \
- emit_##instruction(p1, p2, p3, \
- COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
- } \
- \
- template <class P1, class P2, class P3> \
void instruction##l(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kInt32Size); \
} \
@@ -518,9 +518,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// position (after the move) to the destination.
void movl(Operand dst, Label* src);
- // Loads a pointer into a register with a relocation mode.
- void movp(Register dst, Address ptr, RelocInfo::Mode rmode);
-
// Load a heap number into a register.
// The heap number will not be allocated and embedded into the code right
// away. Instead, we emit the load of a dummy object. Later, when calling
@@ -528,15 +525,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// patched by replacing the dummy with the actual object. The RelocInfo for
// the embedded object gets already recorded correctly when emitting the dummy
// move.
- void movp_heap_number(Register dst, double value);
+ void movq_heap_number(Register dst, double value);
- void movp_string(Register dst, const StringConstantBase* str);
+ void movq_string(Register dst, const StringConstantBase* str);
// Loads a 64-bit immediate into a register.
- void movq(Register dst, int64_t value,
- RelocInfo::Mode rmode = RelocInfo::NONE);
- void movq(Register dst, uint64_t value,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ void movq(Register dst, int64_t value) { movq(dst, Immediate64(value)); }
+ void movq(Register dst, uint64_t value) {
+ movq(dst, Immediate64(static_cast<int64_t>(value)));
+ }
void movsxbl(Register dst, Register src);
void movsxbl(Register dst, Operand src);
@@ -553,7 +550,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void repmovsb();
void repmovsw();
- void repmovsp() { emit_repmovs(kSystemPointerSize); }
void repmovsl() { emit_repmovs(kInt32Size); }
void repmovsq() { emit_repmovs(kInt64Size); }
@@ -633,10 +629,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void mulq(Register src);
#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
- void instruction##p(Register dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kSystemPointerSize); \
- } \
- \
void instruction##l(Register dst, Immediate imm8) { \
shift(dst, imm8, subcode, kInt32Size); \
} \
@@ -645,10 +637,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
shift(dst, imm8, subcode, kInt64Size); \
} \
\
- void instruction##p(Operand dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kSystemPointerSize); \
- } \
- \
void instruction##l(Operand dst, Immediate imm8) { \
shift(dst, imm8, subcode, kInt32Size); \
} \
@@ -657,18 +645,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
shift(dst, imm8, subcode, kInt64Size); \
} \
\
- void instruction##p_cl(Register dst) { \
- shift(dst, subcode, kSystemPointerSize); \
- } \
- \
void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
\
void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
\
- void instruction##p_cl(Operand dst) { \
- shift(dst, subcode, kSystemPointerSize); \
- } \
- \
void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
\
void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
@@ -1120,12 +1100,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void instr##pd(XMMRegister dst, XMMRegister src) { cmppd(dst, src, imm8); } \
void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
- SSE_CMP_P(cmpeq, 0x0);
- SSE_CMP_P(cmplt, 0x1);
- SSE_CMP_P(cmple, 0x2);
- SSE_CMP_P(cmpneq, 0x4);
- SSE_CMP_P(cmpnlt, 0x5);
- SSE_CMP_P(cmpnle, 0x6);
+ SSE_CMP_P(cmpeq, 0x0)
+ SSE_CMP_P(cmplt, 0x1)
+ SSE_CMP_P(cmple, 0x2)
+ SSE_CMP_P(cmpneq, 0x4)
+ SSE_CMP_P(cmpnlt, 0x5)
+ SSE_CMP_P(cmpnle, 0x6)
#undef SSE_CMP_P
@@ -1336,18 +1316,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
impl(opcode, dst, src1, src2); \
}
- AVX_SP_3(vsqrt, 0x51);
- AVX_SP_3(vadd, 0x58);
- AVX_SP_3(vsub, 0x5c);
- AVX_SP_3(vmul, 0x59);
- AVX_SP_3(vdiv, 0x5e);
- AVX_SP_3(vmin, 0x5d);
- AVX_SP_3(vmax, 0x5f);
- AVX_P_3(vand, 0x54);
- AVX_P_3(vor, 0x56);
- AVX_P_3(vxor, 0x57);
- AVX_3(vcvtsd2ss, 0x5a, vsd);
- AVX_3(vhaddps, 0x7c, vsd);
+ AVX_SP_3(vsqrt, 0x51)
+ AVX_SP_3(vadd, 0x58)
+ AVX_SP_3(vsub, 0x5c)
+ AVX_SP_3(vmul, 0x59)
+ AVX_SP_3(vdiv, 0x5e)
+ AVX_SP_3(vmin, 0x5d)
+ AVX_SP_3(vmax, 0x5f)
+ AVX_P_3(vand, 0x54)
+ AVX_P_3(vor, 0x56)
+ AVX_P_3(vxor, 0x57)
+ AVX_3(vcvtsd2ss, 0x5a, vsd)
+ AVX_3(vhaddps, 0x7c, vsd)
#undef AVX_3
#undef AVX_S_3
@@ -1512,12 +1492,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vcmppd(dst, src1, src2, imm8); \
}
- AVX_CMP_P(vcmpeq, 0x0);
- AVX_CMP_P(vcmplt, 0x1);
- AVX_CMP_P(vcmple, 0x2);
- AVX_CMP_P(vcmpneq, 0x4);
- AVX_CMP_P(vcmpnlt, 0x5);
- AVX_CMP_P(vcmpnle, 0x6);
+ AVX_CMP_P(vcmpeq, 0x0)
+ AVX_CMP_P(vcmplt, 0x1)
+ AVX_CMP_P(vcmple, 0x2)
+ AVX_CMP_P(vcmpneq, 0x4)
+ AVX_CMP_P(vcmpnlt, 0x5)
+ AVX_CMP_P(vcmpnle, 0x6)
#undef AVX_CMP_P
@@ -1806,12 +1786,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void call(Operand operand);
private:
- byte* addr_at(int pos) { return buffer_start_ + pos; }
- uint32_t long_at(int pos) {
- return *reinterpret_cast<uint32_t*>(addr_at(pos));
+ Address addr_at(int pos) {
+ return reinterpret_cast<Address>(buffer_start_ + pos);
+ }
+ uint32_t long_at(int pos) {
+ return ReadUnalignedValue<uint32_t>(addr_at(pos));
}
void long_at_put(int pos, uint32_t x) {
- *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
+ WriteUnalignedValue(addr_at(pos), x);
}
// code emission
@@ -1819,11 +1801,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void emit(byte x) { *pc_++ = x; }
inline void emitl(uint32_t x);
- inline void emitp(Address x, RelocInfo::Mode rmode);
inline void emitq(uint64_t x);
inline void emitw(uint16_t x);
inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
inline void emit(Immediate x);
+ inline void emit(Immediate64 x);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of both register codes.
@@ -2135,6 +2117,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void emit_mov(Operand dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
void emit_mov(Operand dst, Immediate value, int size);
+ void emit_mov(Register dst, Immediate64 value, int size);
void emit_movzxb(Register dst, Operand src, int size);
void emit_movzxb(Register dst, Register src, int size);
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index a600c329ce..f49a131a26 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -24,7 +24,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
- __ subp(rsp, Immediate(kDoubleRegsSize));
+ __ subq(rsp, Immediate(kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -35,7 +35,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
- __ subp(rsp, Immediate(kFloatRegsSize));
+ __ subq(rsp, Immediate(kFloatRegsSize));
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
int code = config->GetAllocatableFloatCode(i);
@@ -51,8 +51,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pushq(r);
}
- const int kSavedRegistersAreaSize =
- kNumberOfRegisters * kRegisterSize + kDoubleRegsSize + kFloatRegsSize;
+ const int kSavedRegistersAreaSize = kNumberOfRegisters * kSystemPointerSize +
+ kDoubleRegsSize + kFloatRegsSize;
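
[Note: a quick size check of the constant just rewritten, under the usual x64 assumptions (16 general-purpose registers, 16 XMM registers, 8-byte doubles, 4-byte floats -- these counts are not stated in this patch):

constexpr int kNumberOfRegisters = 16;  // assumption: Register::kNumRegisters
constexpr int kNumXMMRegisters = 16;    // assumption: XMMRegister::kNumRegisters
constexpr int kSystemPointerSize = 8;
constexpr int kDoubleRegsSize = 8 * kNumXMMRegisters;  // 128 bytes
constexpr int kFloatRegsSize = 4 * kNumXMMRegisters;   // 64 bytes
constexpr int kSavedRegistersAreaSize =
    kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize + kFloatRegsSize;
static_assert(kSavedRegistersAreaSize == 320, "128 + 128 + 64");
]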
__ Store(
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
@@ -64,36 +64,36 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Register arg5 = r11;
// The bailout id is passed using r13 on the stack.
- __ movp(arg_reg_3, r13);
+ __ movq(arg_reg_3, r13);
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
- __ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
- __ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
+ __ movq(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
+ __ leaq(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
- __ subp(arg5, rbp);
- __ negp(arg5);
+ __ subq(arg5, rbp);
+ __ negq(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
- __ movp(rax, Immediate(0));
+ __ movq(rax, Immediate(0));
Label context_check;
- __ movp(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(rdi, &context_check);
- __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ movp(arg_reg_1, rax);
+ __ movq(arg_reg_1, rax);
__ Set(arg_reg_2, static_cast<int>(deopt_kind));
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9.
#ifdef _WIN64
- __ movq(Operand(rsp, 4 * kRegisterSize), arg5);
+ __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
- __ movq(Operand(rsp, 5 * kRegisterSize), arg5);
+ __ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
#else
- __ movp(r8, arg5);
+ __ movq(r8, arg5);
__ LoadAddress(r9, ExternalReference::isolate_address(isolate));
#endif
@@ -103,11 +103,12 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
- __ movp(rbx, Operand(rax, Deoptimizer::input_offset()));
+ __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters -1; i >= 0; i--) {
- int offset = (i * kRegisterSize) + FrameDescription::registers_offset();
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
__ PopQuad(Operand(rbx, offset));
}
@@ -119,7 +120,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movl(rcx, Operand(rsp, src_offset));
__ movl(Operand(rbx, dst_offset), rcx);
}
- __ addp(rsp, Immediate(kFloatRegsSize));
+ __ addq(rsp, Immediate(kFloatRegsSize));
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
@@ -129,31 +130,31 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
// Remove the return address from the stack.
- __ addp(rsp, Immediate(kPCOnStackSize));
+ __ addq(rsp, Immediate(kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
- __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addp(rcx, rsp);
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ addq(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
- __ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ __ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ Pop(Operand(rdx, 0));
- __ addp(rdx, Immediate(sizeof(intptr_t)));
+ __ addq(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
- __ cmpp(rcx, rsp);
+ __ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
__ pushq(rax);
__ PrepareCallCFunction(2);
- __ movp(arg_reg_1, rax);
+ __ movq(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
{
AllowExternalCallThatCantCauseGC scope(masm);
@@ -161,7 +162,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
__ popq(rax);
- __ movp(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
+ __ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
@@ -169,23 +170,23 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
- __ movp(rax, Operand(rax, Deoptimizer::output_offset()));
- __ leap(rdx, Operand(rax, rdx, times_pointer_size, 0));
+ __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
+ __ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
- __ movp(rbx, Operand(rax, 0));
- __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ movq(rbx, Operand(rax, 0));
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
- __ subp(rcx, Immediate(sizeof(intptr_t)));
+ __ subq(rcx, Immediate(sizeof(intptr_t)));
__ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
- __ testp(rcx, rcx);
+ __ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
- __ addp(rax, Immediate(kSystemPointerSize));
+ __ addq(rax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header);
- __ cmpp(rax, rdx);
+ __ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -201,7 +202,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kRegisterSize) + FrameDescription::registers_offset();
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
__ PushQuad(Operand(rbx, offset));
}
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 1282fd8d7e..fec7619ee1 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -12,6 +12,7 @@
#include "src/base/lazy-instance.h"
#include "src/base/v8-fallthrough.h"
#include "src/disasm.h"
+#include "src/utils.h"
#include "src/x64/register-x64.h"
#include "src/x64/sse-instr.h"
@@ -251,7 +252,7 @@ void InstructionTable::AddJumpConditionalShort() {
}
namespace {
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(InstructionTable, GetInstructionTable);
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(InstructionTable, GetInstructionTable)
}
static const InstructionDesc cmov_instructions[16] = {
diff --git a/deps/v8/src/x64/frame-constants-x64.h b/deps/v8/src/x64/frame-constants-x64.h
index 21f51a096d..2b75c90677 100644
--- a/deps/v8/src/x64/frame-constants-x64.h
+++ b/deps/v8/src/x64/frame-constants-x64.h
@@ -24,8 +24,9 @@ class EntryFrameConstants : public AllStatic {
// On x64, there are 7 pushq() and 3 Push() calls between setting up rbp and
// pushing the c_entry_fp, plus we manually allocate kXMMRegistersBlockSize
// bytes on the stack.
- static constexpr int kCallerFPOffset =
- -3 * kSystemPointerSize + -7 * kRegisterSize - kXMMRegistersBlockSize;
+ static constexpr int kCallerFPOffset = -3 * kSystemPointerSize +
+ -7 * kSystemPointerSize -
+ kXMMRegistersBlockSize;
// Stack offsets for arguments passed to JSEntry.
static constexpr int kArgcOffset = 6 * kSystemPointerSize;
@@ -36,7 +37,7 @@ class EntryFrameConstants : public AllStatic {
// On x64, there are 5 pushq() and 3 Push() calls between setting up rbp and
// pushing the c_entry_fp.
static constexpr int kCallerFPOffset =
- -3 * kSystemPointerSize + -5 * kRegisterSize;
+ -3 * kSystemPointerSize + -5 * kSystemPointerSize;
#endif
};
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 3e14f1e407..c6b2ce74e3 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -101,6 +101,14 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rdx: the function template info
+ // rcx: number of arguments (on the stack, not including receiver)
+ Register registers[] = {rdx, rcx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments (on the stack, not including receiver)
@@ -205,9 +213,10 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // kTargetContext
- rdx, // kApiFunctionAddress
- rcx, // kArgc
+ rdx, // api function address
+ rcx, // argument count (not including receiver)
+ rbx, // call data
+ rdi, // holder
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 1955e80f79..0a60d5e557 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -15,6 +15,7 @@
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/globals.h"
+#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/objects/smi.h"
@@ -49,9 +50,9 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
kSystemPointerSize);
} else {
// argument[0] is at base_reg_ + displacement_to_last_argument +
- // argument_count_reg_ * times_pointer_size + (receiver - 1) *
+ // argument_count_reg_ * times_system_pointer_size + (receiver - 1) *
// kSystemPointerSize.
- return Operand(base_reg_, argument_count_reg_, times_pointer_size,
+ return Operand(base_reg_, argument_count_reg_, times_system_pointer_size,
displacement_to_last_argument +
(receiver - 1 - index) * kSystemPointerSize);
}
@@ -74,7 +75,7 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
if (root_array_available_ && options().enable_root_array_delta_access) {
intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
if (is_int32(delta)) {
- movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -82,7 +83,7 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
if (destination == rax && !options().isolate_independent_code) {
load_rax(source);
} else {
- movp(destination, ExternalReferenceAsOperand(source));
+ movq(destination, ExternalReferenceAsOperand(source));
}
}
@@ -92,7 +93,7 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
intptr_t delta =
RootRegisterOffsetForExternalReference(isolate(), destination);
if (is_int32(delta)) {
- movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
+ movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
}
}
@@ -100,7 +101,7 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
if (source == rax && !options().isolate_independent_code) {
store_rax(destination);
} else {
- movp(ExternalReferenceAsOperand(destination), source);
+ movq(ExternalReferenceAsOperand(destination), source);
}
}
@@ -119,12 +120,12 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
if (offset == 0) {
Move(destination, kRootRegister);
} else {
- leap(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
+ leaq(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
}
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
- movp(destination, Operand(kRootRegister, offset));
+ movq(destination, Operand(kRootRegister, offset));
}
void TurboAssembler::LoadAddress(Register destination,
@@ -132,7 +133,7 @@ void TurboAssembler::LoadAddress(Register destination,
if (root_array_available_ && options().enable_root_array_delta_access) {
intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
if (is_int32(delta)) {
- leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ leaq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -165,7 +166,7 @@ Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
return Operand(kRootRegister, static_cast<int32_t>(offset));
} else {
// Otherwise, do a memory load from the external reference table.
- movp(scratch, Operand(kRootRegister,
+ movq(scratch, Operand(kRootRegister,
RootRegisterOffsetForExternalReferenceTableEntry(
isolate(), reference)));
return Operand(scratch, 0);
@@ -182,7 +183,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
DCHECK(root_array_available_);
- movp(destination,
+ movq(destination,
Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
@@ -199,7 +200,7 @@ void TurboAssembler::CompareRoot(Register with, RootIndex index) {
Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
} else {
// Some smi roots contain system pointer size values like stack limits.
- cmpp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ cmpq(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
}
@@ -212,38 +213,34 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
cmp_tagged(with, kScratchRegister);
} else {
// Some smi roots contain system pointer size values like stack limits.
- cmpp(with, kScratchRegister);
+ cmpq(with, kScratchRegister);
}
}
void TurboAssembler::LoadTaggedPointerField(Register destination,
- Operand field_operand,
- Register scratch_for_debug) {
+ Operand field_operand) {
#ifdef V8_COMPRESS_POINTERS
- DecompressTaggedPointer(destination, field_operand, scratch_for_debug);
+ DecompressTaggedPointer(destination, field_operand);
#else
- movp(destination, field_operand);
+ mov_tagged(destination, field_operand);
#endif
}
void TurboAssembler::LoadAnyTaggedField(Register destination,
- Operand field_operand, Register scratch,
- Register scratch_for_debug) {
+ Operand field_operand,
+ Register scratch) {
#ifdef V8_COMPRESS_POINTERS
- DecompressAnyTagged(destination, field_operand, scratch, scratch_for_debug);
+ DecompressAnyTagged(destination, field_operand, scratch);
#else
- movp(destination, field_operand);
+ mov_tagged(destination, field_operand);
#endif
}
void TurboAssembler::PushTaggedPointerField(Operand field_operand,
- Register scratch,
- Register scratch_for_debug) {
+ Register scratch) {
#ifdef V8_COMPRESS_POINTERS
- DCHECK(!AreAliased(scratch, scratch_for_debug));
DCHECK(!field_operand.AddressUsesRegister(scratch));
- DCHECK(!field_operand.AddressUsesRegister(scratch_for_debug));
- DecompressTaggedPointer(scratch, field_operand, scratch_for_debug);
+ DecompressTaggedPointer(scratch, field_operand);
Push(scratch);
#else
Push(field_operand);
@@ -251,14 +248,12 @@ void TurboAssembler::PushTaggedPointerField(Operand field_operand,
}
void TurboAssembler::PushTaggedAnyField(Operand field_operand,
- Register scratch1, Register scratch2,
- Register scratch_for_debug) {
+ Register scratch1, Register scratch2) {
#ifdef V8_COMPRESS_POINTERS
- DCHECK(!AreAliased(scratch1, scratch2, scratch_for_debug));
+ DCHECK(!AreAliased(scratch1, scratch2));
DCHECK(!field_operand.AddressUsesRegister(scratch1));
DCHECK(!field_operand.AddressUsesRegister(scratch2));
- DCHECK(!field_operand.AddressUsesRegister(scratch_for_debug));
- DecompressAnyTagged(scratch1, field_operand, scratch2, scratch_for_debug);
+ DecompressAnyTagged(scratch1, field_operand, scratch2);
Push(scratch1);
#else
Push(field_operand);
@@ -271,71 +266,49 @@ void TurboAssembler::SmiUntagField(Register dst, Operand src) {
void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
Immediate value) {
- movp(dst_field_operand, value);
+#ifdef V8_COMPRESS_POINTERS
+ RecordComment("[ StoreTagged");
+ movl(dst_field_operand, value);
+ movl(Operand(dst_field_operand, 4), Immediate(0));
+ RecordComment("]");
+#else
+ movq(dst_field_operand, value);
+#endif
}
void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
Register value) {
- movp(dst_field_operand, value);
+#ifdef V8_COMPRESS_POINTERS
+ RecordComment("[ StoreTagged");
+ movl(dst_field_operand, value);
+ movl(Operand(dst_field_operand, 4), Immediate(0));
+ RecordComment("]");
+#else
+ movq(dst_field_operand, value);
+#endif
}
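
[Note: under pointer compression the two movl stores above write the 32-bit tagged value and then explicitly zero the upper half of the 8-byte slot. A standalone model of that store, assuming an 8-byte little-endian field as on x64:

#include <cstdint>
#include <cstring>

void StoreTaggedCompressed(uint64_t* field, uint32_t compressed_value) {
  uint32_t halves[2] = {compressed_value, 0};  // movl value; movl +4, 0
  std::memcpy(field, halves, sizeof(halves));  // low half first on x64
}
]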
void TurboAssembler::DecompressTaggedSigned(Register destination,
- Operand field_operand,
- Register scratch_for_debug) {
- DCHECK(!AreAliased(destination, scratch_for_debug));
+ Operand field_operand) {
RecordComment("[ DecompressTaggedSigned");
- if (DEBUG_BOOL && scratch_for_debug.is_valid()) {
- Register expected_value = scratch_for_debug;
- movq(expected_value, field_operand);
- movsxlq(destination, expected_value);
- Label check_passed;
- cmpq(destination, expected_value);
- j(equal, &check_passed);
- RecordComment("DecompressTaggedSigned failed");
- int3();
- bind(&check_passed);
- } else {
- movsxlq(destination, field_operand);
- }
+ movsxlq(destination, field_operand);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(Register destination,
- Operand field_operand,
- Register scratch_for_debug) {
- DCHECK(!AreAliased(destination, scratch_for_debug));
+ Operand field_operand) {
RecordComment("[ DecompressTaggedPointer");
- if (DEBUG_BOOL && scratch_for_debug.is_valid()) {
- Register expected_value = scratch_for_debug;
- movq(expected_value, field_operand);
- movsxlq(destination, expected_value);
- addq(destination, kRootRegister);
- Label check_passed;
- cmpq(destination, expected_value);
- j(equal, &check_passed);
- RecordComment("DecompressTaggedPointer failed");
- int3();
- bind(&check_passed);
- } else {
- movsxlq(destination, field_operand);
- addq(destination, kRootRegister);
- }
+ movsxlq(destination, field_operand);
+ addq(destination, kRootRegister);
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(Register destination,
Operand field_operand,
- Register scratch,
- Register scratch_for_debug) {
- DCHECK(!AreAliased(destination, scratch, scratch_for_debug));
+ Register scratch) {
+ DCHECK(!AreAliased(destination, scratch));
RecordComment("[ DecompressAnyTagged");
- Register expected_value = scratch_for_debug;
- if (DEBUG_BOOL && expected_value.is_valid()) {
- movq(expected_value, field_operand);
- movsxlq(destination, expected_value);
- } else {
- movsxlq(destination, field_operand);
- }
+ movsxlq(destination, field_operand);
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
@@ -347,14 +320,6 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
// Now this add operation will either leave the value unchanged if it is a smi
// or add the isolate root if it is a heap object.
addq(destination, masked_root);
- if (DEBUG_BOOL && expected_value.is_valid()) {
- Label check_passed;
- cmpq(destination, expected_value);
- j(equal, &check_passed);
- RecordComment("Decompression failed: Tagged");
- int3();
- bind(&check_passed);
- }
RecordComment("]");
}
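
[Note: a standalone model of the branchless decompression above, assuming smi tag 0 in the low bit (so heap objects have bit 0 set), as the STATIC_ASSERTs in the surrounding code require:

#include <cstdint>

uint64_t DecompressAnyTagged(int32_t field, uint64_t root_register) {
  // movsxlq: sign-extend the 32-bit on-heap value to 64 bits.
  uint64_t destination = static_cast<uint64_t>(static_cast<int64_t>(field));
  // masked_root = HAS_SMI_TAG(destination) ? 0 : root_register, with no
  // branch: negating the tag bit yields an all-zero or all-one mask.
  uint64_t masked_root = (0ULL - (destination & 1ULL)) & root_register;
  // Smis pass through unchanged; heap objects get the isolate root added.
  return destination + masked_root;
}
]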
@@ -376,7 +341,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so the offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize));
- leap(dst, FieldOperand(object, offset));
+ leaq(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
testb(dst, Immediate(kTaggedSize - 1));
@@ -571,7 +536,7 @@ void TurboAssembler::CheckStackAlignment() {
if (frame_alignment > kSystemPointerSize) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
- testp(rsp, Immediate(frame_alignment_mask));
+ testq(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
@@ -719,7 +684,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
int delta = kDoubleSize * XMMRegister::kNumRegisters;
- subp(rsp, Immediate(delta));
+ subq(rsp, Immediate(delta));
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
Movsd(Operand(rsp, i * kDoubleSize), reg);
@@ -739,7 +704,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Movsd(reg, Operand(rsp, i * kDoubleSize));
}
int delta = kDoubleSize * XMMRegister::kNumRegisters;
- addp(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
bytes += delta;
}
@@ -1078,41 +1043,6 @@ void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* success) {
ConvertFloatToUint64<XMMRegister, false>(this, dst, src, success);
}
-void MacroAssembler::Load(Register dst, Operand src, Representation r) {
- DCHECK(!r.IsDouble());
- if (r.IsInteger8()) {
- movsxbq(dst, src);
- } else if (r.IsUInteger8()) {
- movzxbl(dst, src);
- } else if (r.IsInteger16()) {
- movsxwq(dst, src);
- } else if (r.IsUInteger16()) {
- movzxwl(dst, src);
- } else if (r.IsInteger32()) {
- movl(dst, src);
- } else {
- movp(dst, src);
- }
-}
-
-void MacroAssembler::Store(Operand dst, Register src, Representation r) {
- DCHECK(!r.IsDouble());
- if (r.IsInteger8() || r.IsUInteger8()) {
- movb(dst, src);
- } else if (r.IsInteger16() || r.IsUInteger16()) {
- movw(dst, src);
- } else if (r.IsInteger32()) {
- movl(dst, src);
- } else {
- if (r.IsHeapObject()) {
- AssertNotSmi(src);
- } else if (r.IsSmi()) {
- AssertSmi(src);
- }
- movp(dst, src);
- }
-}
-
void TurboAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -1126,15 +1056,11 @@ void TurboAssembler::Set(Register dst, int64_t x) {
}
void TurboAssembler::Set(Operand dst, intptr_t x) {
- if (kSystemPointerSize == kInt64Size) {
- if (is_int32(x)) {
- movp(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- Set(kScratchRegister, x);
- movp(dst, kScratchRegister);
- }
+ if (is_int32(x)) {
+ movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- movp(dst, Immediate(static_cast<int32_t>(x)));
+ Set(kScratchRegister, x);
+ movq(dst, kScratchRegister);
}
}
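
[Note: the is_int32() split that survives above reflects an ISA constraint rather than style: x64 has no "mov m64, imm64" encoding, so a 64-bit store can carry at most a sign-extended 32-bit immediate, and anything wider must bounce through a scratch register. The predicate in standalone form:

#include <cstdint>

constexpr bool FitsSignExtendedImm32(int64_t x) {
  return x == static_cast<int32_t>(x);
}
static_assert(FitsSignExtendedImm32(-1), "all-ones sign-extends from 32 bits");
static_assert(!FitsSignExtendedImm32(0x100000000LL), "takes the scratch path");
]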
@@ -1170,25 +1096,25 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) {
return;
}
}
- movp(dst, ext.address(), RelocInfo::EXTERNAL_REFERENCE);
+ movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
}
void MacroAssembler::SmiTag(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (dst != src) {
- movp(dst, src);
+ movq(dst, src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- shlp(dst, Immediate(kSmiShift));
+ shlq(dst, Immediate(kSmiShift));
}
void TurboAssembler::SmiUntag(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (dst != src) {
- movp(dst, src);
+ movq(dst, src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- sarp(dst, Immediate(kSmiShift));
+ sarq(dst, Immediate(kSmiShift));
}
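
[Note: a standalone model of the tag/untag pair above for the 32-bit smi configuration (kSmiShift == 32, kSmiTag == 0): the payload lives in the upper half of the word, and the arithmetic right shift in SmiUntag restores the sign.

#include <cstdint>

constexpr int kSmiShift = 32;
constexpr int64_t SmiTag(int64_t value) {
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShift);
}
constexpr int64_t SmiUntag(int64_t smi) { return smi >> kSmiShift; }
static_assert(SmiUntag(SmiTag(-7)) == -7, "round-trips negative values");
]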
void TurboAssembler::SmiUntag(Register dst, Operand src) {
@@ -1198,15 +1124,15 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
movsxlq(dst, dst);
} else {
DCHECK(SmiValuesAre31Bits());
- movp(dst, src);
- sarp(dst, Immediate(kSmiShift));
+ movq(dst, src);
+ sarq(dst, Immediate(kSmiShift));
}
}
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
- cmpp(smi1, smi2);
+ cmpq(smi1, smi2);
}
void MacroAssembler::SmiCompare(Register dst, Smi src) {
@@ -1292,15 +1218,15 @@ void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
Immediate(constant->value()));
} else {
DCHECK(SmiValuesAre31Bits());
- if (kSystemPointerSize == kInt64Size) {
+ if (kTaggedSize == kInt64Size) {
// Sign-extend value after addition
movl(kScratchRegister, dst);
addl(kScratchRegister, Immediate(constant));
movsxlq(kScratchRegister, kScratchRegister);
movq(dst, kScratchRegister);
} else {
- DCHECK_EQ(kSmiShiftSize, 32);
- addp(dst, Immediate(constant));
+ DCHECK_EQ(kTaggedSize, kInt32Size);
+ addl(dst, Immediate(constant));
}
}
}
@@ -1314,12 +1240,12 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
// There is a possible optimization if shift is in the range 60-63, but that
// will (and must) never happen.
if (dst != src) {
- movp(dst, src);
+ movq(dst, src);
}
if (shift < kSmiShift) {
- sarp(dst, Immediate(kSmiShift - shift));
+ sarq(dst, Immediate(kSmiShift - shift));
} else {
- shlp(dst, Immediate(shift - kSmiShift));
+ shlq(dst, Immediate(shift - kSmiShift));
}
return SmiIndex(dst, times_1);
} else {
@@ -1350,7 +1276,7 @@ void TurboAssembler::Push(Smi source) {
}
int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
- if (first_byte_set == last_byte_set && kSystemPointerSize == kInt64Size) {
+ if (first_byte_set == last_byte_set) {
// This sequence has only 7 bytes, compared to the 12 bytes below.
Push(Immediate(0));
movb(Operand(rsp, first_byte_set),
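
[Note: byte accounting behind the "7 bytes, compared to the 12 bytes" comment in this hunk -- a sketch assuming the standard encodings and that kScratchRegister is one of r8-r15, which needs a REX prefix on push:

constexpr int kPushImm8 = 2;      // 6A 00: push a zero qword
constexpr int kMovbRspDisp8 = 5;  // C6 44 24 disp8 imm8: patch one byte
constexpr int kMovqImm64 = 10;    // REX.W B8+rd imm64
constexpr int kPushExtReg = 2;    // REX.B 50+rd
static_assert(kPushImm8 + kMovbRspDisp8 == 7, "fast path");
static_assert(kMovqImm64 + kPushExtReg == 12, "generic path");
]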
@@ -1365,7 +1291,7 @@ void TurboAssembler::Push(Smi source) {
void TurboAssembler::Move(Register dst, Register src) {
if (dst != src) {
- movp(dst, src);
+ movq(dst, src);
}
}
@@ -1374,7 +1300,7 @@ void TurboAssembler::MoveNumber(Register dst, double value) {
if (DoubleToSmiInteger(value, &smi)) {
Move(dst, Smi::FromInt(smi));
} else {
- movp_heap_number(dst, value);
+ movq_heap_number(dst, value);
}
}
@@ -1464,6 +1390,18 @@ void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
}
}
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range,
+ Label::Distance near_jump) {
+ if (lower_limit != 0) {
+ leal(kScratchRegister, Operand(value, 0u - lower_limit));
+ cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
+ } else {
+ cmpl(value, Immediate(higher_limit));
+ }
+ j(below_equal, on_in_range, near_jump);
+}
+
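
[Note: the lea/cmp pair in the new helper above is the classic unsigned-subtract range check: value lies in [lower, higher] iff (uint32_t)(value - lower) is at most higher - lower, since values below the range wrap around to huge unsigned numbers. Standalone equivalent:

#include <cstdint>

constexpr bool IsInRange(uint32_t value, uint32_t lower, uint32_t higher) {
  return value - lower <= higher - lower;  // one cmp plus one jbe
}
static_assert(IsInRange(5, 3, 9), "inside the range");
static_assert(!IsInRange(2, 3, 9), "2 - 3 wraps to 0xFFFFFFFF");
]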
void TurboAssembler::Push(Handle<HeapObject> source) {
Move(kScratchRegister, source);
Push(kScratchRegister);
@@ -1477,24 +1415,24 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object,
return;
}
}
- movp(result, object.address(), rmode);
+ movq(result, Immediate64(object.address(), rmode));
}
void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
RelocInfo::Mode rmode) {
Move(kScratchRegister, object, rmode);
- movp(dst, kScratchRegister);
+ movq(dst, kScratchRegister);
}
void TurboAssembler::MoveStringConstant(Register result,
const StringConstantBase* string,
RelocInfo::Mode rmode) {
- movp_string(result, string);
+ movq_string(result, string);
}
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- addp(rsp, Immediate(stack_elements * kSystemPointerSize));
+ addq(rsp, Immediate(stack_elements * kSystemPointerSize));
}
}
@@ -1502,7 +1440,7 @@ void MacroAssembler::Drop(int stack_elements) {
void MacroAssembler::DropUnderReturnAddress(int stack_elements,
Register scratch) {
DCHECK_GT(stack_elements, 0);
- if (kSystemPointerSize == kInt64Size && stack_elements == 1) {
+ if (stack_elements == 1) {
popq(MemOperand(rsp, 0));
return;
}
@@ -1512,105 +1450,28 @@ void MacroAssembler::DropUnderReturnAddress(int stack_elements,
PushReturnAddressFrom(scratch);
}
-void TurboAssembler::Push(Register src) {
- if (kSystemPointerSize == kInt64Size) {
- pushq(src);
- } else {
- // x32 uses 64-bit push for rbp in the prologue.
- DCHECK(src.code() != rbp.code());
- leal(rsp, Operand(rsp, -4));
- movp(Operand(rsp, 0), src);
- }
-}
+void TurboAssembler::Push(Register src) { pushq(src); }
-void TurboAssembler::Push(Operand src) {
- if (kSystemPointerSize == kInt64Size) {
- pushq(src);
- } else {
- movp(kScratchRegister, src);
- leal(rsp, Operand(rsp, -4));
- movp(Operand(rsp, 0), kScratchRegister);
- }
-}
+void TurboAssembler::Push(Operand src) { pushq(src); }
-void MacroAssembler::PushQuad(Operand src) {
- if (kSystemPointerSize == kInt64Size) {
- pushq(src);
- } else {
- movp(kScratchRegister, src);
- pushq(kScratchRegister);
- }
-}
+void MacroAssembler::PushQuad(Operand src) { pushq(src); }
-void TurboAssembler::Push(Immediate value) {
- if (kSystemPointerSize == kInt64Size) {
- pushq(value);
- } else {
- leal(rsp, Operand(rsp, -4));
- movp(Operand(rsp, 0), value);
- }
-}
+void TurboAssembler::Push(Immediate value) { pushq(value); }
+void MacroAssembler::PushImm32(int32_t imm32) { pushq_imm32(imm32); }
-void MacroAssembler::PushImm32(int32_t imm32) {
- if (kSystemPointerSize == kInt64Size) {
- pushq_imm32(imm32);
- } else {
- leal(rsp, Operand(rsp, -4));
- movp(Operand(rsp, 0), Immediate(imm32));
- }
-}
-
-
-void MacroAssembler::Pop(Register dst) {
- if (kSystemPointerSize == kInt64Size) {
- popq(dst);
- } else {
- // x32 uses 64-bit pop for rbp in the epilogue.
- DCHECK(dst.code() != rbp.code());
- movp(dst, Operand(rsp, 0));
- leal(rsp, Operand(rsp, 4));
- }
-}
+void MacroAssembler::Pop(Register dst) { popq(dst); }
-void MacroAssembler::Pop(Operand dst) {
- if (kSystemPointerSize == kInt64Size) {
- popq(dst);
- } else {
- Register scratch = dst.AddressUsesRegister(kScratchRegister)
- ? kRootRegister : kScratchRegister;
- movp(scratch, Operand(rsp, 0));
- movp(dst, scratch);
- leal(rsp, Operand(rsp, 4));
- if (scratch == kRootRegister) {
- // Restore kRootRegister.
- InitializeRootRegister();
- }
- }
-}
+void MacroAssembler::Pop(Operand dst) { popq(dst); }
-void MacroAssembler::PopQuad(Operand dst) {
- if (kSystemPointerSize == kInt64Size) {
- popq(dst);
- } else {
- popq(kScratchRegister);
- movp(dst, kScratchRegister);
- }
-}
+void MacroAssembler::PopQuad(Operand dst) { popq(dst); }
void TurboAssembler::Jump(ExternalReference ext) {
LoadAddress(kScratchRegister, ext);
jmp(kScratchRegister);
}
-void TurboAssembler::Jump(Operand op) {
- if (kSystemPointerSize == kInt64Size) {
- jmp(op);
- } else {
- movp(kScratchRegister, op);
- jmp(kScratchRegister);
- }
-}
+void TurboAssembler::Jump(Operand op) { jmp(op); }
void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
@@ -1655,10 +1516,10 @@ void TurboAssembler::Call(ExternalReference ext) {
}
void TurboAssembler::Call(Operand op) {
- if (kSystemPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
+ if (!CpuFeatures::IsSupported(ATOM)) {
call(op);
} else {
- movp(kScratchRegister, op);
+ movq(kScratchRegister, op);
call(kScratchRegister);
}
}
@@ -1691,7 +1552,6 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
@@ -1701,8 +1561,7 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
// of times_8 since smis are already shifted by one).
Call(Operand(kRootRegister, builtin_pointer, times_4,
IsolateData::builtin_entry_table_offset()));
-#else // V8_COMPRESS_POINTERS
- STATIC_ASSERT(kSystemPointerSize == 8);
+#else // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
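
[Note: arithmetic behind the times_4 scale in the compressed/31-bit-smi branch above, where kSmiTagSize == 1 and kSmiShiftSize == 0: the builtin index i arrives as the smi i << 1, so scaling the smi by 4 already lands on i * 8, the byte offset of an 8-byte builtin_entry_table slot, with no explicit untag. A sketch:

#include <cstdint>

constexpr uint64_t SmiEncode31(uint64_t index) { return index << 1; }
constexpr uint64_t TableByteOffset(uint64_t smi) { return smi * 4; }
static_assert(TableByteOffset(SmiEncode31(5)) == 5 * 8,
              "smi * 4 == index * 8, a whole table slot");
]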
@@ -1739,20 +1598,21 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// A non-builtin Code object, the entry point is at
// Code::raw_instruction_start().
Move(destination, code_object);
- addp(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
jmp(&out);
// A builtin Code object, the entry point is loaded from the builtin entry
// table.
bind(&if_code_is_builtin);
movl(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
- movp(destination, Operand(kRootRegister, destination, times_pointer_size,
- IsolateData::builtin_entry_table_offset()));
+ movq(destination,
+ Operand(kRootRegister, destination, times_system_pointer_size,
+ IsolateData::builtin_entry_table_offset()));
bind(&out);
} else {
Move(destination, code_object);
- addp(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
}
}
@@ -2017,7 +1877,7 @@ void MacroAssembler::Pushad() {
// Use lea for symmetry with Popad.
int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
kSystemPointerSize;
- leap(rsp, Operand(rsp, -sp_delta));
+ leaq(rsp, Operand(rsp, -sp_delta));
}
@@ -2025,7 +1885,7 @@ void MacroAssembler::Popad() {
// Popad must not change the flags, so use lea instead of addq.
int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
kSystemPointerSize;
- leap(rsp, Operand(rsp, sp_delta));
+ leaq(rsp, Operand(rsp, sp_delta));
Pop(r15);
Pop(r14);
Pop(r12);
@@ -2076,7 +1936,7 @@ void MacroAssembler::PushStackHandler() {
Push(ExternalReferenceAsOperand(handler_address));
// Set this new handler as the current one.
- movp(ExternalReferenceAsOperand(handler_address), rsp);
+ movq(ExternalReferenceAsOperand(handler_address), rsp);
}
@@ -2085,7 +1945,7 @@ void MacroAssembler::PopStackHandler() {
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
Pop(ExternalReferenceAsOperand(handler_address));
- addp(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
+ addq(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
void TurboAssembler::Ret() { ret(0); }
@@ -2095,7 +1955,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
ret(bytes_dropped);
} else {
PopReturnAddressTo(scratch);
- addp(rsp, Immediate(bytes_dropped));
+ addq(rsp, Immediate(bytes_dropped));
PushReturnAddressFrom(scratch);
ret(0);
}
@@ -2237,7 +2097,7 @@ void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
cmpl(in_out, Immediate(kClearedWeakHeapObjectLower32));
j(equal, target_if_cleared);
- andp(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
+ andq(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
@@ -2272,7 +2132,7 @@ void MacroAssembler::MaybeDropFrames() {
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
Load(rbx, restart_fp);
- testp(rbx, rbx);
+ testq(rbx, rbx);
Label dont_drop;
j(zero, &dont_drop, Label::kNear);
@@ -2297,18 +2157,19 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// after we drop current frame.
Register new_sp_reg = scratch0;
if (callee_args_count.is_reg()) {
- subp(caller_args_count_reg, callee_args_count.reg());
- leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
- StandardFrameConstants::kCallerPCOffset));
+ subq(caller_args_count_reg, callee_args_count.reg());
+ leaq(new_sp_reg,
+ Operand(rbp, caller_args_count_reg, times_system_pointer_size,
+ StandardFrameConstants::kCallerPCOffset));
} else {
- leap(new_sp_reg,
- Operand(rbp, caller_args_count_reg, times_pointer_size,
+ leaq(new_sp_reg,
+ Operand(rbp, caller_args_count_reg, times_system_pointer_size,
StandardFrameConstants::kCallerPCOffset -
callee_args_count.immediate() * kSystemPointerSize));
}
if (FLAG_debug_code) {
- cmpp(rsp, new_sp_reg);
+ cmpq(rsp, new_sp_reg);
Check(below, AbortReason::kStackAccessBelowStackPointer);
}
@@ -2316,19 +2177,19 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// to avoid its trashing and let the following loop copy it to the right
// place.
Register tmp_reg = scratch1;
- movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
- movp(Operand(rsp, 0), tmp_reg);
+ movq(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+ movq(Operand(rsp, 0), tmp_reg);
// Restore caller's frame pointer now as it could be overwritten by
// the copying loop.
- movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ movq(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// +2 here is to copy both receiver and return address.
Register count_reg = caller_args_count_reg;
if (callee_args_count.is_reg()) {
- leap(count_reg, Operand(callee_args_count.reg(), 2));
+ leaq(count_reg, Operand(callee_args_count.reg(), 2));
} else {
- movp(count_reg, Immediate(callee_args_count.immediate() + 2));
+ movq(count_reg, Immediate(callee_args_count.immediate() + 2));
// TODO(ishell): Unroll copying loop for small immediate values.
}
@@ -2337,15 +2198,15 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Label loop, entry;
jmp(&entry, Label::kNear);
bind(&loop);
- decp(count_reg);
- movp(tmp_reg, Operand(rsp, count_reg, times_pointer_size, 0));
- movp(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
+ decq(count_reg);
+ movq(tmp_reg, Operand(rsp, count_reg, times_system_pointer_size, 0));
+ movq(Operand(new_sp_reg, count_reg, times_system_pointer_size, 0), tmp_reg);
bind(&entry);
- cmpp(count_reg, Immediate(0));
+ cmpq(count_reg, Immediate(0));
j(not_equal, &loop, Label::kNear);
// Leave current frame.
- movp(rsp, new_sp_reg);
+ movq(rsp, new_sp_reg);
}
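
[Note: a standalone model of the copy loop above: the count includes the return address and the receiver (the "+2"), the copy runs from the highest slot down to slot 0, and only then is rsp redirected at the new frame top.

#include <cstdint>

void DropCallerFrame(uint64_t*& rsp, uint64_t* new_sp, int count) {
  for (int i = count - 1; i >= 0; i--) {  // decq count; copy; jne loop
    new_sp[i] = rsp[i];
  }
  rsp = new_sp;  // movq(rsp, new_sp_reg): leave the current frame
}
]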
void MacroAssembler::InvokeFunction(Register function, Register new_target,
@@ -2440,13 +2301,13 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// case when we invoke function values without going through the
// IC mechanism.
Set(rax, actual.immediate());
- cmpp(expected.reg(), Immediate(actual.immediate()));
+ cmpq(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
DCHECK(expected.reg() == rbx);
} else if (expected.reg() != actual.reg()) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmpp(expected.reg(), actual.reg());
+ cmpq(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
DCHECK(actual.reg() == rax);
DCHECK(expected.reg() == rbx);
@@ -2518,30 +2379,30 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void TurboAssembler::StubPrologue(StackFrame::Type type) {
pushq(rbp); // Caller's frame pointer.
- movp(rbp, rsp);
+ movq(rbp, rsp);
Push(Immediate(StackFrame::TypeToMarker(type)));
}
void TurboAssembler::Prologue() {
pushq(rbp); // Caller's frame pointer.
- movp(rbp, rsp);
+ movq(rbp, rsp);
Push(rsi); // Callee's context.
Push(rdi); // Callee's JS function.
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
pushq(rbp);
- movp(rbp, rsp);
+ movq(rbp, rsp);
Push(Immediate(StackFrame::TypeToMarker(type)));
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
- cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
+ cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
Check(equal, AbortReason::kStackFrameTypesMustMatch);
}
- movp(rsp, rbp);
+ movq(rsp, rbp);
popq(rbp);
}
@@ -2557,7 +2418,7 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax,
DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
pushq(rbp);
- movp(rbp, rsp);
+ movq(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
Push(Immediate(StackFrame::TypeToMarker(frame_type)));
@@ -2568,7 +2429,7 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax,
// Save the frame pointer and the context in top.
if (save_rax) {
- movp(r14, rax); // Backup rax in callee-save register.
+ movq(r14, rax); // Backup rax in callee-save register.
}
Store(
@@ -2591,8 +2452,8 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
// Optionally save all XMM registers.
if (save_doubles) {
int space = XMMRegister::kNumRegisters * kDoubleSize +
- arg_stack_space * kRegisterSize;
- subp(rsp, Immediate(space));
+ arg_stack_space * kSystemPointerSize;
+ subq(rsp, Immediate(space));
int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -2601,7 +2462,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subp(rsp, Immediate(arg_stack_space * kRegisterSize));
+ subq(rsp, Immediate(arg_stack_space * kSystemPointerSize));
}
// Get the required frame alignment for the OS.
@@ -2609,11 +2470,11 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (kFrameAlignment > 0) {
DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
DCHECK(is_int8(kFrameAlignment));
- andp(rsp, Immediate(-kFrameAlignment));
+ andq(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
- movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
+ movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
@@ -2623,7 +2484,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
- leap(r15, Operand(rbp, r14, times_pointer_size, offset));
+ leaq(r15, Operand(rbp, r14, times_system_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
@@ -2650,12 +2511,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
if (pop_arguments) {
// Get the return address from the stack and restore the frame pointer.
- movp(rcx, Operand(rbp, kFPOnStackSize));
- movp(rbp, Operand(rbp, 0 * kSystemPointerSize));
+ movq(rcx, Operand(rbp, kFPOnStackSize));
+ movq(rbp, Operand(rbp, 0 * kSystemPointerSize));
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- leap(rsp, Operand(r15, 1 * kSystemPointerSize));
+ leaq(rsp, Operand(r15, 1 * kSystemPointerSize));
PushReturnAddressFrom(rcx);
} else {
@@ -2667,7 +2528,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
}
void MacroAssembler::LeaveApiExitFrame() {
- movp(rsp, rbp);
+ movq(rsp, rbp);
popq(rbp);
LeaveExitFrameEpilogue();
@@ -2678,16 +2539,16 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
ExternalReference context_address =
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
Operand context_operand = ExternalReferenceAsOperand(context_address);
- movp(rsi, context_operand);
+ movq(rsi, context_operand);
#ifdef DEBUG
- movp(context_operand, Immediate(Context::kInvalidContext));
+ movq(context_operand, Immediate(Context::kInvalidContext));
#endif
// Clear the top frame.
ExternalReference c_entry_fp_address =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
Operand c_entry_fp_operand = ExternalReferenceAsOperand(c_entry_fp_address);
- movp(c_entry_fp_operand, Immediate(0));
+ movq(c_entry_fp_operand, Immediate(0));
}
@@ -2728,13 +2589,14 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) {
DCHECK_GE(num_arguments, 0);
// Make stack end at alignment and allocate space for arguments and old rsp.
- movp(kScratchRegister, rsp);
+ movq(kScratchRegister, rsp);
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
- andp(rsp, Immediate(-frame_alignment));
- movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
+ subq(rsp, Immediate((argument_slots_on_stack + 1) * kSystemPointerSize));
+ andq(rsp, Immediate(-frame_alignment));
+ movq(Operand(rsp, argument_slots_on_stack * kSystemPointerSize),
+ kScratchRegister);
}
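
[Note: the sequence above is the usual C-call shim: round rsp down to the ABI alignment with and-of-negated-alignment (for a power-of-two alignment A, -A is a mask with the low log2(A) bits clear), and stash the pre-alignment rsp in the extra slot so it can be restored after the call. The rounding in standalone form:

#include <cstdint>

constexpr uint64_t AlignDown(uint64_t sp, uint64_t alignment) {
  return sp & ~(alignment - 1);  // same bits as sp & -alignment
}
static_assert(AlignDown(0x1007, 16) == 0x1000, "rounds down to 16 bytes");
static_assert(AlignDown(0x1010, 16) == 0x1010, "aligned input is unchanged");
]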
void TurboAssembler::CallCFunction(ExternalReference function,
@@ -2758,10 +2620,10 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
DCHECK(!AreAliased(kScratchRegister, function));
leaq(kScratchRegister, Operand(&get_pc, 0));
bind(&get_pc);
- movp(ExternalReferenceAsOperand(
+ movq(ExternalReferenceAsOperand(
ExternalReference::fast_c_call_caller_pc_address(isolate())),
kScratchRegister);
- movp(ExternalReferenceAsOperand(
+ movq(ExternalReferenceAsOperand(
ExternalReference::fast_c_call_caller_fp_address(isolate())),
rbp);
}
@@ -2770,7 +2632,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
if (isolate() != nullptr) {
// We don't unset the PC; the FP is the source of truth.
- movp(ExternalReferenceAsOperand(
+ movq(ExternalReferenceAsOperand(
ExternalReference::fast_c_call_caller_fp_address(isolate())),
Immediate(0));
}
@@ -2779,7 +2641,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
DCHECK_GE(num_arguments, 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
+ movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize));
}
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
@@ -2787,10 +2649,10 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Label::Distance condition_met_distance) {
DCHECK(cc == zero || cc == not_zero);
if (scratch == object) {
- andp(scratch, Immediate(~kPageAlignmentMask));
+ andq(scratch, Immediate(~kPageAlignmentMask));
} else {
- movp(scratch, Immediate(~kPageAlignmentMask));
- andp(scratch, object);
+ movq(scratch, Immediate(~kPageAlignmentMask));
+ andq(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
@@ -2817,7 +2679,7 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
NoRootArrayScope no_root_array(this);
// Save the deopt id in r13 (we don't need the roots array from now on).
- movp(r13, Immediate(deopt_id));
+ movq(r13, Immediate(deopt_id));
call(target, RelocInfo::RUNTIME_ENTRY);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index cfd040a5c3..36a9475ef5 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -189,7 +189,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRoot(Register destination, RootIndex index) override;
void LoadRoot(Operand destination, RootIndex index) {
LoadRoot(kScratchRegister, index);
- movp(destination, kScratchRegister);
+ movq(destination, kScratchRegister);
}
void Push(Register src);
@@ -295,7 +295,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Operand dst, Smi source) {
Register constant = GetSmiConstant(source);
- movp(dst, constant);
+ movq(dst, constant);
}
void Move(Register dst, ExternalReference ext);
@@ -318,7 +318,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
- movp(dst, ptr, rmode);
+ movq(dst, Immediate64(ptr, rmode));
}
void MoveStringConstant(Register result, const StringConstantBase* string,
@@ -475,47 +475,40 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// ---------------------------------------------------------------------------
// Pointer compression support
- // TODO(ishell): remove |scratch_for_debug| once pointer compression works.
-
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
- void LoadTaggedPointerField(Register destination, Operand field_operand,
- Register scratch_for_debug = no_reg);
+ void LoadTaggedPointerField(Register destination, Operand field_operand);
// Loads a field containing any tagged value and decompresses it if necessary.
// When pointer compression is enabled, uses |scratch| to decompress the
// value.
void LoadAnyTaggedField(Register destination, Operand field_operand,
- Register scratch,
- Register scratch_for_debug = no_reg);
+ Register scratch);
// Loads a field containing a HeapObject, decompresses it if necessary and
// pushes full pointer to the stack. When pointer compression is enabled,
// uses |scratch| to decompress the value.
- void PushTaggedPointerField(Operand field_operand, Register scratch,
- Register scratch_for_debug = no_reg);
+ void PushTaggedPointerField(Operand field_operand, Register scratch);
// Loads a field containing any tagged value, decompresses it if necessary and
// pushes the full pointer to the stack. When pointer compression is enabled,
// uses |scratch1| and |scratch2| to decompress the value.
void PushTaggedAnyField(Operand field_operand, Register scratch1,
- Register scratch2,
- Register scratch_for_debug = no_reg);
+ Register scratch2);
// Loads a field containing a smi value and untags it.
void SmiUntagField(Register dst, Operand src);
- // Compresses and stores tagged value to given on-heap location.
- // TODO(ishell): drop once mov_tagged() can be used.
+ // Compresses tagged value if necessary and stores it to given on-heap
+ // location.
void StoreTaggedField(Operand dst_field_operand, Immediate immediate);
void StoreTaggedField(Operand dst_field_operand, Register value);
- void DecompressTaggedSigned(Register destination, Operand field_operand,
- Register scratch_for_debug);
- void DecompressTaggedPointer(Register destination, Operand field_operand,
- Register scratch_for_debug);
+ // The following macros work even when pointer compression is not enabled.
+ void DecompressTaggedSigned(Register destination, Operand field_operand);
+ void DecompressTaggedPointer(Register destination, Operand field_operand);
void DecompressAnyTagged(Register destination, Operand field_operand,
- Register scratch, Register scratch_for_debug);
+ Register scratch);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -711,15 +704,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Macro instructions.
- // Load/store with specific representation.
- void Load(Register dst, Operand src, Representation r);
- void Store(Operand dst, Register src, Representation r);
-
void Cmp(Register dst, Handle<Object> source);
void Cmp(Operand dst, Handle<Object> source);
void Cmp(Register dst, Smi src);
void Cmp(Operand dst, Smi src);
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range,
+ Label::Distance near_jump = Label::kFar);
+
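
The "single comparison" the new JumpIfIsInRange comment promises is the classic unsigned range-check trick: subtract the lower limit, then compare against the width of the range. Values below the lower limit wrap around to huge unsigned numbers and fail the same compare. A standalone model of the check (not the emitted code):

```cpp
#include <cassert>

// Returns true iff value is in [lower_limit, higher_limit], inclusive,
// using one subtraction and one unsigned comparison.
bool IsInRange(unsigned value, unsigned lower_limit, unsigned higher_limit) {
  assert(lower_limit <= higher_limit);
  return (value - lower_limit) <= (higher_limit - lower_limit);
}

int main() {
  assert(IsInRange(5, 3, 9));
  assert(!IsInRange(2, 3, 9));  // 2 - 3 wraps to UINT_MAX and fails.
  assert(IsInRange(3, 3, 9) && IsInRange(9, 3, 9));  // Bounds are inclusive.
}
```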
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
@@ -770,9 +765,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
static const int shift = Field::kShift;
static const int mask = Field::kMask >> Field::kShift;
if (shift != 0) {
- shrp(reg, Immediate(shift));
+ shrq(reg, Immediate(shift));
}
- andp(reg, Immediate(mask));
+ andq(reg, Immediate(mask));
}
// Abort execution if argument is a smi, enabled via --debug-code.
@@ -884,13 +879,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void LeaveExitFrameEpilogue();
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance distance = Label::kFar);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code) {
return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
@@ -927,7 +915,8 @@ inline Operand ContextOperand(Register context, int index) {
inline Operand ContextOperand(Register context, Register index) {
- return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
+ return Operand(context, index, times_system_pointer_size,
+ Context::SlotOffset(0));
}
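
The operand built here follows x64 scaled-index addressing: effective address = base + index * scale + displacement, with the scale now expressed as `times_system_pointer_size`. A small arithmetic model, with constants that are assumptions standing in for V8's real values:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kSystemPointerSize = 8;  // times_system_pointer_size scale.
  const uint64_t kSlotOffset0 = 16;       // Hypothetical Context::SlotOffset(0).
  uint64_t context = 0x10000;             // Base register value.
  uint64_t index = 5;                     // Slot index register value.
  uint64_t address = context + index * kSystemPointerSize + kSlotOffset0;
  printf("slot 5 at %#llx\n", (unsigned long long)address);
}
```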
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index 37ebcf0dd4..d59c7de146 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -11,196 +11,35 @@
#endif
#include "src/allocation.h"
+#include "src/asan.h"
+#include "src/msan.h"
namespace v8 {
namespace internal {
-AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
- static const size_t kDefaultBucketMaxSize = 5;
-
- memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
- std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
- nullptr);
- std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
- std::fill(unused_segments_max_sizes_,
- unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
-}
-
-AccountingAllocator::~AccountingAllocator() { ClearPool(); }
-
-void AccountingAllocator::MemoryPressureNotification(
- MemoryPressureLevel level) {
- memory_pressure_level_.SetValue(level);
-
- if (level != MemoryPressureLevel::kNone) {
- ClearPool();
- }
-}
-
-void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
- // The sum of the bytes of one segment of each size.
- static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
- (size_t(1) << kMinSegmentSizePower);
- size_t fits_fully = max_pool_size / full_size;
-
- base::MutexGuard lock_guard(&unused_segments_mutex_);
-
- // We assume few zones (less than 'fits_fully' many) to be active at the same
- // time. When zones grow regularly, they will keep requesting segments of
- // increasing size each time. Therefore we try to get as many segments with an
- // equal number of segments of each size as possible.
- // The remaining space is used to make more room for an 'incomplete set' of
- // segments beginning with the smaller ones.
- // This code will work best if the max_pool_size is a multiple of the
- // full_size. If max_pool_size is not a sum of segment sizes, the actual pool
- // size might be smaller than max_pool_size. Note that no actual memory gets
- // wasted though.
- // TODO(heimbuef): Determine better strategy generating a segment sizes
- // distribution that is closer to real/benchmark usecases and uses the given
- // max_pool_size more efficiently.
- size_t total_size = fits_fully * full_size;
-
- for (size_t power = 0; power < kNumberBuckets; ++power) {
- if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
- max_pool_size) {
- unused_segments_max_sizes_[power] = fits_fully + 1;
- total_size += size_t(1) << power;
- } else {
- unused_segments_max_sizes_[power] = fits_fully;
- }
- }
-}
-
-Segment* AccountingAllocator::GetSegment(size_t bytes) {
- Segment* result = GetSegmentFromPool(bytes);
- if (result == nullptr) {
- result = AllocateSegment(bytes);
- if (result != nullptr) {
- result->Initialize(bytes);
- }
- }
-
- return result;
-}
+AccountingAllocator::~AccountingAllocator() = default;
Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void* memory = AllocWithRetry(bytes);
- if (memory != nullptr) {
- base::AtomicWord current =
- base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
- base::AtomicWord max = base::Relaxed_Load(&max_memory_usage_);
- while (current > max) {
- max = base::Relaxed_CompareAndSwap(&max_memory_usage_, max, current);
- }
+ if (memory == nullptr) return nullptr;
+
+ size_t current =
+ current_memory_usage_.fetch_add(bytes, std::memory_order_relaxed);
+ size_t max = max_memory_usage_.load(std::memory_order_relaxed);
+ while (current > max && !max_memory_usage_.compare_exchange_weak(
+ max, current, std::memory_order_relaxed)) {
+ // {max} was updated by {compare_exchange_weak}; retry.
}
- return reinterpret_cast<Segment*>(memory);
+ DCHECK_LE(sizeof(Segment), bytes);
+ return new (memory) Segment(bytes);
}
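
The new AllocateSegment uses the standard lock-free "track the maximum" pattern: bump the current counter with a relaxed fetch_add, then raise the recorded maximum with a compare_exchange_weak loop. On CAS failure the expected value is refreshed, so each retry compares against the freshest maximum. A standalone sketch of the pattern (this version uses the post-add total for `current`, the common form of the idiom):

```cpp
#include <atomic>
#include <cassert>
#include <cstddef>

std::atomic<size_t> current_memory_usage{0};
std::atomic<size_t> max_memory_usage{0};

void OnAllocate(size_t bytes) {
  size_t current =
      current_memory_usage.fetch_add(bytes, std::memory_order_relaxed) + bytes;
  size_t max = max_memory_usage.load(std::memory_order_relaxed);
  while (current > max &&
         !max_memory_usage.compare_exchange_weak(max, current,
                                                 std::memory_order_relaxed)) {
    // compare_exchange_weak reloaded {max}; the loop re-checks against it.
  }
}

int main() {
  OnAllocate(100);
  OnAllocate(50);
  assert(current_memory_usage.load() == 150);
  assert(max_memory_usage.load() == 150);
}
```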
void AccountingAllocator::ReturnSegment(Segment* segment) {
segment->ZapContents();
-
- if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
- FreeSegment(segment);
- } else if (!AddSegmentToPool(segment)) {
- FreeSegment(segment);
- }
-}
-
-void AccountingAllocator::FreeSegment(Segment* memory) {
- base::Relaxed_AtomicIncrement(&current_memory_usage_,
- -static_cast<base::AtomicWord>(memory->size()));
- memory->ZapHeader();
- free(memory);
-}
-
-size_t AccountingAllocator::GetCurrentMemoryUsage() const {
- return base::Relaxed_Load(&current_memory_usage_);
-}
-
-size_t AccountingAllocator::GetMaxMemoryUsage() const {
- return base::Relaxed_Load(&max_memory_usage_);
-}
-
-size_t AccountingAllocator::GetCurrentPoolSize() const {
- return base::Relaxed_Load(&current_pool_size_);
-}
-
-Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
- if (requested_size > (1 << kMaxSegmentSizePower)) {
- return nullptr;
- }
-
- size_t power = kMinSegmentSizePower;
- while (requested_size > (static_cast<size_t>(1) << power)) power++;
-
- DCHECK_GE(power, kMinSegmentSizePower + 0);
- power -= kMinSegmentSizePower;
-
- Segment* segment;
- {
- base::MutexGuard lock_guard(&unused_segments_mutex_);
-
- segment = unused_segments_heads_[power];
-
- if (segment != nullptr) {
- unused_segments_heads_[power] = segment->next();
- segment->set_next(nullptr);
-
- unused_segments_sizes_[power]--;
- base::Relaxed_AtomicIncrement(
- &current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
- }
- }
-
- if (segment) {
- DCHECK_GE(segment->size(), requested_size);
- }
- return segment;
-}
-
-bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
- size_t size = segment->size();
-
- if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;
-
- if (size < (1 << kMinSegmentSizePower)) return false;
-
- size_t power = kMaxSegmentSizePower;
-
- while (size < (static_cast<size_t>(1) << power)) power--;
-
- DCHECK_GE(power, kMinSegmentSizePower + 0);
- power -= kMinSegmentSizePower;
-
- {
- base::MutexGuard lock_guard(&unused_segments_mutex_);
-
- if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
- return false;
- }
-
- segment->set_next(unused_segments_heads_[power]);
- unused_segments_heads_[power] = segment;
- base::Relaxed_AtomicIncrement(&current_pool_size_, size);
- unused_segments_sizes_[power]++;
- }
-
- return true;
-}
-
-void AccountingAllocator::ClearPool() {
- base::MutexGuard lock_guard(&unused_segments_mutex_);
-
- for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
- power++) {
- Segment* current = unused_segments_heads_[power];
- while (current) {
- Segment* next = current->next();
- FreeSegment(current);
- current = next;
- }
- unused_segments_heads_[power] = nullptr;
- }
+ current_memory_usage_.fetch_sub(segment->total_size(),
+ std::memory_order_relaxed);
+ segment->ZapHeader();
+ free(segment);
}
} // namespace internal
diff --git a/deps/v8/src/zone/accounting-allocator.h b/deps/v8/src/zone/accounting-allocator.h
index bf36a7ff95..e6f0d65e33 100644
--- a/deps/v8/src/zone/accounting-allocator.h
+++ b/deps/v8/src/zone/accounting-allocator.h
@@ -5,13 +5,11 @@
#ifndef V8_ZONE_ACCOUNTING_ALLOCATOR_H_
#define V8_ZONE_ACCOUNTING_ALLOCATOR_H_
+#include <atomic>
+
#include "include/v8-platform.h"
#include "include/v8.h"
-#include "src/base/atomic-utils.h"
-#include "src/base/atomicops.h"
#include "src/base/macros.h"
-#include "src/base/platform/mutex.h"
-#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
#include "src/zone/zone-segment.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
@@ -21,67 +19,30 @@ namespace internal {
class V8_EXPORT_PRIVATE AccountingAllocator {
public:
- static const size_t kMaxPoolSize = 8ul * KB;
-
- AccountingAllocator();
+ AccountingAllocator() = default;
virtual ~AccountingAllocator();
- // Gets an empty segment from the pool or creates a new one.
- virtual Segment* GetSegment(size_t bytes);
+ // Allocates a new segment. Returns nullptr on failed allocation.
+ virtual Segment* AllocateSegment(size_t bytes);
+
// Return unneeded segments to either insert them into the pool or release
// them if the pool is already full or memory pressure is high.
virtual void ReturnSegment(Segment* memory);
- size_t GetCurrentMemoryUsage() const;
- size_t GetMaxMemoryUsage() const;
-
- size_t GetCurrentPoolSize() const;
+ size_t GetCurrentMemoryUsage() const {
+ return current_memory_usage_.load(std::memory_order_relaxed);
+ }
- void MemoryPressureNotification(MemoryPressureLevel level);
- // Configures the zone segment pool size limits so the pool does not
- // grow bigger than max_pool_size.
- // TODO(heimbuef): Do not accept segments to pool that are larger than
- // their size class requires. Sometimes the zones generate weird segments.
- void ConfigureSegmentPool(const size_t max_pool_size);
+ size_t GetMaxMemoryUsage() const {
+ return max_memory_usage_.load(std::memory_order_relaxed);
+ }
virtual void ZoneCreation(const Zone* zone) {}
virtual void ZoneDestruction(const Zone* zone) {}
private:
- FRIEND_TEST(Zone, SegmentPoolConstraints);
-
- static const size_t kMinSegmentSizePower = 13;
- static const size_t kMaxSegmentSizePower = 18;
-
- STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower);
-
- static const size_t kNumberBuckets =
- 1 + kMaxSegmentSizePower - kMinSegmentSizePower;
-
- // Allocates a new segment. Returns nullptr on failed allocation.
- Segment* AllocateSegment(size_t bytes);
- void FreeSegment(Segment* memory);
-
- // Returns a segment from the pool of at least the requested size.
- Segment* GetSegmentFromPool(size_t requested_size);
- // Tries to add a segment to the pool. Returns false if the pool is full.
- bool AddSegmentToPool(Segment* segment);
-
- // Empties the pool and puts all its contents onto the garbage stack.
- void ClearPool();
-
- Segment* unused_segments_heads_[kNumberBuckets];
-
- size_t unused_segments_sizes_[kNumberBuckets];
- size_t unused_segments_max_sizes_[kNumberBuckets];
-
- base::Mutex unused_segments_mutex_;
-
- base::AtomicWord current_memory_usage_ = 0;
- base::AtomicWord max_memory_usage_ = 0;
- base::AtomicWord current_pool_size_ = 0;
-
- base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
+ std::atomic<size_t> current_memory_usage_{0};
+ std::atomic<size_t> max_memory_usage_{0};
DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
};
diff --git a/deps/v8/src/zone/zone-chunk-list.h b/deps/v8/src/zone/zone-chunk-list.h
index fe32e48c0b..a15f27fab1 100644
--- a/deps/v8/src/zone/zone-chunk-list.h
+++ b/deps/v8/src/zone/zone-chunk-list.h
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <stdlib.h>
+#include <algorithm>
#include "src/base/iterator.h"
#include "src/globals.h"
@@ -301,7 +301,8 @@ void ZoneChunkList<T>::push_back(const T& item) {
DCHECK_LE(back_->position_, back_->capacity_);
if (back_->position_ == back_->capacity_) {
if (back_->next_ == nullptr) {
- Chunk* chunk = NewChunk(Min(back_->capacity_ << 1, kMaxChunkCapacity));
+ constexpr auto max_capacity = kMaxChunkCapacity;
+ Chunk* chunk = NewChunk(std::min(back_->capacity_ << 1, max_capacity));
back_->next_ = chunk;
chunk->previous_ = back_;
}
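
The local `constexpr auto max_capacity` in this hunk looks redundant but likely matters for linkage: std::min takes its arguments by const reference, so passing the static constexpr member `kMaxChunkCapacity` directly would ODR-use it, and under pre-C++17 rules that requires an out-of-line definition or the build can fail to link. Copying the value into a local only reads it. A minimal stand-in (not the real ZoneChunkList) showing the shape:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

struct ChunkListLike {
  static constexpr uint32_t kMaxChunkCapacity = 8192;

  static uint32_t NextCapacity(uint32_t current) {
    // std::min(current << 1, kMaxChunkCapacity) would bind a reference to
    // the member and ODR-use it; the local constexpr only reads its value.
    constexpr auto max_capacity = kMaxChunkCapacity;
    return std::min(current << 1, max_capacity);
  }
};

int main() {
  printf("%u\n", ChunkListLike::NextCapacity(1024));  // 2048
  printf("%u\n", ChunkListLike::NextCapacity(8192));  // Capped at 8192.
}
```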
diff --git a/deps/v8/src/zone/zone-list-inl.h b/deps/v8/src/zone/zone-list-inl.h
index 0eebdcc212..a0e4b1950b 100644
--- a/deps/v8/src/zone/zone-list-inl.h
+++ b/deps/v8/src/zone/zone-list-inl.h
@@ -66,7 +66,9 @@ template <typename T>
void ZoneList<T>::Resize(int new_capacity, ZoneAllocationPolicy alloc) {
DCHECK_LE(length_, new_capacity);
T* new_data = NewData(new_capacity, alloc);
- MemCopy(new_data, data_, length_ * sizeof(T));
+ if (length_ > 0) {
+ MemCopy(new_data, data_, length_ * sizeof(T));
+ }
ZoneList<T>::DeleteData(data_);
data_ = new_data;
capacity_ = new_capacity;
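
The guard added around MemCopy (essentially a memcpy wrapper) fixes a subtle undefined-behavior case: memcpy requires valid pointers even for a zero byte count, and an empty ZoneList may still have a null `data_`. A standalone sketch of the same fix, using malloc/free where the real code uses the Zone:

```cpp
#include <cstdlib>
#include <cstring>

struct IntList {
  int* data = nullptr;
  int length = 0;

  void Resize(int new_capacity) {
    int* new_data = static_cast<int*>(malloc(new_capacity * sizeof(int)));
    if (length > 0) {  // memcpy(dst, nullptr, 0) is undefined behavior.
      memcpy(new_data, data, length * sizeof(int));
    }
    free(data);
    data = new_data;
  }
};

int main() {
  IntList list;
  list.Resize(16);  // Safe even though data starts out null.
  free(list.data);
}
```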
diff --git a/deps/v8/src/zone/zone-segment.h b/deps/v8/src/zone/zone-segment.h
index 206edc7d64..2bc2f7f1ca 100644
--- a/deps/v8/src/zone/zone-segment.h
+++ b/deps/v8/src/zone/zone-segment.h
@@ -15,20 +15,24 @@
namespace v8 {
namespace internal {
-// Forward declaration
+// Forward declarations.
+class AccountingAllocator;
class Zone;
class Segment {
public:
- void Initialize(size_t size) { size_ = size; }
-
Zone* zone() const { return zone_; }
void set_zone(Zone* const zone) { zone_ = zone; }
Segment* next() const { return next_; }
void set_next(Segment* const next) { next_ = next; }
- size_t size() const { return size_; }
+ // {total_size} returns the allocated size including the bookkeeping bytes of
+ // the {Segment}.
+ size_t total_size() const { return size_; }
+
+ // {capacity} returns the number of storage bytes in this {Segment}, i.e.
+ // {end() - start()}.
size_t capacity() const { return size_ - sizeof(Segment); }
Address start() const { return address(sizeof(Segment)); }
@@ -40,6 +44,11 @@ class Segment {
void ZapHeader();
private:
+ // Segments are only created by the AccountingAllocator.
+ friend class AccountingAllocator;
+
+ explicit Segment(size_t size) : size_(size) {}
+
#ifdef DEBUG
// Constant byte value used for zapping dead memory in debug mode.
static const unsigned char kZapDeadByte = 0xcd;
@@ -50,10 +59,11 @@ class Segment {
return reinterpret_cast<Address>(this) + n;
}
- Zone* zone_;
- Segment* next_;
- size_t size_;
+ Zone* zone_ = nullptr;
+ Segment* next_ = nullptr;
+ const size_t size_;
};
+
} // namespace internal
} // namespace v8
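
Taken together with the AllocateSegment hunk above, the new Segment layout is: one malloc'ed block, a placement-newed header at its front, and the payload after the header. `total_size()` covers the whole block, `capacity()` the payload only. A simplified stand-in (not the real class) demonstrating the arithmetic:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <new>

class Segment {
 public:
  explicit Segment(size_t size) : size_(size) {}
  size_t total_size() const { return size_; }
  size_t capacity() const { return size_ - sizeof(Segment); }
  uintptr_t start() const {
    return reinterpret_cast<uintptr_t>(this) + sizeof(Segment);
  }

 private:
  const size_t size_;
};

int main() {
  const size_t bytes = 4096;
  void* memory = malloc(bytes);
  assert(sizeof(Segment) <= bytes);  // Mirrors the DCHECK in the diff.
  Segment* segment = new (memory) Segment(bytes);
  assert(segment->capacity() == bytes - sizeof(Segment));
  assert(segment->start() == reinterpret_cast<uintptr_t>(memory) + sizeof(Segment));
  free(segment);  // Header and payload are one allocation, freed together.
}
```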
diff --git a/deps/v8/src/zone/zone-splay-tree.h b/deps/v8/src/zone/zone-splay-tree.h
new file mode 100644
index 0000000000..55a81738b6
--- /dev/null
+++ b/deps/v8/src/zone/zone-splay-tree.h
@@ -0,0 +1,38 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_ZONE_SPLAY_TREE_H_
+#define V8_ZONE_ZONE_SPLAY_TREE_H_
+
+#include "src/splay-tree.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// A zone splay tree. The config type parameter encapsulates the
+// different configurations of a concrete splay tree (see splay-tree.h).
+// The tree itself and all its elements are allocated in the Zone.
+template <typename Config>
+class ZoneSplayTree final : public SplayTree<Config, ZoneAllocationPolicy> {
+ public:
+ explicit ZoneSplayTree(Zone* zone)
+ : SplayTree<Config, ZoneAllocationPolicy>(ZoneAllocationPolicy(zone)) {}
+ ~ZoneSplayTree() {
+ // Reset the root to avoid unneeded iteration over all tree nodes
+ // in the destructor. For a zone-allocated tree, nodes will be
+ // freed by the Zone.
+ SplayTree<Config, ZoneAllocationPolicy>::ResetRoot();
+ }
+
+ void* operator new(size_t size, Zone* zone) { return zone->New(size); }
+
+ void operator delete(void* pointer) { UNREACHABLE(); }
+ void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ZONE_ZONE_SPLAY_TREE_H_
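
The operator new/delete overloads in the relocated ZoneSplayTree follow V8's zone-allocation pattern: instances are carved out of zone memory via `new (zone) T(...)` and never individually deleted; the whole zone is released at once. A hedged sketch of that pattern, where ToyZone is an assumption standing in for v8::internal::Zone:

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <vector>

struct ToyZone {
  std::vector<char*> blocks;
  void* New(size_t size) {
    char* block = new char[size];
    blocks.push_back(block);
    return block;
  }
  ~ToyZone() {
    for (char* block : blocks) delete[] block;  // Bulk release, like Zone.
  }
};

struct ZoneAllocated {
  void* operator new(size_t size, ToyZone* zone) { return zone->New(size); }
  // Individual deletion is meaningless for zone objects; mirroring the
  // UNREACHABLE() bodies in the diff, these abort if ever reached.
  void operator delete(void*) { abort(); }
  void operator delete(void*, ToyZone*) { abort(); }

  int payload = 42;
};

int main() {
  ToyZone zone;
  ZoneAllocated* object = new (&zone) ZoneAllocated();
  printf("%d\n", object->payload);
}  // No delete: the zone destructor frees everything.
```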
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index 2b836f3778..9b5153d3f6 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -85,7 +85,7 @@ void Zone::DeleteAll() {
// Traverse the chained list of segments and return them all to the allocator.
for (Segment* current = segment_head_; current;) {
Segment* next = current->next();
- size_t size = current->size();
+ size_t size = current->total_size();
// Un-poison the segment content so we can re-use or zap it later.
ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(current->start()),
@@ -101,17 +101,16 @@ void Zone::DeleteAll() {
segment_head_ = nullptr;
}
-// Creates a new segment, sets it size, and pushes it to the front
+// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(size_t requested_size) {
- Segment* result = allocator_->GetSegment(requested_size);
- if (result != nullptr) {
- DCHECK_GE(result->size(), requested_size);
- segment_bytes_allocated_ += result->size();
- result->set_zone(this);
- result->set_next(segment_head_);
- segment_head_ = result;
- }
+ Segment* result = allocator_->AllocateSegment(requested_size);
+ if (!result) return nullptr;
+ DCHECK_GE(result->total_size(), requested_size);
+ segment_bytes_allocated_ += result->total_size();
+ result->set_zone(this);
+ result->set_next(segment_head_);
+ segment_head_ = result;
return result;
}
@@ -128,7 +127,7 @@ Address Zone::NewExpand(size_t size) {
// except that we employ a maximum segment size when we delete. This
// is to avoid excessive malloc() and free() overhead.
Segment* head = segment_head_;
- const size_t old_size = (head == nullptr) ? 0 : head->size();
+ const size_t old_size = head ? head->total_size() : 0;
static const size_t kSegmentOverhead = sizeof(Segment) + kAlignmentInBytes;
const size_t new_size_no_overhead = size + (old_size << 1);
size_t new_size = kSegmentOverhead + new_size_no_overhead;
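
The growth rule visible in this NewExpand hunk sizes each new segment as the requested allocation plus twice the previous segment's size plus a fixed overhead, so segments grow geometrically and malloc/free traffic stays low, as the comment above it says. A sketch of the arithmetic with illustrative constants:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kSegmentOverhead = 64;  // sizeof(Segment) + alignment, assumed.
  size_t old_size = 0;                 // No segments yet.
  for (int i = 0; i < 5; i++) {
    const size_t requested = 256;      // Hypothetical allocation request.
    size_t new_size = kSegmentOverhead + requested + (old_size << 1);
    printf("segment %d: %zu bytes\n", i, new_size);
    old_size = new_size;               // The next round doubles off this one.
  }
}
```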
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index 8fb7d1fc74..0dc1a30947 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -5,14 +5,13 @@
#ifndef V8_ZONE_ZONE_H_
#define V8_ZONE_ZONE_H_
+#include <algorithm>
#include <limits>
+#include <vector>
#include "src/base/hashmap.h"
#include "src/base/logging.h"
-#include "src/base/threaded-list.h"
#include "src/globals.h"
-#include "src/splay-tree.h"
-#include "src/utils.h"
#include "src/zone/accounting-allocator.h"
#ifndef ZONE_NAME
@@ -226,7 +225,7 @@ class ZoneList final {
Vector<T> ToVector() const { return Vector<T>(data_, length_); }
Vector<T> ToVector(int start, int length) const {
- return Vector<T>(data_ + start, Min(length_ - start, length));
+ return Vector<T>(data_ + start, std::min(length_ - start, length));
}
Vector<const T> ToConstVector() const {
@@ -364,6 +363,15 @@ class ScopedPtrList final {
target->AddAll(Vector<T*>(data, length()), zone);
}
+ Vector<T*> CopyTo(Zone* zone) {
+ DCHECK_LE(end_, buffer_.size());
+ T** data = zone->NewArray<T*>(length());
+ if (length() != 0) {
+ MemCopy(data, &buffer_[start_], length() * sizeof(T*));
+ }
+ return Vector<T*>(data, length());
+ }
+
void Add(T* value) {
DCHECK_EQ(buffer_.size(), end_);
buffer_.push_back(value);
@@ -385,32 +393,6 @@ class ScopedPtrList final {
size_t end_;
};
-// ZoneThreadedList is a special variant of the ThreadedList that can be put
-// into a Zone.
-template <typename T, typename TLTraits = base::ThreadedListTraits<T>>
-using ZoneThreadedList = base::ThreadedListBase<T, ZoneObject, TLTraits>;
-
-// A zone splay tree. The config type parameter encapsulates the
-// different configurations of a concrete splay tree (see splay-tree.h).
-// The tree itself and all its elements are allocated in the Zone.
-template <typename Config>
-class ZoneSplayTree final : public SplayTree<Config, ZoneAllocationPolicy> {
- public:
- explicit ZoneSplayTree(Zone* zone)
- : SplayTree<Config, ZoneAllocationPolicy>(ZoneAllocationPolicy(zone)) {}
- ~ZoneSplayTree() {
- // Reset the root to avoid unneeded iteration over all tree nodes
- // in the destructor. For a zone-allocated tree, nodes will be
- // freed by the Zone.
- SplayTree<Config, ZoneAllocationPolicy>::ResetRoot();
- }
-
- void* operator new(size_t size, Zone* zone) { return zone->New(size); }
-
- void operator delete(void* pointer) { UNREACHABLE(); }
- void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
-};
-
typedef base::PointerTemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
typedef base::CustomMatcherTemplateHashMapImpl<ZoneAllocationPolicy>
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index 70c8b51fa3..68916347b3 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -141,3 +141,24 @@ group("v8_run_num_fuzzer") {
"webkit:v8_webkit",
]
}
+
+v8_header_set("common_test_headers") {
+ testonly = true
+
+ configs = []
+
+ public_deps = [
+ # We can't depend on this here, because if the "cctest" target depends on it
+ # we will get duplicate symbols.
+ #"../:v8_for_testing",
+ "../:v8_libbase",
+ ]
+
+ sources = [
+ "common/assembler-tester.h",
+ "common/types-fuzz.h",
+ "common/wasm/flag-utils.h",
+ "common/wasm/test-signatures.h",
+ "common/wasm/wasm-macro-gen.h",
+ ]
+}
diff --git a/deps/v8/test/OWNERS b/deps/v8/test/OWNERS
new file mode 100644
index 0000000000..85f514c4ab
--- /dev/null
+++ b/deps/v8/test/OWNERS
@@ -0,0 +1,3 @@
+machenbach@chromium.org
+sergiyb@chromium.org
+tmrts@chromium.org
\ No newline at end of file

diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index 53acd19be4..bb87cc6dba 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -43,10 +43,20 @@
'octane/typescript': [SKIP],
}],
+['variant == jitless', {
+ # Too slow for jitless mode.
+ 'octane/zlib': [SKIP],
+}],
+
['gc_fuzzer', {
# Too slow for gc fuzzing.
'octane/earley-boyer' : [PASS, SLOW, ['mode == debug', SKIP]],
'octane/splay': [SKIP],
'octane/typescript': [SKIP],
}], # 'gc_fuzzer'
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
]
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index 8333f49c78..bf24b7a7d5 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -34,13 +34,9 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
-class TestSuite(testsuite.TestSuite):
- def __init__(self, *args, **kwargs):
- super(TestSuite, self).__init__(*args, **kwargs)
- self.testroot = os.path.join(self.root, "data")
-
- def ListTests(self):
- tests = map(self._create_test, [
+class TestLoader(testsuite.TestLoader):
+ def _list_test_filenames(self):
+ return [
"kraken/ai-astar",
"kraken/audio-beat-detection",
"kraken/audio-dft",
@@ -98,8 +94,16 @@ class TestSuite(testsuite.TestSuite):
"sunspider/string-tagcloud",
"sunspider/string-unpack-code",
"sunspider/string-validate-input",
- ])
- return tests
+ ]
+
+
+class TestSuite(testsuite.TestSuite):
+ def __init__(self, *args, **kwargs):
+ super(TestSuite, self).__init__(*args, **kwargs)
+ self.testroot = os.path.join(self.root, "data")
+
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index 9c18ce5806..b61d9edf53 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -43,6 +43,19 @@ v8_executable("cctest") {
}
}
+v8_header_set("cctest_headers") {
+ testonly = true
+
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+
+ sources = [
+ "cctest.h",
+ ]
+}
+
v8_source_set("cctest_sources") {
testonly = true
@@ -55,7 +68,6 @@ v8_source_set("cctest_sources") {
"../common/wasm/test-signatures.h",
"../common/wasm/wasm-macro-gen.h",
"cctest.cc",
- "cctest.h",
"compiler/c-signature.h",
"compiler/call-tester.h",
"compiler/code-assembler-tester.h",
@@ -64,6 +76,8 @@ v8_source_set("cctest_sources") {
"compiler/function-tester.cc",
"compiler/function-tester.h",
"compiler/graph-builder-tester.h",
+ "compiler/serializer-tester.cc",
+ "compiler/serializer-tester.h",
"compiler/test-basic-block-profiler.cc",
"compiler/test-branch-combine.cc",
"compiler/test-code-assembler.cc",
@@ -236,10 +250,8 @@ v8_source_set("cctest_sources") {
"test-version.cc",
"test-weakmaps.cc",
"test-weaksets.cc",
- "torque/test-torque.cc",
"trace-extension.cc",
"trace-extension.h",
- "types-fuzz.h",
"unicode-helpers.cc",
"unicode-helpers.h",
"wasm/test-c-wasm-entry.cc",
@@ -248,6 +260,7 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm-asmjs.cc",
"wasm/test-run-wasm-atomics.cc",
"wasm/test-run-wasm-atomics64.cc",
+ "wasm/test-run-wasm-exceptions.cc",
"wasm/test-run-wasm-interpreter.cc",
"wasm/test-run-wasm-js.cc",
"wasm/test-run-wasm-module.cc",
@@ -354,7 +367,9 @@ v8_source_set("cctest_sources") {
]
public_deps = [
+ ":cctest_headers",
":resources",
+ "..:common_test_headers",
"../..:v8_initializers",
"../..:v8_libbase",
"../..:v8_libplatform",
@@ -437,6 +452,8 @@ action("resources") {
}
v8_executable("generate-bytecode-expectations") {
+ testonly = true
+
sources = [
"interpreter/bytecode-expectations-printer.cc",
"interpreter/bytecode-expectations-printer.h",
@@ -449,6 +466,7 @@ v8_executable("generate-bytecode-expectations") {
]
deps = [
+ ":cctest_headers",
"../..:v8",
"../..:v8_libbase",
"../..:v8_libplatform",
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index 30fc172657..43a617e87a 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -11,3 +11,4 @@ per-file *-s390*=joransiu@ca.ibm.com
per-file *-s390*=jyan@ca.ibm.com
per-file *-s390*=mbrandy@us.ibm.com
per-file *-s390*=michael_dawson@ca.ibm.com
+per-file *profile*=alph@chromium.org
diff --git a/deps/v8/test/cctest/assembler-helper-arm.cc b/deps/v8/test/cctest/assembler-helper-arm.cc
index bb3ed9eb4c..b3a27f8cd8 100644
--- a/deps/v8/test/cctest/assembler-helper-arm.cc
+++ b/deps/v8/test/cctest/assembler-helper-arm.cc
@@ -4,7 +4,7 @@
#include "test/cctest/assembler-helper-arm.h"
-#include "src/assembler-inl.h"
+#include "src/macro-assembler.h"
#include "src/isolate-inl.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -12,9 +12,9 @@
namespace v8 {
namespace internal {
-Handle<Code> AssembleCodeImpl(std::function<void(Assembler&)> assemble) {
+Handle<Code> AssembleCodeImpl(std::function<void(MacroAssembler&)> assemble) {
Isolate* isolate = CcTest::i_isolate();
- Assembler assm(AssemblerOptions{});
+ MacroAssembler assm(AssemblerOptions{});
assemble(assm);
assm.bx(lr);
diff --git a/deps/v8/test/cctest/assembler-helper-arm.h b/deps/v8/test/cctest/assembler-helper-arm.h
index 1f7c0ff9ad..15b821a30d 100644
--- a/deps/v8/test/cctest/assembler-helper-arm.h
+++ b/deps/v8/test/cctest/assembler-helper-arm.h
@@ -21,11 +21,11 @@ using F_ppiii = void*(void* p0, void* p1, int p2, int p3, int p4);
using F_pppii = void*(void* p0, void* p1, void* p2, int p3, int p4);
using F_ippii = void*(int p0, void* p1, void* p2, int p3, int p4);
-Handle<Code> AssembleCodeImpl(std::function<void(Assembler&)> assemble);
+Handle<Code> AssembleCodeImpl(std::function<void(MacroAssembler&)> assemble);
template <typename Signature>
GeneratedCode<Signature> AssembleCode(
- std::function<void(Assembler&)> assemble) {
+ std::function<void(MacroAssembler&)> assemble) {
return GeneratedCode<Signature>::FromCode(*AssembleCodeImpl(assemble));
}
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index ee03a66ea3..e4a0bd8a50 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -29,8 +29,11 @@
#include "test/cctest/cctest.h"
#include "include/libplatform/libplatform.h"
+#include "src/compiler.h"
+#include "src/compiler/pipeline.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
+#include "src/optimized-compilation-info.h"
#include "src/trap-handler/trap-handler.h"
#include "test/cctest/print-extension.h"
#include "test/cctest/profiler-extension.h"
@@ -222,6 +225,36 @@ HandleAndZoneScope::HandleAndZoneScope()
HandleAndZoneScope::~HandleAndZoneScope() = default;
+i::Handle<i::JSFunction> Optimize(i::Handle<i::JSFunction> function,
+ i::Zone* zone, i::Isolate* isolate,
+ uint32_t flags,
+ i::compiler::JSHeapBroker** out_broker) {
+ i::Handle<i::SharedFunctionInfo> shared(function->shared(), isolate);
+ i::IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
+ CHECK(is_compiled_scope.is_compiled() ||
+ i::Compiler::Compile(function, i::Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope));
+
+ CHECK_NOT_NULL(zone);
+
+ i::OptimizedCompilationInfo info(zone, isolate, shared, function);
+
+ if (flags & i::OptimizedCompilationInfo::kInliningEnabled) {
+ info.MarkAsInliningEnabled();
+ }
+
+ CHECK(info.shared_info()->HasBytecodeArray());
+ i::JSFunction::EnsureFeedbackVector(function);
+
+ i::Handle<i::Code> code =
+ i::compiler::Pipeline::GenerateCodeForTesting(&info, isolate, out_broker)
+ .ToHandleChecked();
+ info.native_context()->AddOptimizedCode(*code);
+ function->set_code(*code);
+
+ return function;
+}
+
static void PrintTestList(CcTest* current) {
if (current == nullptr) return;
PrintTestList(current->prev());
@@ -282,12 +315,9 @@ int main(int argc, char* argv[]) {
CcTest::set_array_buffer_allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
- i::PrintExtension print_extension;
- v8::RegisterExtension(&print_extension);
- i::ProfilerExtension profiler_extension;
- v8::RegisterExtension(&profiler_extension);
- i::TraceExtension trace_extension;
- v8::RegisterExtension(&trace_extension);
+ v8::RegisterExtension(v8::base::make_unique<i::PrintExtension>());
+ v8::RegisterExtension(v8::base::make_unique<i::ProfilerExtension>());
+ v8::RegisterExtension(v8::base::make_unique<i::TraceExtension>());
int tests_run = 0;
bool print_run_count = true;
@@ -337,8 +367,7 @@ int main(int argc, char* argv[]) {
if (print_run_count && tests_run != 1)
printf("Ran %i tests.\n", tests_run);
CcTest::TearDown();
- // TODO(svenpanne) See comment above.
- // if (!disable_automatic_dispose_) v8::V8::Dispose();
+ if (!disable_automatic_dispose_) v8::V8::Dispose();
v8::V8::ShutdownPlatform();
return 0;
}
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 3c99721760..6e6b920dbd 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -56,6 +56,12 @@ const auto GetRegConfig = RegisterConfiguration::Default;
class HandleScope;
class Zone;
+namespace compiler {
+
+class JSHeapBroker;
+
+} // namespace compiler
+
} // namespace internal
} // namespace v8
@@ -487,7 +493,13 @@ static inline v8::Local<v8::Value> CompileRunWithOrigin(
return CompileRunWithOrigin(v8_str(source), origin_url);
}
-
+// Takes a JSFunction and runs it through the test version of the optimizing
+// pipeline, allocating the temporary compilation artifacts in a given Zone.
+// For possible {flags} values, look at OptimizedCompilationInfo::Flag.
+// If passed a non-null pointer for {broker}, outputs the JSHeapBroker to it.
+i::Handle<i::JSFunction> Optimize(
+ i::Handle<i::JSFunction> function, i::Zone* zone, i::Isolate* isolate,
+ uint32_t flags, i::compiler::JSHeapBroker** out_broker = nullptr);
static inline void ExpectString(const char* code, const char* expected) {
v8::Local<v8::Value> result = CompileRun(code);
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index b05848d07e..71dd21db35 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -74,23 +74,14 @@
# BUG(5193). The cpu profiler tests are notoriously flaky.
'test-profile-generator/RecordStackTraceAtStartProfiling': [SKIP],
- 'test-cpu-profiler/CollectCpuProfile': [SKIP],
- 'test-cpu-profiler/CollectCpuProfileCallerLineNumbers': [SKIP],
- 'test-cpu-profiler/CollectCpuProfileSamples': [SKIP],
'test-cpu-profiler/CollectDeoptEvents': [SKIP],
- 'test-cpu-profiler/CpuProfileDeepStack': [SKIP],
- 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [SKIP],
- 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [SKIP],
- 'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
- 'test-cpu-profiler/FunctionApplySample': [SKIP],
- 'test-cpu-profiler/HotDeoptNoFrameEntry': [SKIP],
+ 'test-cpu-profiler/CollectCpuProfile': [SKIP],
'test-cpu-profiler/JsNative1JsNative2JsSample': [SKIP],
'test-cpu-profiler/JsNativeJsRuntimeJsSample': [SKIP],
'test-cpu-profiler/JsNativeJsRuntimeJsSampleMultiple': [SKIP],
'test-cpu-profiler/JsNativeJsSample': [SKIP],
- 'test-cpu-profiler/NativeAccessorUninitializedIC': [SKIP],
+ 'test-cpu-profiler/HotDeoptNoFrameEntry': [SKIP],
'test-cpu-profiler/SampleWhenFrameIsNotSetup': [SKIP],
- 'test-cpu-profiler/TracingCpuProfiler': [SKIP],
'test-sampler/LibSamplerCollectSample': [SKIP],
# BUG(7202). The test is flaky.
@@ -118,6 +109,12 @@
# BUG(v8:8296). Flaky OOM test.
'test-heap/OutOfMemorySmallObjects': [SKIP],
+ # BUG(v8:8739). Wasm interpreter does not create proper stack traces.
+ 'test-wasm-stack/RunWasmInterpreter_CollectDetailedWasmStack_WasmError': [SKIP],
+
+ # https://crbug.com/v8/8919
+ 'test-platform/StackAlignment': [PASS, ['not is_clang', SKIP]],
+
############################################################################
# Slow tests.
'test-debug/CallFunctionInDebugger': [PASS, ['mode == debug', SLOW]],
@@ -184,11 +181,6 @@
}], # variant == nooptimization and (arch == arm or arch == arm64) and simulator_run
##############################################################################
-# TODO(ahaas): Port multiple return values to ARM, MIPS, S390 and PPC
-['arch == s390 or arch == s390x or arch == ppc or arch == ppc64', {
- 'test-multiple-return/*': [SKIP],
-}],
-##############################################################################
['asan == True', {
# Skip tests not suitable for ASAN.
'test-assembler-x64/AssemblerX64XchglOperations': [SKIP],
@@ -219,6 +211,8 @@
# support.
'test-serialize/CustomSnapshotDataBlobWithWarmup': [SKIP],
'test-serialize/SnapshotDataBlobWithWarmup': [SKIP],
+ # Fails the embedded blob <-> Isolate verification step.
+ 'test-serialize/CustomSnapshotDataBlobWithLocker': [SKIP],
# https://crbug.com/v8/7763
'test-lockers/ExtensionsRegistration': [SKIP],
@@ -420,8 +414,16 @@
# TODO(ppc/s390): implement atomic operations
'test-run-wasm-atomics/*': [SKIP],
+ # TODO(ppc/s390): support concurrent patching of jump table
+ 'test-jump-table-assembler/*': [SKIP],
+
}], # 'arch == ppc or arch == ppc64 or arch == s390 or arch == s390x'
+['arch == ppc64', {
+ # https://crbug.com/v8/8766
+ 'test-bytecode-generator/WideRegisters': [SKIP],
+}],
+
##############################################################################
['variant == stress_incremental_marking', {
'test-heap-profiler/SamplingHeapProfiler': [SKIP],
@@ -434,6 +436,12 @@
}], # variant == stress_background_compile
##############################################################################
+['variant == interpreted_regexp', {
+ # Times out: https://crbug.com/v8/8678
+ 'test-api/RegExpInterruption': [SKIP],
+}], # variant == interpreted_regexp
+
+##############################################################################
['variant == no_wasm_traps', {
'test-accessors/*': [SKIP],
'test-api-interceptors/*': [SKIP],
@@ -465,7 +473,7 @@
}],
##############################################################################
-['lite_mode', {
+['lite_mode or variant == jitless', {
# TODO(8394): First execution events don't work in lite_mode. Enable this after
# we fix the lite mode to track the first execution.
@@ -512,6 +520,9 @@
'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [SKIP],
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'test-api/WasmI32AtomicWaitCallback': [SKIP],
+ 'test-api/WasmI64AtomicWaitCallback': [SKIP],
+ 'test-api/WasmStreaming*': [SKIP],
'test-c-wasm-entry/*': [SKIP],
'test-jump-table-assembler/*': [SKIP],
'test-run-wasm-64/*': [SKIP],
@@ -519,6 +530,7 @@
'test-run-wasm-atomics64/*': [SKIP],
'test-run-wasm-atomics/*': [SKIP],
'test-run-wasm/*': [SKIP],
+ 'test-run-wasm-exceptions/*': [SKIP],
'test-run-wasm-interpreter/*': [SKIP],
'test-run-wasm-js/*': [SKIP],
'test-run-wasm-module/*': [SKIP],
@@ -537,11 +549,81 @@
# Tests that generate code at runtime.
'codegen-tester/*': [SKIP],
+ 'test-api/RegExpInterruption': [SKIP],
'test-assembler-*': [SKIP],
'test-basic-block-profiler/*': [SKIP],
'test-branch-combine/*': [SKIP],
'test-multiple-return/*': [SKIP],
+ 'test-regexp/MacroAssemblernativeAtStart': [SKIP],
+ 'test-regexp/MacroAssemblerNativeBackReferenceLATIN1': [SKIP],
+ 'test-regexp/MacroAssemblerNativeBackReferenceUC16': [SKIP],
+ 'test-regexp/MacroAssemblerNativeBackRefNoCase': [SKIP],
+ 'test-regexp/MacroAssemblerNativeBacktrack': [SKIP],
+ 'test-regexp/MacroAssemblerNativeLotsOfRegisters': [SKIP],
+ 'test-regexp/MacroAssemblerNativeRegisters': [SKIP],
+ 'test-regexp/MacroAssemblerNativeSimple': [SKIP],
+ 'test-regexp/MacroAssemblerNativeSimpleUC16': [SKIP],
+ 'test-regexp/MacroAssemblerNativeSuccess': [SKIP],
+ 'test-regexp/MacroAssemblerStackOverflow': [SKIP],
'test-run-calls-to-external-references/*': [SKIP],
+
+ # Field representation tracking is disabled in jitless mode.
+ 'test-field-type-tracking/*': [SKIP],
+
+ # Instruction cache flushing is disabled in jitless mode.
+ 'test-icache/*': [SKIP],
+}], # lite_mode or variant == jitless
+
+##############################################################################
+['lite_mode', {
+ # TODO(v8:8510): Tests that currently fail with lazy source positions.
+ 'test-cpu-profiler/TickLinesBaseline': [SKIP],
+ 'test-cpu-profiler/TickLinesOptimized': [SKIP],
+ 'test-cpu-profiler/Inlining2': [SKIP],
}], # lite_mode
+##############################################################################
+['variant == jitless', {
+ # https://crbug.com/v8/7777
+ 'serializer-tester/SerializeCallAnyReceiver': [SKIP],
+ 'serializer-tester/SerializeCallArguments': [SKIP],
+ 'serializer-tester/SerializeCallProperty': [SKIP],
+ 'serializer-tester/SerializeCallProperty2': [SKIP],
+ 'serializer-tester/SerializeCallUndefinedReceiver': [SKIP],
+ 'serializer-tester/SerializeCallUndefinedReceiver2': [SKIP],
+ 'serializer-tester/SerializeCallWithSpread': [SKIP],
+ 'serializer-tester/SerializeConstruct': [SKIP],
+ 'serializer-tester/SerializeConstructWithSpread': [SKIP],
+ 'serializer-tester/SerializeInlinedClosure': [SKIP],
+ 'serializer-tester/SerializeInlinedFunction': [SKIP],
+ 'test-api/TurboAsmDisablesDetach': [SKIP],
+ 'test-cpu-profiler/TickLinesOptimized': [SKIP],
+ 'test-heap/TestOptimizeAfterBytecodeFlushingCandidate': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmInterpreter_TryCatchCallDirect': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmInterpreter_TryCatchCallExternal': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmInterpreter_TryCatchCallIndirect': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmInterpreter_TryCatchThrow': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmInterpreter_TryCatchTrapTypeError': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmLiftoff_TryCatchCallDirect': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmLiftoff_TryCatchCallExternal': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmLiftoff_TryCatchCallIndirect': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmLiftoff_TryCatchThrow': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmLiftoff_TryCatchTrapTypeError': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmTurbofan_TryCatchCallDirect': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmTurbofan_TryCatchCallExternal': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmTurbofan_TryCatchCallIndirect': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmTurbofan_TryCatchThrow': [SKIP],
+ 'test-run-wasm-exceptions/RunWasmTurbofan_TryCatchTrapTypeError': [SKIP],
+
+ # Crashes on native arm.
+ 'test-macro-assembler-arm/ExtractLane': [PASS, ['arch == arm and not simulator_run', SKIP]],
+ 'test-macro-assembler-arm/LoadAndStoreWithRepresentation': [PASS, ['arch == arm and not simulator_run', SKIP]],
+ 'test-macro-assembler-arm/ReplaceLane': [PASS, ['arch == arm and not simulator_run', SKIP]],
+}], # variant == jitless
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
+
]
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc
index 0aff318211..a06585cbca 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.cc
+++ b/deps/v8/test/cctest/compiler/codegen-tester.cc
@@ -22,27 +22,19 @@ TEST(CompareWrapper) {
CompareWrapper wUint32LessThan(IrOpcode::kUint32LessThan);
CompareWrapper wUint32LessThanOrEqual(IrOpcode::kUint32LessThanOrEqual);
- {
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t a = *pl;
- int32_t b = *pr;
- CHECK_EQ(a == b, wWord32Equal.Int32Compare(a, b));
- CHECK_EQ(a < b, wInt32LessThan.Int32Compare(a, b));
- CHECK_EQ(a <= b, wInt32LessThanOrEqual.Int32Compare(a, b));
- }
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
+ CHECK_EQ(a == b, wWord32Equal.Int32Compare(a, b));
+ CHECK_EQ(a < b, wInt32LessThan.Int32Compare(a, b));
+ CHECK_EQ(a <= b, wInt32LessThanOrEqual.Int32Compare(a, b));
}
}
- {
- FOR_UINT32_INPUTS(pl) {
- FOR_UINT32_INPUTS(pr) {
- uint32_t a = *pl;
- uint32_t b = *pr;
- CHECK_EQ(a == b, wWord32Equal.Int32Compare(a, b));
- CHECK_EQ(a < b, wUint32LessThan.Int32Compare(a, b));
- CHECK_EQ(a <= b, wUint32LessThanOrEqual.Int32Compare(a, b));
- }
+ FOR_UINT32_INPUTS(a) {
+ FOR_UINT32_INPUTS(b) {
+ CHECK_EQ(a == b, wWord32Equal.Int32Compare(a, b));
+ CHECK_EQ(a < b, wUint32LessThan.Int32Compare(a, b));
+ CHECK_EQ(a <= b, wUint32LessThanOrEqual.Int32Compare(a, b));
}
}
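
The mechanical change running through these test hunks is that the FOR_*_INPUTS macros now bind the input value itself instead of a pointer that the body had to dereference (`*pl`, `*i`). A hedged sketch of the new shape, written as a range-for macro; the real macros and input tables live in the test harness:

```cpp
#include <cstdint>
#include <cstdio>

static const int32_t kInt32Inputs[] = {0, 1, -1, 0x7fffffff, -0x7fffffff - 1};

// Value-yielding variant: the loop variable is the int32_t itself.
#define FOR_INT32_INPUTS(var) for (int32_t var : kInt32Inputs)

int main() {
  FOR_INT32_INPUTS(i) {
    FOR_INT32_INPUTS(j) {
      // Bodies now use i and j directly instead of *i and *j.
      printf("%d == %d -> %d\n", i, j, i == j);
    }
  }
}
```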
@@ -338,8 +330,8 @@ void Int32BinopInputShapeTester::TestAllInputShapes() {
void Int32BinopInputShapeTester::Run(RawMachineAssemblerTester<int32_t>* m) {
FOR_INT32_INPUTS(pl) {
FOR_INT32_INPUTS(pr) {
- input_a = *pl;
- input_b = *pr;
+ input_a = pl;
+ input_b = pr;
int32_t expect = gen->expected(input_a, input_b);
CHECK_EQ(expect, m->Call(input_a, input_b));
}
@@ -350,7 +342,7 @@ void Int32BinopInputShapeTester::Run(RawMachineAssemblerTester<int32_t>* m) {
void Int32BinopInputShapeTester::RunLeft(
RawMachineAssemblerTester<int32_t>* m) {
FOR_UINT32_INPUTS(i) {
- input_a = *i;
+ input_a = i;
int32_t expect = gen->expected(input_a, input_b);
CHECK_EQ(expect, m->Call(input_a, input_b));
}
@@ -360,7 +352,7 @@ void Int32BinopInputShapeTester::RunLeft(
void Int32BinopInputShapeTester::RunRight(
RawMachineAssemblerTester<int32_t>* m) {
FOR_UINT32_INPUTS(i) {
- input_b = *i;
+ input_b = i;
int32_t expect = gen->expected(input_a, input_b);
CHECK_EQ(expect, m->Call(input_a, input_b));
}
@@ -414,8 +406,8 @@ TEST(RunEmpty) {
TEST(RunInt32Constants) {
FOR_INT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m;
- m.Return(m.Int32Constant(*i));
- CHECK_EQ(*i, m.Call());
+ m.Return(m.Int32Constant(i));
+ CHECK_EQ(i, m.Call());
}
}
@@ -435,17 +427,12 @@ TEST(RunSmiConstants) {
RunSmiConstant(Smi::kMinValue);
RunSmiConstant(Smi::kMinValue + 1);
- FOR_INT32_INPUTS(i) { RunSmiConstant(*i); }
+ FOR_INT32_INPUTS(i) { RunSmiConstant(i); }
}
-
TEST(RunNumberConstants) {
- {
- FOR_FLOAT64_INPUTS(i) { RunNumberConstant(*i); }
- }
- {
- FOR_INT32_INPUTS(i) { RunNumberConstant(*i); }
- }
+ FOR_FLOAT64_INPUTS(i) { RunNumberConstant(i); }
+ FOR_INT32_INPUTS(i) { RunNumberConstant(i); }
for (int32_t i = 1; i < Smi::kMaxValue && i != 0;
i = base::ShlWithWraparound(i, 1)) {
@@ -460,7 +447,6 @@ TEST(RunNumberConstants) {
RunNumberConstant(Smi::kMinValue + 1);
}
-
TEST(RunEmptyString) {
RawMachineAssemblerTester<Object> m;
m.Return(m.StringConstant("empty"));
@@ -490,8 +476,8 @@ TEST(RunParam1) {
m.Return(m.Parameter(0));
FOR_INT32_INPUTS(i) {
- int32_t result = m.Call(*i);
- CHECK_EQ(*i, result);
+ int32_t result = m.Call(i);
+ CHECK_EQ(i, result);
}
}
@@ -505,8 +491,8 @@ TEST(RunParam2_1) {
USE(p1);
FOR_INT32_INPUTS(i) {
- int32_t result = m.Call(*i, -9999);
- CHECK_EQ(*i, result);
+ int32_t result = m.Call(i, -9999);
+ CHECK_EQ(i, result);
}
}
@@ -520,8 +506,8 @@ TEST(RunParam2_2) {
USE(p0);
FOR_INT32_INPUTS(i) {
- int32_t result = m.Call(-7777, *i);
- CHECK_EQ(*i, result);
+ int32_t result = m.Call(-7777, i);
+ CHECK_EQ(i, result);
}
}
@@ -535,9 +521,9 @@ TEST(RunParam3) {
int p[] = {-99, -77, -88};
FOR_INT32_INPUTS(j) {
- p[i] = *j;
+ p[i] = j;
int32_t result = m.Call(p[0], p[1], p[2]);
- CHECK_EQ(*j, result);
+ CHECK_EQ(j, result);
}
}
}
@@ -549,7 +535,7 @@ TEST(RunBinopTester) {
Int32BinopTester bt(&m);
bt.AddReturn(bt.param0);
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, bt.call(*i, 777)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, bt.call(i, 777)); }
}
{
@@ -557,7 +543,7 @@ TEST(RunBinopTester) {
Int32BinopTester bt(&m);
bt.AddReturn(bt.param1);
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, bt.call(666, *i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, bt.call(666, i)); }
}
{
@@ -565,7 +551,7 @@ TEST(RunBinopTester) {
Float64BinopTester bt(&m);
bt.AddReturn(bt.param0);
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(*i, bt.call(*i, 9.0)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(i, bt.call(i, 9.0)); }
}
{
@@ -573,7 +559,7 @@ TEST(RunBinopTester) {
Float64BinopTester bt(&m);
bt.AddReturn(bt.param1);
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(*i, bt.call(-11.25, *i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(i, bt.call(-11.25, i)); }
}
}
@@ -603,7 +589,7 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
{
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Parameter(0));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(*i, m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(i, m.Call(i)); }
}
{
BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Int64(),
@@ -611,8 +597,8 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Return(m.Int64Add(m.Parameter(0), m.Parameter(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(base::AddWithWraparound(*i, *j), m.Call(*i, *j));
- CHECK_EQ(base::AddWithWraparound(*j, *i), m.Call(*j, *i));
+ CHECK_EQ(base::AddWithWraparound(i, j), m.Call(i, j));
+ CHECK_EQ(base::AddWithWraparound(j, i), m.Call(j, i));
}
}
}
@@ -623,9 +609,9 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Int64Add(m.Int64Add(m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(Add3(*i, *i, *j), m.Call(*i, *i, *j));
- CHECK_EQ(Add3(*i, *j, *i), m.Call(*i, *j, *i));
- CHECK_EQ(Add3(*j, *i, *i), m.Call(*j, *i, *i));
+ CHECK_EQ(Add3(i, i, j), m.Call(i, i, j));
+ CHECK_EQ(Add3(i, j, i), m.Call(i, j, i));
+ CHECK_EQ(Add3(j, i, i), m.Call(j, i, i));
}
}
}
@@ -638,10 +624,10 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Parameter(3)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(Add4(*i, *i, *i, *j), m.Call(*i, *i, *i, *j));
- CHECK_EQ(Add4(*i, *i, *j, *i), m.Call(*i, *i, *j, *i));
- CHECK_EQ(Add4(*i, *j, *i, *i), m.Call(*i, *j, *i, *i));
- CHECK_EQ(Add4(*j, *i, *i, *i), m.Call(*j, *i, *i, *i));
+ CHECK_EQ(Add4(i, i, i, j), m.Call(i, i, i, j));
+ CHECK_EQ(Add4(i, i, j, i), m.Call(i, i, j, i));
+ CHECK_EQ(Add4(i, j, i, i), m.Call(i, j, i, i));
+ CHECK_EQ(Add4(j, i, i, i), m.Call(j, i, i, i));
}
}
}
@@ -662,8 +648,8 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.PointerConstant(&result), m.Parameter(0), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
- m.Call(*i);
- CHECK_DOUBLE_EQ(*i, result);
+ m.Call(i);
+ CHECK_DOUBLE_EQ(i, result);
}
}
{
@@ -676,11 +662,11 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Return(m.Int32Constant(0));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- m.Call(*i, *j);
- CHECK_EQ(base::AddWithWraparound(*i, *j), result);
+ m.Call(i, j);
+ CHECK_EQ(base::AddWithWraparound(i, j), result);
- m.Call(*j, *i);
- CHECK_EQ(base::AddWithWraparound(*j, *i), result);
+ m.Call(j, i);
+ CHECK_EQ(base::AddWithWraparound(j, i), result);
}
}
}
@@ -695,14 +681,14 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Return(m.Int32Constant(0));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- m.Call(*i, *i, *j);
- CHECK_EQ(Add3(*i, *i, *j), result);
+ m.Call(i, i, j);
+ CHECK_EQ(Add3(i, i, j), result);
- m.Call(*i, *j, *i);
- CHECK_EQ(Add3(*i, *j, *i), result);
+ m.Call(i, j, i);
+ CHECK_EQ(Add3(i, j, i), result);
- m.Call(*j, *i, *i);
- CHECK_EQ(Add3(*j, *i, *i), result);
+ m.Call(j, i, i);
+ CHECK_EQ(Add3(j, i, i), result);
}
}
}
@@ -720,17 +706,17 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Return(m.Int32Constant(0));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- m.Call(*i, *i, *i, *j);
- CHECK_EQ(Add4(*i, *i, *i, *j), result);
+ m.Call(i, i, i, j);
+ CHECK_EQ(Add4(i, i, i, j), result);
- m.Call(*i, *i, *j, *i);
- CHECK_EQ(Add4(*i, *i, *j, *i), result);
+ m.Call(i, i, j, i);
+ CHECK_EQ(Add4(i, i, j, i), result);
- m.Call(*i, *j, *i, *i);
- CHECK_EQ(Add4(*i, *j, *i, *i), result);
+ m.Call(i, j, i, i);
+ CHECK_EQ(Add4(i, j, i, i), result);
- m.Call(*j, *i, *i, *i);
- CHECK_EQ(Add4(*j, *i, *i, *i), result);
+ m.Call(j, i, i, i);
+ CHECK_EQ(Add4(j, i, i, i), result);
}
}
}
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index dc35a6b928..93d93e1671 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -139,7 +139,10 @@ class BufferedRawMachineAssemblerTester
template <typename... Params>
ReturnType Call(Params... p) {
+ uintptr_t zap_data[] = {kZapValue, kZapValue};
ReturnType return_value;
+ STATIC_ASSERT(sizeof(return_value) <= sizeof(zap_data));
+ MemCopy(&return_value, &zap_data, sizeof(return_value));
CSignature::VerifyParams<Params...>(test_graph_signature_);
CallHelper<int32_t>::Call(reinterpret_cast<void*>(&p)...,
reinterpret_cast<void*>(&return_value));
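
The zap-fill added to this Call wrapper pre-loads the return slot with a recognizable garbage pattern, so generated code that forgets to store a result produces a loud, deterministic wrong answer instead of whatever the stack happened to hold. A standalone sketch; kZapValue below is an illustrative constant, not necessarily V8's:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const uintptr_t kZapValue = 0xdeadbeef;
  uintptr_t zap_data[] = {kZapValue, kZapValue};

  double return_value;
  static_assert(sizeof(return_value) <= sizeof(zap_data), "zap buffer small");
  memcpy(&return_value, &zap_data, sizeof(return_value));

  // A correct callee would overwrite return_value here; without that write,
  // the caller observes the zap pattern.
  uintptr_t observed;
  memcpy(&observed, &return_value, sizeof(observed));
  printf("unwritten result bits: %#llx\n", (unsigned long long)observed);
}
```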
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index bb23d0644a..347f414b56 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -6,7 +6,6 @@
#include "src/api-inl.h"
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/execution.h"
@@ -142,27 +141,8 @@ Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
}
Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
- CHECK(is_compiled_scope.is_compiled() ||
- Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope));
-
Zone zone(isolate->allocator(), ZONE_NAME);
- OptimizedCompilationInfo info(&zone, isolate, shared, function);
-
- if (flags_ & OptimizedCompilationInfo::kInliningEnabled) {
- info.MarkAsInliningEnabled();
- }
-
- CHECK(info.shared_info()->HasBytecodeArray());
- JSFunction::EnsureFeedbackVector(function);
-
- Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, isolate).ToHandleChecked();
- info.native_context()->AddOptimizedCode(*code);
- function->set_code(*code);
- return function;
+ return Optimize(function, &zone, isolate, flags_);
}
// Compile the given machine graph instead of the source of the function
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.cc b/deps/v8/test/cctest/compiler/serializer-tester.cc
new file mode 100644
index 0000000000..9b6d328159
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/serializer-tester.cc
@@ -0,0 +1,219 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Serializer tests don't make sense in lite mode, as it doesn't gather
+// IC feedback.
+#ifndef V8_LITE_MODE
+
+#include "test/cctest/compiler/serializer-tester.h"
+
+#include "src/api-inl.h"
+#include "src/compiler/serializer-for-background-compilation.h"
+#include "src/compiler/zone-stats.h"
+#include "src/optimized-compilation-info.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SerializerTester::SerializerTester(const char* source)
+ : canonical_(main_isolate()) {
+ // The tests only make sense in the context of concurrent compilation.
+ FLAG_concurrent_inlining = true;
+ // The tests don't make sense when optimizations are turned off.
+ FLAG_opt = true;
+  // We need IC feedback to feed to the serializer.
+ FLAG_use_ic = true;
+ // We need manual control over when a given function is optimized.
+ FLAG_always_opt = false;
+ // We need allocation of executable memory for the compilation.
+ FLAG_jitless = false;
+
+ std::string function_string = "(function() { ";
+ function_string += source;
+ function_string += " })();";
+ Handle<JSFunction> function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(function_string.c_str()))));
+ uint32_t flags = i::OptimizedCompilationInfo::kInliningEnabled |
+ i::OptimizedCompilationInfo::kFunctionContextSpecializing |
+ i::OptimizedCompilationInfo::kAccessorInliningEnabled |
+ i::OptimizedCompilationInfo::kLoopPeelingEnabled |
+ i::OptimizedCompilationInfo::kBailoutOnUninitialized |
+ i::OptimizedCompilationInfo::kAllocationFoldingEnabled |
+ i::OptimizedCompilationInfo::kSplittingEnabled |
+ i::OptimizedCompilationInfo::kAnalyzeEnvironmentLiveness;
+ Optimize(function, main_zone(), main_isolate(), flags, &broker_);
+ function_ = JSFunctionRef(broker_, function);
+}
+
+TEST(SerializeEmptyFunction) {
+ SerializerTester tester("function f() {}; return f;");
+ CHECK(tester.function().IsSerializedForCompilation());
+}
+
+// This helper function allows testing whether an inlinee candidate
+// was properly serialized. It expects that the top-level function (that is
+// run through the SerializerTester) will return its inlinee candidate.
+void CheckForSerializedInlinee(const char* source, int argc = 0,
+ Handle<Object> argv[] = {}) {
+ SerializerTester tester(source);
+ JSFunctionRef f = tester.function();
+ CHECK(f.IsSerializedForCompilation());
+
+ MaybeHandle<Object> g_obj = Execution::Call(
+ tester.isolate(), tester.function().object(),
+ tester.isolate()->factory()->undefined_value(), argc, argv);
+ Handle<Object> g;
+ CHECK(g_obj.ToHandle(&g));
+
+ Handle<JSFunction> g_func = Handle<JSFunction>::cast(g);
+ SharedFunctionInfoRef g_sfi(tester.broker(),
+ handle(g_func->shared(), tester.isolate()));
+ FeedbackVectorRef g_fv(tester.broker(),
+ handle(g_func->feedback_vector(), tester.isolate()));
+ CHECK(g_sfi.IsSerializedForCompilation(g_fv));
+}
+
+TEST(SerializeInlinedClosure) {
+ CheckForSerializedInlinee(
+ "function f() {"
+ " return (function g(){ return g; })();"
+ "}; f(); return f;");
+}
+
+TEST(SerializeInlinedFunction) {
+ CheckForSerializedInlinee(
+ "function g() {};"
+ "function f() {"
+ " g(); return g;"
+ "}; f(); return f;");
+}
+
+TEST(SerializeCallUndefinedReceiver) {
+ CheckForSerializedInlinee(
+ "function g(a,b,c) {};"
+ "function f() {"
+ " g(1,2,3); return g;"
+ "}; f(); return f;");
+}
+
+TEST(SerializeCallUndefinedReceiver2) {
+ CheckForSerializedInlinee(
+ "function g(a,b) {};"
+ "function f() {"
+ " g(1,2); return g;"
+ "}; f(); return f;");
+}
+
+TEST(SerializeCallProperty) {
+ CheckForSerializedInlinee(
+ "let obj = {"
+ " g: function g(a,b,c) {}"
+ "};"
+ "function f() {"
+ " obj.g(1,2,3); return obj.g;"
+ "}; f(); return f;");
+}
+
+TEST(SerializeCallProperty2) {
+ CheckForSerializedInlinee(
+ "let obj = {"
+ " g: function g(a,b) {}"
+ "};"
+ "function f() {"
+ " obj.g(1,2); return obj.g;"
+ "}; f(); return f;");
+}
+
+TEST(SerializeCallAnyReceiver) {
+ CheckForSerializedInlinee(
+ "let obj = {"
+ " g: function g() {}"
+ "};"
+ "function f() {"
+ " with(obj) {"
+ " g(); return g;"
+ " };"
+ "};"
+ "f(); return f;");
+}
+
+TEST(SerializeCallWithSpread) {
+ CheckForSerializedInlinee(
+ "function g(args) {};"
+ "const arr = [1,2,3];"
+ "function f() {"
+ " g(...arr); return g;"
+ "}; f(); return f;");
+}
+
+// The following test causes the CallIC of `g` to become megamorphic,
+// thus allowing us to test whether we forward argument hints (`callee` in
+// this example) and correctly serialize the inlining candidate `j`.
+TEST(SerializeCallArguments) {
+ CheckForSerializedInlinee(
+ "function g(callee) { callee(); };"
+ "function h() {};"
+ "function i() {};"
+ "g(h); g(i);"
+ "function f() {"
+ " function j() {};"
+ " g(j);"
+ " return j;"
+ "}; f(); return f;");
+}
+
+TEST(SerializeConstruct) {
+ CheckForSerializedInlinee(
+ "function g() {};"
+ "function f() {"
+ " new g(); return g;"
+ "}; f(); return f;");
+}
+
+TEST(SerializeConstructWithSpread) {
+ CheckForSerializedInlinee(
+ "function g(a, b, c) {};"
+ "const arr = [1, 2];"
+ "function f() {"
+ " new g(0, ...arr); return g;"
+ "}; f(); return f;");
+}
+
+TEST(SerializeConditionalJump) {
+ CheckForSerializedInlinee(
+ "function g(callee) { callee(); };"
+ "function h() {};"
+ "function i() {};"
+ "let a = true;"
+ "g(h); g(i);"
+ "function f() {"
+ " function q() {};"
+ " if (a) g(q);"
+ " return q;"
+ "}; f(); return f;");
+}
+
+TEST(SerializeUnconditionalJump) {
+ CheckForSerializedInlinee(
+ "function g(callee) { callee(); };"
+ "function h() {};"
+ "function i() {};"
+ "let a = false;"
+ "g(h); g(i);"
+ "function f() {"
+ " function p() {};"
+ " function q() {};"
+ " if (a) g(q);"
+ " else g(p);"
+ " return p;"
+ "}; f(); return f;");
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LITE_MODE
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.h b/deps/v8/test/cctest/compiler/serializer-tester.h
new file mode 100644
index 0000000000..f5a5107841
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/serializer-tester.h
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_SERIALIZER_TESTER_H_
+#define V8_CCTEST_COMPILER_SERIALIZER_TESTER_H_
+
+#include "src/compiler/js-heap-broker.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ZoneStats;
+
+// The purpose of this class is to provide a testing facility for the
+// SerializerForBackgroundCompilation class. At a high level, it executes
+// the following steps:
+// 1. Wraps the provided source in an IIFE
+// 2. Generates bytecode for the given source
+// 3. Runs the bytecode, which *must* return a function
+// 4. Takes the returned function and optimizes it
+// 5. Makes the optimized function accessible through `function()`
+class SerializerTester : public HandleAndZoneScope {
+ public:
+ explicit SerializerTester(const char* source);
+
+ JSFunctionRef function() const { return function_.value(); }
+ JSHeapBroker* broker() const { return broker_; }
+ Isolate* isolate() { return main_isolate(); }
+
+ private:
+ CanonicalHandleScope canonical_;
+ base::Optional<JSFunctionRef> function_;
+ JSHeapBroker* broker_ = nullptr;
+};
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CCTEST_COMPILER_SERIALIZER_TESTER_H_
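Note: together, serializer-tester.{h,cc} give cctest a one-line way to push a snippet through the background-compilation serializer: the source string must `return` a function, which the tester wraps in an IIFE, runs, and optimizes with concurrent inlining enabled. A hedged usage sketch in the style of the tests above (the test name and snippet are hypothetical):

// Hypothetical test following the SerializeEmptyFunction pattern.
TEST(SerializeTrivialClosure) {
  SerializerTester tester("function f() { return 42; }; return f;");
  CHECK(tester.function().IsSerializedForCompilation());
}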
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index 46240aa9b1..b36d61fbc6 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -32,8 +32,7 @@ TEST(BranchCombineWord32EqualZero_1) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_INT32_INPUTS(i) {
- int32_t a = *i;
+ FOR_INT32_INPUTS(a) {
int32_t expect = a == 0 ? eq_constant : ne_constant;
CHECK_EQ(expect, m.Call(a));
}
@@ -59,8 +58,7 @@ TEST(BranchCombineWord32EqualZero_chain) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_INT32_INPUTS(i) {
- int32_t a = *i;
+ FOR_INT32_INPUTS(a) {
int32_t expect = (k & 1) == 1 ? (a == 0 ? eq_constant : ne_constant)
: (a == 0 ? ne_constant : eq_constant);
CHECK_EQ(expect, m.Call(a));
@@ -83,8 +81,7 @@ TEST(BranchCombineInt32LessThanZero_1) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_INT32_INPUTS(i) {
- int32_t a = *i;
+ FOR_INT32_INPUTS(a) {
int32_t expect = a < 0 ? eq_constant : ne_constant;
CHECK_EQ(expect, m.Call(a));
}
@@ -105,8 +102,7 @@ TEST(BranchCombineUint32LessThan100_1) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_UINT32_INPUTS(i) {
- uint32_t a = *i;
+ FOR_UINT32_INPUTS(a) {
int32_t expect = a < 100 ? eq_constant : ne_constant;
CHECK_EQ(expect, m.Call(a));
}
@@ -127,8 +123,7 @@ TEST(BranchCombineUint32LessThanOrEqual100_1) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_UINT32_INPUTS(i) {
- uint32_t a = *i;
+ FOR_UINT32_INPUTS(a) {
int32_t expect = a <= 100 ? eq_constant : ne_constant;
CHECK_EQ(expect, m.Call(a));
}
@@ -149,8 +144,7 @@ TEST(BranchCombineZeroLessThanInt32_1) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_INT32_INPUTS(i) {
- int32_t a = *i;
+ FOR_INT32_INPUTS(a) {
int32_t expect = 0 < a ? eq_constant : ne_constant;
CHECK_EQ(expect, m.Call(a));
}
@@ -171,8 +165,7 @@ TEST(BranchCombineInt32GreaterThanZero_1) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_INT32_INPUTS(i) {
- int32_t a = *i;
+ FOR_INT32_INPUTS(a) {
int32_t expect = a > 0 ? eq_constant : ne_constant;
CHECK_EQ(expect, m.Call(a));
}
@@ -195,10 +188,8 @@ TEST(BranchCombineWord32EqualP) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t a = *i;
- int32_t b = *j;
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
int32_t expect = a == b ? eq_constant : ne_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -211,9 +202,8 @@ TEST(BranchCombineWord32EqualI) {
int32_t ne_constant = 925718;
for (int left = 0; left < 2; left++) {
- FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(a) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- int32_t a = *i;
Node* p0 = m.Int32Constant(a);
Node* p1 = m.Parameter(0);
@@ -226,8 +216,7 @@ TEST(BranchCombineWord32EqualI) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_INT32_INPUTS(j) {
- int32_t b = *j;
+ FOR_INT32_INPUTS(b) {
int32_t expect = a == b ? eq_constant : ne_constant;
CHECK_EQ(expect, m.Call(b));
}
@@ -254,10 +243,8 @@ TEST(BranchCombineInt32CmpP) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t a = *i;
- int32_t b = *j;
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
int32_t expect = 0;
if (op == 0) expect = a < b ? eq_constant : ne_constant;
if (op == 1) expect = a <= b ? eq_constant : ne_constant;
@@ -273,9 +260,8 @@ TEST(BranchCombineInt32CmpI) {
int32_t ne_constant = 927711;
for (int op = 0; op < 2; op++) {
- FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(a) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- int32_t a = *i;
Node* p0 = m.Int32Constant(a);
Node* p1 = m.Parameter(0);
@@ -287,8 +273,7 @@ TEST(BranchCombineInt32CmpI) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- FOR_INT32_INPUTS(j) {
- int32_t b = *j;
+ FOR_INT32_INPUTS(b) {
int32_t expect = 0;
if (op == 0) expect = a < b ? eq_constant : ne_constant;
if (op == 1) expect = a <= b ? eq_constant : ne_constant;
@@ -498,10 +483,8 @@ TEST(BranchCombineInt32AddLessThanZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t a = *i;
- int32_t b = *j;
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
int32_t expect =
(base::AddWithWraparound(a, b) < 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
@@ -527,10 +510,8 @@ TEST(BranchCombineInt32AddGreaterThanOrEqualZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t a = *i;
- int32_t b = *j;
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
int32_t expect =
(base::AddWithWraparound(a, b) >= 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
@@ -556,10 +537,8 @@ TEST(BranchCombineInt32ZeroGreaterThanAdd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t a = *i;
- int32_t b = *j;
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
int32_t expect =
(0 > base::AddWithWraparound(a, b)) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
@@ -585,10 +564,8 @@ TEST(BranchCombineInt32ZeroLessThanOrEqualAdd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t a = *i;
- int32_t b = *j;
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
int32_t expect =
(0 <= base::AddWithWraparound(a, b)) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
@@ -614,10 +591,8 @@ TEST(BranchCombineUint32AddLessThanOrEqualZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t a = *i;
- uint32_t b = *j;
+ FOR_UINT32_INPUTS(a) {
+ FOR_UINT32_INPUTS(b) {
int32_t expect = (a + b <= 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -642,10 +617,8 @@ TEST(BranchCombineUint32AddGreaterThanZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t a = *i;
- uint32_t b = *j;
+ FOR_UINT32_INPUTS(a) {
+ FOR_UINT32_INPUTS(b) {
int32_t expect = (a + b > 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -670,10 +643,8 @@ TEST(BranchCombineUint32ZeroGreaterThanOrEqualAdd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t a = *i;
- uint32_t b = *j;
+ FOR_UINT32_INPUTS(a) {
+ FOR_UINT32_INPUTS(b) {
int32_t expect = (0 >= a + b) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -698,10 +669,8 @@ TEST(BranchCombineUint32ZeroLessThanAdd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_UINT32_INPUTS(i) {
- FOR_UINT32_INPUTS(j) {
- uint32_t a = *i;
- uint32_t b = *j;
+ FOR_UINT32_INPUTS(a) {
+ FOR_UINT32_INPUTS(b) {
int32_t expect = (0 < a + b) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -726,10 +695,8 @@ TEST(BranchCombineWord32AndLessThanZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t a = *i;
- int32_t b = *j;
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
int32_t expect = ((a & b) < 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -754,10 +721,8 @@ TEST(BranchCombineWord32AndGreaterThanOrEqualZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t a = *i;
- int32_t b = *j;
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
int32_t expect = ((a & b) >= 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -782,10 +747,8 @@ TEST(BranchCombineInt32ZeroGreaterThanAnd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t a = *i;
- int32_t b = *j;
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
int32_t expect = (0 > (a & b)) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -810,10 +773,8 @@ TEST(BranchCombineInt32ZeroLessThanOrEqualAnd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int32_t a = *i;
- int32_t b = *j;
+ FOR_INT32_INPUTS(a) {
+ FOR_INT32_INPUTS(b) {
int32_t expect = (0 <= (a & b)) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -838,10 +799,8 @@ TEST(BranchCombineUint32AndLessThanOrEqualZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- uint32_t a = *i;
- uint32_t b = *j;
+ FOR_UINT32_INPUTS(a) {
+ FOR_UINT32_INPUTS(b) {
int32_t expect = ((a & b) <= 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -866,10 +825,8 @@ TEST(BranchCombineUint32AndGreaterThanZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- uint32_t a = *i;
- uint32_t b = *j;
+ FOR_UINT32_INPUTS(a) {
+ FOR_UINT32_INPUTS(b) {
int32_t expect = ((a & b) > 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -894,10 +851,8 @@ TEST(BranchCombineUint32ZeroGreaterThanOrEqualAnd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- uint32_t a = *i;
- uint32_t b = *j;
+ FOR_UINT32_INPUTS(a) {
+ FOR_UINT32_INPUTS(b) {
int32_t expect = (0 >= (a & b)) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
@@ -922,10 +877,8 @@ TEST(BranchCombineUint32ZeroLessThanAnd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- uint32_t a = *i;
- uint32_t b = *j;
+ FOR_UINT32_INPUTS(a) {
+ FOR_UINT32_INPUTS(b) {
int32_t expect = (0 < (a & b)) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 6125ef4bdb..ed39225747 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -29,8 +29,10 @@ namespace {
int GetSlotSizeInBytes(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kTagged:
+ // Spill slots for tagged values are always uncompressed.
+ return kSystemPointerSize;
case MachineRepresentation::kFloat32:
- return kPointerSize;
+ return kSystemPointerSize;
case MachineRepresentation::kFloat64:
return kDoubleSize;
case MachineRepresentation::kSimd128:
@@ -382,7 +384,7 @@ class TestEnvironment : public HandleAndZoneScope {
TestEnvironment()
: blocks_(1, NewBlock(main_zone(), RpoNumber::FromInt(0)), main_zone()),
- code_(main_isolate(), main_zone(), &blocks_),
+ instructions_(main_isolate(), main_zone(), &blocks_),
rng_(CcTest::random_number_generator()),
supported_reps_({MachineRepresentation::kTagged,
MachineRepresentation::kFloat32,
@@ -521,7 +523,7 @@ class TestEnvironment : public HandleAndZoneScope {
// Keep a map of (MachineRepresentation . std::vector<int>) with
// allocated slots to pick from for each representation.
int slot = slot_parameter_n;
- slot_parameter_n -= (GetSlotSizeInBytes(rep) / kPointerSize);
+ slot_parameter_n -= (GetSlotSizeInBytes(rep) / kSystemPointerSize);
AddStackSlot(&test_signature, rep, slot);
entry->second--;
}
@@ -535,7 +537,7 @@ class TestEnvironment : public HandleAndZoneScope {
for (int i = 0; i < kSmiConstantCount; i++) {
intptr_t smi_value = static_cast<intptr_t>(
Smi::FromInt(rng_->NextInt(Smi::kMaxValue)).ptr());
- Constant constant = kPointerSize == 8
+ Constant constant = kSystemPointerSize == 8
? Constant(static_cast<int64_t>(smi_value))
: Constant(static_cast<int32_t>(smi_value));
AddConstant(MachineRepresentation::kTagged, AllocateConstant(constant));
@@ -573,8 +575,8 @@ class TestEnvironment : public HandleAndZoneScope {
}
int AllocateConstant(Constant constant) {
- int virtual_register = code_.NextVirtualRegister();
- code_.AddConstant(virtual_register, constant);
+ int virtual_register = instructions_.NextVirtualRegister();
+ instructions_.AddConstant(virtual_register, constant);
return virtual_register;
}
@@ -721,8 +723,8 @@ class TestEnvironment : public HandleAndZoneScope {
OperandToStatePosition(AllocatedOperand::cast(move->destination()));
InstructionOperand from = move->source();
if (from.IsConstant()) {
- Constant constant =
- code_.GetConstant(ConstantOperand::cast(from).virtual_register());
+ Constant constant = instructions_.GetConstant(
+ ConstantOperand::cast(from).virtual_register());
Handle<Object> constant_value;
switch (constant.type()) {
case Constant::kInt32:
@@ -924,13 +926,13 @@ class TestEnvironment : public HandleAndZoneScope {
}
v8::base::RandomNumberGenerator* rng() const { return rng_; }
- InstructionSequence* code() { return &code_; }
+ InstructionSequence* instructions() { return &instructions_; }
CallDescriptor* test_descriptor() { return test_descriptor_; }
int stack_slot_count() const { return stack_slot_count_; }
private:
ZoneVector<InstructionBlock*> blocks_;
- InstructionSequence code_;
+ InstructionSequence instructions_;
v8::base::RandomNumberGenerator* rng_;
// The layout describes the type of each element in the environment, in order.
std::vector<AllocatedOperand> layout_;
@@ -995,9 +997,10 @@ class CodeGeneratorTester {
}
generator_ = new CodeGenerator(
- environment->main_zone(), &frame_, &linkage_, environment->code(),
- &info_, environment->main_isolate(), base::Optional<OsrHelper>(),
- kNoSourcePosition, nullptr, PoisoningMitigationLevel::kDontPoison,
+ environment->main_zone(), &frame_, &linkage_,
+ environment->instructions(), &info_, environment->main_isolate(),
+ base::Optional<OsrHelper>(), kNoSourcePosition, nullptr,
+ PoisoningMitigationLevel::kDontPoison,
AssemblerOptions::Default(environment->main_isolate()),
Builtins::kNoBuiltinId);
@@ -1109,6 +1112,8 @@ class CodeGeneratorTester {
generator_->FinishCode();
generator_->safepoints()->Emit(generator_->tasm(),
frame_.GetTotalFrameSlotCount());
+ generator_->MaybeEmitOutOfLineConstantPool();
+
return generator_->FinalizeCode().ToHandleChecked();
}
@@ -1121,7 +1126,7 @@ class CodeGeneratorTester {
generator_->AssembleMove(&move.second, &move.first);
}
- InstructionSequence* sequence = generator_->code();
+ InstructionSequence* sequence = generator_->instructions();
sequence->StartBlock(RpoNumber::FromInt(0));
// The environment expects this code to tail-call to its first parameter
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index a31700ede2..23711bb3e7 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -167,9 +167,9 @@ TEST(CanonicalizingNumbers) {
JSConstantCacheTester T;
FOR_FLOAT64_INPUTS(i) {
- Node* node = T.Constant(*i);
+ Node* node = T.Constant(i);
for (int j = 0; j < 5; j++) {
- CHECK_EQ(node, T.Constant(*i));
+ CHECK_EQ(node, T.Constant(i));
}
}
}
@@ -178,8 +178,7 @@ TEST(CanonicalizingNumbers) {
TEST(HeapNumbers) {
JSConstantCacheTester T;
- FOR_FLOAT64_INPUTS(i) {
- double value = *i;
+ FOR_FLOAT64_INPUTS(value) {
Handle<Object> num = T.factory()->NewNumber(value);
Handle<HeapNumber> heap = T.factory()->NewHeapNumber(value);
Node* node1 = T.Constant(value);
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index 073891a52b..61736ae2dc 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -256,11 +256,8 @@ TEST(ReduceWord32And) {
ReducerTester R;
R.binop = R.machine.Word32And();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x & y, x, y);
- }
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) { R.CheckFoldBinop<int32_t>(x & y, x, y); }
}
R.CheckPutConstantOnRight(33);
@@ -282,11 +279,8 @@ TEST(ReduceWord32Or) {
ReducerTester R;
R.binop = R.machine.Word32Or();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x | y, x, y);
- }
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) { R.CheckFoldBinop<int32_t>(x | y, x, y); }
}
R.CheckPutConstantOnRight(36);
@@ -308,11 +302,8 @@ TEST(ReduceWord32Xor) {
ReducerTester R;
R.binop = R.machine.Word32Xor();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x ^ y, x, y);
- }
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) { R.CheckFoldBinop<int32_t>(x ^ y, x, y); }
}
R.CheckPutConstantOnRight(39);
@@ -332,10 +323,9 @@ TEST(ReduceWord32Shl) {
R.binop = R.machine.Word32Shl();
// TODO(titzer): out of range shifts
- FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(x) {
for (int y = 0; y < 32; y++) {
- int32_t x = *i;
- R.CheckFoldBinop<int32_t>(x << y, x, y);
+ R.CheckFoldBinop<int32_t>(base::ShlWithWraparound(x, y), x, y);
}
}
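Note: base::ShlWithWraparound here (like the base::AddWithWraparound, SubWithWraparound, and MulWithWraparound calls elsewhere in this commit) replaces raw signed operators in expected-value computations, because signed overflow and shifts into the sign bit are undefined behavior in C++. The customary implementation does the arithmetic on the unsigned twin type, where wraparound is well defined, and casts back; a hedged sketch of that idiom (the real helpers live in src/base and may differ in detail):

#include <cstdint>

int32_t AddWithWraparoundSketch(int32_t a, int32_t b) {
  // Unsigned arithmetic wraps modulo 2^32 by definition.
  return static_cast<int32_t>(static_cast<uint32_t>(a) +
                              static_cast<uint32_t>(b));
}

int32_t ShlWithWraparoundSketch(int32_t x, int shift) {
  // Shifting the unsigned twin sidesteps UB on sign-bit shifts.
  return static_cast<int32_t>(static_cast<uint32_t>(x) << (shift & 31));
}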
@@ -351,10 +341,9 @@ TEST(ReduceWord64Shl) {
ReducerTester R;
R.binop = R.machine.Word64Shl();
- FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(x) {
for (int64_t y = 0; y < 64; y++) {
- int64_t x = *i;
- R.CheckFoldBinop<int64_t>(x << y, x, y);
+ R.CheckFoldBinop<int64_t>(base::ShlWithWraparound(x, y), x, y);
}
}
@@ -371,9 +360,8 @@ TEST(ReduceWord32Shr) {
R.binop = R.machine.Word32Shr();
// TODO(titzer): test out of range shifts
- FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(x) {
for (uint32_t y = 0; y < 32; y++) {
- uint32_t x = *i;
R.CheckFoldBinop<int32_t>(x >> y, x, y);
}
}
@@ -390,9 +378,8 @@ TEST(ReduceWord64Shr) {
ReducerTester R;
R.binop = R.machine.Word64Shr();
- FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(x) {
for (uint64_t y = 0; y < 64; y++) {
- uint64_t x = *i;
R.CheckFoldBinop<int64_t>(x >> y, x, y);
}
}
@@ -410,9 +397,8 @@ TEST(ReduceWord32Sar) {
R.binop = R.machine.Word32Sar();
// TODO(titzer): test out of range shifts
- FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(x) {
for (int32_t y = 0; y < 32; y++) {
- int32_t x = *i;
R.CheckFoldBinop<int32_t>(x >> y, x, y);
}
}
@@ -429,9 +415,8 @@ TEST(ReduceWord64Sar) {
ReducerTester R;
R.binop = R.machine.Word64Sar();
- FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(x) {
for (int64_t y = 0; y < 64; y++) {
- int64_t x = *i;
R.CheckFoldBinop<int64_t>(x >> y, x, y);
}
}
@@ -477,11 +462,8 @@ TEST(Word32Equal) {
ReducerTester R;
R.binop = R.machine.Word32Equal();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x == y ? 1 : 0, x, y);
- }
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) { R.CheckFoldBinop<int32_t>(x == y ? 1 : 0, x, y); }
}
R.CheckPutConstantOnRight(48);
@@ -502,9 +484,8 @@ TEST(ReduceInt32Add) {
ReducerTester R;
R.binop = R.machine.Int32Add();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) {
R.CheckFoldBinop<int32_t>(base::AddWithWraparound(x, y), x, y);
}
}
@@ -523,9 +504,8 @@ TEST(ReduceInt64Add) {
ReducerTester R;
R.binop = R.machine.Int64Add();
- FOR_INT64_INPUTS(pl) {
- FOR_INT64_INPUTS(pr) {
- int64_t x = *pl, y = *pr;
+ FOR_INT64_INPUTS(x) {
+ FOR_INT64_INPUTS(y) {
R.CheckFoldBinop<int64_t>(base::AddWithWraparound(x, y), x, y);
}
}
@@ -542,9 +522,8 @@ TEST(ReduceInt32Sub) {
ReducerTester R;
R.binop = R.machine.Int32Sub();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) {
R.CheckFoldBinop<int32_t>(base::SubWithWraparound(x, y), x, y);
}
}
@@ -561,9 +540,8 @@ TEST(ReduceInt64Sub) {
ReducerTester R;
R.binop = R.machine.Int64Sub();
- FOR_INT64_INPUTS(pl) {
- FOR_INT64_INPUTS(pr) {
- int64_t x = *pl, y = *pr;
+ FOR_INT64_INPUTS(x) {
+ FOR_INT64_INPUTS(y) {
R.CheckFoldBinop<int64_t>(base::SubWithWraparound(x, y), x, y);
}
}
@@ -586,9 +564,8 @@ TEST(ReduceInt32Mul) {
ReducerTester R;
R.binop = R.machine.Int32Mul();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) {
R.CheckFoldBinop<int32_t>(base::MulWithWraparound(x, y), x, y);
}
}
@@ -624,9 +601,8 @@ TEST(ReduceInt32Div) {
ReducerTester R;
R.binop = R.machine.Int32Div();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) {
if (y == 0) continue; // TODO(titzer): test / 0
int32_t r = y == -1 ? base::NegateWithWraparound(x)
: x / y; // INT_MIN / -1 may explode in C
@@ -653,9 +629,8 @@ TEST(ReduceUint32Div) {
ReducerTester R;
R.binop = R.machine.Uint32Div();
- FOR_UINT32_INPUTS(pl) {
- FOR_UINT32_INPUTS(pr) {
- uint32_t x = *pl, y = *pr;
+ FOR_UINT32_INPUTS(x) {
+ FOR_UINT32_INPUTS(y) {
if (y == 0) continue; // TODO(titzer): test / 0
R.CheckFoldBinop<int32_t>(x / y, x, y);
}
@@ -682,9 +657,8 @@ TEST(ReduceInt32Mod) {
ReducerTester R;
R.binop = R.machine.Int32Mod();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) {
if (y == 0) continue; // TODO(titzer): test % 0
int32_t r = y == -1 ? 0 : x % y; // INT_MIN % -1 may explode in C
R.CheckFoldBinop<int32_t>(r, x, y);
@@ -706,9 +680,8 @@ TEST(ReduceUint32Mod) {
ReducerTester R;
R.binop = R.machine.Uint32Mod();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- uint32_t x = *pl, y = *pr;
+ FOR_UINT32_INPUTS(x) {
+ FOR_UINT32_INPUTS(y) {
if (y == 0) continue; // TODO(titzer): test x % 0
R.CheckFoldBinop<int32_t>(x % y, x, y);
}
@@ -734,11 +707,8 @@ TEST(ReduceInt32LessThan) {
ReducerTester R;
R.binop = R.machine.Int32LessThan();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x < y ? 1 : 0, x, y);
- }
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) { R.CheckFoldBinop<int32_t>(x < y ? 1 : 0, x, y); }
}
R.CheckDontPutConstantOnRight(41399);
@@ -754,14 +724,11 @@ TEST(ReduceInt32LessThanOrEqual) {
ReducerTester R;
R.binop = R.machine.Int32LessThanOrEqual();
- FOR_INT32_INPUTS(pl) {
- FOR_INT32_INPUTS(pr) {
- int32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x <= y ? 1 : 0, x, y);
- }
+ FOR_INT32_INPUTS(x) {
+ FOR_INT32_INPUTS(y) { R.CheckFoldBinop<int32_t>(x <= y ? 1 : 0, x, y); }
}
- FOR_INT32_INPUTS(i) { R.CheckDontPutConstantOnRight<int32_t>(*i); }
+ FOR_INT32_INPUTS(i) { R.CheckDontPutConstantOnRight<int32_t>(i); }
Node* x = R.Parameter(0);
@@ -773,11 +740,8 @@ TEST(ReduceUint32LessThan) {
ReducerTester R;
R.binop = R.machine.Uint32LessThan();
- FOR_UINT32_INPUTS(pl) {
- FOR_UINT32_INPUTS(pr) {
- uint32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x < y ? 1 : 0, x, y);
- }
+ FOR_UINT32_INPUTS(x) {
+ FOR_UINT32_INPUTS(y) { R.CheckFoldBinop<int32_t>(x < y ? 1 : 0, x, y); }
}
R.CheckDontPutConstantOnRight(41399);
@@ -797,11 +761,8 @@ TEST(ReduceUint32LessThanOrEqual) {
ReducerTester R;
R.binop = R.machine.Uint32LessThanOrEqual();
- FOR_UINT32_INPUTS(pl) {
- FOR_UINT32_INPUTS(pr) {
- uint32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x <= y ? 1 : 0, x, y);
- }
+ FOR_UINT32_INPUTS(x) {
+ FOR_UINT32_INPUTS(y) { R.CheckFoldBinop<int32_t>(x <= y ? 1 : 0, x, y); }
}
R.CheckDontPutConstantOnRight(41399);
@@ -846,11 +807,8 @@ TEST(ReduceFloat32Sub) {
ReducerTester R;
R.binop = R.machine.Float32Sub();
- FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) {
- float x = *pl, y = *pr;
- R.CheckFoldBinop<float>(x - y, x, y);
- }
+ FOR_FLOAT32_INPUTS(x) {
+ FOR_FLOAT32_INPUTS(y) { R.CheckFoldBinop<float>(x - y, x, y); }
}
Node* x = R.Parameter();
@@ -866,11 +824,8 @@ TEST(ReduceFloat64Sub) {
ReducerTester R;
R.binop = R.machine.Float64Sub();
- FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- double x = *pl, y = *pr;
- R.CheckFoldBinop<double>(x - y, x, y);
- }
+ FOR_FLOAT64_INPUTS(x) {
+ FOR_FLOAT64_INPUTS(y) { R.CheckFoldBinop<double>(x - y, x, y); }
}
Node* x = R.Parameter();
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index bf5e829509..7f35b1b0ee 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -125,7 +125,7 @@ std::unique_ptr<wasm::NativeModule> AllocateNativeModule(Isolate* isolate,
// We have to add the code object to a NativeModule, because the
// WasmCallDescriptor assumes that code is on the native heap and not
// within a code object.
- return isolate->wasm_engine()->code_manager()->NewNativeModule(
+ return isolate->wasm_engine()->NewNativeModule(
isolate, wasm::kAllWasmFeatures, code_size, false, std::move(module));
}
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index f4218467f7..3f1cbaad24 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -261,12 +261,12 @@ TEST(ToInt32_constant) {
RepresentationChangerTester r;
{
FOR_INT32_INPUTS(i) {
- Node* n = r.jsgraph()->Constant(*i);
+ Node* n = r.jsgraph()->Constant(i);
Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
n, MachineRepresentation::kTagged, Type::Signed32(), use,
UseInfo(MachineRepresentation::kWord32, Truncation::None()));
- r.CheckInt32Constant(c, *i);
+ r.CheckInt32Constant(c, i);
}
}
}
@@ -274,24 +274,24 @@ TEST(ToInt32_constant) {
TEST(ToUint32_constant) {
RepresentationChangerTester r;
FOR_UINT32_INPUTS(i) {
- Node* n = r.jsgraph()->Constant(static_cast<double>(*i));
+ Node* n = r.jsgraph()->Constant(static_cast<double>(i));
Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
n, MachineRepresentation::kTagged, Type::Unsigned32(), use,
UseInfo(MachineRepresentation::kWord32, Truncation::None()));
- r.CheckUint32Constant(c, *i);
+ r.CheckUint32Constant(c, i);
}
}
TEST(ToInt64_constant) {
RepresentationChangerTester r;
FOR_INT32_INPUTS(i) {
- Node* n = r.jsgraph()->Constant(*i);
+ Node* n = r.jsgraph()->Constant(i);
Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
n, MachineRepresentation::kTagged, TypeCache::Get()->kSafeInteger, use,
UseInfo(MachineRepresentation::kWord64, Truncation::None()));
- r.CheckInt64Constant(c, *i);
+ r.CheckInt64Constant(c, i);
}
}
@@ -692,8 +692,6 @@ TEST(Nops) {
MachineRepresentation::kWord16);
r.CheckNop(MachineRepresentation::kBit, Type::Boolean(),
MachineRepresentation::kWord32);
- r.CheckNop(MachineRepresentation::kBit, Type::Boolean(),
- MachineRepresentation::kWord64);
}
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 775ffadfd4..b6043f2a9d 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -2891,7 +2891,7 @@ TEST(BytecodeGraphBuilderIllegalConstDeclaration) {
ExpectedSnippet<0, const char*> illegal_const_decl[] = {
{"const x = x = 10 + 3; return x;",
- {"Uncaught ReferenceError: x is not defined"}},
+ {"Uncaught ReferenceError: Cannot access 'x' before initialization"}},
{"const x = 10; x = 20; return x;",
{"Uncaught TypeError: Assignment to constant variable."}},
{"const x = 10; { x = 20; } return x;",
@@ -2899,7 +2899,7 @@ TEST(BytecodeGraphBuilderIllegalConstDeclaration) {
{"const x = 10; eval('x = 20;'); return x;",
{"Uncaught TypeError: Assignment to constant variable."}},
{"let x = x + 10; return x;",
- {"Uncaught ReferenceError: x is not defined"}},
+ {"Uncaught ReferenceError: Cannot access 'x' before initialization"}},
{"'use strict'; (function f1() { f1 = 123; })() ",
{"Uncaught TypeError: Assignment to constant variable."}},
};
diff --git a/deps/v8/test/cctest/compiler/test-run-load-store.cc b/deps/v8/test/cctest/compiler/test-run-load-store.cc
index ffee5310d2..26d681299d 100644
--- a/deps/v8/test/cctest/compiler/test-run-load-store.cc
+++ b/deps/v8/test/cctest/compiler/test-run-load-store.cc
@@ -52,7 +52,7 @@ void RunLoadInt32(const TestAlignment t) {
}
FOR_INT32_INPUTS(i) {
- p1 = *i;
+ p1 = i;
CHECK_EQ(p1, m.Call());
}
}
@@ -79,7 +79,7 @@ void RunLoadInt32Offset(TestAlignment t) {
}
FOR_INT32_INPUTS(j) {
- p1 = *j;
+ p1 = j;
CHECK_EQ(p1, m.Call());
}
}
@@ -91,9 +91,9 @@ void RunLoadStoreFloat32Offset(TestAlignment t) {
FOR_INT32_INPUTS(i) {
int32_t magic =
- base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(*i, 3));
+ base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
RawMachineAssemblerTester<int32_t> m;
- int32_t offset = *i;
+ int32_t offset = i;
byte* from = reinterpret_cast<byte*>(&p1) - offset;
byte* to = reinterpret_cast<byte*>(&p2) - offset;
// generate load [#base + #index]
@@ -115,8 +115,8 @@ void RunLoadStoreFloat32Offset(TestAlignment t) {
m.Return(m.Int32Constant(magic));
FOR_FLOAT32_INPUTS(j) {
- p1 = *j;
- p2 = *j - 5;
+ p1 = j;
+ p2 = j - 5;
CHECK_EQ(magic, m.Call());
CHECK_DOUBLE_EQ(p1, p2);
}
@@ -129,9 +129,9 @@ void RunLoadStoreFloat64Offset(TestAlignment t) {
FOR_INT32_INPUTS(i) {
int32_t magic =
- base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(*i, 3));
+ base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
RawMachineAssemblerTester<int32_t> m;
- int32_t offset = *i;
+ int32_t offset = i;
byte* from = reinterpret_cast<byte*>(&p1) - offset;
byte* to = reinterpret_cast<byte*>(&p2) - offset;
// generate load [#base + #index]
@@ -152,8 +152,8 @@ void RunLoadStoreFloat64Offset(TestAlignment t) {
m.Return(m.Int32Constant(magic));
FOR_FLOAT64_INPUTS(j) {
- p1 = *j;
- p2 = *j - 5;
+ p1 = j;
+ p2 = j - 5;
CHECK_EQ(magic, m.Call());
CHECK_DOUBLE_EQ(p1, p2);
}
@@ -189,6 +189,36 @@ TEST(RunUnalignedLoadStoreFloat64Offset) {
namespace {
+// Mostly the same as CHECK_EQ(), but customized for compressed tagged values.
+template <typename CType>
+void CheckEq(CType in_value, CType out_value) {
+ CHECK_EQ(in_value, out_value);
+}
+
+#ifdef V8_COMPRESS_POINTERS
+// Specializations for checking the result of compressing store.
+template <>
+void CheckEq<Object>(Object in_value, Object out_value) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ // |out_value| is compressed. Check that it's valid.
+ CHECK_EQ(CompressTagged(in_value->ptr()), out_value->ptr());
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ CHECK_EQ(in_value->ptr(),
+ DecompressTaggedAny(isolate->isolate_root(),
+ static_cast<int32_t>(out_value->ptr())));
+}
+
+template <>
+void CheckEq<HeapObject>(HeapObject in_value, HeapObject out_value) {
+ return CheckEq<Object>(in_value, out_value);
+}
+
+template <>
+void CheckEq<Smi>(Smi in_value, Smi out_value) {
+ return CheckEq<Object>(in_value, out_value);
+}
+#endif
+
+// Initializes the buffer with some raw data respecting the requested
+// representation of the values.
template <typename CType>
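Note: the CheckEq specializations above encode the pointer-compression contract being tested: a compressing store keeps only the low 32 bits of a tagged value, and decompression rebases the sign-extended low half on the isolate root. A hedged arithmetic sketch of that round trip, with plain integers standing in for V8's CompressTagged/DecompressTaggedAny:

#include <cstdint>

uint32_t CompressSketch(uintptr_t tagged_ptr) {
  return static_cast<uint32_t>(tagged_ptr);  // keep the low 32 bits
}

uintptr_t DecompressAnySketch(uintptr_t isolate_root, uint32_t compressed) {
  // Sign-extend the low half, then rebase it on the isolate root.
  intptr_t offset = static_cast<int32_t>(compressed);
  return isolate_root + static_cast<uintptr_t>(offset);
}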
@@ -239,7 +269,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
// When pointer compression is enabled, we need to access only the
// lower 32 bits of the tagged value, while the buffer contains
// full 64-bit values.
- base_pointer = LSB(base_pointer, kPointerSize / 2);
+ base_pointer = LSB(base_pointer, kSystemPointerSize / 2);
}
#endif
Node* base = m.PointerConstant(base_pointer);
@@ -252,27 +282,21 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
UNREACHABLE();
}
- CHECK_EQ(buffer[i], m.Call());
+ CheckEq<CType>(buffer[i], m.Call());
}
}
}
template <typename CType>
-CType NullValue() {
- return CType{0};
-}
-
-template <>
-HeapObject NullValue<HeapObject>() {
- return HeapObject();
-}
-
-template <typename CType>
void RunLoadStore(MachineType rep, TestAlignment t) {
const int kNumElems = 16;
CType in_buffer[kNumElems];
CType out_buffer[kNumElems];
+ uintptr_t zap_data[] = {kZapValue, kZapValue};
+ CType zap_value;
+ STATIC_ASSERT(sizeof(CType) <= sizeof(zap_data));
+ MemCopy(&zap_value, &zap_data, sizeof(CType));
InitBuffer(in_buffer, kNumElems, rep);
for (int32_t x = 0; x < kNumElems; x++) {
@@ -294,12 +318,15 @@ void RunLoadStore(MachineType rep, TestAlignment t) {
m.Return(m.Int32Constant(OK));
- memset(out_buffer, 0, sizeof(out_buffer));
+ for (int32_t z = 0; z < kNumElems; z++) {
+ out_buffer[z] = zap_value;
+ }
CHECK_NE(in_buffer[x], out_buffer[y]);
CHECK_EQ(OK, m.Call());
- CHECK_EQ(in_buffer[x], out_buffer[y]);
+    // Mostly the same as CHECK_EQ(), but customized for compressed tagged values.
+ CheckEq<CType>(in_buffer[x], out_buffer[y]);
for (int32_t z = 0; z < kNumElems; z++) {
- if (z != y) CHECK_EQ(NullValue<CType>(), out_buffer[z]);
+ if (z != y) CHECK_EQ(zap_value, out_buffer[z]);
}
}
}
@@ -335,7 +362,8 @@ void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
// Direct read of &out_buffer[y] may cause unaligned access in C++ code,
// so we use MemCopy() to handle that.
MemCopy(&out, &out_buffer[y], sizeof(CType));
- CHECK_EQ(in, out);
+    // Mostly the same as CHECK_EQ(), but customized for compressed tagged values.
+ CheckEq<CType>(in, out);
}
}
}
@@ -458,12 +486,12 @@ void RunLoadStoreSignExtend32(TestAlignment t) {
m.Return(load8);
FOR_INT32_INPUTS(i) {
- buffer[0] = *i;
+ buffer[0] = i;
- CHECK_EQ(static_cast<int8_t>(*i & 0xFF), m.Call());
- CHECK_EQ(static_cast<int8_t>(*i & 0xFF), buffer[1]);
- CHECK_EQ(static_cast<int16_t>(*i & 0xFFFF), buffer[2]);
- CHECK_EQ(*i, buffer[3]);
+ CHECK_EQ(static_cast<int8_t>(i & 0xFF), m.Call());
+ CHECK_EQ(static_cast<int8_t>(i & 0xFF), buffer[1]);
+ CHECK_EQ(static_cast<int16_t>(i & 0xFFFF), buffer[2]);
+ CHECK_EQ(i, buffer[3]);
}
}
@@ -491,12 +519,12 @@ void RunLoadStoreZeroExtend32(TestAlignment t) {
m.Return(load8);
FOR_UINT32_INPUTS(i) {
- buffer[0] = *i;
+ buffer[0] = i;
- CHECK_EQ((*i & 0xFF), m.Call());
- CHECK_EQ((*i & 0xFF), buffer[1]);
- CHECK_EQ((*i & 0xFFFF), buffer[2]);
- CHECK_EQ(*i, buffer[3]);
+ CHECK_EQ((i & 0xFF), m.Call());
+ CHECK_EQ((i & 0xFF), buffer[1]);
+ CHECK_EQ((i & 0xFFFF), buffer[2]);
+ CHECK_EQ(i, buffer[3]);
}
}
} // namespace
@@ -552,18 +580,18 @@ void RunLoadStoreSignExtend64(TestAlignment t) {
m.Return(load8);
FOR_INT64_INPUTS(i) {
- buffer[0] = *i;
+ buffer[0] = i;
- CHECK_EQ(static_cast<int8_t>(*i & 0xFF), m.Call());
- CHECK_EQ(static_cast<int8_t>(*i & 0xFF), buffer[1]);
- CHECK_EQ(static_cast<int16_t>(*i & 0xFFFF), buffer[2]);
- CHECK_EQ(static_cast<int32_t>(*i & 0xFFFFFFFF), buffer[3]);
- CHECK_EQ(*i, buffer[4]);
+ CHECK_EQ(static_cast<int8_t>(i & 0xFF), m.Call());
+ CHECK_EQ(static_cast<int8_t>(i & 0xFF), buffer[1]);
+ CHECK_EQ(static_cast<int16_t>(i & 0xFFFF), buffer[2]);
+ CHECK_EQ(static_cast<int32_t>(i & 0xFFFFFFFF), buffer[3]);
+ CHECK_EQ(i, buffer[4]);
}
}
void RunLoadStoreZeroExtend64(TestAlignment t) {
- if (kPointerSize < 8) return;
+ if (kSystemPointerSize < 8) return;
uint64_t buffer[5];
RawMachineAssemblerTester<uint64_t> m;
Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
@@ -595,13 +623,13 @@ void RunLoadStoreZeroExtend64(TestAlignment t) {
m.Return(load8);
FOR_UINT64_INPUTS(i) {
- buffer[0] = *i;
+ buffer[0] = i;
- CHECK_EQ((*i & 0xFF), m.Call());
- CHECK_EQ((*i & 0xFF), buffer[1]);
- CHECK_EQ((*i & 0xFFFF), buffer[2]);
- CHECK_EQ((*i & 0xFFFFFFFF), buffer[3]);
- CHECK_EQ(*i, buffer[4]);
+ CHECK_EQ((i & 0xFF), m.Call());
+ CHECK_EQ((i & 0xFF), buffer[1]);
+ CHECK_EQ((i & 0xFFFF), buffer[2]);
+ CHECK_EQ((i & 0xFFFFFFFF), buffer[3]);
+ CHECK_EQ(i, buffer[4]);
}
}
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 782e9b51b8..92e473c840 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -527,8 +527,8 @@ TEST(RunInt64AddWithOverflowP) {
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
int64_t expected_val;
- int expected_ovf = base::bits::SignedAddOverflow64(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ int expected_ovf = base::bits::SignedAddOverflow64(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(i, j));
CHECK_EQ(expected_val, actual_val);
}
}
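Note: base::bits::SignedAddOverflow64 returns whether the signed 64-bit addition overflowed and stores the wrapped sum through its out-parameter, which is what lets these tests check both projections of Int64AddWithOverflow. A hedged sketch of an equivalent helper on GCC/Clang (the real one lives in src/base/bits.h):

#include <cstdint>

int SignedAddOverflow64Sketch(int64_t lhs, int64_t rhs, int64_t* val) {
  // __builtin_add_overflow stores the wrapped result and reports overflow.
  return __builtin_add_overflow(lhs, rhs, val) ? 1 : 0;
}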
@@ -540,41 +540,39 @@ TEST(RunInt64AddWithOverflowImm) {
FOR_INT64_INPUTS(i) {
{
RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
- Node* add = m.Int64AddWithOverflow(m.Int64Constant(*i), m.Parameter(0));
+ Node* add = m.Int64AddWithOverflow(m.Int64Constant(i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
m.Return(ovf);
FOR_INT64_INPUTS(j) {
- int expected_ovf =
- base::bits::SignedAddOverflow64(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, m.Call(*j));
+ int expected_ovf = base::bits::SignedAddOverflow64(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
CHECK_EQ(expected_val, actual_val);
}
}
{
RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
- Node* add = m.Int64AddWithOverflow(m.Parameter(0), m.Int64Constant(*i));
+ Node* add = m.Int64AddWithOverflow(m.Parameter(0), m.Int64Constant(i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
m.Return(ovf);
FOR_INT64_INPUTS(j) {
- int expected_ovf =
- base::bits::SignedAddOverflow64(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, m.Call(*j));
+ int expected_ovf = base::bits::SignedAddOverflow64(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
CHECK_EQ(expected_val, actual_val);
}
}
FOR_INT64_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m;
Node* add =
- m.Int64AddWithOverflow(m.Int64Constant(*i), m.Int64Constant(*j));
+ m.Int64AddWithOverflow(m.Int64Constant(i), m.Int64Constant(j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
m.Return(ovf);
- int expected_ovf = base::bits::SignedAddOverflow64(*i, *j, &expected_val);
+ int expected_ovf = base::bits::SignedAddOverflow64(i, j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
CHECK_EQ(expected_val, actual_val);
}
@@ -600,10 +598,10 @@ TEST(RunInt64AddWithOverflowInBranchP) {
FOR_INT64_INPUTS(j) {
int32_t expected = constant;
int64_t result;
- if (!base::bits::SignedAddOverflow64(*i, *j, &result)) {
+ if (!base::bits::SignedAddOverflow64(i, j, &result)) {
expected = static_cast<int32_t>(result);
}
- CHECK_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -621,8 +619,8 @@ TEST(RunInt64SubWithOverflowP) {
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
int64_t expected_val;
- int expected_ovf = base::bits::SignedSubOverflow64(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ int expected_ovf = base::bits::SignedSubOverflow64(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(i, j));
CHECK_EQ(expected_val, actual_val);
}
}
@@ -634,41 +632,39 @@ TEST(RunInt64SubWithOverflowImm) {
FOR_INT64_INPUTS(i) {
{
RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
- Node* add = m.Int64SubWithOverflow(m.Int64Constant(*i), m.Parameter(0));
+ Node* add = m.Int64SubWithOverflow(m.Int64Constant(i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
m.Return(ovf);
FOR_INT64_INPUTS(j) {
- int expected_ovf =
- base::bits::SignedSubOverflow64(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, m.Call(*j));
+ int expected_ovf = base::bits::SignedSubOverflow64(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
CHECK_EQ(expected_val, actual_val);
}
}
{
RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
- Node* add = m.Int64SubWithOverflow(m.Parameter(0), m.Int64Constant(*i));
+ Node* add = m.Int64SubWithOverflow(m.Parameter(0), m.Int64Constant(i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
m.Return(ovf);
FOR_INT64_INPUTS(j) {
- int expected_ovf =
- base::bits::SignedSubOverflow64(*j, *i, &expected_val);
- CHECK_EQ(expected_ovf, m.Call(*j));
+ int expected_ovf = base::bits::SignedSubOverflow64(j, i, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
CHECK_EQ(expected_val, actual_val);
}
}
FOR_INT64_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m;
Node* add =
- m.Int64SubWithOverflow(m.Int64Constant(*i), m.Int64Constant(*j));
+ m.Int64SubWithOverflow(m.Int64Constant(i), m.Int64Constant(j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
m.Return(ovf);
- int expected_ovf = base::bits::SignedSubOverflow64(*i, *j, &expected_val);
+ int expected_ovf = base::bits::SignedSubOverflow64(i, j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
CHECK_EQ(expected_val, actual_val);
}
@@ -694,10 +690,10 @@ TEST(RunInt64SubWithOverflowInBranchP) {
FOR_INT64_INPUTS(j) {
int32_t expected = constant;
int64_t result;
- if (!base::bits::SignedSubOverflow64(*i, *j, &result)) {
+ if (!base::bits::SignedSubOverflow64(i, j, &result)) {
expected = static_cast<int32_t>(result);
}
- CHECK_EQ(expected, static_cast<int32_t>(bt.call(*i, *j)));
+ CHECK_EQ(expected, static_cast<int32_t>(bt.call(i, j)));
}
}
}
@@ -1249,9 +1245,9 @@ TEST(RunInt32AddP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
// Use uint32_t because signed overflow is UB in C.
- int expected = static_cast<int32_t>(static_cast<uint32_t>(*i) +
- static_cast<uint32_t>(*j));
- CHECK_EQ(expected, bt.call(*i, *j));
+ int expected = static_cast<int32_t>(static_cast<uint32_t>(i) +
+ static_cast<uint32_t>(j));
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -1268,8 +1264,8 @@ TEST(RunInt32AddAndWord32EqualP) {
FOR_INT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
int32_t const expected =
- bit_cast<int32_t>(bit_cast<uint32_t>(*i) + (*j == *k));
- CHECK_EQ(expected, m.Call(*i, *j, *k));
+ bit_cast<int32_t>(bit_cast<uint32_t>(i) + (j == k));
+ CHECK_EQ(expected, m.Call(i, j, k));
}
}
}
@@ -1284,8 +1280,8 @@ TEST(RunInt32AddAndWord32EqualP) {
FOR_INT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
int32_t const expected =
- bit_cast<int32_t>((*i == *j) + bit_cast<uint32_t>(*k));
- CHECK_EQ(expected, m.Call(*i, *j, *k));
+ bit_cast<int32_t>((i == j) + bit_cast<uint32_t>(k));
+ CHECK_EQ(expected, m.Call(i, j, k));
}
}
}
@@ -1298,30 +1294,30 @@ TEST(RunInt32AddAndWord32EqualImm) {
FOR_INT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- m.Return(m.Int32Add(m.Int32Constant(*i),
- m.Word32Equal(m.Parameter(0), m.Parameter(1))));
- FOR_INT32_INPUTS(j) {
- FOR_INT32_INPUTS(k) {
- // Use uint32_t because signed overflow is UB in C.
- int32_t const expected =
- bit_cast<int32_t>(bit_cast<uint32_t>(*i) + (*j == *k));
- CHECK_EQ(expected, m.Call(*j, *k));
- }
- }
+ m.Return(m.Int32Add(m.Int32Constant(i),
+ m.Word32Equal(m.Parameter(0), m.Parameter(1))));
+ FOR_INT32_INPUTS(j) {
+ FOR_INT32_INPUTS(k) {
+ // Use uint32_t because signed overflow is UB in C.
+ int32_t const expected =
+ bit_cast<int32_t>(bit_cast<uint32_t>(i) + (j == k));
+ CHECK_EQ(expected, m.Call(j, k));
+ }
+ }
}
}
{
FOR_INT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- m.Return(m.Int32Add(m.Word32Equal(m.Int32Constant(*i), m.Parameter(0)),
+ m.Return(m.Int32Add(m.Word32Equal(m.Int32Constant(i), m.Parameter(0)),
m.Parameter(1)));
FOR_INT32_INPUTS(j) {
FOR_INT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
int32_t const expected =
- bit_cast<int32_t>((*i == *j) + bit_cast<uint32_t>(*k));
- CHECK_EQ(expected, m.Call(*j, *k));
+ bit_cast<int32_t>((i == j) + bit_cast<uint32_t>(k));
+ CHECK_EQ(expected, m.Call(j, k));
}
}
}
@@ -1340,8 +1336,8 @@ TEST(RunInt32AddAndWord32NotEqualP) {
FOR_INT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
int32_t const expected =
- bit_cast<int32_t>(bit_cast<uint32_t>(*i) + (*j != *k));
- CHECK_EQ(expected, m.Call(*i, *j, *k));
+ bit_cast<int32_t>(bit_cast<uint32_t>(i) + (j != k));
+ CHECK_EQ(expected, m.Call(i, j, k));
}
}
}
@@ -1356,8 +1352,8 @@ TEST(RunInt32AddAndWord32NotEqualP) {
FOR_INT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
int32_t const expected =
- bit_cast<int32_t>((*i != *j) + bit_cast<uint32_t>(*k));
- CHECK_EQ(expected, m.Call(*i, *j, *k));
+ bit_cast<int32_t>((i != j) + bit_cast<uint32_t>(k));
+ CHECK_EQ(expected, m.Call(i, j, k));
}
}
}
@@ -1370,30 +1366,30 @@ TEST(RunInt32AddAndWord32NotEqualImm) {
FOR_INT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- m.Return(m.Int32Add(m.Int32Constant(*i),
- m.Word32NotEqual(m.Parameter(0), m.Parameter(1))));
- FOR_INT32_INPUTS(j) {
- FOR_INT32_INPUTS(k) {
- // Use uint32_t because signed overflow is UB in C.
- int32_t const expected =
- bit_cast<int32_t>(bit_cast<uint32_t>(*i) + (*j != *k));
- CHECK_EQ(expected, m.Call(*j, *k));
- }
- }
+ m.Return(m.Int32Add(m.Int32Constant(i),
+ m.Word32NotEqual(m.Parameter(0), m.Parameter(1))));
+ FOR_INT32_INPUTS(j) {
+ FOR_INT32_INPUTS(k) {
+ // Use uint32_t because signed overflow is UB in C.
+ int32_t const expected =
+ bit_cast<int32_t>(bit_cast<uint32_t>(i) + (j != k));
+ CHECK_EQ(expected, m.Call(j, k));
+ }
+ }
}
}
{
FOR_INT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- m.Return(m.Int32Add(m.Word32NotEqual(m.Int32Constant(*i), m.Parameter(0)),
+ m.Return(m.Int32Add(m.Word32NotEqual(m.Int32Constant(i), m.Parameter(0)),
m.Parameter(1)));
FOR_INT32_INPUTS(j) {
FOR_INT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
int32_t const expected =
- bit_cast<int32_t>((*i != *j) + bit_cast<uint32_t>(*k));
- CHECK_EQ(expected, m.Call(*j, *k));
+ bit_cast<int32_t>((i != j) + bit_cast<uint32_t>(k));
+ CHECK_EQ(expected, m.Call(j, k));
}
}
}
@@ -1411,8 +1407,8 @@ TEST(RunInt32AddAndWord32SarP) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
// Use uint32_t because signed overflow is UB in C.
- int32_t expected = *i + (*j >> shift);
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = i + (j >> shift);
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -1426,8 +1422,8 @@ TEST(RunInt32AddAndWord32SarP) {
FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
- int32_t expected = (*i >> shift) + *k;
- CHECK_EQ(expected, m.Call(*i, shift, *k));
+ int32_t expected = (i >> shift) + k;
+ CHECK_EQ(expected, m.Call(i, shift, k));
}
}
}
@@ -1438,30 +1434,30 @@ TEST(RunInt32AddAndWord32SarP) {
TEST(RunInt32AddAndWord32ShlP) {
{
RawMachineAssemblerTester<int32_t> m(
- MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Add(m.Parameter(0),
m.Word32Shl(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
+ FOR_UINT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
// Use uint32_t because signed overflow is UB in C.
- int32_t expected = *i + (*j << shift);
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = i + (j << shift);
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
}
{
RawMachineAssemblerTester<int32_t> m(
- MachineType::Int32(), MachineType::Uint32(), MachineType::Uint32());
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Add(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
- FOR_INT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
- int32_t expected = (*i << shift) + *k;
- CHECK_EQ(expected, m.Call(*i, shift, *k));
+ int32_t expected = (i << shift) + k;
+ CHECK_EQ(expected, m.Call(i, shift, k));
}
}
}
@@ -1479,8 +1475,8 @@ TEST(RunInt32AddAndWord32ShrP) {
FOR_UINT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
// Use uint32_t because signed overflow is UB in C.
- int32_t expected = *i + (*j >> shift);
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = i + (j >> shift);
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -1494,8 +1490,8 @@ TEST(RunInt32AddAndWord32ShrP) {
FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
- int32_t expected = (*i >> shift) + *k;
- CHECK_EQ(expected, m.Call(*i, shift, *k));
+ int32_t expected = (i >> shift) + k;
+ CHECK_EQ(expected, m.Call(i, shift, k));
}
}
}
@@ -1518,8 +1514,8 @@ TEST(RunInt32AddInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = (i + j) == 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -1536,8 +1532,8 @@ TEST(RunInt32AddInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = (i + j) != 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -1545,7 +1541,7 @@ TEST(RunInt32AddInBranch) {
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+ m.Branch(m.Word32Equal(m.Int32Add(m.Int32Constant(i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
@@ -1553,8 +1549,8 @@ TEST(RunInt32AddInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (i + j) == 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -1562,7 +1558,7 @@ TEST(RunInt32AddInBranch) {
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32NotEqual(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+ m.Branch(m.Word32NotEqual(m.Int32Add(m.Int32Constant(i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
@@ -1570,8 +1566,8 @@ TEST(RunInt32AddInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (i + j) != 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -1601,17 +1597,17 @@ TEST(RunInt32AddInBranch) {
default:
UNREACHABLE();
case IrOpcode::kWord32Sar:
- right = *j >> shift;
+ right = j >> shift;
break;
case IrOpcode::kWord32Shl:
- right = *j << shift;
+ right = static_cast<uint32_t>(j) << shift;
break;
case IrOpcode::kWord32Shr:
- right = static_cast<uint32_t>(*j) >> shift;
+ right = static_cast<uint32_t>(j) >> shift;
break;
}
- int32_t expected = ((*i + right) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = ((i + right) == 0) ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
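// The static_cast<uint32_t> before << in the kWord32Shl cases matters:
// left-shifting a negative int32_t is undefined in C++. A hedged sketch of
// the safe form used above (SafeShl is a hypothetical name; shift is assumed
// to be in [0, 31], as FOR_UINT32_SHIFTS provides):
//
// uint32_t SafeShl(int32_t v, uint32_t shift) {
//   // Shifting the unsigned representation wraps instead of invoking UB.
//   return static_cast<uint32_t>(v) << shift;
// }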
@@ -1628,8 +1624,8 @@ TEST(RunInt32AddInComparison) {
m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i + *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (i + j) == 0;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -1640,31 +1636,31 @@ TEST(RunInt32AddInComparison) {
m.Word32Equal(m.Int32Constant(0), m.Int32Add(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i + *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (i + j) == 0;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
- m.Int32Constant(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i + *j) == 0;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Word32Equal(m.Int32Add(m.Int32Constant(i), m.Parameter(0)),
+ m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = (i + j) == 0;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Equal(m.Int32Add(m.Parameter(0), m.Int32Constant(*i)),
- m.Int32Constant(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*j + *i) == 0;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Word32Equal(m.Int32Add(m.Parameter(0), m.Int32Constant(i)),
+ m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = (j + i) == 0;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
@@ -1687,17 +1683,17 @@ TEST(RunInt32AddInComparison) {
default:
UNREACHABLE();
case IrOpcode::kWord32Sar:
- right = *j >> shift;
+ right = j >> shift;
break;
case IrOpcode::kWord32Shl:
- right = *j << shift;
+ right = static_cast<uint32_t>(j) << shift;
break;
case IrOpcode::kWord32Shr:
- right = static_cast<uint32_t>(*j) >> shift;
+ right = static_cast<uint32_t>(j) >> shift;
break;
}
- int32_t expected = (*i + right) == 0;
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = (i + right) == 0;
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -1714,8 +1710,8 @@ TEST(RunInt32SubP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i - *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = i - j;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -1724,20 +1720,20 @@ TEST(RunInt32SubImm) {
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i - *j;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Int32Sub(m.Int32Constant(i), m.Parameter(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = i - j;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)));
+ m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(i)));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *j - *i;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = j - i;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -1758,8 +1754,8 @@ TEST(RunInt32SubAndWord32SarP) {
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
- int32_t expected = *i - (*j >> shift);
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = i - (j >> shift);
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -1772,8 +1768,8 @@ TEST(RunInt32SubAndWord32SarP) {
FOR_INT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- int32_t expected = (*i >> shift) - *k;
- CHECK_EQ(expected, m.Call(*i, shift, *k));
+ int32_t expected = (i >> shift) - k;
+ CHECK_EQ(expected, m.Call(i, shift, k));
}
}
}
@@ -1784,29 +1780,29 @@ TEST(RunInt32SubAndWord32SarP) {
TEST(RunInt32SubAndWord32ShlP) {
{
RawMachineAssemblerTester<int32_t> m(
- MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Sub(m.Parameter(0),
m.Word32Shl(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
+ FOR_UINT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
- int32_t expected = *i - (*j << shift);
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = i - (j << shift);
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
}
{
RawMachineAssemblerTester<int32_t> m(
- MachineType::Int32(), MachineType::Uint32(), MachineType::Uint32());
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Sub(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
- FOR_INT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
- int32_t expected = (*i << shift) - *k;
- CHECK_EQ(expected, m.Call(*i, shift, *k));
+ int32_t expected = (i << shift) - k;
+ CHECK_EQ(expected, m.Call(i, shift, k));
}
}
}
@@ -1824,8 +1820,8 @@ TEST(RunInt32SubAndWord32ShrP) {
FOR_UINT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
// Use uint32_t because signed overflow is UB in C.
- uint32_t expected = *i - (*j >> shift);
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ uint32_t expected = i - (j >> shift);
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -1839,8 +1835,8 @@ TEST(RunInt32SubAndWord32ShrP) {
FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
- uint32_t expected = (*i >> shift) - *k;
- CHECK_EQ(expected, m.Call(*i, shift, *k));
+ uint32_t expected = (i >> shift) - k;
+ CHECK_EQ(expected, m.Call(i, shift, k));
}
}
}
@@ -1863,8 +1859,8 @@ TEST(RunInt32SubInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i - *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = (i - j) == 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -1881,8 +1877,8 @@ TEST(RunInt32SubInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i - *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = (i - j) != 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -1890,7 +1886,7 @@ TEST(RunInt32SubInBranch) {
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+ m.Branch(m.Word32Equal(m.Int32Sub(m.Int32Constant(i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
@@ -1898,8 +1894,8 @@ TEST(RunInt32SubInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i - *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (i - j) == 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -1907,7 +1903,7 @@ TEST(RunInt32SubInBranch) {
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32NotEqual(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+ m.Branch(m.Word32NotEqual(m.Int32Sub(m.Int32Constant(i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
@@ -1915,8 +1911,8 @@ TEST(RunInt32SubInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i - *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ int32_t expected = (i - j) != 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -1946,17 +1942,17 @@ TEST(RunInt32SubInBranch) {
default:
UNREACHABLE();
case IrOpcode::kWord32Sar:
- right = *j >> shift;
+ right = j >> shift;
break;
case IrOpcode::kWord32Shl:
- right = *j << shift;
+ right = static_cast<uint32_t>(j) << shift;
break;
case IrOpcode::kWord32Shr:
- right = static_cast<uint32_t>(*j) >> shift;
+ right = static_cast<uint32_t>(j) >> shift;
break;
}
- int32_t expected = ((*i - right) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = ((i - right) == 0) ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -1973,8 +1969,8 @@ TEST(RunInt32SubInComparison) {
m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i - *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (i - j) == 0;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -1985,31 +1981,31 @@ TEST(RunInt32SubInComparison) {
m.Word32Equal(m.Int32Constant(0), m.Int32Sub(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i - *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (i - j) == 0;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
- m.Int32Constant(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i - *j) == 0;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Word32Equal(m.Int32Sub(m.Int32Constant(i), m.Parameter(0)),
+ m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = (i - j) == 0;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Equal(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)),
- m.Int32Constant(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*j - *i) == 0;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Word32Equal(m.Int32Sub(m.Parameter(0), m.Int32Constant(i)),
+ m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = (j - i) == 0;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
@@ -2032,17 +2028,17 @@ TEST(RunInt32SubInComparison) {
default:
UNREACHABLE();
case IrOpcode::kWord32Sar:
- right = *j >> shift;
+ right = j >> shift;
break;
case IrOpcode::kWord32Shl:
- right = *j << shift;
+ right = static_cast<uint32_t>(j) << shift;
break;
case IrOpcode::kWord32Shr:
- right = static_cast<uint32_t>(*j) >> shift;
+ right = static_cast<uint32_t>(j) >> shift;
break;
}
- int32_t expected = (*i - right) == 0;
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = (i - right) == 0;
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -2058,8 +2054,8 @@ TEST(RunInt32MulP) {
bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int expected = base::MulWithWraparound(*i, *j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ int expected = base::MulWithWraparound(i, j);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2069,8 +2065,8 @@ TEST(RunInt32MulP) {
bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i * *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = i * j;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2084,8 +2080,8 @@ TEST(RunInt32MulHighP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected = static_cast<int32_t>(
- (static_cast<int64_t>(*i) * static_cast<int64_t>(*j)) >> 32);
- CHECK_EQ(expected, bt.call(*i, *j));
+ (static_cast<int64_t>(i) * static_cast<int64_t>(j)) >> 32);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2095,20 +2091,20 @@ TEST(RunInt32MulImm) {
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Int32Mul(m.Int32Constant(*i), m.Parameter(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i * *j;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Int32Mul(m.Int32Constant(i), m.Parameter(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = i * j;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(*i)));
+ m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(i)));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *j * *i;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = j * i;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -2119,16 +2115,15 @@ TEST(RunInt32MulAndInt32AddP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- int32_t p0 = *i;
- int32_t p1 = *j;
- m.Return(m.Int32Add(m.Int32Constant(p0),
- m.Int32Mul(m.Parameter(0), m.Int32Constant(p1))));
- FOR_INT32_INPUTS(k) {
- int32_t p2 = *k;
- int expected =
- base::AddWithWraparound(p0, base::MulWithWraparound(p1, p2));
- CHECK_EQ(expected, m.Call(p2));
- }
+ int32_t p0 = i;
+ int32_t p1 = j;
+ m.Return(m.Int32Add(m.Int32Constant(p0),
+ m.Int32Mul(m.Parameter(0), m.Int32Constant(p1))));
+ FOR_INT32_INPUTS(k) {
+ int32_t p2 = k;
+ int expected =
+     base::AddWithWraparound(p0, base::MulWithWraparound(p1, p2));
+ CHECK_EQ(expected, m.Call(p2));
+ }
}
}
}
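// base::AddWithWraparound / base::MulWithWraparound produce two's-complement
// results without UB. A plausible sketch of the idea, not V8's actual
// implementation:
//
// int32_t MulWithWraparoundSketch(int32_t a, int32_t b) {
//   // Multiply in uint32_t (wraps mod 2^32), then reinterpret as int32_t.
//   return static_cast<int32_t>(static_cast<uint32_t>(a) *
//                               static_cast<uint32_t>(b));
// }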
@@ -2140,9 +2135,9 @@ TEST(RunInt32MulAndInt32AddP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_INT32_INPUTS(k) {
- int32_t p0 = *i;
- int32_t p1 = *j;
- int32_t p2 = *k;
+ int32_t p0 = i;
+ int32_t p1 = j;
+ int32_t p2 = k;
int expected =
base::AddWithWraparound(p0, base::MulWithWraparound(p1, p2));
CHECK_EQ(expected, m.Call(p0, p1, p2));
@@ -2158,9 +2153,9 @@ TEST(RunInt32MulAndInt32AddP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_INT32_INPUTS(k) {
- int32_t p0 = *i;
- int32_t p1 = *j;
- int32_t p2 = *k;
+ int32_t p0 = i;
+ int32_t p1 = j;
+ int32_t p2 = k;
int expected =
base::AddWithWraparound(base::MulWithWraparound(p0, p1), p2);
CHECK_EQ(expected, m.Call(p0, p1, p2));
@@ -2173,13 +2168,13 @@ TEST(RunInt32MulAndInt32AddP) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
bt.AddReturn(
- m.Int32Add(m.Int32Constant(*i), m.Int32Mul(bt.param0, bt.param1)));
+ m.Int32Add(m.Int32Constant(i), m.Int32Mul(bt.param0, bt.param1)));
FOR_INT32_INPUTS(j) {
FOR_INT32_INPUTS(k) {
- int32_t p0 = *j;
- int32_t p1 = *k;
+ int32_t p0 = j;
+ int32_t p1 = k;
int expected =
- base::AddWithWraparound(*i, base::MulWithWraparound(p0, p1));
+ base::AddWithWraparound(i, base::MulWithWraparound(p0, p1));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
@@ -2197,9 +2192,9 @@ TEST(RunInt32MulAndInt32SubP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_INT32_INPUTS(k) {
- int32_t p0 = *i;
- int32_t p1 = *j;
- int32_t p2 = *k;
+ int32_t p0 = i;
+ int32_t p1 = j;
+ int32_t p2 = k;
int expected =
base::SubWithWraparound(p0, base::MulWithWraparound(p1, p2));
CHECK_EQ(expected, m.Call(p0, p1, p2));
@@ -2212,13 +2207,13 @@ TEST(RunInt32MulAndInt32SubP) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
bt.AddReturn(
- m.Int32Sub(m.Int32Constant(*i), m.Int32Mul(bt.param0, bt.param1)));
+ m.Int32Sub(m.Int32Constant(i), m.Int32Mul(bt.param0, bt.param1)));
FOR_INT32_INPUTS(j) {
FOR_INT32_INPUTS(k) {
- int32_t p0 = *j;
- int32_t p1 = *k;
+ int32_t p0 = j;
+ int32_t p1 = k;
int expected =
- base::SubWithWraparound(*i, base::MulWithWraparound(p0, p1));
+ base::SubWithWraparound(i, base::MulWithWraparound(p0, p1));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
@@ -2234,8 +2229,8 @@ TEST(RunUint32MulHighP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
int32_t expected = bit_cast<int32_t>(static_cast<uint32_t>(
- (static_cast<uint64_t>(*i) * static_cast<uint64_t>(*j)) >> 32));
- CHECK_EQ(expected, bt.call(bit_cast<int32_t>(*i), bit_cast<int32_t>(*j)));
+ (static_cast<uint64_t>(i) * static_cast<uint64_t>(j)) >> 32));
+ CHECK_EQ(expected, bt.call(bit_cast<int32_t>(i), bit_cast<int32_t>(j)));
}
}
}
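// The expectation above is the classic "mulhi" idiom: widen both operands to
// 64 bits, multiply, keep the upper half. A hedged sketch (hypothetical
// helper, not part of this patch):
//
// uint32_t Uint32MulHighSketch(uint32_t a, uint32_t b) {
//   // The top 32 bits of the full 64-bit product are the result.
//   return static_cast<uint32_t>(
//       (static_cast<uint64_t>(a) * static_cast<uint64_t>(b)) >> 32);
// }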
@@ -2248,8 +2243,8 @@ TEST(RunInt32DivP) {
bt.AddReturn(m.Int32Div(bt.param0, bt.param1));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int p0 = *i;
- int p1 = *j;
+ int p0 = i;
+ int p1 = j;
if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
int expected = static_cast<int32_t>(p0 / p1);
CHECK_EQ(expected, bt.call(p0, p1));
@@ -2263,8 +2258,8 @@ TEST(RunInt32DivP) {
bt.AddReturn(m.Int32Add(bt.param0, m.Int32Div(bt.param0, bt.param1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int p0 = *i;
- int p1 = *j;
+ int p0 = i;
+ int p1 = j;
if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
int expected =
static_cast<int32_t>(base::AddWithWraparound(p0, (p0 / p1)));
@@ -2283,8 +2278,8 @@ TEST(RunUint32DivP) {
bt.AddReturn(m.Uint32Div(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t p0 = *i;
- uint32_t p1 = *j;
+ uint32_t p0 = i;
+ uint32_t p1 = j;
if (p1 != 0) {
int32_t expected = bit_cast<int32_t>(p0 / p1);
CHECK_EQ(expected, bt.call(p0, p1));
@@ -2298,8 +2293,8 @@ TEST(RunUint32DivP) {
bt.AddReturn(m.Int32Add(bt.param0, m.Uint32Div(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t p0 = *i;
- uint32_t p1 = *j;
+ uint32_t p0 = i;
+ uint32_t p1 = j;
if (p1 != 0) {
int32_t expected = bit_cast<int32_t>(p0 + (p0 / p1));
CHECK_EQ(expected, bt.call(p0, p1));
@@ -2317,8 +2312,8 @@ TEST(RunInt32ModP) {
bt.AddReturn(m.Int32Mod(bt.param0, bt.param1));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int p0 = *i;
- int p1 = *j;
+ int p0 = i;
+ int p1 = j;
if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
int expected = static_cast<int32_t>(p0 % p1);
CHECK_EQ(expected, bt.call(p0, p1));
@@ -2332,8 +2327,8 @@ TEST(RunInt32ModP) {
bt.AddReturn(m.Int32Add(bt.param0, m.Int32Mod(bt.param0, bt.param1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int p0 = *i;
- int p1 = *j;
+ int p0 = i;
+ int p1 = j;
if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
int expected =
static_cast<int32_t>(base::AddWithWraparound(p0, (p0 % p1)));
@@ -2352,8 +2347,8 @@ TEST(RunUint32ModP) {
bt.AddReturn(m.Uint32Mod(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t p0 = *i;
- uint32_t p1 = *j;
+ uint32_t p0 = i;
+ uint32_t p1 = j;
if (p1 != 0) {
uint32_t expected = static_cast<uint32_t>(p0 % p1);
CHECK_EQ(expected, bt.call(p0, p1));
@@ -2367,8 +2362,8 @@ TEST(RunUint32ModP) {
bt.AddReturn(m.Int32Add(bt.param0, m.Uint32Mod(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t p0 = *i;
- uint32_t p1 = *j;
+ uint32_t p0 = i;
+ uint32_t p1 = j;
if (p1 != 0) {
uint32_t expected = static_cast<uint32_t>(p0 + (p0 % p1));
CHECK_EQ(expected, bt.call(p0, p1));
@@ -2386,8 +2381,8 @@ TEST(RunWord32AndP) {
bt.AddReturn(m.Word32And(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = *i & *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = i & j;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2397,8 +2392,8 @@ TEST(RunWord32AndP) {
bt.AddReturn(m.Word32And(bt.param0, m.Word32BitwiseNot(bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = *i & ~(*j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = i & ~(j);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2408,8 +2403,8 @@ TEST(RunWord32AndP) {
bt.AddReturn(m.Word32And(m.Word32BitwiseNot(bt.param0), bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = ~(*i) & *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = ~(i) & j;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2424,8 +2419,8 @@ TEST(RunWord32AndAndWord32ShlP) {
m.Word32Shl(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1F))));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i << (*j & 0x1F);
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = i << (j & 0x1F);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2436,8 +2431,8 @@ TEST(RunWord32AndAndWord32ShlP) {
m.Word32Shl(bt.param0, m.Word32And(m.Int32Constant(0x1F), bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i << (0x1F & *j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = i << (0x1F & j);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2452,8 +2447,8 @@ TEST(RunWord32AndAndWord32ShrP) {
m.Word32Shr(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1F))));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i >> (*j & 0x1F);
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = i >> (j & 0x1F);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2464,8 +2459,8 @@ TEST(RunWord32AndAndWord32ShrP) {
m.Word32Shr(bt.param0, m.Word32And(m.Int32Constant(0x1F), bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i >> (0x1F & *j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = i >> (0x1F & j);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2480,8 +2475,8 @@ TEST(RunWord32AndAndWord32SarP) {
m.Word32Sar(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1F))));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = *i >> (*j & 0x1F);
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = i >> (j & 0x1F);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2492,8 +2487,8 @@ TEST(RunWord32AndAndWord32SarP) {
m.Word32Sar(bt.param0, m.Word32And(m.Int32Constant(0x1F), bt.param1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = *i >> (0x1F & *j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = i >> (0x1F & j);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2504,21 +2499,21 @@ TEST(RunWord32AndImm) {
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32And(m.Int32Constant(*i), m.Parameter(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i & *j;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Word32And(m.Int32Constant(i), m.Parameter(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = i & j;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(
- m.Word32And(m.Int32Constant(*i), m.Word32BitwiseNot(m.Parameter(0))));
+ m.Word32And(m.Int32Constant(i), m.Word32BitwiseNot(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i & ~(*j);
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = i & ~(j);
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -2540,8 +2535,8 @@ TEST(RunWord32AndInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i & *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = (i & j) == 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2558,8 +2553,8 @@ TEST(RunWord32AndInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i & *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = (i & j) != 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2567,7 +2562,7 @@ TEST(RunWord32AndInBranch) {
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
+ m.Branch(m.Word32Equal(m.Word32And(m.Int32Constant(i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
@@ -2575,8 +2570,8 @@ TEST(RunWord32AndInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i & *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ int32_t expected = (i & j) == 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -2584,17 +2579,16 @@ TEST(RunWord32AndInBranch) {
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(
- m.Word32NotEqual(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
- m.Int32Constant(0)),
- &blocka, &blockb);
+ m.Branch(m.Word32NotEqual(m.Word32And(m.Int32Constant(i), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(constant));
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i & *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ int32_t expected = (i & j) != 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -2624,17 +2618,17 @@ TEST(RunWord32AndInBranch) {
default:
UNREACHABLE();
case IrOpcode::kWord32Sar:
- right = *j >> shift;
+ right = j >> shift;
break;
case IrOpcode::kWord32Shl:
- right = *j << shift;
+ right = static_cast<uint32_t>(j) << shift;
break;
case IrOpcode::kWord32Shr:
- right = static_cast<uint32_t>(*j) >> shift;
+ right = static_cast<uint32_t>(j) >> shift;
break;
}
- int32_t expected = ((*i & right) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = ((i & right) == 0) ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -2651,8 +2645,8 @@ TEST(RunWord32AndInComparison) {
m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i & *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (i & j) == 0;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2663,30 +2657,30 @@ TEST(RunWord32AndInComparison) {
m.Word32Equal(m.Int32Constant(0), m.Word32And(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i & *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (i & j) == 0;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
- m.Int32Constant(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i & *j) == 0;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Word32Equal(m.Word32And(m.Int32Constant(i), m.Parameter(0)),
+ m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = (i & j) == 0;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Equal(m.Word32And(m.Parameter(0), m.Int32Constant(*i)),
+ m.Return(m.Word32Equal(m.Word32And(m.Parameter(0), m.Int32Constant(i)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*j & *i) == 0;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (j & i) == 0;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -2700,8 +2694,8 @@ TEST(RunWord32OrP) {
bt.AddReturn(m.Word32Or(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i | *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = i | j;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2711,8 +2705,8 @@ TEST(RunWord32OrP) {
bt.AddReturn(m.Word32Or(bt.param0, m.Word32BitwiseNot(bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i | ~(*j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = i | ~(j);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2722,8 +2716,8 @@ TEST(RunWord32OrP) {
bt.AddReturn(m.Word32Or(m.Word32BitwiseNot(bt.param0), bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = ~(*i) | *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = ~(i) | j;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2734,21 +2728,21 @@ TEST(RunWord32OrImm) {
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i | *j;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Word32Or(m.Int32Constant(i), m.Parameter(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = i | j;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(
- m.Word32Or(m.Int32Constant(*i), m.Word32BitwiseNot(m.Parameter(0))));
+ m.Word32Or(m.Int32Constant(i), m.Word32BitwiseNot(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i | ~(*j);
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = i | ~(j);
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -2770,8 +2764,8 @@ TEST(RunWord32OrInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = (*i | *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = (i | j) == 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2788,8 +2782,8 @@ TEST(RunWord32OrInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = (*i | *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = (i | j) != 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2797,7 +2791,7 @@ TEST(RunWord32OrInBranch) {
FOR_INT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+ m.Branch(m.Word32Equal(m.Word32Or(m.Int32Constant(i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
@@ -2805,8 +2799,8 @@ TEST(RunWord32OrInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_INT32_INPUTS(j) {
- int32_t expected = (*i | *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ int32_t expected = (i | j) == 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -2814,7 +2808,7 @@ TEST(RunWord32OrInBranch) {
FOR_INT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32NotEqual(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+ m.Branch(m.Word32NotEqual(m.Word32Or(m.Int32Constant(i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
@@ -2822,8 +2816,8 @@ TEST(RunWord32OrInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_INT32_INPUTS(j) {
- int32_t expected = (*i | *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ int32_t expected = (i | j) != 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -2853,17 +2847,17 @@ TEST(RunWord32OrInBranch) {
default:
UNREACHABLE();
case IrOpcode::kWord32Sar:
- right = *j >> shift;
+ right = j >> shift;
break;
case IrOpcode::kWord32Shl:
- right = *j << shift;
+ right = static_cast<uint32_t>(j) << shift;
break;
case IrOpcode::kWord32Shr:
- right = static_cast<uint32_t>(*j) >> shift;
+ right = static_cast<uint32_t>(j) >> shift;
break;
}
- int32_t expected = ((*i | right) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = ((i | right) == 0) ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -2880,8 +2874,8 @@ TEST(RunWord32OrInComparison) {
m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i | *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = (i | j) == 0;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2892,30 +2886,30 @@ TEST(RunWord32OrInComparison) {
m.Word32Equal(m.Int32Constant(0), m.Word32Or(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i | *j) == 0;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = (i | j) == 0;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
- m.Int32Constant(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i | *j) == 0;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Word32Equal(m.Word32Or(m.Int32Constant(i), m.Parameter(0)),
+ m.Int32Constant(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = (i | j) == 0;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Equal(m.Word32Or(m.Parameter(0), m.Int32Constant(*i)),
+ m.Return(m.Word32Equal(m.Word32Or(m.Parameter(0), m.Int32Constant(i)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*j | *i) == 0;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (j | i) == 0;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -2926,11 +2920,11 @@ TEST(RunWord32XorP) {
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)));
- FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i ^ *j;
- CHECK_EQ(expected, m.Call(*j));
- }
+ m.Return(m.Word32Xor(m.Int32Constant(i), m.Parameter(0)));
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = i ^ j;
+ CHECK_EQ(expected, m.Call(j));
+ }
}
}
{
@@ -2939,8 +2933,8 @@ TEST(RunWord32XorP) {
bt.AddReturn(m.Word32Xor(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i ^ *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = i ^ j;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2950,8 +2944,8 @@ TEST(RunWord32XorP) {
bt.AddReturn(m.Word32Xor(bt.param0, m.Word32BitwiseNot(bt.param1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = *i ^ ~(*j);
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = i ^ ~(j);
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2961,8 +2955,8 @@ TEST(RunWord32XorP) {
bt.AddReturn(m.Word32Xor(m.Word32BitwiseNot(bt.param0), bt.param1));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = ~(*i) ^ *j;
- CHECK_EQ(expected, bt.call(*i, *j));
+ int32_t expected = ~(i) ^ j;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -2970,10 +2964,10 @@ TEST(RunWord32XorP) {
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(
- m.Word32Xor(m.Int32Constant(*i), m.Word32BitwiseNot(m.Parameter(0))));
+ m.Word32Xor(m.Int32Constant(i), m.Word32BitwiseNot(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i ^ ~(*j);
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = i ^ ~(j);
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -2995,8 +2989,8 @@ TEST(RunWord32XorInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (i ^ j) == 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -3013,8 +3007,8 @@ TEST(RunWord32XorInBranch) {
bt.AddReturn(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = (i ^ j) != 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -3022,7 +3016,7 @@ TEST(RunWord32XorInBranch) {
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
+ m.Branch(m.Word32Equal(m.Word32Xor(m.Int32Constant(i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
@@ -3030,8 +3024,8 @@ TEST(RunWord32XorInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (i ^ j) == 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -3039,17 +3033,16 @@ TEST(RunWord32XorInBranch) {
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(
- m.Word32NotEqual(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
- m.Int32Constant(0)),
- &blocka, &blockb);
+ m.Branch(m.Word32NotEqual(m.Word32Xor(m.Int32Constant(i), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(constant));
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = (i ^ j) != 0 ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -3079,17 +3072,17 @@ TEST(RunWord32XorInBranch) {
default:
UNREACHABLE();
case IrOpcode::kWord32Sar:
- right = *j >> shift;
+ right = j >> shift;
break;
case IrOpcode::kWord32Shl:
- right = *j << shift;
+ right = static_cast<uint32_t>(j) << shift;
break;
case IrOpcode::kWord32Shr:
- right = static_cast<uint32_t>(*j) >> shift;
+ right = static_cast<uint32_t>(j) >> shift;
break;
}
- int32_t expected = ((*i ^ right) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = ((i ^ right) == 0) ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -3104,8 +3097,8 @@ TEST(RunWord32ShlP) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *j << shift;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = j << shift;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -3115,8 +3108,8 @@ TEST(RunWord32ShlP) {
bt.AddReturn(m.Word32Shl(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
- uint32_t expected = *i << shift;
- CHECK_EQ(expected, bt.call(*i, shift));
+ uint32_t expected = i << shift;
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
}
@@ -3131,8 +3124,8 @@ TEST(RunWord32ShlInComparison) {
m.Word32Equal(m.Word32Shl(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
- uint32_t expected = 0 == (*i << shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ uint32_t expected = 0 == (i << shift);
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
}
@@ -3143,8 +3136,8 @@ TEST(RunWord32ShlInComparison) {
m.Word32Equal(m.Int32Constant(0), m.Word32Shl(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
- uint32_t expected = 0 == (*i << shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ uint32_t expected = 0 == (i << shift);
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
}
@@ -3155,8 +3148,8 @@ TEST(RunWord32ShlInComparison) {
m.Word32Equal(m.Int32Constant(0),
m.Word32Shl(m.Parameter(0), m.Int32Constant(shift))));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = 0 == (*i << shift);
- CHECK_EQ(expected, m.Call(*i));
+ uint32_t expected = 0 == (i << shift);
+ CHECK_EQ(expected, m.Call(i));
}
}
}
@@ -3167,8 +3160,8 @@ TEST(RunWord32ShlInComparison) {
m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = 0 == (*i << shift);
- CHECK_EQ(expected, m.Call(*i));
+ uint32_t expected = 0 == (i << shift);
+ CHECK_EQ(expected, m.Call(i));
}
}
}
@@ -3181,8 +3174,8 @@ TEST(RunWord32ShrP) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)));
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *j >> shift;
- CHECK_EQ(expected, m.Call(*j));
+ uint32_t expected = j >> shift;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -3192,8 +3185,8 @@ TEST(RunWord32ShrP) {
bt.AddReturn(m.Word32Shr(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
- uint32_t expected = *i >> shift;
- CHECK_EQ(expected, bt.call(*i, shift));
+ uint32_t expected = i >> shift;
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
CHECK_EQ(0x00010000u, bt.call(0x80000000, 15));
@@ -3213,8 +3206,8 @@ TEST(RunWordShiftInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = ((*i << shift) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*i));
+ uint32_t expected = ((i << shift) == 0) ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(i));
}
}
FOR_UINT32_SHIFTS(shift) {
@@ -3228,8 +3221,8 @@ TEST(RunWordShiftInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = ((*i >> shift) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*i));
+ uint32_t expected = ((i >> shift) == 0) ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(i));
}
}
FOR_UINT32_SHIFTS(shift) {
@@ -3243,8 +3236,8 @@ TEST(RunWordShiftInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_INT32_INPUTS(i) {
- int32_t expected = ((*i >> shift) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(*i));
+ int32_t expected = ((i >> shift) == 0) ? constant : 0 - constant;
+ CHECK_EQ(expected, m.Call(i));
}
}
}
@@ -3257,8 +3250,8 @@ TEST(RunWord32ShrInComparison) {
m.Word32Equal(m.Word32Shr(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
- uint32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ uint32_t expected = 0 == (i >> shift);
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
}
@@ -3269,8 +3262,8 @@ TEST(RunWord32ShrInComparison) {
m.Word32Equal(m.Int32Constant(0), m.Word32Shr(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
- uint32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ uint32_t expected = 0 == (i >> shift);
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
}
@@ -3281,8 +3274,8 @@ TEST(RunWord32ShrInComparison) {
m.Word32Equal(m.Int32Constant(0),
m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, m.Call(*i));
+ uint32_t expected = 0 == (i >> shift);
+ CHECK_EQ(expected, m.Call(i));
}
}
}
@@ -3293,8 +3286,8 @@ TEST(RunWord32ShrInComparison) {
m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, m.Call(*i));
+ uint32_t expected = 0 == (i >> shift);
+ CHECK_EQ(expected, m.Call(i));
}
}
}
@@ -3307,8 +3300,8 @@ TEST(RunWord32SarP) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)));
FOR_INT32_INPUTS(j) {
- int32_t expected = *j >> shift;
- CHECK_EQ(expected, m.Call(*j));
+ int32_t expected = j >> shift;
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -3318,8 +3311,8 @@ TEST(RunWord32SarP) {
bt.AddReturn(m.Word32Sar(bt.param0, bt.param1));
FOR_INT32_INPUTS(i) {
FOR_INT32_SHIFTS(shift) {
- int32_t expected = *i >> shift;
- CHECK_EQ(expected, bt.call(*i, shift));
+ int32_t expected = i >> shift;
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
CHECK_EQ(bit_cast<int32_t>(0xFFFF0000), bt.call(0x80000000, 15));
@@ -3335,8 +3328,8 @@ TEST(RunWord32SarInComparison) {
m.Word32Equal(m.Word32Sar(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_INT32_INPUTS(i) {
FOR_INT32_SHIFTS(shift) {
- int32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ int32_t expected = 0 == (i >> shift);
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
}
@@ -3347,8 +3340,8 @@ TEST(RunWord32SarInComparison) {
m.Word32Equal(m.Int32Constant(0), m.Word32Sar(bt.param0, bt.param1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_SHIFTS(shift) {
- int32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ int32_t expected = 0 == (i >> shift);
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
}
@@ -3359,8 +3352,8 @@ TEST(RunWord32SarInComparison) {
m.Word32Equal(m.Int32Constant(0),
m.Word32Sar(m.Parameter(0), m.Int32Constant(shift))));
FOR_INT32_INPUTS(i) {
- int32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, m.Call(*i));
+ int32_t expected = 0 == (i >> shift);
+ CHECK_EQ(expected, m.Call(i));
}
}
}
@@ -3371,8 +3364,8 @@ TEST(RunWord32SarInComparison) {
m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
FOR_INT32_INPUTS(i) {
- int32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, m.Call(*i));
+ int32_t expected = 0 == (i >> shift);
+ CHECK_EQ(expected, m.Call(i));
}
}
}
@@ -3385,8 +3378,8 @@ TEST(RunWord32RorP) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
m.Return(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)));
FOR_UINT32_INPUTS(j) {
- int32_t expected = base::bits::RotateRight32(*j, shift);
- CHECK_EQ(expected, m.Call(*j));
+ int32_t expected = base::bits::RotateRight32(j, shift);
+ CHECK_EQ(expected, m.Call(j));
}
}
}
@@ -3396,8 +3389,8 @@ TEST(RunWord32RorP) {
bt.AddReturn(m.Word32Ror(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
- uint32_t expected = base::bits::RotateRight32(*i, shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ uint32_t expected = base::bits::RotateRight32(i, shift);
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
}
@@ -3412,8 +3405,8 @@ TEST(RunWord32RorInComparison) {
m.Word32Equal(m.Word32Ror(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
- uint32_t expected = 0 == base::bits::RotateRight32(*i, shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ uint32_t expected = 0 == base::bits::RotateRight32(i, shift);
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
}
@@ -3424,8 +3417,8 @@ TEST(RunWord32RorInComparison) {
m.Word32Equal(m.Int32Constant(0), m.Word32Ror(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
- uint32_t expected = 0 == base::bits::RotateRight32(*i, shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ uint32_t expected = 0 == base::bits::RotateRight32(i, shift);
+ CHECK_EQ(expected, bt.call(i, shift));
}
}
}
@@ -3436,8 +3429,8 @@ TEST(RunWord32RorInComparison) {
m.Word32Equal(m.Int32Constant(0),
m.Word32Ror(m.Parameter(0), m.Int32Constant(shift))));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = 0 == base::bits::RotateRight32(*i, shift);
- CHECK_EQ(expected, m.Call(*i));
+ uint32_t expected = 0 == base::bits::RotateRight32(i, shift);
+ CHECK_EQ(expected, m.Call(i));
}
}
}
@@ -3448,8 +3441,8 @@ TEST(RunWord32RorInComparison) {
m.Word32Equal(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = 0 == base::bits::RotateRight32(*i, shift);
- CHECK_EQ(expected, m.Call(*i));
+ uint32_t expected = 0 == base::bits::RotateRight32(i, shift);
+ CHECK_EQ(expected, m.Call(i));
}
}
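// base::bits::RotateRight32 behaves like the usual branch-free rotate. A
// hedged sketch of an equivalent (assumed, not the library source):
//
// uint32_t RotateRight32Sketch(uint32_t v, uint32_t shift) {
//   // Masking with 31 keeps both shift amounts in [0, 31], so shift == 0
//   // is well-defined and returns v unchanged.
//   return (v >> (shift & 31)) | (v << ((32 - shift) & 31));
// }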
}
@@ -3459,8 +3452,8 @@ TEST(RunWord32BitwiseNotP) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(m.Word32BitwiseNot(m.Parameter(0)));
FOR_INT32_INPUTS(i) {
- int expected = ~(*i);
- CHECK_EQ(expected, m.Call(*i));
+ int expected = ~(i);
+ CHECK_EQ(expected, m.Call(i));
}
}
@@ -3469,8 +3462,8 @@ TEST(RunInt32NegP) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(m.Int32Neg(m.Parameter(0)));
FOR_INT32_INPUTS(i) {
- int expected = base::NegateWithWraparound(*i);
- CHECK_EQ(expected, m.Call(*i));
+ int expected = base::NegateWithWraparound(i);
+ CHECK_EQ(expected, m.Call(i));
}
}
@@ -3484,8 +3477,8 @@ TEST(RunWord32EqualAndWord32SarP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
- int32_t expected = (*i == (*j >> shift));
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = (i == (j >> shift));
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -3498,8 +3491,8 @@ TEST(RunWord32EqualAndWord32SarP) {
FOR_INT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
FOR_INT32_INPUTS(k) {
- int32_t expected = ((*i >> shift) == *k);
- CHECK_EQ(expected, m.Call(*i, shift, *k));
+ int32_t expected = ((i >> shift) == k);
+ CHECK_EQ(expected, m.Call(i, shift, k));
}
}
}
@@ -3516,8 +3509,8 @@ TEST(RunWord32EqualAndWord32ShlP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
- int32_t expected = (*i == (*j << shift));
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = (i == (j << shift));
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -3530,8 +3523,8 @@ TEST(RunWord32EqualAndWord32ShlP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- int32_t expected = ((*i << shift) == *k);
- CHECK_EQ(expected, m.Call(*i, shift, *k));
+ int32_t expected = ((i << shift) == k);
+ CHECK_EQ(expected, m.Call(i, shift, k));
}
}
}
@@ -3548,8 +3541,8 @@ TEST(RunWord32EqualAndWord32ShrP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
- int32_t expected = (*i == (*j >> shift));
- CHECK_EQ(expected, m.Call(*i, *j, shift));
+ int32_t expected = (i == (j >> shift));
+ CHECK_EQ(expected, m.Call(i, j, shift));
}
}
}
@@ -3562,8 +3555,8 @@ TEST(RunWord32EqualAndWord32ShrP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
- int32_t expected = ((*i >> shift) == *k);
- CHECK_EQ(expected, m.Call(*i, shift, *k));
+ int32_t expected = ((i >> shift) == k);
+ CHECK_EQ(expected, m.Call(i, shift, k));
}
}
}
@@ -3644,7 +3637,7 @@ TEST(RunFloat32Add) {
m.Return(m.Float32Add(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i + *j, m.Call(*i, *j)); }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(i + j, m.Call(i, j)); }
}
}
@@ -3655,14 +3648,14 @@ TEST(RunFloat32Sub) {
m.Return(m.Float32Sub(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i - *j, m.Call(*i, *j)); }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(i - j, m.Call(i, j)); }
}
}
TEST(RunFloat32Neg) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
m.Return(m.AddNode(m.machine()->Float32Neg(), m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(-0.0f - *i, m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(-0.0f - i, m.Call(i)); }
}
TEST(RunFloat32Mul) {
@@ -3671,7 +3664,7 @@ TEST(RunFloat32Mul) {
m.Return(m.Float32Mul(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i * *j, m.Call(*i, *j)); }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(i * j, m.Call(i, j)); }
}
}
@@ -3682,9 +3675,7 @@ TEST(RunFloat32Div) {
m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) {
- CHECK_FLOAT_EQ(base::Divide(*i, *j), m.Call(*i, *j));
- }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(base::Divide(i, j), m.Call(i, j)); }
}
}
@@ -3695,7 +3686,7 @@ TEST(RunFloat64Add) {
m.Return(m.Float64Add(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(*i + *j, m.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(i + j, m.Call(i, j)); }
}
}
@@ -3706,14 +3697,14 @@ TEST(RunFloat64Sub) {
m.Return(m.Float64Sub(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(*i - *j, m.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(i - j, m.Call(i, j)); }
}
}
TEST(RunFloat64Neg) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.AddNode(m.machine()->Float64Neg(), m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_FLOAT_EQ(-0.0 - *i, m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(-0.0 - i, m.Call(i)); }
}
TEST(RunFloat64Mul) {
@@ -3722,7 +3713,7 @@ TEST(RunFloat64Mul) {
m.Return(m.Float64Mul(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(*i * *j, m.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(i * j, m.Call(i, j)); }
}
}
@@ -3733,9 +3724,7 @@ TEST(RunFloat64Div) {
m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) {
- CHECK_DOUBLE_EQ(base::Divide(*i, *j), m.Call(*i, *j));
- }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(base::Divide(i, j), m.Call(i, j)); }
}
}
@@ -3746,7 +3735,7 @@ TEST(RunFloat64Mod) {
m.Return(m.Float64Mod(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(Modulo(*i, *j), m.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(Modulo(i, j), m.Call(i, j)); }
}
}
@@ -3792,7 +3781,7 @@ TEST(RunFloat32AddP) {
bt.AddReturn(m.Float32Add(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(*pl + *pr, bt.call(*pl, *pr)); }
+ FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(pl + pr, bt.call(pl, pr)); }
}
}
@@ -3804,7 +3793,7 @@ TEST(RunFloat64AddP) {
bt.AddReturn(m.Float64Add(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) { CHECK_DOUBLE_EQ(*pl + *pr, bt.call(*pl, *pr)); }
+ FOR_FLOAT64_INPUTS(pr) { CHECK_DOUBLE_EQ(pl + pr, bt.call(pl, pr)); }
}
}
@@ -3814,9 +3803,7 @@ TEST(RunFloat64MaxP) {
bt.AddReturn(m.Float64Max(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- CHECK_DOUBLE_EQ(JSMax(*pl, *pr), bt.call(*pl, *pr));
- }
+ FOR_FLOAT64_INPUTS(pr) { CHECK_DOUBLE_EQ(JSMax(pl, pr), bt.call(pl, pr)); }
}
}
@@ -3827,9 +3814,7 @@ TEST(RunFloat64MinP) {
bt.AddReturn(m.Float64Min(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- CHECK_DOUBLE_EQ(JSMin(*pl, *pr), bt.call(*pl, *pr));
- }
+ FOR_FLOAT64_INPUTS(pr) { CHECK_DOUBLE_EQ(JSMin(pl, pr), bt.call(pl, pr)); }
}
}
@@ -3839,9 +3824,7 @@ TEST(RunFloat32Max) {
bt.AddReturn(m.Float32Max(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) {
- CHECK_FLOAT_EQ(JSMax(*pl, *pr), bt.call(*pl, *pr));
- }
+ FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(JSMax(pl, pr), bt.call(pl, pr)); }
}
}
@@ -3851,9 +3834,7 @@ TEST(RunFloat32Min) {
bt.AddReturn(m.Float32Min(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) {
- CHECK_FLOAT_EQ(JSMin(*pl, *pr), bt.call(*pl, *pr));
- }
+ FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(JSMin(pl, pr), bt.call(pl, pr)); }
}
}
@@ -3863,9 +3844,7 @@ TEST(RunFloat64Max) {
bt.AddReturn(m.Float64Max(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- CHECK_DOUBLE_EQ(JSMax(*pl, *pr), bt.call(*pl, *pr));
- }
+ FOR_FLOAT64_INPUTS(pr) { CHECK_DOUBLE_EQ(JSMax(pl, pr), bt.call(pl, pr)); }
}
}
@@ -3875,9 +3854,7 @@ TEST(RunFloat64Min) {
bt.AddReturn(m.Float64Min(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- CHECK_DOUBLE_EQ(JSMin(*pl, *pr), bt.call(*pl, *pr));
- }
+ FOR_FLOAT64_INPUTS(pr) { CHECK_DOUBLE_EQ(JSMin(pl, pr), bt.call(pl, pr)); }
}
}
@@ -3888,7 +3865,7 @@ TEST(RunFloat32SubP) {
bt.AddReturn(m.Float32Sub(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(*pl - *pr, bt.call(*pl, *pr)); }
+ FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(pl - pr, bt.call(pl, pr)); }
}
}
@@ -3896,9 +3873,9 @@ TEST(RunFloat32SubP) {
TEST(RunFloat32SubImm1) {
FOR_FLOAT32_INPUTS(i) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
- m.Return(m.Float32Sub(m.Float32Constant(*i), m.Parameter(0)));
+ m.Return(m.Float32Sub(m.Float32Constant(i), m.Parameter(0)));
- FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i - *j, m.Call(*j)); }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(i - j, m.Call(j)); }
}
}
@@ -3906,9 +3883,9 @@ TEST(RunFloat32SubImm1) {
TEST(RunFloat32SubImm2) {
FOR_FLOAT32_INPUTS(i) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
- m.Return(m.Float32Sub(m.Parameter(0), m.Float32Constant(*i)));
+ m.Return(m.Float32Sub(m.Parameter(0), m.Float32Constant(i)));
- FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*j - *i, m.Call(*j)); }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(j - i, m.Call(j)); }
}
}
@@ -3916,9 +3893,9 @@ TEST(RunFloat32SubImm2) {
TEST(RunFloat64SubImm1) {
FOR_FLOAT64_INPUTS(i) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
- m.Return(m.Float64Sub(m.Float64Constant(*i), m.Parameter(0)));
+ m.Return(m.Float64Sub(m.Float64Constant(i), m.Parameter(0)));
- FOR_FLOAT64_INPUTS(j) { CHECK_FLOAT_EQ(*i - *j, m.Call(*j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(i - j, m.Call(j)); }
}
}
@@ -3926,9 +3903,9 @@ TEST(RunFloat64SubImm1) {
TEST(RunFloat64SubImm2) {
FOR_FLOAT64_INPUTS(i) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
- m.Return(m.Float64Sub(m.Parameter(0), m.Float64Constant(*i)));
+ m.Return(m.Float64Sub(m.Parameter(0), m.Float64Constant(i)));
- FOR_FLOAT64_INPUTS(j) { CHECK_FLOAT_EQ(*j - *i, m.Call(*j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(j - i, m.Call(j)); }
}
}
@@ -3941,8 +3918,8 @@ TEST(RunFloat64SubP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
- double expected = *pl - *pr;
- CHECK_DOUBLE_EQ(expected, bt.call(*pl, *pr));
+ double expected = pl - pr;
+ CHECK_DOUBLE_EQ(expected, bt.call(pl, pr));
}
}
}
@@ -3955,7 +3932,7 @@ TEST(RunFloat32MulP) {
bt.AddReturn(m.Float32Mul(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(*pl * *pr, bt.call(*pl, *pr)); }
+ FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(pl * pr, bt.call(pl, pr)); }
}
}
@@ -3968,8 +3945,8 @@ TEST(RunFloat64MulP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
- double expected = *pl * *pr;
- CHECK_DOUBLE_EQ(expected, bt.call(*pl, *pr));
+ double expected = pl * pr;
+ CHECK_DOUBLE_EQ(expected, bt.call(pl, pr));
}
}
}
@@ -3983,9 +3960,7 @@ TEST(RunFloat64MulAndFloat64Add1) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
- FOR_FLOAT64_INPUTS(k) {
- CHECK_DOUBLE_EQ((*i * *j) + *k, m.Call(*i, *j, *k));
- }
+ FOR_FLOAT64_INPUTS(k) { CHECK_DOUBLE_EQ((i * j) + k, m.Call(i, j, k)); }
}
}
}
@@ -3999,9 +3974,7 @@ TEST(RunFloat64MulAndFloat64Add2) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
- FOR_FLOAT64_INPUTS(k) {
- CHECK_DOUBLE_EQ(*i + (*j * *k), m.Call(*i, *j, *k));
- }
+ FOR_FLOAT64_INPUTS(k) { CHECK_DOUBLE_EQ(i + (j * k), m.Call(i, j, k)); }
}
}
}
@@ -4015,9 +3988,7 @@ TEST(RunFloat64MulAndFloat64Sub1) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
- FOR_FLOAT64_INPUTS(k) {
- CHECK_DOUBLE_EQ((*i * *j) - *k, m.Call(*i, *j, *k));
- }
+ FOR_FLOAT64_INPUTS(k) { CHECK_DOUBLE_EQ((i * j) - k, m.Call(i, j, k)); }
}
}
}
@@ -4031,9 +4002,7 @@ TEST(RunFloat64MulAndFloat64Sub2) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
- FOR_FLOAT64_INPUTS(k) {
- CHECK_DOUBLE_EQ(*i - (*j * *k), m.Call(*i, *j, *k));
- }
+ FOR_FLOAT64_INPUTS(k) { CHECK_DOUBLE_EQ(i - (j * k), m.Call(i, j, k)); }
}
}
}
@@ -4042,9 +4011,9 @@ TEST(RunFloat64MulAndFloat64Sub2) {
TEST(RunFloat64MulImm1) {
FOR_FLOAT64_INPUTS(i) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
- m.Return(m.Float64Mul(m.Float64Constant(*i), m.Parameter(0)));
+ m.Return(m.Float64Mul(m.Float64Constant(i), m.Parameter(0)));
- FOR_FLOAT64_INPUTS(j) { CHECK_FLOAT_EQ(*i * *j, m.Call(*j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(i * j, m.Call(j)); }
}
}
@@ -4052,9 +4021,9 @@ TEST(RunFloat64MulImm1) {
TEST(RunFloat64MulImm2) {
FOR_FLOAT64_INPUTS(i) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
- m.Return(m.Float64Mul(m.Parameter(0), m.Float64Constant(*i)));
+ m.Return(m.Float64Mul(m.Parameter(0), m.Float64Constant(i)));
- FOR_FLOAT64_INPUTS(j) { CHECK_FLOAT_EQ(*j * *i, m.Call(*j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(j * i, m.Call(j)); }
}
}
@@ -4067,7 +4036,7 @@ TEST(RunFloat32DivP) {
FOR_FLOAT32_INPUTS(pl) {
FOR_FLOAT32_INPUTS(pr) {
- CHECK_FLOAT_EQ(base::Divide(*pl, *pr), bt.call(*pl, *pr));
+ CHECK_FLOAT_EQ(base::Divide(pl, pr), bt.call(pl, pr));
}
}
}
@@ -4081,7 +4050,7 @@ TEST(RunFloat64DivP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
- CHECK_DOUBLE_EQ(base::Divide(*pl, *pr), bt.call(*pl, *pr));
+ CHECK_DOUBLE_EQ(base::Divide(pl, pr), bt.call(pl, pr));
}
}
}
@@ -4094,7 +4063,7 @@ TEST(RunFloat64ModP) {
bt.AddReturn(m.Float64Mod(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(Modulo(*i, *j), bt.call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(Modulo(i, j), bt.call(i, j)); }
}
}
@@ -4111,7 +4080,7 @@ TEST(RunChangeInt32ToFloat64_B) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Int32());
m.Return(m.ChangeInt32ToFloat64(m.Parameter(0)));
- FOR_INT32_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(*i), m.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(i), m.Call(i)); }
}
@@ -4119,7 +4088,7 @@ TEST(RunChangeUint32ToFloat64) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Uint32());
m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
- FOR_UINT32_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(*i), m.Call(*i)); }
+ FOR_UINT32_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(i), m.Call(i)); }
}
@@ -4134,8 +4103,8 @@ TEST(RunTruncateFloat32ToInt32) {
// is.
float lower_bound = static_cast<float>(INT32_MIN);
FOR_FLOAT32_INPUTS(i) {
- if (*i < upper_bound && *i >= lower_bound) {
- CHECK_FLOAT_EQ(static_cast<int32_t>(*i), m.Call(*i));
+ if (i < upper_bound && i >= lower_bound) {
+ CHECK_FLOAT_EQ(static_cast<int32_t>(i), m.Call(i));
}
}
}
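Editor's note: the bounds guard above skips inputs whose truncation would be undefined. A minimal standalone sketch of why the bounds are asymmetric (plain C++, independent of the test harness):

    #include <cassert>
    #include <cstdint>

    int main() {
      // INT32_MAX (2^31 - 1) is not representable as a float; it rounds up
      // to 2^31, which truncation cannot produce, so it must be excluded
      // with a strict '<'.
      float upper = static_cast<float>(INT32_MAX);
      assert(upper == 2147483648.0f);
      // INT32_MIN (-2^31) is exactly representable, so '>=' keeps it valid.
      float lower = static_cast<float>(INT32_MIN);
      assert(static_cast<int32_t>(lower) == INT32_MIN);
      return 0;
    }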
@@ -4150,14 +4119,14 @@ TEST(RunTruncateFloat32ToUint32) {
double upper_bound = 4294967296.0f;
double lower_bound = -1.0f;
FOR_UINT32_INPUTS(i) {
- volatile float input = static_cast<float>(*i);
+ volatile float input = static_cast<float>(i);
if (input < upper_bound) {
CHECK_EQ(static_cast<uint32_t>(input), m.Call(input));
}
}
FOR_FLOAT32_INPUTS(j) {
- if ((*j < upper_bound) && (*j > lower_bound)) {
- CHECK_FLOAT_EQ(static_cast<uint32_t>(*j), m.Call(*j));
+ if ((j < upper_bound) && (j > lower_bound)) {
+ CHECK_FLOAT_EQ(static_cast<uint32_t>(j), m.Call(j));
}
}
}
@@ -4177,7 +4146,7 @@ TEST(RunChangeFloat64ToInt32_B) {
// Note we don't check fractional inputs, or inputs outside the range of
// int32, because these Convert operators really should be Change operators.
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, m.Call(static_cast<double>(*i))); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, m.Call(static_cast<double>(i))); }
for (int32_t n = 1; n < 31; ++n) {
CHECK_EQ(1 << n, m.Call(static_cast<double>(1 << n)));
@@ -4193,7 +4162,7 @@ TEST(RunChangeFloat64ToUint32) {
m.Return(m.ChangeFloat64ToUint32(m.Parameter(0)));
{
- FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, m.Call(static_cast<double>(*i))); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(i, m.Call(static_cast<double>(i))); }
}
// Check various powers of 2.
@@ -4212,7 +4181,7 @@ TEST(RunTruncateFloat64ToFloat32) {
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_FLOAT_EQ(DoubleToFloat32(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_FLOAT_EQ(DoubleToFloat32(i), m.Call(i)); }
}
uint64_t ToInt64(uint32_t low, uint32_t high) {
@@ -4239,11 +4208,11 @@ TEST(RunInt32PairAdd) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xFFFFFFFF),
- static_cast<uint32_t>(*j >> 32));
- CHECK_EQ(*i + *j, ToInt64(low, high));
+ m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32),
+ static_cast<uint32_t>(j & 0xFFFFFFFF),
+ static_cast<uint32_t>(j >> 32));
+ CHECK_EQ(i + j, ToInt64(low, high));
}
}
}
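Editor's note: the pair tests split each 64-bit input into 32-bit halves and reassemble the machine result with ToInt64. A minimal sketch of that round-trip, assuming ToInt64 combines as (high << 32) | low, which matches how the tests slice their inputs:

    #include <cassert>
    #include <cstdint>

    uint64_t ToInt64(uint32_t low, uint32_t high) {
      return (static_cast<uint64_t>(high) << 32) | low;
    }

    int main() {
      uint64_t i = 0x123456789ABCDEF0ull;
      uint32_t low = static_cast<uint32_t>(i & 0xFFFFFFFF);
      uint32_t high = static_cast<uint32_t>(i >> 32);
      assert(ToInt64(low, high) == i);  // lossless round-trip
      return 0;
    }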
@@ -4259,11 +4228,11 @@ TEST(RunInt32PairAddUseOnlyHighWord) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
CHECK_EQ(
- static_cast<uint32_t>((*i + *j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xFFFFFFFF),
- static_cast<uint32_t>(*j >> 32))));
+ static_cast<uint32_t>((i + j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32),
+ static_cast<uint32_t>(j & 0xFFFFFFFF),
+ static_cast<uint32_t>(j >> 32))));
}
}
}
@@ -4286,8 +4255,8 @@ void TestInt32PairAddWithSharedInput(int a, int b, int c, int d) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- m.Call(*i, *j);
- uint32_t inputs[] = {*i, *j};
+ m.Call(i, j);
+ uint32_t inputs[] = {i, j};
CHECK_EQ(ToInt64(inputs[a], inputs[b]) + ToInt64(inputs[c], inputs[d]),
ToInt64(low, high));
}
@@ -4322,11 +4291,11 @@ TEST(RunInt32PairSub) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xFFFFFFFF),
- static_cast<uint32_t>(*j >> 32));
- CHECK_EQ(*i - *j, ToInt64(low, high));
+ m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32),
+ static_cast<uint32_t>(j & 0xFFFFFFFF),
+ static_cast<uint32_t>(j >> 32));
+ CHECK_EQ(i - j, ToInt64(low, high));
}
}
}
@@ -4342,11 +4311,11 @@ TEST(RunInt32PairSubUseOnlyHighWord) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
CHECK_EQ(
- static_cast<uint32_t>((*i - *j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xFFFFFFFF),
- static_cast<uint32_t>(*j >> 32))));
+ static_cast<uint32_t>((i - j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32),
+ static_cast<uint32_t>(j & 0xFFFFFFFF),
+ static_cast<uint32_t>(j >> 32))));
}
}
}
@@ -4369,8 +4338,8 @@ void TestInt32PairSubWithSharedInput(int a, int b, int c, int d) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- m.Call(*i, *j);
- uint32_t inputs[] = {*i, *j};
+ m.Call(i, j);
+ uint32_t inputs[] = {i, j};
CHECK_EQ(ToInt64(inputs[a], inputs[b]) - ToInt64(inputs[c], inputs[d]),
ToInt64(low, high));
}
@@ -4405,11 +4374,11 @@ TEST(RunInt32PairMul) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xFFFFFFFF),
- static_cast<uint32_t>(*j >> 32));
- CHECK_EQ(*i * *j, ToInt64(low, high));
+ m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32),
+ static_cast<uint32_t>(j & 0xFFFFFFFF),
+ static_cast<uint32_t>(j >> 32));
+ CHECK_EQ(i * j, ToInt64(low, high));
}
}
}
@@ -4425,11 +4394,11 @@ TEST(RunInt32PairMulUseOnlyHighWord) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
CHECK_EQ(
- static_cast<uint32_t>((*i * *j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32),
- static_cast<uint32_t>(*j & 0xFFFFFFFF),
- static_cast<uint32_t>(*j >> 32))));
+ static_cast<uint32_t>((i * j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32),
+ static_cast<uint32_t>(j & 0xFFFFFFFF),
+ static_cast<uint32_t>(j >> 32))));
}
}
}
@@ -4452,8 +4421,8 @@ void TestInt32PairMulWithSharedInput(int a, int b, int c, int d) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- m.Call(*i, *j);
- uint32_t inputs[] = {*i, *j};
+ m.Call(i, j);
+ uint32_t inputs[] = {i, j};
CHECK_EQ(ToInt64(inputs[a], inputs[b]) * ToInt64(inputs[c], inputs[d]),
ToInt64(low, high));
}
@@ -4488,9 +4457,9 @@ TEST(RunWord32PairShl) {
FOR_UINT64_INPUTS(i) {
for (uint32_t j = 0; j < 64; j++) {
- m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32), j);
- CHECK_EQ(*i << j, ToInt64(low, high));
+ m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32), j);
+ CHECK_EQ(i << j, ToInt64(low, high));
}
}
}
@@ -4505,9 +4474,9 @@ TEST(RunWord32PairShlUseOnlyHighWord) {
FOR_UINT64_INPUTS(i) {
for (uint32_t j = 0; j < 64; j++) {
CHECK_EQ(
- static_cast<uint32_t>((*i << j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32), j)));
+ static_cast<uint32_t>((i << j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32), j)));
}
}
}
@@ -4530,8 +4499,8 @@ void TestWord32PairShlWithSharedInput(int a, int b) {
FOR_UINT32_INPUTS(i) {
for (uint32_t j = 0; j < 64; j++) {
- m.Call(*i, j);
- uint32_t inputs[] = {*i, j};
+ m.Call(i, j);
+ uint32_t inputs[] = {i, j};
CHECK_EQ(ToInt64(inputs[a], inputs[b]) << j, ToInt64(low, high));
}
}
@@ -4562,9 +4531,9 @@ TEST(RunWord32PairShr) {
FOR_UINT64_INPUTS(i) {
for (uint32_t j = 0; j < 64; j++) {
- m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32), j);
- CHECK_EQ(*i >> j, ToInt64(low, high));
+ m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32), j);
+ CHECK_EQ(i >> j, ToInt64(low, high));
}
}
}
@@ -4579,9 +4548,9 @@ TEST(RunWord32PairShrUseOnlyHighWord) {
FOR_UINT64_INPUTS(i) {
for (uint32_t j = 0; j < 64; j++) {
CHECK_EQ(
- static_cast<uint32_t>((*i >> j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32), j)));
+ static_cast<uint32_t>((i >> j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32), j)));
}
}
}
@@ -4604,9 +4573,9 @@ TEST(RunWord32PairSar) {
FOR_INT64_INPUTS(i) {
for (uint32_t j = 0; j < 64; j++) {
- m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32), j);
- CHECK_EQ(*i >> j, static_cast<int64_t>(ToInt64(low, high)));
+ m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32), j);
+ CHECK_EQ(i >> j, static_cast<int64_t>(ToInt64(low, high)));
}
}
}
@@ -4621,9 +4590,9 @@ TEST(RunWord32PairSarUseOnlyHighWord) {
FOR_INT64_INPUTS(i) {
for (uint32_t j = 0; j < 64; j++) {
CHECK_EQ(
- static_cast<uint32_t>((*i >> j) >> 32),
- static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xFFFFFFFF),
- static_cast<uint32_t>(*i >> 32), j)));
+ static_cast<uint32_t>((i >> j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32), j)));
}
}
}
@@ -4740,8 +4709,14 @@ TEST(RunRefDiamond) {
m.Goto(&end);
m.Bind(&end);
Node* phi = m.Phi(MachineRepresentation::kTagged, k2, k1);
- m.Store(MachineRepresentation::kTagged, m.PointerConstant(&buffer),
- m.Int32Constant(0), phi, kNoWriteBarrier);
+ if (COMPRESS_POINTERS_BOOL) {
+ // Since |buffer| is located off-heap, use full pointer store.
+ m.Store(MachineType::PointerRepresentation(), m.PointerConstant(&buffer),
+ m.Int32Constant(0), m.BitcastTaggedToWord(phi), kNoWriteBarrier);
+ } else {
+ m.Store(MachineRepresentation::kTagged, m.PointerConstant(&buffer),
+ m.Int32Constant(0), phi, kNoWriteBarrier);
+ }
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
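Editor's note: the new branch exists because, with pointer compression, a kTagged store writes only the 32-bit compressed value, while |buffer| is an ordinary off-heap C++ variable holding a full machine word. A hypothetical illustration of the store-width mismatch (not V8 API):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t slot = 0xFFFFFFFFFFFFFFFFull;  // off-heap, full-word buffer
      uint32_t compressed = 0x12345678;       // what a 32-bit tagged store writes
      std::memcpy(&slot, &compressed, sizeof(compressed));
      // Only four of the eight bytes were written; the other four are stale.
      assert(slot != 0x0000000012345678ull);
      return 0;
    }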
@@ -4774,8 +4749,14 @@ TEST(RunDoubleRefDiamond) {
Node* rphi = m.Phi(MachineRepresentation::kTagged, r2, r1);
m.Store(MachineRepresentation::kFloat64, m.PointerConstant(&dbuffer),
m.Int32Constant(0), dphi, kNoWriteBarrier);
- m.Store(MachineRepresentation::kTagged, m.PointerConstant(&rbuffer),
- m.Int32Constant(0), rphi, kNoWriteBarrier);
+ if (COMPRESS_POINTERS_BOOL) {
+    // Since |rbuffer| is located off-heap, use full pointer store.
+ m.Store(MachineType::PointerRepresentation(), m.PointerConstant(&rbuffer),
+ m.Int32Constant(0), m.BitcastTaggedToWord(rphi), kNoWriteBarrier);
+ } else {
+ m.Store(MachineRepresentation::kTagged, m.PointerConstant(&rbuffer),
+ m.Int32Constant(0), rphi, kNoWriteBarrier);
+ }
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -4819,8 +4800,14 @@ TEST(RunDoubleRefDoubleDiamond) {
m.Store(MachineRepresentation::kFloat64, m.PointerConstant(&dbuffer),
m.Int32Constant(0), dphi2, kNoWriteBarrier);
- m.Store(MachineRepresentation::kTagged, m.PointerConstant(&rbuffer),
- m.Int32Constant(0), rphi2, kNoWriteBarrier);
+ if (COMPRESS_POINTERS_BOOL) {
+    // Since |rbuffer| is located off-heap, use full pointer store.
+ m.Store(MachineType::PointerRepresentation(), m.PointerConstant(&rbuffer),
+ m.Int32Constant(0), m.BitcastTaggedToWord(rphi2), kNoWriteBarrier);
+ } else {
+ m.Store(MachineRepresentation::kTagged, m.PointerConstant(&rbuffer),
+ m.Int32Constant(0), rphi2, kNoWriteBarrier);
+ }
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -5093,7 +5080,7 @@ TEST(RunFloat64UnorderedCompare) {
for (size_t o = 0; o < arraysize(operators); ++o) {
for (int j = 0; j < 2; j++) {
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.Float64Constant(*i);
+ Node* a = m.Float64Constant(i);
Node* b = m.Float64Constant(nan);
if (j == 1) std::swap(a, b);
m.Return(m.AddNode(operators[o], a, b));
@@ -5116,8 +5103,8 @@ TEST(RunFloat64Equal) {
CompareWrapper cmp(IrOpcode::kFloat64Equal);
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
- input_a = *pl;
- input_b = *pr;
+ input_a = pl;
+ input_b = pr;
int32_t expected = cmp.Float64Compare(input_a, input_b) ? 1 : 0;
CHECK_EQ(expected, m.Call());
}
@@ -5137,8 +5124,8 @@ TEST(RunFloat64LessThan) {
CompareWrapper cmp(IrOpcode::kFloat64LessThan);
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
- input_a = *pl;
- input_b = *pr;
+ input_a = pl;
+ input_b = pr;
int32_t expected = cmp.Float64Compare(input_a, input_b) ? 1 : 0;
CHECK_EQ(expected, m.Call());
}
@@ -5276,11 +5263,11 @@ TEST(RunSpillConstantsAndParameters) {
m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1))));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = base::AddWithWraparound(*i, *j);
+ int32_t expected = base::AddWithWraparound(i, j);
for (int k = 0; k < kInputSize; k++) {
expected = base::AddWithWraparound(expected, kBase + k);
}
- CHECK_EQ(expected, m.Call(*i, *j));
+ CHECK_EQ(expected, m.Call(i, j));
expected = 0;
for (int k = 0; k < kInputSize; k++) {
expected += kBase + k;
@@ -5328,8 +5315,8 @@ TEST(RunInt32AddWithOverflowP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected_val;
- int expected_ovf = base::bits::SignedAddOverflow32(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ int expected_ovf = base::bits::SignedAddOverflow32(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(i, j));
CHECK_EQ(expected_val, actual_val);
}
}
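Editor's note: the overflow tests compare the generated code against base::bits::SignedAddOverflow32. A portable reference sketch of those semantics, assuming (as the checks above imply) a wraparound result plus an overflow flag:

    #include <cstdint>

    // Returns nonzero on signed overflow; *val receives the wrapped sum.
    int SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
      uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
      *val = static_cast<int32_t>(res);
      // Overflow iff both operands have the sign bit that the result lacks.
      return ((res ^ static_cast<uint32_t>(lhs)) &
              (res ^ static_cast<uint32_t>(rhs)) & 0x80000000u) != 0;
    }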
@@ -5341,41 +5328,39 @@ TEST(RunInt32AddWithOverflowImm) {
FOR_INT32_INPUTS(i) {
{
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- Node* add = m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0));
+ Node* add = m.Int32AddWithOverflow(m.Int32Constant(i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
- int expected_ovf =
- base::bits::SignedAddOverflow32(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, m.Call(*j));
+ int expected_ovf = base::bits::SignedAddOverflow32(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
CHECK_EQ(expected_val, actual_val);
}
}
{
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- Node* add = m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i));
+ Node* add = m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
- int expected_ovf =
- base::bits::SignedAddOverflow32(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, m.Call(*j));
+ int expected_ovf = base::bits::SignedAddOverflow32(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
CHECK_EQ(expected_val, actual_val);
}
}
FOR_INT32_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m;
Node* add =
- m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
+ m.Int32AddWithOverflow(m.Int32Constant(i), m.Int32Constant(j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
- int expected_ovf = base::bits::SignedAddOverflow32(*i, *j, &expected_val);
+ int expected_ovf = base::bits::SignedAddOverflow32(i, j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
CHECK_EQ(expected_val, actual_val);
}
@@ -5399,9 +5384,8 @@ TEST(RunInt32AddWithOverflowInBranchP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected;
- if (base::bits::SignedAddOverflow32(*i, *j, &expected))
- expected = constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ if (base::bits::SignedAddOverflow32(i, j, &expected)) expected = constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -5419,8 +5403,8 @@ TEST(RunInt32SubWithOverflowP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected_val;
- int expected_ovf = base::bits::SignedSubOverflow32(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ int expected_ovf = base::bits::SignedSubOverflow32(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(i, j));
CHECK_EQ(expected_val, actual_val);
}
}
@@ -5432,41 +5416,39 @@ TEST(RunInt32SubWithOverflowImm) {
FOR_INT32_INPUTS(i) {
{
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- Node* add = m.Int32SubWithOverflow(m.Int32Constant(*i), m.Parameter(0));
+ Node* add = m.Int32SubWithOverflow(m.Int32Constant(i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
- int expected_ovf =
- base::bits::SignedSubOverflow32(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, m.Call(*j));
+ int expected_ovf = base::bits::SignedSubOverflow32(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
CHECK_EQ(expected_val, actual_val);
}
}
{
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- Node* add = m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(*i));
+ Node* add = m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
- int expected_ovf =
- base::bits::SignedSubOverflow32(*j, *i, &expected_val);
- CHECK_EQ(expected_ovf, m.Call(*j));
+ int expected_ovf = base::bits::SignedSubOverflow32(j, i, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
CHECK_EQ(expected_val, actual_val);
}
}
FOR_INT32_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m;
Node* add =
- m.Int32SubWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
+ m.Int32SubWithOverflow(m.Int32Constant(i), m.Int32Constant(j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
- int expected_ovf = base::bits::SignedSubOverflow32(*i, *j, &expected_val);
+ int expected_ovf = base::bits::SignedSubOverflow32(i, j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
CHECK_EQ(expected_val, actual_val);
}
@@ -5490,9 +5472,8 @@ TEST(RunInt32SubWithOverflowInBranchP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected;
- if (base::bits::SignedSubOverflow32(*i, *j, &expected))
- expected = constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ if (base::bits::SignedSubOverflow32(i, j, &expected)) expected = constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -5509,8 +5490,8 @@ TEST(RunInt32MulWithOverflowP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected_val;
- int expected_ovf = base::bits::SignedMulOverflow32(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ int expected_ovf = base::bits::SignedMulOverflow32(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(i, j));
if (!expected_ovf) {
CHECK_EQ(expected_val, actual_val);
}
@@ -5523,15 +5504,14 @@ TEST(RunInt32MulWithOverflowImm) {
FOR_INT32_INPUTS(i) {
{
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- Node* add = m.Int32MulWithOverflow(m.Int32Constant(*i), m.Parameter(0));
+ Node* add = m.Int32MulWithOverflow(m.Int32Constant(i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
- int expected_ovf =
- base::bits::SignedMulOverflow32(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, m.Call(*j));
+ int expected_ovf = base::bits::SignedMulOverflow32(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
if (!expected_ovf) {
CHECK_EQ(expected_val, actual_val);
}
@@ -5539,15 +5519,14 @@ TEST(RunInt32MulWithOverflowImm) {
}
{
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- Node* add = m.Int32MulWithOverflow(m.Parameter(0), m.Int32Constant(*i));
+ Node* add = m.Int32MulWithOverflow(m.Parameter(0), m.Int32Constant(i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
- int expected_ovf =
- base::bits::SignedMulOverflow32(*i, *j, &expected_val);
- CHECK_EQ(expected_ovf, m.Call(*j));
+ int expected_ovf = base::bits::SignedMulOverflow32(i, j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(j));
if (!expected_ovf) {
CHECK_EQ(expected_val, actual_val);
}
@@ -5556,12 +5535,12 @@ TEST(RunInt32MulWithOverflowImm) {
FOR_INT32_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m;
Node* add =
- m.Int32MulWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
+ m.Int32MulWithOverflow(m.Int32Constant(i), m.Int32Constant(j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
- int expected_ovf = base::bits::SignedMulOverflow32(*i, *j, &expected_val);
+ int expected_ovf = base::bits::SignedMulOverflow32(i, j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
if (!expected_ovf) {
CHECK_EQ(expected_val, actual_val);
@@ -5586,9 +5565,8 @@ TEST(RunInt32MulWithOverflowInBranchP) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected;
- if (base::bits::SignedMulOverflow32(*i, *j, &expected))
- expected = constant;
- CHECK_EQ(expected, bt.call(*i, *j));
+ if (base::bits::SignedMulOverflow32(i, j, &expected)) expected = constant;
+ CHECK_EQ(expected, bt.call(i, j));
}
}
}
@@ -5614,44 +5592,44 @@ TEST(RunWord64EqualInBranchP) {
TEST(RunChangeInt32ToInt64P) {
- if (kPointerSize < 8) return;
+ if (kSystemPointerSize < 8) return;
int64_t actual = -1;
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.StoreToPointer(&actual, MachineRepresentation::kWord64,
m.ChangeInt32ToInt64(m.Parameter(0)));
m.Return(m.Int32Constant(0));
FOR_INT32_INPUTS(i) {
- int64_t expected = *i;
- CHECK_EQ(0, m.Call(*i));
+ int64_t expected = i;
+ CHECK_EQ(0, m.Call(i));
CHECK_EQ(expected, actual);
}
}
TEST(RunChangeUint32ToUint64P) {
- if (kPointerSize < 8) return;
+ if (kSystemPointerSize < 8) return;
int64_t actual = -1;
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
m.StoreToPointer(&actual, MachineRepresentation::kWord64,
m.ChangeUint32ToUint64(m.Parameter(0)));
m.Return(m.Int32Constant(0));
FOR_UINT32_INPUTS(i) {
- int64_t expected = static_cast<uint64_t>(*i);
- CHECK_EQ(0, m.Call(*i));
+ int64_t expected = static_cast<uint64_t>(i);
+ CHECK_EQ(0, m.Call(i));
CHECK_EQ(expected, actual);
}
}
TEST(RunTruncateInt64ToInt32P) {
- if (kPointerSize < 8) return;
+ if (kSystemPointerSize < 8) return;
int64_t expected = -1;
RawMachineAssemblerTester<int32_t> m;
m.Return(m.TruncateInt64ToInt32(
m.LoadFromPointer(&expected, MachineType::Int64())));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- expected = (static_cast<uint64_t>(*j) << 32) | *i;
+ expected = (static_cast<uint64_t>(j) << 32) | i;
CHECK_EQ(static_cast<int32_t>(expected), m.Call());
}
}
@@ -5739,17 +5717,15 @@ TEST(RunChangeFloat32ToFloat64) {
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) {
- CHECK_DOUBLE_EQ(static_cast<double>(*i), m.Call(*i));
- }
+ FOR_FLOAT32_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(i), m.Call(i)); }
}
TEST(RunFloat32Constant) {
FOR_FLOAT32_INPUTS(i) {
BufferedRawMachineAssemblerTester<float> m;
- m.Return(m.Float32Constant(*i));
- CHECK_FLOAT_EQ(*i, m.Call());
+ m.Return(m.Float32Constant(i));
+ CHECK_FLOAT_EQ(i, m.Call());
}
}
@@ -5758,8 +5734,8 @@ TEST(RunFloat64ExtractLowWord32) {
BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Float64());
m.Return(m.Float64ExtractLowWord32(m.Parameter(0)));
FOR_FLOAT64_INPUTS(i) {
- uint32_t expected = static_cast<uint32_t>(bit_cast<uint64_t>(*i));
- CHECK_EQ(expected, m.Call(*i));
+ uint32_t expected = static_cast<uint32_t>(bit_cast<uint64_t>(i));
+ CHECK_EQ(expected, m.Call(i));
}
}
@@ -5768,8 +5744,8 @@ TEST(RunFloat64ExtractHighWord32) {
BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Float64());
m.Return(m.Float64ExtractHighWord32(m.Parameter(0)));
FOR_FLOAT64_INPUTS(i) {
- uint32_t expected = static_cast<uint32_t>(bit_cast<uint64_t>(*i) >> 32);
- CHECK_EQ(expected, m.Call(*i));
+ uint32_t expected = static_cast<uint32_t>(bit_cast<uint64_t>(i) >> 32);
+ CHECK_EQ(expected, m.Call(i));
}
}
@@ -5781,9 +5757,9 @@ TEST(RunFloat64InsertLowWord32) {
FOR_FLOAT64_INPUTS(i) {
FOR_INT32_INPUTS(j) {
double expected =
- bit_cast<double>((bit_cast<uint64_t>(*i) & ~(uint64_t{0xFFFFFFFF})) |
- (static_cast<uint64_t>(bit_cast<uint32_t>(*j))));
- CHECK_DOUBLE_EQ(expected, m.Call(*i, *j));
+ bit_cast<double>((bit_cast<uint64_t>(i) & ~(uint64_t{0xFFFFFFFF})) |
+ (static_cast<uint64_t>(bit_cast<uint32_t>(j))));
+ CHECK_DOUBLE_EQ(expected, m.Call(i, j));
}
}
}
@@ -5795,10 +5771,10 @@ TEST(RunFloat64InsertHighWord32) {
m.Return(m.Float64InsertHighWord32(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint64_t expected = (bit_cast<uint64_t>(*i) & 0xFFFFFFFF) |
- (static_cast<uint64_t>(*j) << 32);
+ uint64_t expected = (bit_cast<uint64_t>(i) & 0xFFFFFFFF) |
+ (static_cast<uint64_t>(j) << 32);
- CHECK_DOUBLE_EQ(bit_cast<double>(expected), m.Call(*i, *j));
+ CHECK_DOUBLE_EQ(bit_cast<double>(expected), m.Call(i, j));
}
}
}
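Editor's note: the extract/insert tests above just compare against the raw IEEE-754 bit halves of the input. A standalone sketch, with bit_cast approximated by memcpy:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    uint64_t DoubleBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return bits;
    }

    int main() {
      double d = 1.0;  // IEEE-754 bits: 0x3FF0000000000000
      assert(static_cast<uint32_t>(DoubleBits(d)) == 0u);                 // low word
      assert(static_cast<uint32_t>(DoubleBits(d) >> 32) == 0x3FF00000u);  // high word
      return 0;
    }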
@@ -5807,46 +5783,38 @@ TEST(RunFloat64InsertHighWord32) {
TEST(RunFloat32Abs) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
m.Return(m.Float32Abs(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(std::abs(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(std::abs(i), m.Call(i)); }
}
TEST(RunFloat64Abs) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Abs(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(std::abs(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(std::abs(i), m.Call(i)); }
}
TEST(RunFloat64Acos) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Acos(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::acos(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::acos(i), m.Call(i)); }
}
TEST(RunFloat64Acosh) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Acosh(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::acosh(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::acosh(i), m.Call(i)); }
}
TEST(RunFloat64Asin) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Asin(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::asin(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::asin(i), m.Call(i)); }
}
TEST(RunFloat64Asinh) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Asinh(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::asinh(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::asinh(i), m.Call(i)); }
}
TEST(RunFloat64Atan) {
@@ -5856,9 +5824,7 @@ TEST(RunFloat64Atan) {
CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
CHECK_DOUBLE_EQ(-0.0, m.Call(-0.0));
CHECK_DOUBLE_EQ(0.0, m.Call(0.0));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::atan(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::atan(i), m.Call(i)); }
}
TEST(RunFloat64Atanh) {
@@ -5870,9 +5836,7 @@ TEST(RunFloat64Atanh) {
CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-1.0));
CHECK_DOUBLE_EQ(-0.0, m.Call(-0.0));
CHECK_DOUBLE_EQ(0.0, m.Call(0.0));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::atanh(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::atanh(i), m.Call(i)); }
}
TEST(RunFloat64Atan2) {
@@ -5881,7 +5845,7 @@ TEST(RunFloat64Atan2) {
m.Return(m.Float64Atan2(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
- CHECK_DOUBLE_EQ(base::ieee754::atan2(*i, *j), m.Call(*i, *j));
+ CHECK_DOUBLE_EQ(base::ieee754::atan2(i, j), m.Call(i, j));
}
}
}
@@ -5891,7 +5855,7 @@ TEST(RunFloat64Cos) {
m.Return(m.Float64Cos(m.Parameter(0)));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::cos(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::cos(i), m.Call(i)); }
}
TEST(RunFloat64Cosh) {
@@ -5899,9 +5863,7 @@ TEST(RunFloat64Cosh) {
m.Return(m.Float64Cosh(m.Parameter(0)));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::cosh(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::cosh(i), m.Call(i)); }
}
TEST(RunFloat64Exp) {
@@ -5914,7 +5876,7 @@ TEST(RunFloat64Exp) {
CHECK_DOUBLE_EQ(1.0, m.Call(0.0));
CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
m.Call(std::numeric_limits<double>::infinity()));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::exp(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::exp(i), m.Call(i)); }
}
TEST(RunFloat64Expm1) {
@@ -5925,9 +5887,7 @@ TEST(RunFloat64Expm1) {
CHECK_EQ(-1.0, m.Call(-std::numeric_limits<double>::infinity()));
CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
m.Call(std::numeric_limits<double>::infinity()));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::expm1(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::expm1(i), m.Call(i)); }
}
TEST(RunFloat64Log) {
@@ -5942,7 +5902,7 @@ TEST(RunFloat64Log) {
CHECK_DOUBLE_EQ(0.0, m.Call(1.0));
CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
m.Call(std::numeric_limits<double>::infinity()));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::log(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::log(i), m.Call(i)); }
}
TEST(RunFloat64Log1p) {
@@ -5956,9 +5916,7 @@ TEST(RunFloat64Log1p) {
CHECK_DOUBLE_EQ(-0.0, m.Call(-0.0));
CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
m.Call(std::numeric_limits<double>::infinity()));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::log1p(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::log1p(i), m.Call(i)); }
}
TEST(RunFloat64Log2) {
@@ -5973,9 +5931,7 @@ TEST(RunFloat64Log2) {
CHECK_DOUBLE_EQ(0.0, m.Call(1.0));
CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
m.Call(std::numeric_limits<double>::infinity()));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::log2(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::log2(i), m.Call(i)); }
}
TEST(RunFloat64Log10) {
@@ -5989,9 +5945,7 @@ TEST(RunFloat64Log10) {
CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(0.0));
CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
m.Call(std::numeric_limits<double>::infinity()));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::log10(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::log10(i), m.Call(i)); }
}
TEST(RunFloat64Cbrt) {
@@ -6003,9 +5957,7 @@ TEST(RunFloat64Cbrt) {
m.Call(std::numeric_limits<double>::infinity()));
CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(),
m.Call(-std::numeric_limits<double>::infinity()));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::cbrt(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::cbrt(i), m.Call(i)); }
}
TEST(RunFloat64Sin) {
@@ -6013,7 +5965,7 @@ TEST(RunFloat64Sin) {
m.Return(m.Float64Sin(m.Parameter(0)));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::sin(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::sin(i), m.Call(i)); }
}
TEST(RunFloat64Sinh) {
@@ -6021,9 +5973,7 @@ TEST(RunFloat64Sinh) {
m.Return(m.Float64Sinh(m.Parameter(0)));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::sinh(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::sinh(i), m.Call(i)); }
}
TEST(RunFloat64Tan) {
@@ -6031,7 +5981,7 @@ TEST(RunFloat64Tan) {
m.Return(m.Float64Tan(m.Parameter(0)));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::tan(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::tan(i), m.Call(i)); }
}
TEST(RunFloat64Tanh) {
@@ -6039,9 +5989,7 @@ TEST(RunFloat64Tanh) {
m.Return(m.Float64Tanh(m.Parameter(0)));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
- FOR_FLOAT64_INPUTS(i) {
- CHECK_DOUBLE_EQ(base::ieee754::tanh(*i), m.Call(*i));
- }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(base::ieee754::tanh(i), m.Call(i)); }
}
static double two_30 = 1 << 30; // 2^30 is a smi boundary.
@@ -6148,7 +6096,7 @@ TEST(RunFloat32RoundDown) {
m.Return(m.Float32RoundDown(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(floorf(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(floorf(i), m.Call(i)); }
}
@@ -6158,7 +6106,7 @@ TEST(RunFloat64RoundDown1) {
m.Return(m.Float64RoundDown(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(floor(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(floor(i), m.Call(i)); }
}
@@ -6180,7 +6128,7 @@ TEST(RunFloat32RoundUp) {
if (!m.machine()->Float32RoundUp().IsSupported()) return;
m.Return(m.Float32RoundUp(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(ceilf(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(ceilf(i), m.Call(i)); }
}
@@ -6189,7 +6137,7 @@ TEST(RunFloat64RoundUp) {
if (!m.machine()->Float64RoundUp().IsSupported()) return;
m.Return(m.Float64RoundUp(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ceil(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ceil(i), m.Call(i)); }
}
@@ -6198,7 +6146,7 @@ TEST(RunFloat32RoundTiesEven) {
if (!m.machine()->Float32RoundTiesEven().IsSupported()) return;
m.Return(m.Float32RoundTiesEven(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(nearbyint(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(nearbyint(i), m.Call(i)); }
}
@@ -6207,7 +6155,7 @@ TEST(RunFloat64RoundTiesEven) {
if (!m.machine()->Float64RoundTiesEven().IsSupported()) return;
m.Return(m.Float64RoundTiesEven(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(nearbyint(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(nearbyint(i), m.Call(i)); }
}
@@ -6217,7 +6165,7 @@ TEST(RunFloat32RoundTruncate) {
m.Return(m.Float32RoundTruncate(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(truncf(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(truncf(i), m.Call(i)); }
}
@@ -6283,7 +6231,7 @@ TEST(RunCallCFunction1) {
m.Return(m.CallCFunction1(MachineType::Int32(), MachineType::Int32(),
function, m.Parameter(0)));
FOR_INT32_INPUTS(i) {
- int32_t const expected = *i;
+ int32_t const expected = i;
CHECK_EQ(expected, m.Call(expected));
}
}
@@ -6298,9 +6246,9 @@ TEST(RunCallCFunction2) {
MachineType::Int32(), function, m.Parameter(0),
m.Parameter(1)));
FOR_INT32_INPUTS(i) {
- int32_t const x = *i;
+ int32_t const x = i;
FOR_INT32_INPUTS(j) {
- int32_t const y = *j;
+ int32_t const y = j;
CHECK_EQ(base::SubWithWraparound(x, y), m.Call(x, y));
}
}
@@ -6318,7 +6266,7 @@ TEST(RunCallCFunction8) {
MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
function, param, param, param, param, param, param, param, param));
FOR_INT32_INPUTS(i) {
- int32_t const x = *i;
+ int32_t const x = i;
CHECK_EQ(base::MulWithWraparound(x, 8), m.Call(x));
}
}
@@ -6342,7 +6290,7 @@ TEST(RunCallCFunction9) {
m.Int32Add(param, m.Int32Constant(7)),
m.Int32Add(param, m.Int32Constant(8))));
FOR_INT32_INPUTS(i) {
- int32_t const x = *i;
+ int32_t const x = i;
CHECK_EQ(base::AddWithWraparound(base::MulWithWraparound(x, 9), 36),
m.Call(x));
}
@@ -6357,8 +6305,8 @@ TEST(RunChangeFloat64ToInt64) {
m.Return(m.ChangeFloat64ToInt64(m.Parameter(0)));
FOR_INT64_INPUTS(i) {
- double input = static_cast<double>(*i);
- if (static_cast<int64_t>(input) == *i) {
+ double input = static_cast<double>(i);
+ if (static_cast<int64_t>(input) == i) {
CHECK_EQ(static_cast<int64_t>(input), m.Call(input));
}
}
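Editor's note: the guard static_cast<int64_t>(input) == i restricts the check to int64 values that a double represents exactly; anything past 2^53 may round. A small sketch of the cutoff:

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t exact = int64_t{1} << 53;          // representable in a double
      int64_t inexact = (int64_t{1} << 53) + 1;  // rounds back to 2^53
      assert(static_cast<int64_t>(static_cast<double>(exact)) == exact);
      assert(static_cast<int64_t>(static_cast<double>(inexact)) != inexact);
      return 0;
    }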
@@ -6368,9 +6316,9 @@ TEST(RunChangeInt64ToFloat64) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Int64());
m.Return(m.ChangeInt64ToFloat64(m.Parameter(0)));
FOR_INT64_INPUTS(i) {
- double output = static_cast<double>(*i);
- if (static_cast<int64_t>(output) == *i) {
- CHECK_EQ(output, m.Call(*i));
+ double output = static_cast<double>(i);
+ if (static_cast<int64_t>(output) == i) {
+ CHECK_EQ(output, m.Call(i));
}
}
}
@@ -6384,7 +6332,7 @@ TEST(RunBitcastInt64ToFloat64) {
m.BitcastInt64ToFloat64(m.LoadFromPointer(&input, MachineType::Int64())));
m.Return(m.Int32Constant(11));
FOR_INT64_INPUTS(i) {
- input = *i;
+ input = i;
CHECK_EQ(11, m.Call());
Float64 expected = Float64::FromBits(input);
CHECK_EQ(expected.get_bits(), output.get_bits());
@@ -6396,7 +6344,7 @@ TEST(RunBitcastFloat64ToInt64) {
BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float64());
m.Return(m.BitcastFloat64ToInt64(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_EQ(bit_cast<int64_t>(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_EQ(bit_cast<int64_t>(i), m.Call(i)); }
}
@@ -6405,7 +6353,7 @@ TEST(RunTryTruncateFloat32ToInt64WithoutCheck) {
m.Return(m.TryTruncateFloat32ToInt64(m.Parameter(0)));
FOR_INT64_INPUTS(i) {
- float input = static_cast<float>(*i);
+ float input = static_cast<float>(i);
if (input < static_cast<float>(INT64_MAX) &&
input >= static_cast<float>(INT64_MIN)) {
CHECK_EQ(static_cast<int64_t>(input), m.Call(input));
@@ -6424,12 +6372,12 @@ TEST(RunTryTruncateFloat32ToInt64WithCheck) {
m.Return(val);
FOR_FLOAT32_INPUTS(i) {
- if (*i < static_cast<float>(INT64_MAX) &&
- *i >= static_cast<float>(INT64_MIN)) {
- CHECK_EQ(static_cast<int64_t>(*i), m.Call(*i));
+ if (i < static_cast<float>(INT64_MAX) &&
+ i >= static_cast<float>(INT64_MIN)) {
+ CHECK_EQ(static_cast<int64_t>(i), m.Call(i));
CHECK_NE(0, success);
} else {
- m.Call(*i);
+ m.Call(i);
CHECK_EQ(0, success);
}
}
@@ -6441,7 +6389,7 @@ TEST(RunTryTruncateFloat64ToInt64WithoutCheck) {
m.Return(m.TryTruncateFloat64ToInt64(m.Parameter(0)));
FOR_INT64_INPUTS(i) {
- double input = static_cast<double>(*i);
+ double input = static_cast<double>(i);
CHECK_EQ(static_cast<int64_t>(input), m.Call(input));
}
}
@@ -6457,13 +6405,13 @@ TEST(RunTryTruncateFloat64ToInt64WithCheck) {
m.Return(val);
FOR_FLOAT64_INPUTS(i) {
- if (*i < static_cast<double>(INT64_MAX) &&
- *i >= static_cast<double>(INT64_MIN)) {
+ if (i < static_cast<double>(INT64_MAX) &&
+ i >= static_cast<double>(INT64_MIN)) {
// Conversions within this range should succeed.
- CHECK_EQ(static_cast<int64_t>(*i), m.Call(*i));
+ CHECK_EQ(static_cast<int64_t>(i), m.Call(i));
CHECK_NE(0, success);
} else {
- m.Call(*i);
+ m.Call(i);
CHECK_EQ(0, success);
}
}
@@ -6475,7 +6423,7 @@ TEST(RunTryTruncateFloat32ToUint64WithoutCheck) {
m.Return(m.TryTruncateFloat32ToUint64(m.Parameter(0)));
FOR_UINT64_INPUTS(i) {
- float input = static_cast<float>(*i);
+ float input = static_cast<float>(i);
// This condition on 'input' is required because
// static_cast<float>(UINT64_MAX) results in a value outside uint64 range.
if (input < static_cast<float>(UINT64_MAX)) {
@@ -6495,12 +6443,12 @@ TEST(RunTryTruncateFloat32ToUint64WithCheck) {
m.Return(val);
FOR_FLOAT32_INPUTS(i) {
- if (*i < static_cast<float>(UINT64_MAX) && *i > -1.0) {
+ if (i < static_cast<float>(UINT64_MAX) && i > -1.0) {
// Conversions within this range should succeed.
- CHECK_EQ(static_cast<uint64_t>(*i), m.Call(*i));
+ CHECK_EQ(static_cast<uint64_t>(i), m.Call(i));
CHECK_NE(0, success);
} else {
- m.Call(*i);
+ m.Call(i);
CHECK_EQ(0, success);
}
}
@@ -6512,7 +6460,7 @@ TEST(RunTryTruncateFloat64ToUint64WithoutCheck) {
m.Return(m.TryTruncateFloat64ToUint64(m.Parameter(0)));
FOR_UINT64_INPUTS(j) {
- double input = static_cast<double>(*j);
+ double input = static_cast<double>(j);
if (input < static_cast<float>(UINT64_MAX)) {
CHECK_EQ(static_cast<uint64_t>(input), m.Call(input));
@@ -6531,12 +6479,12 @@ TEST(RunTryTruncateFloat64ToUint64WithCheck) {
m.Return(val);
FOR_FLOAT64_INPUTS(i) {
- if (*i < 18446744073709551616.0 && *i > -1) {
+ if (i < 18446744073709551616.0 && i > -1) {
// Conversions within this range should succeed.
- CHECK_EQ(static_cast<uint64_t>(*i), static_cast<uint64_t>(m.Call(*i)));
+ CHECK_EQ(static_cast<uint64_t>(i), static_cast<uint64_t>(m.Call(i)));
CHECK_NE(0, success);
} else {
- m.Call(*i);
+ m.Call(i);
CHECK_EQ(0, success);
}
}
@@ -6546,14 +6494,14 @@ TEST(RunTryTruncateFloat64ToUint64WithCheck) {
TEST(RunRoundInt64ToFloat32) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Int64());
m.Return(m.RoundInt64ToFloat32(m.Parameter(0)));
- FOR_INT64_INPUTS(i) { CHECK_EQ(static_cast<float>(*i), m.Call(*i)); }
+ FOR_INT64_INPUTS(i) { CHECK_EQ(static_cast<float>(i), m.Call(i)); }
}
TEST(RunRoundInt64ToFloat64) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Int64());
m.Return(m.RoundInt64ToFloat64(m.Parameter(0)));
- FOR_INT64_INPUTS(i) { CHECK_EQ(static_cast<double>(*i), m.Call(*i)); }
+ FOR_INT64_INPUTS(i) { CHECK_EQ(static_cast<double>(i), m.Call(i)); }
}
@@ -6745,7 +6693,7 @@ TEST(RunBitcastFloat32ToInt32) {
m.Return(m.BitcastFloat32ToInt32(
m.LoadFromPointer(&input, MachineType::Float32())));
FOR_FLOAT32_INPUTS(i) {
- input = *i;
+ input = i;
int32_t expected = bit_cast<int32_t>(input);
CHECK_EQ(expected, m.Call());
}
@@ -6756,8 +6704,8 @@ TEST(RunRoundInt32ToFloat32) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Int32());
m.Return(m.RoundInt32ToFloat32(m.Parameter(0)));
FOR_INT32_INPUTS(i) {
- volatile float expected = static_cast<float>(*i);
- CHECK_EQ(expected, m.Call(*i));
+ volatile float expected = static_cast<float>(i);
+ CHECK_EQ(expected, m.Call(i));
}
}
@@ -6766,8 +6714,8 @@ TEST(RunRoundUint32ToFloat32) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Uint32());
m.Return(m.RoundUint32ToFloat32(m.Parameter(0)));
FOR_UINT32_INPUTS(i) {
- volatile float expected = static_cast<float>(*i);
- CHECK_EQ(expected, m.Call(*i));
+ volatile float expected = static_cast<float>(i);
+ CHECK_EQ(expected, m.Call(i));
}
}
@@ -6781,7 +6729,7 @@ TEST(RunBitcastInt32ToFloat32) {
m.BitcastInt32ToFloat32(m.LoadFromPointer(&input, MachineType::Int32())));
m.Return(m.Int32Constant(11));
FOR_INT32_INPUTS(i) {
- input = *i;
+ input = i;
CHECK_EQ(11, m.Call());
Float32 expected = Float32::FromBits(input);
CHECK_EQ(expected.get_bits(), output.get_bits());
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 19c6abb8fc..7037bd5f2b 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -143,7 +143,7 @@ class Allocator {
}
int StackWords(MachineType type) {
int size = 1 << ElementSizeLog2Of(type.representation());
- return size <= kPointerSize ? 1 : size / kPointerSize;
+ return size <= kSystemPointerSize ? 1 : size / kSystemPointerSize;
}
void Reset() {
stack_offset_ = 0;
@@ -542,9 +542,9 @@ static void TestInt32Sub(CallDescriptor* desc) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) -
- static_cast<uint32_t>(*j));
- int32_t result = runnable.Call(*i, *j);
+ int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(i) -
+ static_cast<uint32_t>(j));
+ int32_t result = runnable.Call(i, j);
CHECK_EQ(expected, result);
}
}
@@ -831,7 +831,8 @@ TEST_INT32_SELECT(63)
TEST(Int64Select_registers) {
if (GetRegConfig()->num_allocatable_general_registers() < 2) return;
- if (kPointerSize < 8) return; // TODO(titzer): int64 on 32-bit platforms
+ // TODO(titzer): int64 on 32-bit platforms
+ if (kSystemPointerSize < 8) return;
int rarray[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
ArgsBuffer<int64_t>::Sig sig(2);
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 45750e7e28..34079f1032 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -317,14 +317,9 @@ class ValueHelper {
}
};
-// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
-// Watch out, these macros aren't hygenic; they pollute your scope. Thanks STL.
-#define FOR_INPUTS(ctype, itype, var) \
- Vector<const ctype> var##_vec = \
- ::v8::internal::compiler::ValueHelper::itype##_vector(); \
- for (Vector<const ctype>::iterator var = var##_vec.begin(), \
- var##_end = var##_vec.end(); \
- var != var##_end; ++var)
+// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... i ... }
+#define FOR_INPUTS(ctype, itype, var) \
+ for (ctype var : ::v8::internal::compiler::ValueHelper::itype##_vector())
#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
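Editor's note: this is the change that drives every *i -> i edit above. The macro used to bind var to a Vector iterator (hence the dereference) and now binds it to the element itself via a range-based for. A standalone sketch of the new shape, using a hypothetical stand-in for ValueHelper::int32_vector():

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-in for ValueHelper::int32_vector().
    static const std::vector<int32_t>& int32_vector() {
      static const std::vector<int32_t> v = {0, 1, -1, INT32_MIN, INT32_MAX};
      return v;
    }

    #define FOR_INT32_INPUTS(var) for (int32_t var : int32_vector())

    int main() {
      FOR_INT32_INPUTS(i) { std::cout << i << "\n"; }  // i is a value, not an iterator
      return 0;
    }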
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 084bf6ef1b..0e437ed9d8 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -15,6 +15,10 @@ namespace v8 {
namespace internal {
namespace heap {
+void InvokeScavenge() { CcTest::CollectGarbage(i::NEW_SPACE); }
+
+void InvokeMarkSweep() { CcTest::CollectAllGarbage(); }
+
void SealCurrentObjects(Heap* heap) {
CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage();
@@ -26,7 +30,8 @@ void SealCurrentObjects(Heap* heap) {
}
int FixedArrayLenFromSize(int size) {
- return (size - FixedArray::kHeaderSize) / kTaggedSize;
+ return Min((size - FixedArray::kHeaderSize) / kTaggedSize,
+ FixedArray::kMaxRegularLength);
}
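Editor's note: the Min keeps oversized requests from yielding array lengths above the regular-object limit. Illustrative arithmetic only; the constants below are assumptions, not V8's real values:

    // Suppose kHeaderSize = 16, kTaggedSize = 8, kMaxRegularLength = 131068:
    //   size = 2 MB  ->  raw length = (2097152 - 16) / 8 = 262142
    //   clamped length = Min(262142, 131068) = 131068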
std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
@@ -101,9 +106,10 @@ std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
}
}
handles.push_back(isolate->factory()->NewFixedArray(length, tenure));
- CHECK((tenure == NOT_TENURED && Heap::InNewSpace(*handles.back())) ||
+ CHECK((tenure == NOT_TENURED &&
+ heap->new_space()->Contains(*handles.back())) ||
(tenure == TENURED && heap->InOldSpace(*handles.back())));
- free_memory -= allocate_memory;
+ free_memory -= handles.back()->Size();
}
return handles;
}
@@ -148,6 +154,7 @@ void SimulateFullSpace(v8::internal::NewSpace* space,
}
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
+ const double kStepSizeInMs = 100;
CHECK(FLAG_incremental_marking);
i::IncrementalMarking* marking = heap->incremental_marking();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
@@ -166,8 +173,8 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
if (!force_completion) return;
while (!marking->IsComplete()) {
- marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- i::StepOrigin::kV8);
+ marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ i::StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
diff --git a/deps/v8/test/cctest/heap/heap-utils.h b/deps/v8/test/cctest/heap/heap-utils.h
index a494f54210..30a18b5895 100644
--- a/deps/v8/test/cctest/heap/heap-utils.h
+++ b/deps/v8/test/cctest/heap/heap-utils.h
@@ -5,6 +5,7 @@
#ifndef HEAP_HEAP_UTILS_H_
#define HEAP_HEAP_UTILS_H_
+#include "src/api-inl.h"
#include "src/heap/heap.h"
namespace v8 {
@@ -52,6 +53,17 @@ void GcAndSweep(Heap* heap, AllocationSpace space);
void ForceEvacuationCandidate(Page* page);
+void InvokeScavenge();
+
+void InvokeMarkSweep();
+
+template <typename GlobalOrPersistent>
+bool InYoungGeneration(v8::Isolate* isolate, const GlobalOrPersistent& global) {
+ v8::HandleScope scope(isolate);
+ auto tmp = global.Get(isolate);
+ return i::Heap::InYoungGeneration(*v8::Utils::OpenHandle(*tmp));
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
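Editor's note: a hedged usage sketch for the new helpers, as a cctest might call them; the Global setup is assumed context, not from the diff:

    // Inside a cctest, with |isolate| and |object| already set up:
    v8::Global<v8::Object> global(isolate, object);
    heap::InvokeScavenge();  // young-generation GC
    if (heap::InYoungGeneration(isolate, global)) {
      // |object| survived the scavenge without being promoted.
    }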
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index 89bb25b56c..cf01d9fe9b 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -30,6 +30,7 @@
#include "src/accessors.h"
#include "src/api-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/property.h"
@@ -48,15 +49,16 @@ Handle<Object> HeapTester::TestAllocateAfterFailures() {
AlwaysAllocateScope scope(CcTest::i_isolate());
Heap* heap = CcTest::heap();
int size = FixedArray::SizeFor(100);
- // New space.
- HeapObject obj = heap->AllocateRaw(size, NEW_SPACE).ToObjectChecked();
+ // Young generation.
+ HeapObject obj =
+ heap->AllocateRaw(size, AllocationType::kYoung).ToObjectChecked();
// In order to pass heap verification on Isolate teardown, mark the
// allocated area as a filler.
heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
- // Old space.
+ // Old generation.
heap::SimulateFullSpace(heap->old_space());
- obj = heap->AllocateRaw(size, OLD_SPACE).ToObjectChecked();
+ obj = heap->AllocateRaw(size, AllocationType::kOld).ToObjectChecked();
heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
// Large object space.
@@ -67,24 +69,24 @@ Handle<Object> HeapTester::TestAllocateAfterFailures() {
CHECK_GT(kLargeObjectSpaceFillerSize,
static_cast<size_t>(heap->old_space()->AreaSize()));
while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
- obj = heap->AllocateRaw(kLargeObjectSpaceFillerSize, OLD_SPACE)
+ obj = heap->AllocateRaw(kLargeObjectSpaceFillerSize, AllocationType::kOld)
.ToObjectChecked();
heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
}
- obj = heap->AllocateRaw(kLargeObjectSpaceFillerSize, OLD_SPACE)
+ obj = heap->AllocateRaw(kLargeObjectSpaceFillerSize, AllocationType::kOld)
.ToObjectChecked();
heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
// Map space.
heap::SimulateFullSpace(heap->map_space());
- obj = heap->AllocateRaw(Map::kSize, MAP_SPACE).ToObjectChecked();
+ obj = heap->AllocateRaw(Map::kSize, AllocationType::kMap).ToObjectChecked();
heap->CreateFillerObjectAt(obj->address(), Map::kSize,
ClearRecordedSlots::kNo);
// Code space.
heap::SimulateFullSpace(heap->code_space());
size = CcTest::i_isolate()->builtins()->builtin(Builtins::kIllegal)->Size();
- obj = heap->AllocateRaw(size, CODE_SPACE).ToObjectChecked();
+ obj = heap->AllocateRaw(size, AllocationType::kCode).ToObjectChecked();
heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return CcTest::i_isolate()->factory()->true_value();
}
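Editor's note: the hunk above is a mechanical rename from space enums to the new AllocationType. The mapping, exactly as it appears in the diff:

    // NEW_SPACE  -> AllocationType::kYoung
    // OLD_SPACE  -> AllocationType::kOld
    // MAP_SPACE  -> AllocationType::kMap
    // CODE_SPACE -> AllocationType::kCode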
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index daeccca777..9cc2d84ec9 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -308,10 +308,11 @@ TEST(ArrayBuffer_SemiSpaceCopyThenPagePromotion) {
UNINITIALIZED_TEST(ArrayBuffer_SemiSpaceCopyMultipleTasks) {
if (FLAG_optimize_for_size) return;
+ ManualGCScope manual_gc_scope;
// Test allocates JSArrayBuffer on different pages before triggering a
// full GC that performs the semispace copy. If parallelized, this test
// ensures proper synchronization in TSAN configurations.
- FLAG_min_semi_space_size = 2 * Page::kPageSize / MB;
+ FLAG_min_semi_space_size = Max(2 * Page::kPageSize / MB, 1);
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index 9fb989482c..eea4a6eb43 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -96,8 +97,9 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
const int objects_per_page = 10;
const int object_size =
- static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
- objects_per_page;
+ Min(kMaxRegularHeapObjectSize,
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+ objects_per_page);
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -175,8 +177,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
const int objects_per_page = 10;
const int object_size =
- static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
- objects_per_page;
+ Min(kMaxRegularHeapObjectSize,
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+ objects_per_page);
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -267,8 +270,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
const int objects_per_page = 10;
const int object_size =
- static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
- objects_per_page;
+ Min(kMaxRegularHeapObjectSize,
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+ objects_per_page);
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -302,7 +306,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
root_array->set(0, *compaction_page_handles.back());
Handle<FixedArray> new_space_array =
isolate->factory()->NewFixedArray(1, NOT_TENURED);
- CHECK(Heap::InNewSpace(*new_space_array));
+ CHECK(Heap::InYoungGeneration(*new_space_array));
compaction_page_handles.front()->set(1, *new_space_array);
CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
}
@@ -329,7 +333,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
while (current->get(0) != ReadOnlyRoots(heap).undefined_value()) {
current =
Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
- CHECK(!Heap::InNewSpace(*current));
+ CHECK(!Heap::InYoungGeneration(*current));
CHECK(current->IsFixedArray());
if (Page::FromHeapObject(*current) != to_be_aborted_page) {
in_place = false;
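
Note: the rename from Heap::InNewSpace to Heap::InYoungGeneration recurs across the remaining test files; in this V8 version the young generation spans both new space and the new large-object space, so the old name no longer matched its semantics. The converted check:

    CHECK(Heap::InYoungGeneration(*new_space_array));  // was Heap::InNewSpace
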
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index 5134392886..ace016dbd0 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -2,13 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <unordered_map>
+#include <vector>
+
#include "include/v8.h"
#include "src/api-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
#include "src/objects/module.h"
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
@@ -35,9 +40,14 @@ v8::Local<v8::Object> ConstructTraceableJSApiObject(
return scope.Escape(instance);
}
+enum class TracePrologueBehavior { kNoop, kCallV8WriteBarrier };
+
class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
public:
- explicit TestEmbedderHeapTracer(v8::Isolate* isolate) : isolate_(isolate) {}
+ TestEmbedderHeapTracer() = default;
+ TestEmbedderHeapTracer(TracePrologueBehavior prologue_behavior,
+ v8::Global<v8::Array> array)
+ : prologue_behavior_(prologue_behavior), array_(std::move(array)) {}
void RegisterV8References(
const std::vector<std::pair<void*, void*>>& embedder_fields) final {
@@ -45,13 +55,13 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
embedder_fields.begin(), embedder_fields.end());
}
- void AddReferenceForTracing(v8::Persistent<v8::Object>* persistent) {
- to_register_with_v8_.push_back(persistent);
+ void AddReferenceForTracing(v8::TracedGlobal<v8::Object>* global) {
+ to_register_with_v8_.push_back(global);
}
bool AdvanceTracing(double deadline_in_ms) final {
- for (auto persistent : to_register_with_v8_) {
- persistent->RegisterExternalReference(isolate_);
+ for (auto global : to_register_with_v8_) {
+ RegisterEmbedderReference(global->As<v8::Value>());
}
to_register_with_v8_.clear();
return true;
@@ -59,9 +69,15 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
bool IsTracingDone() final { return to_register_with_v8_.empty(); }
- void TracePrologue() final {}
+ void TracePrologue() final {
+ if (prologue_behavior_ == TracePrologueBehavior::kCallV8WriteBarrier) {
+ auto local = array_.Get(isolate());
+ local->Set(local->CreationContext(), 0, v8::Object::New(isolate()))
+ .Check();
+ }
+ }
+
void TraceEpilogue() final {}
- void AbortTracing() final {}
void EnterFinalPause(EmbedderStackState) final {}
bool IsRegisteredFromV8(void* first_field) const {
@@ -71,10 +87,20 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
return false;
}
+ void ConsiderTracedGlobalAsRoot(bool value) {
+ consider_traced_global_as_root_ = value;
+ }
+
+ bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) final {
+ return consider_traced_global_as_root_;
+ }
+
private:
- v8::Isolate* const isolate_;
std::vector<std::pair<void*, void*>> registered_from_v8_;
- std::vector<v8::Persistent<v8::Object>*> to_register_with_v8_;
+ std::vector<v8::TracedGlobal<v8::Object>*> to_register_with_v8_;
+ bool consider_traced_global_as_root_ = true;
+ TracePrologueBehavior prologue_behavior_ = TracePrologueBehavior::kNoop;
+ v8::Global<v8::Array> array_;
};
class TemporaryEmbedderHeapTracerScope {
@@ -101,7 +127,7 @@ TEST(V8RegisteringEmbedderReference) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer(isolate);
+ TestEmbedderHeapTracer tracer;
TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
@@ -121,19 +147,18 @@ TEST(EmbedderRegisteringV8Reference) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer(isolate);
+ TestEmbedderHeapTracer tracer;
TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- v8::Persistent<v8::Object> g;
+ v8::TracedGlobal<v8::Object> g;
{
v8::HandleScope inner_scope(isolate);
v8::Local<v8::Object> o =
v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
g.Reset(isolate, o);
- g.SetWeak();
}
tracer.AddReferenceForTracing(&g);
CcTest::CollectGarbage(i::OLD_SPACE);
@@ -155,7 +180,7 @@ TEST(TracingInRevivedSubgraph) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer(isolate);
+ TestEmbedderHeapTracer tracer;
TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
@@ -183,7 +208,7 @@ TEST(TracingInEphemerons) {
ManualGCScope manual_gc;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
- TestEmbedderHeapTracer tracer(isolate);
+ TestEmbedderHeapTracer tracer;
TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
@@ -214,7 +239,7 @@ TEST(FinalizeTracingIsNoopWhenNotMarking) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
- TestEmbedderHeapTracer tracer(isolate);
+ TestEmbedderHeapTracer tracer;
TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
// Finalize a potentially running garbage collection.
@@ -233,7 +258,7 @@ TEST(FinalizeTracingWhenMarking) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
- TestEmbedderHeapTracer tracer(isolate);
+ TestEmbedderHeapTracer tracer;
TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
// Finalize a potentially running garbage collection.
@@ -258,7 +283,7 @@ TEST(GarbageCollectionForTesting) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
- TestEmbedderHeapTracer tracer(isolate);
+ TestEmbedderHeapTracer tracer;
TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
int saved_gc_counter = i_isolate->heap()->gc_count();
@@ -266,6 +291,293 @@ TEST(GarbageCollectionForTesting) {
CHECK_GT(i_isolate->heap()->gc_count(), saved_gc_counter);
}
+namespace {
+
+void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ v8::TracedGlobal<v8::Object>* global) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object(v8::Object::New(isolate));
+ CHECK(!object.IsEmpty());
+ *global = v8::TracedGlobal<v8::Object>(isolate, object);
+ CHECK(!global->IsEmpty());
+}
+
+void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ v8::TracedGlobal<v8::Object>* global) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object(
+ ConstructTraceableJSApiObject(context, nullptr, nullptr));
+ CHECK(!object.IsEmpty());
+ *global = v8::TracedGlobal<v8::Object>(isolate, object);
+ CHECK(!global->IsEmpty());
+}
+
+enum class SurvivalMode { kSurvives, kDies };
+
+template <typename ModifierFunction, typename ConstructTracedGlobalFunction>
+void TracedGlobalTest(v8::Isolate* isolate,
+ ConstructTracedGlobalFunction construct_function,
+ ModifierFunction modifier_function, void (*gc_function)(),
+ SurvivalMode survives) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ v8::TracedGlobal<v8::Object> global;
+ construct_function(isolate, context, &global);
+ CHECK(InYoungGeneration(isolate, global));
+ modifier_function(global);
+ gc_function();
+ CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !global.IsEmpty());
+ CHECK_IMPLIES(survives == SurvivalMode::kDies, global.IsEmpty());
+}
+
+} // namespace
+
+TEST(TracedGlobalReset) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::TracedGlobal<v8::Object> traced;
+ ConstructJSObject(isolate, isolate->GetCurrentContext(), &traced);
+ CHECK(!traced.IsEmpty());
+ traced.Reset();
+ CHECK(traced.IsEmpty());
+}
+
+TEST(TracedGlobalInStdVector) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ std::vector<v8::TracedGlobal<v8::Object>> vec;
+ {
+ v8::HandleScope scope(isolate);
+ vec.emplace_back(isolate, v8::Object::New(isolate));
+ }
+ CHECK(!vec[0].IsEmpty());
+ InvokeMarkSweep();
+ CHECK(vec[0].IsEmpty());
+}
+
+TEST(TracedGlobalInStdUnorderedMap) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ std::unordered_map<int, v8::TracedGlobal<v8::Object>> map;
+ {
+ v8::HandleScope scope(isolate);
+ map.emplace(std::piecewise_construct, std::forward_as_tuple(1),
+ std::forward_as_tuple(isolate, v8::Object::New(isolate)));
+ }
+ CHECK(!map[1].IsEmpty());
+ InvokeMarkSweep();
+ CHECK(map[1].IsEmpty());
+}
+
+TEST(TracedGlobalToUnmodifiedJSObjectDiesOnMarkSweep) {
+ CcTest::InitializeVM();
+ TracedGlobalTest(
+ CcTest::isolate(), ConstructJSObject,
+ [](const TracedGlobal<v8::Object>& global) {}, InvokeMarkSweep,
+ SurvivalMode::kDies);
+}
+
+TEST(TracedGlobalToUnmodifiedJSObjectSurvivesMarkSweepWhenHeldAliveOtherwise) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Global<v8::Object> strong_global;
+ TracedGlobalTest(
+ CcTest::isolate(), ConstructJSObject,
+ [isolate, &strong_global](const TracedGlobal<v8::Object>& global) {
+ v8::HandleScope scope(isolate);
+ strong_global = v8::Global<v8::Object>(isolate, global.Get(isolate));
+ },
+ InvokeMarkSweep, SurvivalMode::kSurvives);
+}
+
+TEST(TracedGlobalToUnmodifiedJSObjectSurvivesScavenge) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ TracedGlobalTest(
+ CcTest::isolate(), ConstructJSObject,
+ [](const TracedGlobal<v8::Object>& global) {}, InvokeScavenge,
+ SurvivalMode::kSurvives);
+}
+
+TEST(TracedGlobalToUnmodifiedJSObjectSurvivesScavengeWhenExcludedFromRoots) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ TestEmbedderHeapTracer tracer;
+ TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ tracer.ConsiderTracedGlobalAsRoot(false);
+ TracedGlobalTest(
+ CcTest::isolate(), ConstructJSObject,
+ [](const TracedGlobal<v8::Object>& global) {}, InvokeScavenge,
+ SurvivalMode::kSurvives);
+}
+
+TEST(TracedGlobalToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ TestEmbedderHeapTracer tracer;
+ TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ tracer.ConsiderTracedGlobalAsRoot(true);
+ TracedGlobalTest(
+ CcTest::isolate(), ConstructJSApiObject,
+ [](const TracedGlobal<v8::Object>& global) {}, InvokeScavenge,
+ SurvivalMode::kSurvives);
+}
+
+TEST(TracedGlobalToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ TestEmbedderHeapTracer tracer;
+ TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ tracer.ConsiderTracedGlobalAsRoot(false);
+ TracedGlobalTest(
+ CcTest::isolate(), ConstructJSApiObject,
+ [](const TracedGlobal<v8::Object>& global) {}, InvokeScavenge,
+ SurvivalMode::kDies);
+}
+
+TEST(TracedGlobalWrapperClassId) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ TestEmbedderHeapTracer tracer;
+ TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+
+ v8::TracedGlobal<v8::Object> traced;
+ ConstructJSObject(isolate, isolate->GetCurrentContext(), &traced);
+ CHECK_EQ(0, traced.WrapperClassId());
+ traced.SetWrapperClassId(17);
+ CHECK_EQ(17, traced.WrapperClassId());
+}
+
+namespace {
+
+class TracedGlobalVisitor final
+ : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
+ public:
+ ~TracedGlobalVisitor() override = default;
+ void VisitTracedGlobalHandle(const TracedGlobal<Value>& value) final {
+ if (value.WrapperClassId() == 57) {
+ count_++;
+ }
+ }
+
+ size_t count() const { return count_; }
+
+ private:
+ size_t count_ = 0;
+};
+
+} // namespace
+
+TEST(TracedGlobalIteration) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ TestEmbedderHeapTracer tracer;
+ TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+
+ v8::TracedGlobal<v8::Object> traced;
+ ConstructJSObject(isolate, isolate->GetCurrentContext(), &traced);
+ CHECK(!traced.IsEmpty());
+ traced.SetWrapperClassId(57);
+ TracedGlobalVisitor visitor;
+ {
+ v8::HandleScope scope(isolate);
+ tracer.IterateTracedGlobalHandles(&visitor);
+ }
+ CHECK_EQ(1, visitor.count());
+}
+
+namespace {
+
+void FinalizationCallback(const WeakCallbackInfo<void>& data) {
+ v8::TracedGlobal<v8::Object>* traced =
+ reinterpret_cast<v8::TracedGlobal<v8::Object>*>(data.GetParameter());
+ CHECK_EQ(reinterpret_cast<void*>(0x4), data.GetInternalField(0));
+ CHECK_EQ(reinterpret_cast<void*>(0x8), data.GetInternalField(1));
+ traced->Reset();
+}
+
+} // namespace
+
+TEST(TracedGlobalSetFinalizationCallbackScavenge) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ TestEmbedderHeapTracer tracer;
+ tracer.ConsiderTracedGlobalAsRoot(false);
+ TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+
+ v8::TracedGlobal<v8::Object> traced;
+ ConstructJSApiObject(isolate, isolate->GetCurrentContext(), &traced);
+ CHECK(!traced.IsEmpty());
+ {
+ v8::HandleScope scope(isolate);
+ auto local = traced.Get(isolate);
+ local->SetAlignedPointerInInternalField(0, reinterpret_cast<void*>(0x4));
+ local->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0x8));
+ }
+ traced.SetFinalizationCallback(&traced, FinalizationCallback);
+ heap::InvokeScavenge();
+ CHECK(traced.IsEmpty());
+}
+
+TEST(TracedGlobalSetFinalizationCallbackMarkSweep) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ TestEmbedderHeapTracer tracer;
+ TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+
+ v8::TracedGlobal<v8::Object> traced;
+ ConstructJSApiObject(isolate, isolate->GetCurrentContext(), &traced);
+ CHECK(!traced.IsEmpty());
+ {
+ v8::HandleScope scope(isolate);
+ auto local = traced.Get(isolate);
+ local->SetAlignedPointerInInternalField(0, reinterpret_cast<void*>(0x4));
+ local->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0x8));
+ }
+ traced.SetFinalizationCallback(&traced, FinalizationCallback);
+ heap::InvokeMarkSweep();
+ CHECK(traced.IsEmpty());
+}
+
+TEST(TracePrologueCallingIntoV8WriteBarrier) {
+ // Regression test: https://crbug.com/940003
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Global<v8::Array> global;
+ {
+ v8::HandleScope scope(isolate);
+ auto local = v8::Array::New(isolate, 10);
+ global.Reset(isolate, local);
+ }
+ TestEmbedderHeapTracer tracer(TracePrologueBehavior::kCallV8WriteBarrier,
+ std::move(global));
+ TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ SimulateIncrementalMarking(CcTest::i_isolate()->heap());
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
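
Note: the tests above replace weak v8::Persistent handles with v8::TracedGlobal, the handle type this V8 update introduces for embedder heap tracing; the explicit SetWeak() call disappears because a traced handle is reclaimed unless the tracer reports it reachable. A minimal embedder-side sketch, using only the test-tracer methods shown above:

    v8::TracedGlobal<v8::Object> g;
    {
      v8::HandleScope scope(isolate);
      g.Reset(isolate, v8::Object::New(isolate));
    }
    tracer.AddReferenceForTracing(&g);  // later re-registered via RegisterEmbedderReference
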
diff --git a/deps/v8/test/cctest/heap/test-external-string-tracker.cc b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
index 36a9391307..bcc00764de 100644
--- a/deps/v8/test/cctest/heap/test-external-string-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
@@ -4,6 +4,7 @@
#include "src/api-inl.h"
#include "src/api.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -193,7 +194,7 @@ TEST(ExternalString_PromotedThinString) {
i::Handle<i::String> isymbol1 = factory->InternalizeString(string1);
CHECK(isymbol1->IsInternalizedString());
CHECK(string1->IsExternalString());
- CHECK(!heap->InNewSpace(*isymbol1));
+ CHECK(!heap->InYoungGeneration(*isymbol1));
// New external string in the young space. This string has the same content
// as the previous one (that was already internalized).
@@ -209,7 +210,7 @@ TEST(ExternalString_PromotedThinString) {
i::Handle<i::String> isymbol2 = factory->InternalizeString(istring);
CHECK(isymbol2->IsInternalizedString());
CHECK(istring->IsThinString());
- CHECK(heap->InNewSpace(*istring));
+ CHECK(heap->InYoungGeneration(*istring));
// Collect thin string. References to the thin string will be updated to
// point to the actual external string in the old space.
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index c7c1d93f87..f12ba10979 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -37,8 +37,10 @@
#include "src/execution.h"
#include "src/field-type.h"
#include "src/global-handles.h"
+#include "src/hash-seed-inl.h"
#include "src/heap/factory.h"
#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
@@ -46,6 +48,7 @@
#include "src/ic/ic.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/frame-array-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
@@ -188,7 +191,7 @@ HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
HandleScope sc(isolate);
Handle<HeapNumber> value = factory->NewHeapNumber(1.000123);
- CHECK(Heap::InNewSpace(*value));
+ CHECK(Heap::InYoungGeneration(*value));
i::byte buffer[i::Assembler::kMinimalBufferSize];
MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
@@ -368,16 +371,11 @@ TEST(GarbageCollection) {
HandleScope inner_scope(isolate);
// Allocate a function and keep it in global object's property.
Handle<JSFunction> function = factory->NewFunctionForTest(name);
- Object::SetProperty(isolate, global, name, function, LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, global, name, function).Check();
// Allocate an object. Unrooted after leaving the scope.
Handle<JSObject> obj = factory->NewJSObject(function);
- Object::SetProperty(isolate, obj, prop_name, twenty_three,
- LanguageMode::kSloppy)
- .Check();
- Object::SetProperty(isolate, obj, prop_namex, twenty_four,
- LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, prop_name, twenty_three).Check();
+ Object::SetProperty(isolate, obj, prop_namex, twenty_four).Check();
CHECK_EQ(Smi::FromInt(23),
*Object::GetProperty(isolate, obj, prop_name).ToHandleChecked());
@@ -399,11 +397,8 @@ TEST(GarbageCollection) {
HandleScope inner_scope(isolate);
// Allocate another object, make it reachable from global.
Handle<JSObject> obj = factory->NewJSObject(function);
- Object::SetProperty(isolate, global, obj_name, obj, LanguageMode::kSloppy)
- .Check();
- Object::SetProperty(isolate, obj, prop_name, twenty_three,
- LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, global, obj_name, obj).Check();
+ Object::SetProperty(isolate, obj, prop_name, twenty_three).Check();
}
// After gc, it should survive.
@@ -698,7 +693,7 @@ TEST(WeakGlobalHandlesMark) {
// Make sure the objects are promoted.
CcTest::CollectGarbage(OLD_SPACE);
CcTest::CollectGarbage(NEW_SPACE);
- CHECK(!Heap::InNewSpace(*h1) && !Heap::InNewSpace(*h2));
+ CHECK(!Heap::InYoungGeneration(*h1) && !Heap::InYoungGeneration(*h2));
std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
GlobalHandles::MakeWeak(
@@ -944,15 +939,11 @@ TEST(FunctionAllocation) {
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<JSObject> obj = factory->NewJSObject(function);
- Object::SetProperty(isolate, obj, prop_name, twenty_three,
- LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, prop_name, twenty_three).Check();
CHECK_EQ(Smi::FromInt(23),
*Object::GetProperty(isolate, obj, prop_name).ToHandleChecked());
// Check that we can add properties to function objects.
- Object::SetProperty(isolate, function, prop_name, twenty_four,
- LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, function, prop_name, twenty_four).Check();
CHECK_EQ(
Smi::FromInt(24),
*Object::GetProperty(isolate, function, prop_name).ToHandleChecked());
@@ -983,7 +974,7 @@ TEST(ObjectProperties) {
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
// add first
- Object::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy).Check();
+ Object::SetProperty(isolate, obj, first, one).Check();
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
// delete first
@@ -992,8 +983,8 @@ TEST(ObjectProperties) {
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
// add first and then second
- Object::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy).Check();
- Object::SetProperty(isolate, obj, second, two, LanguageMode::kSloppy).Check();
+ Object::SetProperty(isolate, obj, first, one).Check();
+ Object::SetProperty(isolate, obj, second, two).Check();
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
@@ -1007,8 +998,8 @@ TEST(ObjectProperties) {
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));
// add first and then second
- Object::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy).Check();
- Object::SetProperty(isolate, obj, second, two, LanguageMode::kSloppy).Check();
+ Object::SetProperty(isolate, obj, first, one).Check();
+ Object::SetProperty(isolate, obj, second, two).Check();
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
@@ -1024,15 +1015,14 @@ TEST(ObjectProperties) {
// check string and internalized string match
const char* string1 = "fisk";
Handle<String> s1 = factory->NewStringFromAsciiChecked(string1);
- Object::SetProperty(isolate, obj, s1, one, LanguageMode::kSloppy).Check();
+ Object::SetProperty(isolate, obj, s1, one).Check();
Handle<String> s1_string = factory->InternalizeUtf8String(string1);
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s1_string));
// check internalized string and string match
const char* string2 = "fugl";
Handle<String> s2_string = factory->InternalizeUtf8String(string2);
- Object::SetProperty(isolate, obj, s2_string, one, LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, s2_string, one).Check();
Handle<String> s2 = factory->NewStringFromAsciiChecked(string2);
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s2));
}
@@ -1053,9 +1043,7 @@ TEST(JSObjectMaps) {
  // Set a property
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
- Object::SetProperty(isolate, obj, prop_name, twenty_three,
- LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, prop_name, twenty_three).Check();
CHECK_EQ(Smi::FromInt(23),
*Object::GetProperty(isolate, obj, prop_name).ToHandleChecked());
@@ -1090,7 +1078,7 @@ TEST(JSArray) {
CHECK(array->HasSmiOrObjectElements());
// array[length] = name.
- Object::SetElement(isolate, array, 0, name, LanguageMode::kSloppy).Check();
+ Object::SetElement(isolate, array, 0, name, ShouldThrow::kDontThrow).Check();
CHECK_EQ(Smi::FromInt(1), array->length());
element = i::Object::GetElement(isolate, array, 0).ToHandleChecked();
CHECK_EQ(*element, *name);
@@ -1104,7 +1092,7 @@ TEST(JSArray) {
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
- Object::SetElement(isolate, array, int_length, name, LanguageMode::kSloppy)
+ Object::SetElement(isolate, array, int_length, name, ShouldThrow::kDontThrow)
.Check();
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
@@ -1136,11 +1124,11 @@ TEST(JSObjectCopy) {
Handle<Smi> one(Smi::FromInt(1), isolate);
Handle<Smi> two(Smi::FromInt(2), isolate);
- Object::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy).Check();
- Object::SetProperty(isolate, obj, second, two, LanguageMode::kSloppy).Check();
+ Object::SetProperty(isolate, obj, first, one).Check();
+ Object::SetProperty(isolate, obj, second, two).Check();
- Object::SetElement(isolate, obj, 0, first, LanguageMode::kSloppy).Check();
- Object::SetElement(isolate, obj, 1, second, LanguageMode::kSloppy).Check();
+ Object::SetElement(isolate, obj, 0, first, ShouldThrow::kDontThrow).Check();
+ Object::SetElement(isolate, obj, 1, second, ShouldThrow::kDontThrow).Check();
// Make the clone.
Handle<Object> value1, value2;
@@ -1162,13 +1150,12 @@ TEST(JSObjectCopy) {
CHECK_EQ(*value1, *value2);
// Flip the values.
- Object::SetProperty(isolate, clone, first, two, LanguageMode::kSloppy)
- .Check();
- Object::SetProperty(isolate, clone, second, one, LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, clone, first, two).Check();
+ Object::SetProperty(isolate, clone, second, one).Check();
- Object::SetElement(isolate, clone, 0, second, LanguageMode::kSloppy).Check();
- Object::SetElement(isolate, clone, 1, first, LanguageMode::kSloppy).Check();
+ Object::SetElement(isolate, clone, 0, second, ShouldThrow::kDontThrow)
+ .Check();
+ Object::SetElement(isolate, clone, 1, first, ShouldThrow::kDontThrow).Check();
value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked();
value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked();
@@ -1800,8 +1787,9 @@ static HeapObject NewSpaceAllocateAligned(int size,
static Address AlignNewSpace(AllocationAlignment alignment, int offset) {
Address* top_addr = CcTest::heap()->new_space()->allocation_top_address();
int fill = Heap::GetFillToAlign(*top_addr, alignment);
- if (fill) {
- NewSpaceAllocateAligned(fill + offset, kWordAligned);
+ int allocation = fill + offset;
+ if (allocation) {
+ NewSpaceAllocateAligned(allocation, kWordAligned);
}
return *top_addr;
}
@@ -1923,6 +1911,64 @@ TEST(TestAlignedOverAllocation) {
}
}
+TEST(HeapNumberAlignment) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ HandleScope sc(isolate);
+
+ const auto required_alignment =
+ HeapObject::RequiredAlignment(*factory->heap_number_map());
+ const int maximum_misalignment =
+ Heap::GetMaximumFillToAlign(required_alignment);
+
+ for (int offset = 0; offset <= maximum_misalignment; offset += kTaggedSize) {
+ AlignNewSpace(required_alignment, offset);
+ Handle<Object> number_new = factory->NewNumber(1.000123);
+ CHECK(number_new->IsHeapNumber());
+ CHECK(Heap::InYoungGeneration(*number_new));
+ CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_new)->address(),
+ required_alignment));
+
+ AlignOldSpace(required_alignment, offset);
+ Handle<Object> number_old = factory->NewNumber(1.000321, TENURED);
+ CHECK(number_old->IsHeapNumber());
+ CHECK(heap->InOldSpace(*number_old));
+ CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_old)->address(),
+ required_alignment));
+ }
+}
+
+TEST(MutableHeapNumberAlignment) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ HandleScope sc(isolate);
+
+ const auto required_alignment =
+ HeapObject::RequiredAlignment(*factory->mutable_heap_number_map());
+ const int maximum_misalignment =
+ Heap::GetMaximumFillToAlign(required_alignment);
+
+ for (int offset = 0; offset <= maximum_misalignment; offset += kTaggedSize) {
+ AlignNewSpace(required_alignment, offset);
+ Handle<Object> number_new = factory->NewMutableHeapNumber(1.000123);
+ CHECK(number_new->IsMutableHeapNumber());
+ CHECK(Heap::InYoungGeneration(*number_new));
+ CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_new)->address(),
+ required_alignment));
+
+ AlignOldSpace(required_alignment, offset);
+ Handle<Object> number_old =
+ factory->NewMutableHeapNumber(1.000321, TENURED);
+ CHECK(number_old->IsMutableHeapNumber());
+ CHECK(heap->InOldSpace(*number_old));
+ CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_old)->address(),
+ required_alignment));
+ }
+}
TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
CcTest::InitializeVM();
@@ -1975,6 +2021,8 @@ TEST(GrowAndShrinkNewSpace) {
// Make sure we're in a consistent state to start out.
CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+ new_space->Shrink();
// Explicitly growing should double the space capacity.
size_t old_capacity, new_capacity;
@@ -2285,11 +2333,12 @@ TEST(InstanceOfStubWriteBarrier) {
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
+ const double kStepSizeInMs = 100;
while (!marking_state->IsBlack(f->code()) && !marking->IsStopped()) {
// Discard any pending GC requests otherwise we will get GC when we enter
// code below.
- marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->V8Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
}
CHECK(marking->IsMarking());
@@ -2352,7 +2401,7 @@ HEAP_TEST(Regress845060) {
// Preparation: create a string in new space.
Local<Value> str = CompileRun("var str = (new Array(10000)).join('x'); str");
- CHECK(Heap::InNewSpace(*v8::Utils::OpenHandle(*str)));
+ CHECK(Heap::InYoungGeneration(*v8::Utils::OpenHandle(*str)));
// Idle incremental marking sets the "kReduceMemoryFootprint" flag, which
// causes from_space to be unmapped after scavenging.
@@ -2363,7 +2412,7 @@ HEAP_TEST(Regress845060) {
// promoted to old space. Unmapping of from_space causes accesses to any
// stale raw pointers to crash.
CompileRun("while (%InNewSpace(str)) { str.split(''); }");
- CHECK(!Heap::InNewSpace(*v8::Utils::OpenHandle(*str)));
+ CHECK(!Heap::InYoungGeneration(*v8::Utils::OpenHandle(*str)));
}
TEST(IdleNotificationFinishMarking) {
@@ -2380,9 +2429,10 @@ TEST(IdleNotificationFinishMarking) {
CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count);
+ const double kStepSizeInMs = 100;
do {
- marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->V8Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
} while (
!CcTest::heap()->mark_compact_collector()->marking_worklist()->IsEmpty());
@@ -2431,7 +2481,7 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
i::Handle<JSReceiver> o =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
- CHECK(Heap::InNewSpace(*o));
+ CHECK(Heap::InYoungGeneration(*o));
}
@@ -2569,7 +2619,7 @@ TEST(OptimizedPretenuringNestedInObjectProperties) {
// Nested literal sites are only pretenured if the top level
// literal is pretenured
- CHECK(Heap::InNewSpace(*o));
+ CHECK(Heap::InYoungGeneration(*o));
}
TEST(OptimizedPretenuringMixedInObjectProperties) {
@@ -2897,7 +2947,7 @@ TEST(OptimizedAllocationArrayLiterals) {
i::Handle<JSObject> o = Handle<JSObject>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
- CHECK(Heap::InNewSpace(o->elements()));
+ CHECK(Heap::InYoungGeneration(o->elements()));
}
static int CountMapTransitions(i::Isolate* isolate, Map map) {
@@ -2952,6 +3002,13 @@ TEST(Regress1465) {
CHECK_EQ(1, transitions_after);
}
+static i::Handle<JSObject> GetByName(const char* name) {
+ return i::Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
+ CcTest::global()
+ ->Get(CcTest::isolate()->GetCurrentContext(), v8_str(name))
+ .ToLocalChecked())));
+}
#ifdef DEBUG
static void AddTransitions(int transitions_count) {
@@ -2964,15 +3021,6 @@ static void AddTransitions(int transitions_count) {
}
-static i::Handle<JSObject> GetByName(const char* name) {
- return i::Handle<JSObject>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
- CcTest::global()
- ->Get(CcTest::isolate()->GetCurrentContext(), v8_str(name))
- .ToLocalChecked())));
-}
-
-
static void AddPropertyTo(
int gc_count, Handle<JSObject> object, const char* property_name) {
Isolate* isolate = CcTest::i_isolate();
@@ -2983,9 +3031,7 @@ static void AddPropertyTo(
FLAG_gc_global = true;
FLAG_retain_maps_for_n_gc = 0;
CcTest::heap()->set_allocation_timeout(gc_count);
- Object::SetProperty(isolate, object, prop_name, twenty_three,
- LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, object, prop_name, twenty_three).Check();
}
@@ -3106,6 +3152,9 @@ TEST(ReleaseOverReservedPages) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
+ // Ensure that the young generation is empty.
+ CcTest::CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
static const int number_of_test_pages = 20;
// Prepare many pages with low live-bytes count.
@@ -3139,7 +3188,7 @@ TEST(ReleaseOverReservedPages) {
// boots, but if the 20 small arrays don't fit on the first page then that's
// an indication that it is too small.
CcTest::CollectAllAvailableGarbage();
- CHECK_EQ(initial_page_count, old_space->CountTotalPages());
+ CHECK_GE(initial_page_count, old_space->CountTotalPages());
}
static int forced_gc_counter = 0;
@@ -3239,7 +3288,7 @@ static void CheckVectorIC(Handle<JSFunction> f, int slot_index,
FeedbackVectorHelper helper(vector);
FeedbackSlot slot = helper.slot(slot_index);
FeedbackNexus nexus(vector, slot);
- CHECK(nexus.StateFromFeedback() == desired_state);
+ CHECK(nexus.ic_state() == desired_state);
}
TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
@@ -3488,6 +3537,119 @@ UNINITIALIZED_TEST(ReleaseStackTraceData) {
isolate->Dispose();
}
+// TODO(mmarchini) also write tests for async/await and Promise.all
+void DetailedErrorStackTraceTest(const char* src,
+ std::function<void(Handle<FrameArray>)> test) {
+ FLAG_detailed_error_stack_trace = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::TryCatch try_catch(CcTest::isolate());
+ CompileRun(src);
+
+ CHECK(try_catch.HasCaught());
+ Handle<Object> exception = v8::Utils::OpenHandle(*try_catch.Exception());
+
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<Name> key = isolate->factory()->stack_trace_symbol();
+
+ Handle<FrameArray> stack_trace(
+ FrameArray::cast(
+ Handle<JSArray>::cast(
+ Object::GetProperty(isolate, exception, key).ToHandleChecked())
+ ->elements()),
+ isolate);
+
+ test(stack_trace);
+}
+
+// * Test interpreted function error
+TEST(DetailedErrorStackTrace) {
+ static const char* source =
+ "function func1(arg1) { "
+ " let err = new Error(); "
+ " throw err; "
+ "} "
+ "function func2(arg1, arg2) { "
+ " func1(42); "
+ "} "
+ "class Foo {}; "
+ "function main(arg1, arg2) { "
+ " func2(arg1, false); "
+ "} "
+ "var foo = new Foo(); "
+ "main(foo); ";
+
+ DetailedErrorStackTraceTest(source, [](Handle<FrameArray> stack_trace) {
+ FixedArray foo_parameters = stack_trace->Parameters(0);
+ CHECK_EQ(foo_parameters->length(), 1);
+ CHECK(foo_parameters->get(0)->IsSmi());
+ CHECK_EQ(Smi::ToInt(foo_parameters->get(0)), 42);
+
+ FixedArray bar_parameters = stack_trace->Parameters(1);
+ CHECK_EQ(bar_parameters->length(), 2);
+ CHECK(bar_parameters->get(0)->IsJSObject());
+ CHECK(bar_parameters->get(1)->IsBoolean());
+ Handle<Object> foo = Handle<Object>::cast(GetByName("foo"));
+ CHECK_EQ(bar_parameters->get(0), *foo);
+ CHECK(!bar_parameters->get(1)->BooleanValue(CcTest::i_isolate()));
+
+ FixedArray main_parameters = stack_trace->Parameters(2);
+ CHECK_EQ(main_parameters->length(), 2);
+ CHECK(main_parameters->get(0)->IsJSObject());
+ CHECK(main_parameters->get(1)->IsUndefined());
+ CHECK_EQ(main_parameters->get(0), *foo);
+ });
+}
+
+// * Test optimized function with inline frame error
+TEST(DetailedErrorStackTraceInline) {
+ FLAG_allow_natives_syntax = true;
+ static const char* source =
+ "function add(x) { "
+ " if (x == 42) "
+ " throw new Error(); "
+ " return x + x; "
+ "} "
+ "add(0); "
+ "add(1); "
+ "function foo(x) { "
+ " return add(x + 1) "
+ "} "
+ "foo(40); "
+ "%OptimizeFunctionOnNextCall(foo); "
+ "foo(41); ";
+
+ DetailedErrorStackTraceTest(source, [](Handle<FrameArray> stack_trace) {
+ FixedArray parameters_add = stack_trace->Parameters(0);
+ CHECK_EQ(parameters_add->length(), 1);
+ CHECK(parameters_add->get(0)->IsSmi());
+ CHECK_EQ(Smi::ToInt(parameters_add->get(0)), 42);
+
+ FixedArray parameters_foo = stack_trace->Parameters(1);
+ CHECK_EQ(parameters_foo->length(), 1);
+ CHECK(parameters_foo->get(0)->IsSmi());
+ CHECK_EQ(Smi::ToInt(parameters_foo->get(0)), 41);
+ });
+}
+
+// * Test builtin exit error
+TEST(DetailedErrorStackTraceBuiltinExit) {
+ static const char* source =
+ "function test(arg1) { "
+ " (new Number()).toFixed(arg1); "
+ "} "
+ "test(9999); ";
+
+ DetailedErrorStackTraceTest(source, [](Handle<FrameArray> stack_trace) {
+ FixedArray parameters = stack_trace->Parameters(0);
+
+ CHECK_EQ(parameters->length(), 2);
+ CHECK(parameters->get(0)->IsSmi());
+ CHECK_EQ(Smi::ToInt(parameters->get(0)), 9999);
+ });
+}
+
TEST(Regress169928) {
FLAG_allow_natives_syntax = true;
#ifndef V8_LITE_MODE
@@ -3595,8 +3757,6 @@ TEST(LargeObjectSlotRecording) {
  // Start incremental marking to activate the write barrier.
heap::SimulateIncrementalMarking(heap, false);
- heap->incremental_marking()->AdvanceIncrementalMarking(
- 10000000, IncrementalMarking::NO_GC_VIA_STACK_GUARD, StepOrigin::kV8);
// Create references from the large object to the object on the evacuation
// candidate.
@@ -3606,6 +3766,8 @@ TEST(LargeObjectSlotRecording) {
CHECK(lo->get(i) == old_location);
}
+ heap::SimulateIncrementalMarking(heap, true);
+
  // Move the evacuation candidate object.
CcTest::CollectAllGarbage();
@@ -3659,9 +3821,7 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
CcTest::heap()->StartIncrementalMarking(
i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting);
}
- // This big step should be sufficient to mark the whole array.
- marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ heap::SimulateIncrementalMarking(CcTest::heap());
CHECK(marking->IsComplete() ||
marking->IsReadyToOverApproximateWeakClosure());
}
@@ -4069,10 +4229,10 @@ TEST(NewSpaceObjectsInOptimizedCode) {
->Get(context.local(), v8_str("foo"))
.ToLocalChecked())));
- CHECK(Heap::InNewSpace(*foo));
+ CHECK(Heap::InYoungGeneration(*foo));
CcTest::CollectGarbage(NEW_SPACE);
CcTest::CollectGarbage(NEW_SPACE);
- CHECK(!Heap::InNewSpace(*foo));
+ CHECK(!Heap::InYoungGeneration(*foo));
#ifdef VERIFY_HEAP
CcTest::heap()->Verify();
#endif
@@ -4519,7 +4679,7 @@ void CheckIC(Handle<JSFunction> function, int slot_index,
FeedbackVector vector = function->feedback_vector();
FeedbackSlot slot(slot_index);
FeedbackNexus nexus(vector, slot);
- CHECK_EQ(nexus.StateFromFeedback(), state);
+ CHECK_EQ(nexus.ic_state(), state);
}
TEST(MonomorphicStaysMonomorphicAfterGC) {
@@ -4735,8 +4895,8 @@ TEST(Regress507979) {
Handle<FixedArray> o1 = isolate->factory()->NewFixedArray(kFixedArrayLen);
Handle<FixedArray> o2 = isolate->factory()->NewFixedArray(kFixedArrayLen);
- CHECK(Heap::InNewSpace(*o1));
- CHECK(Heap::InNewSpace(*o2));
+ CHECK(Heap::InYoungGeneration(*o1));
+ CHECK(Heap::InYoungGeneration(*o2));
HeapIterator it(isolate->heap(), i::HeapIterator::kFilterUnreachable);
@@ -4826,12 +4986,7 @@ TEST(Regress3631) {
Handle<JSReceiver> obj =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
Handle<JSWeakCollection> weak_map(JSWeakCollection::cast(*obj), isolate);
- HeapObject weak_map_table = HeapObject::cast(weak_map->table());
- IncrementalMarking::MarkingState* marking_state = marking->marking_state();
- while (!marking_state->IsBlack(weak_map_table) && !marking->IsStopped()) {
- marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
- }
+ SimulateIncrementalMarking(heap);
// Stash the backing store in a handle.
Handle<Object> save(weak_map->table(), isolate);
// The following line will update the backing store.
@@ -4855,8 +5010,7 @@ TEST(Regress442710) {
Handle<JSArray> array = factory->NewJSArray(2);
Handle<String> name = factory->InternalizeUtf8String("testArray");
- Object::SetProperty(isolate, global, name, array, LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, global, name, array).Check();
CompileRun("testArray[0] = 1; testArray[1] = 2; testArray.shift();");
CcTest::CollectGarbage(OLD_SPACE);
}
@@ -4991,7 +5145,7 @@ void AllocateInSpace(Isolate* isolate, size_t bytes, AllocationSpace space) {
static_cast<int>((bytes - FixedArray::kHeaderSize) / kTaggedSize);
Handle<FixedArray> array = factory->NewFixedArray(
elements, space == NEW_SPACE ? NOT_TENURED : TENURED);
- CHECK((space == NEW_SPACE) == Heap::InNewSpace(*array));
+ CHECK((space == NEW_SPACE) == Heap::InYoungGeneration(*array));
CHECK_EQ(bytes, static_cast<size_t>(array->Size()));
}
@@ -5213,7 +5367,8 @@ AllocationResult HeapTester::AllocateByteArrayForTest(Heap* heap, int length,
AllocationSpace space = heap->SelectSpace(pretenure);
HeapObject result;
{
- AllocationResult allocation = heap->AllocateRaw(size, space);
+ AllocationResult allocation =
+ heap->AllocateRaw(size, Heap::SelectType(space));
if (!allocation.To(&result)) return allocation;
}
@@ -5239,7 +5394,7 @@ HEAP_TEST(Regress587004) {
Handle<FixedArray> array = factory->NewFixedArray(N, TENURED);
CHECK(heap->old_space()->Contains(*array));
Handle<Object> number = factory->NewHeapNumber(1.0);
- CHECK(Heap::InNewSpace(*number));
+ CHECK(Heap::InYoungGeneration(*number));
for (int i = 0; i < N; i++) {
array->set(i, *number);
}
@@ -5349,7 +5504,8 @@ TEST(Regress598319) {
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- const int kNumberOfObjects = kMaxRegularHeapObjectSize / kTaggedSize;
+ // The size of the array should be larger than kProgressBarScanningChunk.
+ const int kNumberOfObjects = Max(FixedArray::kMaxRegularLength + 1, 128 * KB);
struct Arr {
Arr(Isolate* isolate, int number_of_objects) {
@@ -5374,7 +5530,7 @@ TEST(Regress598319) {
CHECK_EQ(arr.get()->length(), kNumberOfObjects);
CHECK(heap->lo_space()->Contains(arr.get()));
- LargePage* page = heap->lo_space()->FindPage(arr.get()->address());
+ LargePage* page = LargePage::FromHeapObject(arr.get());
CHECK_NOT_NULL(page);
// GC to cleanup state
@@ -5409,11 +5565,13 @@ TEST(Regress598319) {
// Now we search for a state where we are in incremental marking and have
// only partially marked the large object.
+ const double kSmallStepSizeInMs = 0.1;
while (!marking->IsComplete()) {
- marking->Step(i::KB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
- if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->progress_bar() > 0) {
- CHECK_NE(page->progress_bar(), arr.get()->Size());
+ marking->V8Step(kSmallStepSizeInMs,
+ i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
+ if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->ProgressBar() > 0) {
+ CHECK_NE(page->ProgressBar(), arr.get()->Size());
{
// Shift by 1, effectively moving one white object across the progress
// bar, meaning that we will miss marking it.
@@ -5427,9 +5585,11 @@ TEST(Regress598319) {
}
// Finish marking with bigger steps to speed up test.
+ const double kLargeStepSizeInMs = 1000;
while (!marking->IsComplete()) {
- marking->Step(10 * i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->V8Step(kLargeStepSizeInMs,
+ i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5509,9 +5669,10 @@ TEST(Regress615489) {
v8::HandleScope inner(CcTest::isolate());
isolate->factory()->NewFixedArray(500, TENURED)->Size();
}
+ const double kStepSizeInMs = 100;
while (!marking->IsComplete()) {
- marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5568,10 +5729,11 @@ TEST(Regress631969) {
CcTest::CollectGarbage(NEW_SPACE);
// Finish incremental marking.
+ const double kStepSizeInMs = 100;
IncrementalMarking* marking = heap->incremental_marking();
while (!marking->IsComplete()) {
- marking->Step(MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5657,7 +5819,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
Address start_address = array->address();
Address end_address = start_address + array->Size();
Page* page = Page::FromAddress(start_address);
- IncrementalMarking::MarkingState* marking_state = marking->marking_state();
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ marking->non_atomic_marking_state();
CHECK(marking_state->IsBlack(*array));
CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
page->AddressToMarkbitIndex(start_address),
@@ -5724,7 +5887,8 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
Address start_address = array->address();
Address end_address = start_address + array->Size();
Page* page = Page::FromAddress(start_address);
- IncrementalMarking::MarkingState* marking_state = marking->marking_state();
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ marking->non_atomic_marking_state();
CHECK(marking_state->IsBlack(*array));
CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
@@ -5786,7 +5950,8 @@ TEST(YoungGenerationLargeObjectAllocationScavenge) {
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(200000);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
- CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+ CHECK(chunk->IsFlagSet(MemoryChunk::LARGE_PAGE));
+ CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
Handle<Object> number = isolate->factory()->NewHeapNumber(123.456);
array_small->set(0, *number);
@@ -5797,7 +5962,7 @@ TEST(YoungGenerationLargeObjectAllocationScavenge) {
// generation large object space.
chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(LO_SPACE, chunk->owner()->identity());
- CHECK(!chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+ CHECK(!chunk->InYoungGeneration());
CcTest::CollectAllAvailableGarbage();
}
@@ -5815,7 +5980,8 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(200000);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
- CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+ CHECK(chunk->IsFlagSet(MemoryChunk::LARGE_PAGE));
+ CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
Handle<Object> number = isolate->factory()->NewHeapNumber(123.456);
array_small->set(0, *number);
@@ -5826,7 +5992,7 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
// large object space.
chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(LO_SPACE, chunk->owner()->identity());
- CHECK(!chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+ CHECK(!chunk->InYoungGeneration());
CcTest::CollectAllAvailableGarbage();
}
@@ -5846,7 +6012,7 @@ TEST(YoungGenerationLargeObjectAllocationReleaseScavenger) {
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(20000);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
- CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+ CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
}
}
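
Note: young-generation large pages are now identified by dedicated chunk flags — the old IN_TO_SPACE flag is replaced by TO_PAGE (plus LARGE_PAGE for large-object pages), and promotion is asserted through chunk->InYoungGeneration(). The assertion shape used in these hunks:

    MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
    CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
    CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
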
@@ -5975,7 +6141,7 @@ HEAP_TEST(Regress670675) {
if (marking->IsStopped()) {
marking->Start(i::GarbageCollectionReason::kTesting);
}
- size_t array_length = Page::kPageSize / kTaggedSize + 100;
+ size_t array_length = 128 * KB;
size_t n = heap->OldGenerationSpaceAvailable() / array_length;
for (size_t i = 0; i < n + 40; i++) {
{
@@ -5985,7 +6151,7 @@ HEAP_TEST(Regress670675) {
}
if (marking->IsStopped()) break;
double deadline = heap->MonotonicallyIncreasingTimeInMs() + 1;
- marking->AdvanceIncrementalMarking(
+ marking->AdvanceWithDeadline(
deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
DCHECK(marking->IsStopped());
@@ -6159,7 +6325,7 @@ UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
{
v8::Isolate::Scope isolate_scope(isolate);
CHECK_EQ(static_cast<uint64_t>(1337 * i),
- reinterpret_cast<i::Isolate*>(isolate)->heap()->HashSeed());
+ HashSeed(reinterpret_cast<i::Isolate*>(isolate)));
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
CHECK(!context.IsEmpty());
@@ -6258,7 +6424,7 @@ HEAP_TEST(Regress779503) {
// The byte array filled with kHeapObjectTag ensures that we cannot read
// from the slot again and interpret it as heap value. Doing so will crash.
Handle<ByteArray> byte_array = isolate->factory()->NewByteArray(kArraySize);
- CHECK(Heap::InNewSpace(*byte_array));
+ CHECK(Heap::InYoungGeneration(*byte_array));
for (int i = 0; i < kArraySize; i++) {
byte_array->set(i, kHeapObjectTag);
}
@@ -6268,7 +6434,7 @@ HEAP_TEST(Regress779503) {
// The FixedArray in old space serves as space for slots.
Handle<FixedArray> fixed_array =
isolate->factory()->NewFixedArray(kArraySize, TENURED);
- CHECK(!Heap::InNewSpace(*fixed_array));
+ CHECK(!Heap::InYoungGeneration(*fixed_array));
for (int i = 0; i < kArraySize; i++) {
fixed_array->set(i, *byte_array);
}
@@ -6277,7 +6443,7 @@ HEAP_TEST(Regress779503) {
// currently scavenging.
heap->delay_sweeper_tasks_for_testing_ = true;
CcTest::CollectGarbage(OLD_SPACE);
- CHECK(Heap::InNewSpace(*byte_array));
+ CHECK(Heap::InYoungGeneration(*byte_array));
}
// Scavenging and sweeping the same page will crash as slots will be
// overridden.
@@ -6291,6 +6457,7 @@ struct OutOfMemoryState {
size_t old_generation_capacity_at_oom;
size_t memory_allocator_size_at_oom;
size_t new_space_capacity_at_oom;
+ size_t new_lo_space_size_at_oom;
size_t current_heap_limit;
size_t initial_heap_limit;
};
@@ -6303,6 +6470,7 @@ size_t NearHeapLimitCallback(void* raw_state, size_t current_heap_limit,
state->old_generation_capacity_at_oom = heap->OldGenerationCapacity();
state->memory_allocator_size_at_oom = heap->memory_allocator()->Size();
state->new_space_capacity_at_oom = heap->new_space()->Capacity();
+ state->new_lo_space_size_at_oom = heap->new_lo_space()->Size();
state->current_heap_limit = current_heap_limit;
state->initial_heap_limit = initial_heap_limit;
return initial_heap_limit + 100 * MB;
@@ -6378,11 +6546,14 @@ UNINITIALIZED_TEST(OutOfMemoryLargeObjects) {
}
CHECK_LE(state.old_generation_capacity_at_oom, kOldGenerationLimit);
CHECK_LE(kOldGenerationLimit, state.old_generation_capacity_at_oom +
+ state.new_space_capacity_at_oom +
+ state.new_lo_space_size_at_oom +
FixedArray::SizeFor(kFixedArrayLength));
CHECK_LE(
state.memory_allocator_size_at_oom,
MemoryAllocatorSizeFromHeapCapacity(state.old_generation_capacity_at_oom +
- 2 * state.new_space_capacity_at_oom));
+ 2 * state.new_space_capacity_at_oom +
+ state.new_lo_space_size_at_oom));
reinterpret_cast<v8::Isolate*>(isolate)->Dispose();
}
@@ -6466,7 +6637,7 @@ TEST(Regress8617) {
Handle<Object> foo =
v8::Utils::OpenHandle(*CompileRun("function foo() { return 42; };"
"foo;"));
- if (heap->InNewSpace(*foo)) {
+ if (heap->InYoungGeneration(*foo)) {
CcTest::CollectGarbage(NEW_SPACE);
CcTest::CollectGarbage(NEW_SPACE);
}
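
Note: a second recurring migration in this file — incremental-marking tests now drive the marker with the time-based V8Step instead of the byte-based Step. The converted loop, as it appears in several hunks above:

    const double kStepSizeInMs = 100;
    while (!marking->IsComplete()) {
      marking->V8Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                      StepOrigin::kV8);
      if (marking->IsReadyToOverApproximateWeakClosure()) {
        marking->FinalizeIncrementally();
      }
    }
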
diff --git a/deps/v8/test/cctest/heap/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index 8213ea6080..d90c2c2139 100644
--- a/deps/v8/test/cctest/heap/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -71,14 +71,14 @@ class MockPlatform : public TestPlatform {
void PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) override {
- UNREACHABLE();
- };
+ task_ = std::move(task);
+ }
void PostIdleTask(std::unique_ptr<IdleTask> task) override {
UNREACHABLE();
}
- bool IdleTasksEnabled() override { return false; };
+ bool IdleTasksEnabled() override { return false; }
bool PendingTask() { return task_ != nullptr; }
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index 897f4d0242..9eeda75cc5 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -330,25 +330,16 @@ HEAP_TEST(InvalidatedSlotsFastToSlow) {
// Start incremental marking.
heap::SimulateIncrementalMarking(heap);
// Set properties to point to the evacuation candidate.
- Object::SetProperty(isolate, obj, prop_name1, evacuated,
- LanguageMode::kSloppy)
- .Check();
- Object::SetProperty(isolate, obj, prop_name2, evacuated,
- LanguageMode::kSloppy)
- .Check();
- Object::SetProperty(isolate, obj, prop_name3, evacuated,
- LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, prop_name1, evacuated).Check();
+ Object::SetProperty(isolate, obj, prop_name2, evacuated).Check();
+ Object::SetProperty(isolate, obj, prop_name3, evacuated).Check();
{
HandleScope scope(isolate);
Handle<HeapObject> dead = factory->NewFixedArray(1);
- Object::SetProperty(isolate, obj, prop_name1, dead, LanguageMode::kSloppy)
- .Check();
- Object::SetProperty(isolate, obj, prop_name2, dead, LanguageMode::kSloppy)
- .Check();
- Object::SetProperty(isolate, obj, prop_name3, dead, LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, prop_name1, dead).Check();
+ Object::SetProperty(isolate, obj, prop_name2, dead).Check();
+ Object::SetProperty(isolate, obj, prop_name3, dead).Check();
Handle<Map> map(obj->map(), isolate);
Handle<Map> normalized_map =
Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, "testing");
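
Across these cctest files the explicit LanguageMode::kSloppy argument disappears from Object::SetProperty call sites. The diff alone does not show whether the parameter gained a default or the mode is now derived internally; the sketch below merely illustrates the call-site simplification using a defaulted parameter, with all names hypothetical:

```cpp
#include <cassert>
#include <string>

// Illustrative only: models an API migration like the one above, where an
// explicit mode argument is dropped from every call site.
enum class LanguageMode { kSloppy, kStrict };

struct Result {
  bool ok;
  void Check() const { assert(ok); }  // Mirrors the .Check() usage above.
};

// New-style signature: callers no longer spell out the language mode.
Result SetProperty(const std::string& /*name*/, int /*value*/,
                   LanguageMode /*mode*/ = LanguageMode::kSloppy) {
  return {true};
}

int main() {
  // Call sites shrink from three wrapped lines to one, as in the hunk above.
  SetProperty("prop_name1", 1).Check();
  SetProperty("prop_name2", 2).Check();
  return 0;
}
```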
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index 4f141af7a5..e85c73405f 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -101,7 +101,7 @@ HEAP_TEST(NoPromotion) {
AllocationResult HeapTester::AllocateMapForTest(Isolate* isolate) {
Heap* heap = isolate->heap();
HeapObject obj;
- AllocationResult alloc = heap->AllocateRaw(Map::kSize, MAP_SPACE);
+ AllocationResult alloc = heap->AllocateRaw(Map::kSize, AllocationType::kMap);
if (!alloc.To(&obj)) return alloc;
obj->set_map_after_allocation(ReadOnlyRoots(heap).meta_map(),
SKIP_WRITE_BARRIER);
@@ -119,7 +119,7 @@ AllocationResult HeapTester::AllocateFixedArrayForTest(
AllocationSpace space = heap->SelectSpace(pretenure);
HeapObject obj;
{
- AllocationResult result = heap->AllocateRaw(size, space);
+ AllocationResult result = heap->AllocateRaw(size, Heap::SelectType(space));
if (!result.To(&obj)) return result;
}
obj->set_map_after_allocation(ReadOnlyRoots(heap).fixed_array_map(),
@@ -165,9 +165,7 @@ HEAP_TEST(MarkCompactCollector) {
// allocate a garbage
(In the V8 source this comment reads "// allocate a garbage"; "// Allocate garbage." would be the grammatical form.)
Handle<String> func_name = factory->InternalizeUtf8String("theFunction");
Handle<JSFunction> function = factory->NewFunctionForTest(func_name);
- Object::SetProperty(isolate, global, func_name, function,
- LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, global, func_name, function).Check();
factory->NewJSObject(function);
}
@@ -184,13 +182,10 @@ HEAP_TEST(MarkCompactCollector) {
Handle<JSObject> obj = factory->NewJSObject(function);
Handle<String> obj_name = factory->InternalizeUtf8String("theObject");
- Object::SetProperty(isolate, global, obj_name, obj, LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, global, obj_name, obj).Check();
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
- Object::SetProperty(isolate, obj, prop_name, twenty_three,
- LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, prop_name, twenty_three).Check();
}
CcTest::CollectGarbage(OLD_SPACE);
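
AllocateRaw now takes an AllocationType instead of an AllocationSpace, with a helper like Heap::SelectType translating legacy space values at the remaining call site. Only AllocationType::kMap is visible in the hunks; the sketch below assumes a plausible mapping for the other values:

```cpp
#include <cassert>

// Hypothetical enums mirroring the AllocationSpace-to-AllocationType
// migration above; only kMap is confirmed by the diff, the rest are assumed.
enum class AllocationSpace { NEW_SPACE, OLD_SPACE, MAP_SPACE };
enum class AllocationType { kYoung, kOld, kMap };

// Sketch of what a helper like Heap::SelectType(space) plausibly does:
// translate the legacy space tag into the new allocation-type tag.
AllocationType SelectType(AllocationSpace space) {
  switch (space) {
    case AllocationSpace::NEW_SPACE:
      return AllocationType::kYoung;
    case AllocationSpace::OLD_SPACE:
      return AllocationType::kOld;
    case AllocationSpace::MAP_SPACE:
      return AllocationType::kMap;
  }
  return AllocationType::kOld;  // Unreachable with a valid enum value.
}

int main() {
  assert(SelectType(AllocationSpace::MAP_SPACE) == AllocationType::kMap);
  return 0;
}
```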
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
index 2db538d484..b68484e3c0 100644
--- a/deps/v8/test/cctest/heap/test-page-promotion.cc
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -67,6 +67,11 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
+ // Ensure that the new space is empty so that the page to be promoted
+ // does not contain the age mark.
+ heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
+ heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
+
std::vector<Handle<FixedArray>> handles;
heap::SimulateFullSpace(heap->new_space(), &handles);
heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 8219c1487d..337447dcea 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -45,18 +45,24 @@ namespace heap {
// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
public:
- TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
- : isolate_(isolate), old_allocator_(isolate->heap()->memory_allocator()) {
- isolate->heap()->memory_allocator_ = allocator;
+ TestMemoryAllocatorScope(Isolate* isolate, size_t max_capacity,
+ size_t code_range_size)
+ : isolate_(isolate),
+ old_allocator_(std::move(isolate->heap()->memory_allocator_)) {
+ isolate->heap()->memory_allocator_.reset(
+ new MemoryAllocator(isolate, max_capacity, code_range_size));
}
+ MemoryAllocator* allocator() { return isolate_->heap()->memory_allocator(); }
+
~TestMemoryAllocatorScope() {
- isolate_->heap()->memory_allocator_ = old_allocator_;
+ isolate_->heap()->memory_allocator()->TearDown();
+ isolate_->heap()->memory_allocator_.swap(old_allocator_);
}
private:
Isolate* isolate_;
- MemoryAllocator* old_allocator_;
+ std::unique_ptr<MemoryAllocator> old_allocator_;
DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};
@@ -89,41 +95,37 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
v8::PageAllocator* code_page_allocator,
size_t reserve_area_size, size_t commit_area_size,
Executability executable, Space* space) {
- MemoryAllocator* memory_allocator =
- new MemoryAllocator(isolate, heap->MaxReserved(), 0);
- {
- TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
- TestCodePageAllocatorScope test_code_page_allocator_scope(
- isolate, code_page_allocator);
-
- v8::PageAllocator* page_allocator =
- memory_allocator->page_allocator(executable);
-
- size_t allocatable_memory_area_offset =
- MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
- size_t guard_size =
- (executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
-
- MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
- reserve_area_size, commit_area_size, executable, space);
- size_t reserved_size =
- ((executable == EXECUTABLE))
- ? allocatable_memory_area_offset +
- RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
- guard_size
- : RoundUp(allocatable_memory_area_offset + reserve_area_size,
- page_allocator->CommitPageSize());
- CHECK(memory_chunk->size() == reserved_size);
- CHECK(memory_chunk->area_start() <
- memory_chunk->address() + memory_chunk->size());
- CHECK(memory_chunk->area_end() <=
- memory_chunk->address() + memory_chunk->size());
- CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
-
- memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
- }
- memory_allocator->TearDown();
- delete memory_allocator;
+ TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+ 0);
+ MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
+ TestCodePageAllocatorScope test_code_page_allocator_scope(
+ isolate, code_page_allocator);
+
+ v8::PageAllocator* page_allocator =
+ memory_allocator->page_allocator(executable);
+
+ size_t allocatable_memory_area_offset =
+ MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
+ size_t guard_size =
+ (executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
+
+ MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
+ reserve_area_size, commit_area_size, executable, space);
+ size_t reserved_size =
+ ((executable == EXECUTABLE))
+ ? allocatable_memory_area_offset +
+ RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
+ guard_size
+ : RoundUp(allocatable_memory_area_offset + reserve_area_size,
+ page_allocator->CommitPageSize());
+ CHECK(memory_chunk->size() == reserved_size);
+ CHECK(memory_chunk->area_start() <
+ memory_chunk->address() + memory_chunk->size());
+ CHECK(memory_chunk->area_end() <=
+ memory_chunk->address() + memory_chunk->size());
+ CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
+
+ memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
}
static unsigned int PseudorandomAreaSize() {
@@ -170,48 +172,43 @@ TEST(MemoryAllocator) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
- MemoryAllocator* memory_allocator =
- new MemoryAllocator(isolate, heap->MaxReserved(), 0);
- CHECK_NOT_NULL(memory_allocator);
- TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
+ TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+ 0);
+ MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
- {
- int total_pages = 0;
- OldSpace faked_space(heap);
- CHECK(!faked_space.first_page());
- CHECK(!faked_space.last_page());
- Page* first_page = memory_allocator->AllocatePage(
- faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
- NOT_EXECUTABLE);
-
- faked_space.memory_chunk_list().PushBack(first_page);
- CHECK(first_page->next_page() == nullptr);
- total_pages++;
-
- for (Page* p = first_page; p != nullptr; p = p->next_page()) {
- CHECK(p->owner() == &faked_space);
- }
+ int total_pages = 0;
+ OldSpace faked_space(heap);
+ CHECK(!faked_space.first_page());
+ CHECK(!faked_space.last_page());
+ Page* first_page = memory_allocator->AllocatePage(
+ faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
+ NOT_EXECUTABLE);
- // Again, we should get n or n - 1 pages.
- Page* other = memory_allocator->AllocatePage(
- faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
- NOT_EXECUTABLE);
- total_pages++;
- faked_space.memory_chunk_list().PushBack(other);
- int page_count = 0;
- for (Page* p = first_page; p != nullptr; p = p->next_page()) {
- CHECK(p->owner() == &faked_space);
- page_count++;
- }
- CHECK(total_pages == page_count);
+ faked_space.memory_chunk_list().PushBack(first_page);
+ CHECK(first_page->next_page() == nullptr);
+ total_pages++;
- Page* second_page = first_page->next_page();
- CHECK_NOT_NULL(second_page);
+ for (Page* p = first_page; p != nullptr; p = p->next_page()) {
+ CHECK(p->owner() == &faked_space);
+ }
- // OldSpace's destructor will tear down the space and free up all pages.
+ // Again, we should get n or n - 1 pages.
+ Page* other = memory_allocator->AllocatePage(
+ faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
+ NOT_EXECUTABLE);
+ total_pages++;
+ faked_space.memory_chunk_list().PushBack(other);
+ int page_count = 0;
+ for (Page* p = first_page; p != nullptr; p = p->next_page()) {
+ CHECK(p->owner() == &faked_space);
+ page_count++;
}
- memory_allocator->TearDown();
- delete memory_allocator;
+ CHECK(total_pages == page_count);
+
+ Page* second_page = first_page->next_page();
+ CHECK_NOT_NULL(second_page);
+
+ // OldSpace's destructor will tear down the space and free up all pages.
}
TEST(ComputeDiscardMemoryAreas) {
@@ -256,9 +253,9 @@ TEST(ComputeDiscardMemoryAreas) {
TEST(NewSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
- MemoryAllocator* memory_allocator =
- new MemoryAllocator(isolate, heap->MaxReserved(), 0);
- TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
+ TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+ 0);
+ MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
NewSpace new_space(heap, memory_allocator->data_page_allocator(),
CcTest::heap()->InitialSemiSpaceSize(),
@@ -273,17 +270,14 @@ TEST(NewSpace) {
new_space.TearDown();
memory_allocator->unmapper()->EnsureUnmappingCompleted();
- memory_allocator->TearDown();
- delete memory_allocator;
}
TEST(OldSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
- MemoryAllocator* memory_allocator =
- new MemoryAllocator(isolate, heap->MaxReserved(), 0);
- TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
+ TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+ 0);
OldSpace* s = new OldSpace(heap);
CHECK_NOT_NULL(s);
@@ -293,8 +287,6 @@ TEST(OldSpace) {
}
delete s;
- memory_allocator->TearDown();
- delete memory_allocator;
}
TEST(LargeObjectSpace) {
@@ -315,8 +307,6 @@ TEST(LargeObjectSpace) {
CHECK(lo->Contains(HeapObject::cast(obj)));
- CHECK(lo->FindObject(ho->address()) == obj);
-
CHECK(lo->Contains(ho));
while (true) {
@@ -396,7 +386,7 @@ TEST(SizeOfInitialHeap) {
#endif // DEBUG
static HeapObject AllocateUnaligned(NewSpace* space, int size) {
- AllocationResult allocation = space->AllocateRawUnaligned(size);
+ AllocationResult allocation = space->AllocateRaw(size, kWordAligned);
CHECK(!allocation.IsRetry());
HeapObject filler;
CHECK(allocation.To(&filler));
@@ -406,7 +396,7 @@ static HeapObject AllocateUnaligned(NewSpace* space, int size) {
}
static HeapObject AllocateUnaligned(PagedSpace* space, int size) {
- AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
+ AllocationResult allocation = space->AllocateRaw(size, kWordAligned);
CHECK(!allocation.IsRetry());
HeapObject filler;
CHECK(allocation.To(&filler));
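
TestMemoryAllocatorScope is reworked so the scope itself constructs the replacement MemoryAllocator and holds the displaced one in a std::unique_ptr, tearing down and swapping back in the destructor; callers fetch the installed allocator via allocator() instead of newing and deleting one by hand. A generic, runnable sketch of that RAII swap pattern, using stand-in types rather than the V8 classes:

```cpp
#include <cassert>
#include <memory>
#include <utility>

// Generic stand-ins for a heap that owns its allocator via unique_ptr,
// mirroring the ownership change in the hunk above.
struct Allocator {
  explicit Allocator(int id) : id(id) {}
  int id;
};

struct Heap {
  std::unique_ptr<Allocator> allocator = std::make_unique<Allocator>(0);
};

// RAII scope: installs a temporary allocator on construction and swaps the
// original back on destruction, so tests cannot leak the temporary or leave
// the heap pointing at a destroyed allocator (the risk with the old
// raw-pointer version).
class TestAllocatorScope {
 public:
  TestAllocatorScope(Heap* heap, int temp_id)
      : heap_(heap), old_(std::move(heap->allocator)) {
    heap_->allocator = std::make_unique<Allocator>(temp_id);
  }
  ~TestAllocatorScope() { heap_->allocator.swap(old_); }

  Allocator* allocator() { return heap_->allocator.get(); }

 private:
  Heap* heap_;
  std::unique_ptr<Allocator> old_;
};

int main() {
  Heap heap;
  {
    TestAllocatorScope scope(&heap, 42);
    assert(scope.allocator()->id == 42);
  }
  assert(heap.allocator->id == 0);  // Original allocator restored.
  return 0;
}
```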
diff --git a/deps/v8/test/cctest/heap/test-weak-references.cc b/deps/v8/test/cctest/heap/test-weak-references.cc
index 8a2ad3c184..bcf8622d31 100644
--- a/deps/v8/test/cctest/heap/test-weak-references.cc
+++ b/deps/v8/test/cctest/heap/test-weak-references.cc
@@ -5,6 +5,7 @@
#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects/smi.h"
#include "test/cctest/cctest.h"
@@ -41,12 +42,12 @@ TEST(WeakReferencesBasic) {
Handle<FeedbackVector> fv =
CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InNewSpace(*fv));
+ CHECK(Heap::InYoungGeneration(*fv));
MaybeObject code_object = fv->optimized_code_weak_or_smi();
CHECK(code_object->IsSmi());
CcTest::CollectAllGarbage();
- CHECK(Heap::InNewSpace(*fv));
+ CHECK(Heap::InYoungGeneration(*fv));
CHECK_EQ(code_object, fv->optimized_code_weak_or_smi());
{
@@ -123,7 +124,7 @@ TEST(WeakReferencesOldToNew) {
// Create a new FixedArray which the FeedbackVector will point to.
Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
- CHECK(Heap::InNewSpace(*fixed_array));
+ CHECK(Heap::InYoungGeneration(*fixed_array));
fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*fixed_array));
CcTest::CollectAllGarbage();
@@ -148,7 +149,7 @@ TEST(WeakReferencesOldToNewScavenged) {
// Create a new FixedArray which the FeedbackVector will point to.
Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
- CHECK(Heap::InNewSpace(*fixed_array));
+ CHECK(Heap::InYoungGeneration(*fixed_array));
fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*fixed_array));
CcTest::CollectGarbage(NEW_SPACE);
@@ -192,13 +193,13 @@ TEST(ObjectMovesBeforeClearingWeakField) {
HandleScope outer_scope(isolate);
Handle<FeedbackVector> fv =
CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InNewSpace(*fv));
+ CHECK(Heap::InYoungGeneration(*fv));
FeedbackVector fv_location = *fv;
{
HandleScope inner_scope(isolate);
// Create a new FixedArray which the FeedbackVector will point to.
Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
- CHECK(Heap::InNewSpace(*fixed_array));
+ CHECK(Heap::InYoungGeneration(*fixed_array));
fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*fixed_array));
// inner_scope will go out of scope, so when marking the next time,
// *fixed_array will stay white.
@@ -233,12 +234,12 @@ TEST(ObjectWithWeakFieldDies) {
HandleScope outer_scope(isolate);
Handle<FeedbackVector> fv =
CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InNewSpace(*fv));
+ CHECK(Heap::InYoungGeneration(*fv));
{
HandleScope inner_scope(isolate);
// Create a new FixedArray which the FeedbackVector will point to.
Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
- CHECK(Heap::InNewSpace(*fixed_array));
+ CHECK(Heap::InYoungGeneration(*fixed_array));
fv->set_optimized_code_weak_or_smi(
HeapObjectReference::Weak(*fixed_array));
// inner_scope will go out of scope, so when marking the next time,
@@ -266,11 +267,11 @@ TEST(ObjectWithWeakReferencePromoted) {
HandleScope outer_scope(isolate);
Handle<FeedbackVector> fv =
CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InNewSpace(*fv));
+ CHECK(Heap::InYoungGeneration(*fv));
// Create a new FixedArray which the FeedbackVector will point to.
Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
- CHECK(Heap::InNewSpace(*fixed_array));
+ CHECK(Heap::InYoungGeneration(*fixed_array));
fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*fixed_array));
CcTest::CollectGarbage(NEW_SPACE);
@@ -292,13 +293,13 @@ TEST(ObjectWithClearedWeakReferencePromoted) {
HandleScope outer_scope(isolate);
Handle<FeedbackVector> fv =
CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InNewSpace(*fv));
+ CHECK(Heap::InYoungGeneration(*fv));
fv->set_optimized_code_weak_or_smi(
HeapObjectReference::ClearedValue(isolate));
CcTest::CollectGarbage(NEW_SPACE);
- CHECK(Heap::InNewSpace(*fv));
+ CHECK(Heap::InYoungGeneration(*fv));
CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
CcTest::CollectGarbage(NEW_SPACE);
@@ -323,21 +324,21 @@ TEST(WeakReferenceWriteBarrier) {
HandleScope outer_scope(isolate);
Handle<FeedbackVector> fv =
CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InNewSpace(*fv));
+ CHECK(Heap::InYoungGeneration(*fv));
{
HandleScope inner_scope(isolate);
// Create a new FixedArray which the FeedbackVector will point to.
Handle<FixedArray> fixed_array1 = factory->NewFixedArray(1);
- CHECK(Heap::InNewSpace(*fixed_array1));
+ CHECK(Heap::InYoungGeneration(*fixed_array1));
fv->set_optimized_code_weak_or_smi(
HeapObjectReference::Weak(*fixed_array1));
SimulateIncrementalMarking(heap, true);
Handle<FixedArray> fixed_array2 = factory->NewFixedArray(1);
- CHECK(Heap::InNewSpace(*fixed_array2));
+ CHECK(Heap::InYoungGeneration(*fixed_array2));
// This write will trigger the write barrier.
fv->set_optimized_code_weak_or_smi(
HeapObjectReference::Weak(*fixed_array2));
@@ -374,7 +375,7 @@ TEST(WeakArraysBasic) {
CHECK(array->IsWeakFixedArray());
CHECK(!array->IsFixedArray());
CHECK_EQ(array->length(), length);
- CHECK(Heap::InNewSpace(*array));
+ CHECK(Heap::InYoungGeneration(*array));
for (int i = 0; i < length; ++i) {
HeapObject heap_object;
@@ -481,7 +482,7 @@ TEST(WeakArrayListBasic) {
isolate, array, MaybeObjectHandle(Smi::FromInt(7), isolate));
CHECK_EQ(array->length(), 8);
- CHECK(Heap::InNewSpace(*array));
+ CHECK(Heap::InYoungGeneration(*array));
CHECK_EQ(array->Get(0), HeapObjectReference::Weak(*index0));
CHECK_EQ(array->Get(1).ToSmi().value(), 1);
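
The blanket InNewSpace-to-InYoungGeneration rename throughout test-weak-references.cc tracks the same generational split seen earlier: a young object may now live in either the regular new space or the new large-object space, so the predicate names the generation rather than a single space. A toy sketch of the union predicate follows; the tags are hypothetical, and V8's real check inspects page metadata rather than an enum:

```cpp
#include <cassert>

// Hypothetical space tags for illustration only.
enum class SpaceTag { kNewSpace, kNewLargeObjectSpace, kOldSpace };

// "Young generation" is the union of two spaces, which is why the tests
// above switched from InNewSpace() to InYoungGeneration().
bool InYoungGeneration(SpaceTag s) {
  return s == SpaceTag::kNewSpace || s == SpaceTag::kNewLargeObjectSpace;
}

int main() {
  assert(InYoungGeneration(SpaceTag::kNewSpace));
  assert(InYoungGeneration(SpaceTag::kNewLargeObjectSpace));
  assert(!InYoungGeneration(SpaceTag::kOldSpace));
  return 0;
}
```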
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 7e1d6329c8..0a169b766c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -307,11 +307,10 @@ void BytecodeExpectationsPrinter::PrintConstant(
void BytecodeExpectationsPrinter::PrintFrameSize(
std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
- const int kPointerSize = sizeof(void*);
int frame_size = bytecode_array->frame_size();
- DCHECK_EQ(frame_size % kPointerSize, 0);
- stream << "frame size: " << frame_size / kPointerSize
+ DCHECK(IsAligned(frame_size, kSystemPointerSize));
+ stream << "frame size: " << frame_size / kSystemPointerSize
<< "\nparameter count: " << bytecode_array->parameter_count() << '\n';
}
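
In the expectations printer, the hand-rolled kPointerSize (computed as sizeof(void*)) gives way to V8's kSystemPointerSize constant, and the bare modulo DCHECK becomes IsAligned. A stand-alone rendition of the computation, where the constants are stand-ins assumed equal to the host pointer width:

```cpp
#include <cassert>
#include <cstddef>
#include <iostream>

// Stand-in for V8's kSystemPointerSize; equal to sizeof(void*) on the host,
// which is why the locally computed constant could be deleted.
constexpr size_t kSystemPointerSize = sizeof(void*);

constexpr bool IsAligned(size_t value, size_t alignment) {
  return value % alignment == 0;
}

// Mirrors the updated PrintFrameSize: frame size is reported in
// pointer-sized slots after asserting it is pointer-aligned.
void PrintFrameSize(std::ostream& os, size_t frame_size, int parameter_count) {
  assert(IsAligned(frame_size, kSystemPointerSize));
  os << "frame size: " << frame_size / kSystemPointerSize
     << "\nparameter count: " << parameter_count << '\n';
}

int main() {
  PrintFrameSize(std::cout, 8 * kSystemPointerSize, 1);
  return 0;
}
```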
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index b0d3e93003..231a9050b8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -14,7 +14,7 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 190
+bytecode array length: 180
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(1),
@@ -35,7 +35,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(95),
+ B(Jump), U8(85),
B(LdaUndefined),
B(Star), R(6),
B(Mov), R(0), R(5),
@@ -53,8 +53,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(51),
- B(Jump), U8(36),
+ B(Jump), U8(41),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(4),
B(Star), R(4),
@@ -70,10 +69,6 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(2),
B(Star), R(1),
- B(Jump), U8(15),
- B(LdaSmi), I8(-1),
- B(Star), R(2),
- B(Star), R(1),
B(Jump), U8(7),
B(Star), R(2),
B(LdaZero),
@@ -111,8 +106,8 @@ constant pool: [
Smi [23],
]
handlers: [
- [20, 136, 144],
- [23, 100, 102],
+ [20, 134, 134],
+ [23, 100, 100],
]
---
@@ -122,7 +117,7 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 235
+bytecode array length: 225
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(3),
B(Mov), R(closure), R(1),
@@ -143,7 +138,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(140),
+ B(Jump), U8(130),
/* 22 S> */ B(LdaSmi), I8(42),
B(Star), R(6),
B(LdaFalse),
@@ -160,7 +155,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(95),
+ B(Jump), U8(85),
B(LdaUndefined),
B(Star), R(6),
B(Mov), R(0), R(5),
@@ -178,8 +173,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(51),
- B(Jump), U8(36),
+ B(Jump), U8(41),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(7),
B(Star), R(4),
@@ -195,10 +189,6 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(2),
B(Star), R(1),
- B(Jump), U8(15),
- B(LdaSmi), I8(-1),
- B(Star), R(2),
- B(Star), R(1),
B(Jump), U8(7),
B(Star), R(2),
B(LdaZero),
@@ -239,8 +229,8 @@ constant pool: [
Smi [23],
]
handlers: [
- [20, 181, 189],
- [23, 145, 147],
+ [20, 179, 179],
+ [23, 145, 145],
]
---
@@ -250,21 +240,21 @@ snippet: "
"
frame size: 20
parameter count: 1
-bytecode array length: 416
+bytecode array length: 406
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(3),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(3),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 17 E> */ B(StackCheck),
B(Mov), R(context), R(6),
B(Mov), R(context), R(7),
- B(Ldar), R(2),
- /* 17 E> */ B(SuspendGenerator), R(2), R(0), U8(8), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(8),
+ B(Ldar), R(0),
+ /* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(8), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(8),
B(Star), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
B(Ldar), R(8),
/* 17 E> */ B(Throw),
@@ -297,18 +287,18 @@ bytecodes: [
B(Star), R(16),
B(LdaFalse),
B(Star), R(12),
- B(Mov), R(16), R(3),
+ B(Mov), R(16), R(1),
/* 22 E> */ B(StackCheck),
- /* 31 S> */ B(Mov), R(3), R(0),
+ /* 31 S> */ B(Mov), R(1), R(3),
/* 42 S> */ B(LdaFalse),
B(Star), R(19),
- B(Mov), R(2), R(17),
- B(Mov), R(0), R(18),
+ B(Mov), R(0), R(17),
+ B(Mov), R(3), R(18),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(17), U8(3),
- /* 42 E> */ B(SuspendGenerator), R(2), R(0), U8(17), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(17),
+ /* 42 E> */ B(SuspendGenerator), R(0), R(0), U8(17), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(17),
B(Star), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
B(Ldar), R(17),
/* 42 E> */ B(Throw),
@@ -336,7 +326,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(18),
B(LdaConstant), U8(13),
B(Star), R(19),
@@ -364,15 +354,15 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(4),
B(Mov), R(14), R(5),
- B(Jump), U8(95),
+ B(Jump), U8(85),
B(LdaUndefined),
B(Star), R(9),
- B(Mov), R(2), R(8),
+ B(Mov), R(0), R(8),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(8), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(8), U8(2),
- B(ResumeGenerator), R(2), R(0), U8(8),
+ B(SuspendGenerator), R(0), R(0), U8(8), U8(2),
+ B(ResumeGenerator), R(0), R(0), U8(8),
B(Star), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(9),
B(LdaZero),
B(TestReferenceEqual), R(9),
@@ -382,8 +372,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(4),
B(Mov), R(8), R(5),
- B(Jump), U8(51),
- B(Jump), U8(36),
+ B(Jump), U8(41),
B(Star), R(8),
B(CreateCatchContext), R(8), U8(16),
B(Star), R(7),
@@ -393,16 +382,12 @@ bytecodes: [
B(PushContext), R(8),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(10),
- B(Mov), R(2), R(9),
+ B(Mov), R(0), R(9),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(9), U8(2),
B(PopContext), R(8),
B(Star), R(5),
B(LdaSmi), I8(2),
B(Star), R(4),
- B(Jump), U8(15),
- B(LdaSmi), I8(-1),
- B(Star), R(5),
- B(Star), R(4),
B(Jump), U8(7),
B(Star), R(5),
B(LdaZero),
@@ -410,7 +395,7 @@ bytecodes: [
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(0), U8(1),
B(Ldar), R(6),
B(SetPendingMessage),
B(Ldar), R(4),
@@ -420,7 +405,7 @@ bytecodes: [
B(ReThrow),
B(LdaTrue),
B(Star), R(9),
- B(Mov), R(2), R(7),
+ B(Mov), R(0), R(7),
B(Mov), R(5), R(8),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(7), U8(3),
/* 50 S> */ B(Return),
@@ -447,14 +432,14 @@ constant pool: [
Smi [6],
Smi [9],
SCOPE_INFO_TYPE,
- Smi [321],
+ Smi [311],
Smi [6],
Smi [9],
Smi [23],
]
handlers: [
- [20, 362, 370],
- [23, 326, 328],
+ [20, 360, 360],
+ [23, 326, 326],
[93, 180, 188],
[234, 247, 249],
]
@@ -467,7 +452,7 @@ snippet: "
"
frame size: 17
parameter count: 1
-bytecode array length: 482
+bytecode array length: 472
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(5),
B(Mov), R(closure), R(1),
@@ -595,7 +580,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(7), R(2),
- B(Jump), U8(95),
+ B(Jump), U8(85),
B(LdaUndefined),
B(Star), R(6),
B(Mov), R(0), R(5),
@@ -613,8 +598,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(51),
- B(Jump), U8(36),
+ B(Jump), U8(41),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(17),
B(Star), R(4),
@@ -630,10 +614,6 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(2),
B(Star), R(1),
- B(Jump), U8(15),
- B(LdaSmi), I8(-1),
- B(Star), R(2),
- B(Star), R(1),
B(Jump), U8(7),
B(Star), R(2),
B(LdaZero),
@@ -679,14 +659,14 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
SCOPE_INFO_TYPE,
- Smi [387],
- Smi [287],
+ Smi [377],
+ Smi [277],
Smi [6],
Smi [9],
Smi [23],
]
handlers: [
- [20, 428, 436],
- [23, 392, 394],
+ [20, 426, 426],
+ [23, 392, 392],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
index 6bbc4d11ba..e5c7177efc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -696,8 +696,8 @@ bytecode array length: 50
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
- B(Star), R(1),
- /* 52 S> */ B(Ldar), R(1),
+ B(Star), R(0),
+ /* 52 S> */ B(Ldar), R(0),
B(JumpIfToBooleanFalse), U8(42),
/* 45 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
@@ -705,10 +705,10 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
+ B(Star), R(2),
/* 73 S> */ B(LdaSmi), I8(1),
/* 73 E> */ B(StaCurrentContextSlot), U8(4),
- /* 102 S> */ B(Mov), R(0), R(2),
+ /* 102 S> */ B(Mov), R(2), R(1),
/* 106 S> */ B(LdaCurrentContextSlot), U8(4),
B(JumpIfToBooleanFalse), U8(6),
/* 113 S> */ B(PopContext), R(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
index 179ac8071a..b6184f084f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -109,11 +109,11 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
+ B(Star), R(1),
/* 53 S> */ B(LdaSmi), I8(10),
/* 53 E> */ B(StaCurrentContextSlot), U8(4),
- /* 85 S> */ B(Mov), R(0), R(1),
- B(Ldar), R(0),
+ /* 85 S> */ B(Mov), R(1), R(0),
+ B(Ldar), R(1),
/* 88 S> */ B(Jump), U8(2),
B(PopContext), R(2),
B(LdaUndefined),
@@ -155,10 +155,10 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(0),
+ B(Star), R(1),
/* 76 S> */ B(LdaSmi), I8(2),
/* 76 E> */ B(StaCurrentContextSlot), U8(4),
- /* 113 S> */ B(Mov), R(0), R(1),
+ /* 113 S> */ B(Mov), R(1), R(0),
/* 118 S> */ B(LdaCurrentContextSlot), U8(4),
B(JumpIfToBooleanFalse), U8(6),
/* 125 S> */ B(PopContext), R(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index 440936ffad..1dd2c099eb 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -74,7 +74,7 @@ bytecodes: [
B(LdaSmi), I8(2),
B(Star), R(4),
B(Mov), R(this), R(1),
- /* 138 E> */ B(CallRuntime), U16(Runtime::kStoreToSuper_Strict), R(1), U8(4),
+ /* 138 E> */ B(CallRuntime), U16(Runtime::kStoreToSuper), R(1), U8(4),
/* 143 S> */ B(LdaConstant), U8(0),
/* 150 E> */ B(LdaKeyedProperty), R(closure), U8(2),
B(Star), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index 27911a41c2..f79bb9457a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -30,9 +30,9 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(Star), R(4),
- B(Mov), R(5), R(0),
+ B(Mov), R(5), R(1),
B(PopContext), R(2),
- B(Mov), R(0), R(1),
+ B(Mov), R(1), R(0),
B(LdaUndefined),
/* 149 S> */ B(Return),
]
@@ -70,9 +70,9 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(Star), R(4),
- B(Mov), R(5), R(0),
+ B(Mov), R(5), R(1),
B(PopContext), R(2),
- B(Mov), R(0), R(1),
+ B(Mov), R(1), R(0),
B(LdaUndefined),
/* 149 S> */ B(Return),
]
@@ -128,9 +128,9 @@ bytecodes: [
B(Star), R(11),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(7),
B(Star), R(5),
- B(Mov), R(4), R(0),
+ B(Mov), R(4), R(1),
B(PopContext), R(3),
- B(Mov), R(0), R(1),
+ B(Mov), R(1), R(0),
B(LdaUndefined),
/* 129 S> */ B(Return),
]
@@ -174,11 +174,11 @@ bytecodes: [
B(Mov), R(4), R(6),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
B(Star), R(5),
- B(Mov), R(6), R(0),
+ B(Mov), R(6), R(1),
B(PopContext), R(3),
- B(Mov), R(0), R(1),
- /* 87 S> */ B(Ldar), R(1),
- /* 94 E> */ B(Construct), R(1), R(0), U8(0), U8(1),
+ B(Mov), R(1), R(0),
+ /* 87 S> */ B(Ldar), R(0),
+ /* 94 E> */ B(Construct), R(0), R(0), U8(0), U8(1),
/* 102 S> */ B(Return),
]
constant pool: [
@@ -225,9 +225,9 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(Star), R(4),
- B(Mov), R(5), R(0),
+ B(Mov), R(5), R(1),
B(PopContext), R(2),
- B(Mov), R(0), R(1),
+ B(Mov), R(1), R(0),
B(LdaUndefined),
/* 74 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
index 83a267c29f..d9413a1866 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
@@ -211,7 +211,7 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
@@ -221,8 +221,6 @@ bytecodes: [
/* 74 S> */ B(Return),
/* 86 S> */ B(LdaSmi), I8(2),
/* 95 S> */ B(Return),
- B(LdaUndefined),
- /* 98 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
index 73e7e0d8e0..f35ff24b39 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
@@ -9,16 +9,14 @@ wrap: yes
snippet: "
const x = 10; function f1() {return x;}
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 21
+bytecode array length: 15
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
- B(PushContext), R(1),
+ B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
/* 44 E> */ B(StaCurrentContextSlot), U8(4),
@@ -27,7 +25,6 @@ bytecodes: [
]
constant pool: [
SCOPE_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -36,16 +33,14 @@ handlers: [
snippet: "
const x = 10; function f1() {return x;} return x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 22
+bytecode array length: 16
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
- B(PushContext), R(1),
+ B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
/* 44 E> */ B(StaCurrentContextSlot), U8(4),
@@ -54,7 +49,6 @@ bytecodes: [
]
constant pool: [
SCOPE_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -63,21 +57,19 @@ handlers: [
snippet: "
const x = (x = 20); function f1() {return x;}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 32
+bytecode array length: 26
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
- B(PushContext), R(1),
+ B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(20),
- B(Star), R(2),
+ B(Star), R(1),
B(LdaCurrentContextSlot), U8(4),
- /* 47 E> */ B(ThrowReferenceErrorIfHole), U8(2),
+ /* 47 E> */ B(ThrowReferenceErrorIfHole), U8(1),
B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
/* 44 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaUndefined),
@@ -85,7 +77,6 @@ bytecodes: [
]
constant pool: [
SCOPE_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
@@ -95,16 +86,14 @@ handlers: [
snippet: "
const x = 10; x = 20; function f1() {return x;}
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 28
+bytecode array length: 22
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
- B(PushContext), R(1),
+ B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
/* 44 E> */ B(StaCurrentContextSlot), U8(4),
@@ -115,7 +104,6 @@ bytecodes: [
]
constant pool: [
SCOPE_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
index 6973d1166a..37daec48ad 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -18,7 +18,7 @@ bytecodes: [
B(CreateRestParameter),
B(Star), R(1),
/* 10 E> */ B(StackCheck),
- /* 22 S> */ B(Star), R(0),
+ B(Star), R(0),
/* 42 S> */ B(Return),
]
constant pool: [
@@ -38,8 +38,8 @@ bytecodes: [
B(CreateRestParameter),
B(Star), R(2),
/* 10 E> */ B(StackCheck),
- /* 12 S> */ B(Mov), R(arg0), R(0),
- /* 25 S> */ B(Mov), R(2), R(1),
+ B(Mov), R(arg0), R(0),
+ B(Mov), R(2), R(1),
/* 29 S> */ B(Ldar), R(1),
/* 45 S> */ B(Return),
]
@@ -60,8 +60,8 @@ bytecodes: [
B(CreateRestParameter),
B(Star), R(2),
/* 10 E> */ B(StackCheck),
- /* 12 S> */ B(Mov), R(arg0), R(0),
- /* 25 S> */ B(Mov), R(2), R(1),
+ B(Mov), R(arg0), R(0),
+ B(Mov), R(2), R(1),
/* 29 S> */ B(LdaZero),
/* 44 E> */ B(LdaKeyedProperty), R(1), U8(0),
/* 48 S> */ B(Return),
@@ -85,8 +85,8 @@ bytecodes: [
B(CreateRestParameter),
B(Star), R(2),
/* 10 E> */ B(StackCheck),
- /* 12 S> */ B(Mov), R(arg0), R(0),
- /* 25 S> */ B(Mov), R(2), R(1),
+ B(Mov), R(arg0), R(0),
+ B(Mov), R(2), R(1),
/* 29 S> */ B(LdaZero),
/* 44 E> */ B(LdaKeyedProperty), R(1), U8(1),
B(Star), R(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
index 3a2ea7d5d8..f4a7c340c4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
@@ -66,7 +66,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(13),
B(LdaConstant), U8(6),
B(Star), R(14),
@@ -115,7 +115,7 @@ snippet: "
"
frame size: 16
parameter count: 1
-bytecode array length: 266
+bytecode array length: 264
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
@@ -185,7 +185,6 @@ bytecodes: [
B(Star), R(14),
B(JumpLoop), U8(33), I8(0),
B(Mov), R(13), R(1),
- B(Ldar), R(1),
B(LdaSmi), I8(-1),
B(Star), R(10),
B(Star), R(9),
@@ -204,7 +203,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(14),
B(LdaConstant), U8(6),
B(Star), R(15),
@@ -242,8 +241,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [44, 174, 182],
- [228, 241, 243],
+ [44, 172, 180],
+ [226, 239, 241],
]
---
@@ -328,7 +327,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(15),
B(LdaConstant), U8(7),
B(Star), R(16),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 3c89cfed30..6fe59da400 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -16,13 +16,13 @@ snippet: "
"
frame size: 21
parameter count: 1
-bytecode array length: 329
+bytecode array length: 325
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 16 E> */ B(StackCheck),
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
@@ -49,12 +49,12 @@ bytecodes: [
B(Star), R(9),
/* 38 S> */ B(CallProperty0), R(5), R(6), U8(11),
B(Star), R(15),
- B(Mov), R(2), R(14),
+ B(Mov), R(0), R(14),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(14), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(14),
+ B(SuspendGenerator), R(0), R(0), U8(14), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(14),
B(Star), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(15),
B(LdaZero),
B(TestReferenceEqual), R(15),
@@ -71,9 +71,9 @@ bytecodes: [
B(Star), R(13),
B(LdaFalse),
B(Star), R(9),
- B(Mov), R(13), R(3),
+ B(Mov), R(13), R(1),
/* 23 E> */ B(StackCheck),
- /* 38 S> */ B(Mov), R(3), R(0),
+ /* 38 S> */ B(Mov), R(1), R(3),
B(Ldar), R(13),
B(JumpLoop), U8(77), I8(0),
B(LdaSmi), I8(-1),
@@ -94,7 +94,7 @@ bytecodes: [
B(JumpIfNull), U8(86),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(17),
B(LdaConstant), U8(9),
B(Star), R(18),
@@ -103,12 +103,12 @@ bytecodes: [
B(Mov), R(context), R(17),
B(CallProperty0), R(16), R(6), U8(19),
B(Star), R(19),
- B(Mov), R(2), R(18),
+ B(Mov), R(0), R(18),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(18), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(18), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(18),
+ B(SuspendGenerator), R(0), R(0), U8(18), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(18),
B(Star), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(19),
B(LdaZero),
B(TestReferenceEqual), R(19),
@@ -137,10 +137,9 @@ bytecodes: [
B(Star), R(6),
B(LdaTrue),
B(Star), R(7),
- B(Mov), R(2), R(5),
+ B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 57 S> */ B(Return),
- B(Jump), U8(30),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(10),
B(Star), R(4),
@@ -152,11 +151,9 @@ bytecodes: [
B(Star), R(7),
B(LdaTrue),
B(Star), R(8),
- B(Mov), R(2), R(6),
+ B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 57 S> */ B(Return),
- B(LdaUndefined),
- /* 57 S> */ B(Return),
]
constant pool: [
Smi [98],
@@ -172,7 +169,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 297, 299],
+ [20, 297, 297],
[77, 157, 165],
[211, 260, 262],
]
@@ -186,13 +183,13 @@ snippet: "
"
frame size: 21
parameter count: 1
-bytecode array length: 350
+bytecode array length: 346
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 16 E> */ B(StackCheck),
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
@@ -219,12 +216,12 @@ bytecodes: [
B(Star), R(9),
/* 38 S> */ B(CallProperty0), R(5), R(6), U8(11),
B(Star), R(15),
- B(Mov), R(2), R(14),
+ B(Mov), R(0), R(14),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(14), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(14),
+ B(SuspendGenerator), R(0), R(0), U8(14), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(14),
B(Star), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(15),
B(LdaZero),
B(TestReferenceEqual), R(15),
@@ -241,9 +238,9 @@ bytecodes: [
B(Star), R(13),
B(LdaFalse),
B(Star), R(9),
- B(Mov), R(13), R(3),
+ B(Mov), R(13), R(1),
/* 23 E> */ B(StackCheck),
- /* 38 S> */ B(Mov), R(3), R(0),
+ /* 38 S> */ B(Mov), R(1), R(3),
/* 56 S> */ B(LdaSmi), I8(1),
B(Mov), R(13), R(11),
B(Star), R(10),
@@ -266,7 +263,7 @@ bytecodes: [
B(JumpIfNull), U8(86),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(17),
B(LdaConstant), U8(9),
B(Star), R(18),
@@ -275,12 +272,12 @@ bytecodes: [
B(Mov), R(context), R(17),
B(CallProperty0), R(16), R(6), U8(19),
B(Star), R(19),
- B(Mov), R(2), R(18),
+ B(Mov), R(0), R(18),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(18), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(18), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(18),
+ B(SuspendGenerator), R(0), R(0), U8(18), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(18),
B(Star), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(19),
B(LdaZero),
B(TestReferenceEqual), R(19),
@@ -307,7 +304,7 @@ bytecodes: [
B(ReThrow),
B(LdaTrue),
B(Star), R(18),
- B(Mov), R(2), R(16),
+ B(Mov), R(0), R(16),
B(Mov), R(11), R(17),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(16), U8(3),
/* 68 S> */ B(Return),
@@ -315,10 +312,9 @@ bytecodes: [
B(Star), R(6),
B(LdaTrue),
B(Star), R(7),
- B(Mov), R(2), R(5),
+ B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 68 S> */ B(Return),
- B(Jump), U8(30),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(12),
B(Star), R(4),
@@ -330,11 +326,9 @@ bytecodes: [
B(Star), R(7),
B(LdaTrue),
B(Star), R(8),
- B(Mov), R(2), R(6),
+ B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 68 S> */ B(Return),
- B(LdaUndefined),
- /* 68 S> */ B(Return),
]
constant pool: [
Smi [98],
@@ -352,7 +346,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 318, 320],
+ [20, 318, 318],
[77, 161, 169],
[215, 264, 266],
]
@@ -369,13 +363,13 @@ snippet: "
"
frame size: 21
parameter count: 1
-bytecode array length: 345
+bytecode array length: 341
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 16 E> */ B(StackCheck),
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
@@ -402,12 +396,12 @@ bytecodes: [
B(Star), R(9),
/* 38 S> */ B(CallProperty0), R(5), R(6), U8(11),
B(Star), R(15),
- B(Mov), R(2), R(14),
+ B(Mov), R(0), R(14),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(14), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(14),
+ B(SuspendGenerator), R(0), R(0), U8(14), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(14),
B(Star), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(15),
B(LdaZero),
B(TestReferenceEqual), R(15),
@@ -424,15 +418,15 @@ bytecodes: [
B(Star), R(13),
B(LdaFalse),
B(Star), R(9),
- B(Mov), R(13), R(3),
+ B(Mov), R(13), R(1),
/* 23 E> */ B(StackCheck),
- /* 38 S> */ B(Mov), R(3), R(0),
+ /* 38 S> */ B(Mov), R(1), R(3),
/* 63 S> */ B(LdaSmi), I8(10),
- /* 69 E> */ B(TestEqual), R(0), U8(17),
+ /* 69 E> */ B(TestEqual), R(3), U8(17),
B(JumpIfFalse), U8(4),
/* 76 S> */ B(Jump), U8(11),
/* 90 S> */ B(LdaSmi), I8(20),
- /* 96 E> */ B(TestEqual), R(0), U8(18),
+ /* 96 E> */ B(TestEqual), R(3), U8(18),
B(JumpIfFalse), U8(4),
/* 103 S> */ B(Jump), U8(5),
B(JumpLoop), U8(93), I8(0),
@@ -454,7 +448,7 @@ bytecodes: [
B(JumpIfNull), U8(86),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(17),
B(LdaConstant), U8(9),
B(Star), R(18),
@@ -463,12 +457,12 @@ bytecodes: [
B(Mov), R(context), R(17),
B(CallProperty0), R(16), R(6), U8(21),
B(Star), R(19),
- B(Mov), R(2), R(18),
+ B(Mov), R(0), R(18),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(18), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(18), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(18),
+ B(SuspendGenerator), R(0), R(0), U8(18), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(18),
B(Star), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(19),
B(LdaZero),
B(TestReferenceEqual), R(19),
@@ -497,10 +491,9 @@ bytecodes: [
B(Star), R(6),
B(LdaTrue),
B(Star), R(7),
- B(Mov), R(2), R(5),
+ B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 114 S> */ B(Return),
- B(Jump), U8(30),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(10),
B(Star), R(4),
@@ -512,11 +505,9 @@ bytecodes: [
B(Star), R(7),
B(LdaTrue),
B(Star), R(8),
- B(Mov), R(2), R(6),
+ B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 114 S> */ B(Return),
- B(LdaUndefined),
- /* 114 S> */ B(Return),
]
constant pool: [
Smi [98],
@@ -532,7 +523,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 313, 315],
+ [20, 313, 313],
[77, 173, 181],
[227, 276, 278],
]
@@ -547,7 +538,7 @@ snippet: "
"
frame size: 16
parameter count: 1
-bytecode array length: 265
+bytecode array length: 261
bytecodes: [
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
@@ -609,7 +600,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(14),
B(LdaConstant), U8(8),
B(Star), R(15),
@@ -647,7 +638,6 @@ bytecodes: [
B(Mov), R(0), R(3),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
/* 96 S> */ B(Return),
- B(Jump), U8(30),
B(Star), R(3),
B(CreateCatchContext), R(3), U8(11),
B(Star), R(2),
@@ -662,8 +652,6 @@ bytecodes: [
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
/* 96 S> */ B(Return),
- B(LdaUndefined),
- /* 96 S> */ B(Return),
]
constant pool: [
OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
@@ -680,7 +668,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [16, 233, 235],
+ [16, 233, 233],
[59, 112, 120],
[166, 179, 181],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 67f5c389e6..571002d16e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -62,7 +62,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(12),
B(LdaConstant), U8(6),
B(Star), R(13),
@@ -165,7 +165,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(13),
B(LdaConstant), U8(6),
B(Star), R(14),
@@ -278,7 +278,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(12),
B(LdaConstant), U8(6),
B(Star), R(13),
@@ -384,7 +384,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(12),
B(LdaConstant), U8(8),
B(Star), R(13),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 33cccfc896..2672e0688e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -42,10 +42,10 @@ bytecodes: [
B(Star), R(12),
B(LdaFalse),
B(Star), R(8),
- B(Mov), R(12), R(3),
+ B(Mov), R(12), R(0),
/* 20 E> */ B(StackCheck),
- /* 29 S> */ B(Mov), R(3), R(1),
- /* 49 S> */ B(Mov), R(1), R(0),
+ /* 29 S> */ B(Mov), R(0), R(2),
+ /* 49 S> */ B(Mov), R(2), R(3),
B(Ldar), R(12),
B(JumpLoop), U8(43), I8(0),
B(LdaSmi), I8(-1),
@@ -66,7 +66,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(14),
B(LdaConstant), U8(5),
B(Star), R(15),
@@ -203,7 +203,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(15),
B(LdaConstant), U8(10),
B(Star), R(16),
@@ -286,19 +286,19 @@ bytecodes: [
B(Star), R(10),
B(LdaFalse),
B(Star), R(6),
- B(Mov), R(10), R(1),
+ B(Mov), R(10), R(0),
/* 20 E> */ B(StackCheck),
B(CreateBlockContext), U8(4),
B(PushContext), R(11),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- /* 29 S> */ B(Ldar), R(1),
+ /* 29 S> */ B(Ldar), R(0),
/* 29 E> */ B(StaCurrentContextSlot), U8(4),
/* 41 S> */ B(CreateClosure), U8(5), U8(12), U8(2),
B(Star), R(12),
/* 67 E> */ B(CallUndefinedReceiver0), R(12), U8(13),
B(PopContext), R(11),
- B(Mov), R(1), R(10),
+ B(Mov), R(0), R(10),
B(JumpLoop), U8(60), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(8),
@@ -318,7 +318,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(13),
B(LdaConstant), U8(7),
B(Star), R(14),
@@ -397,7 +397,7 @@ bytecodes: [
B(Star), R(14),
B(LdaFalse),
B(Star), R(10),
- B(Mov), R(14), R(5),
+ B(Mov), R(14), R(0),
/* 20 E> */ B(StackCheck),
/* 36 S> */ B(Ldar), R(14),
B(JumpIfNull), U8(4),
@@ -405,12 +405,12 @@ bytecodes: [
/* 29 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
B(Star), R(15),
/* 31 S> */ B(LdaNamedProperty), R(15), U8(4), U8(12),
- B(Star), R(1),
+ B(Star), R(3),
/* 34 S> */ B(LdaNamedProperty), R(15), U8(5), U8(14),
- B(Star), R(2),
- /* 56 S> */ B(Ldar), R(2),
- /* 58 E> */ B(Add), R(1), U8(16),
- B(Star), R(0),
+ B(Star), R(4),
+ /* 56 S> */ B(Ldar), R(4),
+ /* 58 E> */ B(Add), R(3), U8(16),
+ B(Star), R(5),
B(JumpLoop), U8(67), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(12),
@@ -430,7 +430,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(16),
B(LdaConstant), U8(7),
B(Star), R(17),
@@ -484,16 +484,16 @@ frame size: 17
parameter count: 2
bytecode array length: 214
bytecodes: [
- B(SwitchOnGeneratorState), R(3), U8(0), U8(1),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(5),
B(Mov), R(this), R(6),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
- B(Star), R(3),
+ B(Star), R(0),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(3), R(0), U8(5), U8(0),
- B(ResumeGenerator), R(3), R(0), U8(5),
+ /* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(5),
B(Star), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(5),
/* 11 E> */ B(Throw),
@@ -523,10 +523,10 @@ bytecodes: [
B(Star), R(13),
B(LdaFalse),
B(Star), R(9),
- B(Mov), R(13), R(4),
+ B(Mov), R(13), R(1),
/* 21 E> */ B(StackCheck),
- /* 30 S> */ B(Mov), R(4), R(1),
- /* 50 S> */ B(Mov), R(1), R(0),
+ /* 30 S> */ B(Mov), R(1), R(3),
+ /* 50 S> */ B(Mov), R(3), R(4),
B(Ldar), R(13),
B(JumpLoop), U8(43), I8(0),
B(LdaSmi), I8(-1),
@@ -547,7 +547,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(15),
B(LdaConstant), U8(8),
B(Star), R(16),
@@ -602,16 +602,16 @@ frame size: 16
parameter count: 2
bytecode array length: 258
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(4),
+ /* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(4),
B(Star), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(4),
/* 11 E> */ B(Throw),
@@ -641,17 +641,17 @@ bytecodes: [
B(Star), R(12),
B(LdaFalse),
B(Star), R(8),
- B(Mov), R(12), R(3),
+ B(Mov), R(12), R(1),
/* 21 E> */ B(StackCheck),
- /* 30 S> */ B(Mov), R(3), R(0),
+ /* 30 S> */ B(Mov), R(1), R(3),
/* 40 S> */ B(LdaFalse),
B(Star), R(14),
- B(Mov), R(0), R(13),
+ B(Mov), R(3), R(13),
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(13), U8(2),
- /* 40 E> */ B(SuspendGenerator), R(2), R(0), U8(13), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(13),
+ /* 40 E> */ B(SuspendGenerator), R(0), R(0), U8(13), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(13),
B(Star), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
B(Ldar), R(13),
/* 40 E> */ B(Throw),
@@ -679,7 +679,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(14),
B(LdaConstant), U8(11),
B(Star), R(15),
@@ -739,12 +739,12 @@ snippet: "
"
frame size: 18
parameter count: 2
-bytecode array length: 232
+bytecode array length: 228
bytecodes: [
B(Mov), R(closure), R(5),
B(Mov), R(this), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(5), U8(2),
- B(Star), R(3),
+ B(Star), R(0),
/* 16 E> */ B(StackCheck),
B(Mov), R(context), R(5),
/* 40 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
@@ -771,10 +771,10 @@ bytecodes: [
B(Star), R(14),
B(LdaFalse),
B(Star), R(10),
- B(Mov), R(14), R(4),
+ B(Mov), R(14), R(1),
/* 26 E> */ B(StackCheck),
- /* 35 S> */ B(Mov), R(4), R(1),
- /* 55 S> */ B(Mov), R(1), R(0),
+ /* 35 S> */ B(Mov), R(1), R(3),
+ /* 55 S> */ B(Mov), R(3), R(4),
B(Ldar), R(14),
B(JumpLoop), U8(43), I8(0),
B(LdaSmi), I8(-1),
@@ -795,7 +795,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(16),
B(LdaConstant), U8(5),
B(Star), R(17),
@@ -824,10 +824,9 @@ bytecodes: [
B(Star), R(7),
B(LdaFalse),
B(Star), R(8),
- B(Mov), R(3), R(6),
+ B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(6), U8(3),
/* 60 S> */ B(Return),
- B(Jump), U8(30),
B(Star), R(6),
B(CreateCatchContext), R(6), U8(6),
B(Star), R(5),
@@ -839,11 +838,9 @@ bytecodes: [
B(Star), R(8),
B(LdaFalse),
B(Star), R(9),
- B(Mov), R(3), R(7),
+ B(Mov), R(0), R(7),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(7), U8(3),
/* 60 S> */ B(Return),
- B(LdaUndefined),
- /* 60 S> */ B(Return),
]
constant pool: [
SYMBOL_TYPE,
@@ -855,7 +852,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [16, 200, 202],
+ [16, 200, 200],
[50, 96, 104],
[150, 163, 165],
]
@@ -869,13 +866,13 @@ snippet: "
"
frame size: 17
parameter count: 2
-bytecode array length: 268
+bytecode array length: 264
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(1),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 16 E> */ B(StackCheck),
B(Mov), R(context), R(4),
/* 40 S> */ B(LdaNamedProperty), R(arg0), U8(1), U8(0),
@@ -902,16 +899,16 @@ bytecodes: [
B(Star), R(13),
B(LdaFalse),
B(Star), R(9),
- B(Mov), R(13), R(3),
+ B(Mov), R(13), R(1),
/* 26 E> */ B(StackCheck),
- /* 35 S> */ B(Mov), R(3), R(0),
- /* 45 S> */ B(Mov), R(2), R(14),
- B(Mov), R(0), R(15),
+ /* 35 S> */ B(Mov), R(1), R(3),
+ /* 45 S> */ B(Mov), R(0), R(14),
+ B(Mov), R(3), R(15),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
- /* 45 E> */ B(SuspendGenerator), R(2), R(0), U8(14), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(14),
+ /* 45 E> */ B(SuspendGenerator), R(0), R(0), U8(14), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(14),
B(Star), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(15),
B(LdaZero),
B(TestReferenceEqual), R(15),
@@ -938,7 +935,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(15),
B(LdaConstant), U8(6),
B(Star), R(16),
@@ -967,10 +964,9 @@ bytecodes: [
B(Star), R(6),
B(LdaTrue),
B(Star), R(7),
- B(Mov), R(2), R(5),
+ B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 54 S> */ B(Return),
- B(Jump), U8(30),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(7),
B(Star), R(4),
@@ -982,11 +978,9 @@ bytecodes: [
B(Star), R(7),
B(LdaTrue),
B(Star), R(8),
- B(Mov), R(2), R(6),
+ B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 54 S> */ B(Return),
- B(LdaUndefined),
- /* 54 S> */ B(Return),
]
constant pool: [
Smi [107],
@@ -999,7 +993,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 236, 238],
+ [20, 236, 236],
[54, 132, 140],
[186, 199, 201],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index f6520129c1..77b1924c73 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -102,16 +102,16 @@ frame size: 16
parameter count: 1
bytecode array length: 261
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(4),
+ /* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(4),
B(Star), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(4),
/* 11 E> */ B(Throw),
@@ -142,17 +142,17 @@ bytecodes: [
B(Star), R(12),
B(LdaFalse),
B(Star), R(8),
- B(Mov), R(12), R(3),
+ B(Mov), R(12), R(1),
/* 16 E> */ B(StackCheck),
- /* 25 S> */ B(Mov), R(3), R(0),
+ /* 25 S> */ B(Mov), R(1), R(3),
/* 36 S> */ B(LdaFalse),
B(Star), R(14),
- B(Mov), R(0), R(13),
+ B(Mov), R(3), R(13),
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(13), U8(2),
- /* 36 E> */ B(SuspendGenerator), R(2), R(0), U8(13), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(13),
+ /* 36 E> */ B(SuspendGenerator), R(0), R(0), U8(13), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(13),
B(Star), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
B(Ldar), R(13),
/* 36 E> */ B(Throw),
@@ -180,7 +180,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(154),
+ B(Wide), B(LdaSmi), I16(155),
B(Star), R(14),
B(LdaConstant), U8(12),
B(Star), R(15),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
index e68211a189..46d972af0c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
@@ -147,7 +147,7 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 19
+bytecode array length: 17
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 18 S> */ B(LdaZero),
@@ -157,8 +157,6 @@ bytecodes: [
/* 47 S> */ B(Return),
/* 63 S> */ B(Wide), B(LdaSmi), I16(-200),
/* 75 S> */ B(Return),
- B(LdaUndefined),
- /* 80 S> */ B(Return),
]
constant pool: [
]
@@ -171,11 +169,11 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 14
+bytecode array length: 15
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(Ldar), R(arg1),
- /* 25 E> */ B(TestIn), R(arg0),
+ /* 25 E> */ B(TestIn), R(arg0), U8(0),
B(JumpIfFalse), U8(7),
/* 33 S> */ B(Wide), B(LdaSmi), I16(200),
/* 44 S> */ B(Return),
@@ -258,7 +256,7 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 29
+bytecode array length: 27
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 24 S> */ B(LdaZero),
@@ -273,8 +271,6 @@ bytecodes: [
/* 1092 S> */ B(Return),
/* 1102 S> */ B(Wide), B(LdaSmi), I16(-200),
/* 1114 S> */ B(Return),
- B(LdaUndefined),
- /* 1117 S> */ B(Return),
]
constant pool: [
HEAP_NUMBER_TYPE [0.01],
@@ -357,7 +353,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 26
+bytecode array length: 24
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 25 S> */ B(LdaZero),
@@ -371,8 +367,6 @@ bytecodes: [
/* 1087 S> */ B(Return),
/* 1097 S> */ B(Wide), B(LdaSmi), I16(-200),
/* 1109 S> */ B(Return),
- B(LdaUndefined),
- /* 1112 S> */ B(Return),
]
constant pool: [
]
@@ -396,7 +390,7 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 82
+bytecode array length: 83
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 21 S> */ B(Ldar), R(arg1),
@@ -430,12 +424,12 @@ bytecodes: [
/* 174 S> */ B(LdaSmi), I8(1),
/* 183 S> */ B(Return),
/* 188 S> */ B(Ldar), R(arg1),
- /* 194 E> */ B(TestIn), R(arg0),
+ /* 194 E> */ B(TestIn), R(arg0), U8(6),
B(JumpIfFalse), U8(5),
/* 202 S> */ B(LdaSmi), I8(1),
/* 211 S> */ B(Return),
/* 216 S> */ B(Ldar), R(arg1),
- /* 222 E> */ B(TestInstanceOf), R(arg0), U8(6),
+ /* 222 E> */ B(TestInstanceOf), R(arg0), U8(8),
B(JumpIfFalse), U8(5),
/* 238 S> */ B(LdaSmi), I8(1),
/* 247 S> */ B(Return),
@@ -461,7 +455,7 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 25 S> */ B(LdaZero),
@@ -471,8 +465,6 @@ bytecodes: [
/* 53 S> */ B(Return),
/* 69 S> */ B(LdaSmi), I8(-20),
/* 80 S> */ B(Return),
- B(LdaUndefined),
- /* 85 S> */ B(Return),
]
constant pool: [
]
@@ -494,7 +486,7 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 36
+bytecode array length: 34
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 21 S> */ B(Ldar), R(arg1),
@@ -515,8 +507,6 @@ bytecodes: [
/* 102 S> */ B(Return),
/* 118 S> */ B(LdaSmi), I8(-1),
/* 128 S> */ B(Return),
- B(LdaUndefined),
- /* 133 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
index 62d07cef7e..e6bf0f717e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
@@ -9,16 +9,14 @@ wrap: yes
snippet: "
let x = 10; function f1() {return x;}
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 21
+bytecode array length: 15
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
- B(PushContext), R(1),
+ B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
/* 42 E> */ B(StaCurrentContextSlot), U8(4),
@@ -27,7 +25,6 @@ bytecodes: [
]
constant pool: [
SCOPE_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -36,16 +33,14 @@ handlers: [
snippet: "
let x = 10; function f1() {return x;} return x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 22
+bytecode array length: 16
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
- B(PushContext), R(1),
+ B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
/* 42 E> */ B(StaCurrentContextSlot), U8(4),
@@ -54,7 +49,6 @@ bytecodes: [
]
constant pool: [
SCOPE_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -63,22 +57,20 @@ handlers: [
snippet: "
let x = (x = 20); function f1() {return x;}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 31
+bytecode array length: 25
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
- B(PushContext), R(1),
+ B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(20),
- B(Star), R(2),
+ B(Star), R(1),
B(LdaCurrentContextSlot), U8(4),
- /* 45 E> */ B(ThrowReferenceErrorIfHole), U8(2),
- B(Ldar), R(2),
+ /* 45 E> */ B(ThrowReferenceErrorIfHole), U8(1),
+ B(Ldar), R(1),
B(StaCurrentContextSlot), U8(4),
/* 42 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaUndefined),
@@ -86,7 +78,6 @@ bytecodes: [
]
constant pool: [
SCOPE_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
@@ -96,16 +87,14 @@ handlers: [
snippet: "
let x = 10; x = 20; function f1() {return x;}
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 25
+bytecode array length: 19
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
- B(PushContext), R(1),
+ B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
/* 42 E> */ B(StaCurrentContextSlot), U8(4),
@@ -116,7 +105,6 @@ bytecodes: [
]
constant pool: [
SCOPE_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index 629e4f36d1..55a56a88b3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -99,7 +99,7 @@ frame size: 6
parameter count: 2
bytecode array length: 92
bytecodes: [
- B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
B(Star), R(4),
B(Mov), R(arg0), R(3),
@@ -108,12 +108,12 @@ bytecodes: [
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(1), R(0), U8(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(4),
B(Star), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(4),
/* 0 E> */ B(Throw),
@@ -126,14 +126,14 @@ bytecodes: [
B(Star), R(5),
/* 32 E> */ B(CallUndefinedReceiver1), R(4), R(5), U8(0),
/* 47 S> */ B(LdaUndefined),
- B(Star), R(0),
+ B(Star), R(2),
/* 52 S> */ B(LdaModuleVariable), I8(-1), U8(0),
B(ThrowReferenceErrorIfHole), U8(4),
B(Star), R(4),
B(LdaSmi), I8(42),
B(Star), R(5),
/* 52 E> */ B(CallUndefinedReceiver1), R(4), R(5), U8(2),
- B(Star), R(2),
+ B(Star), R(1),
/* 65 S> */ B(Return),
]
constant pool: [
@@ -156,7 +156,7 @@ frame size: 6
parameter count: 2
bytecode array length: 90
bytecodes: [
- B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
B(Star), R(4),
B(Mov), R(arg0), R(3),
@@ -165,12 +165,12 @@ bytecodes: [
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(1), R(0), U8(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(4),
B(Star), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(4),
/* 0 E> */ B(Throw),
@@ -182,14 +182,14 @@ bytecodes: [
B(Inc), U8(0),
/* 24 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 34 S> */ B(LdaUndefined),
- B(Star), R(0),
+ B(Star), R(2),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
B(Star), R(4),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(0),
- B(Mov), R(4), R(2),
- B(Ldar), R(2),
+ B(Mov), R(4), R(1),
+ B(Ldar), R(1),
/* 50 S> */ B(Return),
]
constant pool: [
@@ -211,7 +211,7 @@ frame size: 6
parameter count: 2
bytecode array length: 96
bytecodes: [
- B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
B(Star), R(4),
B(Mov), R(arg0), R(3),
@@ -220,15 +220,15 @@ bytecodes: [
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
- B(Ldar), R(1),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(1), R(0), U8(4),
+ B(Ldar), R(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(4),
B(Star), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(4),
/* 0 E> */ B(Throw),
@@ -240,14 +240,14 @@ bytecodes: [
B(Inc), U8(0),
/* 24 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 34 S> */ B(LdaUndefined),
- B(Star), R(0),
+ B(Star), R(2),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
B(Star), R(4),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(0),
- B(Mov), R(4), R(2),
- B(Ldar), R(2),
+ B(Mov), R(4), R(1),
+ B(Ldar), R(1),
/* 50 S> */ B(Return),
]
constant pool: [
@@ -269,7 +269,7 @@ frame size: 6
parameter count: 2
bytecode array length: 100
bytecodes: [
- B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
B(Star), R(4),
B(Mov), R(arg0), R(3),
@@ -278,15 +278,15 @@ bytecodes: [
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
- B(Ldar), R(1),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(1), R(0), U8(4),
+ B(Ldar), R(0),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(4),
B(Star), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(4),
/* 0 E> */ B(Throw),
@@ -298,14 +298,14 @@ bytecodes: [
B(Inc), U8(0),
/* 26 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
/* 36 S> */ B(LdaUndefined),
- B(Star), R(0),
+ B(Star), R(2),
/* 41 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
B(Star), R(4),
B(Inc), U8(1),
/* 44 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- B(Mov), R(4), R(2),
- B(Ldar), R(2),
+ B(Mov), R(4), R(1),
+ B(Ldar), R(1),
/* 52 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index aa89a500db..18fd7989c5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -26,13 +26,13 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(Star), R(4),
- B(Mov), R(5), R(0),
+ B(Mov), R(5), R(1),
B(PopContext), R(2),
- B(Mov), R(0), R(1),
+ B(Mov), R(1), R(0),
/* 89 S> */ B(CreateArrayLiteral), U8(3), U8(1), U8(37),
B(Star), R(3),
- B(Ldar), R(1),
- /* 89 E> */ B(ConstructWithSpread), R(1), R(3), U8(1), U8(2),
+ B(Ldar), R(0),
+ /* 89 E> */ B(ConstructWithSpread), R(0), R(3), U8(1), U8(2),
B(LdaUndefined),
/* 110 S> */ B(Return),
]
@@ -66,15 +66,15 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(Star), R(4),
- B(Mov), R(5), R(0),
+ B(Mov), R(5), R(1),
B(PopContext), R(2),
- B(Mov), R(0), R(1),
+ B(Mov), R(1), R(0),
/* 89 S> */ B(LdaZero),
B(Star), R(3),
B(CreateArrayLiteral), U8(3), U8(1), U8(37),
B(Star), R(4),
- B(Ldar), R(1),
- /* 89 E> */ B(ConstructWithSpread), R(1), R(3), U8(2), U8(2),
+ B(Ldar), R(0),
+ /* 89 E> */ B(ConstructWithSpread), R(0), R(3), U8(2), U8(2),
B(LdaUndefined),
/* 113 S> */ B(Return),
]
@@ -108,9 +108,9 @@ bytecodes: [
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
B(Star), R(4),
- B(Mov), R(5), R(0),
+ B(Mov), R(5), R(1),
B(PopContext), R(2),
- B(Mov), R(0), R(1),
+ B(Mov), R(1), R(0),
/* 89 S> */ B(CreateArrayLiteral), U8(3), U8(1), U8(37),
B(Star), R(4),
B(LdaConstant), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index 9070a36805..be6ef0138b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -240,9 +240,9 @@ handlers: [
snippet: "
var a = 1; return { 1: a };
"
-frame size: 6
+frame size: 5
parameter count: 1
-bytecode array length: 32
+bytecode array length: 29
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
@@ -251,11 +251,9 @@ bytecodes: [
B(Star), R(1),
B(LdaSmi), I8(1),
B(Star), R(3),
- B(LdaZero),
- B(Star), R(5),
B(Mov), R(1), R(2),
B(Mov), R(0), R(4),
- /* 57 E> */ B(CallRuntime), U16(Runtime::kSetKeyedProperty), R(2), U8(4),
+ /* 57 E> */ B(CallRuntime), U16(Runtime::kSetKeyedProperty), R(2), U8(3),
B(Ldar), R(2),
/* 61 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
index 71a7119326..5afcdde7f6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
@@ -47,12 +47,12 @@ bytecodes: [
B(Mov), R(5), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
B(Star), R(6),
- B(Mov), R(7), R(1),
+ B(Mov), R(7), R(3),
B(CreateClosure), U8(4), U8(1), U8(2),
B(Star), R(7),
B(StaNamedProperty), R(5), U8(5), U8(2),
B(PopContext), R(4),
- B(Mov), R(1), R(2),
+ B(Mov), R(3), R(0),
/* 38 E> */ B(CreateBlockContext), U8(6),
B(PushContext), R(4),
B(LdaTheHole),
@@ -72,16 +72,16 @@ bytecodes: [
B(Mov), R(5), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
B(Star), R(6),
- B(Mov), R(7), R(0),
+ B(Mov), R(7), R(2),
B(CreateClosure), U8(9), U8(5), U8(2),
B(Star), R(7),
B(StaNamedProperty), R(5), U8(5), U8(6),
B(PopContext), R(4),
- B(Mov), R(0), R(3),
- /* 136 S> */ B(Ldar), R(1),
- /* 136 E> */ B(Construct), R(1), R(0), U8(0), U8(8),
- /* 145 S> */ B(Ldar), R(0),
- /* 145 E> */ B(Construct), R(0), R(0), U8(0), U8(10),
+ B(Mov), R(2), R(1),
+ /* 136 S> */ B(Ldar), R(3),
+ /* 136 E> */ B(Construct), R(3), R(0), U8(0), U8(8),
+ /* 145 S> */ B(Ldar), R(2),
+ /* 145 E> */ B(Construct), R(2), R(0), U8(0), U8(10),
B(LdaUndefined),
/* 154 S> */ B(Return),
]
@@ -167,12 +167,12 @@ bytecodes: [
B(Mov), R(13), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(3),
B(Star), R(8),
- B(Mov), R(9), R(2),
+ B(Mov), R(9), R(5),
B(CreateClosure), U8(6), U8(2), U8(2),
B(Star), R(9),
B(StaNamedProperty), R(7), U8(7), U8(3),
B(PopContext), R(6),
- B(Mov), R(2), R(3),
+ B(Mov), R(5), R(0),
/* 38 E> */ B(CreateBlockContext), U8(8),
B(PushContext), R(6),
B(LdaTheHole),
@@ -212,12 +212,12 @@ bytecodes: [
B(Mov), R(13), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
B(Star), R(8),
- B(Mov), R(9), R(1),
+ B(Mov), R(9), R(4),
B(CreateClosure), U8(16), U8(9), U8(2),
B(Star), R(9),
B(StaNamedProperty), R(7), U8(7), U8(10),
B(PopContext), R(6),
- B(Mov), R(1), R(4),
+ B(Mov), R(4), R(1),
/* 140 E> */ B(CreateBlockContext), U8(17),
B(PushContext), R(6),
B(LdaTheHole),
@@ -233,21 +233,21 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(11), U8(1),
B(StaCurrentContextSlot), U8(4),
B(Mov), R(7), R(9),
- B(Mov), R(1), R(10),
+ B(Mov), R(4), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(3),
B(Star), R(8),
- B(Mov), R(9), R(0),
+ B(Mov), R(9), R(3),
B(CreateClosure), U8(20), U8(13), U8(2),
B(Star), R(9),
B(StaNamedProperty), R(7), U8(7), U8(14),
B(PopContext), R(6),
- B(Mov), R(0), R(5),
- /* 430 S> */ B(Ldar), R(2),
- /* 430 E> */ B(Construct), R(2), R(0), U8(0), U8(16),
- /* 439 S> */ B(Ldar), R(1),
- /* 439 E> */ B(Construct), R(1), R(0), U8(0), U8(18),
- /* 448 S> */ B(Ldar), R(0),
- /* 448 E> */ B(Construct), R(0), R(0), U8(0), U8(20),
+ B(Mov), R(3), R(2),
+ /* 430 S> */ B(Ldar), R(5),
+ /* 430 E> */ B(Construct), R(5), R(0), U8(0), U8(16),
+ /* 439 S> */ B(Ldar), R(4),
+ /* 439 E> */ B(Construct), R(4), R(0), U8(0), U8(18),
+ /* 448 S> */ B(Ldar), R(3),
+ /* 448 E> */ B(Construct), R(3), R(0), U8(0), U8(20),
B(LdaUndefined),
/* 458 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
index cb09c45b1a..23cce9fbab 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
@@ -43,12 +43,12 @@ bytecodes: [
B(Mov), R(5), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
B(Star), R(6),
- B(Mov), R(7), R(1),
+ B(Mov), R(7), R(3),
B(CreateClosure), U8(4), U8(1), U8(2),
B(Star), R(7),
B(StaNamedProperty), R(5), U8(5), U8(2),
B(PopContext), R(4),
- B(Mov), R(1), R(2),
+ B(Mov), R(3), R(0),
/* 38 E> */ B(CreateBlockContext), U8(6),
B(PushContext), R(4),
B(LdaTheHole),
@@ -65,16 +65,16 @@ bytecodes: [
B(Mov), R(5), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(4),
B(Star), R(6),
- B(Mov), R(7), R(0),
+ B(Mov), R(7), R(2),
B(CreateClosure), U8(9), U8(5), U8(2),
B(Star), R(7),
B(StaNamedProperty), R(5), U8(5), U8(6),
B(PopContext), R(4),
- B(Mov), R(0), R(3),
- /* 120 S> */ B(Ldar), R(1),
- /* 120 E> */ B(Construct), R(1), R(0), U8(0), U8(8),
- /* 129 S> */ B(Ldar), R(0),
- /* 129 E> */ B(Construct), R(0), R(0), U8(0), U8(10),
+ B(Mov), R(2), R(1),
+ /* 120 S> */ B(Ldar), R(3),
+ /* 120 E> */ B(Construct), R(3), R(0), U8(0), U8(8),
+ /* 129 S> */ B(Ldar), R(2),
+ /* 129 E> */ B(Construct), R(2), R(0), U8(0), U8(10),
B(LdaUndefined),
/* 138 S> */ B(Return),
]
@@ -152,12 +152,12 @@ bytecodes: [
B(Mov), R(13), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(4),
B(Star), R(8),
- B(Mov), R(9), R(2),
+ B(Mov), R(9), R(5),
B(CreateClosure), U8(6), U8(2), U8(2),
B(Star), R(9),
B(StaNamedProperty), R(7), U8(7), U8(3),
B(PopContext), R(6),
- B(Mov), R(2), R(3),
+ B(Mov), R(5), R(0),
/* 38 E> */ B(CreateBlockContext), U8(8),
B(PushContext), R(6),
B(LdaTheHole),
@@ -184,12 +184,12 @@ bytecodes: [
B(Mov), R(13), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
B(Star), R(8),
- B(Mov), R(9), R(1),
+ B(Mov), R(9), R(4),
B(CreateClosure), U8(14), U8(8), U8(2),
B(Star), R(9),
B(StaNamedProperty), R(7), U8(7), U8(9),
B(PopContext), R(6),
- B(Mov), R(1), R(4),
+ B(Mov), R(4), R(1),
/* 90 E> */ B(CreateBlockContext), U8(15),
B(PushContext), R(6),
B(LdaTheHole),
@@ -202,21 +202,21 @@ bytecodes: [
B(StaCurrentContextSlot), U8(4),
B(Star), R(11),
B(Mov), R(7), R(9),
- B(Mov), R(1), R(10),
+ B(Mov), R(4), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(4),
B(Star), R(8),
- B(Mov), R(9), R(0),
+ B(Mov), R(9), R(3),
B(CreateClosure), U8(18), U8(12), U8(2),
B(Star), R(9),
B(StaNamedProperty), R(7), U8(7), U8(13),
B(PopContext), R(6),
- B(Mov), R(0), R(5),
- /* 329 S> */ B(Ldar), R(2),
- /* 329 E> */ B(Construct), R(2), R(0), U8(0), U8(15),
- /* 338 S> */ B(Ldar), R(1),
- /* 338 E> */ B(Construct), R(1), R(0), U8(0), U8(17),
- /* 347 S> */ B(Ldar), R(0),
- /* 347 E> */ B(Construct), R(0), R(0), U8(0), U8(19),
+ B(Mov), R(3), R(2),
+ /* 329 S> */ B(Ldar), R(5),
+ /* 329 E> */ B(Construct), R(5), R(0), U8(0), U8(15),
+ /* 338 S> */ B(Ldar), R(4),
+ /* 338 E> */ B(Construct), R(4), R(0), U8(0), U8(17),
+ /* 347 S> */ B(Ldar), R(3),
+ /* 347 E> */ B(Construct), R(3), R(0), U8(0), U8(19),
B(LdaUndefined),
/* 356 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index 34e5de5443..d72bb35fee 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -19,15 +19,15 @@ bytecode array length: 26
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 30 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star), R(0),
/* 35 S> */ B(LdaSmi), I8(10),
- /* 35 E> */ B(TestLessThan), R(1), U8(0),
+ /* 35 E> */ B(TestLessThan), R(0), U8(0),
B(JumpIfFalse), U8(15),
/* 17 E> */ B(StackCheck),
- /* 56 S> */ B(Mov), R(1), R(0),
- /* 43 S> */ B(Ldar), R(0),
+ /* 56 S> */ B(Mov), R(0), R(1),
+ /* 43 S> */ B(Ldar), R(1),
B(Inc), U8(1),
- B(Star), R(1),
+ B(Star), R(0),
B(JumpLoop), U8(17), I8(0),
B(LdaUndefined),
/* 61 S> */ B(Return),
@@ -152,28 +152,28 @@ bytecode array length: 106
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 30 S> */ B(LdaZero),
+ B(Star), R(3),
B(Star), R(0),
- B(Star), R(1),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star), R(1),
/* 78 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
B(PushContext), R(4),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(Ldar), R(1),
+ B(Ldar), R(0),
B(StaCurrentContextSlot), U8(4),
B(LdaSmi), I8(1),
- B(TestEqual), R(2), U8(0),
+ B(TestEqual), R(1), U8(0),
B(JumpIfFalse), U8(7),
B(LdaZero),
- B(Star), R(2),
+ B(Star), R(1),
B(Jump), U8(8),
/* 43 S> */ B(LdaCurrentContextSlot), U8(4),
B(Inc), U8(1),
/* 43 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star), R(2),
/* 35 S> */ B(LdaCurrentContextSlot), U8(4),
B(Star), R(5),
B(LdaSmi), I8(10),
@@ -183,19 +183,19 @@ bytecodes: [
B(PopContext), R(4),
B(Jump), U8(45),
B(LdaSmi), I8(1),
- B(TestEqual), R(3), U8(3),
+ B(TestEqual), R(2), U8(3),
B(JumpIfFalse), U8(22),
/* 17 E> */ B(StackCheck),
/* 48 S> */ B(CreateClosure), U8(1), U8(4), U8(2),
B(Star), R(5),
/* 74 E> */ B(CallUndefinedReceiver0), R(5), U8(5),
B(LdaZero),
- B(Star), R(3),
+ B(Star), R(2),
B(LdaCurrentContextSlot), U8(4),
- B(Star), R(1),
+ B(Star), R(0),
B(JumpLoop), U8(24), I8(1),
B(LdaSmi), I8(1),
- /* 78 E> */ B(TestEqual), R(3), U8(7),
+ /* 78 E> */ B(TestEqual), R(2), U8(7),
B(JumpIfFalse), U8(6),
B(PopContext), R(4),
B(Jump), U8(7),
@@ -229,19 +229,19 @@ bytecodes: [
/* 26 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
B(Star), R(3),
/* 28 S> */ B(LdaNamedProperty), R(3), U8(1), U8(1),
- B(Star), R(1),
+ B(Star), R(0),
/* 31 S> */ B(LdaNamedProperty), R(3), U8(2), U8(3),
- B(Star), R(2),
+ B(Star), R(1),
/* 55 S> */ B(LdaZero),
- /* 55 E> */ B(TestGreaterThan), R(2), U8(5),
+ /* 55 E> */ B(TestGreaterThan), R(1), U8(5),
B(JumpIfFalse), U8(19),
/* 17 E> */ B(StackCheck),
- /* 75 S> */ B(Ldar), R(2),
- /* 77 E> */ B(Add), R(1), U8(6),
- B(Star), R(0),
- /* 62 S> */ B(Ldar), R(2),
- B(Dec), U8(7),
+ /* 75 S> */ B(Ldar), R(1),
+ /* 77 E> */ B(Add), R(0), U8(6),
B(Star), R(2),
+ /* 62 S> */ B(Ldar), R(1),
+ B(Dec), U8(7),
+ B(Star), R(1),
B(JumpLoop), U8(20), I8(0),
B(LdaUndefined),
/* 84 S> */ B(Return),
@@ -265,16 +265,16 @@ frame size: 5
parameter count: 1
bytecode array length: 67
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(1),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(3), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(3),
+ /* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(3),
/* 11 E> */ B(Throw),
@@ -286,8 +286,8 @@ bytecodes: [
/* 36 E> */ B(TestLessThan), R(1), U8(0),
B(JumpIfFalse), U8(15),
/* 18 E> */ B(StackCheck),
- /* 57 S> */ B(Mov), R(1), R(0),
- /* 44 S> */ B(Ldar), R(0),
+ /* 57 S> */ B(Mov), R(1), R(2),
+ /* 44 S> */ B(Ldar), R(2),
B(Inc), U8(1),
B(Star), R(1),
B(JumpLoop), U8(17), I8(0),
@@ -313,43 +313,43 @@ frame size: 4
parameter count: 1
bytecode array length: 99
bytecodes: [
- B(SwitchOnGeneratorState), R(1), U8(0), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(1), R(0), U8(2), U8(0),
- B(ResumeGenerator), R(1), R(0), U8(2),
+ /* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(2),
B(Star), R(2),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(2),
/* 11 E> */ B(Throw),
B(Ldar), R(2),
/* 56 S> */ B(Return),
/* 31 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star), R(1),
/* 36 S> */ B(LdaSmi), I8(10),
- /* 36 E> */ B(TestLessThan), R(0), U8(0),
+ /* 36 E> */ B(TestLessThan), R(1), U8(0),
B(JumpIfFalse), U8(47),
/* 18 E> */ B(StackCheck),
/* 47 S> */ B(LdaFalse),
B(Star), R(3),
- B(Mov), R(0), R(2),
+ B(Mov), R(1), R(2),
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(2), U8(2),
- /* 47 E> */ B(SuspendGenerator), R(1), R(0), U8(2), U8(1),
- B(ResumeGenerator), R(1), R(0), U8(2),
+ /* 47 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(2),
B(Star), R(2),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
B(Ldar), R(2),
/* 47 E> */ B(Throw),
B(Ldar), R(2),
/* 56 S> */ B(Return),
- /* 44 S> */ B(Ldar), R(0),
+ /* 44 S> */ B(Ldar), R(1),
B(Inc), U8(1),
- B(Star), R(0),
+ B(Star), R(1),
B(JumpLoop), U8(49), I8(0),
B(LdaUndefined),
/* 56 S> */ B(Return),
@@ -374,12 +374,12 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 85
+bytecode array length: 81
bytecodes: [
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 16 E> */ B(StackCheck),
B(Mov), R(context), R(3),
/* 36 S> */ B(LdaZero),
@@ -388,8 +388,8 @@ bytecodes: [
/* 41 E> */ B(TestLessThan), R(1), U8(0),
B(JumpIfFalse), U8(15),
/* 23 E> */ B(StackCheck),
- /* 62 S> */ B(Mov), R(1), R(0),
- /* 49 S> */ B(Ldar), R(0),
+ /* 62 S> */ B(Mov), R(1), R(2),
+ /* 49 S> */ B(Ldar), R(2),
B(Inc), U8(1),
B(Star), R(1),
B(JumpLoop), U8(17), I8(0),
@@ -397,10 +397,9 @@ bytecodes: [
B(Star), R(5),
B(LdaFalse),
B(Star), R(6),
- B(Mov), R(2), R(4),
+ B(Mov), R(0), R(4),
/* 49 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
/* 67 S> */ B(Return),
- B(Jump), U8(30),
B(Star), R(4),
B(CreateCatchContext), R(4), U8(0),
B(Star), R(3),
@@ -412,17 +411,15 @@ bytecodes: [
B(Star), R(6),
B(LdaFalse),
B(Star), R(7),
- B(Mov), R(2), R(5),
+ B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
/* 67 S> */ B(Return),
- B(LdaUndefined),
- /* 67 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [16, 53, 55],
+ [16, 53, 53],
]
---
@@ -434,46 +431,45 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 121
+bytecode array length: 117
bytecodes: [
- B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 16 E> */ B(StackCheck),
B(Mov), R(context), R(2),
/* 36 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star), R(1),
/* 41 S> */ B(LdaSmi), I8(10),
- /* 41 E> */ B(TestLessThan), R(0), U8(0),
+ /* 41 E> */ B(TestLessThan), R(1), U8(0),
B(JumpIfFalse), U8(47),
/* 23 E> */ B(StackCheck),
- /* 52 S> */ B(Mov), R(1), R(3),
- B(Mov), R(0), R(4),
+ /* 52 S> */ B(Mov), R(0), R(3),
+ B(Mov), R(1), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(3), U8(2),
- /* 52 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
- B(ResumeGenerator), R(1), R(0), U8(3),
+ /* 52 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(4),
B(LdaZero),
B(TestReferenceEqual), R(4),
B(JumpIfTrue), U8(5),
B(Ldar), R(3),
B(ReThrow),
- /* 49 S> */ B(Ldar), R(0),
+ /* 49 S> */ B(Ldar), R(1),
B(Inc), U8(1),
- B(Star), R(0),
+ B(Star), R(1),
B(JumpLoop), U8(49), I8(0),
B(LdaUndefined),
B(Star), R(4),
B(LdaTrue),
B(Star), R(5),
- B(Mov), R(1), R(3),
+ B(Mov), R(0), R(3),
/* 49 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
/* 61 S> */ B(Return),
- B(Jump), U8(30),
B(Star), R(3),
B(CreateCatchContext), R(3), U8(1),
B(Star), R(2),
@@ -485,17 +481,15 @@ bytecodes: [
B(Star), R(5),
B(LdaTrue),
B(Star), R(6),
- B(Mov), R(1), R(4),
+ B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
/* 61 S> */ B(Return),
- B(LdaUndefined),
- /* 61 S> */ B(Return),
]
constant pool: [
Smi [46],
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 89, 91],
+ [20, 89, 89],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
index da5c922456..93ba673a0e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -58,15 +58,15 @@ bytecodes: [
B(StaCurrentContextSlot), U8(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(5),
B(Star), R(6),
- B(Mov), R(5), R(1),
+ B(Mov), R(5), R(3),
B(CreateClosure), U8(6), U8(2), U8(2),
B(Star), R(7),
B(StaNamedProperty), R(5), U8(7), U8(3),
B(CreateClosure), U8(8), U8(5), U8(2),
B(Star), R(9),
- B(CallProperty0), R(9), R(1), U8(6),
+ B(CallProperty0), R(9), R(3), U8(6),
B(PopContext), R(4),
- B(Mov), R(1), R(2),
+ B(Mov), R(3), R(0),
/* 38 E> */ B(CreateBlockContext), U8(9),
B(PushContext), R(4),
B(LdaTheHole),
@@ -93,19 +93,19 @@ bytecodes: [
B(StaCurrentContextSlot), U8(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(5),
B(Star), R(6),
- B(Mov), R(5), R(0),
+ B(Mov), R(5), R(2),
B(CreateClosure), U8(12), U8(9), U8(2),
B(Star), R(7),
B(StaNamedProperty), R(5), U8(7), U8(10),
B(CreateClosure), U8(13), U8(12), U8(2),
B(Star), R(9),
- B(CallProperty0), R(9), R(0), U8(13),
+ B(CallProperty0), R(9), R(2), U8(13),
B(PopContext), R(4),
- B(Mov), R(0), R(3),
- /* 197 S> */ B(Ldar), R(2),
- /* 197 E> */ B(Construct), R(2), R(0), U8(0), U8(15),
- /* 206 S> */ B(Ldar), R(0),
- /* 206 E> */ B(Construct), R(0), R(0), U8(0), U8(17),
+ B(Mov), R(2), R(1),
+ /* 197 S> */ B(Ldar), R(0),
+ /* 197 E> */ B(Construct), R(0), R(0), U8(0), U8(15),
+ /* 206 S> */ B(Ldar), R(2),
+ /* 206 E> */ B(Construct), R(2), R(0), U8(0), U8(17),
B(LdaUndefined),
/* 215 S> */ B(Return),
]
@@ -203,15 +203,15 @@ bytecodes: [
B(StaCurrentContextSlot), U8(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
B(Star), R(8),
- B(Mov), R(7), R(2),
+ B(Mov), R(7), R(5),
B(CreateClosure), U8(8), U8(3), U8(2),
B(Star), R(9),
B(StaNamedProperty), R(7), U8(9), U8(4),
B(CreateClosure), U8(10), U8(6), U8(2),
B(Star), R(11),
- B(CallProperty0), R(11), R(2), U8(7),
+ B(CallProperty0), R(11), R(5), U8(7),
B(PopContext), R(6),
- B(Mov), R(2), R(3),
+ B(Mov), R(5), R(0),
/* 38 E> */ B(CreateBlockContext), U8(11),
B(PushContext), R(6),
B(LdaTheHole),
@@ -248,15 +248,15 @@ bytecodes: [
B(Star), R(13),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(6),
B(Star), R(8),
- B(Mov), R(7), R(1),
+ B(Mov), R(7), R(4),
B(CreateClosure), U8(17), U8(12), U8(2),
B(Star), R(9),
B(StaNamedProperty), R(7), U8(9), U8(13),
B(CreateClosure), U8(18), U8(15), U8(2),
B(Star), R(11),
- B(CallProperty0), R(11), R(1), U8(16),
+ B(CallProperty0), R(11), R(4), U8(16),
B(PopContext), R(6),
- B(Mov), R(1), R(4),
+ B(Mov), R(4), R(1),
/* 122 E> */ B(CreateBlockContext), U8(19),
B(PushContext), R(6),
B(LdaTheHole),
@@ -274,7 +274,7 @@ bytecodes: [
B(Star), R(12),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(12), U8(2),
- B(Mov), R(1), R(10),
+ B(Mov), R(4), R(10),
B(Mov), R(7), R(9),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
@@ -282,23 +282,23 @@ bytecodes: [
B(StaCurrentContextSlot), U8(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
B(Star), R(8),
- B(Mov), R(7), R(0),
+ B(Mov), R(7), R(3),
B(CreateClosure), U8(22), U8(19), U8(2),
B(Star), R(9),
B(StaNamedProperty), R(7), U8(9), U8(20),
B(CreateClosure), U8(23), U8(22), U8(2),
B(Star), R(11),
- B(Ldar), R(0),
+ B(Ldar), R(3),
B(StaNamedProperty), R(11), U8(24), U8(23),
- B(CallProperty0), R(11), R(0), U8(25),
+ B(CallProperty0), R(11), R(3), U8(25),
B(PopContext), R(6),
- B(Mov), R(0), R(5),
- /* 456 S> */ B(Ldar), R(3),
- /* 456 E> */ B(Construct), R(3), R(0), U8(0), U8(27),
- /* 465 S> */ B(Ldar), R(4),
- /* 465 E> */ B(Construct), R(4), R(0), U8(0), U8(29),
- /* 474 S> */ B(Ldar), R(0),
- /* 474 E> */ B(Construct), R(0), R(0), U8(0), U8(31),
+ B(Mov), R(3), R(2),
+ /* 456 S> */ B(Ldar), R(0),
+ /* 456 E> */ B(Construct), R(0), R(0), U8(0), U8(27),
+ /* 465 S> */ B(Ldar), R(1),
+ /* 465 E> */ B(Construct), R(1), R(0), U8(0), U8(29),
+ /* 474 S> */ B(Ldar), R(3),
+ /* 474 E> */ B(Construct), R(3), R(0), U8(0), U8(31),
B(LdaUndefined),
/* 483 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index a84807e5fc..74849d1c85 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -57,7 +57,7 @@ bytecodes: [
B(Star), R(3),
B(Mov), R(closure), R(1),
/* 128 E> */ B(StackCheck),
- /* 136 S> */ B(Mov), R(3), R(2),
+ B(Mov), R(3), R(2),
/* 140 S> */ B(Ldar), R(closure),
B(GetSuperConstructor), R(5),
B(LdaSmi), I8(1),
@@ -99,7 +99,7 @@ bytecodes: [
B(Star), R(3),
B(Mov), R(closure), R(1),
/* 128 E> */ B(StackCheck),
- /* 136 S> */ B(Mov), R(3), R(2),
+ B(Mov), R(3), R(2),
/* 140 S> */ B(Ldar), R(closure),
B(GetSuperConstructor), R(5),
B(CreateEmptyArrayLiteral), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
index b078ef048a..ec70270dae 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
@@ -11,13 +11,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 27
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
B(Mov), R(context), R(0),
/* 40 S> */ B(LdaSmi), I8(1),
/* 49 S> */ B(Return),
- B(Jump), U8(18),
B(Star), R(1),
B(CreateCatchContext), R(1), U8(0),
B(Star), R(0),
@@ -27,14 +26,12 @@ bytecodes: [
B(PushContext), R(1),
/* 63 S> */ B(LdaSmi), I8(2),
/* 72 S> */ B(Return),
- B(LdaUndefined),
- /* 75 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [4, 7, 9],
+ [4, 7, 7],
]
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
index 6d7309b732..19a09ba49c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
@@ -7,174 +7,488 @@ wrap: yes
---
snippet: "
- var x0;
- var x1;
- var x2;
- var x3;
- var x4;
- var x5;
- var x6;
- var x7;
- var x8;
- var x9;
- var x10;
- var x11;
- var x12;
- var x13;
- var x14;
- var x15;
- var x16;
- var x17;
- var x18;
- var x19;
- var x20;
- var x21;
- var x22;
- var x23;
- var x24;
- var x25;
- var x26;
- var x27;
- var x28;
- var x29;
- var x30;
- var x31;
- var x32;
- var x33;
- var x34;
- var x35;
- var x36;
- var x37;
- var x38;
- var x39;
- var x40;
- var x41;
- var x42;
- var x43;
- var x44;
- var x45;
- var x46;
- var x47;
- var x48;
- var x49;
- var x50;
- var x51;
- var x52;
- var x53;
- var x54;
- var x55;
- var x56;
- var x57;
- var x58;
- var x59;
- var x60;
- var x61;
- var x62;
- var x63;
- var x64;
- var x65;
- var x66;
- var x67;
- var x68;
- var x69;
- var x70;
- var x71;
- var x72;
- var x73;
- var x74;
- var x75;
- var x76;
- var x77;
- var x78;
- var x79;
- var x80;
- var x81;
- var x82;
- var x83;
- var x84;
- var x85;
- var x86;
- var x87;
- var x88;
- var x89;
- var x90;
- var x91;
- var x92;
- var x93;
- var x94;
- var x95;
- var x96;
- var x97;
- var x98;
- var x99;
- var x100;
- var x101;
- var x102;
- var x103;
- var x104;
- var x105;
- var x106;
- var x107;
- var x108;
- var x109;
- var x110;
- var x111;
- var x112;
- var x113;
- var x114;
- var x115;
- var x116;
- var x117;
- var x118;
- var x119;
- var x120;
- var x121;
- var x122;
- var x123;
- var x124;
- var x125;
- var x126;
- var x127;
- var x128;
- var x129;
- var x130;
- var x131;
- var x132;
- var x133;
- var x134;
- var x135;
- var x136;
- var x137;
- var x138;
- var x139;
- var x140;
- var x141;
- var x142;
- var x143;
- var x144;
- var x145;
- var x146;
- var x147;
- var x148;
- var x149;
- var x150;
- var x151;
- var x152;
- var x153;
- var x154;
- var x155;
- var x156;
+ var x0 = 0;
+ var x1 = 0;
+ var x2 = 0;
+ var x3 = 0;
+ var x4 = 0;
+ var x5 = 0;
+ var x6 = 0;
+ var x7 = 0;
+ var x8 = 0;
+ var x9 = 0;
+ var x10 = 0;
+ var x11 = 0;
+ var x12 = 0;
+ var x13 = 0;
+ var x14 = 0;
+ var x15 = 0;
+ var x16 = 0;
+ var x17 = 0;
+ var x18 = 0;
+ var x19 = 0;
+ var x20 = 0;
+ var x21 = 0;
+ var x22 = 0;
+ var x23 = 0;
+ var x24 = 0;
+ var x25 = 0;
+ var x26 = 0;
+ var x27 = 0;
+ var x28 = 0;
+ var x29 = 0;
+ var x30 = 0;
+ var x31 = 0;
+ var x32 = 0;
+ var x33 = 0;
+ var x34 = 0;
+ var x35 = 0;
+ var x36 = 0;
+ var x37 = 0;
+ var x38 = 0;
+ var x39 = 0;
+ var x40 = 0;
+ var x41 = 0;
+ var x42 = 0;
+ var x43 = 0;
+ var x44 = 0;
+ var x45 = 0;
+ var x46 = 0;
+ var x47 = 0;
+ var x48 = 0;
+ var x49 = 0;
+ var x50 = 0;
+ var x51 = 0;
+ var x52 = 0;
+ var x53 = 0;
+ var x54 = 0;
+ var x55 = 0;
+ var x56 = 0;
+ var x57 = 0;
+ var x58 = 0;
+ var x59 = 0;
+ var x60 = 0;
+ var x61 = 0;
+ var x62 = 0;
+ var x63 = 0;
+ var x64 = 0;
+ var x65 = 0;
+ var x66 = 0;
+ var x67 = 0;
+ var x68 = 0;
+ var x69 = 0;
+ var x70 = 0;
+ var x71 = 0;
+ var x72 = 0;
+ var x73 = 0;
+ var x74 = 0;
+ var x75 = 0;
+ var x76 = 0;
+ var x77 = 0;
+ var x78 = 0;
+ var x79 = 0;
+ var x80 = 0;
+ var x81 = 0;
+ var x82 = 0;
+ var x83 = 0;
+ var x84 = 0;
+ var x85 = 0;
+ var x86 = 0;
+ var x87 = 0;
+ var x88 = 0;
+ var x89 = 0;
+ var x90 = 0;
+ var x91 = 0;
+ var x92 = 0;
+ var x93 = 0;
+ var x94 = 0;
+ var x95 = 0;
+ var x96 = 0;
+ var x97 = 0;
+ var x98 = 0;
+ var x99 = 0;
+ var x100 = 0;
+ var x101 = 0;
+ var x102 = 0;
+ var x103 = 0;
+ var x104 = 0;
+ var x105 = 0;
+ var x106 = 0;
+ var x107 = 0;
+ var x108 = 0;
+ var x109 = 0;
+ var x110 = 0;
+ var x111 = 0;
+ var x112 = 0;
+ var x113 = 0;
+ var x114 = 0;
+ var x115 = 0;
+ var x116 = 0;
+ var x117 = 0;
+ var x118 = 0;
+ var x119 = 0;
+ var x120 = 0;
+ var x121 = 0;
+ var x122 = 0;
+ var x123 = 0;
+ var x124 = 0;
+ var x125 = 0;
+ var x126 = 0;
+ var x127 = 0;
+ var x128 = 0;
+ var x129 = 0;
+ var x130 = 0;
+ var x131 = 0;
+ var x132 = 0;
+ var x133 = 0;
+ var x134 = 0;
+ var x135 = 0;
+ var x136 = 0;
+ var x137 = 0;
+ var x138 = 0;
+ var x139 = 0;
+ var x140 = 0;
+ var x141 = 0;
+ var x142 = 0;
+ var x143 = 0;
+ var x144 = 0;
+ var x145 = 0;
+ var x146 = 0;
+ var x147 = 0;
+ var x148 = 0;
+ var x149 = 0;
+ var x150 = 0;
+ var x151 = 0;
+ var x152 = 0;
+ var x153 = 0;
+ var x154 = 0;
+ var x155 = 0;
+ var x156 = 0;
x0 = x127;
return x0;
"
frame size: 157
parameter count: 1
-bytecode array length: 10
+bytecode array length: 547
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 1494 S> */ B(Wide), B(Mov), R16(127), R16(0),
- /* 1505 S> */ B(Ldar), R(0),
- /* 1515 S> */ B(Return),
+ /* 43 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 55 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 67 S> */ B(LdaZero),
+ B(Star), R(2),
+ /* 79 S> */ B(LdaZero),
+ B(Star), R(3),
+ /* 91 S> */ B(LdaZero),
+ B(Star), R(4),
+ /* 103 S> */ B(LdaZero),
+ B(Star), R(5),
+ /* 115 S> */ B(LdaZero),
+ B(Star), R(6),
+ /* 127 S> */ B(LdaZero),
+ B(Star), R(7),
+ /* 139 S> */ B(LdaZero),
+ B(Star), R(8),
+ /* 151 S> */ B(LdaZero),
+ B(Star), R(9),
+ /* 164 S> */ B(LdaZero),
+ B(Star), R(10),
+ /* 177 S> */ B(LdaZero),
+ B(Star), R(11),
+ /* 190 S> */ B(LdaZero),
+ B(Star), R(12),
+ /* 203 S> */ B(LdaZero),
+ B(Star), R(13),
+ /* 216 S> */ B(LdaZero),
+ B(Star), R(14),
+ /* 229 S> */ B(LdaZero),
+ B(Star), R(15),
+ /* 242 S> */ B(LdaZero),
+ B(Star), R(16),
+ /* 255 S> */ B(LdaZero),
+ B(Star), R(17),
+ /* 268 S> */ B(LdaZero),
+ B(Star), R(18),
+ /* 281 S> */ B(LdaZero),
+ B(Star), R(19),
+ /* 294 S> */ B(LdaZero),
+ B(Star), R(20),
+ /* 307 S> */ B(LdaZero),
+ B(Star), R(21),
+ /* 320 S> */ B(LdaZero),
+ B(Star), R(22),
+ /* 333 S> */ B(LdaZero),
+ B(Star), R(23),
+ /* 346 S> */ B(LdaZero),
+ B(Star), R(24),
+ /* 359 S> */ B(LdaZero),
+ B(Star), R(25),
+ /* 372 S> */ B(LdaZero),
+ B(Star), R(26),
+ /* 385 S> */ B(LdaZero),
+ B(Star), R(27),
+ /* 398 S> */ B(LdaZero),
+ B(Star), R(28),
+ /* 411 S> */ B(LdaZero),
+ B(Star), R(29),
+ /* 424 S> */ B(LdaZero),
+ B(Star), R(30),
+ /* 437 S> */ B(LdaZero),
+ B(Star), R(31),
+ /* 450 S> */ B(LdaZero),
+ B(Star), R(32),
+ /* 463 S> */ B(LdaZero),
+ B(Star), R(33),
+ /* 476 S> */ B(LdaZero),
+ B(Star), R(34),
+ /* 489 S> */ B(LdaZero),
+ B(Star), R(35),
+ /* 502 S> */ B(LdaZero),
+ B(Star), R(36),
+ /* 515 S> */ B(LdaZero),
+ B(Star), R(37),
+ /* 528 S> */ B(LdaZero),
+ B(Star), R(38),
+ /* 541 S> */ B(LdaZero),
+ B(Star), R(39),
+ /* 554 S> */ B(LdaZero),
+ B(Star), R(40),
+ /* 567 S> */ B(LdaZero),
+ B(Star), R(41),
+ /* 580 S> */ B(LdaZero),
+ B(Star), R(42),
+ /* 593 S> */ B(LdaZero),
+ B(Star), R(43),
+ /* 606 S> */ B(LdaZero),
+ B(Star), R(44),
+ /* 619 S> */ B(LdaZero),
+ B(Star), R(45),
+ /* 632 S> */ B(LdaZero),
+ B(Star), R(46),
+ /* 645 S> */ B(LdaZero),
+ B(Star), R(47),
+ /* 658 S> */ B(LdaZero),
+ B(Star), R(48),
+ /* 671 S> */ B(LdaZero),
+ B(Star), R(49),
+ /* 684 S> */ B(LdaZero),
+ B(Star), R(50),
+ /* 697 S> */ B(LdaZero),
+ B(Star), R(51),
+ /* 710 S> */ B(LdaZero),
+ B(Star), R(52),
+ /* 723 S> */ B(LdaZero),
+ B(Star), R(53),
+ /* 736 S> */ B(LdaZero),
+ B(Star), R(54),
+ /* 749 S> */ B(LdaZero),
+ B(Star), R(55),
+ /* 762 S> */ B(LdaZero),
+ B(Star), R(56),
+ /* 775 S> */ B(LdaZero),
+ B(Star), R(57),
+ /* 788 S> */ B(LdaZero),
+ B(Star), R(58),
+ /* 801 S> */ B(LdaZero),
+ B(Star), R(59),
+ /* 814 S> */ B(LdaZero),
+ B(Star), R(60),
+ /* 827 S> */ B(LdaZero),
+ B(Star), R(61),
+ /* 840 S> */ B(LdaZero),
+ B(Star), R(62),
+ /* 853 S> */ B(LdaZero),
+ B(Star), R(63),
+ /* 866 S> */ B(LdaZero),
+ B(Star), R(64),
+ /* 879 S> */ B(LdaZero),
+ B(Star), R(65),
+ /* 892 S> */ B(LdaZero),
+ B(Star), R(66),
+ /* 905 S> */ B(LdaZero),
+ B(Star), R(67),
+ /* 918 S> */ B(LdaZero),
+ B(Star), R(68),
+ /* 931 S> */ B(LdaZero),
+ B(Star), R(69),
+ /* 944 S> */ B(LdaZero),
+ B(Star), R(70),
+ /* 957 S> */ B(LdaZero),
+ B(Star), R(71),
+ /* 970 S> */ B(LdaZero),
+ B(Star), R(72),
+ /* 983 S> */ B(LdaZero),
+ B(Star), R(73),
+ /* 996 S> */ B(LdaZero),
+ B(Star), R(74),
+ /* 1009 S> */ B(LdaZero),
+ B(Star), R(75),
+ /* 1022 S> */ B(LdaZero),
+ B(Star), R(76),
+ /* 1035 S> */ B(LdaZero),
+ B(Star), R(77),
+ /* 1048 S> */ B(LdaZero),
+ B(Star), R(78),
+ /* 1061 S> */ B(LdaZero),
+ B(Star), R(79),
+ /* 1074 S> */ B(LdaZero),
+ B(Star), R(80),
+ /* 1087 S> */ B(LdaZero),
+ B(Star), R(81),
+ /* 1100 S> */ B(LdaZero),
+ B(Star), R(82),
+ /* 1113 S> */ B(LdaZero),
+ B(Star), R(83),
+ /* 1126 S> */ B(LdaZero),
+ B(Star), R(84),
+ /* 1139 S> */ B(LdaZero),
+ B(Star), R(85),
+ /* 1152 S> */ B(LdaZero),
+ B(Star), R(86),
+ /* 1165 S> */ B(LdaZero),
+ B(Star), R(87),
+ /* 1178 S> */ B(LdaZero),
+ B(Star), R(88),
+ /* 1191 S> */ B(LdaZero),
+ B(Star), R(89),
+ /* 1204 S> */ B(LdaZero),
+ B(Star), R(90),
+ /* 1217 S> */ B(LdaZero),
+ B(Star), R(91),
+ /* 1230 S> */ B(LdaZero),
+ B(Star), R(92),
+ /* 1243 S> */ B(LdaZero),
+ B(Star), R(93),
+ /* 1256 S> */ B(LdaZero),
+ B(Star), R(94),
+ /* 1269 S> */ B(LdaZero),
+ B(Star), R(95),
+ /* 1282 S> */ B(LdaZero),
+ B(Star), R(96),
+ /* 1295 S> */ B(LdaZero),
+ B(Star), R(97),
+ /* 1308 S> */ B(LdaZero),
+ B(Star), R(98),
+ /* 1321 S> */ B(LdaZero),
+ B(Star), R(99),
+ /* 1335 S> */ B(LdaZero),
+ B(Star), R(100),
+ /* 1349 S> */ B(LdaZero),
+ B(Star), R(101),
+ /* 1363 S> */ B(LdaZero),
+ B(Star), R(102),
+ /* 1377 S> */ B(LdaZero),
+ B(Star), R(103),
+ /* 1391 S> */ B(LdaZero),
+ B(Star), R(104),
+ /* 1405 S> */ B(LdaZero),
+ B(Star), R(105),
+ /* 1419 S> */ B(LdaZero),
+ B(Star), R(106),
+ /* 1433 S> */ B(LdaZero),
+ B(Star), R(107),
+ /* 1447 S> */ B(LdaZero),
+ B(Star), R(108),
+ /* 1461 S> */ B(LdaZero),
+ B(Star), R(109),
+ /* 1475 S> */ B(LdaZero),
+ B(Star), R(110),
+ /* 1489 S> */ B(LdaZero),
+ B(Star), R(111),
+ /* 1503 S> */ B(LdaZero),
+ B(Star), R(112),
+ /* 1517 S> */ B(LdaZero),
+ B(Star), R(113),
+ /* 1531 S> */ B(LdaZero),
+ B(Star), R(114),
+ /* 1545 S> */ B(LdaZero),
+ B(Star), R(115),
+ /* 1559 S> */ B(LdaZero),
+ B(Star), R(116),
+ /* 1573 S> */ B(LdaZero),
+ B(Star), R(117),
+ /* 1587 S> */ B(LdaZero),
+ B(Star), R(118),
+ /* 1601 S> */ B(LdaZero),
+ B(Star), R(119),
+ /* 1615 S> */ B(LdaZero),
+ B(Star), R(120),
+ /* 1629 S> */ B(LdaZero),
+ B(Star), R(121),
+ /* 1643 S> */ B(LdaZero),
+ B(Star), R(122),
+ /* 1657 S> */ B(LdaZero),
+ B(Star), R(123),
+ /* 1671 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(124),
+ /* 1685 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(125),
+ /* 1699 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(126),
+ /* 1713 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(127),
+ /* 1727 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(128),
+ /* 1741 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(129),
+ /* 1755 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(130),
+ /* 1769 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(131),
+ /* 1783 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(132),
+ /* 1797 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(133),
+ /* 1811 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(134),
+ /* 1825 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(135),
+ /* 1839 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(136),
+ /* 1853 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(137),
+ /* 1867 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(138),
+ /* 1881 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(139),
+ /* 1895 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(140),
+ /* 1909 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(141),
+ /* 1923 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(142),
+ /* 1937 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(143),
+ /* 1951 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(144),
+ /* 1965 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(145),
+ /* 1979 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(146),
+ /* 1993 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(147),
+ /* 2007 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(148),
+ /* 2021 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(149),
+ /* 2035 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(150),
+ /* 2049 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(151),
+ /* 2063 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(152),
+ /* 2077 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(153),
+ /* 2091 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(154),
+ /* 2105 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(155),
+ /* 2119 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(156),
+ /* 2122 S> */ B(Wide), B(Mov), R16(127), R16(0),
+ /* 2133 S> */ B(Ldar), R(0),
+ /* 2143 S> */ B(Return),
]
constant pool: [
]
@@ -183,174 +497,488 @@ handlers: [
---
snippet: "
- var x0;
- var x1;
- var x2;
- var x3;
- var x4;
- var x5;
- var x6;
- var x7;
- var x8;
- var x9;
- var x10;
- var x11;
- var x12;
- var x13;
- var x14;
- var x15;
- var x16;
- var x17;
- var x18;
- var x19;
- var x20;
- var x21;
- var x22;
- var x23;
- var x24;
- var x25;
- var x26;
- var x27;
- var x28;
- var x29;
- var x30;
- var x31;
- var x32;
- var x33;
- var x34;
- var x35;
- var x36;
- var x37;
- var x38;
- var x39;
- var x40;
- var x41;
- var x42;
- var x43;
- var x44;
- var x45;
- var x46;
- var x47;
- var x48;
- var x49;
- var x50;
- var x51;
- var x52;
- var x53;
- var x54;
- var x55;
- var x56;
- var x57;
- var x58;
- var x59;
- var x60;
- var x61;
- var x62;
- var x63;
- var x64;
- var x65;
- var x66;
- var x67;
- var x68;
- var x69;
- var x70;
- var x71;
- var x72;
- var x73;
- var x74;
- var x75;
- var x76;
- var x77;
- var x78;
- var x79;
- var x80;
- var x81;
- var x82;
- var x83;
- var x84;
- var x85;
- var x86;
- var x87;
- var x88;
- var x89;
- var x90;
- var x91;
- var x92;
- var x93;
- var x94;
- var x95;
- var x96;
- var x97;
- var x98;
- var x99;
- var x100;
- var x101;
- var x102;
- var x103;
- var x104;
- var x105;
- var x106;
- var x107;
- var x108;
- var x109;
- var x110;
- var x111;
- var x112;
- var x113;
- var x114;
- var x115;
- var x116;
- var x117;
- var x118;
- var x119;
- var x120;
- var x121;
- var x122;
- var x123;
- var x124;
- var x125;
- var x126;
- var x127;
- var x128;
- var x129;
- var x130;
- var x131;
- var x132;
- var x133;
- var x134;
- var x135;
- var x136;
- var x137;
- var x138;
- var x139;
- var x140;
- var x141;
- var x142;
- var x143;
- var x144;
- var x145;
- var x146;
- var x147;
- var x148;
- var x149;
- var x150;
- var x151;
- var x152;
- var x153;
- var x154;
- var x155;
- var x156;
+ var x0 = 0;
+ var x1 = 0;
+ var x2 = 0;
+ var x3 = 0;
+ var x4 = 0;
+ var x5 = 0;
+ var x6 = 0;
+ var x7 = 0;
+ var x8 = 0;
+ var x9 = 0;
+ var x10 = 0;
+ var x11 = 0;
+ var x12 = 0;
+ var x13 = 0;
+ var x14 = 0;
+ var x15 = 0;
+ var x16 = 0;
+ var x17 = 0;
+ var x18 = 0;
+ var x19 = 0;
+ var x20 = 0;
+ var x21 = 0;
+ var x22 = 0;
+ var x23 = 0;
+ var x24 = 0;
+ var x25 = 0;
+ var x26 = 0;
+ var x27 = 0;
+ var x28 = 0;
+ var x29 = 0;
+ var x30 = 0;
+ var x31 = 0;
+ var x32 = 0;
+ var x33 = 0;
+ var x34 = 0;
+ var x35 = 0;
+ var x36 = 0;
+ var x37 = 0;
+ var x38 = 0;
+ var x39 = 0;
+ var x40 = 0;
+ var x41 = 0;
+ var x42 = 0;
+ var x43 = 0;
+ var x44 = 0;
+ var x45 = 0;
+ var x46 = 0;
+ var x47 = 0;
+ var x48 = 0;
+ var x49 = 0;
+ var x50 = 0;
+ var x51 = 0;
+ var x52 = 0;
+ var x53 = 0;
+ var x54 = 0;
+ var x55 = 0;
+ var x56 = 0;
+ var x57 = 0;
+ var x58 = 0;
+ var x59 = 0;
+ var x60 = 0;
+ var x61 = 0;
+ var x62 = 0;
+ var x63 = 0;
+ var x64 = 0;
+ var x65 = 0;
+ var x66 = 0;
+ var x67 = 0;
+ var x68 = 0;
+ var x69 = 0;
+ var x70 = 0;
+ var x71 = 0;
+ var x72 = 0;
+ var x73 = 0;
+ var x74 = 0;
+ var x75 = 0;
+ var x76 = 0;
+ var x77 = 0;
+ var x78 = 0;
+ var x79 = 0;
+ var x80 = 0;
+ var x81 = 0;
+ var x82 = 0;
+ var x83 = 0;
+ var x84 = 0;
+ var x85 = 0;
+ var x86 = 0;
+ var x87 = 0;
+ var x88 = 0;
+ var x89 = 0;
+ var x90 = 0;
+ var x91 = 0;
+ var x92 = 0;
+ var x93 = 0;
+ var x94 = 0;
+ var x95 = 0;
+ var x96 = 0;
+ var x97 = 0;
+ var x98 = 0;
+ var x99 = 0;
+ var x100 = 0;
+ var x101 = 0;
+ var x102 = 0;
+ var x103 = 0;
+ var x104 = 0;
+ var x105 = 0;
+ var x106 = 0;
+ var x107 = 0;
+ var x108 = 0;
+ var x109 = 0;
+ var x110 = 0;
+ var x111 = 0;
+ var x112 = 0;
+ var x113 = 0;
+ var x114 = 0;
+ var x115 = 0;
+ var x116 = 0;
+ var x117 = 0;
+ var x118 = 0;
+ var x119 = 0;
+ var x120 = 0;
+ var x121 = 0;
+ var x122 = 0;
+ var x123 = 0;
+ var x124 = 0;
+ var x125 = 0;
+ var x126 = 0;
+ var x127 = 0;
+ var x128 = 0;
+ var x129 = 0;
+ var x130 = 0;
+ var x131 = 0;
+ var x132 = 0;
+ var x133 = 0;
+ var x134 = 0;
+ var x135 = 0;
+ var x136 = 0;
+ var x137 = 0;
+ var x138 = 0;
+ var x139 = 0;
+ var x140 = 0;
+ var x141 = 0;
+ var x142 = 0;
+ var x143 = 0;
+ var x144 = 0;
+ var x145 = 0;
+ var x146 = 0;
+ var x147 = 0;
+ var x148 = 0;
+ var x149 = 0;
+ var x150 = 0;
+ var x151 = 0;
+ var x152 = 0;
+ var x153 = 0;
+ var x154 = 0;
+ var x155 = 0;
+ var x156 = 0;
x127 = x126;
return x127;
"
frame size: 157
parameter count: 1
-bytecode array length: 12
+bytecode array length: 549
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 1494 S> */ B(Wide), B(Mov), R16(126), R16(127),
- /* 1507 S> */ B(Wide), B(Ldar), R16(127),
- /* 1519 S> */ B(Return),
+ /* 43 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 55 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 67 S> */ B(LdaZero),
+ B(Star), R(2),
+ /* 79 S> */ B(LdaZero),
+ B(Star), R(3),
+ /* 91 S> */ B(LdaZero),
+ B(Star), R(4),
+ /* 103 S> */ B(LdaZero),
+ B(Star), R(5),
+ /* 115 S> */ B(LdaZero),
+ B(Star), R(6),
+ /* 127 S> */ B(LdaZero),
+ B(Star), R(7),
+ /* 139 S> */ B(LdaZero),
+ B(Star), R(8),
+ /* 151 S> */ B(LdaZero),
+ B(Star), R(9),
+ /* 164 S> */ B(LdaZero),
+ B(Star), R(10),
+ /* 177 S> */ B(LdaZero),
+ B(Star), R(11),
+ /* 190 S> */ B(LdaZero),
+ B(Star), R(12),
+ /* 203 S> */ B(LdaZero),
+ B(Star), R(13),
+ /* 216 S> */ B(LdaZero),
+ B(Star), R(14),
+ /* 229 S> */ B(LdaZero),
+ B(Star), R(15),
+ /* 242 S> */ B(LdaZero),
+ B(Star), R(16),
+ /* 255 S> */ B(LdaZero),
+ B(Star), R(17),
+ /* 268 S> */ B(LdaZero),
+ B(Star), R(18),
+ /* 281 S> */ B(LdaZero),
+ B(Star), R(19),
+ /* 294 S> */ B(LdaZero),
+ B(Star), R(20),
+ /* 307 S> */ B(LdaZero),
+ B(Star), R(21),
+ /* 320 S> */ B(LdaZero),
+ B(Star), R(22),
+ /* 333 S> */ B(LdaZero),
+ B(Star), R(23),
+ /* 346 S> */ B(LdaZero),
+ B(Star), R(24),
+ /* 359 S> */ B(LdaZero),
+ B(Star), R(25),
+ /* 372 S> */ B(LdaZero),
+ B(Star), R(26),
+ /* 385 S> */ B(LdaZero),
+ B(Star), R(27),
+ /* 398 S> */ B(LdaZero),
+ B(Star), R(28),
+ /* 411 S> */ B(LdaZero),
+ B(Star), R(29),
+ /* 424 S> */ B(LdaZero),
+ B(Star), R(30),
+ /* 437 S> */ B(LdaZero),
+ B(Star), R(31),
+ /* 450 S> */ B(LdaZero),
+ B(Star), R(32),
+ /* 463 S> */ B(LdaZero),
+ B(Star), R(33),
+ /* 476 S> */ B(LdaZero),
+ B(Star), R(34),
+ /* 489 S> */ B(LdaZero),
+ B(Star), R(35),
+ /* 502 S> */ B(LdaZero),
+ B(Star), R(36),
+ /* 515 S> */ B(LdaZero),
+ B(Star), R(37),
+ /* 528 S> */ B(LdaZero),
+ B(Star), R(38),
+ /* 541 S> */ B(LdaZero),
+ B(Star), R(39),
+ /* 554 S> */ B(LdaZero),
+ B(Star), R(40),
+ /* 567 S> */ B(LdaZero),
+ B(Star), R(41),
+ /* 580 S> */ B(LdaZero),
+ B(Star), R(42),
+ /* 593 S> */ B(LdaZero),
+ B(Star), R(43),
+ /* 606 S> */ B(LdaZero),
+ B(Star), R(44),
+ /* 619 S> */ B(LdaZero),
+ B(Star), R(45),
+ /* 632 S> */ B(LdaZero),
+ B(Star), R(46),
+ /* 645 S> */ B(LdaZero),
+ B(Star), R(47),
+ /* 658 S> */ B(LdaZero),
+ B(Star), R(48),
+ /* 671 S> */ B(LdaZero),
+ B(Star), R(49),
+ /* 684 S> */ B(LdaZero),
+ B(Star), R(50),
+ /* 697 S> */ B(LdaZero),
+ B(Star), R(51),
+ /* 710 S> */ B(LdaZero),
+ B(Star), R(52),
+ /* 723 S> */ B(LdaZero),
+ B(Star), R(53),
+ /* 736 S> */ B(LdaZero),
+ B(Star), R(54),
+ /* 749 S> */ B(LdaZero),
+ B(Star), R(55),
+ /* 762 S> */ B(LdaZero),
+ B(Star), R(56),
+ /* 775 S> */ B(LdaZero),
+ B(Star), R(57),
+ /* 788 S> */ B(LdaZero),
+ B(Star), R(58),
+ /* 801 S> */ B(LdaZero),
+ B(Star), R(59),
+ /* 814 S> */ B(LdaZero),
+ B(Star), R(60),
+ /* 827 S> */ B(LdaZero),
+ B(Star), R(61),
+ /* 840 S> */ B(LdaZero),
+ B(Star), R(62),
+ /* 853 S> */ B(LdaZero),
+ B(Star), R(63),
+ /* 866 S> */ B(LdaZero),
+ B(Star), R(64),
+ /* 879 S> */ B(LdaZero),
+ B(Star), R(65),
+ /* 892 S> */ B(LdaZero),
+ B(Star), R(66),
+ /* 905 S> */ B(LdaZero),
+ B(Star), R(67),
+ /* 918 S> */ B(LdaZero),
+ B(Star), R(68),
+ /* 931 S> */ B(LdaZero),
+ B(Star), R(69),
+ /* 944 S> */ B(LdaZero),
+ B(Star), R(70),
+ /* 957 S> */ B(LdaZero),
+ B(Star), R(71),
+ /* 970 S> */ B(LdaZero),
+ B(Star), R(72),
+ /* 983 S> */ B(LdaZero),
+ B(Star), R(73),
+ /* 996 S> */ B(LdaZero),
+ B(Star), R(74),
+ /* 1009 S> */ B(LdaZero),
+ B(Star), R(75),
+ /* 1022 S> */ B(LdaZero),
+ B(Star), R(76),
+ /* 1035 S> */ B(LdaZero),
+ B(Star), R(77),
+ /* 1048 S> */ B(LdaZero),
+ B(Star), R(78),
+ /* 1061 S> */ B(LdaZero),
+ B(Star), R(79),
+ /* 1074 S> */ B(LdaZero),
+ B(Star), R(80),
+ /* 1087 S> */ B(LdaZero),
+ B(Star), R(81),
+ /* 1100 S> */ B(LdaZero),
+ B(Star), R(82),
+ /* 1113 S> */ B(LdaZero),
+ B(Star), R(83),
+ /* 1126 S> */ B(LdaZero),
+ B(Star), R(84),
+ /* 1139 S> */ B(LdaZero),
+ B(Star), R(85),
+ /* 1152 S> */ B(LdaZero),
+ B(Star), R(86),
+ /* 1165 S> */ B(LdaZero),
+ B(Star), R(87),
+ /* 1178 S> */ B(LdaZero),
+ B(Star), R(88),
+ /* 1191 S> */ B(LdaZero),
+ B(Star), R(89),
+ /* 1204 S> */ B(LdaZero),
+ B(Star), R(90),
+ /* 1217 S> */ B(LdaZero),
+ B(Star), R(91),
+ /* 1230 S> */ B(LdaZero),
+ B(Star), R(92),
+ /* 1243 S> */ B(LdaZero),
+ B(Star), R(93),
+ /* 1256 S> */ B(LdaZero),
+ B(Star), R(94),
+ /* 1269 S> */ B(LdaZero),
+ B(Star), R(95),
+ /* 1282 S> */ B(LdaZero),
+ B(Star), R(96),
+ /* 1295 S> */ B(LdaZero),
+ B(Star), R(97),
+ /* 1308 S> */ B(LdaZero),
+ B(Star), R(98),
+ /* 1321 S> */ B(LdaZero),
+ B(Star), R(99),
+ /* 1335 S> */ B(LdaZero),
+ B(Star), R(100),
+ /* 1349 S> */ B(LdaZero),
+ B(Star), R(101),
+ /* 1363 S> */ B(LdaZero),
+ B(Star), R(102),
+ /* 1377 S> */ B(LdaZero),
+ B(Star), R(103),
+ /* 1391 S> */ B(LdaZero),
+ B(Star), R(104),
+ /* 1405 S> */ B(LdaZero),
+ B(Star), R(105),
+ /* 1419 S> */ B(LdaZero),
+ B(Star), R(106),
+ /* 1433 S> */ B(LdaZero),
+ B(Star), R(107),
+ /* 1447 S> */ B(LdaZero),
+ B(Star), R(108),
+ /* 1461 S> */ B(LdaZero),
+ B(Star), R(109),
+ /* 1475 S> */ B(LdaZero),
+ B(Star), R(110),
+ /* 1489 S> */ B(LdaZero),
+ B(Star), R(111),
+ /* 1503 S> */ B(LdaZero),
+ B(Star), R(112),
+ /* 1517 S> */ B(LdaZero),
+ B(Star), R(113),
+ /* 1531 S> */ B(LdaZero),
+ B(Star), R(114),
+ /* 1545 S> */ B(LdaZero),
+ B(Star), R(115),
+ /* 1559 S> */ B(LdaZero),
+ B(Star), R(116),
+ /* 1573 S> */ B(LdaZero),
+ B(Star), R(117),
+ /* 1587 S> */ B(LdaZero),
+ B(Star), R(118),
+ /* 1601 S> */ B(LdaZero),
+ B(Star), R(119),
+ /* 1615 S> */ B(LdaZero),
+ B(Star), R(120),
+ /* 1629 S> */ B(LdaZero),
+ B(Star), R(121),
+ /* 1643 S> */ B(LdaZero),
+ B(Star), R(122),
+ /* 1657 S> */ B(LdaZero),
+ B(Star), R(123),
+ /* 1671 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(124),
+ /* 1685 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(125),
+ /* 1699 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(126),
+ /* 1713 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(127),
+ /* 1727 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(128),
+ /* 1741 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(129),
+ /* 1755 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(130),
+ /* 1769 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(131),
+ /* 1783 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(132),
+ /* 1797 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(133),
+ /* 1811 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(134),
+ /* 1825 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(135),
+ /* 1839 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(136),
+ /* 1853 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(137),
+ /* 1867 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(138),
+ /* 1881 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(139),
+ /* 1895 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(140),
+ /* 1909 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(141),
+ /* 1923 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(142),
+ /* 1937 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(143),
+ /* 1951 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(144),
+ /* 1965 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(145),
+ /* 1979 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(146),
+ /* 1993 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(147),
+ /* 2007 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(148),
+ /* 2021 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(149),
+ /* 2035 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(150),
+ /* 2049 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(151),
+ /* 2063 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(152),
+ /* 2077 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(153),
+ /* 2091 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(154),
+ /* 2105 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(155),
+ /* 2119 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(156),
+ /* 2122 S> */ B(Wide), B(Mov), R16(126), R16(127),
+ /* 2135 S> */ B(Wide), B(Ldar), R16(127),
+ /* 2147 S> */ B(Return),
]
constant pool: [
]
@@ -359,178 +987,492 @@ handlers: [
---
snippet: "
- var x0;
- var x1;
- var x2;
- var x3;
- var x4;
- var x5;
- var x6;
- var x7;
- var x8;
- var x9;
- var x10;
- var x11;
- var x12;
- var x13;
- var x14;
- var x15;
- var x16;
- var x17;
- var x18;
- var x19;
- var x20;
- var x21;
- var x22;
- var x23;
- var x24;
- var x25;
- var x26;
- var x27;
- var x28;
- var x29;
- var x30;
- var x31;
- var x32;
- var x33;
- var x34;
- var x35;
- var x36;
- var x37;
- var x38;
- var x39;
- var x40;
- var x41;
- var x42;
- var x43;
- var x44;
- var x45;
- var x46;
- var x47;
- var x48;
- var x49;
- var x50;
- var x51;
- var x52;
- var x53;
- var x54;
- var x55;
- var x56;
- var x57;
- var x58;
- var x59;
- var x60;
- var x61;
- var x62;
- var x63;
- var x64;
- var x65;
- var x66;
- var x67;
- var x68;
- var x69;
- var x70;
- var x71;
- var x72;
- var x73;
- var x74;
- var x75;
- var x76;
- var x77;
- var x78;
- var x79;
- var x80;
- var x81;
- var x82;
- var x83;
- var x84;
- var x85;
- var x86;
- var x87;
- var x88;
- var x89;
- var x90;
- var x91;
- var x92;
- var x93;
- var x94;
- var x95;
- var x96;
- var x97;
- var x98;
- var x99;
- var x100;
- var x101;
- var x102;
- var x103;
- var x104;
- var x105;
- var x106;
- var x107;
- var x108;
- var x109;
- var x110;
- var x111;
- var x112;
- var x113;
- var x114;
- var x115;
- var x116;
- var x117;
- var x118;
- var x119;
- var x120;
- var x121;
- var x122;
- var x123;
- var x124;
- var x125;
- var x126;
- var x127;
- var x128;
- var x129;
- var x130;
- var x131;
- var x132;
- var x133;
- var x134;
- var x135;
- var x136;
- var x137;
- var x138;
- var x139;
- var x140;
- var x141;
- var x142;
- var x143;
- var x144;
- var x145;
- var x146;
- var x147;
- var x148;
- var x149;
- var x150;
- var x151;
- var x152;
- var x153;
- var x154;
- var x155;
- var x156;
+ var x0 = 0;
+ var x1 = 0;
+ var x2 = 0;
+ var x3 = 0;
+ var x4 = 0;
+ var x5 = 0;
+ var x6 = 0;
+ var x7 = 0;
+ var x8 = 0;
+ var x9 = 0;
+ var x10 = 0;
+ var x11 = 0;
+ var x12 = 0;
+ var x13 = 0;
+ var x14 = 0;
+ var x15 = 0;
+ var x16 = 0;
+ var x17 = 0;
+ var x18 = 0;
+ var x19 = 0;
+ var x20 = 0;
+ var x21 = 0;
+ var x22 = 0;
+ var x23 = 0;
+ var x24 = 0;
+ var x25 = 0;
+ var x26 = 0;
+ var x27 = 0;
+ var x28 = 0;
+ var x29 = 0;
+ var x30 = 0;
+ var x31 = 0;
+ var x32 = 0;
+ var x33 = 0;
+ var x34 = 0;
+ var x35 = 0;
+ var x36 = 0;
+ var x37 = 0;
+ var x38 = 0;
+ var x39 = 0;
+ var x40 = 0;
+ var x41 = 0;
+ var x42 = 0;
+ var x43 = 0;
+ var x44 = 0;
+ var x45 = 0;
+ var x46 = 0;
+ var x47 = 0;
+ var x48 = 0;
+ var x49 = 0;
+ var x50 = 0;
+ var x51 = 0;
+ var x52 = 0;
+ var x53 = 0;
+ var x54 = 0;
+ var x55 = 0;
+ var x56 = 0;
+ var x57 = 0;
+ var x58 = 0;
+ var x59 = 0;
+ var x60 = 0;
+ var x61 = 0;
+ var x62 = 0;
+ var x63 = 0;
+ var x64 = 0;
+ var x65 = 0;
+ var x66 = 0;
+ var x67 = 0;
+ var x68 = 0;
+ var x69 = 0;
+ var x70 = 0;
+ var x71 = 0;
+ var x72 = 0;
+ var x73 = 0;
+ var x74 = 0;
+ var x75 = 0;
+ var x76 = 0;
+ var x77 = 0;
+ var x78 = 0;
+ var x79 = 0;
+ var x80 = 0;
+ var x81 = 0;
+ var x82 = 0;
+ var x83 = 0;
+ var x84 = 0;
+ var x85 = 0;
+ var x86 = 0;
+ var x87 = 0;
+ var x88 = 0;
+ var x89 = 0;
+ var x90 = 0;
+ var x91 = 0;
+ var x92 = 0;
+ var x93 = 0;
+ var x94 = 0;
+ var x95 = 0;
+ var x96 = 0;
+ var x97 = 0;
+ var x98 = 0;
+ var x99 = 0;
+ var x100 = 0;
+ var x101 = 0;
+ var x102 = 0;
+ var x103 = 0;
+ var x104 = 0;
+ var x105 = 0;
+ var x106 = 0;
+ var x107 = 0;
+ var x108 = 0;
+ var x109 = 0;
+ var x110 = 0;
+ var x111 = 0;
+ var x112 = 0;
+ var x113 = 0;
+ var x114 = 0;
+ var x115 = 0;
+ var x116 = 0;
+ var x117 = 0;
+ var x118 = 0;
+ var x119 = 0;
+ var x120 = 0;
+ var x121 = 0;
+ var x122 = 0;
+ var x123 = 0;
+ var x124 = 0;
+ var x125 = 0;
+ var x126 = 0;
+ var x127 = 0;
+ var x128 = 0;
+ var x129 = 0;
+ var x130 = 0;
+ var x131 = 0;
+ var x132 = 0;
+ var x133 = 0;
+ var x134 = 0;
+ var x135 = 0;
+ var x136 = 0;
+ var x137 = 0;
+ var x138 = 0;
+ var x139 = 0;
+ var x140 = 0;
+ var x141 = 0;
+ var x142 = 0;
+ var x143 = 0;
+ var x144 = 0;
+ var x145 = 0;
+ var x146 = 0;
+ var x147 = 0;
+ var x148 = 0;
+ var x149 = 0;
+ var x150 = 0;
+ var x151 = 0;
+ var x152 = 0;
+ var x153 = 0;
+ var x154 = 0;
+ var x155 = 0;
+ var x156 = 0;
if (x2 > 3) { return x129; }
return x128;
"
frame size: 157
parameter count: 1
-bytecode array length: 18
+bytecode array length: 555
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 1494 S> */ B(LdaSmi), I8(3),
- /* 1501 E> */ B(TestGreaterThan), R(2), U8(0),
+ /* 43 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 55 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 67 S> */ B(LdaZero),
+ B(Star), R(2),
+ /* 79 S> */ B(LdaZero),
+ B(Star), R(3),
+ /* 91 S> */ B(LdaZero),
+ B(Star), R(4),
+ /* 103 S> */ B(LdaZero),
+ B(Star), R(5),
+ /* 115 S> */ B(LdaZero),
+ B(Star), R(6),
+ /* 127 S> */ B(LdaZero),
+ B(Star), R(7),
+ /* 139 S> */ B(LdaZero),
+ B(Star), R(8),
+ /* 151 S> */ B(LdaZero),
+ B(Star), R(9),
+ /* 164 S> */ B(LdaZero),
+ B(Star), R(10),
+ /* 177 S> */ B(LdaZero),
+ B(Star), R(11),
+ /* 190 S> */ B(LdaZero),
+ B(Star), R(12),
+ /* 203 S> */ B(LdaZero),
+ B(Star), R(13),
+ /* 216 S> */ B(LdaZero),
+ B(Star), R(14),
+ /* 229 S> */ B(LdaZero),
+ B(Star), R(15),
+ /* 242 S> */ B(LdaZero),
+ B(Star), R(16),
+ /* 255 S> */ B(LdaZero),
+ B(Star), R(17),
+ /* 268 S> */ B(LdaZero),
+ B(Star), R(18),
+ /* 281 S> */ B(LdaZero),
+ B(Star), R(19),
+ /* 294 S> */ B(LdaZero),
+ B(Star), R(20),
+ /* 307 S> */ B(LdaZero),
+ B(Star), R(21),
+ /* 320 S> */ B(LdaZero),
+ B(Star), R(22),
+ /* 333 S> */ B(LdaZero),
+ B(Star), R(23),
+ /* 346 S> */ B(LdaZero),
+ B(Star), R(24),
+ /* 359 S> */ B(LdaZero),
+ B(Star), R(25),
+ /* 372 S> */ B(LdaZero),
+ B(Star), R(26),
+ /* 385 S> */ B(LdaZero),
+ B(Star), R(27),
+ /* 398 S> */ B(LdaZero),
+ B(Star), R(28),
+ /* 411 S> */ B(LdaZero),
+ B(Star), R(29),
+ /* 424 S> */ B(LdaZero),
+ B(Star), R(30),
+ /* 437 S> */ B(LdaZero),
+ B(Star), R(31),
+ /* 450 S> */ B(LdaZero),
+ B(Star), R(32),
+ /* 463 S> */ B(LdaZero),
+ B(Star), R(33),
+ /* 476 S> */ B(LdaZero),
+ B(Star), R(34),
+ /* 489 S> */ B(LdaZero),
+ B(Star), R(35),
+ /* 502 S> */ B(LdaZero),
+ B(Star), R(36),
+ /* 515 S> */ B(LdaZero),
+ B(Star), R(37),
+ /* 528 S> */ B(LdaZero),
+ B(Star), R(38),
+ /* 541 S> */ B(LdaZero),
+ B(Star), R(39),
+ /* 554 S> */ B(LdaZero),
+ B(Star), R(40),
+ /* 567 S> */ B(LdaZero),
+ B(Star), R(41),
+ /* 580 S> */ B(LdaZero),
+ B(Star), R(42),
+ /* 593 S> */ B(LdaZero),
+ B(Star), R(43),
+ /* 606 S> */ B(LdaZero),
+ B(Star), R(44),
+ /* 619 S> */ B(LdaZero),
+ B(Star), R(45),
+ /* 632 S> */ B(LdaZero),
+ B(Star), R(46),
+ /* 645 S> */ B(LdaZero),
+ B(Star), R(47),
+ /* 658 S> */ B(LdaZero),
+ B(Star), R(48),
+ /* 671 S> */ B(LdaZero),
+ B(Star), R(49),
+ /* 684 S> */ B(LdaZero),
+ B(Star), R(50),
+ /* 697 S> */ B(LdaZero),
+ B(Star), R(51),
+ /* 710 S> */ B(LdaZero),
+ B(Star), R(52),
+ /* 723 S> */ B(LdaZero),
+ B(Star), R(53),
+ /* 736 S> */ B(LdaZero),
+ B(Star), R(54),
+ /* 749 S> */ B(LdaZero),
+ B(Star), R(55),
+ /* 762 S> */ B(LdaZero),
+ B(Star), R(56),
+ /* 775 S> */ B(LdaZero),
+ B(Star), R(57),
+ /* 788 S> */ B(LdaZero),
+ B(Star), R(58),
+ /* 801 S> */ B(LdaZero),
+ B(Star), R(59),
+ /* 814 S> */ B(LdaZero),
+ B(Star), R(60),
+ /* 827 S> */ B(LdaZero),
+ B(Star), R(61),
+ /* 840 S> */ B(LdaZero),
+ B(Star), R(62),
+ /* 853 S> */ B(LdaZero),
+ B(Star), R(63),
+ /* 866 S> */ B(LdaZero),
+ B(Star), R(64),
+ /* 879 S> */ B(LdaZero),
+ B(Star), R(65),
+ /* 892 S> */ B(LdaZero),
+ B(Star), R(66),
+ /* 905 S> */ B(LdaZero),
+ B(Star), R(67),
+ /* 918 S> */ B(LdaZero),
+ B(Star), R(68),
+ /* 931 S> */ B(LdaZero),
+ B(Star), R(69),
+ /* 944 S> */ B(LdaZero),
+ B(Star), R(70),
+ /* 957 S> */ B(LdaZero),
+ B(Star), R(71),
+ /* 970 S> */ B(LdaZero),
+ B(Star), R(72),
+ /* 983 S> */ B(LdaZero),
+ B(Star), R(73),
+ /* 996 S> */ B(LdaZero),
+ B(Star), R(74),
+ /* 1009 S> */ B(LdaZero),
+ B(Star), R(75),
+ /* 1022 S> */ B(LdaZero),
+ B(Star), R(76),
+ /* 1035 S> */ B(LdaZero),
+ B(Star), R(77),
+ /* 1048 S> */ B(LdaZero),
+ B(Star), R(78),
+ /* 1061 S> */ B(LdaZero),
+ B(Star), R(79),
+ /* 1074 S> */ B(LdaZero),
+ B(Star), R(80),
+ /* 1087 S> */ B(LdaZero),
+ B(Star), R(81),
+ /* 1100 S> */ B(LdaZero),
+ B(Star), R(82),
+ /* 1113 S> */ B(LdaZero),
+ B(Star), R(83),
+ /* 1126 S> */ B(LdaZero),
+ B(Star), R(84),
+ /* 1139 S> */ B(LdaZero),
+ B(Star), R(85),
+ /* 1152 S> */ B(LdaZero),
+ B(Star), R(86),
+ /* 1165 S> */ B(LdaZero),
+ B(Star), R(87),
+ /* 1178 S> */ B(LdaZero),
+ B(Star), R(88),
+ /* 1191 S> */ B(LdaZero),
+ B(Star), R(89),
+ /* 1204 S> */ B(LdaZero),
+ B(Star), R(90),
+ /* 1217 S> */ B(LdaZero),
+ B(Star), R(91),
+ /* 1230 S> */ B(LdaZero),
+ B(Star), R(92),
+ /* 1243 S> */ B(LdaZero),
+ B(Star), R(93),
+ /* 1256 S> */ B(LdaZero),
+ B(Star), R(94),
+ /* 1269 S> */ B(LdaZero),
+ B(Star), R(95),
+ /* 1282 S> */ B(LdaZero),
+ B(Star), R(96),
+ /* 1295 S> */ B(LdaZero),
+ B(Star), R(97),
+ /* 1308 S> */ B(LdaZero),
+ B(Star), R(98),
+ /* 1321 S> */ B(LdaZero),
+ B(Star), R(99),
+ /* 1335 S> */ B(LdaZero),
+ B(Star), R(100),
+ /* 1349 S> */ B(LdaZero),
+ B(Star), R(101),
+ /* 1363 S> */ B(LdaZero),
+ B(Star), R(102),
+ /* 1377 S> */ B(LdaZero),
+ B(Star), R(103),
+ /* 1391 S> */ B(LdaZero),
+ B(Star), R(104),
+ /* 1405 S> */ B(LdaZero),
+ B(Star), R(105),
+ /* 1419 S> */ B(LdaZero),
+ B(Star), R(106),
+ /* 1433 S> */ B(LdaZero),
+ B(Star), R(107),
+ /* 1447 S> */ B(LdaZero),
+ B(Star), R(108),
+ /* 1461 S> */ B(LdaZero),
+ B(Star), R(109),
+ /* 1475 S> */ B(LdaZero),
+ B(Star), R(110),
+ /* 1489 S> */ B(LdaZero),
+ B(Star), R(111),
+ /* 1503 S> */ B(LdaZero),
+ B(Star), R(112),
+ /* 1517 S> */ B(LdaZero),
+ B(Star), R(113),
+ /* 1531 S> */ B(LdaZero),
+ B(Star), R(114),
+ /* 1545 S> */ B(LdaZero),
+ B(Star), R(115),
+ /* 1559 S> */ B(LdaZero),
+ B(Star), R(116),
+ /* 1573 S> */ B(LdaZero),
+ B(Star), R(117),
+ /* 1587 S> */ B(LdaZero),
+ B(Star), R(118),
+ /* 1601 S> */ B(LdaZero),
+ B(Star), R(119),
+ /* 1615 S> */ B(LdaZero),
+ B(Star), R(120),
+ /* 1629 S> */ B(LdaZero),
+ B(Star), R(121),
+ /* 1643 S> */ B(LdaZero),
+ B(Star), R(122),
+ /* 1657 S> */ B(LdaZero),
+ B(Star), R(123),
+ /* 1671 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(124),
+ /* 1685 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(125),
+ /* 1699 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(126),
+ /* 1713 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(127),
+ /* 1727 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(128),
+ /* 1741 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(129),
+ /* 1755 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(130),
+ /* 1769 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(131),
+ /* 1783 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(132),
+ /* 1797 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(133),
+ /* 1811 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(134),
+ /* 1825 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(135),
+ /* 1839 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(136),
+ /* 1853 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(137),
+ /* 1867 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(138),
+ /* 1881 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(139),
+ /* 1895 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(140),
+ /* 1909 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(141),
+ /* 1923 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(142),
+ /* 1937 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(143),
+ /* 1951 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(144),
+ /* 1965 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(145),
+ /* 1979 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(146),
+ /* 1993 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(147),
+ /* 2007 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(148),
+ /* 2021 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(149),
+ /* 2035 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(150),
+ /* 2049 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(151),
+ /* 2063 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(152),
+ /* 2077 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(153),
+ /* 2091 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(154),
+ /* 2105 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(155),
+ /* 2119 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(156),
+ /* 2122 S> */ B(LdaSmi), I8(3),
+ /* 2129 E> */ B(TestGreaterThan), R(2), U8(0),
B(JumpIfFalse), U8(7),
- /* 1508 S> */ B(Wide), B(Ldar), R16(129),
- /* 1520 S> */ B(Return),
- /* 1523 S> */ B(Wide), B(Ldar), R16(128),
- /* 1535 S> */ B(Return),
+ /* 2136 S> */ B(Wide), B(Ldar), R16(129),
+ /* 2148 S> */ B(Return),
+ /* 2151 S> */ B(Wide), B(Ldar), R16(128),
+ /* 2163 S> */ B(Return),
]
constant pool: [
]
@@ -539,163 +1481,163 @@ handlers: [
---
snippet: "
- var x0;
- var x1;
- var x2;
- var x3;
- var x4;
- var x5;
- var x6;
- var x7;
- var x8;
- var x9;
- var x10;
- var x11;
- var x12;
- var x13;
- var x14;
- var x15;
- var x16;
- var x17;
- var x18;
- var x19;
- var x20;
- var x21;
- var x22;
- var x23;
- var x24;
- var x25;
- var x26;
- var x27;
- var x28;
- var x29;
- var x30;
- var x31;
- var x32;
- var x33;
- var x34;
- var x35;
- var x36;
- var x37;
- var x38;
- var x39;
- var x40;
- var x41;
- var x42;
- var x43;
- var x44;
- var x45;
- var x46;
- var x47;
- var x48;
- var x49;
- var x50;
- var x51;
- var x52;
- var x53;
- var x54;
- var x55;
- var x56;
- var x57;
- var x58;
- var x59;
- var x60;
- var x61;
- var x62;
- var x63;
- var x64;
- var x65;
- var x66;
- var x67;
- var x68;
- var x69;
- var x70;
- var x71;
- var x72;
- var x73;
- var x74;
- var x75;
- var x76;
- var x77;
- var x78;
- var x79;
- var x80;
- var x81;
- var x82;
- var x83;
- var x84;
- var x85;
- var x86;
- var x87;
- var x88;
- var x89;
- var x90;
- var x91;
- var x92;
- var x93;
- var x94;
- var x95;
- var x96;
- var x97;
- var x98;
- var x99;
- var x100;
- var x101;
- var x102;
- var x103;
- var x104;
- var x105;
- var x106;
- var x107;
- var x108;
- var x109;
- var x110;
- var x111;
- var x112;
- var x113;
- var x114;
- var x115;
- var x116;
- var x117;
- var x118;
- var x119;
- var x120;
- var x121;
- var x122;
- var x123;
- var x124;
- var x125;
- var x126;
- var x127;
- var x128;
- var x129;
- var x130;
- var x131;
- var x132;
- var x133;
- var x134;
- var x135;
- var x136;
- var x137;
- var x138;
- var x139;
- var x140;
- var x141;
- var x142;
- var x143;
- var x144;
- var x145;
- var x146;
- var x147;
- var x148;
- var x149;
- var x150;
- var x151;
- var x152;
- var x153;
- var x154;
- var x155;
- var x156;
+ var x0 = 0;
+ var x1 = 0;
+ var x2 = 0;
+ var x3 = 0;
+ var x4 = 0;
+ var x5 = 0;
+ var x6 = 0;
+ var x7 = 0;
+ var x8 = 0;
+ var x9 = 0;
+ var x10 = 0;
+ var x11 = 0;
+ var x12 = 0;
+ var x13 = 0;
+ var x14 = 0;
+ var x15 = 0;
+ var x16 = 0;
+ var x17 = 0;
+ var x18 = 0;
+ var x19 = 0;
+ var x20 = 0;
+ var x21 = 0;
+ var x22 = 0;
+ var x23 = 0;
+ var x24 = 0;
+ var x25 = 0;
+ var x26 = 0;
+ var x27 = 0;
+ var x28 = 0;
+ var x29 = 0;
+ var x30 = 0;
+ var x31 = 0;
+ var x32 = 0;
+ var x33 = 0;
+ var x34 = 0;
+ var x35 = 0;
+ var x36 = 0;
+ var x37 = 0;
+ var x38 = 0;
+ var x39 = 0;
+ var x40 = 0;
+ var x41 = 0;
+ var x42 = 0;
+ var x43 = 0;
+ var x44 = 0;
+ var x45 = 0;
+ var x46 = 0;
+ var x47 = 0;
+ var x48 = 0;
+ var x49 = 0;
+ var x50 = 0;
+ var x51 = 0;
+ var x52 = 0;
+ var x53 = 0;
+ var x54 = 0;
+ var x55 = 0;
+ var x56 = 0;
+ var x57 = 0;
+ var x58 = 0;
+ var x59 = 0;
+ var x60 = 0;
+ var x61 = 0;
+ var x62 = 0;
+ var x63 = 0;
+ var x64 = 0;
+ var x65 = 0;
+ var x66 = 0;
+ var x67 = 0;
+ var x68 = 0;
+ var x69 = 0;
+ var x70 = 0;
+ var x71 = 0;
+ var x72 = 0;
+ var x73 = 0;
+ var x74 = 0;
+ var x75 = 0;
+ var x76 = 0;
+ var x77 = 0;
+ var x78 = 0;
+ var x79 = 0;
+ var x80 = 0;
+ var x81 = 0;
+ var x82 = 0;
+ var x83 = 0;
+ var x84 = 0;
+ var x85 = 0;
+ var x86 = 0;
+ var x87 = 0;
+ var x88 = 0;
+ var x89 = 0;
+ var x90 = 0;
+ var x91 = 0;
+ var x92 = 0;
+ var x93 = 0;
+ var x94 = 0;
+ var x95 = 0;
+ var x96 = 0;
+ var x97 = 0;
+ var x98 = 0;
+ var x99 = 0;
+ var x100 = 0;
+ var x101 = 0;
+ var x102 = 0;
+ var x103 = 0;
+ var x104 = 0;
+ var x105 = 0;
+ var x106 = 0;
+ var x107 = 0;
+ var x108 = 0;
+ var x109 = 0;
+ var x110 = 0;
+ var x111 = 0;
+ var x112 = 0;
+ var x113 = 0;
+ var x114 = 0;
+ var x115 = 0;
+ var x116 = 0;
+ var x117 = 0;
+ var x118 = 0;
+ var x119 = 0;
+ var x120 = 0;
+ var x121 = 0;
+ var x122 = 0;
+ var x123 = 0;
+ var x124 = 0;
+ var x125 = 0;
+ var x126 = 0;
+ var x127 = 0;
+ var x128 = 0;
+ var x129 = 0;
+ var x130 = 0;
+ var x131 = 0;
+ var x132 = 0;
+ var x133 = 0;
+ var x134 = 0;
+ var x135 = 0;
+ var x136 = 0;
+ var x137 = 0;
+ var x138 = 0;
+ var x139 = 0;
+ var x140 = 0;
+ var x141 = 0;
+ var x142 = 0;
+ var x143 = 0;
+ var x144 = 0;
+ var x145 = 0;
+ var x146 = 0;
+ var x147 = 0;
+ var x148 = 0;
+ var x149 = 0;
+ var x150 = 0;
+ var x151 = 0;
+ var x152 = 0;
+ var x153 = 0;
+ var x154 = 0;
+ var x155 = 0;
+ var x156 = 0;
var x0 = 0;
if (x129 == 3) { var x129 = x0; }
if (x2 > 3) { return x0; }
@@ -703,23 +1645,337 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 39
+bytecode array length: 576
bytecodes: [
/* 30 E> */ B(StackCheck),
+ /* 43 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 55 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 67 S> */ B(LdaZero),
+ B(Star), R(2),
+ /* 79 S> */ B(LdaZero),
+ B(Star), R(3),
+ /* 91 S> */ B(LdaZero),
+ B(Star), R(4),
+ /* 103 S> */ B(LdaZero),
+ B(Star), R(5),
+ /* 115 S> */ B(LdaZero),
+ B(Star), R(6),
+ /* 127 S> */ B(LdaZero),
+ B(Star), R(7),
+ /* 139 S> */ B(LdaZero),
+ B(Star), R(8),
+ /* 151 S> */ B(LdaZero),
+ B(Star), R(9),
+ /* 164 S> */ B(LdaZero),
+ B(Star), R(10),
+ /* 177 S> */ B(LdaZero),
+ B(Star), R(11),
+ /* 190 S> */ B(LdaZero),
+ B(Star), R(12),
+ /* 203 S> */ B(LdaZero),
+ B(Star), R(13),
+ /* 216 S> */ B(LdaZero),
+ B(Star), R(14),
+ /* 229 S> */ B(LdaZero),
+ B(Star), R(15),
+ /* 242 S> */ B(LdaZero),
+ B(Star), R(16),
+ /* 255 S> */ B(LdaZero),
+ B(Star), R(17),
+ /* 268 S> */ B(LdaZero),
+ B(Star), R(18),
+ /* 281 S> */ B(LdaZero),
+ B(Star), R(19),
+ /* 294 S> */ B(LdaZero),
+ B(Star), R(20),
+ /* 307 S> */ B(LdaZero),
+ B(Star), R(21),
+ /* 320 S> */ B(LdaZero),
+ B(Star), R(22),
+ /* 333 S> */ B(LdaZero),
+ B(Star), R(23),
+ /* 346 S> */ B(LdaZero),
+ B(Star), R(24),
+ /* 359 S> */ B(LdaZero),
+ B(Star), R(25),
+ /* 372 S> */ B(LdaZero),
+ B(Star), R(26),
+ /* 385 S> */ B(LdaZero),
+ B(Star), R(27),
+ /* 398 S> */ B(LdaZero),
+ B(Star), R(28),
+ /* 411 S> */ B(LdaZero),
+ B(Star), R(29),
+ /* 424 S> */ B(LdaZero),
+ B(Star), R(30),
+ /* 437 S> */ B(LdaZero),
+ B(Star), R(31),
+ /* 450 S> */ B(LdaZero),
+ B(Star), R(32),
+ /* 463 S> */ B(LdaZero),
+ B(Star), R(33),
+ /* 476 S> */ B(LdaZero),
+ B(Star), R(34),
+ /* 489 S> */ B(LdaZero),
+ B(Star), R(35),
+ /* 502 S> */ B(LdaZero),
+ B(Star), R(36),
+ /* 515 S> */ B(LdaZero),
+ B(Star), R(37),
+ /* 528 S> */ B(LdaZero),
+ B(Star), R(38),
+ /* 541 S> */ B(LdaZero),
+ B(Star), R(39),
+ /* 554 S> */ B(LdaZero),
+ B(Star), R(40),
+ /* 567 S> */ B(LdaZero),
+ B(Star), R(41),
+ /* 580 S> */ B(LdaZero),
+ B(Star), R(42),
+ /* 593 S> */ B(LdaZero),
+ B(Star), R(43),
+ /* 606 S> */ B(LdaZero),
+ B(Star), R(44),
+ /* 619 S> */ B(LdaZero),
+ B(Star), R(45),
+ /* 632 S> */ B(LdaZero),
+ B(Star), R(46),
+ /* 645 S> */ B(LdaZero),
+ B(Star), R(47),
+ /* 658 S> */ B(LdaZero),
+ B(Star), R(48),
+ /* 671 S> */ B(LdaZero),
+ B(Star), R(49),
+ /* 684 S> */ B(LdaZero),
+ B(Star), R(50),
+ /* 697 S> */ B(LdaZero),
+ B(Star), R(51),
+ /* 710 S> */ B(LdaZero),
+ B(Star), R(52),
+ /* 723 S> */ B(LdaZero),
+ B(Star), R(53),
+ /* 736 S> */ B(LdaZero),
+ B(Star), R(54),
+ /* 749 S> */ B(LdaZero),
+ B(Star), R(55),
+ /* 762 S> */ B(LdaZero),
+ B(Star), R(56),
+ /* 775 S> */ B(LdaZero),
+ B(Star), R(57),
+ /* 788 S> */ B(LdaZero),
+ B(Star), R(58),
+ /* 801 S> */ B(LdaZero),
+ B(Star), R(59),
+ /* 814 S> */ B(LdaZero),
+ B(Star), R(60),
+ /* 827 S> */ B(LdaZero),
+ B(Star), R(61),
+ /* 840 S> */ B(LdaZero),
+ B(Star), R(62),
+ /* 853 S> */ B(LdaZero),
+ B(Star), R(63),
+ /* 866 S> */ B(LdaZero),
+ B(Star), R(64),
+ /* 879 S> */ B(LdaZero),
+ B(Star), R(65),
+ /* 892 S> */ B(LdaZero),
+ B(Star), R(66),
+ /* 905 S> */ B(LdaZero),
+ B(Star), R(67),
+ /* 918 S> */ B(LdaZero),
+ B(Star), R(68),
+ /* 931 S> */ B(LdaZero),
+ B(Star), R(69),
+ /* 944 S> */ B(LdaZero),
+ B(Star), R(70),
+ /* 957 S> */ B(LdaZero),
+ B(Star), R(71),
+ /* 970 S> */ B(LdaZero),
+ B(Star), R(72),
+ /* 983 S> */ B(LdaZero),
+ B(Star), R(73),
+ /* 996 S> */ B(LdaZero),
+ B(Star), R(74),
+ /* 1009 S> */ B(LdaZero),
+ B(Star), R(75),
+ /* 1022 S> */ B(LdaZero),
+ B(Star), R(76),
+ /* 1035 S> */ B(LdaZero),
+ B(Star), R(77),
+ /* 1048 S> */ B(LdaZero),
+ B(Star), R(78),
+ /* 1061 S> */ B(LdaZero),
+ B(Star), R(79),
+ /* 1074 S> */ B(LdaZero),
+ B(Star), R(80),
+ /* 1087 S> */ B(LdaZero),
+ B(Star), R(81),
+ /* 1100 S> */ B(LdaZero),
+ B(Star), R(82),
+ /* 1113 S> */ B(LdaZero),
+ B(Star), R(83),
+ /* 1126 S> */ B(LdaZero),
+ B(Star), R(84),
+ /* 1139 S> */ B(LdaZero),
+ B(Star), R(85),
+ /* 1152 S> */ B(LdaZero),
+ B(Star), R(86),
+ /* 1165 S> */ B(LdaZero),
+ B(Star), R(87),
+ /* 1178 S> */ B(LdaZero),
+ B(Star), R(88),
+ /* 1191 S> */ B(LdaZero),
+ B(Star), R(89),
+ /* 1204 S> */ B(LdaZero),
+ B(Star), R(90),
+ /* 1217 S> */ B(LdaZero),
+ B(Star), R(91),
+ /* 1230 S> */ B(LdaZero),
+ B(Star), R(92),
+ /* 1243 S> */ B(LdaZero),
+ B(Star), R(93),
+ /* 1256 S> */ B(LdaZero),
+ B(Star), R(94),
+ /* 1269 S> */ B(LdaZero),
+ B(Star), R(95),
+ /* 1282 S> */ B(LdaZero),
+ B(Star), R(96),
+ /* 1295 S> */ B(LdaZero),
+ B(Star), R(97),
+ /* 1308 S> */ B(LdaZero),
+ B(Star), R(98),
+ /* 1321 S> */ B(LdaZero),
+ B(Star), R(99),
+ /* 1335 S> */ B(LdaZero),
+ B(Star), R(100),
+ /* 1349 S> */ B(LdaZero),
+ B(Star), R(101),
+ /* 1363 S> */ B(LdaZero),
+ B(Star), R(102),
+ /* 1377 S> */ B(LdaZero),
+ B(Star), R(103),
+ /* 1391 S> */ B(LdaZero),
+ B(Star), R(104),
+ /* 1405 S> */ B(LdaZero),
+ B(Star), R(105),
+ /* 1419 S> */ B(LdaZero),
+ B(Star), R(106),
+ /* 1433 S> */ B(LdaZero),
+ B(Star), R(107),
+ /* 1447 S> */ B(LdaZero),
+ B(Star), R(108),
+ /* 1461 S> */ B(LdaZero),
+ B(Star), R(109),
+ /* 1475 S> */ B(LdaZero),
+ B(Star), R(110),
+ /* 1489 S> */ B(LdaZero),
+ B(Star), R(111),
/* 1503 S> */ B(LdaZero),
+ B(Star), R(112),
+ /* 1517 S> */ B(LdaZero),
+ B(Star), R(113),
+ /* 1531 S> */ B(LdaZero),
+ B(Star), R(114),
+ /* 1545 S> */ B(LdaZero),
+ B(Star), R(115),
+ /* 1559 S> */ B(LdaZero),
+ B(Star), R(116),
+ /* 1573 S> */ B(LdaZero),
+ B(Star), R(117),
+ /* 1587 S> */ B(LdaZero),
+ B(Star), R(118),
+ /* 1601 S> */ B(LdaZero),
+ B(Star), R(119),
+ /* 1615 S> */ B(LdaZero),
+ B(Star), R(120),
+ /* 1629 S> */ B(LdaZero),
+ B(Star), R(121),
+ /* 1643 S> */ B(LdaZero),
+ B(Star), R(122),
+ /* 1657 S> */ B(LdaZero),
+ B(Star), R(123),
+ /* 1671 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(124),
+ /* 1685 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(125),
+ /* 1699 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(126),
+ /* 1713 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(127),
+ /* 1727 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(128),
+ /* 1741 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(129),
+ /* 1755 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(130),
+ /* 1769 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(131),
+ /* 1783 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(132),
+ /* 1797 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(133),
+ /* 1811 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(134),
+ /* 1825 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(135),
+ /* 1839 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(136),
+ /* 1853 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(137),
+ /* 1867 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(138),
+ /* 1881 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(139),
+ /* 1895 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(140),
+ /* 1909 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(141),
+ /* 1923 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(142),
+ /* 1937 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(143),
+ /* 1951 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(144),
+ /* 1965 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(145),
+ /* 1979 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(146),
+ /* 1993 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(147),
+ /* 2007 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(148),
+ /* 2021 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(149),
+ /* 2035 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(150),
+ /* 2049 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(151),
+ /* 2063 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(152),
+ /* 2077 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(153),
+ /* 2091 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(154),
+ /* 2105 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(155),
+ /* 2119 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(156),
+ /* 2131 S> */ B(LdaZero),
B(Star), R(0),
- /* 1506 S> */ B(LdaSmi), I8(3),
- /* 1515 E> */ B(Wide), B(TestEqual), R16(129), U16(0),
+ /* 2134 S> */ B(LdaSmi), I8(3),
+ /* 2143 E> */ B(Wide), B(TestEqual), R16(129), U16(0),
B(JumpIfFalse), U8(12),
- /* 1534 S> */ B(Wide), B(Mov), R16(0), R16(129),
+ /* 2162 S> */ B(Wide), B(Mov), R16(0), R16(129),
B(Wide), B(Ldar), R16(129),
- /* 1540 S> */ B(LdaSmi), I8(3),
- /* 1547 E> */ B(TestGreaterThan), R(2), U8(1),
+ /* 2168 S> */ B(LdaSmi), I8(3),
+ /* 2175 E> */ B(TestGreaterThan), R(2), U8(1),
B(JumpIfFalse), U8(5),
- /* 1554 S> */ B(Ldar), R(0),
- /* 1564 S> */ B(Return),
- /* 1567 S> */ B(Wide), B(Ldar), R16(129),
- /* 1579 S> */ B(Return),
+ /* 2182 S> */ B(Ldar), R(0),
+ /* 2192 S> */ B(Return),
+ /* 2195 S> */ B(Wide), B(Ldar), R16(129),
+ /* 2207 S> */ B(Return),
]
constant pool: [
]
@@ -728,192 +1984,506 @@ handlers: [
---
snippet: "
- var x0;
- var x1;
- var x2;
- var x3;
- var x4;
- var x5;
- var x6;
- var x7;
- var x8;
- var x9;
- var x10;
- var x11;
- var x12;
- var x13;
- var x14;
- var x15;
- var x16;
- var x17;
- var x18;
- var x19;
- var x20;
- var x21;
- var x22;
- var x23;
- var x24;
- var x25;
- var x26;
- var x27;
- var x28;
- var x29;
- var x30;
- var x31;
- var x32;
- var x33;
- var x34;
- var x35;
- var x36;
- var x37;
- var x38;
- var x39;
- var x40;
- var x41;
- var x42;
- var x43;
- var x44;
- var x45;
- var x46;
- var x47;
- var x48;
- var x49;
- var x50;
- var x51;
- var x52;
- var x53;
- var x54;
- var x55;
- var x56;
- var x57;
- var x58;
- var x59;
- var x60;
- var x61;
- var x62;
- var x63;
- var x64;
- var x65;
- var x66;
- var x67;
- var x68;
- var x69;
- var x70;
- var x71;
- var x72;
- var x73;
- var x74;
- var x75;
- var x76;
- var x77;
- var x78;
- var x79;
- var x80;
- var x81;
- var x82;
- var x83;
- var x84;
- var x85;
- var x86;
- var x87;
- var x88;
- var x89;
- var x90;
- var x91;
- var x92;
- var x93;
- var x94;
- var x95;
- var x96;
- var x97;
- var x98;
- var x99;
- var x100;
- var x101;
- var x102;
- var x103;
- var x104;
- var x105;
- var x106;
- var x107;
- var x108;
- var x109;
- var x110;
- var x111;
- var x112;
- var x113;
- var x114;
- var x115;
- var x116;
- var x117;
- var x118;
- var x119;
- var x120;
- var x121;
- var x122;
- var x123;
- var x124;
- var x125;
- var x126;
- var x127;
- var x128;
- var x129;
- var x130;
- var x131;
- var x132;
- var x133;
- var x134;
- var x135;
- var x136;
- var x137;
- var x138;
- var x139;
- var x140;
- var x141;
- var x142;
- var x143;
- var x144;
- var x145;
- var x146;
- var x147;
- var x148;
- var x149;
- var x150;
- var x151;
- var x152;
- var x153;
- var x154;
- var x155;
- var x156;
+ var x0 = 0;
+ var x1 = 0;
+ var x2 = 0;
+ var x3 = 0;
+ var x4 = 0;
+ var x5 = 0;
+ var x6 = 0;
+ var x7 = 0;
+ var x8 = 0;
+ var x9 = 0;
+ var x10 = 0;
+ var x11 = 0;
+ var x12 = 0;
+ var x13 = 0;
+ var x14 = 0;
+ var x15 = 0;
+ var x16 = 0;
+ var x17 = 0;
+ var x18 = 0;
+ var x19 = 0;
+ var x20 = 0;
+ var x21 = 0;
+ var x22 = 0;
+ var x23 = 0;
+ var x24 = 0;
+ var x25 = 0;
+ var x26 = 0;
+ var x27 = 0;
+ var x28 = 0;
+ var x29 = 0;
+ var x30 = 0;
+ var x31 = 0;
+ var x32 = 0;
+ var x33 = 0;
+ var x34 = 0;
+ var x35 = 0;
+ var x36 = 0;
+ var x37 = 0;
+ var x38 = 0;
+ var x39 = 0;
+ var x40 = 0;
+ var x41 = 0;
+ var x42 = 0;
+ var x43 = 0;
+ var x44 = 0;
+ var x45 = 0;
+ var x46 = 0;
+ var x47 = 0;
+ var x48 = 0;
+ var x49 = 0;
+ var x50 = 0;
+ var x51 = 0;
+ var x52 = 0;
+ var x53 = 0;
+ var x54 = 0;
+ var x55 = 0;
+ var x56 = 0;
+ var x57 = 0;
+ var x58 = 0;
+ var x59 = 0;
+ var x60 = 0;
+ var x61 = 0;
+ var x62 = 0;
+ var x63 = 0;
+ var x64 = 0;
+ var x65 = 0;
+ var x66 = 0;
+ var x67 = 0;
+ var x68 = 0;
+ var x69 = 0;
+ var x70 = 0;
+ var x71 = 0;
+ var x72 = 0;
+ var x73 = 0;
+ var x74 = 0;
+ var x75 = 0;
+ var x76 = 0;
+ var x77 = 0;
+ var x78 = 0;
+ var x79 = 0;
+ var x80 = 0;
+ var x81 = 0;
+ var x82 = 0;
+ var x83 = 0;
+ var x84 = 0;
+ var x85 = 0;
+ var x86 = 0;
+ var x87 = 0;
+ var x88 = 0;
+ var x89 = 0;
+ var x90 = 0;
+ var x91 = 0;
+ var x92 = 0;
+ var x93 = 0;
+ var x94 = 0;
+ var x95 = 0;
+ var x96 = 0;
+ var x97 = 0;
+ var x98 = 0;
+ var x99 = 0;
+ var x100 = 0;
+ var x101 = 0;
+ var x102 = 0;
+ var x103 = 0;
+ var x104 = 0;
+ var x105 = 0;
+ var x106 = 0;
+ var x107 = 0;
+ var x108 = 0;
+ var x109 = 0;
+ var x110 = 0;
+ var x111 = 0;
+ var x112 = 0;
+ var x113 = 0;
+ var x114 = 0;
+ var x115 = 0;
+ var x116 = 0;
+ var x117 = 0;
+ var x118 = 0;
+ var x119 = 0;
+ var x120 = 0;
+ var x121 = 0;
+ var x122 = 0;
+ var x123 = 0;
+ var x124 = 0;
+ var x125 = 0;
+ var x126 = 0;
+ var x127 = 0;
+ var x128 = 0;
+ var x129 = 0;
+ var x130 = 0;
+ var x131 = 0;
+ var x132 = 0;
+ var x133 = 0;
+ var x134 = 0;
+ var x135 = 0;
+ var x136 = 0;
+ var x137 = 0;
+ var x138 = 0;
+ var x139 = 0;
+ var x140 = 0;
+ var x141 = 0;
+ var x142 = 0;
+ var x143 = 0;
+ var x144 = 0;
+ var x145 = 0;
+ var x146 = 0;
+ var x147 = 0;
+ var x148 = 0;
+ var x149 = 0;
+ var x150 = 0;
+ var x151 = 0;
+ var x152 = 0;
+ var x153 = 0;
+ var x154 = 0;
+ var x155 = 0;
+ var x156 = 0;
var x0 = 0;
var x1 = 0;
for (x128 = 0; x128 < 64; x128++) { x1 += x128;}return x128;
"
frame size: 158
parameter count: 1
-bytecode array length: 56
+bytecode array length: 593
bytecodes: [
/* 30 E> */ B(StackCheck),
+ /* 43 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 55 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 67 S> */ B(LdaZero),
+ B(Star), R(2),
+ /* 79 S> */ B(LdaZero),
+ B(Star), R(3),
+ /* 91 S> */ B(LdaZero),
+ B(Star), R(4),
+ /* 103 S> */ B(LdaZero),
+ B(Star), R(5),
+ /* 115 S> */ B(LdaZero),
+ B(Star), R(6),
+ /* 127 S> */ B(LdaZero),
+ B(Star), R(7),
+ /* 139 S> */ B(LdaZero),
+ B(Star), R(8),
+ /* 151 S> */ B(LdaZero),
+ B(Star), R(9),
+ /* 164 S> */ B(LdaZero),
+ B(Star), R(10),
+ /* 177 S> */ B(LdaZero),
+ B(Star), R(11),
+ /* 190 S> */ B(LdaZero),
+ B(Star), R(12),
+ /* 203 S> */ B(LdaZero),
+ B(Star), R(13),
+ /* 216 S> */ B(LdaZero),
+ B(Star), R(14),
+ /* 229 S> */ B(LdaZero),
+ B(Star), R(15),
+ /* 242 S> */ B(LdaZero),
+ B(Star), R(16),
+ /* 255 S> */ B(LdaZero),
+ B(Star), R(17),
+ /* 268 S> */ B(LdaZero),
+ B(Star), R(18),
+ /* 281 S> */ B(LdaZero),
+ B(Star), R(19),
+ /* 294 S> */ B(LdaZero),
+ B(Star), R(20),
+ /* 307 S> */ B(LdaZero),
+ B(Star), R(21),
+ /* 320 S> */ B(LdaZero),
+ B(Star), R(22),
+ /* 333 S> */ B(LdaZero),
+ B(Star), R(23),
+ /* 346 S> */ B(LdaZero),
+ B(Star), R(24),
+ /* 359 S> */ B(LdaZero),
+ B(Star), R(25),
+ /* 372 S> */ B(LdaZero),
+ B(Star), R(26),
+ /* 385 S> */ B(LdaZero),
+ B(Star), R(27),
+ /* 398 S> */ B(LdaZero),
+ B(Star), R(28),
+ /* 411 S> */ B(LdaZero),
+ B(Star), R(29),
+ /* 424 S> */ B(LdaZero),
+ B(Star), R(30),
+ /* 437 S> */ B(LdaZero),
+ B(Star), R(31),
+ /* 450 S> */ B(LdaZero),
+ B(Star), R(32),
+ /* 463 S> */ B(LdaZero),
+ B(Star), R(33),
+ /* 476 S> */ B(LdaZero),
+ B(Star), R(34),
+ /* 489 S> */ B(LdaZero),
+ B(Star), R(35),
+ /* 502 S> */ B(LdaZero),
+ B(Star), R(36),
+ /* 515 S> */ B(LdaZero),
+ B(Star), R(37),
+ /* 528 S> */ B(LdaZero),
+ B(Star), R(38),
+ /* 541 S> */ B(LdaZero),
+ B(Star), R(39),
+ /* 554 S> */ B(LdaZero),
+ B(Star), R(40),
+ /* 567 S> */ B(LdaZero),
+ B(Star), R(41),
+ /* 580 S> */ B(LdaZero),
+ B(Star), R(42),
+ /* 593 S> */ B(LdaZero),
+ B(Star), R(43),
+ /* 606 S> */ B(LdaZero),
+ B(Star), R(44),
+ /* 619 S> */ B(LdaZero),
+ B(Star), R(45),
+ /* 632 S> */ B(LdaZero),
+ B(Star), R(46),
+ /* 645 S> */ B(LdaZero),
+ B(Star), R(47),
+ /* 658 S> */ B(LdaZero),
+ B(Star), R(48),
+ /* 671 S> */ B(LdaZero),
+ B(Star), R(49),
+ /* 684 S> */ B(LdaZero),
+ B(Star), R(50),
+ /* 697 S> */ B(LdaZero),
+ B(Star), R(51),
+ /* 710 S> */ B(LdaZero),
+ B(Star), R(52),
+ /* 723 S> */ B(LdaZero),
+ B(Star), R(53),
+ /* 736 S> */ B(LdaZero),
+ B(Star), R(54),
+ /* 749 S> */ B(LdaZero),
+ B(Star), R(55),
+ /* 762 S> */ B(LdaZero),
+ B(Star), R(56),
+ /* 775 S> */ B(LdaZero),
+ B(Star), R(57),
+ /* 788 S> */ B(LdaZero),
+ B(Star), R(58),
+ /* 801 S> */ B(LdaZero),
+ B(Star), R(59),
+ /* 814 S> */ B(LdaZero),
+ B(Star), R(60),
+ /* 827 S> */ B(LdaZero),
+ B(Star), R(61),
+ /* 840 S> */ B(LdaZero),
+ B(Star), R(62),
+ /* 853 S> */ B(LdaZero),
+ B(Star), R(63),
+ /* 866 S> */ B(LdaZero),
+ B(Star), R(64),
+ /* 879 S> */ B(LdaZero),
+ B(Star), R(65),
+ /* 892 S> */ B(LdaZero),
+ B(Star), R(66),
+ /* 905 S> */ B(LdaZero),
+ B(Star), R(67),
+ /* 918 S> */ B(LdaZero),
+ B(Star), R(68),
+ /* 931 S> */ B(LdaZero),
+ B(Star), R(69),
+ /* 944 S> */ B(LdaZero),
+ B(Star), R(70),
+ /* 957 S> */ B(LdaZero),
+ B(Star), R(71),
+ /* 970 S> */ B(LdaZero),
+ B(Star), R(72),
+ /* 983 S> */ B(LdaZero),
+ B(Star), R(73),
+ /* 996 S> */ B(LdaZero),
+ B(Star), R(74),
+ /* 1009 S> */ B(LdaZero),
+ B(Star), R(75),
+ /* 1022 S> */ B(LdaZero),
+ B(Star), R(76),
+ /* 1035 S> */ B(LdaZero),
+ B(Star), R(77),
+ /* 1048 S> */ B(LdaZero),
+ B(Star), R(78),
+ /* 1061 S> */ B(LdaZero),
+ B(Star), R(79),
+ /* 1074 S> */ B(LdaZero),
+ B(Star), R(80),
+ /* 1087 S> */ B(LdaZero),
+ B(Star), R(81),
+ /* 1100 S> */ B(LdaZero),
+ B(Star), R(82),
+ /* 1113 S> */ B(LdaZero),
+ B(Star), R(83),
+ /* 1126 S> */ B(LdaZero),
+ B(Star), R(84),
+ /* 1139 S> */ B(LdaZero),
+ B(Star), R(85),
+ /* 1152 S> */ B(LdaZero),
+ B(Star), R(86),
+ /* 1165 S> */ B(LdaZero),
+ B(Star), R(87),
+ /* 1178 S> */ B(LdaZero),
+ B(Star), R(88),
+ /* 1191 S> */ B(LdaZero),
+ B(Star), R(89),
+ /* 1204 S> */ B(LdaZero),
+ B(Star), R(90),
+ /* 1217 S> */ B(LdaZero),
+ B(Star), R(91),
+ /* 1230 S> */ B(LdaZero),
+ B(Star), R(92),
+ /* 1243 S> */ B(LdaZero),
+ B(Star), R(93),
+ /* 1256 S> */ B(LdaZero),
+ B(Star), R(94),
+ /* 1269 S> */ B(LdaZero),
+ B(Star), R(95),
+ /* 1282 S> */ B(LdaZero),
+ B(Star), R(96),
+ /* 1295 S> */ B(LdaZero),
+ B(Star), R(97),
+ /* 1308 S> */ B(LdaZero),
+ B(Star), R(98),
+ /* 1321 S> */ B(LdaZero),
+ B(Star), R(99),
+ /* 1335 S> */ B(LdaZero),
+ B(Star), R(100),
+ /* 1349 S> */ B(LdaZero),
+ B(Star), R(101),
+ /* 1363 S> */ B(LdaZero),
+ B(Star), R(102),
+ /* 1377 S> */ B(LdaZero),
+ B(Star), R(103),
+ /* 1391 S> */ B(LdaZero),
+ B(Star), R(104),
+ /* 1405 S> */ B(LdaZero),
+ B(Star), R(105),
+ /* 1419 S> */ B(LdaZero),
+ B(Star), R(106),
+ /* 1433 S> */ B(LdaZero),
+ B(Star), R(107),
+ /* 1447 S> */ B(LdaZero),
+ B(Star), R(108),
+ /* 1461 S> */ B(LdaZero),
+ B(Star), R(109),
+ /* 1475 S> */ B(LdaZero),
+ B(Star), R(110),
+ /* 1489 S> */ B(LdaZero),
+ B(Star), R(111),
/* 1503 S> */ B(LdaZero),
+ B(Star), R(112),
+ /* 1517 S> */ B(LdaZero),
+ B(Star), R(113),
+ /* 1531 S> */ B(LdaZero),
+ B(Star), R(114),
+ /* 1545 S> */ B(LdaZero),
+ B(Star), R(115),
+ /* 1559 S> */ B(LdaZero),
+ B(Star), R(116),
+ /* 1573 S> */ B(LdaZero),
+ B(Star), R(117),
+ /* 1587 S> */ B(LdaZero),
+ B(Star), R(118),
+ /* 1601 S> */ B(LdaZero),
+ B(Star), R(119),
+ /* 1615 S> */ B(LdaZero),
+ B(Star), R(120),
+ /* 1629 S> */ B(LdaZero),
+ B(Star), R(121),
+ /* 1643 S> */ B(LdaZero),
+ B(Star), R(122),
+ /* 1657 S> */ B(LdaZero),
+ B(Star), R(123),
+ /* 1671 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(124),
+ /* 1685 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(125),
+ /* 1699 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(126),
+ /* 1713 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(127),
+ /* 1727 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(128),
+ /* 1741 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(129),
+ /* 1755 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(130),
+ /* 1769 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(131),
+ /* 1783 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(132),
+ /* 1797 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(133),
+ /* 1811 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(134),
+ /* 1825 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(135),
+ /* 1839 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(136),
+ /* 1853 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(137),
+ /* 1867 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(138),
+ /* 1881 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(139),
+ /* 1895 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(140),
+ /* 1909 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(141),
+ /* 1923 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(142),
+ /* 1937 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(143),
+ /* 1951 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(144),
+ /* 1965 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(145),
+ /* 1979 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(146),
+ /* 1993 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(147),
+ /* 2007 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(148),
+ /* 2021 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(149),
+ /* 2035 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(150),
+ /* 2049 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(151),
+ /* 2063 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(152),
+ /* 2077 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(153),
+ /* 2091 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(154),
+ /* 2105 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(155),
+ /* 2119 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(156),
+ /* 2131 S> */ B(LdaZero),
B(Star), R(0),
- /* 1515 S> */ B(LdaZero),
+ /* 2143 S> */ B(LdaZero),
B(Star), R(1),
- /* 1523 S> */ B(LdaZero),
+ /* 2151 S> */ B(LdaZero),
B(Wide), B(Star), R16(128),
- /* 1538 S> */ B(LdaSmi), I8(64),
- /* 1538 E> */ B(Wide), B(TestLessThan), R16(128), U16(0),
+ /* 2166 S> */ B(LdaSmi), I8(64),
+ /* 2166 E> */ B(Wide), B(TestLessThan), R16(128), U16(0),
B(JumpIfFalse), U8(31),
- /* 1518 E> */ B(StackCheck),
- /* 1555 S> */ B(Wide), B(Ldar), R16(128),
- /* 1561 E> */ B(Add), R(1), U8(1),
+ /* 2146 E> */ B(StackCheck),
+ /* 2183 S> */ B(Wide), B(Ldar), R16(128),
+ /* 2189 E> */ B(Add), R(1), U8(1),
B(Wide), B(Mov), R16(1), R16(157),
B(Star), R(1),
- /* 1548 S> */ B(Wide), B(Ldar), R16(128),
+ /* 2176 S> */ B(Wide), B(Ldar), R16(128),
B(Inc), U8(2),
B(Wide), B(Star), R16(128),
B(JumpLoop), U8(36), I8(0),
- /* 1567 S> */ B(Wide), B(Ldar), R16(128),
- /* 1579 S> */ B(Return),
+ /* 2195 S> */ B(Wide), B(Ldar), R16(128),
+ /* 2207 S> */ B(Return),
]
constant pool: [
]
@@ -922,177 +2492,491 @@ handlers: [
---
snippet: "
- var x0;
- var x1;
- var x2;
- var x3;
- var x4;
- var x5;
- var x6;
- var x7;
- var x8;
- var x9;
- var x10;
- var x11;
- var x12;
- var x13;
- var x14;
- var x15;
- var x16;
- var x17;
- var x18;
- var x19;
- var x20;
- var x21;
- var x22;
- var x23;
- var x24;
- var x25;
- var x26;
- var x27;
- var x28;
- var x29;
- var x30;
- var x31;
- var x32;
- var x33;
- var x34;
- var x35;
- var x36;
- var x37;
- var x38;
- var x39;
- var x40;
- var x41;
- var x42;
- var x43;
- var x44;
- var x45;
- var x46;
- var x47;
- var x48;
- var x49;
- var x50;
- var x51;
- var x52;
- var x53;
- var x54;
- var x55;
- var x56;
- var x57;
- var x58;
- var x59;
- var x60;
- var x61;
- var x62;
- var x63;
- var x64;
- var x65;
- var x66;
- var x67;
- var x68;
- var x69;
- var x70;
- var x71;
- var x72;
- var x73;
- var x74;
- var x75;
- var x76;
- var x77;
- var x78;
- var x79;
- var x80;
- var x81;
- var x82;
- var x83;
- var x84;
- var x85;
- var x86;
- var x87;
- var x88;
- var x89;
- var x90;
- var x91;
- var x92;
- var x93;
- var x94;
- var x95;
- var x96;
- var x97;
- var x98;
- var x99;
- var x100;
- var x101;
- var x102;
- var x103;
- var x104;
- var x105;
- var x106;
- var x107;
- var x108;
- var x109;
- var x110;
- var x111;
- var x112;
- var x113;
- var x114;
- var x115;
- var x116;
- var x117;
- var x118;
- var x119;
- var x120;
- var x121;
- var x122;
- var x123;
- var x124;
- var x125;
- var x126;
- var x127;
- var x128;
- var x129;
- var x130;
- var x131;
- var x132;
- var x133;
- var x134;
- var x135;
- var x136;
- var x137;
- var x138;
- var x139;
- var x140;
- var x141;
- var x142;
- var x143;
- var x144;
- var x145;
- var x146;
- var x147;
- var x148;
- var x149;
- var x150;
- var x151;
- var x152;
- var x153;
- var x154;
- var x155;
- var x156;
+ var x0 = 0;
+ var x1 = 0;
+ var x2 = 0;
+ var x3 = 0;
+ var x4 = 0;
+ var x5 = 0;
+ var x6 = 0;
+ var x7 = 0;
+ var x8 = 0;
+ var x9 = 0;
+ var x10 = 0;
+ var x11 = 0;
+ var x12 = 0;
+ var x13 = 0;
+ var x14 = 0;
+ var x15 = 0;
+ var x16 = 0;
+ var x17 = 0;
+ var x18 = 0;
+ var x19 = 0;
+ var x20 = 0;
+ var x21 = 0;
+ var x22 = 0;
+ var x23 = 0;
+ var x24 = 0;
+ var x25 = 0;
+ var x26 = 0;
+ var x27 = 0;
+ var x28 = 0;
+ var x29 = 0;
+ var x30 = 0;
+ var x31 = 0;
+ var x32 = 0;
+ var x33 = 0;
+ var x34 = 0;
+ var x35 = 0;
+ var x36 = 0;
+ var x37 = 0;
+ var x38 = 0;
+ var x39 = 0;
+ var x40 = 0;
+ var x41 = 0;
+ var x42 = 0;
+ var x43 = 0;
+ var x44 = 0;
+ var x45 = 0;
+ var x46 = 0;
+ var x47 = 0;
+ var x48 = 0;
+ var x49 = 0;
+ var x50 = 0;
+ var x51 = 0;
+ var x52 = 0;
+ var x53 = 0;
+ var x54 = 0;
+ var x55 = 0;
+ var x56 = 0;
+ var x57 = 0;
+ var x58 = 0;
+ var x59 = 0;
+ var x60 = 0;
+ var x61 = 0;
+ var x62 = 0;
+ var x63 = 0;
+ var x64 = 0;
+ var x65 = 0;
+ var x66 = 0;
+ var x67 = 0;
+ var x68 = 0;
+ var x69 = 0;
+ var x70 = 0;
+ var x71 = 0;
+ var x72 = 0;
+ var x73 = 0;
+ var x74 = 0;
+ var x75 = 0;
+ var x76 = 0;
+ var x77 = 0;
+ var x78 = 0;
+ var x79 = 0;
+ var x80 = 0;
+ var x81 = 0;
+ var x82 = 0;
+ var x83 = 0;
+ var x84 = 0;
+ var x85 = 0;
+ var x86 = 0;
+ var x87 = 0;
+ var x88 = 0;
+ var x89 = 0;
+ var x90 = 0;
+ var x91 = 0;
+ var x92 = 0;
+ var x93 = 0;
+ var x94 = 0;
+ var x95 = 0;
+ var x96 = 0;
+ var x97 = 0;
+ var x98 = 0;
+ var x99 = 0;
+ var x100 = 0;
+ var x101 = 0;
+ var x102 = 0;
+ var x103 = 0;
+ var x104 = 0;
+ var x105 = 0;
+ var x106 = 0;
+ var x107 = 0;
+ var x108 = 0;
+ var x109 = 0;
+ var x110 = 0;
+ var x111 = 0;
+ var x112 = 0;
+ var x113 = 0;
+ var x114 = 0;
+ var x115 = 0;
+ var x116 = 0;
+ var x117 = 0;
+ var x118 = 0;
+ var x119 = 0;
+ var x120 = 0;
+ var x121 = 0;
+ var x122 = 0;
+ var x123 = 0;
+ var x124 = 0;
+ var x125 = 0;
+ var x126 = 0;
+ var x127 = 0;
+ var x128 = 0;
+ var x129 = 0;
+ var x130 = 0;
+ var x131 = 0;
+ var x132 = 0;
+ var x133 = 0;
+ var x134 = 0;
+ var x135 = 0;
+ var x136 = 0;
+ var x137 = 0;
+ var x138 = 0;
+ var x139 = 0;
+ var x140 = 0;
+ var x141 = 0;
+ var x142 = 0;
+ var x143 = 0;
+ var x144 = 0;
+ var x145 = 0;
+ var x146 = 0;
+ var x147 = 0;
+ var x148 = 0;
+ var x149 = 0;
+ var x150 = 0;
+ var x151 = 0;
+ var x152 = 0;
+ var x153 = 0;
+ var x154 = 0;
+ var x155 = 0;
+ var x156 = 0;
var x0 = 1234;
var x1 = 0;
for (x128 in x0) { x1 += x128;}return x1;
"
frame size: 163
parameter count: 1
-bytecode array length: 89
+bytecode array length: 626
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 1503 S> */ B(Wide), B(LdaSmi), I16(1234),
+ /* 43 S> */ B(LdaZero),
B(Star), R(0),
- /* 1518 S> */ B(LdaZero),
+ /* 55 S> */ B(LdaZero),
B(Star), R(1),
- /* 1534 S> */ B(Ldar), R(0),
+ /* 67 S> */ B(LdaZero),
+ B(Star), R(2),
+ /* 79 S> */ B(LdaZero),
+ B(Star), R(3),
+ /* 91 S> */ B(LdaZero),
+ B(Star), R(4),
+ /* 103 S> */ B(LdaZero),
+ B(Star), R(5),
+ /* 115 S> */ B(LdaZero),
+ B(Star), R(6),
+ /* 127 S> */ B(LdaZero),
+ B(Star), R(7),
+ /* 139 S> */ B(LdaZero),
+ B(Star), R(8),
+ /* 151 S> */ B(LdaZero),
+ B(Star), R(9),
+ /* 164 S> */ B(LdaZero),
+ B(Star), R(10),
+ /* 177 S> */ B(LdaZero),
+ B(Star), R(11),
+ /* 190 S> */ B(LdaZero),
+ B(Star), R(12),
+ /* 203 S> */ B(LdaZero),
+ B(Star), R(13),
+ /* 216 S> */ B(LdaZero),
+ B(Star), R(14),
+ /* 229 S> */ B(LdaZero),
+ B(Star), R(15),
+ /* 242 S> */ B(LdaZero),
+ B(Star), R(16),
+ /* 255 S> */ B(LdaZero),
+ B(Star), R(17),
+ /* 268 S> */ B(LdaZero),
+ B(Star), R(18),
+ /* 281 S> */ B(LdaZero),
+ B(Star), R(19),
+ /* 294 S> */ B(LdaZero),
+ B(Star), R(20),
+ /* 307 S> */ B(LdaZero),
+ B(Star), R(21),
+ /* 320 S> */ B(LdaZero),
+ B(Star), R(22),
+ /* 333 S> */ B(LdaZero),
+ B(Star), R(23),
+ /* 346 S> */ B(LdaZero),
+ B(Star), R(24),
+ /* 359 S> */ B(LdaZero),
+ B(Star), R(25),
+ /* 372 S> */ B(LdaZero),
+ B(Star), R(26),
+ /* 385 S> */ B(LdaZero),
+ B(Star), R(27),
+ /* 398 S> */ B(LdaZero),
+ B(Star), R(28),
+ /* 411 S> */ B(LdaZero),
+ B(Star), R(29),
+ /* 424 S> */ B(LdaZero),
+ B(Star), R(30),
+ /* 437 S> */ B(LdaZero),
+ B(Star), R(31),
+ /* 450 S> */ B(LdaZero),
+ B(Star), R(32),
+ /* 463 S> */ B(LdaZero),
+ B(Star), R(33),
+ /* 476 S> */ B(LdaZero),
+ B(Star), R(34),
+ /* 489 S> */ B(LdaZero),
+ B(Star), R(35),
+ /* 502 S> */ B(LdaZero),
+ B(Star), R(36),
+ /* 515 S> */ B(LdaZero),
+ B(Star), R(37),
+ /* 528 S> */ B(LdaZero),
+ B(Star), R(38),
+ /* 541 S> */ B(LdaZero),
+ B(Star), R(39),
+ /* 554 S> */ B(LdaZero),
+ B(Star), R(40),
+ /* 567 S> */ B(LdaZero),
+ B(Star), R(41),
+ /* 580 S> */ B(LdaZero),
+ B(Star), R(42),
+ /* 593 S> */ B(LdaZero),
+ B(Star), R(43),
+ /* 606 S> */ B(LdaZero),
+ B(Star), R(44),
+ /* 619 S> */ B(LdaZero),
+ B(Star), R(45),
+ /* 632 S> */ B(LdaZero),
+ B(Star), R(46),
+ /* 645 S> */ B(LdaZero),
+ B(Star), R(47),
+ /* 658 S> */ B(LdaZero),
+ B(Star), R(48),
+ /* 671 S> */ B(LdaZero),
+ B(Star), R(49),
+ /* 684 S> */ B(LdaZero),
+ B(Star), R(50),
+ /* 697 S> */ B(LdaZero),
+ B(Star), R(51),
+ /* 710 S> */ B(LdaZero),
+ B(Star), R(52),
+ /* 723 S> */ B(LdaZero),
+ B(Star), R(53),
+ /* 736 S> */ B(LdaZero),
+ B(Star), R(54),
+ /* 749 S> */ B(LdaZero),
+ B(Star), R(55),
+ /* 762 S> */ B(LdaZero),
+ B(Star), R(56),
+ /* 775 S> */ B(LdaZero),
+ B(Star), R(57),
+ /* 788 S> */ B(LdaZero),
+ B(Star), R(58),
+ /* 801 S> */ B(LdaZero),
+ B(Star), R(59),
+ /* 814 S> */ B(LdaZero),
+ B(Star), R(60),
+ /* 827 S> */ B(LdaZero),
+ B(Star), R(61),
+ /* 840 S> */ B(LdaZero),
+ B(Star), R(62),
+ /* 853 S> */ B(LdaZero),
+ B(Star), R(63),
+ /* 866 S> */ B(LdaZero),
+ B(Star), R(64),
+ /* 879 S> */ B(LdaZero),
+ B(Star), R(65),
+ /* 892 S> */ B(LdaZero),
+ B(Star), R(66),
+ /* 905 S> */ B(LdaZero),
+ B(Star), R(67),
+ /* 918 S> */ B(LdaZero),
+ B(Star), R(68),
+ /* 931 S> */ B(LdaZero),
+ B(Star), R(69),
+ /* 944 S> */ B(LdaZero),
+ B(Star), R(70),
+ /* 957 S> */ B(LdaZero),
+ B(Star), R(71),
+ /* 970 S> */ B(LdaZero),
+ B(Star), R(72),
+ /* 983 S> */ B(LdaZero),
+ B(Star), R(73),
+ /* 996 S> */ B(LdaZero),
+ B(Star), R(74),
+ /* 1009 S> */ B(LdaZero),
+ B(Star), R(75),
+ /* 1022 S> */ B(LdaZero),
+ B(Star), R(76),
+ /* 1035 S> */ B(LdaZero),
+ B(Star), R(77),
+ /* 1048 S> */ B(LdaZero),
+ B(Star), R(78),
+ /* 1061 S> */ B(LdaZero),
+ B(Star), R(79),
+ /* 1074 S> */ B(LdaZero),
+ B(Star), R(80),
+ /* 1087 S> */ B(LdaZero),
+ B(Star), R(81),
+ /* 1100 S> */ B(LdaZero),
+ B(Star), R(82),
+ /* 1113 S> */ B(LdaZero),
+ B(Star), R(83),
+ /* 1126 S> */ B(LdaZero),
+ B(Star), R(84),
+ /* 1139 S> */ B(LdaZero),
+ B(Star), R(85),
+ /* 1152 S> */ B(LdaZero),
+ B(Star), R(86),
+ /* 1165 S> */ B(LdaZero),
+ B(Star), R(87),
+ /* 1178 S> */ B(LdaZero),
+ B(Star), R(88),
+ /* 1191 S> */ B(LdaZero),
+ B(Star), R(89),
+ /* 1204 S> */ B(LdaZero),
+ B(Star), R(90),
+ /* 1217 S> */ B(LdaZero),
+ B(Star), R(91),
+ /* 1230 S> */ B(LdaZero),
+ B(Star), R(92),
+ /* 1243 S> */ B(LdaZero),
+ B(Star), R(93),
+ /* 1256 S> */ B(LdaZero),
+ B(Star), R(94),
+ /* 1269 S> */ B(LdaZero),
+ B(Star), R(95),
+ /* 1282 S> */ B(LdaZero),
+ B(Star), R(96),
+ /* 1295 S> */ B(LdaZero),
+ B(Star), R(97),
+ /* 1308 S> */ B(LdaZero),
+ B(Star), R(98),
+ /* 1321 S> */ B(LdaZero),
+ B(Star), R(99),
+ /* 1335 S> */ B(LdaZero),
+ B(Star), R(100),
+ /* 1349 S> */ B(LdaZero),
+ B(Star), R(101),
+ /* 1363 S> */ B(LdaZero),
+ B(Star), R(102),
+ /* 1377 S> */ B(LdaZero),
+ B(Star), R(103),
+ /* 1391 S> */ B(LdaZero),
+ B(Star), R(104),
+ /* 1405 S> */ B(LdaZero),
+ B(Star), R(105),
+ /* 1419 S> */ B(LdaZero),
+ B(Star), R(106),
+ /* 1433 S> */ B(LdaZero),
+ B(Star), R(107),
+ /* 1447 S> */ B(LdaZero),
+ B(Star), R(108),
+ /* 1461 S> */ B(LdaZero),
+ B(Star), R(109),
+ /* 1475 S> */ B(LdaZero),
+ B(Star), R(110),
+ /* 1489 S> */ B(LdaZero),
+ B(Star), R(111),
+ /* 1503 S> */ B(LdaZero),
+ B(Star), R(112),
+ /* 1517 S> */ B(LdaZero),
+ B(Star), R(113),
+ /* 1531 S> */ B(LdaZero),
+ B(Star), R(114),
+ /* 1545 S> */ B(LdaZero),
+ B(Star), R(115),
+ /* 1559 S> */ B(LdaZero),
+ B(Star), R(116),
+ /* 1573 S> */ B(LdaZero),
+ B(Star), R(117),
+ /* 1587 S> */ B(LdaZero),
+ B(Star), R(118),
+ /* 1601 S> */ B(LdaZero),
+ B(Star), R(119),
+ /* 1615 S> */ B(LdaZero),
+ B(Star), R(120),
+ /* 1629 S> */ B(LdaZero),
+ B(Star), R(121),
+ /* 1643 S> */ B(LdaZero),
+ B(Star), R(122),
+ /* 1657 S> */ B(LdaZero),
+ B(Star), R(123),
+ /* 1671 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(124),
+ /* 1685 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(125),
+ /* 1699 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(126),
+ /* 1713 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(127),
+ /* 1727 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(128),
+ /* 1741 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(129),
+ /* 1755 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(130),
+ /* 1769 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(131),
+ /* 1783 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(132),
+ /* 1797 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(133),
+ /* 1811 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(134),
+ /* 1825 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(135),
+ /* 1839 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(136),
+ /* 1853 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(137),
+ /* 1867 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(138),
+ /* 1881 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(139),
+ /* 1895 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(140),
+ /* 1909 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(141),
+ /* 1923 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(142),
+ /* 1937 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(143),
+ /* 1951 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(144),
+ /* 1965 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(145),
+ /* 1979 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(146),
+ /* 1993 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(147),
+ /* 2007 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(148),
+ /* 2021 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(149),
+ /* 2035 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(150),
+ /* 2049 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(151),
+ /* 2063 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(152),
+ /* 2077 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(153),
+ /* 2091 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(154),
+ /* 2105 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(155),
+ /* 2119 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(156),
+ /* 2131 S> */ B(Wide), B(LdaSmi), I16(1234),
+ B(Star), R(0),
+ /* 2146 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 2162 S> */ B(Ldar), R(0),
B(JumpIfUndefined), U8(74),
B(JumpIfNull), U8(72),
B(Wide), B(ToObject), R16(157),
@@ -1100,21 +2984,21 @@ bytecodes: [
B(Wide), B(ForInPrepare), R16(158), U16(0),
B(LdaZero),
B(Wide), B(Star), R16(161),
- /* 1526 S> */ B(Wide), B(ForInContinue), R16(161), R16(160),
+ /* 2154 S> */ B(Wide), B(ForInContinue), R16(161), R16(160),
B(JumpIfFalse), U8(45),
B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(0),
B(JumpIfUndefined), U8(22),
B(Wide), B(Star), R16(128),
- /* 1521 E> */ B(StackCheck),
- /* 1541 S> */ B(Wide), B(Ldar), R16(128),
- /* 1547 E> */ B(Add), R(1), U8(1),
+ /* 2149 E> */ B(StackCheck),
+ /* 2169 S> */ B(Wide), B(Ldar), R16(128),
+ /* 2175 E> */ B(Add), R(1), U8(1),
B(Wide), B(Mov), R16(1), R16(162),
B(Star), R(1),
- /* 1544 E> */ B(Wide), B(ForInStep), R16(161),
+ /* 2172 E> */ B(Wide), B(ForInStep), R16(161),
B(Wide), B(Star), R16(161),
B(JumpLoop), U8(48), I8(0),
- /* 1553 S> */ B(Ldar), R(1),
- /* 1563 S> */ B(Return),
+ /* 2181 S> */ B(Ldar), R(1),
+ /* 2191 S> */ B(Return),
]
constant pool: [
]
@@ -1123,163 +3007,163 @@ handlers: [
---
snippet: "
- var x0;
- var x1;
- var x2;
- var x3;
- var x4;
- var x5;
- var x6;
- var x7;
- var x8;
- var x9;
- var x10;
- var x11;
- var x12;
- var x13;
- var x14;
- var x15;
- var x16;
- var x17;
- var x18;
- var x19;
- var x20;
- var x21;
- var x22;
- var x23;
- var x24;
- var x25;
- var x26;
- var x27;
- var x28;
- var x29;
- var x30;
- var x31;
- var x32;
- var x33;
- var x34;
- var x35;
- var x36;
- var x37;
- var x38;
- var x39;
- var x40;
- var x41;
- var x42;
- var x43;
- var x44;
- var x45;
- var x46;
- var x47;
- var x48;
- var x49;
- var x50;
- var x51;
- var x52;
- var x53;
- var x54;
- var x55;
- var x56;
- var x57;
- var x58;
- var x59;
- var x60;
- var x61;
- var x62;
- var x63;
- var x64;
- var x65;
- var x66;
- var x67;
- var x68;
- var x69;
- var x70;
- var x71;
- var x72;
- var x73;
- var x74;
- var x75;
- var x76;
- var x77;
- var x78;
- var x79;
- var x80;
- var x81;
- var x82;
- var x83;
- var x84;
- var x85;
- var x86;
- var x87;
- var x88;
- var x89;
- var x90;
- var x91;
- var x92;
- var x93;
- var x94;
- var x95;
- var x96;
- var x97;
- var x98;
- var x99;
- var x100;
- var x101;
- var x102;
- var x103;
- var x104;
- var x105;
- var x106;
- var x107;
- var x108;
- var x109;
- var x110;
- var x111;
- var x112;
- var x113;
- var x114;
- var x115;
- var x116;
- var x117;
- var x118;
- var x119;
- var x120;
- var x121;
- var x122;
- var x123;
- var x124;
- var x125;
- var x126;
- var x127;
- var x128;
- var x129;
- var x130;
- var x131;
- var x132;
- var x133;
- var x134;
- var x135;
- var x136;
- var x137;
- var x138;
- var x139;
- var x140;
- var x141;
- var x142;
- var x143;
- var x144;
- var x145;
- var x146;
- var x147;
- var x148;
- var x149;
- var x150;
- var x151;
- var x152;
- var x153;
- var x154;
- var x155;
- var x156;
+ var x0 = 0;
+ var x1 = 0;
+ var x2 = 0;
+ var x3 = 0;
+ var x4 = 0;
+ var x5 = 0;
+ var x6 = 0;
+ var x7 = 0;
+ var x8 = 0;
+ var x9 = 0;
+ var x10 = 0;
+ var x11 = 0;
+ var x12 = 0;
+ var x13 = 0;
+ var x14 = 0;
+ var x15 = 0;
+ var x16 = 0;
+ var x17 = 0;
+ var x18 = 0;
+ var x19 = 0;
+ var x20 = 0;
+ var x21 = 0;
+ var x22 = 0;
+ var x23 = 0;
+ var x24 = 0;
+ var x25 = 0;
+ var x26 = 0;
+ var x27 = 0;
+ var x28 = 0;
+ var x29 = 0;
+ var x30 = 0;
+ var x31 = 0;
+ var x32 = 0;
+ var x33 = 0;
+ var x34 = 0;
+ var x35 = 0;
+ var x36 = 0;
+ var x37 = 0;
+ var x38 = 0;
+ var x39 = 0;
+ var x40 = 0;
+ var x41 = 0;
+ var x42 = 0;
+ var x43 = 0;
+ var x44 = 0;
+ var x45 = 0;
+ var x46 = 0;
+ var x47 = 0;
+ var x48 = 0;
+ var x49 = 0;
+ var x50 = 0;
+ var x51 = 0;
+ var x52 = 0;
+ var x53 = 0;
+ var x54 = 0;
+ var x55 = 0;
+ var x56 = 0;
+ var x57 = 0;
+ var x58 = 0;
+ var x59 = 0;
+ var x60 = 0;
+ var x61 = 0;
+ var x62 = 0;
+ var x63 = 0;
+ var x64 = 0;
+ var x65 = 0;
+ var x66 = 0;
+ var x67 = 0;
+ var x68 = 0;
+ var x69 = 0;
+ var x70 = 0;
+ var x71 = 0;
+ var x72 = 0;
+ var x73 = 0;
+ var x74 = 0;
+ var x75 = 0;
+ var x76 = 0;
+ var x77 = 0;
+ var x78 = 0;
+ var x79 = 0;
+ var x80 = 0;
+ var x81 = 0;
+ var x82 = 0;
+ var x83 = 0;
+ var x84 = 0;
+ var x85 = 0;
+ var x86 = 0;
+ var x87 = 0;
+ var x88 = 0;
+ var x89 = 0;
+ var x90 = 0;
+ var x91 = 0;
+ var x92 = 0;
+ var x93 = 0;
+ var x94 = 0;
+ var x95 = 0;
+ var x96 = 0;
+ var x97 = 0;
+ var x98 = 0;
+ var x99 = 0;
+ var x100 = 0;
+ var x101 = 0;
+ var x102 = 0;
+ var x103 = 0;
+ var x104 = 0;
+ var x105 = 0;
+ var x106 = 0;
+ var x107 = 0;
+ var x108 = 0;
+ var x109 = 0;
+ var x110 = 0;
+ var x111 = 0;
+ var x112 = 0;
+ var x113 = 0;
+ var x114 = 0;
+ var x115 = 0;
+ var x116 = 0;
+ var x117 = 0;
+ var x118 = 0;
+ var x119 = 0;
+ var x120 = 0;
+ var x121 = 0;
+ var x122 = 0;
+ var x123 = 0;
+ var x124 = 0;
+ var x125 = 0;
+ var x126 = 0;
+ var x127 = 0;
+ var x128 = 0;
+ var x129 = 0;
+ var x130 = 0;
+ var x131 = 0;
+ var x132 = 0;
+ var x133 = 0;
+ var x134 = 0;
+ var x135 = 0;
+ var x136 = 0;
+ var x137 = 0;
+ var x138 = 0;
+ var x139 = 0;
+ var x140 = 0;
+ var x141 = 0;
+ var x142 = 0;
+ var x143 = 0;
+ var x144 = 0;
+ var x145 = 0;
+ var x146 = 0;
+ var x147 = 0;
+ var x148 = 0;
+ var x149 = 0;
+ var x150 = 0;
+ var x151 = 0;
+ var x152 = 0;
+ var x153 = 0;
+ var x154 = 0;
+ var x155 = 0;
+ var x156 = 0;
x0 = %Add(x64, x63);
x1 = %Add(x27, x143);
%TheHole();
@@ -1287,20 +3171,334 @@ snippet: "
"
frame size: 159
parameter count: 1
-bytecode array length: 53
+bytecode array length: 590
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 1494 S> */ B(Wide), B(Mov), R16(64), R16(157),
+ /* 43 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 55 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 67 S> */ B(LdaZero),
+ B(Star), R(2),
+ /* 79 S> */ B(LdaZero),
+ B(Star), R(3),
+ /* 91 S> */ B(LdaZero),
+ B(Star), R(4),
+ /* 103 S> */ B(LdaZero),
+ B(Star), R(5),
+ /* 115 S> */ B(LdaZero),
+ B(Star), R(6),
+ /* 127 S> */ B(LdaZero),
+ B(Star), R(7),
+ /* 139 S> */ B(LdaZero),
+ B(Star), R(8),
+ /* 151 S> */ B(LdaZero),
+ B(Star), R(9),
+ /* 164 S> */ B(LdaZero),
+ B(Star), R(10),
+ /* 177 S> */ B(LdaZero),
+ B(Star), R(11),
+ /* 190 S> */ B(LdaZero),
+ B(Star), R(12),
+ /* 203 S> */ B(LdaZero),
+ B(Star), R(13),
+ /* 216 S> */ B(LdaZero),
+ B(Star), R(14),
+ /* 229 S> */ B(LdaZero),
+ B(Star), R(15),
+ /* 242 S> */ B(LdaZero),
+ B(Star), R(16),
+ /* 255 S> */ B(LdaZero),
+ B(Star), R(17),
+ /* 268 S> */ B(LdaZero),
+ B(Star), R(18),
+ /* 281 S> */ B(LdaZero),
+ B(Star), R(19),
+ /* 294 S> */ B(LdaZero),
+ B(Star), R(20),
+ /* 307 S> */ B(LdaZero),
+ B(Star), R(21),
+ /* 320 S> */ B(LdaZero),
+ B(Star), R(22),
+ /* 333 S> */ B(LdaZero),
+ B(Star), R(23),
+ /* 346 S> */ B(LdaZero),
+ B(Star), R(24),
+ /* 359 S> */ B(LdaZero),
+ B(Star), R(25),
+ /* 372 S> */ B(LdaZero),
+ B(Star), R(26),
+ /* 385 S> */ B(LdaZero),
+ B(Star), R(27),
+ /* 398 S> */ B(LdaZero),
+ B(Star), R(28),
+ /* 411 S> */ B(LdaZero),
+ B(Star), R(29),
+ /* 424 S> */ B(LdaZero),
+ B(Star), R(30),
+ /* 437 S> */ B(LdaZero),
+ B(Star), R(31),
+ /* 450 S> */ B(LdaZero),
+ B(Star), R(32),
+ /* 463 S> */ B(LdaZero),
+ B(Star), R(33),
+ /* 476 S> */ B(LdaZero),
+ B(Star), R(34),
+ /* 489 S> */ B(LdaZero),
+ B(Star), R(35),
+ /* 502 S> */ B(LdaZero),
+ B(Star), R(36),
+ /* 515 S> */ B(LdaZero),
+ B(Star), R(37),
+ /* 528 S> */ B(LdaZero),
+ B(Star), R(38),
+ /* 541 S> */ B(LdaZero),
+ B(Star), R(39),
+ /* 554 S> */ B(LdaZero),
+ B(Star), R(40),
+ /* 567 S> */ B(LdaZero),
+ B(Star), R(41),
+ /* 580 S> */ B(LdaZero),
+ B(Star), R(42),
+ /* 593 S> */ B(LdaZero),
+ B(Star), R(43),
+ /* 606 S> */ B(LdaZero),
+ B(Star), R(44),
+ /* 619 S> */ B(LdaZero),
+ B(Star), R(45),
+ /* 632 S> */ B(LdaZero),
+ B(Star), R(46),
+ /* 645 S> */ B(LdaZero),
+ B(Star), R(47),
+ /* 658 S> */ B(LdaZero),
+ B(Star), R(48),
+ /* 671 S> */ B(LdaZero),
+ B(Star), R(49),
+ /* 684 S> */ B(LdaZero),
+ B(Star), R(50),
+ /* 697 S> */ B(LdaZero),
+ B(Star), R(51),
+ /* 710 S> */ B(LdaZero),
+ B(Star), R(52),
+ /* 723 S> */ B(LdaZero),
+ B(Star), R(53),
+ /* 736 S> */ B(LdaZero),
+ B(Star), R(54),
+ /* 749 S> */ B(LdaZero),
+ B(Star), R(55),
+ /* 762 S> */ B(LdaZero),
+ B(Star), R(56),
+ /* 775 S> */ B(LdaZero),
+ B(Star), R(57),
+ /* 788 S> */ B(LdaZero),
+ B(Star), R(58),
+ /* 801 S> */ B(LdaZero),
+ B(Star), R(59),
+ /* 814 S> */ B(LdaZero),
+ B(Star), R(60),
+ /* 827 S> */ B(LdaZero),
+ B(Star), R(61),
+ /* 840 S> */ B(LdaZero),
+ B(Star), R(62),
+ /* 853 S> */ B(LdaZero),
+ B(Star), R(63),
+ /* 866 S> */ B(LdaZero),
+ B(Star), R(64),
+ /* 879 S> */ B(LdaZero),
+ B(Star), R(65),
+ /* 892 S> */ B(LdaZero),
+ B(Star), R(66),
+ /* 905 S> */ B(LdaZero),
+ B(Star), R(67),
+ /* 918 S> */ B(LdaZero),
+ B(Star), R(68),
+ /* 931 S> */ B(LdaZero),
+ B(Star), R(69),
+ /* 944 S> */ B(LdaZero),
+ B(Star), R(70),
+ /* 957 S> */ B(LdaZero),
+ B(Star), R(71),
+ /* 970 S> */ B(LdaZero),
+ B(Star), R(72),
+ /* 983 S> */ B(LdaZero),
+ B(Star), R(73),
+ /* 996 S> */ B(LdaZero),
+ B(Star), R(74),
+ /* 1009 S> */ B(LdaZero),
+ B(Star), R(75),
+ /* 1022 S> */ B(LdaZero),
+ B(Star), R(76),
+ /* 1035 S> */ B(LdaZero),
+ B(Star), R(77),
+ /* 1048 S> */ B(LdaZero),
+ B(Star), R(78),
+ /* 1061 S> */ B(LdaZero),
+ B(Star), R(79),
+ /* 1074 S> */ B(LdaZero),
+ B(Star), R(80),
+ /* 1087 S> */ B(LdaZero),
+ B(Star), R(81),
+ /* 1100 S> */ B(LdaZero),
+ B(Star), R(82),
+ /* 1113 S> */ B(LdaZero),
+ B(Star), R(83),
+ /* 1126 S> */ B(LdaZero),
+ B(Star), R(84),
+ /* 1139 S> */ B(LdaZero),
+ B(Star), R(85),
+ /* 1152 S> */ B(LdaZero),
+ B(Star), R(86),
+ /* 1165 S> */ B(LdaZero),
+ B(Star), R(87),
+ /* 1178 S> */ B(LdaZero),
+ B(Star), R(88),
+ /* 1191 S> */ B(LdaZero),
+ B(Star), R(89),
+ /* 1204 S> */ B(LdaZero),
+ B(Star), R(90),
+ /* 1217 S> */ B(LdaZero),
+ B(Star), R(91),
+ /* 1230 S> */ B(LdaZero),
+ B(Star), R(92),
+ /* 1243 S> */ B(LdaZero),
+ B(Star), R(93),
+ /* 1256 S> */ B(LdaZero),
+ B(Star), R(94),
+ /* 1269 S> */ B(LdaZero),
+ B(Star), R(95),
+ /* 1282 S> */ B(LdaZero),
+ B(Star), R(96),
+ /* 1295 S> */ B(LdaZero),
+ B(Star), R(97),
+ /* 1308 S> */ B(LdaZero),
+ B(Star), R(98),
+ /* 1321 S> */ B(LdaZero),
+ B(Star), R(99),
+ /* 1335 S> */ B(LdaZero),
+ B(Star), R(100),
+ /* 1349 S> */ B(LdaZero),
+ B(Star), R(101),
+ /* 1363 S> */ B(LdaZero),
+ B(Star), R(102),
+ /* 1377 S> */ B(LdaZero),
+ B(Star), R(103),
+ /* 1391 S> */ B(LdaZero),
+ B(Star), R(104),
+ /* 1405 S> */ B(LdaZero),
+ B(Star), R(105),
+ /* 1419 S> */ B(LdaZero),
+ B(Star), R(106),
+ /* 1433 S> */ B(LdaZero),
+ B(Star), R(107),
+ /* 1447 S> */ B(LdaZero),
+ B(Star), R(108),
+ /* 1461 S> */ B(LdaZero),
+ B(Star), R(109),
+ /* 1475 S> */ B(LdaZero),
+ B(Star), R(110),
+ /* 1489 S> */ B(LdaZero),
+ B(Star), R(111),
+ /* 1503 S> */ B(LdaZero),
+ B(Star), R(112),
+ /* 1517 S> */ B(LdaZero),
+ B(Star), R(113),
+ /* 1531 S> */ B(LdaZero),
+ B(Star), R(114),
+ /* 1545 S> */ B(LdaZero),
+ B(Star), R(115),
+ /* 1559 S> */ B(LdaZero),
+ B(Star), R(116),
+ /* 1573 S> */ B(LdaZero),
+ B(Star), R(117),
+ /* 1587 S> */ B(LdaZero),
+ B(Star), R(118),
+ /* 1601 S> */ B(LdaZero),
+ B(Star), R(119),
+ /* 1615 S> */ B(LdaZero),
+ B(Star), R(120),
+ /* 1629 S> */ B(LdaZero),
+ B(Star), R(121),
+ /* 1643 S> */ B(LdaZero),
+ B(Star), R(122),
+ /* 1657 S> */ B(LdaZero),
+ B(Star), R(123),
+ /* 1671 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(124),
+ /* 1685 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(125),
+ /* 1699 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(126),
+ /* 1713 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(127),
+ /* 1727 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(128),
+ /* 1741 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(129),
+ /* 1755 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(130),
+ /* 1769 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(131),
+ /* 1783 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(132),
+ /* 1797 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(133),
+ /* 1811 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(134),
+ /* 1825 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(135),
+ /* 1839 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(136),
+ /* 1853 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(137),
+ /* 1867 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(138),
+ /* 1881 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(139),
+ /* 1895 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(140),
+ /* 1909 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(141),
+ /* 1923 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(142),
+ /* 1937 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(143),
+ /* 1951 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(144),
+ /* 1965 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(145),
+ /* 1979 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(146),
+ /* 1993 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(147),
+ /* 2007 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(148),
+ /* 2021 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(149),
+ /* 2035 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(150),
+ /* 2049 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(151),
+ /* 2063 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(152),
+ /* 2077 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(153),
+ /* 2091 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(154),
+ /* 2105 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(155),
+ /* 2119 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(156),
+ /* 2122 S> */ B(Wide), B(Mov), R16(64), R16(157),
B(Wide), B(Mov), R16(63), R16(158),
- /* 1509 E> */ B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
+ /* 2137 E> */ B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
B(Star), R(0),
- /* 1515 S> */ B(Wide), B(Mov), R16(27), R16(157),
+ /* 2143 S> */ B(Wide), B(Mov), R16(27), R16(157),
B(Wide), B(Mov), R16(143), R16(158),
- /* 1530 E> */ B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
+ /* 2158 E> */ B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
B(Star), R(1),
- /* 1537 S> */ B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0),
- /* 1549 S> */ B(Ldar), R(1),
- /* 1559 S> */ B(Return),
+ /* 2165 S> */ B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0),
+ /* 2177 S> */ B(Ldar), R(1),
+ /* 2187 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 99ab4cd8c0..245d9d9afd 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -25,9 +25,7 @@ using v8::internal::interpreter::BytecodeExpectationsPrinter;
namespace {
-#ifdef V8_OS_POSIX
const char* kGoldenFilesPath = "test/cctest/interpreter/bytecode_expectations/";
-#endif
class ProgramOptions final {
public:
@@ -129,24 +127,23 @@ bool ParseBoolean(const char* string) {
const char* BooleanToString(bool value) { return value ? "yes" : "no"; }
-#ifdef V8_OS_POSIX
-
-bool StrEndsWith(const char* string, const char* suffix) {
- int string_size = i::StrLength(string);
- int suffix_size = i::StrLength(suffix);
- if (string_size < suffix_size) return false;
-
- return strcmp(string + (string_size - suffix_size), suffix) == 0;
-}
-
bool CollectGoldenFiles(std::vector<std::string>* golden_file_list,
const char* directory_path) {
+#ifdef V8_OS_POSIX
DIR* directory = opendir(directory_path);
if (!directory) return false;
+ auto str_ends_with = [](const char* string, const char* suffix) {
+ int string_size = i::StrLength(string);
+ int suffix_size = i::StrLength(suffix);
+ if (string_size < suffix_size) return false;
+
+ return strcmp(string + (string_size - suffix_size), suffix) == 0;
+ };
+
dirent* entry = readdir(directory);
while (entry) {
- if (StrEndsWith(entry->d_name, ".golden")) {
+ if (str_ends_with(entry->d_name, ".golden")) {
std::string golden_filename(kGoldenFilesPath);
golden_filename += entry->d_name;
golden_file_list->push_back(golden_filename);
@@ -155,12 +152,24 @@ bool CollectGoldenFiles(std::vector<std::string>* golden_file_list,
}
closedir(directory);
-
+#elif V8_OS_WIN
+ std::string search_path(directory_path + std::string("/*.golden"));
+ WIN32_FIND_DATAA fd;
+ HANDLE find_handle = FindFirstFileA(search_path.c_str(), &fd);
+ if (find_handle == INVALID_HANDLE_VALUE) return false;
+ do {
+ if (!(fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ std::string golden_filename(kGoldenFilesPath);
+ std::string temp_filename(fd.cFileName);
+ golden_filename += temp_filename;
+ golden_file_list->push_back(golden_filename);
+ }
+ } while (FindNextFileA(find_handle, &fd));
+ FindClose(find_handle);
+#endif // V8_OS_POSIX
return true;
}
-#endif // V8_OS_POSIX
-
// static
ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
ProgramOptions options;
@@ -210,7 +219,7 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
}
if (options.rebaseline_ && options.input_filenames_.empty()) {
-#ifdef V8_OS_POSIX
+#if defined(V8_OS_POSIX) || defined(V8_OS_WIN)
if (options.verbose_) {
std::cout << "Looking for golden files in " << kGoldenFilesPath << '\n';
}
@@ -219,7 +228,8 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.parsing_failed_ = true;
}
#else
- REPORT_ERROR("Golden files autodiscovery requires a POSIX OS, sorry.");
+ REPORT_ERROR(
+        "Golden files autodiscovery requires a POSIX or Windows OS, sorry.");
options.parsing_failed_ = true;
#endif
}
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.cc b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
index a361a98a52..c66c1a279b 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.cc
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
@@ -5,6 +5,7 @@
#include "test/cctest/interpreter/interpreter-tester.h"
#include "src/api-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index fb56d5d98a..168dabd8dc 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -91,6 +91,7 @@ class InitializedIgnitionHandleScope : public InitializedHandleScope {
InitializedIgnitionHandleScope() {
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_enable_lazy_source_positions = false;
}
};
@@ -2420,7 +2421,7 @@ TEST(WideRegisters) {
// Prepare prologue that creates frame for lots of registers.
std::ostringstream os;
for (size_t i = 0; i < 157; ++i) {
- os << "var x" << i << ";\n";
+ os << "var x" << i << " = 0;\n";
}
std::string prologue(os.str());
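This one-line prologue change is the root cause of the large golden-file rebaseline earlier in this diff: an uninitialized `var` produces no bytecode, while `var xN = 0;` emits an explicit LdaZero/Star pair per declaration, which is why the bytecode arrays grew from 89 and 53 bytes to 626 and 590. The goldens also show the emitter switching from plain Star to the Wide prefix with 16-bit register operands at the R(123) to R16(124) transition. A hypothetical sketch of that operand-width decision, assuming the encoder simply checks whether the frame-relative operand still fits a signed byte (V8 maps register indices to signed frame-relative operands, so the exact cutoff of 124 falls out of that mapping, not the raw index):

#include <cstdint>

enum class OperandScale { kSingle, kDouble };

OperandScale ScaleForOperand(int32_t encoded_operand) {
  // Plain Star/Ldar if the operand fits a signed byte; otherwise the
  // Wide prefix with 16-bit (R16) register operands, as in the goldens.
  return (encoded_operand >= INT8_MIN && encoded_operand <= INT8_MAX)
             ? OperandScale::kSingle
             : OperandScale::kDouble;
}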
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index bfc42aa540..becc46ab9c 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -5,6 +5,7 @@
#include "src/v8.h"
#include "src/api-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
#include "test/cctest/interpreter/interpreter-tester.h"
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 9ec0c99ce1..3483e7967a 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -7,8 +7,12 @@
#include "src/v8.h"
#include "src/api-inl.h"
+#include "src/base/overflowing-math.h"
+#include "src/compiler.h"
#include "src/execution.h"
#include "src/handles.h"
+#include "src/hash-seed-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
@@ -160,7 +164,7 @@ TEST(InterpreterLoadLiteral) {
// Heap numbers.
{
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
BytecodeArrayBuilder builder(zone, 1, 0);
@@ -178,7 +182,7 @@ TEST(InterpreterLoadLiteral) {
// Strings.
{
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
BytecodeArrayBuilder builder(zone, 1, 0);
@@ -239,7 +243,7 @@ static double BinaryOpC(Token::Value op, double lhs, double rhs) {
case Token::Value::MUL:
return lhs * rhs;
case Token::Value::DIV:
- return lhs / rhs;
+ return base::Divide(lhs, rhs);
case Token::Value::MOD:
return Modulo(lhs, rhs);
case Token::Value::BIT_OR:
@@ -252,10 +256,7 @@ static double BinaryOpC(Token::Value op, double lhs, double rhs) {
return (v8::internal::DoubleToInt32(lhs) &
v8::internal::DoubleToInt32(rhs));
case Token::Value::SHL: {
- int32_t val = v8::internal::DoubleToInt32(lhs);
- uint32_t count = v8::internal::DoubleToUint32(rhs) & 0x1F;
- int32_t result = val << count;
- return result;
+ return base::ShlWithWraparound(DoubleToInt32(lhs), DoubleToInt32(rhs));
}
case Token::Value::SAR: {
int32_t val = v8::internal::DoubleToInt32(lhs);
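These two replacements swap raw C++ operators for base:: helpers because `val << count` on a negative `val` is undefined behavior in C++, even though the JavaScript semantics are well defined. A sketch of what such a wraparound helper typically does (the real definitions live in src/base/overflowing-math.h, included above; this name is hypothetical):

#include <cstdint>

// Defined for all inputs: shift on the unsigned representation, then cast
// back. The & 0x1F matches JS, which masks shift counts to five bits.
int32_t ShlWithWraparoundSketch(int32_t value, int32_t shift) {
  uint32_t count = static_cast<uint32_t>(shift) & 0x1F;
  return static_cast<int32_t>(static_cast<uint32_t>(value) << count);
}

base::Divide presumably serves the analogous purpose for floating point: IEEE-754 defines division by zero, but sanitizer builds flag a bare lhs / rhs, so the helper makes the intent explicit.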
@@ -487,7 +488,7 @@ TEST(InterpreterStringAdd) {
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
struct TestCase {
const AstRawString* lhs;
@@ -580,7 +581,7 @@ TEST(InterpreterParameter8) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
BytecodeArrayBuilder builder(zone, 8, 0, &feedback_spec);
@@ -633,7 +634,7 @@ TEST(InterpreterBinaryOpTypeFeedback) {
i::Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
struct BinaryOpExpectation {
Token::Value op;
@@ -767,7 +768,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
i::Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
struct BinaryOpExpectation {
Token::Value op;
@@ -1132,7 +1133,7 @@ TEST(InterpreterLoadNamedProperty) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddLoadICSlot();
@@ -1184,7 +1185,7 @@ TEST(InterpreterLoadKeyedProperty) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddKeyedLoadICSlot();
@@ -1226,7 +1227,7 @@ TEST(InterpreterStoreNamedProperty) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddStoreICSlot(LanguageMode::kStrict);
@@ -1289,7 +1290,7 @@ TEST(InterpreterStoreKeyedProperty) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddKeyedStoreICSlot(LanguageMode::kSloppy);
@@ -1341,7 +1342,7 @@ TEST(InterpreterCall) {
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddLoadICSlot();
@@ -1527,17 +1528,18 @@ TEST(InterpreterJumps) {
NewFeedbackMetadata(isolate, &feedback_spec);
Register reg(0), scratch(1);
- BytecodeLabel label[3];
+ BytecodeLoopHeader loop_header;
+ BytecodeLabel label[2];
builder.LoadLiteral(Smi::zero())
.StoreAccumulatorInRegister(reg)
- .Jump(&label[1]);
- SetRegister(builder, reg, 1024, scratch).Bind(&label[0]);
- IncrementRegister(builder, reg, 1, scratch, GetIndex(slot)).Jump(&label[2]);
- SetRegister(builder, reg, 2048, scratch).Bind(&label[1]);
+ .Jump(&label[0]);
+ SetRegister(builder, reg, 1024, scratch).Bind(&loop_header);
+ IncrementRegister(builder, reg, 1, scratch, GetIndex(slot)).Jump(&label[1]);
+ SetRegister(builder, reg, 2048, scratch).Bind(&label[0]);
IncrementRegister(builder, reg, 2, scratch, GetIndex(slot1))
- .JumpLoop(&label[0], 0);
- SetRegister(builder, reg, 4096, scratch).Bind(&label[2]);
+ .JumpLoop(&loop_header, 0);
+ SetRegister(builder, reg, 4096, scratch).Bind(&label[1]);
IncrementRegister(builder, reg, 4, scratch, GetIndex(slot2))
.LoadAccumulatorWithRegister(reg)
.Return();
@@ -1653,7 +1655,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
BytecodeArrayBuilder builder(zone, 1, 257, &feedback_spec);
@@ -1666,6 +1668,8 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
builder.LoadLiteral(Smi::zero());
builder.StoreAccumulatorInRegister(reg);
+ // Conditional jump to the fake label, to force both basic blocks to be live.
+ builder.JumpIfTrue(ToBooleanMode::kConvertToBoolean, &fake);
// Consume all 8-bit operands
for (int i = 1; i <= 256; i++) {
builder.LoadLiteral(i + 0.5);
@@ -1714,7 +1718,7 @@ TEST(InterpreterJumpWith32BitOperand) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
BytecodeArrayBuilder builder(zone, 1, 1);
Register reg(0);
BytecodeLabel done;
@@ -1853,7 +1857,7 @@ TEST(InterpreterHeapNumberComparisons) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
@@ -1900,7 +1904,7 @@ TEST(InterpreterBigIntComparisons) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
@@ -1945,7 +1949,7 @@ TEST(InterpreterStringComparisons) {
for (size_t i = 0; i < arraysize(inputs); i++) {
for (size_t j = 0; j < arraysize(inputs); j++) {
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
CanonicalHandleScope canonical(isolate);
const char* lhs = inputs[i].c_str();
@@ -2028,7 +2032,7 @@ TEST(InterpreterMixedComparisons) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
BytecodeArrayBuilder builder(zone, 1, 0, &feedback_spec);
@@ -2266,7 +2270,7 @@ TEST(InterpreterTestIn) {
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
// Allocate an array
Handle<i::JSArray> array =
factory->NewJSArray(0, i::ElementsKind::PACKED_SMI_ELEMENTS);
@@ -2274,21 +2278,26 @@ TEST(InterpreterTestIn) {
const char* properties[] = {"length", "fuzzle", "x", "0"};
for (size_t i = 0; i < arraysize(properties); i++) {
bool expected_value = (i == 0);
- BytecodeArrayBuilder builder(zone, 1, 1);
+ FeedbackVectorSpec feedback_spec(zone);
+ BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
Register r0(0);
builder.LoadLiteral(ast_factory.GetOneByteString(properties[i]))
.StoreAccumulatorInRegister(r0);
+ FeedbackSlot slot = feedback_spec.AddKeyedHasICSlot();
+ Handle<i::FeedbackMetadata> metadata =
+ NewFeedbackMetadata(isolate, &feedback_spec);
+
size_t array_entry = builder.AllocateDeferredConstantPoolEntry();
builder.SetDeferredConstantPoolEntry(array_entry, array);
builder.LoadConstantPoolEntry(array_entry)
- .CompareOperation(Token::Value::IN, r0)
+ .CompareOperation(Token::Value::IN, r0, GetIndex(slot))
.Return();
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(isolate, bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, metadata);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
@@ -2324,7 +2333,7 @@ TEST(InterpreterUnaryNotNonBoolean) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
std::pair<LiteralForTest, bool> object_type_tuples[] = {
std::make_pair(LiteralForTest(LiteralForTest::kUndefined), true),
@@ -4946,7 +4955,7 @@ TEST(InterpreterIllegalConstDeclaration) {
std::pair<const char*, const char*> const_decl[] = {
{"const x = x = 10 + 3; return x;",
- "Uncaught ReferenceError: x is not defined"},
+ "Uncaught ReferenceError: Cannot access 'x' before initialization"},
{"const x = 10; x = 20; return x;",
"Uncaught TypeError: Assignment to constant variable."},
{"const x = 10; { x = 20; } return x;",
@@ -4954,7 +4963,7 @@ TEST(InterpreterIllegalConstDeclaration) {
{"const x = 10; eval('x = 20;'); return x;",
"Uncaught TypeError: Assignment to constant variable."},
{"let x = x + 10; return x;",
- "Uncaught ReferenceError: x is not defined"},
+ "Uncaught ReferenceError: Cannot access 'x' before initialization"},
{"'use strict'; (function f1() { f1 = 123; })() ",
"Uncaught TypeError: Assignment to constant variable."},
};
@@ -5061,6 +5070,82 @@ TEST(InterpreterGetBytecodeHandler) {
CHECK_EQ(add_wide_handler->builtin_index(), Builtins::kAddWideHandler);
}
+TEST(InterpreterCollectSourcePositions) {
+ FLAG_enable_lazy_source_positions = true;
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+
+ const char* source =
+ "(function () {\n"
+ " return 1;\n"
+ "})";
+
+ Handle<JSFunction> function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+
+ Handle<SharedFunctionInfo> sfi = handle(function->shared(), isolate);
+ Handle<BytecodeArray> bytecode_array =
+ handle(sfi->GetBytecodeArray(), isolate);
+ ByteArray source_position_table = bytecode_array->SourcePositionTable();
+ CHECK_EQ(source_position_table->length(), 0);
+
+ Compiler::CollectSourcePositions(isolate, sfi);
+
+ source_position_table = bytecode_array->SourcePositionTable();
+ CHECK_GT(source_position_table->length(), 0);
+}
+
+namespace {
+
+void CheckStringEqual(const char* expected_ptr, Handle<Object> actual_handle) {
+ v8::String::Utf8Value utf8(
+ v8::Isolate::GetCurrent(),
+ v8::Utils::ToLocal(Handle<String>::cast(actual_handle)));
+ std::string expected(expected_ptr);
+ std::string actual(*utf8);
+ CHECK_EQ(expected, actual);
+}
+
+} // namespace
+
+TEST(InterpreterCollectSourcePositions_GenerateStackTrace) {
+ FLAG_enable_lazy_source_positions = true;
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+
+ const char* source =
+ R"javascript(
+ (function () {
+ try {
+ throw new Error();
+ } catch (e) {
+ return e.stack;
+ }
+ });
+ )javascript";
+
+ Handle<JSFunction> function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+
+ Handle<SharedFunctionInfo> sfi = handle(function->shared(), isolate);
+ Handle<BytecodeArray> bytecode_array =
+ handle(sfi->GetBytecodeArray(), isolate);
+ ByteArray source_position_table = bytecode_array->SourcePositionTable();
+ CHECK_EQ(source_position_table->length(), 0);
+
+ {
+ Handle<Object> result =
+ Execution::Call(isolate, function,
+ ReadOnlyRoots(isolate).undefined_value_handle(), 0,
+ nullptr)
+ .ToHandleChecked();
+ CheckStringEqual("Error\n at <anonymous>:4:17", result);
+ }
+
+ source_position_table = bytecode_array->SourcePositionTable();
+ CHECK_GT(source_position_table->length(), 0);
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
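Both new tests pin down the same contract: with FLAG_enable_lazy_source_positions, bytecode is first compiled with an empty source position table, and the table is materialized later, either explicitly via Compiler::CollectSourcePositions or implicitly when a stack trace needs line and column numbers. In miniature (a toy sketch of the contract only, not V8's implementation; all names here are made up):

#include <cassert>

struct LazyPositions {
  int length = 0;                  // empty until first demanded
  void Collect() { length = 42; }  // stands in for re-parsing with positions
  int Demand() {
    if (length == 0) Collect();    // stack trace or explicit collection
    return length;
  }
};

int main() {
  LazyPositions table;
  assert(table.length == 0);       // mirrors CHECK_EQ(..., 0) above
  table.Demand();
  assert(table.length > 0);        // mirrors CHECK_GT(..., 0) above
}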
diff --git a/deps/v8/test/cctest/interpreter/test-source-positions.cc b/deps/v8/test/cctest/interpreter/test-source-positions.cc
index 2a8e354e54..ccdbd53558 100644
--- a/deps/v8/test/cctest/interpreter/test-source-positions.cc
+++ b/deps/v8/test/cctest/interpreter/test-source-positions.cc
@@ -113,6 +113,7 @@ class OptimizedBytecodeSourcePositionTester final {
SaveOptimizationFlags();
saved_flag_always_opt_ = FLAG_always_opt;
FLAG_always_opt = false;
+ FLAG_enable_lazy_source_positions = false;
}
~OptimizedBytecodeSourcePositionTester() {
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index b949785bcf..3b449f4b09 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -4,6 +4,7 @@
#include <limits>
#include "include/libplatform/v8-tracing.h"
+#include "src/base/platform/platform.h"
#include "src/libplatform/default-platform.h"
#include "src/tracing/trace-event.h"
#include "test/cctest/cctest.h"
@@ -439,6 +440,59 @@ TEST(TracingObservers) {
i::V8::SetPlatformForTesting(old_platform);
}
+class TraceWritingThread : public base::Thread {
+ public:
+ TraceWritingThread(
+ v8::platform::tracing::TracingController* tracing_controller)
+ : base::Thread(base::Thread::Options("TraceWritingThread")),
+ tracing_controller_(tracing_controller) {}
+
+ void Run() override {
+ for (int i = 0; i < 1000; i++) {
+ TRACE_EVENT0("v8", "v8.Test");
+ tracing_controller_->AddTraceEvent('A', nullptr, "v8", "", 1, 1, 0,
+ nullptr, nullptr, nullptr, nullptr, 0);
+ tracing_controller_->AddTraceEventWithTimestamp('A', nullptr, "v8", "", 1,
+ 1, 0, nullptr, nullptr,
+ nullptr, nullptr, 0, 0);
+ base::OS::Sleep(base::TimeDelta::FromMilliseconds(1));
+ }
+ }
+
+ private:
+ v8::platform::tracing::TracingController* tracing_controller_;
+};
+
+TEST(AddTraceEventMultiThreaded) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ std::unique_ptr<v8::Platform> default_platform(
+ v8::platform::NewDefaultPlatform());
+ i::V8::SetPlatformForTesting(default_platform.get());
+
+ auto tracing = base::make_unique<v8::platform::tracing::TracingController>();
+ v8::platform::tracing::TracingController* tracing_controller = tracing.get();
+ static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
+ ->SetTracingController(std::move(tracing));
+
+ MockTraceWriter* writer = new MockTraceWriter();
+ TraceBuffer* ring_buffer =
+ TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
+ tracing_controller->Initialize(ring_buffer);
+ TraceConfig* trace_config = new TraceConfig();
+ trace_config->AddIncludedCategory("v8");
+ tracing_controller->StartTracing(trace_config);
+
+ TraceWritingThread thread(tracing_controller);
+ thread.StartSynchronously();
+
+ base::OS::Sleep(base::TimeDelta::FromMilliseconds(100));
+ tracing_controller->StopTracing();
+
+ thread.Join();
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
} // namespace tracing
} // namespace platform
} // namespace v8
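The new test is a straightforward stress of the add-event versus stop-tracing race: the writer thread keeps emitting events for roughly a second while the main thread stops tracing after 100 ms, so events necessarily arrive after StopTracing. Reduced to standard C++ (a sketch of the shape of the race only; EmitEvent and StopTracing are stand-ins, not the V8 API):

#include <chrono>
#include <thread>

void EmitEvent() { /* stands in for tracing_controller->AddTraceEvent(...) */ }
void StopTracing() { /* must tolerate EmitEvent calls still in flight */ }

int main() {
  std::thread writer([] {
    for (int i = 0; i < 1000; i++) {
      EmitEvent();
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  });
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  StopTracing();  // the writer keeps emitting well past this point
  writer.join();
}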
diff --git a/deps/v8/test/cctest/libsampler/test-sampler.cc b/deps/v8/test/cctest/libsampler/test-sampler.cc
index e00861ddcf..fdb9bbda89 100644
--- a/deps/v8/test/cctest/libsampler/test-sampler.cc
+++ b/deps/v8/test/cctest/libsampler/test-sampler.cc
@@ -141,6 +141,7 @@ class CountingSampler : public Sampler {
int sample_count() { return sample_count_; }
void set_active(bool active) { SetActive(active); }
+ void set_should_record_sample() { SetShouldRecordSample(); }
private:
int sample_count_ = 0;
@@ -153,6 +154,7 @@ TEST(SamplerManager_AddRemoveSampler) {
SamplerManager* manager = SamplerManager::instance();
CountingSampler sampler1(isolate);
sampler1.set_active(true);
+ sampler1.set_should_record_sample();
CHECK_EQ(0, sampler1.sample_count());
manager->AddSampler(&sampler1);
@@ -162,6 +164,7 @@ TEST(SamplerManager_AddRemoveSampler) {
CHECK_EQ(1, sampler1.sample_count());
sampler1.set_active(true);
+ sampler1.set_should_record_sample();
manager->RemoveSampler(&sampler1);
sampler1.set_active(false);
@@ -177,6 +180,7 @@ TEST(SamplerManager_DoesNotReAdd) {
SamplerManager* manager = SamplerManager::instance();
CountingSampler sampler1(isolate);
sampler1.set_active(true);
+ sampler1.set_should_record_sample();
manager->AddSampler(&sampler1);
manager->AddSampler(&sampler1);
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index 4d2aba768c..64dd802c64 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -804,6 +804,14 @@ TEST(ProducingAndConsumingByteData) {
std::vector<uint8_t> buffer;
i::PreparseDataBuilder::ByteData bytes;
bytes.Start(&buffer);
+
+ bytes.Reserve(32);
+ bytes.Reserve(32);
+ CHECK_EQ(buffer.size(), 32);
+ const int kBufferSize = 64;
+ bytes.Reserve(kBufferSize);
+ CHECK_EQ(buffer.size(), kBufferSize);
+
// Write some data.
#ifdef DEBUG
bytes.WriteUint32(1983); // This will be overwritten.
@@ -818,7 +826,8 @@ TEST(ProducingAndConsumingByteData) {
#ifdef DEBUG
bytes.SaveCurrentSizeAtFirstUint32();
int saved_size = 21;
- CHECK_EQ(buffer.size(), saved_size);
+ CHECK_EQ(buffer.size(), kBufferSize);
+ CHECK_EQ(bytes.length(), saved_size);
#endif
bytes.WriteUint8(100);
// Write quarter bytes between uint8s and uint32s to verify they're stored
@@ -845,11 +854,14 @@ TEST(ProducingAndConsumingByteData) {
// End with a lonely quarter.
bytes.WriteQuarter(2);
+ CHECK_EQ(buffer.size(), 64);
#ifdef DEBUG
- CHECK_EQ(buffer.size(), 42);
+ const int kDataSize = 42;
#else
- CHECK_EQ(buffer.size(), 21);
+ const int kDataSize = 21;
#endif
+ CHECK_EQ(bytes.length(), kDataSize);
+ CHECK_EQ(buffer.size(), kBufferSize);
// Copy buffer for sanity checks later-on.
std::vector<uint8_t> copied_buffer(buffer);
@@ -858,7 +870,7 @@ TEST(ProducingAndConsumingByteData) {
// serialization.
bytes.Finalize(&zone);
CHECK_EQ(buffer.size(), 0);
- CHECK_LT(0, copied_buffer.size());
+ CHECK_EQ(copied_buffer.size(), kBufferSize);
{
// Serialize as a ZoneConsumedPreparseData, and read back data.
@@ -868,7 +880,9 @@ TEST(ProducingAndConsumingByteData) {
i::ZoneConsumedPreparseData::ByteData::ReadingScope reading_scope(
&bytes_for_reading, wrapper);
- for (int i = 0; i < static_cast<int>(copied_buffer.size()); i++) {
+ CHECK_EQ(wrapper.data_length(), kDataSize);
+
+ for (int i = 0; i < kDataSize; i++) {
CHECK_EQ(copied_buffer.at(i), wrapper.get(i));
}
@@ -910,13 +924,13 @@ TEST(ProducingAndConsumingByteData) {
{
// Serialize as an OnHeapConsumedPreparseData, and read back data.
i::Handle<i::PreparseData> data_on_heap = bytes.CopyToHeap(isolate, 0);
- CHECK_EQ(copied_buffer.size(), data_on_heap->data_length());
+ CHECK_EQ(data_on_heap->data_length(), kDataSize);
CHECK_EQ(data_on_heap->children_length(), 0);
i::OnHeapConsumedPreparseData::ByteData bytes_for_reading;
i::OnHeapConsumedPreparseData::ByteData::ReadingScope reading_scope(
&bytes_for_reading, *data_on_heap);
- for (int i = 0; i < static_cast<int>(copied_buffer.size()); i++) {
+ for (int i = 0; i < kDataSize; i++) {
CHECK_EQ(copied_buffer[i], data_on_heap->get(i));
}
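The reshuffled checks in this test separate two quantities that used to coincide: buffer.size() is now the reserved capacity (32, then kBufferSize = 64), while bytes.length() tracks what was actually written (21 or 42 depending on DEBUG). The Reserve contract they exercise, sketched under the assumption that Reserve grows the backing store to at least the requested size and never shrinks it:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

void Reserve(std::vector<uint8_t>* buffer, std::size_t bytes) {
  if (buffer->size() < bytes) buffer->resize(bytes);  // grow-only
}

int main() {
  std::vector<uint8_t> buf;
  Reserve(&buf, 32);
  Reserve(&buf, 32);         // idempotent for equal sizes: still 32
  assert(buf.size() == 32);
  Reserve(&buf, 64);
  assert(buf.size() == 64);  // matches kBufferSize in the test above
}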
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index ef3d0f7df8..ec6c2bc10b 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -47,7 +47,9 @@ class ChunkSource : public v8::ScriptCompiler::ExternalSourceStream {
DCHECK_LT(current_, chunks_.size());
Chunk& next = chunks_[current_++];
uint8_t* chunk = new uint8_t[next.len];
- i::MemMove(chunk, next.ptr, next.len);
+ if (next.len > 0) {
+ i::MemMove(chunk, next.ptr, next.len);
+ }
*src = chunk;
return next.len;
}
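The guard matters because an empty chunk carries a null data pointer, and passing null to memmove or memcpy is undefined behavior even when the length is zero; sanitizers flag it. A standalone sketch of the guarded-copy idiom (SafeMemMove is a made-up name for illustration):

#include <cstddef>
#include <cstring>

void SafeMemMove(void* dst, const void* src, std::size_t len) {
  if (len > 0) std::memmove(dst, src, len);  // skip the UB-prone null case
}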
@@ -164,6 +166,20 @@ TEST(Utf8StreamAsciiOnly) {
} while (c != v8::internal::Utf16CharacterStream::kEndOfInput);
}
+TEST(Utf8StreamMaxNonSurrogateCharCode) {
+ const char* chunks[] = {"\uffff\uffff", ""};
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
+ v8::internal::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
+
+ // Read the correct character.
+ uint16_t max = unibrow::Utf16::kMaxNonSurrogateCharCode;
+ CHECK_EQ(max, static_cast<uint32_t>(stream->Advance()));
+ CHECK_EQ(max, static_cast<uint32_t>(stream->Advance()));
+ CHECK_EQ(i::Utf16CharacterStream::kEndOfInput, stream->Advance());
+}
+
TEST(Utf8StreamBOM) {
// Construct test string w/ UTF-8 BOM (byte order mark)
char data[3 + arraysize(unicode_utf8)] = {"\xef\xbb\xbf"};
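U+FFFF is the edge the new test probes: it is the highest code point that still fits a single UTF-16 unit (unibrow::Utf16::kMaxNonSurrogateCharCode), and it sits at the top of UTF-8's 3-byte range, so decoder bugs near the surrogate cutoff surface here. A sketch of the encoding fact being relied on (plain UTF-8 arithmetic, no V8 internals):

#include <cassert>
#include <cstdint>

int main() {
  // "\uffff" as UTF-8 is EF BF BF: 1110xxxx 10xxxxxx 10xxxxxx.
  const uint8_t encoded[] = {0xEF, 0xBF, 0xBF};
  uint32_t decoded = (uint32_t{encoded[0] & 0x0F} << 12) |
                     (uint32_t{encoded[1] & 0x3F} << 6) |
                     (uint32_t{encoded[2] & 0x3F});
  assert(decoded == 0xFFFF);
}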
diff --git a/deps/v8/test/cctest/scope-test-helper.h b/deps/v8/test/cctest/scope-test-helper.h
index a10d8af96f..c733f9dbf5 100644
--- a/deps/v8/test/cctest/scope-test-helper.h
+++ b/deps/v8/test/cctest/scope-test-helper.h
@@ -24,16 +24,14 @@ class ScopeTestHelper {
baseline->AsDeclarationScope()->function_kind() ==
scope->AsDeclarationScope()->function_kind());
- if (!PreparseDataBuilder::ScopeNeedsData(baseline)) {
- return;
- }
+ if (!PreparseDataBuilder::ScopeNeedsData(baseline)) return;
if (scope->is_declaration_scope() &&
scope->AsDeclarationScope()->is_skipped_function()) {
return;
}
- if (baseline->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ if (baseline->is_function_scope()) {
Variable* function = baseline->AsDeclarationScope()->function_var();
if (function != nullptr) {
CompareVariables(function, scope->AsDeclarationScope()->function_var(),
@@ -99,7 +97,7 @@ class ScopeTestHelper {
static void MarkInnerFunctionsAsSkipped(Scope* scope) {
for (Scope* inner = scope->inner_scope(); inner != nullptr;
inner = inner->sibling()) {
- if (inner->scope_type() == ScopeType::FUNCTION_SCOPE &&
+ if (inner->is_function_scope() &&
!inner->AsDeclarationScope()->is_arrow_scope()) {
inner->AsDeclarationScope()->set_is_skipped_function(true);
}
@@ -108,15 +106,13 @@ class ScopeTestHelper {
}
static bool HasSkippedFunctionInside(Scope* scope) {
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE &&
+ if (scope->is_function_scope() &&
scope->AsDeclarationScope()->is_skipped_function()) {
return true;
}
for (Scope* inner = scope->inner_scope(); inner != nullptr;
inner = inner->sibling()) {
- if (HasSkippedFunctionInside(inner)) {
- return true;
- }
+ if (HasSkippedFunctionInside(inner)) return true;
}
return false;
}
diff --git a/deps/v8/test/cctest/test-allocation.cc b/deps/v8/test/cctest/test-allocation.cc
index d5ba49c537..dd06535a98 100644
--- a/deps/v8/test/cctest/test-allocation.cc
+++ b/deps/v8/test/cctest/test-allocation.cc
@@ -99,7 +99,8 @@ TEST(AccountingAllocatorOOM) {
AllocationPlatform platform;
v8::internal::AccountingAllocator allocator;
CHECK(!platform.oom_callback_called);
- v8::internal::Segment* result = allocator.GetSegment(GetHugeMemoryAmount());
+ v8::internal::Segment* result =
+ allocator.AllocateSegment(GetHugeMemoryAmount());
// On a few systems, allocation somehow succeeds.
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 2ca473dea7..41678032af 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -470,6 +470,9 @@ THREADED_TEST(QueryInterceptor) {
v8_compile("Object.isFrozen('obj.x');")->Run(env.local()).ToLocalChecked();
CHECK_EQ(8, query_counter_int);
+
+ v8_compile("'x' in obj;")->Run(env.local()).ToLocalChecked();
+ CHECK_EQ(9, query_counter_int);
}
namespace {
@@ -874,15 +877,14 @@ THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
CHECK(!value->BooleanValue(isolate));
}
-
-static void CheckInterceptorLoadIC(
- v8::GenericNamedPropertyGetterCallback getter, const char* source,
- int expected) {
+static void CheckInterceptorIC(v8::GenericNamedPropertyGetterCallback getter,
+ v8::GenericNamedPropertyQueryCallback query,
+ const char* source, int expected) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- getter, nullptr, nullptr, nullptr, nullptr, v8_str("data")));
+ getter, nullptr, query, nullptr, nullptr, v8_str("data")));
LocalContext context;
context->Global()
->Set(context.local(), v8_str("o"),
@@ -892,6 +894,11 @@ static void CheckInterceptorLoadIC(
CHECK_EQ(expected, value->Int32Value(context.local()).FromJust());
}
+static void CheckInterceptorLoadIC(
+ v8::GenericNamedPropertyGetterCallback getter, const char* source,
+ int expected) {
+ CheckInterceptorIC(getter, nullptr, source, expected);
+}
static void InterceptorLoadICGetter(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -1432,6 +1439,92 @@ THREADED_TEST(InterceptorReturningZero) {
0);
}
+namespace {
+
+template <typename TKey, v8::internal::PropertyAttributes attribute>
+void HasICQuery(TKey name, const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = CcTest::isolate();
+ CHECK_EQ(isolate, info.GetIsolate());
+ info.GetReturnValue().Set(v8::Integer::New(isolate, attribute));
+}
+
+template <typename TKey>
+void HasICQueryToggle(TKey name,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ ApiTestFuzzer::Fuzz();
+ static bool toggle = false;
+ toggle = !toggle;
+ v8::Isolate* isolate = CcTest::isolate();
+ CHECK_EQ(isolate, info.GetIsolate());
+ info.GetReturnValue().Set(v8::Integer::New(
+ isolate, toggle ? v8::internal::ABSENT : v8::internal::NONE));
+}
+
+int named_query_counter = 0;
+void NamedQueryCallback(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ named_query_counter++;
+}
+
+} // namespace
+
+THREADED_TEST(InterceptorHasIC) {
+ named_query_counter = 0;
+ CheckInterceptorIC(nullptr, NamedQueryCallback,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " 'x' in o;"
+ "}",
+ 0);
+ CHECK_EQ(1000, named_query_counter);
+}
+
+THREADED_TEST(InterceptorHasICQueryAbsent) {
+ CheckInterceptorIC(nullptr, HasICQuery<Local<Name>, v8::internal::ABSENT>,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " if ('x' in o) ++result;"
+ "}",
+ 0);
+}
+
+THREADED_TEST(InterceptorHasICQueryNone) {
+ CheckInterceptorIC(nullptr, HasICQuery<Local<Name>, v8::internal::NONE>,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " if ('x' in o) ++result;"
+ "}",
+ 1000);
+}
+
+THREADED_TEST(InterceptorHasICGetter) {
+ CheckInterceptorIC(InterceptorLoadICGetter, nullptr,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " if ('x' in o) ++result;"
+ "}",
+ 1000);
+}
+
+THREADED_TEST(InterceptorHasICQueryGetter) {
+ CheckInterceptorIC(InterceptorLoadICGetter,
+ HasICQuery<Local<Name>, v8::internal::ABSENT>,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " if ('x' in o) ++result;"
+ "}",
+ 0);
+}
+
+THREADED_TEST(InterceptorHasICQueryToggle) {
+ CheckInterceptorIC(InterceptorLoadICGetter, HasICQueryToggle<Local<Name>>,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " if ('x' in o) ++result;"
+ "}",
+ 500);
+}
static void InterceptorStoreICSetter(
Local<Name> key, Local<Value> value,
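The expected count of 500 in the toggle tests (here and in the indexed variants further down) follows from HasICQueryToggle flipping its answer on every query: the first call reports ABSENT, the second NONE (present), and so on, so exactly half of the 1000 `'x' in o` checks succeed. The arithmetic, spelled out as a sketch mirroring the static-toggle logic:

#include <cassert>

int main() {
  bool toggle = false;   // mirrors the static local in HasICQueryToggle
  int hits = 0;
  for (int i = 0; i < 1000; i++) {
    toggle = !toggle;    // first query flips to true -> ABSENT (miss)
    if (!toggle) hits++; // NONE: property reported present
  }
  assert(hits == 500);
}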
@@ -3273,6 +3366,101 @@ THREADED_TEST(IndexedInterceptorOnProto) {
ExpectString(code, "PASSED");
}
+namespace {
+
+void CheckIndexedInterceptorHasIC(v8::IndexedPropertyGetterCallback getter,
+ v8::IndexedPropertyQueryCallback query,
+ const char* source, int expected) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ getter, nullptr, query, nullptr, nullptr, v8_str("data")));
+ LocalContext context;
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<Value> value = CompileRun(source);
+ CHECK_EQ(expected, value->Int32Value(context.local()).FromJust());
+}
+
+int indexed_query_counter = 0;
+void IndexedQueryCallback(uint32_t index,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ indexed_query_counter++;
+}
+
+void IndexHasICQueryAbsent(uint32_t index,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = CcTest::isolate();
+ CHECK_EQ(isolate, info.GetIsolate());
+ info.GetReturnValue().Set(v8::Integer::New(isolate, v8::internal::ABSENT));
+}
+
+} // namespace
+
+THREADED_TEST(IndexedInterceptorHasIC) {
+ indexed_query_counter = 0;
+ CheckIndexedInterceptorHasIC(nullptr, IndexedQueryCallback,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " i in o;"
+ "}",
+ 0);
+ CHECK_EQ(1000, indexed_query_counter);
+}
+
+THREADED_TEST(IndexedInterceptorHasICQueryAbsent) {
+ CheckIndexedInterceptorHasIC(nullptr,
+ // HasICQuery<uint32_t, v8::internal::ABSENT>,
+ IndexHasICQueryAbsent,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " if (i in o) ++result;"
+ "}",
+ 0);
+}
+
+THREADED_TEST(IndexedInterceptorHasICQueryNone) {
+ CheckIndexedInterceptorHasIC(nullptr,
+ HasICQuery<uint32_t, v8::internal::NONE>,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " if (i in o) ++result;"
+ "}",
+ 1000);
+}
+
+THREADED_TEST(IndexedInterceptorHasICGetter) {
+ CheckIndexedInterceptorHasIC(IdentityIndexedPropertyGetter, nullptr,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " if (i in o) ++result;"
+ "}",
+ 1000);
+}
+
+THREADED_TEST(IndexedInterceptorHasICQueryGetter) {
+ CheckIndexedInterceptorHasIC(IdentityIndexedPropertyGetter,
+ HasICQuery<uint32_t, v8::internal::ABSENT>,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " if (i in o) ++result;"
+ "}",
+ 0);
+}
+
+THREADED_TEST(IndexedInterceptorHasICQueryToggle) {
+ CheckIndexedInterceptorHasIC(IdentityIndexedPropertyGetter,
+ HasICQueryToggle<uint32_t>,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " if (i in o) ++result;"
+ "}",
+ 500);
+}
static void NoBlockGetterX(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Value>&) {}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 6615f1583a..9dfe1155d9 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -45,8 +45,11 @@
#include "src/compilation-cache.h"
#include "src/debug/debug.h"
#include "src/execution.h"
+#include "src/feedback-vector-inl.h"
+#include "src/feedback-vector.h"
#include "src/futex-emulation.h"
#include "src/global-handles.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/local-allocator.h"
#include "src/lookup.h"
@@ -62,6 +65,8 @@
#include "src/wasm/wasm-js.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
static const bool kLogThreading = false;
@@ -619,6 +624,13 @@ TEST(MakingExternalStringConditions) {
CcTest::CollectGarbage(i::NEW_SPACE);
uint16_t* two_byte_string = AsciiToTwoByteString("s1");
+ Local<String> tiny_local_string =
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_string,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ i::DeleteArray(two_byte_string);
+
+ two_byte_string = AsciiToTwoByteString("s1234");
Local<String> local_string =
String::NewFromTwoByte(env->GetIsolate(), two_byte_string,
v8::NewStringType::kNormal)
@@ -632,6 +644,11 @@ TEST(MakingExternalStringConditions) {
CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted.
CHECK(local_string->CanMakeExternal());
+
+ // Tiny strings are not in-place externalizable when pointer compression is
+ // enabled.
+ CHECK_EQ(i::kTaggedSize == i::kSystemPointerSize,
+ tiny_local_string->CanMakeExternal());
}
@@ -643,7 +660,8 @@ TEST(MakingExternalOneByteStringConditions) {
CcTest::CollectGarbage(i::NEW_SPACE);
CcTest::CollectGarbage(i::NEW_SPACE);
- Local<String> local_string = v8_str("s1");
+ Local<String> tiny_local_string = v8_str("s");
+ Local<String> local_string = v8_str("s1234");
// We should refuse to externalize new space strings.
CHECK(!local_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
@@ -651,6 +669,11 @@ TEST(MakingExternalOneByteStringConditions) {
CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted.
CHECK(local_string->CanMakeExternal());
+
+ // Tiny strings are not in-place externalizable when pointer compression is
+ // enabled.
+ CHECK_EQ(i::kTaggedSize == i::kSystemPointerSize,
+ tiny_local_string->CanMakeExternal());
}
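
Both hunks add a tiny string next to the regular one. Externalizing rewrites a string in place so it can carry a full system pointer to the resource, and the smallest string layouts lack that room once pointer compression shrinks tagged fields, hence the kTaggedSize == kSystemPointerSize condition. A hedged sketch of the check an embedder would perform (the resource class is illustrative; MakeExternal requires the resource to hold exactly the string's current characters):

class StaticResource : public v8::String::ExternalOneByteStringResource {
 public:
  StaticResource(const char* data, size_t length)
      : data_(data), length_(length) {}
  const char* data() const override { return data_; }
  size_t length() const override { return length_; }

 private:
  const char* data_;
  size_t length_;
};

bool TryExternalize(v8::Local<v8::String> s, StaticResource* resource) {
  // False for new-space strings, and for tiny strings when pointer
  // compression is on.
  if (!s->CanMakeExternal()) return false;
  return s->MakeExternal(resource);
}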
@@ -786,7 +809,7 @@ TEST(ScavengeExternalString) {
i::FLAG_stress_compaction = false;
i::FLAG_gc_global = false;
int dispose_count = 0;
- bool in_new_space = false;
+ bool in_young_generation = false;
{
v8::HandleScope scope(CcTest::isolate());
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
@@ -797,11 +820,12 @@ TEST(ScavengeExternalString) {
.ToLocalChecked();
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
CcTest::CollectGarbage(i::NEW_SPACE);
- in_new_space = i::Heap::InNewSpace(*istring);
- CHECK(in_new_space || CcTest::heap()->old_space()->Contains(*istring));
+ in_young_generation = i::Heap::InYoungGeneration(*istring);
+ CHECK_IMPLIES(!in_young_generation,
+ CcTest::heap()->old_space()->Contains(*istring));
CHECK_EQ(0, dispose_count);
}
- CcTest::CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_SPACE);
+ CcTest::CollectGarbage(in_young_generation ? i::NEW_SPACE : i::OLD_SPACE);
CHECK_EQ(1, dispose_count);
}
@@ -810,7 +834,7 @@ TEST(ScavengeExternalOneByteString) {
i::FLAG_stress_compaction = false;
i::FLAG_gc_global = false;
int dispose_count = 0;
- bool in_new_space = false;
+ bool in_young_generation = false;
{
v8::HandleScope scope(CcTest::isolate());
const char* one_byte_string = "test string";
@@ -821,11 +845,12 @@ TEST(ScavengeExternalOneByteString) {
.ToLocalChecked();
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
CcTest::CollectGarbage(i::NEW_SPACE);
- in_new_space = i::Heap::InNewSpace(*istring);
- CHECK(in_new_space || CcTest::heap()->old_space()->Contains(*istring));
+ in_young_generation = i::Heap::InYoungGeneration(*istring);
+ CHECK_IMPLIES(!in_young_generation,
+ CcTest::heap()->old_space()->Contains(*istring));
CHECK_EQ(0, dispose_count);
}
- CcTest::CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_SPACE);
+ CcTest::CollectGarbage(in_young_generation ? i::NEW_SPACE : i::OLD_SPACE);
CHECK_EQ(1, dispose_count);
}
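
The rename tracks the heap refactor in this update: young objects are no longer confined to the semispace new space (young large objects, for example, live outside it), so InYoungGeneration is the accurate predicate. Recasting the disjunction as CHECK_IMPLIES also states the invariant directly: not young implies resident in old space. Roughly, and not V8's exact definition:

#define CHECK_IMPLIES_SKETCH(lhs, rhs) CHECK(!(lhs) || (rhs))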
@@ -2813,7 +2838,7 @@ TEST(InternalFieldsSubclassing) {
if (in_object_only) {
CHECK_LE(nof_properties, i_value->map()->GetInObjectProperties());
} else {
- CHECK_LE(kMaxNofProperties, i_value->map()->GetInObjectProperties());
+ CHECK_LE(i_value->map()->GetInObjectProperties(), kMaxNofProperties);
}
   // Make sure we get the precise property count.
@@ -2824,7 +2849,7 @@ TEST(InternalFieldsSubclassing) {
if (in_object_only) {
CHECK_EQ(nof_properties, i_value->map()->GetInObjectProperties());
} else {
- CHECK_LE(kMaxNofProperties, i_value->map()->GetInObjectProperties());
+ CHECK_LE(i_value->map()->GetInObjectProperties(), kMaxNofProperties);
}
}
}
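
Both assertion fixes swap operands that were inverted: CHECK_LE(a, b) asserts a <= b, so the old spelling demanded that the measured in-object property count be at least kMaxNofProperties rather than at most. The orientation in miniature, with plain assert():

#include <cassert>

int main() {
  int in_object_properties = 4;  // measured count (illustrative)
  int max_nof_properties = 8;    // upper bound (illustrative)
  assert(in_object_properties <= max_nof_properties);   // fixed orientation
  // assert(max_nof_properties <= in_object_properties);  // old, inverted form
  return 0;
}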
@@ -7361,7 +7386,8 @@ static const char* kSimpleExtensionSource =
TEST(SimpleExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(new Extension("simpletest", kSimpleExtensionSource));
+ v8::RegisterExtension(
+ v8::base::make_unique<Extension>("simpletest", kSimpleExtensionSource));
const char* extension_names[] = {"simpletest"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7383,8 +7409,8 @@ static const char* kStackTraceFromExtensionSource =
TEST(StackTraceInExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(
- new Extension("stacktracetest", kStackTraceFromExtensionSource));
+ v8::RegisterExtension(v8::base::make_unique<Extension>(
+ "stacktracetest", kStackTraceFromExtensionSource));
const char* extension_names[] = {"stacktracetest"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7401,7 +7427,7 @@ TEST(StackTraceInExtension) {
TEST(NullExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(new Extension("nulltest", nullptr));
+ v8::RegisterExtension(v8::base::make_unique<Extension>("nulltest", nullptr));
const char* extension_names[] = {"nulltest"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7419,8 +7445,8 @@ static const int kEmbeddedExtensionSourceValidLen = 34;
TEST(ExtensionMissingSourceLength) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(
- new Extension("srclentest_fail", kEmbeddedExtensionSource));
+ v8::RegisterExtension(v8::base::make_unique<Extension>(
+ "srclentest_fail", kEmbeddedExtensionSource));
const char* extension_names[] = {"srclentest_fail"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7434,9 +7460,9 @@ TEST(ExtensionWithSourceLength) {
v8::HandleScope handle_scope(CcTest::isolate());
i::ScopedVector<char> extension_name(32);
i::SNPrintF(extension_name, "ext #%d", source_len);
- v8::RegisterExtension(new Extension(extension_name.start(),
- kEmbeddedExtensionSource, 0, nullptr,
- source_len));
+ v8::RegisterExtension(v8::base::make_unique<Extension>(
+ extension_name.start(), kEmbeddedExtensionSource, 0, nullptr,
+ source_len));
const char* extension_names[1] = {extension_name.start()};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7473,8 +7499,10 @@ static const char* kEvalExtensionSource2 =
TEST(UseEvalFromExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(new Extension("evaltest1", kEvalExtensionSource1));
- v8::RegisterExtension(new Extension("evaltest2", kEvalExtensionSource2));
+ v8::RegisterExtension(
+ v8::base::make_unique<Extension>("evaltest1", kEvalExtensionSource1));
+ v8::RegisterExtension(
+ v8::base::make_unique<Extension>("evaltest2", kEvalExtensionSource2));
const char* extension_names[] = {"evaltest1", "evaltest2"};
v8::ExtensionConfiguration extensions(2, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7507,8 +7535,10 @@ static const char* kWithExtensionSource2 =
TEST(UseWithFromExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(new Extension("withtest1", kWithExtensionSource1));
- v8::RegisterExtension(new Extension("withtest2", kWithExtensionSource2));
+ v8::RegisterExtension(
+ v8::base::make_unique<Extension>("withtest1", kWithExtensionSource1));
+ v8::RegisterExtension(
+ v8::base::make_unique<Extension>("withtest2", kWithExtensionSource2));
const char* extension_names[] = {"withtest1", "withtest2"};
v8::ExtensionConfiguration extensions(2, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7524,9 +7554,10 @@ TEST(UseWithFromExtension) {
TEST(AutoExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- Extension* extension = new Extension("autotest", kSimpleExtensionSource);
+ auto extension =
+ v8::base::make_unique<Extension>("autotest", kSimpleExtensionSource);
extension->set_auto_enable(true);
- v8::RegisterExtension(extension);
+ v8::RegisterExtension(std::move(extension));
v8::Local<Context> context = Context::New(CcTest::isolate());
Context::Scope lock(context);
v8::Local<Value> result = CompileRun("Foo()");
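
The long run of RegisterExtension hunks is one mechanical migration: the API now takes std::unique_ptr<Extension>, making the transfer of ownership explicit instead of an undocumented raw-pointer handoff, and AutoExtensions shows the pattern when the extension must be configured first. A sketch of the new idiom, assuming v8.h plus V8's internal base/template-utils.h for make_unique (embedders on C++14 could use std::make_unique):

#include <utility>

void RegisterMyExtension() {
  auto ext = v8::base::make_unique<v8::Extension>(
      "myext", "function fortyTwo() { return 42; }");
  ext->set_auto_enable(true);             // configure before the handoff
  v8::RegisterExtension(std::move(ext));  // V8 now owns the extension
}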
@@ -7542,8 +7573,8 @@ static const char* kSyntaxErrorInExtensionSource = "[";
// error but results in an empty context.
TEST(SyntaxErrorExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(
- new Extension("syntaxerror", kSyntaxErrorInExtensionSource));
+ v8::RegisterExtension(v8::base::make_unique<Extension>(
+ "syntaxerror", kSyntaxErrorInExtensionSource));
const char* extension_names[] = {"syntaxerror"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7558,8 +7589,8 @@ static const char* kExceptionInExtensionSource = "throw 42";
// a fatal error but results in an empty context.
TEST(ExceptionExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(
- new Extension("exception", kExceptionInExtensionSource));
+ v8::RegisterExtension(v8::base::make_unique<Extension>(
+ "exception", kExceptionInExtensionSource));
const char* extension_names[] = {"exception"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7577,8 +7608,8 @@ static const char* kNativeCallTest =
 // Test that native runtime calls are supported in extensions.
TEST(NativeCallInExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(
- new Extension("nativecall", kNativeCallInExtensionSource));
+ v8::RegisterExtension(v8::base::make_unique<Extension>(
+ "nativecall", kNativeCallInExtensionSource));
const char* extension_names[] = {"nativecall"};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7612,8 +7643,8 @@ class NativeFunctionExtension : public Extension {
TEST(NativeFunctionDeclaration) {
v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedecl";
- v8::RegisterExtension(
- new NativeFunctionExtension(name, "native function foo();"));
+ v8::RegisterExtension(v8::base::make_unique<NativeFunctionExtension>(
+ name, "native function foo();"));
const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7628,8 +7659,8 @@ TEST(NativeFunctionDeclarationError) {
v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedeclerr";
// Syntax error in extension code.
- v8::RegisterExtension(
- new NativeFunctionExtension(name, "native\nfunction foo();"));
+ v8::RegisterExtension(v8::base::make_unique<NativeFunctionExtension>(
+ name, "native\nfunction foo();"));
const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7642,8 +7673,8 @@ TEST(NativeFunctionDeclarationErrorEscape) {
const char* name = "nativedeclerresc";
// Syntax error in extension code - escape code in "native" means that
// it's not treated as a keyword.
- v8::RegisterExtension(
- new NativeFunctionExtension(name, "nativ\\u0065 function foo();"));
+ v8::RegisterExtension(v8::base::make_unique<NativeFunctionExtension>(
+ name, "nativ\\u0065 function foo();"));
const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
@@ -7673,13 +7704,18 @@ static void CheckDependencies(const char* name, const char* expected) {
*/
THREADED_TEST(ExtensionDependency) {
static const char* kEDeps[] = {"D"};
- v8::RegisterExtension(new Extension("E", "this.loaded += 'E';", 1, kEDeps));
+ v8::RegisterExtension(
+ v8::base::make_unique<Extension>("E", "this.loaded += 'E';", 1, kEDeps));
static const char* kDDeps[] = {"B", "C"};
- v8::RegisterExtension(new Extension("D", "this.loaded += 'D';", 2, kDDeps));
+ v8::RegisterExtension(
+ v8::base::make_unique<Extension>("D", "this.loaded += 'D';", 2, kDDeps));
static const char* kBCDeps[] = {"A"};
- v8::RegisterExtension(new Extension("B", "this.loaded += 'B';", 1, kBCDeps));
- v8::RegisterExtension(new Extension("C", "this.loaded += 'C';", 1, kBCDeps));
- v8::RegisterExtension(new Extension("A", "this.loaded += 'A';"));
+ v8::RegisterExtension(
+ v8::base::make_unique<Extension>("B", "this.loaded += 'B';", 1, kBCDeps));
+ v8::RegisterExtension(
+ v8::base::make_unique<Extension>("C", "this.loaded += 'C';", 1, kBCDeps));
+ v8::RegisterExtension(
+ v8::base::make_unique<Extension>("A", "this.loaded += 'A';"));
CheckDependencies("A", "undefinedA");
CheckDependencies("B", "undefinedAB");
CheckDependencies("C", "undefinedAC");
@@ -7751,7 +7787,7 @@ v8::Local<v8::FunctionTemplate> FunctionExtension::GetNativeFunctionTemplate(
THREADED_TEST(FunctionLookup) {
- v8::RegisterExtension(new FunctionExtension());
+ v8::RegisterExtension(v8::base::make_unique<FunctionExtension>());
v8::HandleScope handle_scope(CcTest::isolate());
static const char* exts[1] = {"functiontest"};
v8::ExtensionConfiguration config(1, exts);
@@ -7770,7 +7806,7 @@ THREADED_TEST(FunctionLookup) {
THREADED_TEST(NativeFunctionConstructCall) {
- v8::RegisterExtension(new FunctionExtension());
+ v8::RegisterExtension(v8::base::make_unique<FunctionExtension>());
v8::HandleScope handle_scope(CcTest::isolate());
static const char* exts[1] = {"functiontest"};
v8::ExtensionConfiguration config(1, exts);
@@ -7807,9 +7843,9 @@ void StoringErrorCallback(const char* location, const char* message) {
TEST(ErrorReporting) {
CcTest::isolate()->SetFatalErrorHandler(StoringErrorCallback);
static const char* aDeps[] = {"B"};
- v8::RegisterExtension(new Extension("A", "", 1, aDeps));
+ v8::RegisterExtension(v8::base::make_unique<Extension>("A", "", 1, aDeps));
static const char* bDeps[] = {"A"};
- v8::RegisterExtension(new Extension("B", "", 1, bDeps));
+ v8::RegisterExtension(v8::base::make_unique<Extension>("B", "", 1, bDeps));
last_location = nullptr;
v8::ExtensionConfiguration config(1, bDeps);
v8::Local<Context> context = Context::New(CcTest::isolate(), &config);
@@ -10940,7 +10976,6 @@ static void ShadowIndexedGet(uint32_t index,
static void ShadowNamedGet(Local<Name> key,
const v8::PropertyCallbackInfo<v8::Value>&) {}
-
THREADED_TEST(ShadowObject) {
shadow_y = shadow_y_setter_call_count = shadow_y_getter_call_count = 0;
v8::Isolate* isolate = CcTest::isolate();
@@ -10991,198 +11026,103 @@ THREADED_TEST(ShadowObject) {
CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
}
-
-THREADED_TEST(HiddenPrototype) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
+THREADED_TEST(ShadowObjectAndDataProperty) {
+  // Lite mode doesn't use feedback vectors, and this test needs to verify
+  // that the feedback vector takes the correct form.
+ if (i::FLAG_lite_mode) return;
+ // This test mimics the kind of shadow property the Chromium embedder
+ // uses for undeclared globals. The IC subsystem has special handling
+ // for this case, using a PREMONOMORPHIC state to delay entering
+ // MONOMORPHIC state until enough information is available to support
+ // efficient access and good feedback for optimization.
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
- t0->InstanceTemplate()->Set(v8_str("x"), v8_num(0));
- Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
- t1->SetHiddenPrototype(true);
- t1->InstanceTemplate()->Set(v8_str("y"), v8_num(1));
- Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
- t2->SetHiddenPrototype(true);
- t2->InstanceTemplate()->Set(v8_str("z"), v8_num(2));
- Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
- t3->InstanceTemplate()->Set(v8_str("u"), v8_num(3));
-
- Local<v8::Object> o0 = t0->GetFunction(context.local())
- .ToLocalChecked()
- ->NewInstance(context.local())
- .ToLocalChecked();
- Local<v8::Object> o1 = t1->GetFunction(context.local())
- .ToLocalChecked()
- ->NewInstance(context.local())
- .ToLocalChecked();
- Local<v8::Object> o2 = t2->GetFunction(context.local())
- .ToLocalChecked()
- ->NewInstance(context.local())
- .ToLocalChecked();
- Local<v8::Object> o3 = t3->GetFunction(context.local())
- .ToLocalChecked()
- ->NewInstance(context.local())
- .ToLocalChecked();
+ Local<ObjectTemplate> global_template = v8::ObjectTemplate::New(isolate);
+ LocalContext context(nullptr, global_template);
- // Setting the prototype on an object skips hidden prototypes.
- CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK(o0->Set(context.local(), v8_str("__proto__"), o1).FromJust());
- CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(1, o0->Get(context.local(), v8_str("y"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK(o0->Set(context.local(), v8_str("__proto__"), o2).FromJust());
- CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(1, o0->Get(context.local(), v8_str("y"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(2, o0->Get(context.local(), v8_str("z"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK(o0->Set(context.local(), v8_str("__proto__"), o3).FromJust());
- CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(1, o0->Get(context.local(), v8_str("y"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(2, o0->Get(context.local(), v8_str("z"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(3, o0->Get(context.local(), v8_str("u"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
+ t->InstanceTemplate()->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(ShadowNamedGet));
- // Getting the prototype of o0 should get the first visible one
- // which is o3. Therefore, z should not be defined on the prototype
- // object.
- Local<Value> proto =
- o0->Get(context.local(), v8_str("__proto__")).ToLocalChecked();
- CHECK(proto->IsObject());
- CHECK(proto.As<v8::Object>()
- ->Get(context.local(), v8_str("z"))
- .ToLocalChecked()
- ->IsUndefined());
-}
+ Local<Value> o = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("__proto__"), o)
+ .FromJust());
+ CompileRun(
+ "function foo(x) { i = x; }"
+ "foo(0)");
+
+ i::Handle<i::JSFunction> foo(i::Handle<i::JSFunction>::cast(
+ v8::Utils::OpenHandle(*context->Global()
+ ->Get(context.local(), v8_str("foo"))
+ .ToLocalChecked())));
+ CHECK(foo->has_feedback_vector());
+ i::FeedbackSlot slot = i::FeedbackVector::ToSlot(0);
+ i::FeedbackNexus nexus(foo->feedback_vector(), slot);
+ CHECK_EQ(i::FeedbackSlotKind::kStoreGlobalSloppy, nexus.kind());
+ CHECK_EQ(i::PREMONOMORPHIC, nexus.ic_state());
+ CompileRun("foo(1)");
+ CHECK_EQ(i::MONOMORPHIC, nexus.ic_state());
+ // We go a bit further, checking that the form of monomorphism is
+ // a PropertyCell in the vector. This is because we want to make sure
+ // we didn't settle for a "poor man's monomorphism," such as a
+ // slow_stub bailout which would mean a trip to the runtime on all
+ // subsequent stores, and a lack of feedback for the optimizing
+ // compiler downstream.
+ i::HeapObject heap_object;
+ CHECK(nexus.GetFeedback().GetHeapObject(&heap_object));
+ CHECK(heap_object->IsPropertyCell());
+}
+
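
The PropertyCell check at the end is the substantive assertion: MONOMORPHIC store-global feedback that holds a PropertyCell lets stores dispatch straight to the cell, whereas a slow-stub sentinel would mean a runtime call on every subsequent store and no useful signal for the optimizing compiler. Condensed, using the same internal (and version-specific) API as the test:

i::HeapObject feedback;
bool fast_path = nexus.GetFeedback().GetHeapObject(&feedback) &&
                 feedback->IsPropertyCell();
CHECK(fast_path);  // stores go straight to the global's property cell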
+THREADED_TEST(ShadowObjectAndDataPropertyTurbo) {
+ // This test is the same as the previous one except that it triggers
+ // optimization of {foo} after its first invocation.
+ i::FLAG_allow_natives_syntax = true;
-THREADED_TEST(HiddenPrototypeSet) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
+ if (i::FLAG_lite_mode) return;
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- Local<v8::FunctionTemplate> ot = v8::FunctionTemplate::New(isolate);
- Local<v8::FunctionTemplate> ht = v8::FunctionTemplate::New(isolate);
- ht->SetHiddenPrototype(true);
- Local<v8::FunctionTemplate> pt = v8::FunctionTemplate::New(isolate);
- ht->InstanceTemplate()->Set(v8_str("x"), v8_num(0));
-
- Local<v8::Object> o = ot->GetFunction(context.local())
- .ToLocalChecked()
- ->NewInstance(context.local())
- .ToLocalChecked();
- Local<v8::Object> h = ht->GetFunction(context.local())
- .ToLocalChecked()
- ->NewInstance(context.local())
- .ToLocalChecked();
- Local<v8::Object> p = pt->GetFunction(context.local())
- .ToLocalChecked()
- ->NewInstance(context.local())
- .ToLocalChecked();
- CHECK(o->Set(context.local(), v8_str("__proto__"), h).FromJust());
- CHECK(h->Set(context.local(), v8_str("__proto__"), p).FromJust());
-
- // Setting a property that exists on the hidden prototype goes there.
- CHECK(o->Set(context.local(), v8_str("x"), v8_num(7)).FromJust());
- CHECK_EQ(7, o->Get(context.local(), v8_str("x"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(7, h->Get(context.local(), v8_str("x"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK(p->Get(context.local(), v8_str("x")).ToLocalChecked()->IsUndefined());
-
- // Setting a new property should not be forwarded to the hidden prototype.
- CHECK(o->Set(context.local(), v8_str("y"), v8_num(6)).FromJust());
- CHECK_EQ(6, o->Get(context.local(), v8_str("y"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK(h->Get(context.local(), v8_str("y")).ToLocalChecked()->IsUndefined());
- CHECK(p->Get(context.local(), v8_str("y")).ToLocalChecked()->IsUndefined());
-
- // Setting a property that only exists on a prototype of the hidden prototype
- // is treated normally again.
- CHECK(p->Set(context.local(), v8_str("z"), v8_num(8)).FromJust());
- CHECK_EQ(8, o->Get(context.local(), v8_str("z"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(8, h->Get(context.local(), v8_str("z"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(8, p->Get(context.local(), v8_str("z"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK(o->Set(context.local(), v8_str("z"), v8_num(9)).FromJust());
- CHECK_EQ(9, o->Get(context.local(), v8_str("z"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(8, h->Get(context.local(), v8_str("z"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(8, p->Get(context.local(), v8_str("z"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
-}
-
+ Local<ObjectTemplate> global_template = v8::ObjectTemplate::New(isolate);
+ LocalContext context(nullptr, global_template);
-// Regression test for issue 2457.
-THREADED_TEST(HiddenPrototypeIdentityHash) {
- LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
+ t->InstanceTemplate()->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(ShadowNamedGet));
- Local<FunctionTemplate> t = FunctionTemplate::New(context->GetIsolate());
- t->SetHiddenPrototype(true);
- t->InstanceTemplate()->Set(v8_str("foo"), v8_num(75));
- Local<Object> p = t->GetFunction(context.local())
- .ToLocalChecked()
- ->NewInstance(context.local())
- .ToLocalChecked();
- Local<Object> o = Object::New(context->GetIsolate());
- CHECK(o->SetPrototype(context.local(), p).FromJust());
+ Local<Value> o = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("__proto__"), o)
+ .FromJust());
- int hash = o->GetIdentityHash();
- USE(hash);
- CHECK(o->Set(context.local(), v8_str("foo"), v8_num(42)).FromJust());
- CHECK_EQ(hash, o->GetIdentityHash());
+ CompileRun(
+ "function foo(x) { i = x; }"
+ "foo(0)");
+
+ i::Handle<i::JSFunction> foo(i::Handle<i::JSFunction>::cast(
+ v8::Utils::OpenHandle(*context->Global()
+ ->Get(context.local(), v8_str("foo"))
+ .ToLocalChecked())));
+ CHECK(foo->has_feedback_vector());
+ i::FeedbackSlot slot = i::FeedbackVector::ToSlot(0);
+ i::FeedbackNexus nexus(foo->feedback_vector(), slot);
+ CHECK_EQ(i::FeedbackSlotKind::kStoreGlobalSloppy, nexus.kind());
+ CHECK_EQ(i::PREMONOMORPHIC, nexus.ic_state());
+ CompileRun("%OptimizeFunctionOnNextCall(foo); foo(1)");
+ CHECK_EQ(i::MONOMORPHIC, nexus.ic_state());
+ i::HeapObject heap_object;
+ CHECK(nexus.GetFeedback().GetHeapObject(&heap_object));
+ CHECK(heap_object->IsPropertyCell());
}
-
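
The %OptimizeFunctionOnNextCall intrinsic above is only parsed when natives syntax is enabled, which is why the flag is set before any script is compiled. A standalone sketch of the trigger (function name illustrative):

i::FLAG_allow_natives_syntax = true;
CompileRun(
    "function bar(x) { j = x; }"
    "bar(0);"
    "%OptimizeFunctionOnNextCall(bar);"
    "bar(1);");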
THREADED_TEST(SetPrototype) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -11191,10 +11131,8 @@ THREADED_TEST(SetPrototype) {
Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
t0->InstanceTemplate()->Set(v8_str("x"), v8_num(0));
Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
- t1->SetHiddenPrototype(true);
t1->InstanceTemplate()->Set(v8_str("y"), v8_num(1));
Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
- t2->SetHiddenPrototype(true);
t2->InstanceTemplate()->Set(v8_str("z"), v8_num(2));
Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
t3->InstanceTemplate()->Set(v8_str("u"), v8_num(3));
@@ -11216,7 +11154,6 @@ THREADED_TEST(SetPrototype) {
->NewInstance(context.local())
.ToLocalChecked();
- // Setting the prototype on an object does not skip hidden prototypes.
CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -11261,15 +11198,11 @@ THREADED_TEST(SetPrototype) {
->Int32Value(context.local())
.FromJust());
- // Getting the prototype of o0 should get the first visible one
- // which is o3. Therefore, z should not be defined on the prototype
- // object.
Local<Value> proto =
o0->Get(context.local(), v8_str("__proto__")).ToLocalChecked();
CHECK(proto->IsObject());
- CHECK(proto.As<v8::Object>()->Equals(context.local(), o3).FromJust());
+ CHECK(proto.As<v8::Object>()->Equals(context.local(), o1).FromJust());
- // However, Object::GetPrototype ignores hidden prototype.
Local<Value> proto0 = o0->GetPrototype();
CHECK(proto0->IsObject());
CHECK(proto0.As<v8::Object>()->Equals(context.local(), o1).FromJust());
@@ -11294,16 +11227,13 @@ THREADED_TEST(Regress91517) {
v8::HandleScope handle_scope(isolate);
Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
- t1->SetHiddenPrototype(true);
t1->InstanceTemplate()->Set(v8_str("foo"), v8_num(1));
Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
- t2->SetHiddenPrototype(true);
t2->InstanceTemplate()->Set(v8_str("fuz1"), v8_num(2));
t2->InstanceTemplate()->Set(v8_str("objects"),
v8::ObjectTemplate::New(isolate));
t2->InstanceTemplate()->Set(v8_str("fuz2"), v8_num(2));
Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
- t3->SetHiddenPrototype(true);
t3->InstanceTemplate()->Set(v8_str("boo"), v8_num(3));
Local<v8::FunctionTemplate> t4 = v8::FunctionTemplate::New(isolate);
t4->InstanceTemplate()->Set(v8_str("baz"), v8_num(4));
@@ -11332,7 +11262,6 @@ THREADED_TEST(Regress91517) {
->NewInstance(context.local())
.ToLocalChecked();
- // Create prototype chain of hidden prototypes.
CHECK(o4->SetPrototype(context.local(), o3).FromJust());
CHECK(o3->SetPrototype(context.local(), o2).FromJust());
CHECK(o2->SetPrototype(context.local(), o1).FromJust());
@@ -11343,79 +11272,14 @@ THREADED_TEST(Regress91517) {
// PROPERTY_FILTER_NONE = 0
CompileRun("var names = %GetOwnPropertyKeys(obj, 0);");
- ExpectInt32("names.length", 1006);
+ ExpectInt32("names.length", 1);
ExpectTrue("names.indexOf(\"baz\") >= 0");
- ExpectTrue("names.indexOf(\"boo\") >= 0");
- ExpectTrue("names.indexOf(\"foo\") >= 0");
- ExpectTrue("names.indexOf(\"fuz1\") >= 0");
- ExpectTrue("names.indexOf(\"objects\") >= 0");
- ExpectTrue("names.indexOf(\"fuz2\") >= 0");
- ExpectFalse("names[1005] == undefined");
-}
-
-
-// Getting property names of an object with a hidden and inherited
-// prototype should not duplicate the accessor properties inherited.
-THREADED_TEST(Regress269562) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
-
- Local<v8::FunctionTemplate> t1 =
- v8::FunctionTemplate::New(context->GetIsolate());
- t1->SetHiddenPrototype(true);
-
- Local<v8::ObjectTemplate> i1 = t1->InstanceTemplate();
- i1->SetAccessor(v8_str("foo"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- i1->SetAccessor(v8_str("bar"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- i1->SetAccessor(v8_str("baz"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- i1->Set(v8_str("n1"), v8_num(1));
- i1->Set(v8_str("n2"), v8_num(2));
-
- Local<v8::Object> o1 = t1->GetFunction(context.local())
- .ToLocalChecked()
- ->NewInstance(context.local())
- .ToLocalChecked();
- Local<v8::FunctionTemplate> t2 =
- v8::FunctionTemplate::New(context->GetIsolate());
- t2->SetHiddenPrototype(true);
-
- // Inherit from t1 and mark prototype as hidden.
- t2->Inherit(t1);
- t2->InstanceTemplate()->Set(v8_str("mine"), v8_num(4));
-
- Local<v8::Object> o2 = t2->GetFunction(context.local())
- .ToLocalChecked()
- ->NewInstance(context.local())
- .ToLocalChecked();
- CHECK(o2->SetPrototype(context.local(), o1).FromJust());
-
- v8::Local<v8::Symbol> sym =
- v8::Symbol::New(context->GetIsolate(), v8_str("s1"));
- CHECK(o1->Set(context.local(), sym, v8_num(3)).FromJust());
- o1->SetPrivate(context.local(),
- v8::Private::New(context->GetIsolate(), v8_str("h1")),
- v8::Integer::New(context->GetIsolate(), 2013))
- .FromJust();
-
- // Call the runtime version of GetOwnPropertyNames() on
- // the natively created object through JavaScript.
- CHECK(context->Global()->Set(context.local(), v8_str("obj"), o2).FromJust());
- CHECK(context->Global()->Set(context.local(), v8_str("sym"), sym).FromJust());
- // PROPERTY_FILTER_NONE = 0
- CompileRun("var names = %GetOwnPropertyKeys(obj, 0);");
-
- ExpectInt32("names.length", 7);
- ExpectTrue("names.indexOf(\"foo\") >= 0");
- ExpectTrue("names.indexOf(\"bar\") >= 0");
- ExpectTrue("names.indexOf(\"baz\") >= 0");
- ExpectTrue("names.indexOf(\"n1\") >= 0");
- ExpectTrue("names.indexOf(\"n2\") >= 0");
- ExpectTrue("names.indexOf(sym) >= 0");
- ExpectTrue("names.indexOf(\"mine\") >= 0");
+ ExpectFalse("names.indexOf(\"boo\") >= 0");
+ ExpectFalse("names.indexOf(\"foo\") >= 0");
+ ExpectFalse("names.indexOf(\"fuz1\") >= 0");
+ ExpectFalse("names.indexOf(\"objects\") >= 0");
+ ExpectFalse("names.indexOf(\"fuz2\") >= 0");
+ ExpectTrue("names[1005] == undefined");
}
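
With hidden prototypes gone, %GetOwnPropertyKeys reports only genuinely own keys, so the expected count drops from 1006 (the own key plus everything formerly flattened in from the hidden-prototype chain) to 1. The same contract in miniature, using the harness helpers seen above:

CompileRun(
    "var proto = { inherited: 1 };"
    "var obj = { own: 2 };"
    "Object.setPrototypeOf(obj, proto);");
ExpectInt32("Object.getOwnPropertyNames(obj).length", 1);
ExpectTrue("Object.getOwnPropertyNames(obj)[0] === 'own'");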
@@ -12278,30 +12142,28 @@ THREADED_TEST(CallICFastApi_DirectCall_GCMoveStub) {
.FromJust());
   // Call the API function multiple times to ensure direct call stub creation.
CompileRun(
- "function f() {"
- " for (var i = 1; i <= 30; i++) {"
- " nativeobject.callback();"
- " }"
- "}"
- "f();");
+ "function f() {"
+ " for (var i = 1; i <= 30; i++) {"
+ " nativeobject.callback();"
+ " }"
+ "}"
+ "f();");
}
-
void ThrowingDirectApiCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetIsolate()->ThrowException(v8_str("g"));
}
-
THREADED_TEST(CallICFastApi_DirectCall_Throw) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> nativeobject_templ =
v8::ObjectTemplate::New(isolate);
- nativeobject_templ->Set(isolate, "callback",
- v8::FunctionTemplate::New(isolate,
- ThrowingDirectApiCallback));
+ nativeobject_templ->Set(
+ isolate, "callback",
+ v8::FunctionTemplate::New(isolate, ThrowingDirectApiCallback));
v8::Local<v8::Object> nativeobject_obj =
nativeobject_templ->NewInstance(context.local()).ToLocalChecked();
CHECK(context->Global()
@@ -12319,10 +12181,8 @@ THREADED_TEST(CallICFastApi_DirectCall_Throw) {
CHECK(v8_str("ggggg")->Equals(context.local(), result).FromJust());
}
-
static int p_getter_count_3;
-
static Local<Value> DoDirectGetter() {
if (++p_getter_count_3 % 3 == 0) {
CcTest::CollectAllGarbage();
@@ -12331,16 +12191,13 @@ static Local<Value> DoDirectGetter() {
return v8_str("Direct Getter Result");
}
-
static void DirectGetterCallback(
- Local<String> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+ Local<String> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
CheckReturnValue(info, FUNCTION_ADDR(DirectGetterCallback));
info.GetReturnValue().Set(DoDirectGetter());
}
-
-template<typename Accessor>
+template <typename Accessor>
static void LoadICFastApi_DirectCall_GCMoveStub(Accessor accessor) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -12364,19 +12221,15 @@ static void LoadICFastApi_DirectCall_GCMoveStub(Accessor accessor) {
CHECK_EQ(31, p_getter_count_3);
}
-
THREADED_PROFILED_TEST(LoadICFastApi_DirectCall_GCMoveStub) {
LoadICFastApi_DirectCall_GCMoveStub(DirectGetterCallback);
}
-
void ThrowingDirectGetterCallback(
- Local<String> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+ Local<String> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
info.GetIsolate()->ThrowException(v8_str("g"));
}
-
THREADED_TEST(LoadICFastApi_DirectCall_Throw) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -12396,7 +12249,6 @@ THREADED_TEST(LoadICFastApi_DirectCall_Throw) {
CHECK(v8_str("ggggg")->Equals(context.local(), result).FromJust());
}
-
THREADED_PROFILED_TEST(InterceptorCallICFastApi_TrivialSignature) {
int interceptor_call_count = 0;
v8::Isolate* isolate = CcTest::isolate();
@@ -12433,259 +12285,6 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_TrivialSignature) {
CHECK_EQ(100, interceptor_call_count);
}
-
-THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
- int interceptor_call_count = 0;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(isolate);
- v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
- isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
- v8::Signature::New(isolate, fun_templ));
- v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
- proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
- v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
- v8::External::New(isolate, &interceptor_call_count)));
- LocalContext context;
- v8::Local<v8::Function> fun =
- fun_templ->GetFunction(context.local()).ToLocalChecked();
- GenerateSomeGarbage();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("o"),
- fun->NewInstance(context.local()).ToLocalChecked())
- .FromJust());
- CompileRun(
- "o.foo = 17;"
- "var receiver = {};"
- "receiver.__proto__ = o;"
- "var result = 0;"
- "for (var i = 0; i < 100; i++) {"
- " result = receiver.method(41);"
- "}");
- CHECK_EQ(42, context->Global()
- ->Get(context.local(), v8_str("result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(100, interceptor_call_count);
-}
-
-
-THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
- int interceptor_call_count = 0;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(isolate);
- v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
- isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
- v8::Signature::New(isolate, fun_templ));
- v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
- proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
- v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
- v8::External::New(isolate, &interceptor_call_count)));
- LocalContext context;
- v8::Local<v8::Function> fun =
- fun_templ->GetFunction(context.local()).ToLocalChecked();
- GenerateSomeGarbage();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("o"),
- fun->NewInstance(context.local()).ToLocalChecked())
- .FromJust());
- CompileRun(
- "o.foo = 17;"
- "var receiver = {};"
- "receiver.__proto__ = o;"
- "var result = 0;"
- "var saved_result = 0;"
- "for (var i = 0; i < 100; i++) {"
- " result = receiver.method(41);"
- " if (i == 50) {"
- " saved_result = result;"
- " receiver = {method: function(x) { return x - 1 }};"
- " }"
- "}");
- CHECK_EQ(40, context->Global()
- ->Get(context.local(), v8_str("result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(42, context->Global()
- ->Get(context.local(), v8_str("saved_result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_GE(interceptor_call_count, 50);
-}
-
-
-THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
- int interceptor_call_count = 0;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(isolate);
- v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
- isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
- v8::Signature::New(isolate, fun_templ));
- v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
- proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
- v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
- v8::External::New(isolate, &interceptor_call_count)));
- LocalContext context;
- v8::Local<v8::Function> fun =
- fun_templ->GetFunction(context.local()).ToLocalChecked();
- GenerateSomeGarbage();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("o"),
- fun->NewInstance(context.local()).ToLocalChecked())
- .FromJust());
- CompileRun(
- "o.foo = 17;"
- "var receiver = {};"
- "receiver.__proto__ = o;"
- "var result = 0;"
- "var saved_result = 0;"
- "for (var i = 0; i < 100; i++) {"
- " result = receiver.method(41);"
- " if (i == 50) {"
- " saved_result = result;"
- " o.method = function(x) { return x - 1 };"
- " }"
- "}");
- CHECK_EQ(40, context->Global()
- ->Get(context.local(), v8_str("result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(42, context->Global()
- ->Get(context.local(), v8_str("saved_result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_GE(interceptor_call_count, 50);
-}
-
-
-THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
- int interceptor_call_count = 0;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(isolate);
- v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
- isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
- v8::Signature::New(isolate, fun_templ));
- v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
- proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
- v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
- v8::External::New(isolate, &interceptor_call_count)));
- LocalContext context;
- v8::Local<v8::Function> fun =
- fun_templ->GetFunction(context.local()).ToLocalChecked();
- GenerateSomeGarbage();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("o"),
- fun->NewInstance(context.local()).ToLocalChecked())
- .FromJust());
- v8::TryCatch try_catch(isolate);
- CompileRun(
- "o.foo = 17;"
- "var receiver = {};"
- "receiver.__proto__ = o;"
- "var result = 0;"
- "var saved_result = 0;"
- "for (var i = 0; i < 100; i++) {"
- " result = receiver.method(41);"
- " if (i == 50) {"
- " saved_result = result;"
- " receiver = 333;"
- " }"
- "}");
- CHECK(try_catch.HasCaught());
- // TODO(verwaest): Adjust message.
- CHECK(
- v8_str("TypeError: receiver.method is not a function")
- ->Equals(
- context.local(),
- try_catch.Exception()->ToString(context.local()).ToLocalChecked())
- .FromJust());
- CHECK_EQ(42, context->Global()
- ->Get(context.local(), v8_str("saved_result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_GE(interceptor_call_count, 50);
-}
-
-
-THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
- int interceptor_call_count = 0;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(isolate);
- v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
- isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
- v8::Signature::New(isolate, fun_templ));
- v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
- proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
- v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorCallICFastApi, nullptr, nullptr, nullptr, nullptr,
- v8::External::New(isolate, &interceptor_call_count)));
- LocalContext context;
- v8::Local<v8::Function> fun =
- fun_templ->GetFunction(context.local()).ToLocalChecked();
- GenerateSomeGarbage();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("o"),
- fun->NewInstance(context.local()).ToLocalChecked())
- .FromJust());
- v8::TryCatch try_catch(isolate);
- CompileRun(
- "o.foo = 17;"
- "var receiver = {};"
- "receiver.__proto__ = o;"
- "var result = 0;"
- "var saved_result = 0;"
- "for (var i = 0; i < 100; i++) {"
- " result = receiver.method(41);"
- " if (i == 50) {"
- " saved_result = result;"
- " receiver = {method: receiver.method};"
- " }"
- "}");
- CHECK(try_catch.HasCaught());
- CHECK(
- v8_str("TypeError: Illegal invocation")
- ->Equals(
- context.local(),
- try_catch.Exception()->ToString(context.local()).ToLocalChecked())
- .FromJust());
- CHECK_EQ(42, context->Global()
- ->Get(context.local(), v8_str("saved_result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_GE(interceptor_call_count, 50);
-}
-
-
THREADED_PROFILED_TEST(CallICFastApi_TrivialSignature) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
@@ -12719,193 +12318,6 @@ THREADED_PROFILED_TEST(CallICFastApi_TrivialSignature) {
.FromJust());
}
-
-THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(isolate);
- v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
- isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
- v8::Signature::New(isolate, fun_templ));
- v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
- proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
- v8::Local<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
- CHECK(!templ.IsEmpty());
- LocalContext context;
- v8::Local<v8::Function> fun =
- fun_templ->GetFunction(context.local()).ToLocalChecked();
- GenerateSomeGarbage();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("o"),
- fun->NewInstance(context.local()).ToLocalChecked())
- .FromJust());
- CompileRun(
- "o.foo = 17;"
- "var receiver = {};"
- "receiver.__proto__ = o;"
- "var result = 0;"
- "for (var i = 0; i < 100; i++) {"
- " result = receiver.method(41);"
- "}");
-
- CHECK_EQ(42, context->Global()
- ->Get(context.local(), v8_str("result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
-}
-
-
-THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss1) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(isolate);
- v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
- isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
- v8::Signature::New(isolate, fun_templ));
- v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
- proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
- v8::Local<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
- CHECK(!templ.IsEmpty());
- LocalContext context;
- v8::Local<v8::Function> fun =
- fun_templ->GetFunction(context.local()).ToLocalChecked();
- GenerateSomeGarbage();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("o"),
- fun->NewInstance(context.local()).ToLocalChecked())
- .FromJust());
- CompileRun(
- "o.foo = 17;"
- "var receiver = {};"
- "receiver.__proto__ = o;"
- "var result = 0;"
- "var saved_result = 0;"
- "for (var i = 0; i < 100; i++) {"
- " result = receiver.method(41);"
- " if (i == 50) {"
- " saved_result = result;"
- " receiver = {method: function(x) { return x - 1 }};"
- " }"
- "}");
- CHECK_EQ(40, context->Global()
- ->Get(context.local(), v8_str("result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
- CHECK_EQ(42, context->Global()
- ->Get(context.local(), v8_str("saved_result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
-}
-
-
-THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(isolate);
- v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
- isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
- v8::Signature::New(isolate, fun_templ));
- v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
- proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
- v8::Local<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
- CHECK(!templ.IsEmpty());
- LocalContext context;
- v8::Local<v8::Function> fun =
- fun_templ->GetFunction(context.local()).ToLocalChecked();
- GenerateSomeGarbage();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("o"),
- fun->NewInstance(context.local()).ToLocalChecked())
- .FromJust());
- v8::TryCatch try_catch(isolate);
- CompileRun(
- "o.foo = 17;"
- "var receiver = {};"
- "receiver.__proto__ = o;"
- "var result = 0;"
- "var saved_result = 0;"
- "for (var i = 0; i < 100; i++) {"
- " result = receiver.method(41);"
- " if (i == 50) {"
- " saved_result = result;"
- " receiver = 333;"
- " }"
- "}");
- CHECK(try_catch.HasCaught());
- // TODO(verwaest): Adjust message.
- CHECK(
- v8_str("TypeError: receiver.method is not a function")
- ->Equals(
- context.local(),
- try_catch.Exception()->ToString(context.local()).ToLocalChecked())
- .FromJust());
- CHECK_EQ(42, context->Global()
- ->Get(context.local(), v8_str("saved_result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
-}
-
-
-THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_TypeError) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(isolate);
- v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
- isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
- v8::Signature::New(isolate, fun_templ));
- v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
- proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
- v8::Local<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
- CHECK(!templ.IsEmpty());
- LocalContext context;
- v8::Local<v8::Function> fun =
- fun_templ->GetFunction(context.local()).ToLocalChecked();
- GenerateSomeGarbage();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("o"),
- fun->NewInstance(context.local()).ToLocalChecked())
- .FromJust());
- v8::TryCatch try_catch(isolate);
- CompileRun(
- "o.foo = 17;"
- "var receiver = {};"
- "receiver.__proto__ = o;"
- "var result = 0;"
- "var saved_result = 0;"
- "for (var i = 0; i < 100; i++) {"
- " result = receiver.method(41);"
- " if (i == 50) {"
- " saved_result = result;"
- " receiver = Object.create(receiver);"
- " }"
- "}");
- CHECK(try_catch.HasCaught());
- CHECK(
- v8_str("TypeError: Illegal invocation")
- ->Equals(
- context.local(),
- try_catch.Exception()->ToString(context.local()).ToLocalChecked())
- .FromJust());
- CHECK_EQ(42, context->Global()
- ->Get(context.local(), v8_str("saved_result"))
- .ToLocalChecked()
- ->Int32Value(context.local())
- .FromJust());
-}
-
-
static void ThrowingGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
@@ -15690,9 +15102,6 @@ TEST(CompileExternalTwoByteSource) {
}
}
-
-#ifndef V8_INTERPRETED_REGEXP
-
struct RegExpInterruptionData {
v8::base::Atomic32 loop_count;
UC16VectorResource* string_resource;
@@ -15768,9 +15177,6 @@ TEST(RegExpInterruption) {
i::DeleteArray(uc16_content);
}
-#endif // V8_INTERPRETED_REGEXP
-
-
// Test that we cannot set a property on the global object if there
// is a read-only property in the prototype chain.
TEST(ReadOnlyPropertyInGlobalProto) {
@@ -19012,16 +18418,9 @@ TEST(GetHeapSpaceStatistics) {
v8::HeapSpaceStatistics space_statistics;
isolate->GetHeapSpaceStatistics(&space_statistics, i);
CHECK_NOT_NULL(space_statistics.space_name());
- if (strcmp(space_statistics.space_name(), "new_large_object_space") == 0 ||
- strcmp(space_statistics.space_name(), "code_large_object_space") == 0) {
- continue;
- }
- CHECK_GT(space_statistics.space_size(), 0u);
total_size += space_statistics.space_size();
- CHECK_GT(space_statistics.space_used_size(), 0u);
total_used_size += space_statistics.space_used_size();
total_available_size += space_statistics.space_available_size();
- CHECK_GT(space_statistics.physical_space_size(), 0u);
total_physical_size += space_statistics.physical_space_size();
}
total_available_size += CcTest::heap()->memory_allocator()->Available();
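
The dropped CHECK_GTs acknowledge that some spaces, the new and code large-object spaces among them, may legitimately be empty at this point, so only the running totals stay meaningful. A sketch of the tolerant iteration, assuming a live v8::Isolate* named isolate:

size_t total = 0;
for (size_t i = 0; i < isolate->NumberOfHeapSpaces(); ++i) {
  v8::HeapSpaceStatistics stats;
  isolate->GetHeapSpaceStatistics(&stats, i);
  total += stats.space_size();  // may be zero for an empty space
}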
@@ -21879,92 +21278,6 @@ THREADED_TEST(Regress1516) {
}
-THREADED_TEST(Regress93759) {
- v8::Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
-
- // Template for object with security check.
- Local<ObjectTemplate> no_proto_template = v8::ObjectTemplate::New(isolate);
- no_proto_template->SetAccessCheckCallback(AccessAlwaysBlocked);
-
- // Templates for objects with hidden prototypes and possibly security check.
- Local<FunctionTemplate> hidden_proto_template =
- v8::FunctionTemplate::New(isolate);
- hidden_proto_template->SetHiddenPrototype(true);
-
- Local<FunctionTemplate> protected_hidden_proto_template =
- v8::FunctionTemplate::New(isolate);
- protected_hidden_proto_template->InstanceTemplate()->SetAccessCheckCallback(
- AccessAlwaysBlocked);
- protected_hidden_proto_template->SetHiddenPrototype(true);
-
- // Context for "foreign" objects used in test.
- Local<Context> context = v8::Context::New(isolate);
- context->Enter();
-
- // Plain object, no security check.
- Local<Object> simple_object = Object::New(isolate);
-
- // Object with explicit security check.
- Local<Object> protected_object =
- no_proto_template->NewInstance(context).ToLocalChecked();
-
- // JSGlobalProxy object, always have security check.
- Local<Object> proxy_object = context->Global();
-
- // Global object, the prototype of proxy_object. No security checks.
- Local<Object> global_object =
- proxy_object->GetPrototype()->ToObject(context).ToLocalChecked();
-
- // Hidden prototype without security check.
- Local<Object> hidden_prototype = hidden_proto_template->GetFunction(context)
- .ToLocalChecked()
- ->NewInstance(context)
- .ToLocalChecked();
- Local<Object> object_with_hidden =
- Object::New(isolate);
- object_with_hidden->SetPrototype(context, hidden_prototype).FromJust();
-
- context->Exit();
-
- LocalContext context2;
- v8::Local<v8::Object> global = context2->Global();
-
- // Setup global variables.
- CHECK(global->Set(context2.local(), v8_str("simple"), simple_object)
- .FromJust());
- CHECK(global->Set(context2.local(), v8_str("protected"), protected_object)
- .FromJust());
- CHECK(global->Set(context2.local(), v8_str("global"), global_object)
- .FromJust());
- CHECK(
- global->Set(context2.local(), v8_str("proxy"), proxy_object).FromJust());
- CHECK(global->Set(context2.local(), v8_str("hidden"), object_with_hidden)
- .FromJust());
-
- Local<Value> result1 = CompileRun("Object.getPrototypeOf(simple)");
- CHECK(result1->Equals(context2.local(), simple_object->GetPrototype())
- .FromJust());
-
- Local<Value> result2 = CompileRun("Object.getPrototypeOf(protected)");
- CHECK(result2->IsNull());
-
- Local<Value> result3 = CompileRun("Object.getPrototypeOf(global)");
- CHECK(result3->Equals(context2.local(), global_object->GetPrototype())
- .FromJust());
-
- Local<Value> result4 = CompileRun("Object.getPrototypeOf(proxy)");
- CHECK(result4->IsNull());
-
- Local<Value> result5 = CompileRun("Object.getPrototypeOf(hidden)");
- CHECK(result5->Equals(context2.local(), object_with_hidden->GetPrototype()
- ->ToObject(context2.local())
- .ToLocalChecked()
- ->GetPrototype())
- .FromJust());
-}
-
-
static void TestReceiver(Local<Value> expected_result,
Local<Value> expected_receiver,
const char* code) {
@@ -23511,8 +22824,8 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("JSON.stringify(other)");
CheckCorrectThrow("has_own_property(other, 'x')");
CheckCorrectThrow("%GetProperty(other, 'x')");
- CheckCorrectThrow("%SetKeyedProperty(other, 'x', 'foo', 0)");
- CheckCorrectThrow("%SetNamedProperty(other, 'y', 'foo', 1)");
+ CheckCorrectThrow("%SetKeyedProperty(other, 'x', 'foo')");
+ CheckCorrectThrow("%SetNamedProperty(other, 'y', 'foo')");
STATIC_ASSERT(static_cast<int>(i::LanguageMode::kSloppy) == 0);
STATIC_ASSERT(static_cast<int>(i::LanguageMode::kStrict) == 1);
CheckCorrectThrow("%DeleteProperty(other, 'x', 0)"); // 0 == SLOPPY
@@ -24178,7 +23491,6 @@ class ApiCallOptimizationChecker {
{
Local<v8::FunctionTemplate> parent_template =
FunctionTemplate::New(isolate);
- parent_template->SetHiddenPrototype(true);
Local<v8::FunctionTemplate> function_template
= FunctionTemplate::New(isolate);
function_template->Inherit(parent_template);
@@ -24207,7 +23519,6 @@ class ApiCallOptimizationChecker {
// Get the holder objects.
Local<Object> inner_global =
Local<Object>::Cast(context->Global()->GetPrototype());
- // Install functions on hidden prototype object if there is one.
data = Object::New(isolate);
Local<FunctionTemplate> function_template = FunctionTemplate::New(
isolate, OptimizationCallback, data, signature);
@@ -24486,47 +23797,6 @@ TEST(ChainSignatureCheck) {
}
-TEST(PrototypeSignatureCheck) {
- LocalContext context;
- auto isolate = context->GetIsolate();
- v8::HandleScope scope(isolate);
- auto global = context->Global();
- auto sig_obj = FunctionTemplate::New(isolate);
- sig_obj->SetHiddenPrototype(true);
- auto sig = v8::Signature::New(isolate, sig_obj);
- auto x = FunctionTemplate::New(isolate, Returns42, Local<Value>(), sig);
- global->Set(context.local(), v8_str("sig_obj"),
- sig_obj->GetFunction(context.local()).ToLocalChecked())
- .FromJust();
- global->Set(context.local(), v8_str("x"),
- x->GetFunction(context.local()).ToLocalChecked())
- .FromJust();
- CompileRun("s = {}; s.__proto__ = new sig_obj();");
- {
- TryCatch try_catch(isolate);
- CompileRun("x()");
- CHECK(try_catch.HasCaught());
- }
- {
- TryCatch try_catch(isolate);
- CompileRun("x.call(1)");
- CHECK(try_catch.HasCaught());
- }
- {
- TryCatch try_catch(isolate);
- auto result = CompileRun("s.x = x; s.x()");
- CHECK(!try_catch.HasCaught());
- CHECK_EQ(42, result->Int32Value(context.local()).FromJust());
- }
- {
- TryCatch try_catch(isolate);
- auto result = CompileRun("x.call(s)");
- CHECK(!try_catch.HasCaught());
- CHECK_EQ(42, result->Int32Value(context.local()).FromJust());
- }
-}
-
-
static const char* last_event_message;
static int last_event_status;
void StoringEventLoggerCallback(const char* message, int status) {
@@ -28308,15 +27578,10 @@ void AtomicsWaitCallbackForTesting(
}
}
-TEST(AtomicsWaitCallback) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
-
- Local<Value> sab = CompileRun(
- "sab = new SharedArrayBuffer(12);"
- "int32arr = new Int32Array(sab, 4);"
- "sab");
+// Must be called from within a HandleScope.
+void AtomicsWaitCallbackCommon(v8::Isolate* isolate, Local<Value> sab,
+ size_t initial_offset,
+ size_t offset_multiplier) {
CHECK(sab->IsSharedArrayBuffer());
AtomicsWaitCallbackInfo info;
@@ -28326,52 +27591,52 @@ TEST(AtomicsWaitCallback) {
{
v8::TryCatch try_catch(isolate);
- info.expected_offset = 4;
+ info.expected_offset = initial_offset;
info.expected_timeout = std::numeric_limits<double>::infinity();
info.expected_value = 0;
info.expected_event = v8::Isolate::AtomicsWaitEvent::kTerminatedExecution;
info.action = AtomicsWaitCallbackAction::Interrupt;
info.ncalls = 0;
- CompileRun("Atomics.wait(int32arr, 0, 0);");
+ CompileRun("wait(0, 0);");
CHECK_EQ(info.ncalls, 2);
CHECK(try_catch.HasTerminated());
}
{
v8::TryCatch try_catch(isolate);
- info.expected_offset = 8;
+ info.expected_offset = initial_offset + offset_multiplier;
info.expected_timeout = std::numeric_limits<double>::infinity();
info.expected_value = 1;
info.expected_event = v8::Isolate::AtomicsWaitEvent::kNotEqual;
info.action = AtomicsWaitCallbackAction::KeepWaiting;
info.ncalls = 0;
- CompileRun("Atomics.wait(int32arr, 1, 1);"); // real value is 0 != 1
+ CompileRun("wait(1, 1);"); // real value is 0 != 1
CHECK_EQ(info.ncalls, 2);
CHECK(!try_catch.HasCaught());
}
{
v8::TryCatch try_catch(isolate);
- info.expected_offset = 8;
+ info.expected_offset = initial_offset + offset_multiplier;
info.expected_timeout = 0.125;
info.expected_value = 0;
info.expected_event = v8::Isolate::AtomicsWaitEvent::kTimedOut;
info.action = AtomicsWaitCallbackAction::KeepWaiting;
info.ncalls = 0;
- CompileRun("Atomics.wait(int32arr, 1, 0, 0.125);"); // timeout
+ CompileRun("wait(1, 0, 0.125);"); // timeout
CHECK_EQ(info.ncalls, 2);
CHECK(!try_catch.HasCaught());
}
{
v8::TryCatch try_catch(isolate);
- info.expected_offset = 8;
+ info.expected_offset = initial_offset + offset_multiplier;
info.expected_timeout = std::numeric_limits<double>::infinity();
info.expected_value = 0;
info.expected_event = v8::Isolate::AtomicsWaitEvent::kAPIStopped;
info.action = AtomicsWaitCallbackAction::StopAndThrowInFirstCall;
info.ncalls = 0;
- CompileRun("Atomics.wait(int32arr, 1, 0);");
+ CompileRun("wait(1, 0);");
CHECK_EQ(info.ncalls, 1); // Only one extra call
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsInt32());
@@ -28380,13 +27645,13 @@ TEST(AtomicsWaitCallback) {
{
v8::TryCatch try_catch(isolate);
- info.expected_offset = 8;
+ info.expected_offset = initial_offset + offset_multiplier;
info.expected_timeout = std::numeric_limits<double>::infinity();
info.expected_value = 0;
info.expected_event = v8::Isolate::AtomicsWaitEvent::kAPIStopped;
info.action = AtomicsWaitCallbackAction::StopAndThrowInSecondCall;
info.ncalls = 0;
- CompileRun("Atomics.wait(int32arr, 1, 0);");
+ CompileRun("wait(1, 0);");
CHECK_EQ(info.ncalls, 2);
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsInt32());
@@ -28396,15 +27661,15 @@ TEST(AtomicsWaitCallback) {
{
// Same test as before, but with a different `expected_value`.
v8::TryCatch try_catch(isolate);
- info.expected_offset = 8;
+ info.expected_offset = initial_offset + offset_multiplier;
info.expected_timeout = std::numeric_limits<double>::infinity();
info.expected_value = 200;
info.expected_event = v8::Isolate::AtomicsWaitEvent::kAPIStopped;
info.action = AtomicsWaitCallbackAction::StopAndThrowInSecondCall;
info.ncalls = 0;
CompileRun(
- "int32arr[1] = 200;"
- "Atomics.wait(int32arr, 1, 200);");
+ "setArrayElemAs(1, 200);"
+ "wait(1, 200);");
CHECK_EQ(info.ncalls, 2);
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsInt32());
@@ -28414,13 +27679,15 @@ TEST(AtomicsWaitCallback) {
{
// Wake the `Atomics.wait()` call from a thread.
v8::TryCatch try_catch(isolate);
- info.expected_offset = 4;
+ info.expected_offset = initial_offset;
info.expected_timeout = std::numeric_limits<double>::infinity();
info.expected_value = 0;
info.expected_event = v8::Isolate::AtomicsWaitEvent::kAPIStopped;
info.action = AtomicsWaitCallbackAction::StopFromThreadAndThrow;
info.ncalls = 0;
- CompileRun("Atomics.wait(int32arr, 0, 0);");
+ CompileRun(
+ "setArrayElemAs(1, 0);"
+ "wait(0, 0);");
CHECK_EQ(info.ncalls, 2);
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsInt32());
@@ -28428,6 +27695,105 @@ TEST(AtomicsWaitCallback) {
}
}
+TEST(AtomicsWaitCallback) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ const char* init = R"(
+ let sab = new SharedArrayBuffer(16);
+ let int32arr = new Int32Array(sab, 4);
+ let setArrayElemAs = function(id, val) {
+ int32arr[id] = val;
+ };
+ let wait = function(id, val, timeout) {
+ if(arguments.length === 2) return Atomics.wait(int32arr, id, val);
+ return Atomics.wait(int32arr, id, val, timeout);
+ };
+ sab;)";
+ AtomicsWaitCallbackCommon(isolate, CompileRun(init), 4, 4);
+}
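The JS variant above and the Wasm variants that follow can share AtomicsWaitCallbackCommon because every expected byte offset follows a single formula. A minimal sketch of that relationship, assuming the helper's parameters mean what their names suggest (illustrative only, not part of the patch):

#include <cstddef>

// The Int32Array view starts at byte 4 of the SharedArrayBuffer and has
// 4-byte elements, so waiting on element id reports byte offset
// initial_offset + id * offset_multiplier to the AtomicsWaitCallback;
// here wait(1, ...) reports 4 + 1 * 4 == 8, matching the expected_offset
// checks in AtomicsWaitCallbackCommon.
std::size_t ExpectedWaitOffset(std::size_t initial_offset,
                               std::size_t offset_multiplier,
                               std::size_t id) {
  return initial_offset + id * offset_multiplier;
}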
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+TEST(WasmI32AtomicWaitCallback) {
+ FlagScope<bool> wasm_threads_flag(&i::FLAG_experimental_wasm_threads, true);
+ WasmRunner<int32_t, int32_t, int32_t, double> r(ExecutionTier::kOptimized);
+ r.builder().AddMemory(kWasmPageSize, SharedFlag::kShared);
+ r.builder().SetHasSharedMemory();
+ BUILD(r, WASM_ATOMICS_WAIT(kExprI32AtomicWait, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1),
+ WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(2)), 4));
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Handle<JSFunction> func = r.builder().WrapCode(0);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("func"), v8::Utils::ToLocal(func))
+ .FromJust());
+ Handle<JSArrayBuffer> memory(
+ r.builder().instance_object()->memory_object()->array_buffer(),
+ i_isolate);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("sab"), v8::Utils::ToLocal(memory))
+ .FromJust());
+
+ const char* init = R"(
+ let int32arr = new Int32Array(sab, 4);
+ let setArrayElemAs = function(id, val) {
+ int32arr[id] = val;
+ };
+ let wait = function(id, val, timeout) {
+ if(arguments.length === 2)
+ return func(id << 2, val, -1);
+ return func(id << 2, val, timeout*1000000);
+ };
+ sab;)";
+ AtomicsWaitCallbackCommon(isolate, CompileRun(init), 4, 4);
+}
+
+TEST(WasmI64AtomicWaitCallback) {
+ FlagScope<bool> wasm_threads_flag(&i::FLAG_experimental_wasm_threads, true);
+ WasmRunner<int32_t, int32_t, double, double> r(ExecutionTier::kOptimized);
+ r.builder().AddMemory(kWasmPageSize, SharedFlag::kShared);
+ r.builder().SetHasSharedMemory();
+ BUILD(r, WASM_ATOMICS_WAIT(kExprI64AtomicWait, WASM_GET_LOCAL(0),
+ WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(1)),
+ WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(2)), 8));
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Handle<JSFunction> func = r.builder().WrapCode(0);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("func"), v8::Utils::ToLocal(func))
+ .FromJust());
+ Handle<JSArrayBuffer> memory(
+ r.builder().instance_object()->memory_object()->array_buffer(),
+ i_isolate);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("sab"), v8::Utils::ToLocal(memory))
+ .FromJust());
+
+ const char* init = R"(
+ let int64arr = new BigInt64Array(sab, 8);
+ let setArrayElemAs = function(id, val) {
+ int64arr[id] = BigInt(val);
+ };
+ let wait = function(id, val, timeout) {
+ if(arguments.length === 2)
+ return func(id << 3, val, -1);
+ return func(id << 3, val, timeout*1000000);
+ };
+ sab;)";
+ AtomicsWaitCallbackCommon(isolate, CompileRun(init), 8, 8);
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
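The Wasm wrappers above reuse the same expectations by scaling element ids to the byte addresses the wait instructions take (id << 2 for 4-byte i32 elements, id << 3 for 8-byte i64 elements) and by converting the JS variant's millisecond timeouts into nanoseconds, the unit the wasm threads proposal specifies for atomic waits. A hedged sketch of the timeout conversion; the helper name is hypothetical:

#include <cstdint>

// 1 ms == 1,000,000 ns; mirrors the timeout*1000000 in the wrappers above,
// with -1 reserved for an infinite wait.
std::int64_t MillisToWasmWaitNanos(double timeout_ms) {
  return static_cast<std::int64_t>(timeout_ms * 1000000);
}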
TEST(BigIntAPI) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index d66027b5fd..f93163c985 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -167,7 +167,6 @@ TEST(3) {
T t;
Assembler assm(AssemblerOptions{});
- Label L, C;
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -236,7 +235,6 @@ TEST(4) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
Assembler assm(AssemblerOptions{});
- Label L, C;
if (CpuFeatures::IsSupported(VFPv3)) {
CpuFeatureScope scope(&assm, VFPv3);
@@ -1029,7 +1027,6 @@ TEST(13) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
Assembler assm(AssemblerOptions{});
- Label L, C;
if (CpuFeatures::IsSupported(VFPv3)) {
CpuFeatureScope scope(&assm, VFPv3);
@@ -2935,7 +2932,6 @@ TEST(ARMv8_float32_vrintX) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the floats.
Assembler assm(AssemblerOptions{});
- Label L, C;
if (CpuFeatures::IsSupported(ARMv8)) {
@@ -3037,7 +3033,6 @@ TEST(ARMv8_vrintX) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
Assembler assm(AssemblerOptions{});
- Label L, C;
if (CpuFeatures::IsSupported(ARMv8)) {
@@ -4094,18 +4089,18 @@ namespace {
std::vector<Float32> Float32Inputs() {
std::vector<Float32> inputs;
FOR_FLOAT32_INPUTS(f) {
- inputs.push_back(Float32::FromBits(bit_cast<uint32_t>(*f)));
+ inputs.push_back(Float32::FromBits(bit_cast<uint32_t>(f)));
}
- FOR_UINT32_INPUTS(bits) { inputs.push_back(Float32::FromBits(*bits)); }
+ FOR_UINT32_INPUTS(bits) { inputs.push_back(Float32::FromBits(bits)); }
return inputs;
}
std::vector<Float64> Float64Inputs() {
std::vector<Float64> inputs;
FOR_FLOAT64_INPUTS(f) {
- inputs.push_back(Float64::FromBits(bit_cast<uint64_t>(*f)));
+ inputs.push_back(Float64::FromBits(bit_cast<uint64_t>(f)));
}
- FOR_UINT64_INPUTS(bits) { inputs.push_back(Float64::FromBits(*bits)); }
+ FOR_UINT64_INPUTS(bits) { inputs.push_back(Float64::FromBits(bits)); }
return inputs;
}
@@ -4187,6 +4182,81 @@ TEST(vneg_64) {
}
}
+TEST(move_pair) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ auto f = AssembleCode<F_piiii>([](MacroAssembler& assm) {
+ RegList used_callee_saved =
+ r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit();
+ __ stm(db_w, sp, used_callee_saved);
+
+ // Save output register bank pointer to r8.
+ __ mov(r8, r0);
+
+ __ mov(r0, Operand(0xabababab));
+ __ mov(r1, Operand(0xbabababa));
+ __ mov(r2, Operand(0x12341234));
+ __ mov(r3, Operand(0x43214321));
+
+ // No overlap:
+ // r4 <- r0
+ // r5 <- r1
+ __ MovePair(r4, r0, r5, r1);
+
+ // Overlap but we can swap moves:
+ // r2 <- r0
+ // r6 <- r2
+ __ MovePair(r2, r0, r6, r2);
+
+ // Overlap, but resolvable by ordering the moves:
+ // r7 <- r3
+ // r3 <- r0
+ __ MovePair(r7, r3, r3, r0);
+
+ // Swap.
+ // r0 <- r1
+ // r1 <- r0
+ __ MovePair(r0, r1, r1, r0);
+
+ // Fill the fake register bank.
+ __ str(r0, MemOperand(r8, 0 * kPointerSize));
+ __ str(r1, MemOperand(r8, 1 * kPointerSize));
+ __ str(r2, MemOperand(r8, 2 * kPointerSize));
+ __ str(r3, MemOperand(r8, 3 * kPointerSize));
+ __ str(r4, MemOperand(r8, 4 * kPointerSize));
+ __ str(r5, MemOperand(r8, 5 * kPointerSize));
+ __ str(r6, MemOperand(r8, 6 * kPointerSize));
+ __ str(r7, MemOperand(r8, 7 * kPointerSize));
+
+ __ ldm(ia_w, sp, used_callee_saved);
+ });
+
+ // Create a fake register bank.
+ uint32_t r[] = {0, 0, 0, 0, 0, 0, 0, 0};
+ f.Call(r, 0, 0, 0, 0);
+
+ // r4 <- r0
+ // r5 <- r1
+ CHECK_EQ(0xabababab, r[4]);
+ CHECK_EQ(0xbabababa, r[5]);
+
+ // r2 <- r0
+ // r6 <- r2
+ CHECK_EQ(0xabababab, r[2]);
+ CHECK_EQ(0x12341234, r[6]);
+
+ // r7 <- r3
+ // r3 <- r0
+ CHECK_EQ(0x43214321, r[7]);
+ CHECK_EQ(0xabababab, r[3]);
+
+ // r0 and r1 should be swapped.
+ CHECK_EQ(0xbabababa, r[0]);
+ CHECK_EQ(0xabababab, r[1]);
+}
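The four cases above pin down MovePair's contract: two register-to-register moves with parallel-move semantics, so a destination may alias the other pair's source. A minimal model of that contract using plain values in place of registers; illustrative only, not V8's implementation, which must resolve the overlap with register moves alone:

#include <cstdint>

// Both sources are read before either destination is written. This phase
// separation is exactly what the no-overlap, partial-overlap, and full-swap
// CHECKs above verify.
void MovePairModel(std::uint32_t* dst0, std::uint32_t src0,
                   std::uint32_t* dst1, std::uint32_t src1) {
  std::uint32_t tmp0 = src0;  // read phase
  std::uint32_t tmp1 = src1;
  *dst0 = tmp0;  // write phase
  *dst1 = tmp1;
}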
+
+
#undef __
} // namespace test_assembler_arm
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index a500c9cb51..44a54df80e 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -162,7 +162,10 @@ static void InitializeVM() {
core.Dump(&masm); \
__ PopCalleeSavedRegisters(); \
__ Ret(); \
- __ GetCode(masm.isolate(), nullptr);
+ { \
+ CodeDesc desc; \
+ __ GetCode(masm.isolate(), &desc); \
+ }
#else // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
@@ -198,11 +201,14 @@ static void InitializeVM() {
test_function(); \
}
-#define END() \
- core.Dump(&masm); \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
- __ GetCode(masm.isolate(), nullptr);
+#define END() \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ { \
+ CodeDesc desc; \
+ __ GetCode(masm.isolate(), &desc); \
+ }
#endif // ifdef USE_SIMULATOR.
@@ -405,6 +411,60 @@ TEST(mov) {
CHECK_EQUAL_64(0x000000000001FFE0UL, x27);
}
+TEST(move_pair) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xabababab);
+ __ Mov(x1, 0xbabababa);
+ __ Mov(x2, 0x12341234);
+ __ Mov(x3, 0x43214321);
+
+ // No overlap:
+ // x4 <- x0
+ // x5 <- x1
+ __ MovePair(x4, x0, x5, x1);
+
+ // Overlap but we can swap moves:
+ // x2 <- x0
+ // x6 <- x2
+ __ MovePair(x2, x0, x6, x2);
+
+ // Overlap, but resolvable by ordering the moves:
+ // x7 <- x3
+ // x3 <- x0
+ __ MovePair(x7, x3, x3, x0);
+
+ // Swap.
+ // x0 <- x1
+ // x1 <- x0
+ __ MovePair(x0, x1, x1, x0);
+
+ END();
+
+ RUN();
+
+ // x4 <- x0
+ // x5 <- x1
+ CHECK_EQUAL_64(0xabababab, x4);
+ CHECK_EQUAL_64(0xbabababa, x5);
+
+ // x2 <- x0
+ // x6 <- x2
+ CHECK_EQUAL_64(0xabababab, x2);
+ CHECK_EQUAL_64(0x12341234, x6);
+
+ // x7 <- x3
+ // x3 <- x0
+ CHECK_EQUAL_64(0x43214321, x7);
+ CHECK_EQUAL_64(0xabababab, x3);
+
+ // x0 and x1 should be swapped.
+ CHECK_EQUAL_64(0xbabababa, x0);
+ CHECK_EQUAL_64(0xabababab, x1);
+}
+
TEST(mov_imm_w) {
INIT_V8();
SETUP();
@@ -2160,7 +2220,7 @@ TEST(far_branch_veneer_broken_link_chain) {
START();
- Label skip, fail, done;
+ Label fail, done;
Label test_1, test_2, test_3;
Label far_target;
@@ -14525,7 +14585,7 @@ static void AbsHelperX(int64_t value) {
__ Mov(x1, value);
if (value != kXMinInt) {
- expected = labs(value);
+ expected = std::abs(value);
Label next;
// The result is representable.
@@ -14535,7 +14595,7 @@ static void AbsHelperX(int64_t value) {
__ Bind(&next);
__ Abs(x13, x1, nullptr, &done);
} else {
- // labs is undefined for kXMinInt but our implementation in the
+ // std::abs is undefined for kXMinInt but our implementation in the
// MacroAssembler will return kXMinInt in such a case.
expected = kXMinInt;
@@ -14715,7 +14775,7 @@ TEST(jump_tables_forward) {
Label base;
__ Adr(x10, &base);
- __ Ldr(x11, MemOperand(x10, index, LSL, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10, index, LSL, kSystemPointerSizeLog2));
__ Br(x11);
__ Bind(&base);
for (int i = 0; i < kNumCases; ++i) {
@@ -14783,7 +14843,7 @@ TEST(jump_tables_backward) {
Label base;
__ Adr(x10, &base);
- __ Ldr(x11, MemOperand(x10, index, LSL, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10, index, LSL, kSystemPointerSizeLog2));
__ Br(x11);
__ Bind(&base);
for (int i = 0; i < kNumCases; ++i) {
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 781dbfcc10..a9c0b60485 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -250,8 +250,8 @@ TEST(AssemblerIa326) {
Assembler assm(AssemblerOptions{},
ExternalAssemblerBuffer(buffer, sizeof buffer));
- __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
- __ movsd(xmm1, Operand(esp, 3 * kPointerSize));
+ __ movsd(xmm0, Operand(esp, 1 * kSystemPointerSize));
+ __ movsd(xmm1, Operand(esp, 3 * kSystemPointerSize));
__ addsd(xmm0, xmm1);
__ mulsd(xmm0, xmm1);
__ subsd(xmm0, xmm1);
@@ -522,8 +522,8 @@ TEST(AssemblerIa32SSE) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
- __ movss(xmm0, Operand(esp, kPointerSize));
- __ movss(xmm1, Operand(esp, 2 * kPointerSize));
+ __ movss(xmm0, Operand(esp, kSystemPointerSize));
+ __ movss(xmm1, Operand(esp, 2 * kSystemPointerSize));
__ shufps(xmm0, xmm0, 0x0);
__ shufps(xmm1, xmm1, 0x0);
__ movaps(xmm2, xmm1);
@@ -559,8 +559,8 @@ TEST(AssemblerIa32SSE3) {
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&assm, SSE3);
- __ movss(xmm0, Operand(esp, kPointerSize));
- __ movss(xmm1, Operand(esp, 2 * kPointerSize));
+ __ movss(xmm0, Operand(esp, kSystemPointerSize));
+ __ movss(xmm1, Operand(esp, 2 * kSystemPointerSize));
__ shufps(xmm0, xmm0, 0x0);
__ shufps(xmm1, xmm1, 0x0);
__ haddps(xmm1, xmm0);
@@ -594,9 +594,9 @@ TEST(AssemblerX64FMA_sd) {
{
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
- __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
- __ movsd(xmm1, Operand(esp, 3 * kPointerSize));
- __ movsd(xmm2, Operand(esp, 5 * kPointerSize));
+ __ movsd(xmm0, Operand(esp, 1 * kSystemPointerSize));
+ __ movsd(xmm1, Operand(esp, 3 * kSystemPointerSize));
+ __ movsd(xmm2, Operand(esp, 5 * kSystemPointerSize));
// arguments in xmm0, xmm1 and xmm2
// xmm0 * xmm1 + xmm2
__ movaps(xmm3, xmm0);
@@ -823,9 +823,9 @@ TEST(AssemblerX64FMA_ss) {
{
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
- __ movss(xmm0, Operand(esp, 1 * kPointerSize));
- __ movss(xmm1, Operand(esp, 2 * kPointerSize));
- __ movss(xmm2, Operand(esp, 3 * kPointerSize));
+ __ movss(xmm0, Operand(esp, 1 * kSystemPointerSize));
+ __ movss(xmm1, Operand(esp, 2 * kSystemPointerSize));
+ __ movss(xmm2, Operand(esp, 3 * kSystemPointerSize));
// arguments in xmm0, xmm1 and xmm2
// xmm0 * xmm1 + xmm2
__ movaps(xmm3, xmm0);
@@ -1403,7 +1403,7 @@ TEST(AssemblerIa32JumpTables1) {
Label done, table;
__ mov(eax, Operand(esp, 4));
- __ jmp(Operand::JumpTable(eax, times_4, &table));
+ __ jmp(Operand::JumpTable(eax, times_system_pointer_size, &table));
__ ud2();
__ bind(&table);
for (int i = 0; i < kNumCases; ++i) {
@@ -1450,7 +1450,7 @@ TEST(AssemblerIa32JumpTables2) {
Label done, table;
__ mov(eax, Operand(esp, 4));
- __ jmp(Operand::JumpTable(eax, times_4, &table));
+ __ jmp(Operand::JumpTable(eax, times_system_pointer_size, &table));
__ ud2();
for (int i = 0; i < kNumCases; ++i) {
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 2f5b13f725..bf45427532 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -273,7 +273,6 @@ TEST(MIPS3) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label L, C;
// Double precision floating point instructions.
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -401,7 +400,6 @@ TEST(MIPS4) {
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label L, C;
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
__ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
@@ -469,7 +467,6 @@ TEST(MIPS5) {
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label L, C;
// Load all structure elements to registers.
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -537,7 +534,6 @@ TEST(MIPS6) {
T t;
Assembler assm(AssemblerOptions{});
- Label L, C;
// Basic word load/store.
__ lw(t0, MemOperand(a0, offsetof(T, ui)) );
@@ -825,7 +821,6 @@ TEST(MIPS10) {
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label L, C;
if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) return;
@@ -4742,7 +4737,7 @@ uint32_t run_jic(int16_t offset) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label get_program_counter, stop_execution;
+ Label stop_execution;
__ push(ra);
__ li(v0, 0);
__ li(t1, 0x66);
@@ -5074,7 +5069,7 @@ uint32_t run_jialc(int16_t offset) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label main_block, get_program_counter;
+ Label main_block;
__ push(ra);
__ li(v0, 0);
__ beq(v0, v0, &main_block);
@@ -5291,7 +5286,7 @@ int32_t run_balc(int32_t offset) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label continue_1, stop_execution;
+ Label continue_1;
__ push(ra);
__ li(v0, 0);
__ li(t8, 0);
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index aa82b359f2..ec5e0e283e 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -283,7 +283,6 @@ TEST(MIPS3) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label L, C;
// Double precision floating point instructions.
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -406,7 +405,6 @@ TEST(MIPS4) {
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label L, C;
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
__ Ldc1(f5, MemOperand(a0, offsetof(T, b)));
@@ -472,7 +470,6 @@ TEST(MIPS5) {
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label L, C;
// Load all structure elements to registers.
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -540,7 +537,6 @@ TEST(MIPS6) {
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label L, C;
// Basic word load/store.
__ Lw(a4, MemOperand(a0, offsetof(T, ui)));
@@ -828,7 +824,6 @@ TEST(MIPS10) {
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label L, C;
if (kArchVariant == kMips64r2) {
// Rewritten for FR=1 FPU mode:
@@ -1386,7 +1381,6 @@ TEST(MIPS16) {
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label L, C;
// Basic 32-bit word load/store, with unsigned data.
__ Lw(a4, MemOperand(a0, offsetof(T, ui)));
@@ -5358,7 +5352,7 @@ uint64_t run_jic(int16_t offset) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label get_program_counter, stop_execution;
+ Label stop_execution;
__ push(ra);
__ li(v0, 0l);
__ li(t1, 0x66);
@@ -5690,7 +5684,7 @@ uint64_t run_jialc(int16_t offset) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label main_block, get_program_counter;
+ Label main_block;
__ push(ra);
__ li(v0, 0l);
__ beq(v0, v0, &main_block);
@@ -5991,7 +5985,7 @@ int64_t run_balc(int32_t offset) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- Label continue_1, stop_execution;
+ Label continue_1;
__ push(ra);
__ li(v0, 0l);
__ li(t8, 0l);
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index 196a3d91df..1f7a9e0eec 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -169,7 +169,6 @@ TEST(3) {
T t;
Assembler assm(AssemblerOptions{});
- Label L, C;
// build a frame
#if V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/test/cctest/test-code-layout.cc b/deps/v8/test/cctest/test-code-layout.cc
index 5dbe81cb62..fa55e40af6 100644
--- a/deps/v8/test/cctest/test-code-layout.cc
+++ b/deps/v8/test/cctest/test-code-layout.cc
@@ -24,13 +24,20 @@ TEST(CodeLayoutWithoutUnwindingInfo) {
CodeDesc code_desc;
code_desc.buffer = buffer;
code_desc.buffer_size = buffer_size;
- code_desc.constant_pool_size = 0;
code_desc.instr_size = buffer_size;
+ code_desc.safepoint_table_offset = buffer_size;
+ code_desc.safepoint_table_size = 0;
+ code_desc.handler_table_offset = buffer_size;
+ code_desc.handler_table_size = 0;
+ code_desc.constant_pool_offset = buffer_size;
+ code_desc.constant_pool_size = 0;
+ code_desc.code_comments_offset = buffer_size;
+ code_desc.code_comments_size = 0;
+ code_desc.reloc_offset = buffer_size;
code_desc.reloc_size = 0;
- code_desc.origin = nullptr;
code_desc.unwinding_info = nullptr;
code_desc.unwinding_info_size = 0;
- code_desc.code_comments_size = 0;
+ code_desc.origin = nullptr;
Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
code_desc, Code::STUB, Handle<Object>::null());
@@ -63,13 +70,20 @@ TEST(CodeLayoutWithUnwindingInfo) {
CodeDesc code_desc;
code_desc.buffer = buffer;
code_desc.buffer_size = buffer_size;
- code_desc.constant_pool_size = 0;
code_desc.instr_size = buffer_size;
+ code_desc.safepoint_table_offset = buffer_size;
+ code_desc.safepoint_table_size = 0;
+ code_desc.handler_table_offset = buffer_size;
+ code_desc.handler_table_size = 0;
+ code_desc.constant_pool_offset = buffer_size;
+ code_desc.constant_pool_size = 0;
+ code_desc.code_comments_offset = buffer_size;
+ code_desc.code_comments_size = 0;
+ code_desc.reloc_offset = buffer_size;
code_desc.reloc_size = 0;
- code_desc.origin = nullptr;
code_desc.unwinding_info = unwinding_info;
code_desc.unwinding_info_size = unwinding_info_size;
- code_desc.code_comments_size = 0;
+ code_desc.origin = nullptr;
Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
code_desc, Code::STUB, Handle<Object>::null());
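Both hunks encode the same invariant for the expanded CodeDesc: when a code object carries no trailing data, every section is empty and begins where the instructions end. A sketch of that layout using a hypothetical stand-in struct (the real CodeDesc lives in src/code-desc.h, per the test-factory.cc hunk below; field names are taken from the hunks above):

// Mirror of the fields initialized above, for illustration only.
struct CodeDescLayout {
  int buffer_size;
  int instr_size;
  int safepoint_table_offset, safepoint_table_size;
  int handler_table_offset, handler_table_size;
  int constant_pool_offset, constant_pool_size;
  int code_comments_offset, code_comments_size;
  int reloc_offset, reloc_size;
};

// With no trailing sections, each offset collapses to buffer_size and each
// size to 0: every section starts where the instructions end and is empty.
void InitEmptyTrailingSections(CodeDescLayout* d) {
  int end = d->buffer_size;
  d->instr_size = end;
  d->safepoint_table_offset = end;
  d->safepoint_table_size = 0;
  d->handler_table_offset = end;
  d->handler_table_size = 0;
  d->constant_pool_offset = end;
  d->constant_pool_size = 0;
  d->code_comments_offset = end;
  d->code_comments_size = 0;
  d->reloc_offset = end;
  d->reloc_size = 0;
}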
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 0c22f4503b..e7f592d1c5 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -14,6 +14,7 @@
#include "src/code-stub-assembler.h"
#include "src/compiler/node.h"
#include "src/debug/debug.h"
+#include "src/hash-seed-inl.h"
#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -21,9 +22,11 @@
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/smi.h"
#include "src/objects/struct-inl.h"
+#include "src/transitions-inl.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -348,7 +351,7 @@ TEST(ComputeIntegerHash) {
Handle<Smi> key(Smi::FromInt(k), isolate);
Handle<Object> result = ft.Call(key).ToHandleChecked();
- uint32_t hash = ComputeSeededHash(k, isolate->heap()->HashSeed());
+ uint32_t hash = ComputeSeededHash(k, HashSeed(isolate));
Smi expected = Smi::FromInt(hash);
CHECK_EQ(expected, Smi::cast(*result));
}
@@ -1708,14 +1711,13 @@ TEST(AllocateNameDictionary) {
{
for (int i = 0; i < 256; i = i * 1.1 + 1) {
- Handle<Object> result =
- ft.Call(handle(Smi::FromInt(i), isolate)).ToHandleChecked();
+ Handle<HeapObject> result = Handle<HeapObject>::cast(
+ ft.Call(handle(Smi::FromInt(i), isolate)).ToHandleChecked());
Handle<NameDictionary> dict = NameDictionary::New(isolate, i);
// Both dictionaries should be memory equal.
- int size =
- FixedArrayBase::kHeaderSize + (dict->length() - 1) * kPointerSize;
- CHECK_EQ(0, memcmp(reinterpret_cast<void*>(dict->ptr()),
- reinterpret_cast<void*>(result->ptr()), size));
+ int size = dict->Size();
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(dict->address()),
+ reinterpret_cast<void*>(result->address()), size));
}
}
}
@@ -2089,10 +2091,10 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
Handle<JSArray> array = isolate->factory()->NewJSArray(
kind_, 2, initial_size, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
Object::SetElement(isolate, array, 0, Handle<Smi>(Smi::FromInt(1), isolate),
- LanguageMode::kSloppy)
+ kDontThrow)
.Check();
Object::SetElement(isolate, array, 1, Handle<Smi>(Smi::FromInt(2), isolate),
- LanguageMode::kSloppy)
+ kDontThrow)
.Check();
CodeStubArguments args(this, IntPtrConstant(kNumParams));
TVariable<IntPtrT> arg_index(this);
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index f05056a2de..17f7a7d851 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -35,8 +35,10 @@
#include "src/compiler.h"
#include "src/disasm.h"
#include "src/heap/factory.h"
+#include "src/heap/spaces.h"
#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
+#include "src/objects/allocation-site-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -55,7 +57,7 @@ static void SetGlobalProperty(const char* name, Object value) {
isolate->factory()->InternalizeUtf8String(name);
Handle<JSObject> global(isolate->context()->global_object(), isolate);
Runtime::SetObjectProperty(isolate, global, internalized_name, object,
- LanguageMode::kSloppy, StoreOrigin::kMaybeKeyed)
+ StoreOrigin::kMaybeKeyed, Just(kDontThrow))
.Check();
}
@@ -791,6 +793,30 @@ TEST(InvocationCount) {
CHECK_EQ(4, foo->feedback_vector()->invocation_count());
}
+TEST(SafeToSkipArgumentsAdaptor) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ CompileRun(
+ "function a() { \"use strict\"; }; a();"
+ "function b() { }; b();"
+ "function c() { \"use strict\"; return arguments; }; c();"
+ "function d(...args) { return args; }; d();"
+ "function e() { \"use strict\"; return eval(\"\"); }; e();"
+ "function f(x, y) { \"use strict\"; return x + y; }; f(1, 2);");
+ Handle<JSFunction> a = Handle<JSFunction>::cast(GetGlobalProperty("a"));
+ CHECK(a->shared()->is_safe_to_skip_arguments_adaptor());
+ Handle<JSFunction> b = Handle<JSFunction>::cast(GetGlobalProperty("b"));
+ CHECK(!b->shared()->is_safe_to_skip_arguments_adaptor());
+ Handle<JSFunction> c = Handle<JSFunction>::cast(GetGlobalProperty("c"));
+ CHECK(!c->shared()->is_safe_to_skip_arguments_adaptor());
+ Handle<JSFunction> d = Handle<JSFunction>::cast(GetGlobalProperty("d"));
+ CHECK(!d->shared()->is_safe_to_skip_arguments_adaptor());
+ Handle<JSFunction> e = Handle<JSFunction>::cast(GetGlobalProperty("e"));
+ CHECK(!e->shared()->is_safe_to_skip_arguments_adaptor());
+ Handle<JSFunction> f = Handle<JSFunction>::cast(GetGlobalProperty("f"));
+ CHECK(f->shared()->is_safe_to_skip_arguments_adaptor());
+}
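Taken together, the six functions suggest the rule under test: the arguments adaptor frame can be skipped only for strict-mode functions that cannot observe their actual arguments. A hedged model of that predicate, inferred from these cases rather than lifted from V8's implementation:

// a() and f(x, y) are safe; sloppy b(), arguments-using c(), rest-using
// d(...args), and eval-calling e() are not.
bool SafeToSkipArgumentsAdaptorModel(bool is_strict, bool uses_arguments,
                                     bool has_rest_parameter,
                                     bool calls_eval) {
  return is_strict && !uses_arguments && !has_rest_parameter && !calls_eval;
}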
+
TEST(ShallowEagerCompilation) {
i::FLAG_always_opt = false;
CcTest::InitializeVM();
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 3a3063ed3c..e26838d0a8 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -41,6 +41,7 @@
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-listener.h"
+#include "src/profiler/tracing-cpu-profiler.h"
#include "src/source-position-table.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -2405,11 +2406,16 @@ class CpuProfileEventChecker : public v8::platform::tracing::TraceWriter {
profile_id_ = trace_event->id();
v8::ConvertableToTraceFormat* arg =
trace_event->arg_convertables()[0].get();
+ result_json_ += result_json_.empty() ? "[" : ",\n";
arg->AppendAsTraceFormat(&result_json_);
}
- void Flush() override {}
+ void Flush() override { result_json_ += "]"; }
- std::string result_json() const { return result_json_; }
+ const std::string& result_json() const { return result_json_; }
+ void Reset() {
+ result_json_.clear();
+ profile_id_ = 0;
+ }
private:
std::string result_json_;
@@ -2419,50 +2425,61 @@ class CpuProfileEventChecker : public v8::platform::tracing::TraceWriter {
} // namespace
TEST(TracingCpuProfiler) {
- v8::Platform* old_platform = i::V8::GetCurrentPlatform();
- std::unique_ptr<v8::Platform> default_platform =
- v8::platform::NewDefaultPlatform();
- i::V8::SetPlatformForTesting(default_platform.get());
-
- auto tracing = base::make_unique<v8::platform::tracing::TracingController>();
- v8::platform::tracing::TracingController* tracing_controller = tracing.get();
- static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
- ->SetTracingController(std::move(tracing));
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
+ v8::Context::Scope context_scope(env);
CpuProfileEventChecker* event_checker = new CpuProfileEventChecker();
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, event_checker);
+ auto* tracing_controller =
+ static_cast<v8::platform::tracing::TracingController*>(
+ i::V8::GetCurrentPlatform()->GetTracingController());
tracing_controller->Initialize(ring_buffer);
- TraceConfig* trace_config = new TraceConfig();
- trace_config->AddIncludedCategory(
- TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"));
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- {
+ bool result = false;
+ for (int run_duration = 50; !result; run_duration += 50) {
+ TraceConfig* trace_config = new TraceConfig();
+ trace_config->AddIncludedCategory(
+ TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"));
+ trace_config->AddIncludedCategory(
+ TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires"));
+
+ std::string test_code = R"(
+ function foo() {
+ let s = 0;
+ const endTime = Date.now() + )" +
+ std::to_string(run_duration) + R"(
+ while (Date.now() < endTime) s += Math.cos(s);
+ return s;
+ }
+ foo();)";
+
tracing_controller->StartTracing(trace_config);
- CompileRun("function foo() { } foo();");
+ CompileRun(test_code.c_str());
tracing_controller->StopTracing();
- CompileRun("function bar() { } bar();");
- }
- const char* profile_checker =
- "function checkProfile(profile) {\n"
- " if (typeof profile['startTime'] !== 'number') return 'startTime';\n"
- " return '';\n"
- "}\n"
- "checkProfile(";
- std::string profile_json = event_checker->result_json();
- CHECK_LT(0u, profile_json.length());
- printf("Profile JSON: %s\n", profile_json.c_str());
- std::string code = profile_checker + profile_json + ")";
- v8::Local<v8::Value> result =
- CompileRunChecked(CcTest::isolate(), code.c_str());
- v8::String::Utf8Value value(CcTest::isolate(), result);
- printf("Check result: %*s\n", value.length(), *value);
- CHECK_EQ(0, value.length());
+ std::string profile_json = event_checker->result_json();
+ event_checker->Reset();
+ CHECK_LT(0u, profile_json.length());
+ printf("Profile JSON: %s\n", profile_json.c_str());
+
+ std::string profile_checker_code = R"(
+ function checkProfile(json) {
+ const profile_header = json[0];
+ if (typeof profile_header['startTime'] !== 'number')
+ return false;
+ return json.some(event => (event.lines || []).some(line => line));
+ }
+ checkProfile()" + profile_json +
+ ")";
+ result = CompileRunChecked(CcTest::isolate(), profile_checker_code.c_str())
+ ->IsTrue();
+ }
- i::V8::SetPlatformForTesting(old_platform);
+ static_cast<v8::platform::tracing::TracingController*>(
+ i::V8::GetCurrentPlatform()->GetTracingController())
+ ->Initialize(nullptr);
}
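The rewritten test retries with a growing workload rather than compiling a fixed snippet once: sampling is probabilistic, so a short run may legitimately produce no line ticks. The shape of that retry pattern in isolation, with a callback standing in for one compile-trace-verify pass (illustrative only):

#include <functional>

// Grow the busy-loop duration until the profile check succeeds; each pass
// runs a fresh foo() that spins for run_duration milliseconds.
int RetryUntilProfileChecks(const std::function<bool(int)>& trace_and_check) {
  int run_duration = 50;
  while (!trace_and_check(run_duration)) run_duration += 50;
  return run_duration;
}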
TEST(Issue763073) {
@@ -2688,6 +2705,54 @@ TEST(MultipleProfilers) {
profiler2->StopProfiling("2");
}
+// Tests that logged CodeCreateEvent calls do not crash a reused CpuProfiler.
+// crbug.com/929928
+TEST(CrashReusedProfiler) {
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ std::unique_ptr<CpuProfiler> profiler(new CpuProfiler(isolate));
+ profiler->StartProfiling("1");
+ profiler->StopProfiling("1");
+
+ profiler->StartProfiling("2");
+ CreateCode(&env);
+ profiler->StopProfiling("2");
+}
+
+// Tests that samples from different profilers on the same isolate do not leak
+// samples to each other. See crbug.com/v8/8835.
+TEST(MultipleProfilersSampleIndependently) {
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ // Create two profilers: one slow-ticking and one fast-ticking. Ensure
+ // that the slow-ticking profiler does not receive samples from the
+ // fast-ticking one.
+ std::unique_ptr<CpuProfiler> slow_profiler(
+ new CpuProfiler(CcTest::i_isolate()));
+ slow_profiler->set_sampling_interval(base::TimeDelta::FromSeconds(1));
+ slow_profiler->StartProfiling("1", true);
+
+ CompileRun(R"(
+ function start() {
+ let val = 1;
+ for (let i = 0; i < 10e3; i++) {
+ val = (val * 2) % 3;
+ }
+ return val;
+ }
+ )");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
+ ProfilerHelper helper(env.local());
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 100, 0, true);
+
+ auto slow_profile = slow_profiler->StopProfiling("1");
+ CHECK_GT(profile->GetSamplesCount(), slow_profile->samples_count());
+}
+
void ProfileSomeCode(v8::Isolate* isolate) {
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope scope(isolate);
@@ -2740,19 +2805,45 @@ TEST(MultipleIsolates) {
thread2.Join();
}
-int GetSourcePositionEntryCount(i::Isolate* isolate, const char* source) {
+// Tests that StopProfiling doesn't wait for the next sample tick in order to
+// stop, but rather exits early before a given wait threshold.
+TEST(FastStopProfiling) {
+ static const base::TimeDelta kLongInterval = base::TimeDelta::FromSeconds(10);
+ static const base::TimeDelta kWaitThreshold = base::TimeDelta::FromSeconds(5);
+
+ std::unique_ptr<CpuProfiler> profiler(new CpuProfiler(CcTest::i_isolate()));
+ profiler->set_sampling_interval(kLongInterval);
+ profiler->StartProfiling("", true);
+
+ v8::Platform* platform = v8::internal::V8::GetCurrentPlatform();
+ double start = platform->CurrentClockTimeMillis();
+ profiler->StopProfiling("");
+ double duration = platform->CurrentClockTimeMillis() - start;
+
+ CHECK_LT(duration, kWaitThreshold.InMillisecondsF());
+}
+
+enum class EntryCountMode { kAll, kOnlyInlined };
+
+// Count the number of unique source positions.
+int GetSourcePositionEntryCount(i::Isolate* isolate, const char* source,
+ EntryCountMode mode = EntryCountMode::kAll) {
+ std::unordered_set<int64_t> raw_position_set;
i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*CompileRun(source)));
if (function->IsInterpreted()) return -1;
i::Handle<i::Code> code(function->code(), isolate);
i::SourcePositionTableIterator iterator(
ByteArray::cast(code->source_position_table()));
- int count = 0;
+
while (!iterator.done()) {
- count++;
+ if (mode == EntryCountMode::kAll ||
+ iterator.source_position().isInlined()) {
+ raw_position_set.insert(iterator.source_position().raw());
+ }
iterator.Advance();
}
- return count;
+ return static_cast<int>(raw_position_set.size());
}
UNINITIALIZED_TEST(DetailedSourcePositionAPI) {
@@ -2795,6 +2886,68 @@ UNINITIALIZED_TEST(DetailedSourcePositionAPI) {
isolate->Dispose();
}
+UNINITIALIZED_TEST(DetailedSourcePositionAPI_Inlining) {
+ i::FLAG_detailed_line_info = false;
+ i::FLAG_turbo_inlining = true;
+ i::FLAG_stress_inline = true;
+ i::FLAG_always_opt = false;
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+ const char* source = R"(
+ function foo(x) {
+ return bar(x) + 1;
+ }
+
+ function bar(x) {
+ var y = 1;
+ for (var i = 0; i < x; ++i) {
+ y = y * x;
+ }
+ return x;
+ }
+
+ foo(5);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(5);
+ foo;
+ )";
+
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+ CHECK(!i_isolate->NeedsDetailedOptimizedCodeLineInfo());
+
+ int non_detailed_positions =
+ GetSourcePositionEntryCount(i_isolate, source, EntryCountMode::kAll);
+ int non_detailed_inlined_positions = GetSourcePositionEntryCount(
+ i_isolate, source, EntryCountMode::kOnlyInlined);
+
+ v8::CpuProfiler::UseDetailedSourcePositionsForProfiling(isolate);
+ CHECK(i_isolate->NeedsDetailedOptimizedCodeLineInfo());
+
+ int detailed_positions =
+ GetSourcePositionEntryCount(i_isolate, source, EntryCountMode::kAll);
+ int detailed_inlined_positions = GetSourcePositionEntryCount(
+ i_isolate, source, EntryCountMode::kOnlyInlined);
+
+ if (non_detailed_positions == -1) {
+ CHECK_EQ(non_detailed_positions, detailed_positions);
+ } else {
+ CHECK_LT(non_detailed_positions, detailed_positions);
+ CHECK_LT(non_detailed_inlined_positions, detailed_inlined_positions);
+ }
+ }
+
+ isolate->Dispose();
+}
+
} // namespace test_cpu_profiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index bc9a11a9f1..c6a163f554 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -2949,8 +2949,8 @@ TEST(NoBreakWhenBootstrapping) {
{
// Create a context with an extension to make sure that some JavaScript
// code is executed during bootstrapping.
- v8::RegisterExtension(new v8::Extension("simpletest",
- kSimpleExtensionSource));
+ v8::RegisterExtension(v8::base::make_unique<v8::Extension>(
+ "simpletest", kSimpleExtensionSource));
const char* extension_names[] = { "simpletest" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::HandleScope handle_scope(isolate);
@@ -4022,7 +4022,7 @@ UNINITIALIZED_TEST(DebugSetOutOfMemoryListener) {
CHECK(!near_heap_limit_callback_called);
// The following allocation fails unless the out-of-memory callback
// increases the heap limit.
- int length = 10 * i::MB / i::kPointerSize;
+ int length = 10 * i::MB / i::kTaggedSize;
i_isolate->factory()->NewFixedArray(length, i::TENURED);
CHECK(near_heap_limit_callback_called);
isolate->RemoveNearHeapLimitCallback(NearHeapLimitCallback, 0);
@@ -4037,7 +4037,8 @@ TEST(DebugCoverage) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::debug::Coverage::SelectMode(isolate, v8::debug::Coverage::kPreciseCount);
+ v8::debug::Coverage::SelectMode(isolate,
+ v8::debug::CoverageMode::kPreciseCount);
v8::Local<v8::String> source = v8_str(
"function f() {\n"
"}\n"
@@ -4092,7 +4093,8 @@ TEST(DebugCoverageWithCoverageOutOfScope) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::debug::Coverage::SelectMode(isolate, v8::debug::Coverage::kPreciseCount);
+ v8::debug::Coverage::SelectMode(isolate,
+ v8::debug::CoverageMode::kPreciseCount);
v8::Local<v8::String> source = v8_str(
"function f() {\n"
"}\n"
@@ -4163,7 +4165,8 @@ TEST(DebugCoverageWithScriptDataOutOfScope) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::debug::Coverage::SelectMode(isolate, v8::debug::Coverage::kPreciseCount);
+ v8::debug::Coverage::SelectMode(isolate,
+ v8::debug::CoverageMode::kPreciseCount);
v8::Local<v8::String> source = v8_str(
"function f() {\n"
"}\n"
@@ -4500,3 +4503,99 @@ TEST(Regress517592) {
CHECK_EQ(delegate.break_count(), 1);
v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
}
+
+TEST(GetPrivateFields) {
+ LocalContext env;
+ v8::Isolate* v8_isolate = CcTest::isolate();
+ v8::internal::Isolate* isolate = CcTest::i_isolate();
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Context> context = env.local();
+ v8::internal::FLAG_harmony_class_fields = true;
+ v8::internal::FLAG_harmony_private_fields = true;
+ v8::Local<v8::String> source = v8_str(
+ "var X = class {\n"
+ " #foo = 1;\n"
+ " #bar = function() {};\n"
+ "}\n"
+ "var x = new X()");
+ CompileRun(source);
+ v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "x"))
+ .ToLocalChecked());
+ v8::Local<v8::Array> private_names =
+ v8::debug::GetPrivateFields(context, object).ToLocalChecked();
+
+ for (int i = 0; i < 4; i = i + 2) {
+ Handle<v8::internal::JSReceiver> private_name =
+ v8::Utils::OpenHandle(*private_names->Get(context, i)
+ .ToLocalChecked()
+ ->ToObject(context)
+ .ToLocalChecked());
+ Handle<v8::internal::JSValue> private_value =
+ Handle<v8::internal::JSValue>::cast(private_name);
+ Handle<v8::internal::Symbol> priv_symbol(
+ v8::internal::Symbol::cast(private_value->value()), isolate);
+ CHECK(priv_symbol->is_private_name());
+ }
+
+ source = v8_str(
+ "var Y = class {\n"
+ " #baz = 2;\n"
+ "}\n"
+ "var X = class extends Y{\n"
+ " #foo = 1;\n"
+ " #bar = function() {};\n"
+ "}\n"
+ "var x = new X()");
+ CompileRun(source);
+ object = v8::Local<v8::Object>::Cast(
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "x"))
+ .ToLocalChecked());
+ private_names = v8::debug::GetPrivateFields(context, object).ToLocalChecked();
+
+ for (int i = 0; i < 6; i = i + 2) {
+ Handle<v8::internal::JSReceiver> private_name =
+ v8::Utils::OpenHandle(*private_names->Get(context, i)
+ .ToLocalChecked()
+ ->ToObject(context)
+ .ToLocalChecked());
+ Handle<v8::internal::JSValue> private_value =
+ Handle<v8::internal::JSValue>::cast(private_name);
+ Handle<v8::internal::Symbol> priv_symbol(
+ v8::internal::Symbol::cast(private_value->value()), isolate);
+ CHECK(priv_symbol->is_private_name());
+ }
+
+ source = v8_str(
+ "var Y = class {\n"
+ " constructor() {"
+ " return new Proxy({}, {});"
+ " }"
+ "}\n"
+ "var X = class extends Y{\n"
+ " #foo = 1;\n"
+ " #bar = function() {};\n"
+ "}\n"
+ "var x = new X()");
+ CompileRun(source);
+ object = v8::Local<v8::Object>::Cast(
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "x"))
+ .ToLocalChecked());
+ private_names = v8::debug::GetPrivateFields(context, object).ToLocalChecked();
+
+ for (int i = 0; i < 4; i = i + 2) {
+ Handle<v8::internal::JSReceiver> private_name =
+ v8::Utils::OpenHandle(*private_names->Get(context, i)
+ .ToLocalChecked()
+ ->ToObject(context)
+ .ToLocalChecked());
+ Handle<v8::internal::JSValue> private_value =
+ Handle<v8::internal::JSValue>::cast(private_name);
+ Handle<v8::internal::Symbol> priv_symbol(
+ v8::internal::Symbol::cast(private_value->value()), isolate);
+ CHECK(priv_symbol->is_private_name());
+ }
+}
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index cce41a3738..2e9bc90fac 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -427,61 +427,6 @@ TEST(AbsentInPrototype) {
-class ExistsInHiddenPrototypeContext: public DeclarationContext {
- public:
- ExistsInHiddenPrototypeContext() {
- hidden_proto_ = FunctionTemplate::New(CcTest::isolate());
- hidden_proto_->SetHiddenPrototype(true);
- }
-
- protected:
- v8::Local<Integer> Query(Local<Name> key) override {
- // Let it seem that the property exists in the hidden prototype object.
- return Integer::New(isolate(), v8::None);
- }
-
- // Install the hidden prototype after the global object has been created.
- void PostInitializeContext(Local<Context> context) override {
- Local<Object> global_object = context->Global();
- Local<Object> hidden_proto = hidden_proto_->GetFunction(context)
- .ToLocalChecked()
- ->NewInstance(context)
- .ToLocalChecked();
- Local<Object> inner_global =
- Local<Object>::Cast(global_object->GetPrototype());
- inner_global->SetPrototype(context, hidden_proto).FromJust();
- }
-
- // Use the hidden prototype as the holder for the interceptors.
- Local<ObjectTemplate> GetHolder(Local<FunctionTemplate> function) override {
- return hidden_proto_->InstanceTemplate();
- }
-
- private:
- Local<FunctionTemplate> hidden_proto_;
-};
-
-
-TEST(ExistsInHiddenPrototype) {
- HandleScope scope(CcTest::isolate());
-
- { ExistsInHiddenPrototypeContext context;
- context.Check("var x; x", 0, 0, 0, EXPECT_RESULT,
- Undefined(CcTest::isolate()));
- }
-
- { ExistsInHiddenPrototypeContext context;
- context.Check("var x = 0; x", 0, 0, 0, EXPECT_RESULT,
- Number::New(CcTest::isolate(), 0));
- }
-
- { ExistsInHiddenPrototypeContext context;
- context.Check("function x() { }; x", 0, 1, 1, EXPECT_RESULT);
- }
-}
-
-
-
class SimpleContext {
public:
SimpleContext()
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index feeaeb4214..a06c18df02 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -36,6 +36,7 @@
#include "src/heap/spaces.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/roots.h"
#include "test/cctest/heap/heap-utils.h"
namespace v8 {
@@ -218,7 +219,7 @@ TEST(HashTableRehash) {
for (int i = 0; i < capacity - 1; i++) {
t->insert(i, i * i, i);
}
- t->Rehash(isolate);
+ t->Rehash(ReadOnlyRoots(isolate));
for (int i = 0; i < capacity - 1; i++) {
CHECK_EQ(i, t->lookup(i * i));
}
@@ -231,7 +232,7 @@ TEST(HashTableRehash) {
for (int i = 0; i < capacity / 2; i++) {
t->insert(i, i * i, i);
}
- t->Rehash(isolate);
+ t->Rehash(ReadOnlyRoots(isolate));
for (int i = 0; i < capacity / 2; i++) {
CHECK_EQ(i, t->lookup(i * i));
}
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index 20ccf77fd0..a401e031f9 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -74,7 +74,10 @@ namespace internal {
#define COMPARE(ASM, EXP) \
assm->Reset(); \
assm->ASM; \
- assm->GetCode(isolate, nullptr); \
+ { \
+ CodeDesc desc; \
+ assm->GetCode(isolate, &desc); \
+ } \
decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
encoding = *reinterpret_cast<uint32_t*>(buf); \
if (strcmp(disasm->GetOutput(), EXP) != 0) { \
@@ -86,7 +89,10 @@ namespace internal {
#define COMPARE_PREFIX(ASM, EXP) \
assm->Reset(); \
assm->ASM; \
- assm->GetCode(isolate, nullptr); \
+ { \
+ CodeDesc desc; \
+ assm->GetCode(isolate, &desc); \
+ } \
decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
encoding = *reinterpret_cast<uint32_t*>(buf); \
if (strncmp(disasm->GetOutput(), EXP, strlen(EXP)) != 0) { \
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 085fd4be7c..15eef72a6b 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -205,8 +205,7 @@ TEST(DisasmX64) {
__ incq(Operand(rbx, rcx, times_4, 10000));
__ pushq(Operand(rbx, rcx, times_4, 10000));
__ popq(Operand(rbx, rcx, times_4, 10000));
- // TODO(mstarzinger): The following is protected.
- // __ jmp(Operand(rbx, rcx, times_4, 10000));
+ __ jmp(Operand(rbx, rcx, times_4, 10000));
__ leaq(rdx, Operand(rbx, rcx, times_4, 10000));
__ orq(rdx, Immediate(12345));
@@ -291,8 +290,7 @@ TEST(DisasmX64) {
__ nop();
__ jmp(&L1);
- // TODO(mstarzinger): The following is protected.
- // __ jmp(Operand(rbx, rcx, times_4, 10000));
+ __ jmp(Operand(rbx, rcx, times_4, 10000));
__ jmp(ic, RelocInfo::CODE_TARGET);
__ nop();
diff --git a/deps/v8/test/cctest/test-elements-kind.cc b/deps/v8/test/cctest/test-elements-kind.cc
index ca382a60c1..d7f6ccb852 100644
--- a/deps/v8/test/cctest/test-elements-kind.cc
+++ b/deps/v8/test/cctest/test-elements-kind.cc
@@ -113,7 +113,8 @@ TEST(JSObjectInObjectAddingProperties) {
factory->NewFunctionForTest(factory->empty_string());
int nof_inobject_properties = 10;
// force in object properties by changing the expected_nof_properties
- function->shared()->set_expected_nof_properties(nof_inobject_properties);
+ // (we always reserve 8 slots of in-object slack on top).
+ function->shared()->set_expected_nof_properties(nof_inobject_properties - 8);
Handle<Object> value(Smi::FromInt(42), isolate);
Handle<JSObject> object = factory->NewJSObject(function);
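The adjustment works because the slack is additive; a sketch of the arithmetic, assuming the 8-slot reservation the comment describes:

constexpr int kInObjectSlack = 8;    // assumed from the comment above
constexpr int kWantedInObject = 10;  // nof_inobject_properties
constexpr int kExpectedNof = kWantedInObject - kInObjectSlack;  // == 2
static_assert(kExpectedNof + kInObjectSlack == kWantedInObject,
              "requesting 2 expected properties yields 10 in-object slots");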
diff --git a/deps/v8/test/cctest/test-factory.cc b/deps/v8/test/cctest/test-factory.cc
index a282f4bccd..abb77b5b6b 100644
--- a/deps/v8/test/cctest/test-factory.cc
+++ b/deps/v8/test/cctest/test-factory.cc
@@ -4,6 +4,7 @@
#include "include/v8.h"
+#include "src/code-desc.h"
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index c241ac6b7d..c8ffddbf7b 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -176,14 +176,14 @@ TEST(VectorCallICStates) {
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
FeedbackNexus nexus(feedback_vector, slot);
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
CompileRun("f(function() { return 16; })");
- CHECK_EQ(GENERIC, nexus.StateFromFeedback());
+ CHECK_EQ(GENERIC, nexus.ic_state());
// After a collection, state should remain GENERIC.
CcTest::CollectAllGarbage();
- CHECK_EQ(GENERIC, nexus.StateFromFeedback());
+ CHECK_EQ(GENERIC, nexus.ic_state());
}
TEST(VectorCallFeedback) {
@@ -206,14 +206,14 @@ TEST(VectorCallFeedback) {
FeedbackSlot slot(0);
FeedbackNexus nexus(feedback_vector, slot);
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
HeapObject heap_object;
CHECK(nexus.GetFeedback()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(*foo, heap_object);
CcTest::CollectAllGarbage();
// It should stay monomorphic even after a GC.
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
}
TEST(VectorCallFeedbackForArray) {
@@ -233,14 +233,14 @@ TEST(VectorCallFeedbackForArray) {
FeedbackSlot slot(0);
FeedbackNexus nexus(feedback_vector, slot);
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
HeapObject heap_object;
CHECK(nexus.GetFeedback()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(*isolate->array_function(), heap_object);
CcTest::CollectAllGarbage();
// It should stay monomorphic even after a GC.
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
}
size_t GetFeedbackVectorLength(Isolate* isolate, const char* src,
@@ -326,15 +326,15 @@ TEST(VectorCallCounts) {
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
FeedbackNexus nexus(feedback_vector, slot);
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
CompileRun("f(foo); f(foo);");
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
CHECK_EQ(3, nexus.GetCallCount());
// Send the IC megamorphic, but we should still have incrementing counts.
CompileRun("f(function() { return 12; });");
- CHECK_EQ(GENERIC, nexus.StateFromFeedback());
+ CHECK_EQ(GENERIC, nexus.ic_state());
CHECK_EQ(4, nexus.GetCallCount());
}
@@ -357,17 +357,17 @@ TEST(VectorConstructCounts) {
FeedbackSlot slot(0);
FeedbackNexus nexus(feedback_vector, slot);
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
CHECK(feedback_vector->Get(slot)->IsWeak());
CompileRun("f(Foo); f(Foo);");
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
CHECK_EQ(3, nexus.GetCallCount());
// Send the IC megamorphic, but we should still have incrementing counts.
CompileRun("f(function() {});");
- CHECK_EQ(GENERIC, nexus.StateFromFeedback());
+ CHECK_EQ(GENERIC, nexus.ic_state());
CHECK_EQ(4, nexus.GetCallCount());
}
@@ -424,40 +424,40 @@ TEST(VectorLoadICStates) {
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
FeedbackNexus nexus(feedback_vector, slot);
- CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(PREMONOMORPHIC, nexus.ic_state());
CompileRun("f(o)");
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
// Verify that the monomorphic map is the one we expect.
v8::MaybeLocal<v8::Value> v8_o =
CcTest::global()->Get(context.local(), v8_str("o"));
Handle<JSObject> o =
Handle<JSObject>::cast(v8::Utils::OpenHandle(*v8_o.ToLocalChecked()));
- CHECK_EQ(o->map(), nexus.FindFirstMap());
+ CHECK_EQ(o->map(), nexus.GetFirstMap());
// Now go polymorphic.
CompileRun("f({ blarg: 3, foo: 2 })");
- CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(POLYMORPHIC, nexus.ic_state());
CompileRun(
"delete o.foo;"
"f(o)");
- CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(POLYMORPHIC, nexus.ic_state());
CompileRun("f({ blarg: 3, torino: 10, foo: 2 })");
- CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(POLYMORPHIC, nexus.ic_state());
MapHandles maps;
nexus.ExtractMaps(&maps);
CHECK_EQ(4, maps.size());
// Finally driven megamorphic.
CompileRun("f({ blarg: 3, gran: 3, torino: 10, foo: 2 })");
- CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
- CHECK(nexus.FindFirstMap().is_null());
+ CHECK_EQ(MEGAMORPHIC, nexus.ic_state());
+ CHECK(nexus.GetFirstMap().is_null());
// After a collection, state should not be reset to PREMONOMORPHIC.
CcTest::CollectAllGarbage();
- CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MEGAMORPHIC, nexus.ic_state());
}
TEST(VectorLoadGlobalICSlotSharing) {
@@ -490,10 +490,8 @@ TEST(VectorLoadGlobalICSlotSharing) {
CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kLoadGlobalInsideTypeof);
FeedbackSlot slot1 = helper.slot(0);
FeedbackSlot slot2 = helper.slot(1);
- CHECK_EQ(MONOMORPHIC,
- FeedbackNexus(feedback_vector, slot1).StateFromFeedback());
- CHECK_EQ(MONOMORPHIC,
- FeedbackNexus(feedback_vector, slot2).StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, FeedbackNexus(feedback_vector, slot1).ic_state());
+ CHECK_EQ(MONOMORPHIC, FeedbackNexus(feedback_vector, slot2).ic_state());
}
@@ -517,17 +515,17 @@ TEST(VectorLoadICOnSmi) {
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
FeedbackNexus nexus(feedback_vector, slot);
- CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(PREMONOMORPHIC, nexus.ic_state());
CompileRun("f(34)");
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
// Verify that the monomorphic map is the one we expect.
Map number_map = ReadOnlyRoots(heap).heap_number_map();
- CHECK_EQ(number_map, nexus.FindFirstMap());
+ CHECK_EQ(number_map, nexus.GetFirstMap());
// Now go polymorphic on o.
CompileRun("f(o)");
- CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(POLYMORPHIC, nexus.ic_state());
MapHandles maps;
nexus.ExtractMaps(&maps);
@@ -550,7 +548,7 @@ TEST(VectorLoadICOnSmi) {
// The degree of polymorphism doesn't change.
CompileRun("f(100)");
- CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(POLYMORPHIC, nexus.ic_state());
MapHandles maps2;
nexus.ExtractMaps(&maps2);
CHECK_EQ(2, maps2.size());
@@ -720,7 +718,7 @@ TEST(VectorStoreICBasic) {
CHECK_EQ(1, helper.slot_count());
FeedbackSlot slot(0);
FeedbackNexus nexus(feedback_vector, slot);
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
}
TEST(StoreOwnIC) {
@@ -746,7 +744,7 @@ TEST(StoreOwnIC) {
CHECK_SLOT_KIND(helper, 0, FeedbackSlotKind::kLiteral);
CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kStoreOwnNamed);
FeedbackNexus nexus(feedback_vector, helper.slot(1));
- CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
}
} // namespace
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index dca13242ba..49b0f92011 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -124,18 +124,10 @@ class Expectations {
kinds_[index] = kind;
locations_[index] = location;
if (kind == kData && location == kField &&
- IsTransitionableFastElementsKind(elements_kind_) &&
- Map::IsInplaceGeneralizableField(constness, representation,
- FieldType::cast(*value))) {
- // Maps with transitionable elements kinds must have non in-place
- // generalizable fields.
- if (FLAG_track_constant_fields && FLAG_modify_map_inplace &&
- constness == PropertyConstness::kConst) {
- constness = PropertyConstness::kMutable;
- }
- if (representation.IsHeapObject() && !FieldType::cast(*value)->IsAny()) {
- value = FieldType::Any(isolate_);
- }
+ IsTransitionableFastElementsKind(elements_kind_)) {
+ // Maps with transitionable elements kinds must have the most general
+ // field type.
+ value = FieldType::Any(isolate_);
}
constnesses_[index] = constness;
attributes_[index] = attributes;
@@ -278,6 +270,7 @@ class Expectations {
if (details.attributes() != expected_attributes) return false;
Representation expected_representation = representations_[descriptor];
+
if (!details.representation().Equals(expected_representation)) return false;
Object expected_value = *values_[descriptor];
@@ -341,6 +334,12 @@ class Expectations {
return map;
}
+ void ChangeAttributesForAllProperties(PropertyAttributes attributes) {
+ for (int i = 0; i < number_of_properties_; i++) {
+ attributes_[i] = attributes;
+ }
+ }
+
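The new ChangeAttributesForAllProperties helper keeps the Expectations in sync with maps produced by CopyForPreventExtensions, which rewrites the attributes of every property. A usage sketch mirroring the Transition() methods added further down:

    expectations.SetElementsKind(DICTIONARY_ELEMENTS);
    expectations.ChangeAttributesForAllProperties(FROZEN);  // or SEALED / NONE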
Handle<Map> AddDataField(Handle<Map> map, PropertyAttributes attributes,
PropertyConstness constness,
Representation representation,
@@ -665,7 +664,7 @@ static void TestGeneralizeField(int detach_property_at_index,
// Create new maps by generalizing representation of propX field.
CanonicalHandleScope canonical(isolate);
JSHeapBroker broker(isolate, &zone);
- CompilationDependencies dependencies(isolate, &zone);
+ CompilationDependencies dependencies(&broker, &zone);
MapRef map_ref(&broker, map);
map_ref.SerializeOwnDescriptors();
dependencies.DependOnFieldType(map_ref, property_index);
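CompilationDependencies is now constructed from the JSHeapBroker rather than the Isolate, so the broker must be created first. The new setup order, repeated verbatim in each test below (sketch, assuming the cctest harness):

    Zone zone(isolate->allocator(), ZONE_NAME);
    CanonicalHandleScope canonical(isolate);
    JSHeapBroker broker(isolate, &zone);
    CompilationDependencies dependencies(&broker, &zone);  // was: (isolate, &zone)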
@@ -1042,7 +1041,7 @@ static void TestReconfigureDataFieldAttribute_GeneralizeField(
Zone zone(isolate->allocator(), ZONE_NAME);
CanonicalHandleScope canonical(isolate);
JSHeapBroker broker(isolate, &zone);
- CompilationDependencies dependencies(isolate, &zone);
+ CompilationDependencies dependencies(&broker, &zone);
MapRef map_ref(&broker, map);
map_ref.SerializeOwnDescriptors();
dependencies.DependOnFieldType(map_ref, kSplitProp);
@@ -1129,10 +1128,11 @@ static void TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
Zone zone(isolate->allocator(), ZONE_NAME);
CanonicalHandleScope canonical(isolate);
JSHeapBroker broker(isolate, &zone);
- CompilationDependencies dependencies(isolate, &zone);
+ CompilationDependencies dependencies(&broker, &zone);
MapRef map_ref(&broker, map);
map_ref.SerializeOwnDescriptors();
dependencies.DependOnFieldType(map_ref, kSplitProp);
+ dependencies.DependOnFieldConstness(map_ref, kSplitProp);
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
@@ -1813,7 +1813,7 @@ static void TestReconfigureElementsKind_GeneralizeField(
Zone zone(isolate->allocator(), ZONE_NAME);
CanonicalHandleScope canonical(isolate);
JSHeapBroker broker(isolate, &zone);
- CompilationDependencies dependencies(isolate, &zone);
+ CompilationDependencies dependencies(&broker, &zone);
MapRef map_ref(&broker, map);
map_ref.SerializeOwnDescriptors();
dependencies.DependOnFieldType(map_ref, kDiffProp);
@@ -1911,10 +1911,12 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
Zone zone(isolate->allocator(), ZONE_NAME);
CanonicalHandleScope canonical(isolate);
JSHeapBroker broker(isolate, &zone);
- CompilationDependencies dependencies(isolate, &zone);
+ CompilationDependencies dependencies(&broker, &zone);
MapRef map_ref(&broker, map);
map_ref.SerializeOwnDescriptors();
+
dependencies.DependOnFieldType(map_ref, kDiffProp);
+ dependencies.DependOnFieldConstness(map_ref, kDiffProp);
// Reconfigure elements kinds of |map2|, which should generalize
// representations in |map|.
@@ -1935,7 +1937,8 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
expected.representation, expected.type);
CHECK(!map->is_deprecated());
CHECK_EQ(*map, *new_map);
- CHECK(dependencies.AreValid());
+ CHECK_EQ(IsGeneralizableTo(to.constness, from.constness),
+ dependencies.AreValid());
CHECK(!new_map->is_deprecated());
CHECK(expectations.Check(*new_map));
@@ -2333,9 +2336,12 @@ static void TestGeneralizeFieldWithSpecialTransition(TestConfig& config,
// If Map::TryUpdate() manages to succeed the result must match the result
// of Map::Update().
CHECK_EQ(*new_map2, *tmp_map);
+ } else {
+ // Equivalent transitions should always find the updated map.
+ CHECK(config.is_non_equivalent_transition());
}
- if (config.is_non_equevalent_transition()) {
+ if (config.is_non_equivalent_transition()) {
// In case of non-equivalent transition currently we generalize all
// representations.
for (int i = 0; i < kPropCount; i++) {
@@ -2344,6 +2350,9 @@ static void TestGeneralizeFieldWithSpecialTransition(TestConfig& config,
CHECK(new_map2->GetBackPointer()->IsUndefined(isolate));
CHECK(expectations2.Check(*new_map2));
} else {
+ expectations2.SetDataField(i, expected.constness, expected.representation,
+ expected.type);
+
CHECK(!new_map2->GetBackPointer()->IsUndefined(isolate));
CHECK(expectations2.Check(*new_map2));
}
@@ -2374,23 +2383,33 @@ TEST(ElementsKindTransitionFromMapOwningDescriptor) {
FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
+ TestConfig(PropertyAttributes attributes, Handle<Symbol> symbol)
+ : attributes(attributes), symbol(symbol) {}
+
Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
- Handle<Symbol> frozen_symbol(map->GetReadOnlyRoots().frozen_symbol(),
- CcTest::i_isolate());
expectations.SetElementsKind(DICTIONARY_ELEMENTS);
- return Map::CopyForPreventExtensions(CcTest::i_isolate(), map, NONE,
- frozen_symbol,
- "CopyForPreventExtensions");
+ expectations.ChangeAttributesForAllProperties(attributes);
+ return Map::CopyForPreventExtensions(CcTest::i_isolate(), map, attributes,
+ symbol, "CopyForPreventExtensions");
}
// TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
bool generalizes_representations() const { return false; }
- bool is_non_equevalent_transition() const { return true; }
+ bool is_non_equivalent_transition() const { return false; }
+
+ PropertyAttributes attributes;
+ Handle<Symbol> symbol;
};
- TestConfig config;
- TestGeneralizeFieldWithSpecialTransition(
- config, {PropertyConstness::kMutable, Representation::Smi(), any_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+ Factory* factory = isolate->factory();
+ TestConfig configs[] = {{FROZEN, factory->frozen_symbol()},
+ {SEALED, factory->sealed_symbol()},
+ {NONE, factory->nonextensible_symbol()}};
+ for (size_t i = 0; i < arraysize(configs); i++) {
+ TestGeneralizeFieldWithSpecialTransition(
+ configs[i],
+ {PropertyConstness::kMutable, Representation::Smi(), any_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+ }
}
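The single FROZEN config becomes a table covering all three integrity levels. Presumably these correspond to the JS-level operations as follows (an assumption inferred from the transition symbols, not stated in the patch):

    //   Object.preventExtensions(o)  ->  nonextensible_symbol, NONE
    //   Object.seal(o)               ->  sealed_symbol,        SEALED
    //   Object.freeze(o)             ->  frozen_symbol,        FROZEN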
@@ -2404,6 +2423,9 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
+ TestConfig(PropertyAttributes attributes, Handle<Symbol> symbol)
+ : attributes(attributes), symbol(symbol) {}
+
Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
@@ -2417,21 +2439,29 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
.ToHandleChecked();
CHECK(!map->owns_descriptors());
- Handle<Symbol> frozen_symbol(ReadOnlyRoots(isolate).frozen_symbol(),
- isolate);
expectations.SetElementsKind(DICTIONARY_ELEMENTS);
- return Map::CopyForPreventExtensions(isolate, map, NONE, frozen_symbol,
+ expectations.ChangeAttributesForAllProperties(attributes);
+ return Map::CopyForPreventExtensions(isolate, map, attributes, symbol,
"CopyForPreventExtensions");
}
// TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
bool generalizes_representations() const { return false; }
- bool is_non_equevalent_transition() const { return true; }
+ bool is_non_equivalent_transition() const { return false; }
+
+ PropertyAttributes attributes;
+ Handle<Symbol> symbol;
};
- TestConfig config;
- TestGeneralizeFieldWithSpecialTransition(
- config, {PropertyConstness::kMutable, Representation::Smi(), any_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+ Factory* factory = isolate->factory();
+ TestConfig configs[] = {{FROZEN, factory->frozen_symbol()},
+ {SEALED, factory->sealed_symbol()},
+ {NONE, factory->nonextensible_symbol()}};
+ for (size_t i = 0; i < arraysize(configs); i++) {
+ TestGeneralizeFieldWithSpecialTransition(
+ configs[i],
+ {PropertyConstness::kMutable, Representation::Smi(), any_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+ }
}
@@ -2460,7 +2490,7 @@ TEST(PrototypeTransitionFromMapOwningDescriptor) {
bool generalizes_representations() const {
return !IS_PROTO_TRANS_ISSUE_FIXED;
}
- bool is_non_equevalent_transition() const { return true; }
+ bool is_non_equivalent_transition() const { return true; }
};
TestConfig config;
TestGeneralizeFieldWithSpecialTransition(
@@ -2507,7 +2537,7 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
bool generalizes_representations() const {
return !IS_PROTO_TRANS_ISSUE_FIXED;
}
- bool is_non_equevalent_transition() const { return true; }
+ bool is_non_equivalent_transition() const { return true; }
};
TestConfig config;
TestGeneralizeFieldWithSpecialTransition(
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 23ac83a953..538be20e71 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -453,8 +453,8 @@ TEST(FactoryHashmapVariable) {
" return obj;\n"
"}");
// Can't infer function names statically.
- CheckFunctionName(script, "return 1", "obj.(anonymous function)");
- CheckFunctionName(script, "return 2", "obj.(anonymous function)");
+ CheckFunctionName(script, "return 1", "obj.<computed>");
+ CheckFunctionName(script, "return 2", "obj.<computed>");
}
@@ -470,7 +470,7 @@ TEST(FactoryHashmapConditional) {
" return obj;\n"
"}");
// Can't infer the function name statically.
- CheckFunctionName(script, "return 1", "obj.(anonymous function)");
+ CheckFunctionName(script, "return 1", "obj.<computed>");
}
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 0db56e382a..b81007f40e 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -28,9 +28,11 @@
#include "src/api-inl.h"
#include "src/global-handles.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
@@ -69,6 +71,14 @@ void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
CHECK(!flag_and_persistent->handle.IsEmpty());
}
+void ConstructJSObject(v8::Isolate* isolate, v8::Global<v8::Object>* global) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object(v8::Object::New(isolate));
+ CHECK(!object.IsEmpty());
+ *global = v8::Global<v8::Object>(isolate, object);
+ CHECK(!global->IsEmpty());
+}
+
void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
FlagAndPersistent* flag_and_persistent) {
v8::HandleScope handle_scope(isolate);
@@ -95,12 +105,7 @@ void WeakHandleTest(v8::Isolate* isolate, ConstructFunction construct_function,
FlagAndPersistent fp;
construct_function(isolate, context, &fp);
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> tmp = v8::Local<v8::Object>::New(isolate, fp.handle);
- CHECK(i::Heap::InNewSpace(*v8::Utils::OpenHandle(*tmp)));
- }
-
+ CHECK(heap::InYoungGeneration(isolate, fp.handle));
fp.handle.SetWeak(&fp, &ResetHandleAndSetFlag,
v8::WeakCallbackType::kParameter);
fp.flag = false;
@@ -340,7 +345,16 @@ TEST(WeakHandleToActiveUnmodifiedJSApiObjectSurvivesScavenge) {
CcTest::InitializeVM();
WeakHandleTest(
CcTest::isolate(), &ConstructJSApiObject,
- [](FlagAndPersistent* fp) { fp->handle.MarkActive(); },
+ [](FlagAndPersistent* fp) {
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ fp->handle.MarkActive();
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+ },
[]() { InvokeScavenge(); }, SurvivalMode::kSurvives);
}
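Persistent::MarkActive() is deprecated, but these tests exercise it on purpose, so each call is wrapped in clang diagnostic pragmas. The suppression idiom in isolation (deprecated_call() is a hypothetical placeholder for the guarded call):

    #if __clang__
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wdeprecated"
    #endif
      deprecated_call();  // the intentionally exercised deprecated API
    #if __clang__
    #pragma clang diagnostic pop
    #endif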
@@ -348,7 +362,16 @@ TEST(WeakHandleToActiveUnmodifiedJSApiObjectDiesOnMarkCompact) {
CcTest::InitializeVM();
WeakHandleTest(
CcTest::isolate(), &ConstructJSApiObject,
- [](FlagAndPersistent* fp) { fp->handle.MarkActive(); },
+ [](FlagAndPersistent* fp) {
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ fp->handle.MarkActive();
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+ },
[]() { InvokeMarkSweep(); }, SurvivalMode::kDies);
}
@@ -357,7 +380,14 @@ TEST(WeakHandleToActiveUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
WeakHandleTest(
CcTest::isolate(), &ConstructJSApiObject,
[](FlagAndPersistent* fp) {
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
fp->handle.MarkActive();
+#if __clang__
+#pragma clang diagnostic pop
+#endif
v8::Local<v8::Object> handle =
v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
USE(handle);
@@ -487,12 +517,7 @@ TEST(GCFromWeakCallbacks) {
for (int inner_gc = 0; inner_gc < kNumberOfGCTypes; inner_gc++) {
FlagAndPersistent fp;
ConstructJSApiObject(isolate, context, &fp);
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Object> tmp =
- v8::Local<v8::Object>::New(isolate, fp.handle);
- CHECK(i::Heap::InNewSpace(*v8::Utils::OpenHandle(*tmp)));
- }
+ CHECK(heap::InYoungGeneration(isolate, fp.handle));
fp.flag = false;
fp.handle.SetWeak(&fp, gc_forcing_callback[inner_gc],
v8::WeakCallbackType::kParameter);
@@ -532,5 +557,32 @@ TEST(SecondPassPhantomCallbacks) {
CHECK(fp.flag);
}
+TEST(MoveStrongGlobal) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Global<v8::Object>* global = new Global<v8::Object>();
+ ConstructJSObject(isolate, global);
+ InvokeMarkSweep();
+ v8::Global<v8::Object> global2(std::move(*global));
+ delete global;
+ InvokeMarkSweep();
+}
+
+TEST(MoveWeakGlobal) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Global<v8::Object>* global = new Global<v8::Object>();
+ ConstructJSObject(isolate, global);
+ InvokeMarkSweep();
+ global->SetWeak();
+ v8::Global<v8::Object> global2(std::move(*global));
+ delete global;
+ InvokeMarkSweep();
+}
+
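The two new tests check that moving a v8::Global transfers ownership of the underlying global handle, for both strong and weak globals, so a subsequent mark-sweep never sees a stale slot. A public-API sketch of the move semantics, assuming an initialized isolate inside an entered context:

    v8::Global<v8::Object> a(isolate, v8::Object::New(isolate));
    v8::Global<v8::Object> b(std::move(a));  // ownership moves, nothing is copied
    CHECK(a.IsEmpty());   // the moved-from global is empty
    CHECK(!b.IsEmpty());  // the moved-to global now owns the handle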
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 84ecc78ff3..5e61199e86 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -40,6 +40,7 @@
#include "src/base/optional.h"
#include "src/collector.h"
#include "src/debug/debug.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
@@ -2693,6 +2694,9 @@ TEST(TrackHeapAllocationsWithInlining) {
CHECK(node);
// In lite mode, there is feedback and feedback metadata.
unsigned int num_nodes = (i::FLAG_lite_mode) ? 6 : 8;
+ // Without forced source position collection, there is no source position
+ // table.
+ if (i::FLAG_enable_lazy_source_positions) num_nodes -= 1;
CHECK_GE(node->allocation_count(), num_nodes);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
heap_profiler->StopTrackingHeapObjects();
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
index c1dde75a93..ac03a6fc59 100644
--- a/deps/v8/test/cctest/test-icache.cc
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -24,7 +24,7 @@ static constexpr int kBufferSize = 8 * KB;
static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
MacroAssembler masm(isolate, CodeObjectRequired::kYes, buffer->CreateView());
#if V8_TARGET_ARCH_IA32
- __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(eax, Operand(esp, kSystemPointerSize));
for (int i = 0; i < kNumInstr; ++i) {
__ add(eax, Immediate(1));
}
@@ -70,7 +70,7 @@ static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
static void FloodWithNop(Isolate* isolate, TestingAssemblerBuffer* buffer) {
MacroAssembler masm(isolate, CodeObjectRequired::kYes, buffer->CreateView());
#if V8_TARGET_ARCH_IA32
- __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(eax, Operand(esp, kSystemPointerSize));
#elif V8_TARGET_ARCH_X64
__ movl(rax, arg_reg_1);
#elif V8_TARGET_ARCH_MIPS
@@ -101,14 +101,14 @@ TEST(TestFlushICacheOfWritable) {
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadWrite));
FloodWithInc(isolate, buffer.get());
- Assembler::FlushICache(buffer->start(), buffer->size());
+ FlushInstructionCache(buffer->start(), buffer->size());
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadExecute));
CHECK_EQ(23 + kNumInstr, f.Call(23)); // Call into generated code.
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadWrite));
FloodWithNop(isolate, buffer.get());
- Assembler::FlushICache(buffer->start(), buffer->size());
+ FlushInstructionCache(buffer->start(), buffer->size());
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadExecute));
CHECK_EQ(23, f.Call(23)); // Call into generated code.
@@ -148,14 +148,14 @@ CONDITIONAL_TEST(TestFlushICacheOfExecutable) {
FloodWithInc(isolate, buffer.get());
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadExecute));
- Assembler::FlushICache(buffer->start(), buffer->size());
+ FlushInstructionCache(buffer->start(), buffer->size());
CHECK_EQ(23 + kNumInstr, f.Call(23)); // Call into generated code.
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadWrite));
FloodWithNop(isolate, buffer.get());
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadExecute));
- Assembler::FlushICache(buffer->start(), buffer->size());
+ FlushInstructionCache(buffer->start(), buffer->size());
CHECK_EQ(23, f.Call(23)); // Call into generated code.
}
}
@@ -177,10 +177,10 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadWriteExecute));
FloodWithInc(isolate, buffer.get());
- Assembler::FlushICache(buffer->start(), buffer->size());
+ FlushInstructionCache(buffer->start(), buffer->size());
CHECK_EQ(23 + kNumInstr, f.Call(23)); // Call into generated code.
FloodWithNop(isolate, buffer.get());
- Assembler::FlushICache(buffer->start(), buffer->size());
+ FlushInstructionCache(buffer->start(), buffer->size());
CHECK_EQ(23, f.Call(23)); // Call into generated code.
}
}
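Assembler::FlushICache is replaced throughout by the free function FlushInstructionCache. The write/flush/execute sequence these tests revolve around, gathered in one place (sketch, reusing FloodWithNop, a TestingAssemblerBuffer* `buffer`, and a callable `f` built from the buffer as in the tests above):

    CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
                         buffer->size(), v8::PageAllocator::kReadWrite));
    FloodWithNop(isolate, buffer.get());                     // rewrite the code
    FlushInstructionCache(buffer->start(), buffer->size());  // drop stale icache lines
    CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
                         buffer->size(), v8::PageAllocator::kReadExecute));
    CHECK_EQ(23, f.Call(23));                                // run the patched code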
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 61984afe23..5646f97a33 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -47,7 +47,7 @@ Handle<T> GetLexical(const char* name) {
isolate->native_context()->script_context_table(), isolate);
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(isolate, script_contexts, str_name,
+ if (ScriptContextTable::Lookup(isolate, *script_contexts, *str_name,
&lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate, script_contexts, lookup_result.context_index);
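Note the call-site-only change here: ScriptContextTable::Lookup now takes the table and the name unhandlified (*script_contexts, *str_name) instead of as handles; the lookup logic itself is untouched.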
@@ -1210,6 +1210,154 @@ TEST(SubclassPromiseBuiltinNoInlineNew) {
TestSubclassPromiseBuiltin();
}
+TEST(SubclassTranspiledClassHierarchy) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "Object.setPrototypeOf(B, A);\n"
+ "function A() {\n"
+ " this.a0 = 0;\n"
+ " this.a1 = 1;\n"
+ " this.a2 = 1;\n"
+ " this.a3 = 1;\n"
+ " this.a4 = 1;\n"
+ " this.a5 = 1;\n"
+ " this.a6 = 1;\n"
+ " this.a7 = 1;\n"
+ " this.a8 = 1;\n"
+ " this.a9 = 1;\n"
+ " this.a10 = 1;\n"
+ " this.a11 = 1;\n"
+ " this.a12 = 1;\n"
+ " this.a13 = 1;\n"
+ " this.a14 = 1;\n"
+ " this.a15 = 1;\n"
+ " this.a16 = 1;\n"
+ " this.a17 = 1;\n"
+ " this.a18 = 1;\n"
+ " this.a19 = 1;\n"
+ "};\n"
+ "function B() {\n"
+ " A.call(this);\n"
+ " this.b = 1;\n"
+ "};\n");
+
+ Handle<JSFunction> func = GetGlobal<JSFunction>("B");
+
+ // Zero instances have been created so far.
+ CHECK(!func->has_initial_map());
+
+ v8::Local<v8::Script> new_script = v8_compile("new B()");
+
+ RunI<JSObject>(new_script);
+
+ CHECK(func->has_initial_map());
+ Handle<Map> initial_map(func->initial_map(), func->GetIsolate());
+
+ CHECK_EQ(JS_OBJECT_TYPE, initial_map->instance_type());
+
+ // One instance of a subclass created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
+ initial_map->construction_counter());
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+
+ // Create two instances in order to ensure that |obj|.b is a data field
+ // in case of Function subclassing.
+ Handle<JSObject> obj = RunI<JSObject>(new_script);
+
+ // Two instances of a subclass created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 2,
+ initial_map->construction_counter());
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(IsObjectShrinkable(*obj));
+
+ // Create several subclass instances to complete the tracking.
+ for (int i = 2; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+ Handle<JSObject> tmp = RunI<JSObject>(new_script);
+ CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
+ IsObjectShrinkable(*tmp));
+ }
+ CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(!IsObjectShrinkable(*obj));
+
+ // No slack left.
+ CHECK_EQ(21, obj->map()->GetInObjectProperties());
+ CHECK_EQ(JS_OBJECT_TYPE, obj->map()->instance_type());
+}
+
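Worked count for the final check above: the transpiled base constructor A assigns a0 through a19 (20 fields) and B adds this.b (1 field), so once slack tracking completes the instance keeps exactly 20 + 1 = 21 in-object properties and no slack, which is what CHECK_EQ(21, ...) asserts.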
+TEST(Regress8853_ClassConstructor) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ // For classes without any this.prop assignments in their
+ // constructors we start out with 10 inobject properties.
+ Handle<JSObject> obj = CompileRunI<JSObject>("new (class {});\n");
+ CHECK(obj->map()->IsInobjectSlackTrackingInProgress());
+ CHECK(IsObjectShrinkable(*obj));
+ CHECK_EQ(10, obj->map()->GetInObjectProperties());
+
+ // For classes with N explicit this.prop assignments in their
+ // constructors we start out with N+8 inobject properties.
+ obj = CompileRunI<JSObject>(
+ "new (class {\n"
+ " constructor() {\n"
+ " this.x = 1;\n"
+ " this.y = 2;\n"
+ " this.z = 3;\n"
+ " }\n"
+ "});\n");
+ CHECK(obj->map()->IsInobjectSlackTrackingInProgress());
+ CHECK(IsObjectShrinkable(*obj));
+ CHECK_EQ(3 + 8, obj->map()->GetInObjectProperties());
+}
+
+TEST(Regress8853_ClassHierarchy) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ // For class hierarchies without any this.prop assignments in their
+ // constructors we reserve 2 inobject properties per constructor plus
+ // 8 inobject properties slack on top.
+ std::string base = "(class {})";
+ for (int i = 1; i < 10; ++i) {
+ std::string script = "new " + base + ";\n";
+ Handle<JSObject> obj = CompileRunI<JSObject>(script.c_str());
+ CHECK(obj->map()->IsInobjectSlackTrackingInProgress());
+ CHECK(IsObjectShrinkable(*obj));
+ CHECK_EQ(8 + 2 * i, obj->map()->GetInObjectProperties());
+ base = "(class extends " + base + " {})";
+ }
+}
+
+TEST(Regress8853_FunctionConstructor) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ // For constructor functions without any this.prop assignments in
+ // them we start out with 10 inobject properties.
+ Handle<JSObject> obj = CompileRunI<JSObject>("new (function() {});\n");
+ CHECK(obj->map()->IsInobjectSlackTrackingInProgress());
+ CHECK(IsObjectShrinkable(*obj));
+ CHECK_EQ(10, obj->map()->GetInObjectProperties());
+
+ // For constructor functions with N explicit this.prop assignments
+ // in them we start out with N+8 inobject properties.
+ obj = CompileRunI<JSObject>(
+ "new (function() {\n"
+ " this.a = 1;\n"
+ " this.b = 2;\n"
+ " this.c = 3;\n"
+ " this.d = 3;\n"
+ " this.c = 3;\n"
+ " this.f = 3;\n"
+ "});\n");
+ CHECK(obj->map()->IsInobjectSlackTrackingInProgress());
+ CHECK(IsObjectShrinkable(*obj));
+ CHECK_EQ(6 + 8, obj->map()->GetInObjectProperties());
+}
+
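A summary of the heuristic the three Regress8853 tests pin down (inferred from the expectations, not stated elsewhere in the patch): a constructor with no this.prop assignments starts with 10 in-object slots; one with N assignment sites starts with N + 8; and a chain of k trivial constructors starts with 2*k + 8. Note the duplicated `this.c` in the function-constructor case: the expected 6 + 8 suggests the preparser counts assignment sites rather than distinct property names.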
} // namespace test_inobject_slack_tracking
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-intl.cc b/deps/v8/test/cctest/test-intl.cc
index 0670340227..d916507760 100644
--- a/deps/v8/test/cctest/test-intl.cc
+++ b/deps/v8/test/cctest/test-intl.cc
@@ -132,7 +132,8 @@ TEST(GetStringOption) {
Handle<String> key = isolate->factory()->NewStringFromAsciiChecked("foo");
v8::internal::LookupIterator it(isolate, options, key);
CHECK(Object::SetProperty(&it, Handle<Smi>(Smi::FromInt(42), isolate),
- LanguageMode::kStrict, StoreOrigin::kMaybeKeyed)
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.FromJust());
{
@@ -191,7 +192,8 @@ TEST(GetBoolOption) {
Handle<Object> false_value =
handle(i::ReadOnlyRoots(isolate).false_value(), isolate);
Object::SetProperty(isolate, options, key, false_value,
- LanguageMode::kStrict)
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.Assert();
bool result = false;
Maybe<bool> found =
@@ -205,7 +207,8 @@ TEST(GetBoolOption) {
Handle<Object> true_value =
handle(i::ReadOnlyRoots(isolate).true_value(), isolate);
Object::SetProperty(isolate, options, key, true_value,
- LanguageMode::kStrict)
+ StoreOrigin::kMaybeKeyed,
+ Just(ShouldThrow::kThrowOnError))
.Assert();
bool result = false;
Maybe<bool> found =
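Object::SetProperty loses its LanguageMode parameter; throwing on error is now requested explicitly through a Maybe<ShouldThrow>. The new call shape in one piece (sketch, with `value` standing in for the handle being stored):

    Object::SetProperty(isolate, options, key, value, StoreOrigin::kMaybeKeyed,
                        Just(ShouldThrow::kThrowOnError))
        .Assert();  // previously this passed LanguageMode::kStrict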
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index ffa2ba54f0..e529c7cac9 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -14,26 +14,29 @@
namespace v8 {
namespace internal {
-Handle<JSWeakFactory> ConstructJSWeakFactory(Isolate* isolate) {
+namespace {
+
+Handle<JSFinalizationGroup> ConstructJSFinalizationGroup(Isolate* isolate) {
Factory* factory = isolate->factory();
- Handle<String> weak_factory_name = factory->WeakFactory_string();
+ Handle<String> finalization_group_name =
+ factory->NewStringFromStaticChars("FinalizationGroup");
Handle<Object> global =
handle(isolate->native_context()->global_object(), isolate);
- Handle<JSFunction> weak_factory_fun = Handle<JSFunction>::cast(
- Object::GetProperty(isolate, global, weak_factory_name)
+ Handle<JSFunction> finalization_group_fun = Handle<JSFunction>::cast(
+ Object::GetProperty(isolate, global, finalization_group_name)
.ToHandleChecked());
- auto weak_factory = Handle<JSWeakFactory>::cast(
- JSObject::New(weak_factory_fun, weak_factory_fun,
+ auto finalization_group = Handle<JSFinalizationGroup>::cast(
+ JSObject::New(finalization_group_fun, finalization_group_fun,
Handle<AllocationSite>::null())
.ToHandleChecked());
#ifdef VERIFY_HEAP
- weak_factory->JSWeakFactoryVerify(isolate);
+ finalization_group->JSFinalizationGroupVerify(isolate);
#endif // VERIFY_HEAP
- return weak_factory;
+ return finalization_group;
}
-Handle<JSWeakRef> ConstructJSWeakRef(Isolate* isolate,
- Handle<JSReceiver> target) {
+Handle<JSWeakRef> ConstructJSWeakRef(Handle<JSReceiver> target,
+ Isolate* isolate) {
Factory* factory = isolate->factory();
Handle<String> weak_ref_name = factory->WeakRef_string();
Handle<Object> global =
@@ -50,290 +53,590 @@ Handle<JSWeakRef> ConstructJSWeakRef(Isolate* isolate,
return weak_ref;
}
-Handle<JSWeakCell> MakeCell(Isolate* isolate, Handle<JSObject> js_object,
- Handle<JSWeakFactory> weak_factory) {
- Handle<Map> weak_cell_map(isolate->native_context()->js_weak_cell_map(),
- isolate);
- Handle<JSWeakCell> weak_cell =
- Handle<JSWeakCell>::cast(isolate->factory()->NewJSObjectFromMap(
- weak_cell_map, TENURED, Handle<AllocationSite>::null()));
- weak_cell->set_target(*js_object);
- weak_factory->AddWeakCell(*weak_cell);
+Handle<JSObject> CreateKey(const char* key_prop_value, Isolate* isolate) {
+ Factory* factory = isolate->factory();
+ Handle<String> key_string = factory->NewStringFromStaticChars("key_string");
+ Handle<JSObject> key =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ JSObject::AddProperty(isolate, key, key_string,
+ factory->NewStringFromAsciiChecked(key_prop_value),
+ NONE);
+ return key;
+}
+
+Handle<WeakCell> FinalizationGroupRegister(
+ Handle<JSFinalizationGroup> finalization_group, Handle<JSObject> target,
+ Handle<Object> holdings, Handle<Object> key, Isolate* isolate) {
+ JSFinalizationGroup::Register(finalization_group, target, holdings, key,
+ isolate);
+ CHECK(finalization_group->active_cells()->IsWeakCell());
+ Handle<WeakCell> weak_cell =
+ handle(WeakCell::cast(finalization_group->active_cells()), isolate);
#ifdef VERIFY_HEAP
- weak_cell->JSWeakCellVerify(isolate);
+ weak_cell->WeakCellVerify(isolate);
#endif // VERIFY_HEAP
return weak_cell;
}
-void NullifyWeakCell(Handle<JSWeakCell> weak_cell, Isolate* isolate) {
+Handle<WeakCell> FinalizationGroupRegister(
+ Handle<JSFinalizationGroup> finalization_group, Handle<JSObject> target,
+ Isolate* isolate) {
+ Handle<Object> undefined =
+ handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
+ return FinalizationGroupRegister(finalization_group, target, undefined,
+ undefined, isolate);
+}
+
+void NullifyWeakCell(Handle<WeakCell> weak_cell, Isolate* isolate) {
auto empty_func = [](HeapObject object, ObjectSlot slot, Object target) {};
weak_cell->Nullify(isolate, empty_func);
#ifdef VERIFY_HEAP
- weak_cell->JSWeakCellVerify(isolate);
+ weak_cell->WeakCellVerify(isolate);
#endif // VERIFY_HEAP
}
-void ClearWeakCell(Handle<JSWeakCell> weak_cell, Isolate* isolate) {
- weak_cell->Clear(isolate);
- CHECK(weak_cell->next()->IsUndefined(isolate));
- CHECK(weak_cell->prev()->IsUndefined(isolate));
-#ifdef VERIFY_HEAP
- weak_cell->JSWeakCellVerify(isolate);
-#endif // VERIFY_HEAP
+// Usage: VerifyWeakCellChain(isolate, list_head, n, cell1, cell2, ..., celln);
+// verifies that list_head == cell1 and that cell1, cell2, ..., celln form a
+// list in that order.
+void VerifyWeakCellChain(Isolate* isolate, Object list_head, int n_args, ...) {
+ CHECK_GE(n_args, 0);
+
+ va_list args;
+ va_start(args, n_args);
+
+ if (n_args == 0) {
+ // Verify empty list
+ CHECK(list_head->IsUndefined(isolate));
+ } else {
+ WeakCell current = WeakCell::cast(Object(va_arg(args, Address)));
+ CHECK_EQ(current, list_head);
+ CHECK(current->prev()->IsUndefined(isolate));
+
+ for (int i = 1; i < n_args; i++) {
+ WeakCell next = WeakCell::cast(Object(va_arg(args, Address)));
+ CHECK_EQ(current->next(), next);
+ CHECK_EQ(next->prev(), current);
+ current = next;
+ }
+ CHECK(current->next()->IsUndefined(isolate));
+ }
+ va_end(args);
+}
+
+// Like VerifyWeakCellChain but verifies the chain created with key_list_prev
+// and key_list_next instead of prev and next.
+void VerifyWeakCellKeyChain(Isolate* isolate, Object list_head, int n_args,
+ ...) {
+ CHECK_GE(n_args, 0);
+
+ va_list args;
+ va_start(args, n_args);
+
+ if (n_args == 0) {
+ // Verify empty list
+ CHECK(list_head->IsTheHole(isolate));
+ } else {
+ WeakCell current = WeakCell::cast(Object(va_arg(args, Address)));
+ CHECK_EQ(current, list_head);
+ CHECK(current->key_list_prev()->IsUndefined(isolate));
+
+ for (int i = 1; i < n_args; i++) {
+ WeakCell next = WeakCell::cast(Object(va_arg(args, Address)));
+ CHECK_EQ(current->key_list_next(), next);
+ CHECK_EQ(next->key_list_prev(), current);
+ current = next;
+ }
+ CHECK(current->key_list_next()->IsUndefined(isolate));
+ }
+ va_end(args);
}
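A usage sketch for the two verifiers, mirroring the checks in the tests below (fg, key, cell1 and cell2 are hypothetical handles, with cell2 registered after cell1 so it heads both chains):

    VerifyWeakCellChain(isolate, fg->active_cells(), 2, *cell2, *cell1);
    Handle<ObjectHashTable> key_map =
        handle(ObjectHashTable::cast(fg->key_map()), isolate);
    VerifyWeakCellKeyChain(isolate, key_map->Lookup(key), 2, *cell2, *cell1);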
-TEST(TestJSWeakCellCreation) {
+} // namespace
+
+TEST(TestRegister) {
FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
- // Create JSWeakCell and verify internal data structures.
- Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
- CHECK(weak_cell1->prev()->IsUndefined(isolate));
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ // Register a weak reference and verify internal data structures.
+ Handle<WeakCell> weak_cell1 =
+ FinalizationGroupRegister(finalization_group, js_object, isolate);
- CHECK_EQ(weak_factory->active_cells(), *weak_cell1);
- CHECK(weak_factory->cleared_cells()->IsUndefined(isolate));
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 1,
+ *weak_cell1);
+ CHECK(weak_cell1->key_list_prev()->IsUndefined(isolate));
+ CHECK(weak_cell1->key_list_next()->IsUndefined(isolate));
- // Create another JSWeakCell and verify internal data structures.
- Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
- CHECK(weak_cell2->prev()->IsUndefined(isolate));
- CHECK_EQ(weak_cell2->next(), *weak_cell1);
- CHECK_EQ(weak_cell1->prev(), *weak_cell2);
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ CHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
+
+ // No key was used during registration; the key-based map stays uninitialized.
+ CHECK(finalization_group->key_map()->IsUndefined(isolate));
+
+ // Register another weak reference and verify internal data structures.
+ Handle<WeakCell> weak_cell2 =
+ FinalizationGroupRegister(finalization_group, js_object, isolate);
+
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 2,
+ *weak_cell2, *weak_cell1);
+ CHECK(weak_cell2->key_list_prev()->IsUndefined(isolate));
+ CHECK(weak_cell2->key_list_next()->IsUndefined(isolate));
+
+ CHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
+ CHECK(finalization_group->key_map()->IsUndefined(isolate));
+}
+
+TEST(TestRegisterWithKey) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ Handle<JSObject> key1 = CreateKey("key1", isolate);
+ Handle<JSObject> key2 = CreateKey("key2", isolate);
+ Handle<Object> undefined =
+ handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
+
+ // Register a weak reference with a key and verify internal data structures.
+ Handle<WeakCell> weak_cell1 = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key1, isolate);
+
+ {
+ CHECK(finalization_group->key_map()->IsObjectHashTable());
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 1, *weak_cell1);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key2), 0);
+ }
+
+ // Register another weak reference with a different key and verify internal
+ // data structures.
+ Handle<WeakCell> weak_cell2 = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key2, isolate);
+
+ {
+ CHECK(finalization_group->key_map()->IsObjectHashTable());
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 1, *weak_cell1);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key2), 1, *weak_cell2);
+ }
- CHECK_EQ(weak_factory->active_cells(), *weak_cell2);
- CHECK(weak_factory->cleared_cells()->IsUndefined(isolate));
+ // Register another weak reference with key1 and verify internal data
+ // structures.
+ Handle<WeakCell> weak_cell3 = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key1, isolate);
+
+ {
+ CHECK(finalization_group->key_map()->IsObjectHashTable());
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 2, *weak_cell3,
+ *weak_cell1);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key2), 1, *weak_cell2);
+ }
}
-TEST(TestJSWeakCellNullify1) {
+TEST(TestWeakCellNullify1) {
FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
- Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
- Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
+ Handle<WeakCell> weak_cell1 =
+ FinalizationGroupRegister(finalization_group, js_object, isolate);
+ Handle<WeakCell> weak_cell2 =
+ FinalizationGroupRegister(finalization_group, js_object, isolate);
- // Nullify the first JSWeakCell and verify internal data structures.
+ // Nullify the first WeakCell and verify internal data structures.
NullifyWeakCell(weak_cell1, isolate);
- CHECK_EQ(weak_factory->active_cells(), *weak_cell2);
+ CHECK_EQ(finalization_group->active_cells(), *weak_cell2);
CHECK(weak_cell2->prev()->IsUndefined(isolate));
CHECK(weak_cell2->next()->IsUndefined(isolate));
- CHECK_EQ(weak_factory->cleared_cells(), *weak_cell1);
+ CHECK_EQ(finalization_group->cleared_cells(), *weak_cell1);
CHECK(weak_cell1->prev()->IsUndefined(isolate));
CHECK(weak_cell1->next()->IsUndefined(isolate));
- // Nullify the second JSWeakCell and verify internal data structures.
+ // Nullify the second WeakCell and verify internal data structures.
NullifyWeakCell(weak_cell2, isolate);
- CHECK(weak_factory->active_cells()->IsUndefined(isolate));
- CHECK_EQ(weak_factory->cleared_cells(), *weak_cell2);
+ CHECK(finalization_group->active_cells()->IsUndefined(isolate));
+ CHECK_EQ(finalization_group->cleared_cells(), *weak_cell2);
CHECK_EQ(weak_cell2->next(), *weak_cell1);
CHECK(weak_cell2->prev()->IsUndefined(isolate));
CHECK_EQ(weak_cell1->prev(), *weak_cell2);
CHECK(weak_cell1->next()->IsUndefined(isolate));
}
-TEST(TestJSWeakCellNullify2) {
+TEST(TestWeakCellNullify2) {
FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
- Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
- Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
+ Handle<WeakCell> weak_cell1 =
+ FinalizationGroupRegister(finalization_group, js_object, isolate);
+ Handle<WeakCell> weak_cell2 =
+ FinalizationGroupRegister(finalization_group, js_object, isolate);
- // Like TestJSWeakCellNullify1 but clear the JSWeakCells in opposite order.
+ // Like TestWeakCellNullify1 but nullify the WeakCells in opposite order.
NullifyWeakCell(weak_cell2, isolate);
- CHECK_EQ(weak_factory->active_cells(), *weak_cell1);
+ CHECK_EQ(finalization_group->active_cells(), *weak_cell1);
CHECK(weak_cell1->prev()->IsUndefined(isolate));
CHECK(weak_cell1->next()->IsUndefined(isolate));
- CHECK_EQ(weak_factory->cleared_cells(), *weak_cell2);
+ CHECK_EQ(finalization_group->cleared_cells(), *weak_cell2);
CHECK(weak_cell2->prev()->IsUndefined(isolate));
CHECK(weak_cell2->next()->IsUndefined(isolate));
NullifyWeakCell(weak_cell1, isolate);
- CHECK(weak_factory->active_cells()->IsUndefined(isolate));
- CHECK_EQ(weak_factory->cleared_cells(), *weak_cell1);
+ CHECK(finalization_group->active_cells()->IsUndefined(isolate));
+ CHECK_EQ(finalization_group->cleared_cells(), *weak_cell1);
CHECK_EQ(weak_cell1->next(), *weak_cell2);
CHECK(weak_cell1->prev()->IsUndefined(isolate));
CHECK_EQ(weak_cell2->prev(), *weak_cell1);
CHECK(weak_cell2->next()->IsUndefined(isolate));
}
-TEST(TestJSWeakFactoryPopClearedCell) {
+TEST(TestJSFinalizationGroupPopClearedCellHoldings1) {
FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
HandleScope outer_scope(isolate);
- Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
-
- Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
- Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
- Handle<JSWeakCell> weak_cell3 = MakeCell(isolate, js_object, weak_factory);
+ Handle<Object> undefined =
+ handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
+
+ Handle<Object> holdings1 = factory->NewStringFromAsciiChecked("holdings1");
+ Handle<WeakCell> weak_cell1 = FinalizationGroupRegister(
+ finalization_group, js_object, holdings1, undefined, isolate);
+ Handle<Object> holdings2 = factory->NewStringFromAsciiChecked("holdings2");
+ Handle<WeakCell> weak_cell2 = FinalizationGroupRegister(
+ finalization_group, js_object, holdings2, undefined, isolate);
+ Handle<Object> holdings3 = factory->NewStringFromAsciiChecked("holdings3");
+ Handle<WeakCell> weak_cell3 = FinalizationGroupRegister(
+ finalization_group, js_object, holdings3, undefined, isolate);
NullifyWeakCell(weak_cell2, isolate);
NullifyWeakCell(weak_cell3, isolate);
- CHECK(weak_factory->NeedsCleanup());
- JSWeakCell cleared1 = weak_factory->PopClearedCell(isolate);
- CHECK_EQ(cleared1, *weak_cell3);
+ CHECK(finalization_group->NeedsCleanup());
+ Object cleared1 =
+ JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ CHECK_EQ(cleared1, *holdings3);
CHECK(weak_cell3->prev()->IsUndefined(isolate));
CHECK(weak_cell3->next()->IsUndefined(isolate));
- CHECK(weak_factory->NeedsCleanup());
- JSWeakCell cleared2 = weak_factory->PopClearedCell(isolate);
- CHECK_EQ(cleared2, *weak_cell2);
+ CHECK(finalization_group->NeedsCleanup());
+ Object cleared2 =
+ JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ CHECK_EQ(cleared2, *holdings2);
CHECK(weak_cell2->prev()->IsUndefined(isolate));
CHECK(weak_cell2->next()->IsUndefined(isolate));
- CHECK(!weak_factory->NeedsCleanup());
+ CHECK(!finalization_group->NeedsCleanup());
NullifyWeakCell(weak_cell1, isolate);
- CHECK(weak_factory->NeedsCleanup());
- JSWeakCell cleared3 = weak_factory->PopClearedCell(isolate);
- CHECK_EQ(cleared3, *weak_cell1);
+ CHECK(finalization_group->NeedsCleanup());
+ Object cleared3 =
+ JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ CHECK_EQ(cleared3, *holdings1);
CHECK(weak_cell1->prev()->IsUndefined(isolate));
CHECK(weak_cell1->next()->IsUndefined(isolate));
- CHECK(!weak_factory->NeedsCleanup());
- CHECK(weak_factory->active_cells()->IsUndefined(isolate));
- CHECK(weak_factory->cleared_cells()->IsUndefined(isolate));
+ CHECK(!finalization_group->NeedsCleanup());
+ CHECK(finalization_group->active_cells()->IsUndefined(isolate));
+ CHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
}
-TEST(TestJSWeakCellClearActiveCells) {
+TEST(TestJSFinalizationGroupPopClearedCellHoldings2) {
+ // Test that when all WeakCells for a key are popped, the key is removed from
+ // the key map.
FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
HandleScope outer_scope(isolate);
- Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<JSObject> key1 = CreateKey("key1", isolate);
- Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
- Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
- Handle<JSWeakCell> weak_cell3 = MakeCell(isolate, js_object, weak_factory);
+ Handle<Object> holdings1 = factory->NewStringFromAsciiChecked("holdings1");
+ Handle<WeakCell> weak_cell1 = FinalizationGroupRegister(
+ finalization_group, js_object, holdings1, key1, isolate);
+ Handle<Object> holdings2 = factory->NewStringFromAsciiChecked("holdings2");
+ Handle<WeakCell> weak_cell2 = FinalizationGroupRegister(
+ finalization_group, js_object, holdings2, key1, isolate);
- CHECK_EQ(weak_factory->active_cells(), *weak_cell3);
- CHECK(weak_cell3->prev()->IsUndefined(isolate));
- CHECK_EQ(weak_cell3->next(), *weak_cell2);
- CHECK_EQ(weak_cell2->prev(), *weak_cell3);
- CHECK_EQ(weak_cell2->next(), *weak_cell1);
- CHECK_EQ(weak_cell1->prev(), *weak_cell2);
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ NullifyWeakCell(weak_cell1, isolate);
+ NullifyWeakCell(weak_cell2, isolate);
- // Clear all JSWeakCells in active_cells and verify the consistency of the
- // active_cells list in all stages.
- ClearWeakCell(weak_cell2, isolate);
- CHECK_EQ(weak_factory->active_cells(), *weak_cell3);
- CHECK(weak_cell3->prev()->IsUndefined(isolate));
- CHECK_EQ(weak_cell3->next(), *weak_cell1);
- CHECK_EQ(weak_cell1->prev(), *weak_cell3);
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ // Nullifying doesn't affect the key chains (just moves WeakCells from
+ // active_cells to cleared_cells).
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 2, *weak_cell2,
+ *weak_cell1);
+ }
- ClearWeakCell(weak_cell3, isolate);
- CHECK_EQ(weak_factory->active_cells(), *weak_cell1);
- CHECK(weak_cell1->prev()->IsUndefined(isolate));
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ Object cleared1 =
+ JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ CHECK_EQ(cleared1, *holdings2);
+
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 1, *weak_cell1);
+ }
+
+ Object cleared2 =
+ JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ CHECK_EQ(cleared2, *holdings1);
- ClearWeakCell(weak_cell1, isolate);
- CHECK(weak_factory->active_cells()->IsUndefined(isolate));
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 0);
+ }
}
-TEST(TestJSWeakCellClearClearedCells) {
+TEST(TestUnregisterActiveCells) {
FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
- Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
- Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
- Handle<JSWeakCell> weak_cell3 = MakeCell(isolate, js_object, weak_factory);
+ Handle<JSObject> key1 = CreateKey("key1", isolate);
+ Handle<JSObject> key2 = CreateKey("key2", isolate);
+ Handle<Object> undefined =
+ handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
- NullifyWeakCell(weak_cell1, isolate);
- NullifyWeakCell(weak_cell2, isolate);
- NullifyWeakCell(weak_cell3, isolate);
+ Handle<WeakCell> weak_cell1a = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key1, isolate);
+ Handle<WeakCell> weak_cell1b = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key1, isolate);
- CHECK_EQ(weak_factory->cleared_cells(), *weak_cell3);
- CHECK(weak_cell3->prev()->IsUndefined(isolate));
- CHECK_EQ(weak_cell3->next(), *weak_cell2);
- CHECK_EQ(weak_cell2->prev(), *weak_cell3);
- CHECK_EQ(weak_cell2->next(), *weak_cell1);
- CHECK_EQ(weak_cell1->prev(), *weak_cell2);
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ Handle<WeakCell> weak_cell2a = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key2, isolate);
+ Handle<WeakCell> weak_cell2b = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key2, isolate);
- // Clear all JSWeakCells in cleared_cells and verify the consistency of the
- // cleared_cells list in all stages.
- ClearWeakCell(weak_cell2, isolate);
- CHECK_EQ(weak_factory->cleared_cells(), *weak_cell3);
- CHECK(weak_cell3->prev()->IsUndefined(isolate));
- CHECK_EQ(weak_cell3->next(), *weak_cell1);
- CHECK_EQ(weak_cell1->prev(), *weak_cell3);
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 4,
+ *weak_cell2b, *weak_cell2a, *weak_cell1b, *weak_cell1a);
+ VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 2, *weak_cell1b,
+ *weak_cell1a);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key2), 2, *weak_cell2b,
+ *weak_cell2a);
+ }
- ClearWeakCell(weak_cell3, isolate);
- CHECK_EQ(weak_factory->cleared_cells(), *weak_cell1);
- CHECK(weak_cell1->prev()->IsUndefined(isolate));
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ JSFinalizationGroup::Unregister(finalization_group, key1, isolate);
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 0);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key2), 2, *weak_cell2b,
+ *weak_cell2a);
+ }
+
+ // Both weak_cell1a and weak_cell1b removed from active_cells.
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 2,
+ *weak_cell2b, *weak_cell2a);
+ VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+}
+
+TEST(TestUnregisterActiveAndClearedCells) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ Handle<JSObject> key1 = CreateKey("key1", isolate);
+ Handle<JSObject> key2 = CreateKey("key2", isolate);
+ Handle<Object> undefined =
+ handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
+
+ Handle<WeakCell> weak_cell1a = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key1, isolate);
+ Handle<WeakCell> weak_cell1b = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key1, isolate);
+
+ Handle<WeakCell> weak_cell2a = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key2, isolate);
+ Handle<WeakCell> weak_cell2b = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key2, isolate);
+
+ NullifyWeakCell(weak_cell2a, isolate);
+
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 3,
+ *weak_cell2b, *weak_cell1b, *weak_cell1a);
+ VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 1,
+ *weak_cell2a);
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 2, *weak_cell1b,
+ *weak_cell1a);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key2), 2, *weak_cell2b,
+ *weak_cell2a);
+ }
- ClearWeakCell(weak_cell1, isolate);
- CHECK(weak_factory->cleared_cells()->IsUndefined(isolate));
+ JSFinalizationGroup::Unregister(finalization_group, key2, isolate);
+
+ // Both weak_cell2a and weak_cell2b removed.
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 2,
+ *weak_cell1b, *weak_cell1a);
+ VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 2, *weak_cell1b,
+ *weak_cell1a);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key2), 0);
+ }
}
-TEST(TestJSWeakCellClearTwice) {
+TEST(TestWeakCellUnregisterTwice) {
FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
- Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
+ Handle<JSObject> key1 = CreateKey("key1", isolate);
+ Handle<Object> undefined =
+ handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
+
+ Handle<WeakCell> weak_cell1 = FinalizationGroupRegister(
+ finalization_group, js_object, undefined, key1, isolate);
+
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 1,
+ *weak_cell1);
+ VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 1, *weak_cell1);
+ }
+
+ JSFinalizationGroup::Unregister(finalization_group, key1, isolate);
+
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 0);
+ }
+
+ JSFinalizationGroup::Unregister(finalization_group, key1, isolate);
- ClearWeakCell(weak_cell1, isolate);
- ClearWeakCell(weak_cell1, isolate);
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 0);
+ }
}
-TEST(TestJSWeakCellClearPopped) {
+TEST(TestWeakCellUnregisterPopped) {
FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
HandleScope outer_scope(isolate);
- Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<JSObject> key1 = CreateKey("key1", isolate);
+ Handle<Object> holdings1 = factory->NewStringFromAsciiChecked("holdings1");
+ Handle<WeakCell> weak_cell1 = FinalizationGroupRegister(
+ finalization_group, js_object, holdings1, key1, isolate);
- Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
NullifyWeakCell(weak_cell1, isolate);
- JSWeakCell cleared1 = weak_factory->PopClearedCell(isolate);
- CHECK_EQ(cleared1, *weak_cell1);
- ClearWeakCell(weak_cell1, isolate);
+ CHECK(finalization_group->NeedsCleanup());
+ Object cleared1 =
+ JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ CHECK_EQ(cleared1, *holdings1);
+
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 0);
+ }
+
+ JSFinalizationGroup::Unregister(finalization_group, key1, isolate);
+
+ VerifyWeakCellChain(isolate, finalization_group->active_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ {
+ Handle<ObjectHashTable> key_map =
+ handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
+ VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 0);
+ }
+}
+
+TEST(TestWeakCellUnregisterNonexistentKey) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSFinalizationGroup> finalization_group =
+ ConstructJSFinalizationGroup(isolate);
+ Handle<JSObject> key1 = CreateKey("key1", isolate);
+
+ JSFinalizationGroup::Unregister(finalization_group, key1, isolate);
}
TEST(TestJSWeakRef) {
@@ -350,7 +653,7 @@ TEST(TestJSWeakRef) {
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
// This doesn't add the target into the KeepDuringJob set.
- Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(isolate, js_object);
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
CcTest::CollectAllGarbage();
CHECK(!inner_weak_ref->target()->IsUndefined(isolate));
@@ -384,7 +687,7 @@ TEST(TestJSWeakRefIncrementalMarking) {
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
// This doesn't add the target into the KeepDuringJob set.
- Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(isolate, js_object);
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
heap::SimulateIncrementalMarking(heap, true);
CcTest::CollectAllGarbage();
@@ -415,7 +718,7 @@ TEST(TestJSWeakRefKeepDuringJob) {
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
- Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(isolate, js_object);
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
heap->AddKeepDuringJobTarget(js_object);
weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
@@ -452,7 +755,7 @@ TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
- Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(isolate, js_object);
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
heap->AddKeepDuringJobTarget(js_object);
weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
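
Taken together, the renamed tests above pin down one bookkeeping contract: register() chains a new WeakCell on the group's active_cells list and indexes it in key_map, NullifyWeakCell (standing in for the GC clearing the target) moves a cell to cleared_cells, and Unregister drops every cell recorded under a key from both lists. A minimal sketch of that life cycle in the same cctest style, reusing the ConstructJSFinalizationGroup, CreateKey, FinalizationGroupRegister, NullifyWeakCell and Verify* helpers the tests above rely on (the test name here is illustrative, not part of the patch):

TEST(TestFinalizationGroupCellLifeCycleSketch) {
  FLAG_harmony_weak_refs = true;
  CcTest::InitializeVM();
  LocalContext context;
  Isolate* isolate = CcTest::i_isolate();
  HandleScope outer_scope(isolate);
  Handle<JSFinalizationGroup> finalization_group =
      ConstructJSFinalizationGroup(isolate);
  Handle<JSObject> js_object =
      isolate->factory()->NewJSObject(isolate->object_function());
  Handle<JSObject> key = CreateKey("key", isolate);
  Handle<Object> undefined =
      handle(ReadOnlyRoots(isolate).undefined_value(), isolate);

  // Registration puts the cell at the head of active_cells.
  Handle<WeakCell> weak_cell = FinalizationGroupRegister(
      finalization_group, js_object, undefined, key, isolate);
  VerifyWeakCellChain(isolate, finalization_group->active_cells(), 1,
                      *weak_cell);
  VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);

  // Nullifying moves it to cleared_cells and makes the group need cleanup.
  NullifyWeakCell(weak_cell, isolate);
  VerifyWeakCellChain(isolate, finalization_group->active_cells(), 0);
  VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 1,
                      *weak_cell);
  CHECK(finalization_group->NeedsCleanup());
}
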
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index 5a4dcd588e..d7c13f8d5d 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -938,32 +938,17 @@ class IsolateGenesisThread : public JoinableThread {
TEST(ExtensionsRegistration) {
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
const int kNThreads = 10;
-#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
- const int kNThreads = 4;
#elif V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT
const int kNThreads = 10;
#else
const int kNThreads = 40;
#endif
- v8::RegisterExtension(new v8::Extension("test0",
- kSimpleExtensionSource));
- v8::RegisterExtension(new v8::Extension("test1",
- kSimpleExtensionSource));
- v8::RegisterExtension(new v8::Extension("test2",
- kSimpleExtensionSource));
- v8::RegisterExtension(new v8::Extension("test3",
- kSimpleExtensionSource));
- v8::RegisterExtension(new v8::Extension("test4",
- kSimpleExtensionSource));
- v8::RegisterExtension(new v8::Extension("test5",
- kSimpleExtensionSource));
- v8::RegisterExtension(new v8::Extension("test6",
- kSimpleExtensionSource));
- v8::RegisterExtension(new v8::Extension("test7",
- kSimpleExtensionSource));
- const char* extension_names[] = { "test0", "test1",
- "test2", "test3", "test4",
- "test5", "test6", "test7" };
+ const char* extension_names[] = {"test0", "test1", "test2", "test3",
+ "test4", "test5", "test6", "test7"};
+ for (const char* name : extension_names) {
+ v8::RegisterExtension(
+ v8::base::make_unique<v8::Extension>(name, kSimpleExtensionSource));
+ }
std::vector<JoinableThread*> threads;
threads.reserve(kNThreads);
for (int i = 0; i < kNThreads; i++) {
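
The rewritten registration loop above reflects an API change carried by this update: v8::RegisterExtension now takes ownership of the extension through a unique_ptr instead of a raw pointer, which is why the eight near-identical new-expressions collapse into a loop over the name table. A hedged sketch of the new shape (the extension name and source string are placeholders):

// Old: v8::RegisterExtension(new v8::Extension("my_ext", source));
// New: ownership is handed over explicitly, so it cannot leak on the
// caller side. In-tree code builds the pointer with v8::base::make_unique.
auto extension = v8::base::make_unique<v8::Extension>(
    "my_ext", "native function myFunc();");
v8::RegisterExtension(std::move(extension));
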
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index c1789560fa..271c57b92d 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -47,93 +47,6 @@ using F = void*(int x, int y, int p2, int p3, int p4);
using F3 = void*(void* p0, int p1, int p2, int p3, int p4);
using F5 = int(void*, void*, void*, void*, void*);
-TEST(LoadAndStoreWithRepresentation) {
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handles(isolate);
-
- auto buffer = AllocateAssemblerBuffer();
- MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
- buffer->CreateView());
- MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
-
- __ sub(sp, sp, Operand(1 * kPointerSize));
- Label exit;
-
- // Test 1.
- __ mov(r0, Operand(1)); // Test number.
- __ mov(r1, Operand(0));
- __ str(r1, MemOperand(sp, 0 * kPointerSize));
- __ mov(r2, Operand(-1));
- __ Store(r2, MemOperand(sp, 0 * kPointerSize), Representation::UInteger8());
- __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
- __ mov(r2, Operand(255));
- __ cmp(r3, r2);
- __ b(ne, &exit);
- __ mov(r2, Operand(255));
- __ Load(r3, MemOperand(sp, 0 * kPointerSize), Representation::UInteger8());
- __ cmp(r3, r2);
- __ b(ne, &exit);
-
- // Test 2.
- __ mov(r0, Operand(2)); // Test number.
- __ mov(r1, Operand(0));
- __ str(r1, MemOperand(sp, 0 * kPointerSize));
- __ mov(r2, Operand(-1));
- __ Store(r2, MemOperand(sp, 0 * kPointerSize), Representation::Integer8());
- __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
- __ mov(r2, Operand(255));
- __ cmp(r3, r2);
- __ b(ne, &exit);
- __ mov(r2, Operand(-1));
- __ Load(r3, MemOperand(sp, 0 * kPointerSize), Representation::Integer8());
- __ cmp(r3, r2);
- __ b(ne, &exit);
-
- // Test 3.
- __ mov(r0, Operand(3)); // Test number.
- __ mov(r1, Operand(0));
- __ str(r1, MemOperand(sp, 0 * kPointerSize));
- __ mov(r2, Operand(-1));
- __ Store(r2, MemOperand(sp, 0 * kPointerSize), Representation::UInteger16());
- __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
- __ mov(r2, Operand(65535));
- __ cmp(r3, r2);
- __ b(ne, &exit);
- __ mov(r2, Operand(65535));
- __ Load(r3, MemOperand(sp, 0 * kPointerSize), Representation::UInteger16());
- __ cmp(r3, r2);
- __ b(ne, &exit);
-
- // Test 4.
- __ mov(r0, Operand(4)); // Test number.
- __ mov(r1, Operand(0));
- __ str(r1, MemOperand(sp, 0 * kPointerSize));
- __ mov(r2, Operand(-1));
- __ Store(r2, MemOperand(sp, 0 * kPointerSize), Representation::Integer16());
- __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
- __ mov(r2, Operand(65535));
- __ cmp(r3, r2);
- __ b(ne, &exit);
- __ mov(r2, Operand(-1));
- __ Load(r3, MemOperand(sp, 0 * kPointerSize), Representation::Integer16());
- __ cmp(r3, r2);
- __ b(ne, &exit);
-
- __ mov(r0, Operand(0)); // Success.
- __ bind(&exit);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ bx(lr);
-
- CodeDesc desc;
- masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
-
- // Call the function from C++.
- auto f = GeneratedCode<F5>::FromCode(*code);
- CHECK(!f.Call(0, 0, 0, 0, 0));
-}
-
TEST(ExtractLane) {
if (!CpuFeatures::IsSupported(NEON)) return;
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index a110ed76aa..7dff5dbe8c 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -528,7 +528,8 @@ TEST(OperandOffset) {
__ j(not_equal, &exit);
__ incq(rax);
- Operand sp2c2 = Operand(rsp, rcx, times_pointer_size, 2 * kSystemPointerSize);
+ Operand sp2c2 =
+ Operand(rsp, rcx, times_system_pointer_size, 2 * kSystemPointerSize);
// Test 6.
__ movl(rdx, sp2c2); // Sanity check.
@@ -582,7 +583,7 @@ TEST(OperandOffset) {
__ incq(rax);
Operand bp2c4 =
- Operand(rbp, rcx, times_pointer_size, -4 * kSystemPointerSize);
+ Operand(rbp, rcx, times_system_pointer_size, -4 * kSystemPointerSize);
// Test 14:
__ movl(rdx, bp2c4); // Sanity check.
@@ -638,7 +639,7 @@ TEST(OperandOffset) {
__ incq(rax);
Operand bx2c2 =
- Operand(rbx, rcx, times_pointer_size, -2 * kSystemPointerSize);
+ Operand(rbx, rcx, times_system_pointer_size, -2 * kSystemPointerSize);
// Test 23.
__ movl(rdx, bx2c2); // Sanity check.
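
The three small OperandOffset hunks above are one mechanical rename: the scale factor times_pointer_size becomes times_system_pointer_size, matching the kSystemPointerSize offsets already on these lines. The pattern in isolation (slot is a local name used only for illustration):

// Address rsp + rcx * kSystemPointerSize + 2 * kSystemPointerSize.
Operand slot =
    Operand(rsp, rcx, times_system_pointer_size, 2 * kSystemPointerSize);
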
@@ -807,167 +808,6 @@ TEST(OperandOffset) {
CHECK_EQ(0, result);
}
-
-TEST(LoadAndStoreWithRepresentation) {
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handles(isolate);
- auto buffer = AllocateAssemblerBuffer();
- MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
- buffer->CreateView());
-
- MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
- EntryCode(masm);
- __ subq(rsp, Immediate(1 * kSystemPointerSize));
- Label exit;
-
- // Test 1.
- __ movq(rax, Immediate(1)); // Test number.
- __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
- __ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
- Representation::UInteger8());
- __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
- __ movl(rdx, Immediate(255));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
- Representation::UInteger8());
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
-
- // Test 2.
- __ movq(rax, Immediate(2)); // Test number.
- __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
- __ Set(rcx, V8_2PART_UINT64_C(0xDEADBEAF, 12345678));
- __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx, Representation::Smi());
- __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
- __ Set(rdx, V8_2PART_UINT64_C(0xDEADBEAF, 12345678));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize), Representation::Smi());
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
-
- // Test 3.
- __ movq(rax, Immediate(3)); // Test number.
- __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
- __ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
- Representation::Integer32());
- __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
- __ movl(rdx, Immediate(-1));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
- Representation::Integer32());
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
-
- // Test 4.
- __ movq(rax, Immediate(4)); // Test number.
- __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
- __ movl(rcx, Immediate(0x44332211));
- __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
- Representation::HeapObject());
- __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
- __ movl(rdx, Immediate(0x44332211));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
- Representation::HeapObject());
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
-
- // Test 5.
- __ movq(rax, Immediate(5)); // Test number.
- __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
- __ Set(rcx, V8_2PART_UINT64_C(0x12345678, DEADBEAF));
- __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx, Representation::Tagged());
- __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
- __ Set(rdx, V8_2PART_UINT64_C(0x12345678, DEADBEAF));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize), Representation::Tagged());
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
-
- // Test 6.
- __ movq(rax, Immediate(6)); // Test number.
- __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
- __ Set(rcx, V8_2PART_UINT64_C(0x11223344, 55667788));
- __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
- Representation::External());
- __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
- __ Set(rdx, V8_2PART_UINT64_C(0x11223344, 55667788));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
- Representation::External());
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
-
- // Test 7.
- __ movq(rax, Immediate(7)); // Test number.
- __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
- __ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
- Representation::Integer8());
- __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
- __ movl(rdx, Immediate(255));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
- Representation::Integer8());
- __ movq(rcx, Immediate(-1));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
-
- // Test 8.
- __ movq(rax, Immediate(8)); // Test number.
- __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
- __ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
- Representation::Integer16());
- __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
- __ movl(rdx, Immediate(65535));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
- Representation::Integer16());
- __ movq(rcx, Immediate(-1));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
-
- // Test 9.
- __ movq(rax, Immediate(9)); // Test number.
- __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
- __ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
- Representation::UInteger16());
- __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
- __ movl(rdx, Immediate(65535));
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
- Representation::UInteger16());
- __ cmpq(rcx, rdx);
- __ j(not_equal, &exit);
-
- __ xorq(rax, rax); // Success.
- __ bind(&exit);
- __ addq(rsp, Immediate(1 * kSystemPointerSize));
- ExitCode(masm);
- __ ret(0);
-
- CodeDesc desc;
- masm->GetCode(isolate, &desc);
- buffer->MakeExecutable();
- // Call the function from C++.
- auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer->start());
- int result = f.Call();
- CHECK_EQ(0, result);
-}
-
void TestFloat32x4Abs(MacroAssembler* masm, Label* exit, float x, float y,
float z, float w) {
__ subq(rsp, Immediate(kSimd128Size));
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index aa2b23c413..78b1e6a981 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "src/heap/factory.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index 7d76b170d9..61945bc1fe 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "src/api-inl.h"
+#include "src/function-kind.h"
+#include "src/globals.h"
#include "src/handles-inl.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
@@ -251,5 +253,162 @@ TEST(EnumCache) {
}
}
+#define TEST_FUNCTION_KIND(Name) \
+ TEST(Name) { \
+ for (int i = 0; i < FunctionKind::kLastFunctionKind; i++) { \
+ FunctionKind kind = static_cast<FunctionKind>(i); \
+ CHECK_EQ(FunctionKind##Name(kind), Name(kind)); \
+ } \
+ }
+
+bool FunctionKindIsArrowFunction(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kArrowFunction:
+ case FunctionKind::kAsyncArrowFunction:
+ return true;
+ default:
+ return false;
+ }
+}
+TEST_FUNCTION_KIND(IsArrowFunction)
+
+bool FunctionKindIsAsyncGeneratorFunction(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kAsyncConciseGeneratorMethod:
+ case FunctionKind::kAsyncGeneratorFunction:
+ return true;
+ default:
+ return false;
+ }
+}
+TEST_FUNCTION_KIND(IsAsyncGeneratorFunction)
+
+bool FunctionKindIsGeneratorFunction(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kConciseGeneratorMethod:
+ case FunctionKind::kAsyncConciseGeneratorMethod:
+ case FunctionKind::kGeneratorFunction:
+ case FunctionKind::kAsyncGeneratorFunction:
+ return true;
+ default:
+ return false;
+ }
+}
+TEST_FUNCTION_KIND(IsGeneratorFunction)
+
+bool FunctionKindIsAsyncFunction(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kAsyncFunction:
+ case FunctionKind::kAsyncArrowFunction:
+ case FunctionKind::kAsyncConciseMethod:
+ case FunctionKind::kAsyncConciseGeneratorMethod:
+ case FunctionKind::kAsyncGeneratorFunction:
+ return true;
+ default:
+ return false;
+ }
+}
+TEST_FUNCTION_KIND(IsAsyncFunction)
+
+bool FunctionKindIsConciseMethod(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kConciseMethod:
+ case FunctionKind::kConciseGeneratorMethod:
+ case FunctionKind::kAsyncConciseMethod:
+ case FunctionKind::kAsyncConciseGeneratorMethod:
+ case FunctionKind::kClassMembersInitializerFunction:
+ return true;
+ default:
+ return false;
+ }
+}
+TEST_FUNCTION_KIND(IsConciseMethod)
+
+bool FunctionKindIsAccessorFunction(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kGetterFunction:
+ case FunctionKind::kSetterFunction:
+ return true;
+ default:
+ return false;
+ }
+}
+TEST_FUNCTION_KIND(IsAccessorFunction)
+
+bool FunctionKindIsDefaultConstructor(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kDefaultBaseConstructor:
+ case FunctionKind::kDefaultDerivedConstructor:
+ return true;
+ default:
+ return false;
+ }
+}
+TEST_FUNCTION_KIND(IsDefaultConstructor)
+
+bool FunctionKindIsBaseConstructor(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kBaseConstructor:
+ case FunctionKind::kDefaultBaseConstructor:
+ return true;
+ default:
+ return false;
+ }
+}
+TEST_FUNCTION_KIND(IsBaseConstructor)
+
+bool FunctionKindIsDerivedConstructor(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kDefaultDerivedConstructor:
+ case FunctionKind::kDerivedConstructor:
+ return true;
+ default:
+ return false;
+ }
+}
+TEST_FUNCTION_KIND(IsDerivedConstructor)
+
+bool FunctionKindIsClassConstructor(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kBaseConstructor:
+ case FunctionKind::kDefaultBaseConstructor:
+ case FunctionKind::kDefaultDerivedConstructor:
+ case FunctionKind::kDerivedConstructor:
+ return true;
+ default:
+ return false;
+ }
+}
+TEST_FUNCTION_KIND(IsClassConstructor)
+
+bool FunctionKindIsConstructable(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kGetterFunction:
+ case FunctionKind::kSetterFunction:
+ case FunctionKind::kArrowFunction:
+ case FunctionKind::kAsyncArrowFunction:
+ case FunctionKind::kAsyncFunction:
+ case FunctionKind::kAsyncConciseMethod:
+ case FunctionKind::kAsyncConciseGeneratorMethod:
+ case FunctionKind::kAsyncGeneratorFunction:
+ case FunctionKind::kGeneratorFunction:
+ case FunctionKind::kConciseGeneratorMethod:
+ case FunctionKind::kConciseMethod:
+ case FunctionKind::kClassMembersInitializerFunction:
+ return false;
+ default:
+ return true;
+ }
+}
+TEST_FUNCTION_KIND(IsConstructable)
+
+bool FunctionKindIsStrictFunctionWithoutPrototype(FunctionKind kind) {
+ return IsArrowFunction(kind) || IsConciseMethod(kind) ||
+ IsAccessorFunction(kind);
+}
+TEST_FUNCTION_KIND(IsStrictFunctionWithoutPrototype)
+
+#undef TEST_FUNCTION_KIND
+
} // namespace internal
} // namespace v8
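
The TEST_FUNCTION_KIND macro added above pairs each predicate from the new src/function-kind.h header with a local reference implementation and checks that the two agree for every FunctionKind value. A sketch of how one more predicate would plug into the pattern, placed before the #undef (IsModule and its reference implementation are illustrative here, not part of the patch):

// Reference implementation in the same exhaustive-switch style.
bool FunctionKindIsModule(FunctionKind kind) {
  switch (kind) {
    case FunctionKind::kModule:
      return true;
    default:
      return false;
  }
}
// Expands to TEST(IsModule), which compares FunctionKindIsModule(kind)
// against IsModule(kind) for each kind below kLastFunctionKind.
TEST_FUNCTION_KIND(IsModule)
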
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 2634c30ec0..2722e820ed 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -50,6 +50,7 @@
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/token.h"
+#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
#include "test/cctest/cctest.h"
#include "test/cctest/scope-test-helper.h"
@@ -91,6 +92,8 @@ TEST(AutoSemicolonToken) {
bool TokenIsAnyIdentifier(Token::Value token) {
switch (token) {
case Token::IDENTIFIER:
+ case Token::GET:
+ case Token::SET:
case Token::ASYNC:
case Token::AWAIT:
case Token::YIELD:
@@ -115,6 +118,8 @@ bool TokenIsCallable(Token::Value token) {
switch (token) {
case Token::SUPER:
case Token::IDENTIFIER:
+ case Token::GET:
+ case Token::SET:
case Token::ASYNC:
case Token::AWAIT:
case Token::YIELD:
@@ -139,6 +144,8 @@ bool TokenIsValidIdentifier(Token::Value token, LanguageMode language_mode,
bool is_generator, bool disallow_await) {
switch (token) {
case Token::IDENTIFIER:
+ case Token::GET:
+ case Token::SET:
case Token::ASYNC:
return true;
case Token::YIELD:
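
These token-classification hunks track a scanner change: get and set now scan as dedicated tokens (Token::GET, Token::SET) but must keep behaving as ordinary identifiers everywhere outside an accessor position. At the JS level all of the following still parses, which is what the widened predicates assert (a sketch using the cctest CompileRun helper):

// get/set remain contextual: plain identifiers here, accessor keywords
// only in the object literal's property position.
CompileRun(
    "var get = 1;"
    "var set = 2;"
    "({ get x() { return get + set; } });");
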
@@ -613,9 +620,8 @@ TEST(ScanHTMLEndComments) {
i::Scanner scanner(stream.get(), false);
scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
- i::AstValueFactory ast_value_factory(&zone,
- i_isolate->ast_string_constants(),
- i_isolate->heap()->HashSeed());
+ i::AstValueFactory ast_value_factory(
+ &zone, i_isolate->ast_string_constants(), HashSeed(i_isolate));
i::PendingCompilationErrorHandler pending_error_handler;
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
@@ -632,9 +638,8 @@ TEST(ScanHTMLEndComments) {
i::Scanner scanner(stream.get(), false);
scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
- i::AstValueFactory ast_value_factory(&zone,
- i_isolate->ast_string_constants(),
- i_isolate->heap()->HashSeed());
+ i::AstValueFactory ast_value_factory(
+ &zone, i_isolate->ast_string_constants(), HashSeed(i_isolate));
i::PendingCompilationErrorHandler pending_error_handler;
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
@@ -705,9 +710,8 @@ TEST(StandAlonePreParser) {
scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
- i::AstValueFactory ast_value_factory(&zone,
- i_isolate->ast_string_constants(),
- i_isolate->heap()->HashSeed());
+ i::AstValueFactory ast_value_factory(
+ &zone, i_isolate->ast_string_constants(), HashSeed(i_isolate));
i::PendingCompilationErrorHandler pending_error_handler;
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
@@ -725,8 +729,8 @@ TEST(StandAlonePreParserNoNatives) {
v8::V8::Initialize();
i::Isolate* isolate = CcTest::i_isolate();
- CcTest::i_isolate()->stack_guard()->SetStackLimit(
- i::GetCurrentStackPosition() - 128 * 1024);
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
const char* programs[] = {"%ArgleBargle(glop);", "var x = %_IsSmi(42);",
nullptr};
@@ -738,10 +742,9 @@ TEST(StandAlonePreParserNoNatives) {
scanner.Initialize();
// Preparser defaults to disallowing natives syntax.
- i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- i::AstValueFactory ast_value_factory(
- &zone, CcTest::i_isolate()->ast_string_constants(),
- CcTest::i_isolate()->heap()->HashSeed());
+ i::Zone zone(isolate->allocator(), ZONE_NAME);
+ i::AstValueFactory ast_value_factory(&zone, isolate->ast_string_constants(),
+ HashSeed(isolate));
i::PendingCompilationErrorHandler pending_error_handler;
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
@@ -772,15 +775,14 @@ TEST(RegressChromium62639) {
auto stream = i::ScannerStream::ForTesting(program);
i::Scanner scanner(stream.get(), false);
scanner.Initialize();
- i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- i::AstValueFactory ast_value_factory(
- &zone, CcTest::i_isolate()->ast_string_constants(),
- CcTest::i_isolate()->heap()->HashSeed());
+ i::Zone zone(isolate->allocator(), ZONE_NAME);
+ i::AstValueFactory ast_value_factory(&zone, isolate->ast_string_constants(),
+ HashSeed(isolate));
i::PendingCompilationErrorHandler pending_error_handler;
- i::PreParser preparser(
- &zone, &scanner, CcTest::i_isolate()->stack_guard()->real_climit(),
- &ast_value_factory, &pending_error_handler,
- isolate->counters()->runtime_call_stats(), isolate->logger());
+ i::PreParser preparser(&zone, &scanner, isolate->stack_guard()->real_climit(),
+ &ast_value_factory, &pending_error_handler,
+ isolate->counters()->runtime_call_stats(),
+ isolate->logger());
i::PreParser::PreParseResult result = preparser.PreParseProgram();
// Even in the case of a syntax error, kPreParseSuccess is returned.
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
@@ -807,10 +809,9 @@ TEST(PreParseOverflow) {
i::Scanner scanner(stream.get(), false);
scanner.Initialize();
- i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- i::AstValueFactory ast_value_factory(
- &zone, CcTest::i_isolate()->ast_string_constants(),
- CcTest::i_isolate()->heap()->HashSeed());
+ i::Zone zone(isolate->allocator(), ZONE_NAME);
+ i::AstValueFactory ast_value_factory(&zone, isolate->ast_string_constants(),
+ HashSeed(isolate));
i::PendingCompilationErrorHandler pending_error_handler;
i::PreParser preparser(
&zone, &scanner, stack_limit, &ast_value_factory, &pending_error_handler,
@@ -845,19 +846,9 @@ TEST(StreamScanner) {
std::unique_ptr<i::Utf16CharacterStream> stream1(
i::ScannerStream::ForTesting(str1));
i::Token::Value expectations1[] = {
- i::Token::LBRACE,
- i::Token::IDENTIFIER,
- i::Token::IDENTIFIER,
- i::Token::FOR,
- i::Token::COLON,
- i::Token::MUL,
- i::Token::DIV,
- i::Token::LT,
- i::Token::SUB,
- i::Token::IDENTIFIER,
- i::Token::EOS,
- i::Token::ILLEGAL
- };
+ i::Token::LBRACE, i::Token::IDENTIFIER, i::Token::GET, i::Token::FOR,
+ i::Token::COLON, i::Token::MUL, i::Token::DIV, i::Token::LT,
+ i::Token::SUB, i::Token::IDENTIFIER, i::Token::EOS, i::Token::ILLEGAL};
TestStreamScanner(stream1.get(), expectations1, 0, 0);
const char* str2 = "case default const {THIS\nPART\nSKIPPED} do";
@@ -911,7 +902,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->ast_string_constants(),
- CcTest::i_isolate()->heap()->HashSeed());
+ HashSeed(CcTest::i_isolate()));
const i::AstRawString* current_symbol =
scanner.CurrentSymbol(&ast_value_factory);
ast_value_factory.Internalize(CcTest::i_isolate());
@@ -1101,8 +1092,7 @@ TEST(ScopeUsesArgumentsSuperThis) {
if ((source_data[i].expected & THIS) != 0) {
// Currently the is_used() flag is conservative; all variables in a
// script scope are marked as used.
- CHECK(scope->LookupForTesting(info.ast_value_factory()->this_string())
- ->is_used());
+ CHECK(scope->GetReceiverScope()->receiver()->is_used());
}
if (is_sloppy(scope->language_mode())) {
CHECK_EQ((source_data[i].expected & EVAL) != 0,
@@ -1596,10 +1586,9 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
std::unique_ptr<i::Utf16CharacterStream> stream(
i::ScannerStream::For(isolate, source));
i::Scanner scanner(stream.get(), is_module);
- i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- i::AstValueFactory ast_value_factory(
- &zone, CcTest::i_isolate()->ast_string_constants(),
- CcTest::i_isolate()->heap()->HashSeed());
+ i::Zone zone(isolate->allocator(), ZONE_NAME);
+ i::AstValueFactory ast_value_factory(&zone, isolate->ast_string_constants(),
+ HashSeed(isolate));
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
isolate->counters()->runtime_call_stats(),
@@ -3118,8 +3107,7 @@ TEST(FuncNameInferrerBasic) {
ExpectString("Ctor()", "Ctor.foo5");
ExpectString("%FunctionGetInferredName(obj1.foo6)", "obj1.foo6");
ExpectString("%FunctionGetInferredName(obj2.foo7)", "obj2.foo7");
- ExpectString("%FunctionGetInferredName(obj3[1])",
- "obj3.(anonymous function)");
+ ExpectString("%FunctionGetInferredName(obj3[1])", "obj3.<computed>");
ExpectString("%FunctionGetInferredName(obj4[1])", "");
ExpectString("%FunctionGetInferredName(obj5['foo9'])", "obj5.foo9");
ExpectString("%FunctionGetInferredName(obj6.obj7.foo10)", "obj6.obj7.foo10");
@@ -3236,7 +3224,7 @@ TEST(SerializationOfMaybeAssignmentFlag) {
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
i::Context context = f->context();
i::AstValueFactory avf(&zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
const i::AstRawString* name = avf.GetOneByteString("result");
avf.Internalize(isolate);
i::Handle<i::String> str = name->string();
@@ -3286,7 +3274,7 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
i::Context context = f->context();
i::AstValueFactory avf(&zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
const i::AstRawString* name_x = avf.GetOneByteString("x");
avf.Internalize(isolate);
@@ -3473,7 +3461,6 @@ TEST(InnerAssignment) {
i::Variable* var = scope->LookupForTesting(var_name);
bool expected = outers[i].assigned || inners[j].assigned;
CHECK_NOT_NULL(var);
- CHECK(var->is_used() || !expected);
bool is_maybe_assigned = var->maybe_assigned() == i::kMaybeAssigned;
CHECK(is_maybe_assigned == expected ||
(is_maybe_assigned && inners[j].allow_error_in_inner_function));
@@ -3615,7 +3602,7 @@ static void TestMaybeAssigned(Input input, const char* variable, bool module,
}
CHECK_NOT_NULL(var);
- CHECK(var->is_used());
+ CHECK_IMPLIES(input.assigned, var->is_used());
STATIC_ASSERT(true == i::kMaybeAssigned);
CHECK_EQ(input.assigned, var->maybe_assigned() == i::kMaybeAssigned);
}
@@ -4202,7 +4189,7 @@ namespace {
i::Scope* DeserializeFunctionScope(i::Isolate* isolate, i::Zone* zone,
i::Handle<i::JSObject> m, const char* name) {
i::AstValueFactory avf(zone, isolate->ast_string_constants(),
- isolate->heap()->HashSeed());
+ HashSeed(isolate));
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(
i::JSReceiver::GetProperty(isolate, m, name).ToHandleChecked());
i::DeclarationScope* script_scope =
@@ -5622,6 +5609,8 @@ TEST(PrivateMembersInNonClassNoErrors) {
{"function() {", "}"},
{"() => {", "}"},
{"class C { test() {", "} }"},
+ {"const {", "} = {}"},
+ {"({", "} = {})"},
{nullptr, nullptr}};
const char* class_body_data[] = {
"#a = 1",
@@ -6086,7 +6075,7 @@ TEST(PrivateStaticClassFieldsErrors) {
TEST(PrivateNameNoErrors) {
// clang-format off
const char* context_data[][2] = {
- {"", ""},
+ {"class X { bar() { ", " } }"},
{"\"use strict\";", ""},
{nullptr, nullptr}
};
@@ -6138,6 +6127,9 @@ TEST(PrivateNameErrors) {
// clang-format off
const char* context_data[][2] = {
{"", ""},
+ {"function t() { ", " }"},
+ {"var t => { ", " }"},
+ {"var t = { [ ", " ] }"},
{"\"use strict\";", ""},
{nullptr, nullptr}
};
@@ -7599,7 +7591,7 @@ TEST(ModuleParsingInternals) {
i::VariableLocation::MODULE);
CHECK(declarations->AtForTest(7)->var()->raw_name()->IsOneByteEqualTo(
- "*default*"));
+ ".default"));
CHECK(declarations->AtForTest(7)->var()->mode() == i::VariableMode::kConst);
CHECK(declarations->AtForTest(7)->var()->binding_needs_init());
CHECK(declarations->AtForTest(7)->var()->location() ==
@@ -7688,7 +7680,7 @@ TEST(ModuleParsingInternals) {
entry = descriptor->regular_exports()
.find(declarations->AtForTest(7)->var()->raw_name())
->second;
- CheckEntry(entry, "default", "*default*", nullptr, -1);
+ CheckEntry(entry, "default", ".default", nullptr, -1);
entry = descriptor->regular_exports()
.find(declarations->AtForTest(12)->var()->raw_name())
->second;
@@ -9385,11 +9377,10 @@ TEST(EscapedKeywords) {
"class C { st\\u0061tic *bar() {} }",
"class C { st\\u0061tic get bar() {} }",
"class C { st\\u0061tic set bar() {} }",
-
- // TODO(adamk): These should not be errors in sloppy mode.
- "(y\\u0069eld);",
- "var y\\u0069eld = 1;",
- "var { y\\u0069eld } = {};",
+ "(async ()=>{\\u0061wait 100})()",
+ "({\\u0067et get(){}})",
+ "({\\u0073et set(){}})",
+ "(async ()=>{var \\u0061wait = 100})()",
nullptr
};
// clang-format on
@@ -9403,6 +9394,9 @@ TEST(EscapedKeywords) {
"var l\\u0065t = 1;",
"l\\u0065t = 1;",
"(l\\u0065t === 1);",
+ "(y\\u0069eld);",
+ "var y\\u0069eld = 1;",
+ "var { y\\u0069eld } = {};",
nullptr
};
// clang-format on
@@ -11297,7 +11291,7 @@ TEST(LexicalLoopVariable) {
}
}
-TEST(PrivateNamesSyntaxError) {
+TEST(PrivateNamesSyntaxErrorWithScopeAnalysis) {
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
LocalContext env;
@@ -11379,23 +11373,10 @@ TEST(PrivateNamesSyntaxError) {
"}",
};
- // TODO(gsathya): The preparser does not track unresolved
- // variables in top level function which fails this test.
- // https://bugs.chromium.org/p/v8/issues/detail?id=7468
- const char* parser_data[] = {
- "function t() {"
- " return this.#foo;"
- "}",
- };
-
for (const char* source : data) {
CHECK(test(source, true));
CHECK(test(source, false));
}
-
- for (const char* source : parser_data) {
- CHECK(test(source, false));
- }
}
TEST(HashbangSyntax) {
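
A change threaded through most hunks in this file: the hash seed now comes from the free function HashSeed(isolate), declared in src/hash-seed-inl.h (visible in the include added to test-serialize.cc below), instead of isolate->heap()->HashSeed(). The new AstValueFactory construction in isolation:

i::Zone zone(isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone, isolate->ast_string_constants(),
                                     HashSeed(isolate));
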
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index c9d7f1da68..27cb4f841b 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -491,7 +491,7 @@ TEST(SampleIds) {
CHECK_EQ(3, profile->samples_count());
unsigned expected_id[] = {3, 5, 7};
for (int i = 0; i < 3; i++) {
- CHECK_EQ(expected_id[i], profile->sample(i)->id());
+ CHECK_EQ(expected_id[i], profile->sample(i).node->id());
}
}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 950237a105..49dcc49c3e 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -34,8 +34,10 @@
#include "src/assembler-arch.h"
#include "src/ast/ast.h"
#include "src/char-predicates-inl.h"
+#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
+#include "src/regexp/interpreter-irregexp.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -44,36 +46,28 @@
#include "src/string-stream.h"
#include "src/unicode-inl.h"
#include "src/v8.h"
+#include "src/zone/zone-list-inl.h"
-#ifdef V8_INTERPRETED_REGEXP
-#include "src/regexp/interpreter-irregexp.h"
-#else // V8_INTERPRETED_REGEXP
-#include "src/macro-assembler.h"
#if V8_TARGET_ARCH_ARM
#include "src/regexp/arm/regexp-macro-assembler-arm.h"
-#endif
-#if V8_TARGET_ARCH_ARM64
+#elif V8_TARGET_ARCH_ARM64
#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
-#endif
-#if V8_TARGET_ARCH_S390
+#elif V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
-#endif
-#if V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
-#endif
-#if V8_TARGET_ARCH_MIPS
+#elif V8_TARGET_ARCH_MIPS
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
-#endif
-#if V8_TARGET_ARCH_MIPS64
+#elif V8_TARGET_ARCH_MIPS64
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
-#endif
-#if V8_TARGET_ARCH_X64
+#elif V8_TARGET_ARCH_X64
#include "src/regexp/x64/regexp-macro-assembler-x64.h"
-#endif
-#if V8_TARGET_ARCH_IA32
+#elif V8_TARGET_ARCH_IA32
#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
+#else
+#error Unknown architecture.
#endif
-#endif // V8_INTERPRETED_REGEXP
+
#include "test/cctest/cctest.h"
namespace v8 {
@@ -738,9 +732,6 @@ TEST(ParsePossessiveRepetition) {
// Tests of interpreter.
-
-#ifndef V8_INTERPRETED_REGEXP
-
#if V8_TARGET_ARCH_IA32
typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_X64
@@ -781,9 +772,11 @@ static ArchRegExpMacroAssembler::Result Execute(Code code, String input,
Address input_start,
Address input_end,
int* captures) {
- return NativeRegExpMacroAssembler::Execute(
- code, input, start_offset, reinterpret_cast<byte*>(input_start),
- reinterpret_cast<byte*>(input_end), captures, 0, CcTest::i_isolate());
+ return static_cast<NativeRegExpMacroAssembler::Result>(
+ NativeRegExpMacroAssembler::Execute(code, input, start_offset,
+ reinterpret_cast<byte*>(input_start),
+ reinterpret_cast<byte*>(input_end),
+ captures, 0, CcTest::i_isolate()));
}
TEST(MacroAssemblerNativeSuccess) {
@@ -1397,13 +1390,9 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
isolate->clear_pending_exception();
}
-#else // V8_INTERPRETED_REGEXP
-
TEST(MacroAssembler) {
- byte codes[1024];
Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- RegExpMacroAssemblerIrregexp m(CcTest::i_isolate(), Vector<byte>(codes, 1024),
- &zone);
+ RegExpMacroAssemblerIrregexp m(CcTest::i_isolate(), &zone);
// ^f(o)o.
Label start, fail, backtrack;
@@ -1462,9 +1451,6 @@ TEST(MacroAssembler) {
CHECK_EQ(42, captures[0]);
}
-#endif // V8_INTERPRETED_REGEXP
-
-
TEST(AddInverseToTable) {
static const int kLimit = 1000;
static const int kRangeCount = 16;
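
Two consequences of dropping V8_INTERPRETED_REGEXP meet in this file: the interpreter and the native-code regexp tests now always build together (hence the single #elif chain with an #error fallback for unknown architectures), and NativeRegExpMacroAssembler::Execute apparently no longer returns the Result enum directly, since the test wrapper now re-casts its return value. The updated wrapper in isolation:

// Execute() returns a plain integer status; the wrapper restores the
// enum type for the CHECKs in its callers.
auto result = static_cast<NativeRegExpMacroAssembler::Result>(
    NativeRegExpMacroAssembler::Execute(code, input, start_offset,
                                        reinterpret_cast<byte*>(input_start),
                                        reinterpret_cast<byte*>(input_end),
                                        captures, 0, CcTest::i_isolate()));
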
diff --git a/deps/v8/test/cctest/test-representation.cc b/deps/v8/test/cctest/test-representation.cc
index e839f528f3..af0051e0af 100644
--- a/deps/v8/test/cctest/test-representation.cc
+++ b/deps/v8/test/cctest/test-representation.cc
@@ -46,85 +46,16 @@ void TestPairNegative(Representation more_general,
TEST(RepresentationMoreGeneralThan) {
TestPairNegative(Representation::None(), Representation::None());
- TestPairPositive(Representation::Integer8(), Representation::None());
- TestPairPositive(Representation::UInteger8(), Representation::None());
- TestPairPositive(Representation::Integer16(), Representation::None());
- TestPairPositive(Representation::UInteger16(), Representation::None());
TestPairPositive(Representation::Smi(), Representation::None());
- TestPairPositive(Representation::Integer32(), Representation::None());
TestPairPositive(Representation::HeapObject(), Representation::None());
TestPairPositive(Representation::Double(), Representation::None());
TestPairPositive(Representation::Tagged(), Representation::None());
- TestPairNegative(Representation::None(), Representation::Integer8());
- TestPairNegative(Representation::Integer8(), Representation::Integer8());
- TestPairNegative(Representation::UInteger8(), Representation::Integer8());
- TestPairPositive(Representation::Integer16(), Representation::Integer8());
- TestPairPositive(Representation::UInteger16(), Representation::Integer8());
- TestPairPositive(Representation::Smi(), Representation::Integer8());
- TestPairPositive(Representation::Integer32(), Representation::Integer8());
- TestPairNegative(Representation::HeapObject(), Representation::Integer8());
- TestPairPositive(Representation::Double(), Representation::Integer8());
- TestPairPositive(Representation::Tagged(), Representation::Integer8());
-
- TestPairNegative(Representation::None(), Representation::UInteger8());
- TestPairNegative(Representation::Integer8(), Representation::UInteger8());
- TestPairNegative(Representation::UInteger8(), Representation::UInteger8());
- TestPairPositive(Representation::Integer16(), Representation::UInteger8());
- TestPairPositive(Representation::UInteger16(), Representation::UInteger8());
- TestPairPositive(Representation::Smi(), Representation::UInteger8());
- TestPairPositive(Representation::Integer32(), Representation::UInteger8());
- TestPairNegative(Representation::HeapObject(), Representation::UInteger8());
- TestPairPositive(Representation::Double(), Representation::UInteger8());
- TestPairPositive(Representation::Tagged(), Representation::UInteger8());
-
- TestPairNegative(Representation::None(), Representation::Integer16());
- TestPairNegative(Representation::Integer8(), Representation::Integer16());
- TestPairNegative(Representation::UInteger8(), Representation::Integer16());
- TestPairNegative(Representation::Integer16(), Representation::Integer16());
- TestPairNegative(Representation::UInteger16(), Representation::Integer16());
- TestPairPositive(Representation::Smi(), Representation::Integer16());
- TestPairPositive(Representation::Integer32(), Representation::Integer16());
- TestPairNegative(Representation::HeapObject(), Representation::Integer16());
- TestPairPositive(Representation::Double(), Representation::Integer16());
- TestPairPositive(Representation::Tagged(), Representation::Integer16());
-
- TestPairNegative(Representation::None(), Representation::UInteger16());
- TestPairNegative(Representation::Integer8(), Representation::UInteger16());
- TestPairNegative(Representation::UInteger8(), Representation::UInteger16());
- TestPairNegative(Representation::Integer16(), Representation::UInteger16());
- TestPairNegative(Representation::UInteger16(), Representation::UInteger16());
- TestPairPositive(Representation::Smi(), Representation::UInteger16());
- TestPairPositive(Representation::Integer32(), Representation::UInteger16());
- TestPairNegative(Representation::HeapObject(), Representation::UInteger16());
- TestPairPositive(Representation::Double(), Representation::UInteger16());
- TestPairPositive(Representation::Tagged(), Representation::UInteger16());
-
TestPairNegative(Representation::None(), Representation::Smi());
- TestPairNegative(Representation::Integer8(), Representation::Smi());
- TestPairNegative(Representation::UInteger8(), Representation::Smi());
- TestPairNegative(Representation::Integer16(), Representation::Smi());
- TestPairNegative(Representation::UInteger16(), Representation::Smi());
TestPairNegative(Representation::Smi(), Representation::Smi());
- TestPairPositive(Representation::Integer32(), Representation::Smi());
TestPairNegative(Representation::HeapObject(), Representation::Smi());
TestPairPositive(Representation::Double(), Representation::Smi());
TestPairPositive(Representation::Tagged(), Representation::Smi());
-
- TestPairNegative(Representation::None(), Representation::Integer32());
- TestPairNegative(Representation::Integer8(), Representation::Integer32());
- TestPairNegative(Representation::UInteger8(), Representation::Integer32());
- TestPairNegative(Representation::Integer16(), Representation::Integer32());
- TestPairNegative(Representation::UInteger16(), Representation::Integer32());
- TestPairNegative(Representation::Smi(), Representation::Integer32());
- TestPairNegative(Representation::Integer32(), Representation::Integer32());
- TestPairNegative(Representation::HeapObject(), Representation::Integer32());
- TestPairPositive(Representation::Double(), Representation::Integer32());
- TestPairPositive(Representation::Tagged(), Representation::Integer32());
-
- TestPairNegative(Representation::None(), Representation::External());
- TestPairNegative(Representation::External(), Representation::External());
- TestPairPositive(Representation::External(), Representation::None());
}
} // namespace internal
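
With the sub-word, Integer32 and External representations deleted, the lattice the remaining assertions pin down is small: None sits below everything else, Smi is strictly below both Double and Tagged, and HeapObject is neither above nor below Smi. Reading TestPairPositive(a, b) as "a is strictly more general than b", the two checks carrying most of that information are:

TestPairPositive(Representation::Double(), Representation::Smi());
TestPairNegative(Representation::HeapObject(), Representation::Smi());
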
diff --git a/deps/v8/test/cctest/test-roots.cc b/deps/v8/test/cctest/test-roots.cc
index bb4e09d8a4..a0ad1f06c6 100644
--- a/deps/v8/test/cctest/test-roots.cc
+++ b/deps/v8/test/cctest/test-roots.cc
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/feedback-cell.h"
+#include "src/objects/script.h"
#include "src/roots-inl.h"
#include "test/cctest/cctest.h"
@@ -42,7 +45,7 @@ bool IsInitiallyMutable(Factory* factory, Address object_address) {
V(builtins_constants_table) \
V(current_microtask) \
V(detached_contexts) \
- V(dirty_js_weak_factories) \
+ V(dirty_js_finalization_groups) \
V(feedback_vectors_for_profiling_tools) \
V(materialized_objects) \
V(noscript_shared_function_infos) \
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index ed1718adde..818505febc 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -37,6 +37,8 @@
#include "src/compilation-cache.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/hash-seed-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/spaces.h"
#include "src/interpreter/interpreter.h"
#include "src/macro-assembler-inl.h"
@@ -49,6 +51,7 @@
#include "src/snapshot/natives.h"
#include "src/snapshot/partial-deserializer.h"
#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/read-only-serializer.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-deserializer.h"
@@ -90,7 +93,7 @@ class TestSerializer {
v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer, kGenerateHeap);
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->Init(nullptr);
+ isolate->Init(nullptr, nullptr);
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
return v8_isolate;
}
@@ -98,13 +101,14 @@ class TestSerializer {
static v8::Isolate* NewIsolateFromBlob(StartupBlobs& blobs) {
SnapshotData startup_snapshot(blobs.startup);
SnapshotData read_only_snapshot(blobs.read_only);
- StartupDeserializer deserializer(&startup_snapshot, &read_only_snapshot);
+ ReadOnlyDeserializer read_only_deserializer(&read_only_snapshot);
+ StartupDeserializer startup_deserializer(&startup_snapshot);
const bool kEnableSerializer = false;
const bool kGenerateHeap = false;
v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer, kGenerateHeap);
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->Init(&deserializer);
+ isolate->Init(&read_only_deserializer, &startup_deserializer);
return v8_isolate;
}
@@ -860,7 +864,6 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobStringNotInternalized) {
FreeCurrentEmbeddedBlob();
}
-#ifndef V8_INTERPRETED_REGEXP
namespace {
void TestCustomSnapshotDataBlobWithIrregexpCode(
@@ -937,7 +940,6 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobWithIrregexpCodeClearCode) {
TestCustomSnapshotDataBlobWithIrregexpCode(
v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
-#endif // V8_INTERPRETED_REGEXP
UNINITIALIZED_TEST(SnapshotChecksum) {
DisableAlwaysOpt();
@@ -1339,9 +1341,9 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
UNINITIALIZED_TEST(CustomSnapshotDataBlobWithLocker) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- DisableEmbeddedBlobRefcounting();
v8::Isolate* isolate0 = v8::Isolate::New(create_params);
{
v8::Locker locker(isolate0);
@@ -1929,21 +1931,25 @@ TEST(CodeSerializerThreeBigStrings) {
v8::HandleScope scope(CcTest::isolate());
+ const int32_t length_of_a = kMaxRegularHeapObjectSize * 2;
+ const int32_t length_of_b = kMaxRegularHeapObjectSize / 2;
+ const int32_t length_of_c = kMaxRegularHeapObjectSize / 2;
+
Vector<const uint8_t> source_a =
ConstructSource(StaticCharVector("var a = \""), StaticCharVector("a"),
- StaticCharVector("\";"), 700000);
+ StaticCharVector("\";"), length_of_a);
Handle<String> source_a_str =
f->NewStringFromOneByte(source_a).ToHandleChecked();
Vector<const uint8_t> source_b =
ConstructSource(StaticCharVector("var b = \""), StaticCharVector("b"),
- StaticCharVector("\";"), 400000);
+ StaticCharVector("\";"), length_of_b);
Handle<String> source_b_str =
f->NewStringFromOneByte(source_b).ToHandleChecked();
Vector<const uint8_t> source_c =
ConstructSource(StaticCharVector("var c = \""), StaticCharVector("c"),
- StaticCharVector("\";"), 400000);
+ StaticCharVector("\";"), length_of_c);
Handle<String> source_c_str =
f->NewStringFromOneByte(source_c).ToHandleChecked();
@@ -1976,10 +1982,10 @@ TEST(CodeSerializerThreeBigStrings) {
v8::Maybe<int32_t> result =
CompileRun("(a + b).length")
->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext());
- CHECK_EQ(400000 + 700000, result.FromJust());
+ CHECK_EQ(length_of_a + length_of_b, result.FromJust());
result = CompileRun("(b + c).length")
->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext());
- CHECK_EQ(400000 + 400000, result.FromJust());
+ CHECK_EQ(length_of_b + length_of_c, result.FromJust());
Heap* heap = isolate->heap();
v8::Local<v8::String> result_str =
CompileRun("a")
@@ -1989,20 +1995,12 @@ TEST(CodeSerializerThreeBigStrings) {
result_str = CompileRun("b")
->ToString(CcTest::isolate()->GetCurrentContext())
.ToLocalChecked();
-#if V8_HOST_ARCH_PPC
- CHECK(heap->InSpace(*v8::Utils::OpenHandle(*result_str), LO_SPACE));
-#else
CHECK(heap->InSpace(*v8::Utils::OpenHandle(*result_str), OLD_SPACE));
-#endif
result_str = CompileRun("c")
->ToString(CcTest::isolate()->GetCurrentContext())
.ToLocalChecked();
-#if V8_HOST_ARCH_PPC
- CHECK(heap->InSpace(*v8::Utils::OpenHandle(*result_str), LO_SPACE));
-#else
CHECK(heap->InSpace(*v8::Utils::OpenHandle(*result_str), OLD_SPACE));
-#endif
delete cache;
source_a.Dispose();
@@ -2577,8 +2575,9 @@ TEST(Regress503552) {
delete cache_data;
}
-TEST(SnapshotCreatorMultipleContexts) {
+UNINITIALIZED_TEST(SnapshotCreatorMultipleContexts) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
v8::SnapshotCreator creator;
@@ -2637,6 +2636,7 @@ TEST(SnapshotCreatorMultipleContexts) {
isolate->Dispose();
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
static int serialized_static_field = 314;
@@ -2699,8 +2699,9 @@ intptr_t replaced_external_references[] = {
intptr_t short_external_references[] = {
reinterpret_cast<intptr_t>(SerializedCallbackReplacement), 0};
-TEST(SnapshotCreatorExternalReferences) {
+UNINITIALIZED_TEST(SnapshotCreatorExternalReferences) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
v8::SnapshotCreator creator(original_external_references);
@@ -2787,10 +2788,12 @@ TEST(SnapshotCreatorExternalReferences) {
CHECK_EQ(3, serializable_two_byte_resource.dispose_count());
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
-TEST(SnapshotCreatorShortExternalReferences) {
+UNINITIALIZED_TEST(SnapshotCreatorShortExternalReferences) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
v8::SnapshotCreator creator(original_external_references);
@@ -2829,6 +2832,7 @@ TEST(SnapshotCreatorShortExternalReferences) {
isolate->Dispose();
}
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
v8::StartupData CreateSnapshotWithDefaultAndCustom() {
@@ -2865,8 +2869,9 @@ v8::StartupData CreateSnapshotWithDefaultAndCustom() {
return creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
-TEST(SnapshotCreatorNoExternalReferencesDefault) {
+UNINITIALIZED_TEST(SnapshotCreatorNoExternalReferencesDefault) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob = CreateSnapshotWithDefaultAndCustom();
// Deserialize with an incomplete list of external references.
@@ -2887,6 +2892,7 @@ TEST(SnapshotCreatorNoExternalReferencesDefault) {
isolate->Dispose();
}
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
v8::StartupData CreateCustomSnapshotWithPreparseDataAndNoOuterScope() {
@@ -2912,8 +2918,9 @@ v8::StartupData CreateCustomSnapshotWithPreparseDataAndNoOuterScope() {
return creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
-TEST(SnapshotCreatorPreparseDataAndNoOuterScope) {
+UNINITIALIZED_TEST(SnapshotCreatorPreparseDataAndNoOuterScope) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob = CreateCustomSnapshotWithPreparseDataAndNoOuterScope();
// Deserialize with an incomplete list of external references.
@@ -2932,6 +2939,7 @@ TEST(SnapshotCreatorPreparseDataAndNoOuterScope) {
isolate->Dispose();
}
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
v8::StartupData CreateCustomSnapshotArrayJoinWithKeep() {
@@ -2952,8 +2960,9 @@ v8::StartupData CreateCustomSnapshotArrayJoinWithKeep() {
return creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kKeep);
}
-TEST(SnapshotCreatorArrayJoinWithKeep) {
+UNINITIALIZED_TEST(SnapshotCreatorArrayJoinWithKeep) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob = CreateCustomSnapshotArrayJoinWithKeep();
// Deserialize with an incomplete list of external references.
@@ -2973,6 +2982,7 @@ TEST(SnapshotCreatorArrayJoinWithKeep) {
isolate->Dispose();
}
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
TEST(SnapshotCreatorNoExternalReferencesCustomFail1) {
@@ -3025,8 +3035,9 @@ TEST(SnapshotCreatorNoExternalReferencesCustomFail2) {
delete[] blob.data;
}
-TEST(SnapshotCreatorUnknownExternalReferences) {
+UNINITIALIZED_TEST(SnapshotCreatorUnknownExternalReferences) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::SnapshotCreator creator;
v8::Isolate* isolate = creator.GetIsolate();
{
@@ -3047,10 +3058,12 @@ TEST(SnapshotCreatorUnknownExternalReferences) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
-TEST(SnapshotCreatorTemplates) {
+UNINITIALIZED_TEST(SnapshotCreatorTemplates) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
@@ -3214,10 +3227,12 @@ TEST(SnapshotCreatorTemplates) {
isolate->Dispose();
}
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
-TEST(SnapshotCreatorAddData) {
+UNINITIALIZED_TEST(SnapshotCreatorAddData) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
@@ -3414,6 +3429,7 @@ TEST(SnapshotCreatorAddData) {
isolate->Dispose();
}
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
TEST(SnapshotCreatorUnknownHandles) {
@@ -3441,8 +3457,9 @@ TEST(SnapshotCreatorUnknownHandles) {
delete[] blob.data;
}
-TEST(SnapshotCreatorIncludeGlobalProxy) {
+UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
@@ -3537,14 +3554,15 @@ TEST(SnapshotCreatorIncludeGlobalProxy) {
v8::Isolate::Scope isolate_scope(isolate);
// We can introduce new extensions, which could override functions already
// in the snapshot.
- v8::Extension* extension = new v8::Extension("new extension",
- "function i() { return 24; }"
- "function j() { return 25; }"
- "try {"
- " if (o.p == 7) o.p++;"
- "} catch {}");
+ auto extension =
+ base::make_unique<v8::Extension>("new extension",
+ "function i() { return 24; }"
+ "function j() { return 25; }"
+ "try {"
+ " if (o.p == 7) o.p++;"
+ "} catch {}");
extension->set_auto_enable(true);
- v8::RegisterExtension(extension);
+ v8::RegisterExtension(std::move(extension));
{
// Create a new context from default context snapshot. This will
// create a new global object from a new global object template
@@ -3630,6 +3648,7 @@ TEST(SnapshotCreatorIncludeGlobalProxy) {
isolate->Dispose();
}
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
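
The extension hunk above also reflects an API change: v8::RegisterExtension now takes ownership through std::unique_ptr instead of a raw pointer. A sketch of the new call shape, with a hypothetical extension source:

auto extension = base::make_unique<v8::Extension>(
    "my extension", "function f() { return 1; }");  // hypothetical source
extension->set_auto_enable(true);
v8::RegisterExtension(std::move(extension));  // V8 now frees the extension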
UNINITIALIZED_TEST(ReinitializeHashSeedNotRehashable) {
@@ -3666,7 +3685,7 @@ UNINITIALIZED_TEST(ReinitializeHashSeedNotRehashable) {
{
// Check that no rehashing has been performed.
CHECK_EQ(static_cast<uint64_t>(42),
- reinterpret_cast<i::Isolate*>(isolate)->heap()->HashSeed());
+ HashSeed(reinterpret_cast<i::Isolate*>(isolate)));
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
@@ -3731,7 +3750,7 @@ UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
{
// Check that rehashing has been performed.
CHECK_EQ(static_cast<uint64_t>(1337),
- reinterpret_cast<i::Isolate*>(isolate)->heap()->HashSeed());
+ HashSeed(reinterpret_cast<i::Isolate*>(isolate)));
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
@@ -3751,7 +3770,7 @@ UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
FreeCurrentEmbeddedBlob();
}
-TEST(SerializationStats) {
+UNINITIALIZED_TEST(SerializationStats) {
FLAG_profile_deserialization = true;
FLAG_always_opt = false;
v8::StartupData blob = CreateSnapshotDataBlob();
@@ -3766,6 +3785,8 @@ TEST(SerializationStats) {
}
PrintF("Embedded blob is %d bytes\n", embedded_blob_size);
}
+
+ FreeCurrentEmbeddedBlob();
}
void CheckSFIsAreWeak(WeakFixedArray sfis, Isolate* isolate) {
@@ -3784,10 +3805,11 @@ void CheckSFIsAreWeak(WeakFixedArray sfis, Isolate* isolate) {
CHECK_GT(no_of_weak, 0);
}
-TEST(WeakArraySerializizationInSnapshot) {
+UNINITIALIZED_TEST(WeakArraySerializationInSnapshot) {
const char* code = "var my_func = function() { }";
DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
i::FLAG_allow_natives_syntax = true;
v8::StartupData blob;
{
@@ -3829,10 +3851,11 @@ TEST(WeakArraySerializizationInSnapshot) {
// Verify that the pointers in shared_function_infos are weak.
WeakFixedArray sfis =
Script::cast(function->shared()->script())->shared_function_infos();
- CheckSFIsAreWeak(sfis, CcTest::i_isolate());
+ CheckSFIsAreWeak(sfis, reinterpret_cast<i::Isolate*>(isolate));
}
isolate->Dispose();
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
TEST(WeakArraySerializationInCodeCache) {
@@ -3900,5 +3923,27 @@ TEST(CachedCompileFunctionInContext) {
}
}
+UNINITIALIZED_TEST(SnapshotCreatorAnonClassWithKeep) {
+ DisableAlwaysOpt();
+ v8::SnapshotCreator creator;
+ v8::Isolate* isolate = creator.GetIsolate();
+ {
+ v8::HandleScope handle_scope(isolate);
+ {
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ CompileRun(
+ "function Foo() { return class {}; } \n"
+ "class Bar extends Foo() {}\n"
+ "Foo()\n");
+ creator.SetDefaultContext(context);
+ }
+ }
+ v8::StartupData blob =
+ creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kKeep);
+
+ delete[] blob.data;
+}
+
} // namespace internal
} // namespace v8
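
The two ReinitializeHashSeed hunks above track another interface move: the hash seed is now read through a free HashSeed(i::Isolate*) function rather than a Heap method. A sketch of the call-site change:

i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Before: i_isolate->heap()->HashSeed()
uint64_t seed = HashSeed(i_isolate);
CHECK_EQ(static_cast<uint64_t>(42), seed);  // 42 as in the test above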
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 9afde32287..0871992ec6 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -37,6 +37,7 @@
#include "src/api-inl.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/unicode-decoder.h"
@@ -1425,7 +1426,7 @@ TEST(InternalizeExternal) {
Handle<String> string = v8::Utils::OpenHandle(*ext_string);
CHECK(string->IsExternalString());
CHECK(!string->IsInternalizedString());
- CHECK(!i::Heap::InNewSpace(*string));
+ CHECK(!i::Heap::InYoungGeneration(*string));
CHECK_EQ(
isolate->factory()->string_table()->LookupStringIfExists_NoAllocate(
isolate, string->ptr()),
@@ -1433,7 +1434,7 @@ TEST(InternalizeExternal) {
factory->InternalizeName(string);
CHECK(string->IsExternalString());
CHECK(string->IsInternalizedString());
- CHECK(!i::Heap::InNewSpace(*string));
+ CHECK(!i::Heap::InYoungGeneration(*string));
}
CcTest::CollectGarbage(i::OLD_SPACE);
CcTest::CollectGarbage(i::OLD_SPACE);
@@ -1817,7 +1818,7 @@ TEST(ExternalStringIndexOf) {
->NewStringFromOneByte(Vector<const uint8_t>( \
reinterpret_cast<const uint8_t*>(buf), len)) \
.ToHandleChecked(); \
- CHECK(Heap::InNewSpace(*main_string)); \
+ CHECK(Heap::InYoungGeneration(*main_string)); \
/* Next allocation will cause GC. */ \
heap::SimulateFullSpace(CcTest::i_isolate()->heap()->new_space()); \
/* Offset by two to check substring-ing. */ \
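
These string hunks, and the heap-related hunks below, replace Heap::InNewSpace with Heap::InYoungGeneration; the check semantics are unchanged, since the tests only care whether an object still lives in the young generation. A sketch of the renamed predicate, using the tenured-allocation pattern from the weak-collection tests later in this diff:

Handle<JSObject> object = factory->NewJSObject(function, TENURED);
CHECK(!Heap::InYoungGeneration(*object));  // tenured, so not young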
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 72d01c2b7b..074e516220 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -176,10 +176,7 @@ class TerminatorThread : public v8::base::Thread {
v8::Isolate* isolate_;
};
-
-// Test that a single thread of JavaScript execution can be terminated
-// from the side by another thread.
-TEST(TerminateOnlyV8ThreadFromOtherThread) {
+void TestTerminatingSlowOperation(const char* source) {
semaphore = new v8::base::Semaphore(0);
TerminatorThread thread(CcTest::i_isolate());
thread.Start();
@@ -191,40 +188,55 @@ TEST(TerminateOnlyV8ThreadFromOtherThread) {
v8::Context::New(CcTest::isolate(), nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!CcTest::isolate()->IsExecutionTerminating());
- // Run a loop that will be infinite if thread termination does not work.
v8::MaybeLocal<v8::Value> result =
- CompileRun(CcTest::isolate()->GetCurrentContext(),
- "try { loop(); fail(); } catch(e) { fail(); }");
+ CompileRun(CcTest::isolate()->GetCurrentContext(), source);
CHECK(result.IsEmpty());
thread.Join();
delete semaphore;
semaphore = nullptr;
}
+// Test that a single thread of JavaScript execution can be terminated
+// from the side by another thread.
+TEST(TerminateOnlyV8ThreadFromOtherThread) {
+ // Run a loop that will be infinite if thread termination does not work.
+ TestTerminatingSlowOperation("try { loop(); fail(); } catch(e) { fail(); }");
+}
+
// Test that execution can be terminated from within JSON.stringify.
TEST(TerminateJsonStringify) {
- semaphore = new v8::base::Semaphore(0);
- TerminatorThread thread(CcTest::i_isolate());
- thread.Start();
+ TestTerminatingSlowOperation(
+ "var x = [];"
+ "x[2**31]=1;"
+ "terminate();"
+ "JSON.stringify(x);"
+ "fail();");
+}
- v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::ObjectTemplate> global =
- CreateGlobalTemplate(CcTest::isolate(), Signal, DoLoop);
- v8::Local<v8::Context> context =
- v8::Context::New(CcTest::isolate(), nullptr, global);
- v8::Context::Scope context_scope(context);
- CHECK(!CcTest::isolate()->IsExecutionTerminating());
- v8::MaybeLocal<v8::Value> result =
- CompileRun(CcTest::isolate()->GetCurrentContext(),
- "var x = [];"
- "x[2**31]=1;"
- "terminate();"
- "JSON.stringify(x);"
- "fail();");
- CHECK(result.IsEmpty());
- thread.Join();
- delete semaphore;
- semaphore = nullptr;
+TEST(TerminateBigIntMultiplication) {
+ TestTerminatingSlowOperation(
+ "terminate();"
+ "var a = 5n ** 555555n;"
+ "var b = 3n ** 3333333n;"
+ "a * b;"
+ "fail();");
+}
+
+TEST(TerminateBigIntDivision) {
+ TestTerminatingSlowOperation(
+ "var a = 2n ** 2222222n;"
+ "var b = 3n ** 333333n;"
+ "terminate();"
+ "a / b;"
+ "fail();");
+}
+
+TEST(TerminateBigIntToString) {
+ TestTerminatingSlowOperation(
+ "var a = 2n ** 2222222n;"
+ "terminate();"
+ "a.toString();"
+ "fail();");
}
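
With the termination boilerplate factored into TestTerminatingSlowOperation, each new slow path needs only a source snippet. A hypothetical further case, following the pattern of the BigInt tests above (not part of this patch):

TEST(TerminateArrayJoin) {  // hypothetical test
  TestTerminatingSlowOperation(
      "var a = new Array(100000);"
      "terminate();"
      "a.join('x');"
      "fail();");
}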
int call_count = 0;
@@ -854,26 +866,26 @@ class TerminatorSleeperThread : public v8::base::Thread {
};
TEST(TerminateRegExp) {
-// regexp interpreter does not support preemption.
-#ifndef V8_INTERPRETED_REGEXP
- i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
- isolate, TerminateCurrentThread, DoLoopCancelTerminate);
- v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
- v8::Context::Scope context_scope(context);
- CHECK(!isolate->IsExecutionTerminating());
- v8::TryCatch try_catch(isolate);
- CHECK(!isolate->IsExecutionTerminating());
- CHECK(!CompileRun("var re = /(x+)+y$/; re.test('x');").IsEmpty());
- TerminatorSleeperThread terminator(isolate, 100);
- terminator.Start();
- CHECK(CompileRun("re.test('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); fail();")
- .IsEmpty());
- CHECK(try_catch.HasCaught());
- CHECK(isolate->IsExecutionTerminating());
-#endif // V8_INTERPRETED_REGEXP
+ // The regexp interpreter does not support preemption.
+ if (!i::FLAG_regexp_interpret_all) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ isolate, TerminateCurrentThread, DoLoopCancelTerminate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
+ v8::Context::Scope context_scope(context);
+ CHECK(!isolate->IsExecutionTerminating());
+ v8::TryCatch try_catch(isolate);
+ CHECK(!isolate->IsExecutionTerminating());
+ CHECK(!CompileRun("var re = /(x+)+y$/; re.test('x');").IsEmpty());
+ TerminatorSleeperThread terminator(isolate, 100);
+ terminator.Start();
+ CHECK(CompileRun("re.test('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); fail();")
+ .IsEmpty());
+ CHECK(try_catch.HasCaught());
+ CHECK(isolate->IsExecutionTerminating());
+ }
}
TEST(TerminateInMicrotask) {
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index f73641d9cf..ed04a247ab 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -234,26 +234,6 @@ TEST(TestEventWithId) {
CHECK_EQ(event_id, GET_TRACE_OBJECT(1)->id);
}
-TEST(TestEventInContext) {
- MockTracingPlatform platform;
-
- static uint64_t isolate_id = 0x20151021;
- {
- TRACE_EVENT_SCOPED_CONTEXT("v8-cat", "Isolate", isolate_id);
- TRACE_EVENT0("v8-cat", "e");
- }
-
- CHECK_EQ(3, GET_TRACE_OBJECTS_LIST->size());
- CHECK_EQ(TRACE_EVENT_PHASE_ENTER_CONTEXT, GET_TRACE_OBJECT(0)->phase);
- CHECK_EQ("Isolate", GET_TRACE_OBJECT(0)->name);
- CHECK_EQ(isolate_id, GET_TRACE_OBJECT(0)->id);
- CHECK_EQ(TRACE_EVENT_PHASE_COMPLETE, GET_TRACE_OBJECT(1)->phase);
- CHECK_EQ("e", GET_TRACE_OBJECT(1)->name);
- CHECK_EQ(TRACE_EVENT_PHASE_LEAVE_CONTEXT, GET_TRACE_OBJECT(2)->phase);
- CHECK_EQ("Isolate", GET_TRACE_OBJECT(2)->name);
- CHECK_EQ(isolate_id, GET_TRACE_OBJECT(2)->id);
-}
-
TEST(TestEventWithTimestamp) {
MockTracingPlatform platform;
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index c4b7451147..c43d7ba9b7 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -13,7 +13,7 @@
#include "src/global-handles.h"
#include "src/heap/factory.h"
#include "src/objects-inl.h"
-#include "src/transitions.h"
+#include "src/transitions-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-transitions.h"
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index c8d5e37fa2..73257cc057 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -10,7 +10,7 @@
#include "src/isolate.h"
#include "src/objects.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/types-fuzz.h"
+#include "test/common/types-fuzz.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 0d1ebe6759..c202140f42 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -15,6 +15,7 @@
#include "src/field-type.h"
#include "src/global-handles.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/ic/ic.h"
@@ -1114,7 +1115,7 @@ TEST(DoScavenge) {
// Construct a double value that looks like a pointer to the new space object
// and store it into the obj.
- Address fake_object = temp->ptr() + kPointerSize;
+ Address fake_object = temp->ptr() + kSystemPointerSize;
double boom_value = bit_cast<double>(fake_object);
FieldIndex field_index = FieldIndex::ForDescriptor(obj->map(), 0);
@@ -1261,11 +1262,11 @@ static void TestLayoutDescriptorHelper(Isolate* isolate,
CHECK_EQ(expected_tagged, helper.IsTagged(index.offset(), instance_size,
&end_of_region_offset));
CHECK_GT(end_of_region_offset, 0);
- CHECK_EQ(end_of_region_offset % kPointerSize, 0);
+ CHECK_EQ(end_of_region_offset % kTaggedSize, 0);
CHECK(end_of_region_offset <= instance_size);
for (int offset = index.offset(); offset < end_of_region_offset;
- offset += kPointerSize) {
+ offset += kTaggedSize) {
CHECK_EQ(expected_tagged, helper.IsTagged(index.offset()));
}
if (end_of_region_offset < instance_size) {
@@ -1275,7 +1276,7 @@ static void TestLayoutDescriptorHelper(Isolate* isolate,
}
}
- for (int offset = 0; offset < JSObject::kHeaderSize; offset += kPointerSize) {
+ for (int offset = 0; offset < JSObject::kHeaderSize; offset += kTaggedSize) {
// Header queries
CHECK(helper.IsTagged(offset));
int end_of_region_offset;
@@ -1454,7 +1455,7 @@ static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
obj_value = factory->NewHeapNumber(0.);
}
- CHECK(Heap::InNewSpace(*obj_value));
+ CHECK(Heap::InYoungGeneration(*obj_value));
{
FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
@@ -1468,7 +1469,7 @@ static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// |boom_value| to the slot that was earlier recorded by write barrier.
JSObject::MigrateToMap(obj, new_map);
- Address fake_object = obj_value->ptr() + kPointerSize;
+ Address fake_object = obj_value->ptr() + kTaggedSize;
uint64_t boom_value = bit_cast<uint64_t>(fake_object);
FieldIndex double_field_index =
diff --git a/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc b/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
index de0976825d..d98e2739d5 100644
--- a/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
+++ b/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
@@ -28,8 +28,6 @@ TEST(Unscopables) {
v8::Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
v8::Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
- t1->SetHiddenPrototype(true);
-
v8::Local<v8::Object> object = t0->GetFunction(current_context)
.ToLocalChecked()
->NewInstance(current_context)
diff --git a/deps/v8/test/cctest/test-unwinder.cc b/deps/v8/test/cctest/test-unwinder.cc
index 26b7c99b2f..63ce17d57c 100644
--- a/deps/v8/test/cctest/test-unwinder.cc
+++ b/deps/v8/test/cctest/test-unwinder.cc
@@ -6,6 +6,7 @@
#include "src/api-inl.h"
#include "src/builtins/builtins.h"
+#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/objects/code-inl.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index b6fe4e7597..4ef3766473 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -29,6 +29,7 @@
#include "src/global-handles.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
@@ -173,7 +174,7 @@ TEST(Regress2060a) {
HandleScope scope(isolate);
for (int i = 0; i < 32; i++) {
Handle<JSObject> object = factory->NewJSObject(function, TENURED);
- CHECK(!Heap::InNewSpace(*object));
+ CHECK(!Heap::InYoungGeneration(*object));
CHECK(!first_page->Contains(object->address()));
int32_t hash = key->GetOrCreateHash(isolate)->value();
JSWeakCollection::Set(weakmap, key, object, hash);
@@ -211,7 +212,7 @@ TEST(Regress2060b) {
Handle<JSObject> keys[32];
for (int i = 0; i < 32; i++) {
keys[i] = factory->NewJSObject(function, TENURED);
- CHECK(!Heap::InNewSpace(*keys[i]));
+ CHECK(!Heap::InYoungGeneration(*keys[i]));
CHECK(!first_page->Contains(keys[i]->address()));
}
Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
@@ -247,6 +248,39 @@ TEST(Regress399527) {
CcTest::CollectAllGarbage();
}
+TEST(WeakMapsWithChainedEntries) {
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = CcTest::i_isolate();
+ v8::HandleScope scope(isolate);
+
+ const int initial_gc_count = i_isolate->heap()->gc_count();
+ Handle<JSWeakMap> weakmap1 = i_isolate->factory()->NewJSWeakMap();
+ Handle<JSWeakMap> weakmap2 = i_isolate->factory()->NewJSWeakMap();
+ v8::Global<v8::Object> g1;
+ v8::Global<v8::Object> g2;
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> o1 = v8::Object::New(isolate);
+ g1.Reset(isolate, o1);
+ g1.SetWeak();
+ v8::Local<v8::Object> o2 = v8::Object::New(isolate);
+ g2.Reset(isolate, o2);
+ g2.SetWeak();
+ Handle<Object> i_o1 = v8::Utils::OpenHandle(*o1);
+ Handle<Object> i_o2 = v8::Utils::OpenHandle(*o2);
+ int32_t hash1 = i_o1->GetOrCreateHash(i_isolate)->value();
+ int32_t hash2 = i_o2->GetOrCreateHash(i_isolate)->value();
+ JSWeakCollection::Set(weakmap1, i_o1, i_o2, hash1);
+ JSWeakCollection::Set(weakmap2, i_o2, i_o1, hash2);
+ }
+ CcTest::CollectGarbage(OLD_SPACE);
+ CHECK(g1.IsEmpty());
+ CHECK(g2.IsEmpty());
+ CHECK_EQ(1, i_isolate->heap()->gc_count() - initial_gc_count);
+}
+
} // namespace test_weakmaps
} // namespace internal
} // namespace v8
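
The new WeakMapsWithChainedEntries test encodes an ephemeron chain: weakmap1 maps o1 to o2 and weakmap2 maps o2 back to o1, so neither object is strongly reachable. The final CHECK_EQ asserts the whole chain is cleared within a single full GC rather than one collection per link. A sketch of the fixture's shape:

// wm1.set(o1, o2);  // wm1's value keeps o2 alive only while o1 is alive
// wm2.set(o2, o1);  // and vice versa: a two-link ephemeron cycle
// One old-space GC must empty both weak maps and both weak globals.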
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 763a809f87..36c9e339b8 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -29,6 +29,7 @@
#include "src/global-handles.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
@@ -184,7 +185,7 @@ TEST(WeakSet_Regress2060a) {
HandleScope scope(isolate);
for (int i = 0; i < 32; i++) {
Handle<JSObject> object = factory->NewJSObject(function, TENURED);
- CHECK(!Heap::InNewSpace(*object));
+ CHECK(!Heap::InYoungGeneration(*object));
CHECK(!first_page->Contains(object->address()));
int32_t hash = key->GetOrCreateHash(isolate)->value();
JSWeakCollection::Set(weakset, key, object, hash);
@@ -222,7 +223,7 @@ TEST(WeakSet_Regress2060b) {
Handle<JSObject> keys[32];
for (int i = 0; i < 32; i++) {
keys[i] = factory->NewJSObject(function, TENURED);
- CHECK(!Heap::InNewSpace(*keys[i]));
+ CHECK(!Heap::InYoungGeneration(*keys[i]));
CHECK(!first_page->Contains(keys[i]->address()));
}
Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index 562f44098a..5c09dedbd2 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -36,8 +36,8 @@ from testrunner.objects import testcase
SHELL = 'cctest'
-class TestSuite(testsuite.TestSuite):
- def ListTests(self):
+class TestLoader(testsuite.TestLoader):
+ def _list_test_filenames(self):
shell = os.path.abspath(os.path.join(self.test_config.shell_dir, SHELL))
if utils.IsWindows():
shell += ".exe"
@@ -46,14 +46,19 @@ class TestSuite(testsuite.TestSuite):
shell=shell,
args=["--list"] + self.test_config.extra_flags)
output = cmd.execute()
+ # TODO make errors visible (see duplicated code in 'unittests')
if output.exit_code != 0:
print cmd
print output.stdout
print output.stderr
return []
- tests = map(self._create_test, output.stdout.strip().split())
- tests.sort(key=lambda t: t.path)
- return tests
+
+ return sorted(output.stdout.strip().split())
+
+
+class TestSuite(testsuite.TestSuite):
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 0941795bd6..067bccea02 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -275,6 +275,22 @@ TEST(TestGenericOverload) {
ft.Call();
}
+TEST(TestEquality) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestEquality(m.UncheckedCast<Context>(m.HeapConstant(context)));
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
TEST(TestLogicalOperators) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
@@ -423,6 +439,22 @@ TEST(TestStructConstructor) {
ft.Call();
}
+TEST(TestInternalClass) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestInternalClass(m.UncheckedCast<Context>(m.HeapConstant(context)));
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/unicode-helpers.cc b/deps/v8/test/cctest/unicode-helpers.cc
index 524e5936fc..1a74e0ca94 100644
--- a/deps/v8/test/cctest/unicode-helpers.cc
+++ b/deps/v8/test/cctest/unicode-helpers.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "test/cctest/unicode-helpers.h"
+#include "src/unicode-inl.h"
int Ucs2CharLength(unibrow::uchar c) {
if (c == unibrow::Utf8::kIncomplete || c == unibrow::Utf8::kBufferEmpty) {
@@ -19,10 +20,9 @@ int Utf8LengthHelper(const char* s) {
unibrow::Utf8::State state = unibrow::Utf8::State::kAccept;
int length = 0;
- size_t i = 0;
- while (s[i] != '\0') {
- unibrow::uchar tmp =
- unibrow::Utf8::ValueOfIncremental(s[i], &i, &state, &buffer);
+ const uint8_t* c = reinterpret_cast<const uint8_t*>(s);
+ while (*c != '\0') {
+ unibrow::uchar tmp = unibrow::Utf8::ValueOfIncremental(&c, &state, &buffer);
length += Ucs2CharLength(tmp);
}
unibrow::uchar tmp = unibrow::Utf8::ValueOfIncrementalFinish(&state);
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index 6c246bc48c..fc711aa7a7 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -109,7 +109,7 @@ TEST(TestCWasmEntryArgPassing_int32) {
return base::AddWithWraparound(base::MulWithWraparound(2, a), 1);
});
- FOR_INT32_INPUTS(v) { tester.CheckCall(*v); }
+ FOR_INT32_INPUTS(v) { tester.CheckCall(v); }
}
// Pass int64_t, return double.
@@ -119,7 +119,7 @@ TEST(TestCWasmEntryArgPassing_double_int64) {
WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0))},
[](int64_t a) { return static_cast<double>(a); });
- FOR_INT64_INPUTS(v) { tester.CheckCall(*v); }
+ FOR_INT64_INPUTS(v) { tester.CheckCall(v); }
}
// Pass double, return int64_t.
@@ -129,7 +129,7 @@ TEST(TestCWasmEntryArgPassing_int64_double) {
WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0))},
[](double d) { return static_cast<int64_t>(d); });
- FOR_INT64_INPUTS(i) { tester.CheckCall(*i); }
+ FOR_INT64_INPUTS(i) { tester.CheckCall(i); }
}
// Pass float, return double.
@@ -141,7 +141,7 @@ TEST(TestCWasmEntryArgPassing_float_double) {
WASM_F64(1))},
[](float f) { return 2. * static_cast<double>(f) + 1.; });
- FOR_FLOAT32_INPUTS(f) { tester.CheckCall(*f); }
+ FOR_FLOAT32_INPUTS(f) { tester.CheckCall(f); }
}
// Pass two doubles, return double.
@@ -152,7 +152,7 @@ TEST(TestCWasmEntryArgPassing_double_double) {
[](double a, double b) { return a + b; });
FOR_FLOAT64_INPUTS(d1) {
- FOR_FLOAT64_INPUTS(d2) { tester.CheckCall(*d1, *d2); }
+ FOR_FLOAT64_INPUTS(d2) { tester.CheckCall(d1, d2); }
}
}
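
The mechanical *v -> v rewrites in this and the following wasm test files come from a change to the FOR_*_INPUTS macros, which now bind the loop variable to a value instead of a pointer into the input array. Roughly (a sketch of the macro shape, not its exact definition):

// Old shape: iterate pointers, dereference at each use.
//   #define FOR_INT32_INPUTS(var) for (const int32_t* var = ...; ...; ++var)
//   FOR_INT32_INPUTS(v) { tester.CheckCall(*v); }
// New shape: range-based loop over values.
//   #define FOR_INT32_INPUTS(var) \
//     for (int32_t var : compiler::ValueHelper::int32_vector())
FOR_INT32_INPUTS(v) { tester.CheckCall(v); }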
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index d8d9e0412e..1472196504 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -24,10 +24,6 @@ namespace wasm {
#define __ masm.
-// TODO(v8:7424,v8:8018): Extend this test to all architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
- V8_TARGET_ARCH_ARM64
-
namespace {
static volatile int global_stop_bit = 0;
@@ -109,6 +105,30 @@ Address GenerateJumpTableThunk(
__ Tbnz(scratch, 0, &exit);
__ Mov(scratch, Immediate(jump_target, RelocInfo::NONE));
__ Br(scratch);
+#elif V8_TARGET_ARCH_PPC64
+ __ mov(scratch, Operand(stop_bit_address, RelocInfo::NONE));
+ __ LoadP(scratch, MemOperand(scratch));
+ __ cmpi(scratch, Operand::Zero());
+ __ bne(&exit);
+ __ mov(scratch, Operand(jump_target, RelocInfo::NONE));
+ __ Jump(scratch);
+#elif V8_TARGET_ARCH_S390X
+ __ mov(scratch, Operand(stop_bit_address, RelocInfo::NONE));
+ __ LoadP(scratch, MemOperand(scratch));
+ __ CmpP(scratch, Operand(0));
+ __ bne(&exit);
+ __ mov(scratch, Operand(jump_target, RelocInfo::NONE));
+ __ Jump(scratch);
+#elif V8_TARGET_ARCH_MIPS64
+ __ li(scratch, Operand(stop_bit_address, RelocInfo::NONE));
+ __ Lw(scratch, MemOperand(scratch, 0));
+ __ Branch(&exit, ne, scratch, Operand(zero_reg));
+ __ Jump(jump_target, RelocInfo::NONE);
+#elif V8_TARGET_ARCH_MIPS
+ __ li(scratch, Operand(stop_bit_address, RelocInfo::NONE));
+ __ lw(scratch, MemOperand(scratch, 0));
+ __ Branch(&exit, ne, scratch, Operand(zero_reg));
+ __ Jump(jump_target, RelocInfo::NONE);
#else
#error Unsupported architecture
#endif
@@ -236,9 +256,6 @@ TEST(JumpTablePatchingStress) {
}
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM ||
- // V8_TARGET_ARCH_ARM64
-
#undef __
#undef TRACE
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index cca0c1b061..dfce94b20c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -32,9 +32,9 @@ WASM_EXEC_TEST(I64Const) {
WASM_EXEC_TEST(I64Const_many) {
int cntr = 0;
- FOR_INT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(i) {
WasmRunner<int64_t> r(execution_tier);
- const int64_t kExpectedValue = (static_cast<int64_t>(*i) << 32) | cntr;
+ const int64_t kExpectedValue = (static_cast<uint64_t>(i) << 32) | cntr;
// return(kExpectedValue)
BUILD(r, WASM_I64V(kExpectedValue));
CHECK_EQ(kExpectedValue, r.Call());
@@ -47,7 +47,7 @@ WASM_EXEC_TEST(Return_I64) {
BUILD(r, WASM_RETURN1(WASM_GET_LOCAL(0)));
- FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT64_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(I64Add) {
@@ -55,7 +55,7 @@ WASM_EXEC_TEST(I64Add) {
BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(base::AddWithWraparound(*i, *j), r.Call(*i, *j));
+ CHECK_EQ(base::AddWithWraparound(i, j), r.Call(i, j));
}
}
}
@@ -79,7 +79,7 @@ WASM_EXEC_TEST(I64Sub) {
BUILD(r, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(base::SubWithWraparound(*i, *j), r.Call(*i, *j));
+ CHECK_EQ(base::SubWithWraparound(i, j), r.Call(i, j));
}
}
}
@@ -99,8 +99,8 @@ WASM_EXEC_TEST(I64AddUseOnlyLowWord) {
WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(static_cast<int32_t>(base::AddWithWraparound(*i, *j)),
- r.Call(*i, *j));
+ CHECK_EQ(static_cast<int32_t>(base::AddWithWraparound(i, j)),
+ r.Call(i, j));
}
}
}
@@ -111,8 +111,8 @@ WASM_EXEC_TEST(I64SubUseOnlyLowWord) {
WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(static_cast<int32_t>(base::SubWithWraparound(*i, *j)),
- r.Call(*i, *j));
+ CHECK_EQ(static_cast<int32_t>(base::SubWithWraparound(i, j)),
+ r.Call(i, j));
}
}
}
@@ -123,8 +123,8 @@ WASM_EXEC_TEST(I64MulUseOnlyLowWord) {
WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(static_cast<int32_t>(base::MulWithWraparound(*i, *j)),
- r.Call(*i, *j));
+ CHECK_EQ(static_cast<int32_t>(base::MulWithWraparound(i, j)),
+ r.Call(i, j));
}
}
}
@@ -135,8 +135,8 @@ WASM_EXEC_TEST(I64ShlUseOnlyLowWord) {
WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- int32_t expected = static_cast<int32_t>(base::ShlWithWraparound(*i, *j));
- CHECK_EQ(expected, r.Call(*i, *j));
+ int32_t expected = static_cast<int32_t>(base::ShlWithWraparound(i, j));
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
@@ -147,8 +147,8 @@ WASM_EXEC_TEST(I64ShrUseOnlyLowWord) {
WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- int32_t expected = static_cast<int32_t>((*i) >> (*j & 0x3F));
- CHECK_EQ(expected, r.Call(*i, *j));
+ int32_t expected = static_cast<int32_t>((i) >> (j & 0x3F));
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
@@ -159,8 +159,8 @@ WASM_EXEC_TEST(I64SarUseOnlyLowWord) {
WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- int32_t expected = static_cast<int32_t>((*i) >> (*j & 0x3F));
- CHECK_EQ(expected, r.Call(*i, *j));
+ int32_t expected = static_cast<int32_t>((i) >> (j & 0x3F));
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
@@ -170,12 +170,12 @@ WASM_EXEC_TEST(I64DivS) {
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- if (*j == 0) {
- CHECK_TRAP64(r.Call(*i, *j));
- } else if (*j == -1 && *i == std::numeric_limits<int64_t>::min()) {
- CHECK_TRAP64(r.Call(*i, *j));
+ if (j == 0) {
+ CHECK_TRAP64(r.Call(i, j));
+ } else if (j == -1 && i == std::numeric_limits<int64_t>::min()) {
+ CHECK_TRAP64(r.Call(i, j));
} else {
- CHECK_EQ(*i / *j, r.Call(*i, *j));
+ CHECK_EQ(i / j, r.Call(i, j));
}
}
}
@@ -210,10 +210,10 @@ WASM_EXEC_TEST(I64DivU) {
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- if (*j == 0) {
- CHECK_TRAP64(r.Call(*i, *j));
+ if (j == 0) {
+ CHECK_TRAP64(r.Call(i, j));
} else {
- CHECK_EQ(*i / *j, r.Call(*i, *j));
+ CHECK_EQ(i / j, r.Call(i, j));
}
}
}
@@ -248,10 +248,10 @@ WASM_EXEC_TEST(I64RemS) {
BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- if (*j == 0) {
- CHECK_TRAP64(r.Call(*i, *j));
+ if (j == 0) {
+ CHECK_TRAP64(r.Call(i, j));
} else {
- CHECK_EQ(*i % *j, r.Call(*i, *j));
+ CHECK_EQ(i % j, r.Call(i, j));
}
}
}
@@ -272,10 +272,10 @@ WASM_EXEC_TEST(I64RemU) {
BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- if (*j == 0) {
- CHECK_TRAP64(r.Call(*i, *j));
+ if (j == 0) {
+ CHECK_TRAP64(r.Call(i, j));
} else {
- CHECK_EQ(*i % *j, r.Call(*i, *j));
+ CHECK_EQ(i % j, r.Call(i, j));
}
}
}
@@ -294,7 +294,7 @@ WASM_EXEC_TEST(I64And) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_AND(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ((*i) & (*j), r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) { CHECK_EQ((i) & (j), r.Call(i, j)); }
}
}
@@ -302,7 +302,7 @@ WASM_EXEC_TEST(I64Ior) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ((*i) | (*j), r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) { CHECK_EQ((i) | (j), r.Call(i, j)); }
}
}
@@ -310,7 +310,7 @@ WASM_EXEC_TEST(I64Xor) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ((*i) ^ (*j), r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) { CHECK_EQ((i) ^ (j), r.Call(i, j)); }
}
}
@@ -321,30 +321,30 @@ WASM_EXEC_TEST(I64Shl) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- uint64_t expected = (*i) << (*j & 0x3F);
- CHECK_EQ(expected, r.Call(*i, *j));
+ uint64_t expected = (i) << (j & 0x3F);
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
- FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 0, r.Call(*i)); }
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(i << 0, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
- FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 32, r.Call(*i)); }
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(i << 32, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
- FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 20, r.Call(*i)); }
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(i << 20, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
- FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 40, r.Call(*i)); }
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(i << 40, r.Call(i)); }
}
}
@@ -355,30 +355,30 @@ WASM_EXEC_TEST(I64ShrU) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- uint64_t expected = (*i) >> (*j & 0x3F);
- CHECK_EQ(expected, r.Call(*i, *j));
+ uint64_t expected = (i) >> (j & 0x3F);
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
- FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 0, r.Call(*i)); }
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(i >> 0, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
- FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 32, r.Call(*i)); }
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(i >> 32, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
- FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 20, r.Call(*i)); }
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(i >> 20, r.Call(i)); }
}
{
WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
- FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 40, r.Call(*i)); }
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(i >> 40, r.Call(i)); }
}
}
@@ -389,30 +389,30 @@ WASM_EXEC_TEST(I64ShrS) {
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- int64_t expected = (*i) >> (*j & 0x3F);
- CHECK_EQ(expected, r.Call(*i, *j));
+ int64_t expected = (i) >> (j & 0x3F);
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
{
WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
- FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 0, r.Call(*i)); }
+ FOR_INT64_INPUTS(i) { CHECK_EQ(i >> 0, r.Call(i)); }
}
{
WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
- FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 32, r.Call(*i)); }
+ FOR_INT64_INPUTS(i) { CHECK_EQ(i >> 32, r.Call(i)); }
}
{
WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
- FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 20, r.Call(*i)); }
+ FOR_INT64_INPUTS(i) { CHECK_EQ(i >> 20, r.Call(i)); }
}
{
WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
- FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 40, r.Call(*i)); }
+ FOR_INT64_INPUTS(i) { CHECK_EQ(i >> 40, r.Call(i)); }
}
}
@@ -420,7 +420,7 @@ WASM_EXEC_TEST(I64Eq) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ(*i == *j ? 1 : 0, r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) { CHECK_EQ(i == j ? 1 : 0, r.Call(i, j)); }
}
}
@@ -428,7 +428,7 @@ WASM_EXEC_TEST(I64Ne) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_NE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ(*i != *j ? 1 : 0, r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) { CHECK_EQ(i != j ? 1 : 0, r.Call(i, j)); }
}
}
@@ -436,7 +436,7 @@ WASM_EXEC_TEST(I64LtS) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ(*i < *j ? 1 : 0, r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) { CHECK_EQ(i < j ? 1 : 0, r.Call(i, j)); }
}
}
@@ -444,7 +444,7 @@ WASM_EXEC_TEST(I64LeS) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_LES(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ(*i <= *j ? 1 : 0, r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) { CHECK_EQ(i <= j ? 1 : 0, r.Call(i, j)); }
}
}
@@ -452,7 +452,7 @@ WASM_EXEC_TEST(I64LtU) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
- FOR_UINT64_INPUTS(j) { CHECK_EQ(*i < *j ? 1 : 0, r.Call(*i, *j)); }
+ FOR_UINT64_INPUTS(j) { CHECK_EQ(i < j ? 1 : 0, r.Call(i, j)); }
}
}
@@ -460,7 +460,7 @@ WASM_EXEC_TEST(I64LeU) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
- FOR_UINT64_INPUTS(j) { CHECK_EQ(*i <= *j ? 1 : 0, r.Call(*i, *j)); }
+ FOR_UINT64_INPUTS(j) { CHECK_EQ(i <= j ? 1 : 0, r.Call(i, j)); }
}
}
@@ -468,7 +468,7 @@ WASM_EXEC_TEST(I64GtS) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ(*i > *j ? 1 : 0, r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) { CHECK_EQ(i > j ? 1 : 0, r.Call(i, j)); }
}
}
@@ -476,7 +476,7 @@ WASM_EXEC_TEST(I64GeS) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_GES(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ(*i >= *j ? 1 : 0, r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) { CHECK_EQ(i >= j ? 1 : 0, r.Call(i, j)); }
}
}
@@ -484,7 +484,7 @@ WASM_EXEC_TEST(I64GtU) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
- FOR_UINT64_INPUTS(j) { CHECK_EQ(*i > *j ? 1 : 0, r.Call(*i, *j)); }
+ FOR_UINT64_INPUTS(j) { CHECK_EQ(i > j ? 1 : 0, r.Call(i, j)); }
}
}
@@ -492,28 +492,28 @@ WASM_EXEC_TEST(I64GeU) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
- FOR_UINT64_INPUTS(j) { CHECK_EQ(*i >= *j ? 1 : 0, r.Call(*i, *j)); }
+ FOR_UINT64_INPUTS(j) { CHECK_EQ(i >= j ? 1 : 0, r.Call(i, j)); }
}
}
WASM_EXEC_TEST(I32ConvertI64) {
FOR_INT64_INPUTS(i) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_I32_CONVERT_I64(WASM_I64V(*i)));
- CHECK_EQ(static_cast<int32_t>(*i), r.Call());
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_I64V(i)));
+ CHECK_EQ(static_cast<int32_t>(i), r.Call());
}
}
WASM_EXEC_TEST(I64SConvertI32) {
WasmRunner<int64_t, int32_t> r(execution_tier);
BUILD(r, WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(i), r.Call(i)); }
}
WASM_EXEC_TEST(I64UConvertI32) {
WasmRunner<int64_t, uint32_t> r(execution_tier);
BUILD(r, WASM_I64_UCONVERT_I32(WASM_GET_LOCAL(0)));
- FOR_UINT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i)); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(i), r.Call(i)); }
}
WASM_EXEC_TEST(I64Popcnt) {
@@ -536,7 +536,7 @@ WASM_EXEC_TEST(I64Popcnt) {
WASM_EXEC_TEST(F32SConvertI64) {
WasmRunner<float, int64_t> r(execution_tier);
BUILD(r, WASM_F32_SCONVERT_I64(WASM_GET_LOCAL(0)));
- FOR_INT64_INPUTS(i) { CHECK_FLOAT_EQ(static_cast<float>(*i), r.Call(*i)); }
+ FOR_INT64_INPUTS(i) { CHECK_FLOAT_EQ(static_cast<float>(i), r.Call(i)); }
}
WASM_EXEC_TEST(F32UConvertI64) {
@@ -629,7 +629,7 @@ WASM_EXEC_TEST(F32UConvertI64) {
WASM_EXEC_TEST(F64SConvertI64) {
WasmRunner<double, int64_t> r(execution_tier);
BUILD(r, WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0)));
- FOR_INT64_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(*i), r.Call(*i)); }
+ FOR_INT64_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(i), r.Call(i)); }
}
WASM_EXEC_TEST(F64UConvertI64) {
@@ -723,11 +723,11 @@ WASM_EXEC_TEST(I64SConvertF32) {
BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
- if (*i < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
- *i >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
- CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
+ if (i < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
+ i >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ CHECK_EQ(static_cast<int64_t>(i), r.Call(i));
} else {
- CHECK_TRAP64(r.Call(*i));
+ CHECK_TRAP64(r.Call(i));
}
}
}
@@ -738,17 +738,17 @@ WASM_EXEC_TEST(I64SConvertSatF32) {
BUILD(r, WASM_I64_SCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
int64_t expected;
- if (*i < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
- *i >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
- expected = static_cast<int64_t>(*i);
- } else if (std::isnan(*i)) {
+ if (i < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
+ i >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ expected = static_cast<int64_t>(i);
+ } else if (std::isnan(i)) {
expected = static_cast<int64_t>(0);
- } else if (*i < 0.0) {
+ } else if (i < 0.0) {
expected = std::numeric_limits<int64_t>::min();
} else {
expected = std::numeric_limits<int64_t>::max();
}
- int64_t found = r.Call(*i);
+ int64_t found = r.Call(i);
CHECK_EQ(expected, found);
}
}
@@ -758,11 +758,11 @@ WASM_EXEC_TEST(I64SConvertF64) {
BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
- if (*i < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
- *i >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
- CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
+ if (i < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
+ i >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ CHECK_EQ(static_cast<int64_t>(i), r.Call(i));
} else {
- CHECK_TRAP64(r.Call(*i));
+ CHECK_TRAP64(r.Call(i));
}
}
}
@@ -773,17 +773,17 @@ WASM_EXEC_TEST(I64SConvertSatF64) {
BUILD(r, WASM_I64_SCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
int64_t expected;
- if (*i < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
- *i >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
- expected = static_cast<int64_t>(*i);
- } else if (std::isnan(*i)) {
+ if (i < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
+ i >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ expected = static_cast<int64_t>(i);
+ } else if (std::isnan(i)) {
expected = static_cast<int64_t>(0);
- } else if (*i < 0.0) {
+ } else if (i < 0.0) {
expected = std::numeric_limits<int64_t>::min();
} else {
expected = std::numeric_limits<int64_t>::max();
}
- int64_t found = r.Call(*i);
+ int64_t found = r.Call(i);
CHECK_EQ(expected, found);
}
}
@@ -793,11 +793,11 @@ WASM_EXEC_TEST(I64UConvertF32) {
BUILD(r, WASM_I64_UCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
- if (*i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
- *i > -1) {
- CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
+ if (i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
+ i > -1) {
+ CHECK_EQ(static_cast<uint64_t>(i), r.Call(i));
} else {
- CHECK_TRAP64(r.Call(*i));
+ CHECK_TRAP64(r.Call(i));
}
}
}
@@ -808,17 +808,17 @@ WASM_EXEC_TEST(I64UConvertSatF32) {
BUILD(r, WASM_I64_UCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
uint64_t expected;
- if (*i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
- *i > -1) {
- expected = static_cast<uint64_t>(*i);
- } else if (std::isnan(*i)) {
+ if (i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
+ i > -1) {
+ expected = static_cast<uint64_t>(i);
+ } else if (std::isnan(i)) {
expected = static_cast<uint64_t>(0);
- } else if (*i < 0.0) {
+ } else if (i < 0.0) {
expected = std::numeric_limits<uint64_t>::min();
} else {
expected = std::numeric_limits<uint64_t>::max();
}
- uint64_t found = r.Call(*i);
+ uint64_t found = r.Call(i);
CHECK_EQ(expected, found);
}
}
@@ -828,11 +828,11 @@ WASM_EXEC_TEST(I64UConvertF64) {
BUILD(r, WASM_I64_UCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
- if (*i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
- *i > -1) {
- CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
+ if (i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
+ i > -1) {
+ CHECK_EQ(static_cast<uint64_t>(i), r.Call(i));
} else {
- CHECK_TRAP64(r.Call(*i));
+ CHECK_TRAP64(r.Call(i));
}
}
}
@@ -843,17 +843,17 @@ WASM_EXEC_TEST(I64UConvertSatF64) {
BUILD(r, WASM_I64_UCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
int64_t expected;
- if (*i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
- *i > -1) {
- expected = static_cast<uint64_t>(*i);
- } else if (std::isnan(*i)) {
+ if (i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
+ i > -1) {
+ expected = static_cast<uint64_t>(i);
+ } else if (std::isnan(i)) {
expected = static_cast<uint64_t>(0);
- } else if (*i < 0.0) {
+ } else if (i < 0.0) {
expected = std::numeric_limits<uint64_t>::min();
} else {
expected = std::numeric_limits<uint64_t>::max();
}
- int64_t found = r.Call(*i);
+ int64_t found = r.Call(i);
CHECK_EQ(expected, found);
}
}
@@ -1115,50 +1115,50 @@ WASM_EXEC_TEST(I64Popcnt2) {
// Test the WasmRunner with an Int64 return value and different numbers of
// Int64 parameters.
WASM_EXEC_TEST(I64WasmRunner) {
- {FOR_INT64_INPUTS(i){WasmRunner<int64_t> r(execution_tier);
- BUILD(r, WASM_I64V(*i));
- CHECK_EQ(*i, r.Call());
-}
-}
-{
- WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_GET_LOCAL(0));
- FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
-}
-{
- WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ(*i ^ *j, r.Call(*i, *j)); }
+ WasmRunner<int64_t> r(execution_tier);
+ BUILD(r, WASM_I64V(i));
+ CHECK_EQ(i, r.Call());
}
-}
-{
- WasmRunner<int64_t, int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0),
- WASM_I64_XOR(WASM_GET_LOCAL(1), WASM_GET_LOCAL(2))));
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) {
- CHECK_EQ(*i ^ *j ^ *j, r.Call(*i, *j, *j));
- CHECK_EQ(*j ^ *i ^ *j, r.Call(*j, *i, *j));
- CHECK_EQ(*j ^ *j ^ *i, r.Call(*j, *j, *i));
+ {
+ WasmRunner<int64_t, int64_t> r(execution_tier);
+ BUILD(r, WASM_GET_LOCAL(0));
+ FOR_INT64_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
+ }
+ {
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
+ BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(i ^ j, r.Call(i, j)); }
}
}
-}
-{
- WasmRunner<int64_t, int64_t, int64_t, int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0),
- WASM_I64_XOR(WASM_GET_LOCAL(1),
- WASM_I64_XOR(WASM_GET_LOCAL(2),
- WASM_GET_LOCAL(3)))));
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) {
- CHECK_EQ(*i ^ *j ^ *j ^ *j, r.Call(*i, *j, *j, *j));
- CHECK_EQ(*j ^ *i ^ *j ^ *j, r.Call(*j, *i, *j, *j));
- CHECK_EQ(*j ^ *j ^ *i ^ *j, r.Call(*j, *j, *i, *j));
- CHECK_EQ(*j ^ *j ^ *j ^ *i, r.Call(*j, *j, *j, *i));
+ {
+ WasmRunner<int64_t, int64_t, int64_t, int64_t> r(execution_tier);
+ BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0),
+ WASM_I64_XOR(WASM_GET_LOCAL(1), WASM_GET_LOCAL(2))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(i ^ j ^ j, r.Call(i, j, j));
+ CHECK_EQ(j ^ i ^ j, r.Call(j, i, j));
+ CHECK_EQ(j ^ j ^ i, r.Call(j, j, i));
+ }
+ }
+ }
+ {
+ WasmRunner<int64_t, int64_t, int64_t, int64_t, int64_t> r(execution_tier);
+ BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0),
+ WASM_I64_XOR(WASM_GET_LOCAL(1),
+ WASM_I64_XOR(WASM_GET_LOCAL(2),
+ WASM_GET_LOCAL(3)))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(i ^ j ^ j ^ j, r.Call(i, j, j, j));
+ CHECK_EQ(j ^ i ^ j ^ j, r.Call(j, i, j, j));
+ CHECK_EQ(j ^ j ^ i ^ j, r.Call(j, j, i, j));
+ CHECK_EQ(j ^ j ^ j ^ i, r.Call(j, j, j, i));
+ }
}
}
-}
}
WASM_EXEC_TEST(Call_Int64Sub) {
@@ -1172,16 +1172,9 @@ WASM_EXEC_TEST(Call_Int64Sub) {
BUILD(r, WASM_CALL_FUNCTION(t.function_index(), WASM_GET_LOCAL(0),
WASM_GET_LOCAL(1)));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int64_t a = static_cast<int64_t>(*i) << 32 |
- (static_cast<int64_t>(*j) | 0xFFFFFFFF);
- int64_t b = static_cast<int64_t>(*j) << 32 |
- (static_cast<int64_t>(*i) | 0xFFFFFFFF);
-
- int64_t expected = static_cast<int64_t>(static_cast<uint64_t>(a) -
- static_cast<uint64_t>(b));
- CHECK_EQ(expected, r.Call(a, b));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(base::SubWithWraparound(i, j), r.Call(i, j));
}
}
}
@@ -1217,7 +1210,8 @@ WASM_EXEC_TEST(LoadStoreI64_sx) {
r.builder().BlankMemory();
memory[size - 1] = static_cast<byte>(i); // set the high order byte.
- int64_t expected = static_cast<int64_t>(i) << ((size - 1) * 8);
+ int64_t expected = static_cast<uint64_t>(static_cast<int64_t>(i))
+ << ((size - 1) * 8);
CHECK_EQ(expected, r.Call());
CHECK_EQ(static_cast<byte>(i), memory[8 + size - 1]);
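
The cast chain introduced above is deliberate: left-shifting a negative int64_t is undefined behaviour in C++, so the expected value is computed on the unsigned type and only then converted back. A sketch of the fixed expression:

// Shift in uint64_t to avoid UB when the sign bit of |i| is set.
int64_t expected = static_cast<uint64_t>(static_cast<int64_t>(i))
                   << ((size - 1) * 8);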
@@ -1238,8 +1232,8 @@ WASM_EXEC_TEST(I64ReinterpretF64) {
WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)));
FOR_INT32_INPUTS(i) {
- int64_t expected = base::MulWithWraparound(static_cast<int64_t>(*i),
- int64_t{0x300010001L});
+ int64_t expected =
+ base::MulWithWraparound(static_cast<int64_t>(i), int64_t{0x300010001L});
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -1264,8 +1258,8 @@ WASM_EXEC_TEST(F64ReinterpretI64) {
WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) {
- int64_t expected = base::MulWithWraparound(static_cast<int64_t>(*i),
- int64_t{0x300010001L});
+ int64_t expected =
+ base::MulWithWraparound(static_cast<int64_t>(i), int64_t{0x300010001L});
CHECK_EQ(expected, r.Call(expected));
CHECK_EQ(expected, r.builder().ReadMemory<int64_t>(&memory[0]));
}
@@ -1380,8 +1374,8 @@ WASM_EXEC_TEST(I64Eqz) {
BUILD(r, WASM_I64_EQZ(WASM_GET_LOCAL(0)));
FOR_INT64_INPUTS(i) {
- int32_t result = *i == 0 ? 1 : 0;
- CHECK_EQ(result, r.Call(*i));
+ int32_t result = i == 0 ? 1 : 0;
+ CHECK_EQ(result, r.Call(i));
}
}
@@ -1391,8 +1385,8 @@ WASM_EXEC_TEST(I64Ror) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- int64_t expected = base::bits::RotateRight64(*i, *j & 0x3F);
- CHECK_EQ(expected, r.Call(*i, *j));
+ int64_t expected = base::bits::RotateRight64(i, j & 0x3F);
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
@@ -1403,8 +1397,8 @@ WASM_EXEC_TEST(I64Rol) {
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
- int64_t expected = base::bits::RotateLeft64(*i, *j & 0x3F);
- CHECK_EQ(expected, r.Call(*i, *j));
+ int64_t expected = base::bits::RotateLeft64(i, j & 0x3F);
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
index fc9e395d44..ea45762a7c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
@@ -73,8 +73,8 @@ WASM_EXEC_TEST(I32AsmjsSConvertF32) {
BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF32, WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
- int32_t expected = DoubleToInt32(*i);
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = DoubleToInt32(i);
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -84,8 +84,8 @@ WASM_EXEC_TEST(I32AsmjsSConvertF64) {
BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF64, WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
- int32_t expected = DoubleToInt32(*i);
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = DoubleToInt32(i);
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -95,8 +95,8 @@ WASM_EXEC_TEST(I32AsmjsUConvertF32) {
BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF32, WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
- uint32_t expected = DoubleToUint32(*i);
- CHECK_EQ(expected, r.Call(*i));
+ uint32_t expected = DoubleToUint32(i);
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -106,8 +106,8 @@ WASM_EXEC_TEST(I32AsmjsUConvertF64) {
BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF64, WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
- uint32_t expected = DoubleToUint32(*i);
- CHECK_EQ(expected, r.Call(*i));
+ uint32_t expected = DoubleToUint32(i);
+ CHECK_EQ(expected, r.Call(i));
}
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index 8eddaa0224..7895b192f8 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -22,11 +22,11 @@ void RunU32BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
- uint32_t initial = *i;
+ uint32_t initial = i;
FOR_UINT32_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*j));
- uint32_t expected = expected_op(*i, *j);
+ CHECK_EQ(initial, r.Call(j));
+ uint32_t expected = expected_op(i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -51,11 +51,11 @@ void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
- uint16_t initial = *i;
+ uint16_t initial = i;
FOR_UINT16_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*j));
- uint16_t expected = expected_op(*i, *j);
+ CHECK_EQ(initial, r.Call(j));
+ uint16_t expected = expected_op(i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -79,11 +79,11 @@ void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
- uint8_t initial = *i;
+ uint8_t initial = i;
FOR_UINT8_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*j));
- uint8_t expected = expected_op(*i, *j);
+ CHECK_EQ(initial, r.Call(j));
+ uint8_t expected = expected_op(i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -107,11 +107,11 @@ WASM_EXEC_TEST(I32AtomicCompareExchange) {
WASM_GET_LOCAL(1), MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
- uint32_t initial = *i;
+ uint32_t initial = i;
FOR_UINT32_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*i, *j));
- uint32_t expected = CompareExchange(initial, *i, *j);
+ CHECK_EQ(initial, r.Call(i, j));
+ uint32_t expected = CompareExchange(initial, i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -129,11 +129,11 @@ WASM_EXEC_TEST(I32AtomicCompareExchange16U) {
MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
- uint16_t initial = *i;
+ uint16_t initial = i;
FOR_UINT16_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*i, *j));
- uint16_t expected = CompareExchange(initial, *i, *j);
+ CHECK_EQ(initial, r.Call(i, j));
+ uint16_t expected = CompareExchange(initial, i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -150,16 +150,38 @@ WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
- uint8_t initial = *i;
+ uint8_t initial = i;
FOR_UINT8_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*i, *j));
- uint8_t expected = CompareExchange(initial, *i, *j);
+ CHECK_EQ(initial, r.Call(i, j));
+ uint8_t expected = CompareExchange(initial, i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
}
+WASM_EXEC_TEST(I32AtomicCompareExchange_fail) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
+ r.builder().SetHasSharedMemory();
+ uint32_t* memory =
+ r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
+ BUILD(r, WASM_ATOMICS_TERNARY_OP(
+ kExprI32AtomicCompareExchange, WASM_I32V_1(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), MachineRepresentation::kWord32));
+
+ // The original value at the memory location.
+ uint32_t old_val = 4;
+ // The value we use as the expected value for the compare-exchange so that it
+ // fails.
+ uint32_t expected = 6;
+ // The new value for the compare-exchange.
+ uint32_t new_val = 5;
+
+ r.builder().WriteMemory(&memory[0], old_val);
+ CHECK_EQ(old_val, r.Call(expected, new_val));
+}
+
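
The new _fail test pins down the failure semantics of atomic compare-exchange: when the expected operand does not match, memory is left untouched and the old value is returned. In the fixture above, memory[0] is 4, the expected operand is 6, and the replacement is 5, so the call returns 4. A sketch of the extra check one could add to assert the memory cell is unchanged:

CHECK_EQ(old_val, r.Call(expected, new_val));            // exchange fails
CHECK_EQ(old_val, r.builder().ReadMemory(&memory[0]));   // cell untouched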
WASM_EXEC_TEST(I32AtomicLoad) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t> r(execution_tier);
@@ -170,7 +192,7 @@ WASM_EXEC_TEST(I32AtomicLoad) {
MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = *i;
+ uint32_t expected = i;
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -186,7 +208,7 @@ WASM_EXEC_TEST(I32AtomicLoad16U) {
MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
- uint16_t expected = *i;
+ uint16_t expected = i;
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -201,7 +223,7 @@ WASM_EXEC_TEST(I32AtomicLoad8U) {
MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
- uint8_t expected = *i;
+ uint8_t expected = i;
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -221,8 +243,8 @@ WASM_EXEC_TEST(I32AtomicStoreLoad) {
MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = *i;
- CHECK_EQ(expected, r.Call(*i));
+ uint32_t expected = i;
+ CHECK_EQ(expected, r.Call(i));
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -242,8 +264,8 @@ WASM_EXEC_TEST(I32AtomicStoreLoad16U) {
MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
- uint16_t expected = *i;
- CHECK_EQ(expected, r.Call(*i));
+ uint16_t expected = i;
+ CHECK_EQ(expected, r.Call(i));
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -261,9 +283,9 @@ WASM_EXEC_TEST(I32AtomicStoreLoad8U) {
MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
- uint8_t expected = *i;
- CHECK_EQ(expected, r.Call(*i));
- CHECK_EQ(*i, r.builder().ReadMemory(&memory[0]));
+ uint8_t expected = i;
+ CHECK_EQ(expected, r.Call(i));
+ CHECK_EQ(i, r.builder().ReadMemory(&memory[0]));
}
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
index 624982a117..bbeafc9151 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
@@ -22,11 +22,11 @@ void RunU64BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
MachineRepresentation::kWord64));
FOR_UINT64_INPUTS(i) {
- uint64_t initial = *i;
+ uint64_t initial = i;
FOR_UINT64_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*j));
- uint64_t expected = expected_op(*i, *j);
+ CHECK_EQ(initial, r.Call(j));
+ uint64_t expected = expected_op(i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -51,11 +51,11 @@ void RunU32BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
- uint32_t initial = *i;
+ uint32_t initial = i;
FOR_UINT32_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*j));
- uint32_t expected = expected_op(*i, *j);
+ CHECK_EQ(initial, r.Call(j));
+ uint32_t expected = expected_op(i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -80,11 +80,11 @@ void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
- uint16_t initial = *i;
+ uint16_t initial = i;
FOR_UINT16_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*j));
- uint16_t expected = expected_op(*i, *j);
+ CHECK_EQ(initial, r.Call(j));
+ uint16_t expected = expected_op(i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -108,11 +108,11 @@ void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
- uint8_t initial = *i;
+ uint8_t initial = i;
FOR_UINT8_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*j));
- uint8_t expected = expected_op(*i, *j);
+ CHECK_EQ(initial, r.Call(j));
+ uint8_t expected = expected_op(i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -136,11 +136,11 @@ WASM_EXEC_TEST(I64AtomicCompareExchange) {
WASM_GET_LOCAL(1), MachineRepresentation::kWord64));
FOR_UINT64_INPUTS(i) {
- uint64_t initial = *i;
+ uint64_t initial = i;
FOR_UINT64_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*i, *j));
- uint64_t expected = CompareExchange(initial, *i, *j);
+ CHECK_EQ(initial, r.Call(i, j));
+ uint64_t expected = CompareExchange(initial, i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -158,11 +158,11 @@ WASM_EXEC_TEST(I64AtomicCompareExchange32U) {
MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
- uint32_t initial = *i;
+ uint32_t initial = i;
FOR_UINT32_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*i, *j));
- uint32_t expected = CompareExchange(initial, *i, *j);
+ CHECK_EQ(initial, r.Call(i, j));
+ uint32_t expected = CompareExchange(initial, i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -180,11 +180,11 @@ WASM_EXEC_TEST(I64AtomicCompareExchange16U) {
MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
- uint16_t initial = *i;
+ uint16_t initial = i;
FOR_UINT16_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*i, *j));
- uint16_t expected = CompareExchange(initial, *i, *j);
+ CHECK_EQ(initial, r.Call(i, j));
+ uint16_t expected = CompareExchange(initial, i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -200,11 +200,11 @@ WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
- uint8_t initial = *i;
+ uint8_t initial = i;
FOR_UINT8_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
- CHECK_EQ(initial, r.Call(*i, *j));
- uint8_t expected = CompareExchange(initial, *i, *j);
+ CHECK_EQ(initial, r.Call(i, j));
+ uint8_t expected = CompareExchange(initial, i, j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -220,7 +220,7 @@ WASM_EXEC_TEST(I64AtomicLoad) {
MachineRepresentation::kWord64));
FOR_UINT64_INPUTS(i) {
- uint64_t expected = *i;
+ uint64_t expected = i;
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -236,7 +236,7 @@ WASM_EXEC_TEST(I64AtomicLoad32U) {
MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = *i;
+ uint32_t expected = i;
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -252,7 +252,7 @@ WASM_EXEC_TEST(I64AtomicLoad16U) {
MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
- uint16_t expected = *i;
+ uint16_t expected = i;
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -267,7 +267,7 @@ WASM_EXEC_TEST(I64AtomicLoad8U) {
MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
- uint8_t expected = *i;
+ uint8_t expected = i;
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -287,8 +287,8 @@ WASM_EXEC_TEST(I64AtomicStoreLoad) {
MachineRepresentation::kWord64));
FOR_UINT64_INPUTS(i) {
- uint64_t expected = *i;
- CHECK_EQ(expected, r.Call(*i));
+ uint64_t expected = i;
+ CHECK_EQ(expected, r.Call(i));
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -308,8 +308,8 @@ WASM_EXEC_TEST(I64AtomicStoreLoad32U) {
MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
- uint32_t expected = *i;
- CHECK_EQ(expected, r.Call(*i));
+ uint32_t expected = i;
+ CHECK_EQ(expected, r.Call(i));
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -329,8 +329,8 @@ WASM_EXEC_TEST(I64AtomicStoreLoad16U) {
MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
- uint16_t expected = *i;
- CHECK_EQ(expected, r.Call(*i));
+ uint16_t expected = i;
+ CHECK_EQ(expected, r.Call(i));
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
@@ -348,9 +348,9 @@ WASM_EXEC_TEST(I64AtomicStoreLoad8U) {
MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
- uint8_t expected = *i;
- CHECK_EQ(expected, r.Call(*i));
- CHECK_EQ(*i, r.builder().ReadMemory(&memory[0]));
+ uint8_t expected = i;
+ CHECK_EQ(expected, r.Call(i));
+ CHECK_EQ(i, r.builder().ReadMemory(&memory[0]));
}
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
new file mode 100644
index 0000000000..eefaa678d1
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
@@ -0,0 +1,220 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api-inl.h"
+#include "test/cctest/wasm/wasm-atomics-utils.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_run_wasm_exceptions {
+
+WASM_EXEC_TEST(TryCatchThrow) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build the main test function.
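+ // The {try} body throws iff the first parameter is zero; the {catch}
+ // handler drops the exception and produces {kResult0} instead.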
+ BUILD(r, WASM_TRY_CATCH_T(kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_GET_LOCAL(0)),
+ WASM_THROW(except))),
+ WASM_STMTS(WASM_DROP, WASM_I32V(kResult0))));
+
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+}
+
+WASM_EXEC_TEST(TryCatchCallDirect) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build a throwing helper function.
+ WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
+ BUILD(throw_func, WASM_THROW(except));
+
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_GET_LOCAL(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(
+ throw_func.function_index(),
+ WASM_I32V(7), WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_STMTS(WASM_DROP, WASM_I32V(kResult0))));
+
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+}
+
+WASM_EXEC_TEST(TryCatchCallIndirect) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build a throwing helper function.
+ WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
+ BUILD(throw_func, WASM_THROW(except));
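+ // Register the signature in the module so the indirect call below can
+ // type-check against it.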
+ r.builder().AddSignature(sigs.i_ii());
+ throw_func.SetSigIndex(0);
+
+ // Add an indirect function table.
+ uint16_t indirect_function_table[] = {
+ static_cast<uint16_t>(throw_func.function_index())};
+ r.builder().AddIndirectFunctionTable(indirect_function_table,
+ arraysize(indirect_function_table));
+ r.builder().PopulateIndirectFunctionTable();
+
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_GET_LOCAL(0)),
+ WASM_STMTS(WASM_CALL_INDIRECT2(
+ 0, WASM_GET_LOCAL(0),
+ WASM_I32V(7), WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_STMTS(WASM_DROP, WASM_I32V(kResult0))));
+
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+}
+
+WASM_EXEC_TEST(TryCatchCallExternal) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ HandleScope scope(CcTest::InitIsolateOnce());
+ const char* source = "(function() { throw 'ball'; })";
+ Handle<JSFunction> js_function =
+ Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+ ManuallyImportedJSFunction import = {sigs.i_ii(), js_function};
+ WasmRunner<uint32_t, uint32_t> r(execution_tier, &import);
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+ constexpr uint32_t kJSFunc = 0;
+
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(
+ WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_GET_LOCAL(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(kJSFunc, WASM_I32V(7),
+ WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_STMTS(WASM_DROP, WASM_I32V(kResult0))));
+
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+}
+
+WASM_EXEC_TEST(TryCatchTrapTypeError) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ HandleScope scope(CcTest::InitIsolateOnce());
+ const char* source = "(function() { return 0; })";
+ Handle<JSFunction> js_function =
+ Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+ // Make sure to use a signature incompatible with JS below.
+ ManuallyImportedJSFunction import = {sigs.i_ll(), js_function};
+ WasmRunner<uint32_t, uint32_t> r(execution_tier, &import);
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+ constexpr uint32_t kJSFunc = 0;
+
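+ // Calling the import with i64 arguments traps with a TypeError, which the
+ // enclosing try/catch handles.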
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(
+ WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_GET_LOCAL(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(kJSFunc, WASM_I64V(7),
+ WASM_I64V(9)),
+ WASM_DROP))),
+ WASM_STMTS(WASM_DROP, WASM_I32V(kResult0))));
+
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+}
+
+namespace {
+
+// TODO(8729): The semantics of this are not yet specified and might change;
+// this test aims at keeping the semantics of the various execution tiers
+// consistent.
+void TestTryCatchTrap(byte* code, size_t code_size,
+ ExecutionTier execution_tier) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier, nullptr, "main",
+ kRuntimeExceptionSupport);
+ r.builder().AddMemory(kWasmPageSize);
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build a trapping helper function.
+ WasmFunctionCompiler& trap_func = r.NewFunction(sigs.i_ii());
+ trap_func.Build(code, code + code_size);
+
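+ // The helper traps; the enclosing try/catch is expected to handle the trap
+ // and produce {kResult0}.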
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_GET_LOCAL(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(
+ trap_func.function_index(),
+ WASM_I32V(7), WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_STMTS(WASM_DROP, WASM_I32V(kResult0))));
+
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+}
+
+} // namespace
+
+WASM_EXEC_TEST(TryCatchTrapUnreachable) {
+ byte code[] = {WASM_UNREACHABLE};
+ TestTryCatchTrap(code, arraysize(code), execution_tier);
+}
+
+WASM_EXEC_TEST(TryCatchTrapMemOutOfBounds) {
+ byte code[] = {WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(-1))};
+ TestTryCatchTrap(code, arraysize(code), execution_tier);
+}
+
+WASM_EXEC_TEST(TryCatchTrapDivByZero) {
+ byte code[] = {WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I32V_1(0))};
+ TestTryCatchTrap(code, arraysize(code), execution_tier);
+}
+
+WASM_EXEC_TEST(TryCatchTrapRemByZero) {
+ byte code[] = {WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_I32V_1(0))};
+ TestTryCatchTrap(code, arraysize(code), execution_tier);
+}
+
+} // namespace test_run_wasm_exceptions
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index c5d0d84e63..1a8c15be31 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -143,6 +143,109 @@ TEST(Run_Wasm_nested_ifs_i) {
CHECK_EQ(14, r.Call(0, 0));
}
+// Repeated from test-run-wasm.cc to avoid polluting header files.
+template <typename T>
+static T factorial(T v) {
+ T expected = 1;
+ for (T i = v; i > 1; i--) {
+ expected *= i;
+ }
+ return expected;
+}
+
+// Basic test of return calls in the interpreter: good old factorial.
+TEST(Run_Wasm_returnCallFactorial) {
+ EXPERIMENTAL_FLAG_SCOPE(return_call);
+ // Run in bounded amount of stack - 8kb.
+ FlagScope<int32_t> stack_size(&v8::internal::FLAG_stack_size, 8);
+
+ WasmRunner<uint32_t, int32_t> r(ExecutionTier::kInterpreter);
+
+ WasmFunctionCompiler& fact_aux_fn =
+ r.NewFunction<int32_t, int32_t, int32_t>("fact_aux");
+
+ BUILD(r, WASM_RETURN_CALL_FUNCTION(fact_aux_fn.function_index(),
+ WASM_GET_LOCAL(0), WASM_I32V(1)));
+
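+ // fact_aux(n, acc) returns acc when n == 1 and otherwise tail-calls
+ // fact_aux(n - 1, n * acc), so the recursion runs in constant stack space.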
+ BUILD(fact_aux_fn,
+ WASM_IF_ELSE_I(
+ WASM_I32_EQ(WASM_I32V(1), WASM_GET_LOCAL(0)), WASM_GET_LOCAL(1),
+ WASM_RETURN_CALL_FUNCTION(
+ fact_aux_fn.function_index(),
+ WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I32V(1)),
+ WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))));
+
+ // Without return calls, the larger inputs would run out of stack space.
+ uint32_t test_values[] = {1, 2, 5, 10, 20, 20000};
+
+ for (uint32_t v : test_values) {
+ uint32_t found = r.Call(v);
+ CHECK_EQ(factorial(v), found);
+ }
+}
+
+TEST(Run_Wasm_returnCallFactorial64) {
+ EXPERIMENTAL_FLAG_SCOPE(return_call);
+
+ int32_t test_values[] = {1, 2, 5, 10, 20};
+ WasmRunner<int64_t, int32_t> r(ExecutionTier::kInterpreter);
+
+ WasmFunctionCompiler& fact_aux_fn =
+ r.NewFunction<int64_t, int32_t, int64_t>("fact_aux");
+
+ BUILD(r, WASM_RETURN_CALL_FUNCTION(fact_aux_fn.function_index(),
+ WASM_GET_LOCAL(0), WASM_I64V(1)));
+
+ BUILD(fact_aux_fn,
+ WASM_IF_ELSE_L(
+ WASM_I32_EQ(WASM_I32V(1), WASM_GET_LOCAL(0)), WASM_GET_LOCAL(1),
+ WASM_RETURN_CALL_FUNCTION(
+ fact_aux_fn.function_index(),
+ WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I32V(1)),
+ WASM_I64_MUL(WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(1)))));
+
+ for (int32_t v : test_values) {
+ CHECK_EQ(factorial<int64_t>(v), r.Call(v));
+ }
+}
+
+TEST(Run_Wasm_returnCallIndirectFactorial) {
+ EXPERIMENTAL_FLAG_SCOPE(return_call);
+
+ TestSignatures sigs;
+
+ WasmRunner<uint32_t, uint32_t> r(ExecutionTier::kInterpreter);
+
+ WasmFunctionCompiler& fact_aux_fn = r.NewFunction(sigs.i_ii(), "fact_aux");
+ fact_aux_fn.SetSigIndex(0);
+
+ r.builder().AddSignature(sigs.i_ii());
+
+ // Function table.
+ uint16_t indirect_function_table[] = {
+ static_cast<uint16_t>(fact_aux_fn.function_index())};
+
+ r.builder().AddIndirectFunctionTable(indirect_function_table,
+ arraysize(indirect_function_table));
+ r.builder().PopulateIndirectFunctionTable();
+
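+ // Tail-call fact_aux(n, 1) through table slot 0.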
+ BUILD(r, WASM_RETURN_CALL_INDIRECT(0, WASM_I32V(0), WASM_GET_LOCAL(0),
+ WASM_I32V(1)));
+
+ BUILD(fact_aux_fn,
+ WASM_IF_ELSE_I(
+ WASM_I32_EQ(WASM_I32V(1), WASM_GET_LOCAL(0)), WASM_GET_LOCAL(1),
+ WASM_RETURN_CALL_INDIRECT(
+ 0, WASM_I32V(0), WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I32V(1)),
+ WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))));
+
+ uint32_t test_values[] = {1, 2, 5, 10, 20};
+
+ for (uint32_t v : test_values) {
+ CHECK_EQ(factorial(v), r.Call(v));
+ }
+}
// Make tests more robust by not hard-coding offsets of various operations.
// The {Find} method finds the offsets for the given bytecodes, returning
// the offsets in an array.
@@ -192,7 +295,7 @@ TEST(Breakpoint_I32Add) {
FOR_UINT32_INPUTS(a) {
for (uint32_t b = 11; b < 3000000000u; b += 1000000000u) {
thread->Reset();
- WasmValue args[] = {WasmValue(*a), WasmValue(b)};
+ WasmValue args[] = {WasmValue(a), WasmValue(b)};
thread->InitFrame(r.function(), args);
for (int i = 0; i < kNumBreakpoints; i++) {
@@ -207,7 +310,7 @@ TEST(Breakpoint_I32Add) {
// Check the thread finished with the right value.
CHECK_EQ(WasmInterpreter::FINISHED, thread->state());
- uint32_t expected = (*a) + (b);
+ uint32_t expected = (a) + (b);
CHECK_EQ(expected, thread->GetReturnValue().to<uint32_t>());
}
}
@@ -227,7 +330,7 @@ TEST(Step_I32Mul) {
FOR_UINT32_INPUTS(a) {
for (uint32_t b = 33; b < 3000000000u; b += 1000000000u) {
thread->Reset();
- WasmValue args[] = {WasmValue(*a), WasmValue(b)};
+ WasmValue args[] = {WasmValue(a), WasmValue(b)};
thread->InitFrame(r.function(), args);
// Run instructions one by one.
@@ -242,7 +345,7 @@ TEST(Step_I32Mul) {
// Check the thread finished with the right value.
CHECK_EQ(WasmInterpreter::FINISHED, thread->state());
- uint32_t expected = (*a) * (b);
+ uint32_t expected = (a) * (b);
CHECK_EQ(expected, thread->GetReturnValue().to<uint32_t>());
}
}
@@ -269,7 +372,7 @@ TEST(Breakpoint_I32And_disable) {
interpreter->SetBreakpoint(r.function(), kLocalsDeclSize + offsets[0],
do_break);
thread->Reset();
- WasmValue args[] = {WasmValue(*a), WasmValue(b)};
+ WasmValue args[] = {WasmValue(a), WasmValue(b)};
thread->InitFrame(r.function(), args);
if (do_break) {
@@ -284,7 +387,7 @@ TEST(Breakpoint_I32And_disable) {
// Check the thread finished with the right value.
CHECK_EQ(WasmInterpreter::FINISHED, thread->state());
- uint32_t expected = (*a) & (b);
+ uint32_t expected = (a) & (b);
CHECK_EQ(expected, thread->GetReturnValue().to<uint32_t>());
}
}
@@ -438,7 +541,7 @@ TEST(TestPossibleNondeterminism) {
TEST(WasmInterpreterActivations) {
WasmRunner<void> r(ExecutionTier::kInterpreter);
Isolate* isolate = r.main_isolate();
- BUILD(r, WASM_NOP);
+ BUILD(r, WASM_UNREACHABLE);
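+ // Use a trapping body so that each activation can raise and handle an
+ // exception below.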
WasmInterpreter* interpreter = r.interpreter();
WasmInterpreter::Thread* thread = interpreter->GetThread(0);
@@ -451,17 +554,20 @@ TEST(WasmInterpreterActivations) {
thread->InitFrame(r.function(), nullptr);
CHECK_EQ(2, thread->NumActivations());
CHECK_EQ(2, thread->GetFrameCount());
- isolate->set_pending_exception(Smi::kZero);
- thread->HandleException(isolate);
+ CHECK_EQ(WasmInterpreter::TRAPPED, thread->Run());
+ thread->RaiseException(isolate, handle(Smi::kZero, isolate));
CHECK_EQ(1, thread->GetFrameCount());
CHECK_EQ(2, thread->NumActivations());
thread->FinishActivation(act1);
+ isolate->clear_pending_exception();
CHECK_EQ(1, thread->GetFrameCount());
CHECK_EQ(1, thread->NumActivations());
- thread->HandleException(isolate);
+ CHECK_EQ(WasmInterpreter::TRAPPED, thread->Run());
+ thread->RaiseException(isolate, handle(Smi::kZero, isolate));
CHECK_EQ(0, thread->GetFrameCount());
CHECK_EQ(1, thread->NumActivations());
thread->FinishActivation(act0);
+ isolate->clear_pending_exception();
CHECK_EQ(0, thread->NumActivations());
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 499942464e..47ed644673 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -69,68 +69,39 @@ ManuallyImportedJSFunction CreateJSSelector(FunctionSig* sig, int which) {
return import;
}
-
-void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc,
- Handle<Object>* buffer, int count) {
- Isolate* isolate = jsfunc->GetIsolate();
- Handle<Object> global(isolate->context()->global_object(), isolate);
- MaybeHandle<Object> retval =
- Execution::Call(isolate, jsfunc, global, count, buffer);
-
- CHECK(!retval.is_null());
- Handle<Object> result = retval.ToHandleChecked();
- if (result->IsSmi()) {
- CHECK_EQ(expected, Smi::ToInt(*result));
- } else {
- CHECK(result->IsHeapNumber());
- CHECK_FLOAT_EQ(expected, HeapNumber::cast(*result)->value());
- }
-}
-
-void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc, double a,
- double b) {
- Isolate* isolate = jsfunc->GetIsolate();
- Handle<Object> buffer[] = {isolate->factory()->NewNumber(a),
- isolate->factory()->NewNumber(b)};
- EXPECT_CALL(expected, jsfunc, buffer, 2);
-}
} // namespace
WASM_EXEC_TEST(Run_Int32Sub_jswrapped) {
WasmRunner<int, int, int> r(execution_tier);
BUILD(r, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
- EXPECT_CALL(33, jsfunc, 44, 11);
- EXPECT_CALL(-8723487, jsfunc, -8000000, 723487);
+ r.CheckCallViaJS(33, 44, 11);
+ r.CheckCallViaJS(-8723487, -8000000, 723487);
}
WASM_EXEC_TEST(Run_Float32Div_jswrapped) {
WasmRunner<float, float, float> r(execution_tier);
BUILD(r, WASM_F32_DIV(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
- EXPECT_CALL(92, jsfunc, 46, 0.5);
- EXPECT_CALL(64, jsfunc, -16, -0.25);
+ r.CheckCallViaJS(92, 46, 0.5);
+ r.CheckCallViaJS(64, -16, -0.25);
}
WASM_EXEC_TEST(Run_Float64Add_jswrapped) {
WasmRunner<double, double, double> r(execution_tier);
BUILD(r, WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
- EXPECT_CALL(3, jsfunc, 2, 1);
- EXPECT_CALL(-5.5, jsfunc, -5.25, -0.25);
+ r.CheckCallViaJS(3, 2, 1);
+ r.CheckCallViaJS(-5.5, -5.25, -0.25);
}
WASM_EXEC_TEST(Run_I32Popcount_jswrapped) {
WasmRunner<int, int> r(execution_tier);
BUILD(r, WASM_I32_POPCNT(WASM_GET_LOCAL(0)));
- Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
- EXPECT_CALL(2, jsfunc, 9, 0);
- EXPECT_CALL(3, jsfunc, 11, 0);
- EXPECT_CALL(6, jsfunc, 0x3F, 0);
+ r.CheckCallViaJS(2, 9);
+ r.CheckCallViaJS(3, 11);
+ r.CheckCallViaJS(6, 0x3F);
}
WASM_EXEC_TEST(Run_CallJS_Add_jswrapped) {
@@ -143,15 +114,48 @@ WASM_EXEC_TEST(Run_CallJS_Add_jswrapped) {
ManuallyImportedJSFunction import = {sigs.i_i(), js_function};
WasmRunner<int, int> r(execution_tier, &import);
uint32_t js_index = 0;
+ BUILD(r, WASM_CALL_FUNCTION(js_index, WASM_GET_LOCAL(0)));
- WasmFunctionCompiler& t = r.NewFunction(sigs.i_i());
- BUILD(t, WASM_CALL_FUNCTION(js_index, WASM_GET_LOCAL(0)));
+ r.CheckCallViaJS(101, 2);
+ r.CheckCallViaJS(199, 100);
+ r.CheckCallViaJS(-666666801, -666666900);
+}
- Handle<JSFunction> jsfunc = r.builder().WrapCode(t.function_index());
+WASM_EXEC_TEST(Run_IndirectCallJSFunction) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ TestSignatures sigs;
- EXPECT_CALL(101, jsfunc, 2, -8);
- EXPECT_CALL(199, jsfunc, 100, -1);
- EXPECT_CALL(-666666801, jsfunc, -666666900, -1);
+ const char* source = "(function(a, b, c) { if(c) return a; return b; })";
+ Handle<JSFunction> js_function =
+ Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+
+ ManuallyImportedJSFunction import = {sigs.i_iii(), js_function};
+
+ WasmRunner<int32_t, int32_t> r(execution_tier, &import);
+
+ const uint32_t js_index = 0;
+ const int32_t left = -2;
+ const int32_t right = 3;
+
+ WasmFunctionCompiler& rc_fn = r.NewFunction(sigs.i_i(), "rc");
+
+ r.builder().AddSignature(sigs.i_iii());
+ uint16_t indirect_function_table[] = {static_cast<uint16_t>(js_index)};
+
+ r.builder().AddIndirectFunctionTable(indirect_function_table,
+ arraysize(indirect_function_table));
+ r.builder().PopulateIndirectFunctionTable();
+
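+ // rc(c) indirectly calls the imported JS selector with (left, right, c), so
+ // it returns {left} when c is non-zero and {right} otherwise.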
+ BUILD(rc_fn, WASM_CALL_INDIRECT3(0, WASM_I32V(js_index), WASM_I32V(left),
+ WASM_I32V(right), WASM_GET_LOCAL(0)));
+
+ Handle<Object> args_left[] = {isolate->factory()->NewNumber(1)};
+ r.CheckCallViaJS(left, rc_fn.function_index(), args_left, 1);
+
+ Handle<Object> args_right[] = {isolate->factory()->NewNumber(0)};
+ r.CheckCallViaJS(right, rc_fn.function_index(), args_right, 1);
}
void RunJSSelectTest(ExecutionTier tier, int which) {
@@ -184,9 +188,8 @@ void RunJSSelectTest(ExecutionTier tier, int which) {
t.Build(&code[0], &code[end]);
}
- Handle<JSFunction> jsfunc = r.builder().WrapCode(t.function_index());
double expected = inputs.arg_d(which);
- EXPECT_CALL(expected, jsfunc, 0.0, 0.0);
+ r.CheckCallViaJS(expected, t.function_index(), nullptr, 0);
}
}
@@ -243,7 +246,6 @@ void RunWASMSelectTest(ExecutionTier tier, int which) {
WasmRunner<void> r(tier);
WasmFunctionCompiler& t = r.NewFunction(&sig);
BUILD(t, WASM_GET_LOCAL(which));
- Handle<JSFunction> jsfunc = r.builder().WrapCode(t.function_index());
Handle<Object> args[] = {
isolate->factory()->NewNumber(inputs.arg_d(0)),
@@ -257,7 +259,7 @@ void RunWASMSelectTest(ExecutionTier tier, int which) {
};
double expected = inputs.arg_d(which);
- EXPECT_CALL(expected, jsfunc, args, kMaxParams);
+ r.CheckCallViaJS(expected, t.function_index(), args, kMaxParams);
}
}
@@ -315,7 +317,6 @@ void RunWASMSelectAlignTest(ExecutionTier tier, int num_args, int num_params) {
WasmRunner<void> r(tier);
WasmFunctionCompiler& t = r.NewFunction(&sig);
BUILD(t, WASM_GET_LOCAL(which));
- Handle<JSFunction> jsfunc = r.builder().WrapCode(t.function_index());
Handle<Object> args[] = {isolate->factory()->NewNumber(inputs.arg_d(0)),
isolate->factory()->NewNumber(inputs.arg_d(1)),
@@ -330,7 +331,7 @@ void RunWASMSelectAlignTest(ExecutionTier tier, int num_args, int num_params) {
double nan = std::numeric_limits<double>::quiet_NaN();
double expected = which < num_args ? inputs.arg_d(which) : nan;
- EXPECT_CALL(expected, jsfunc, args, num_args);
+ r.CheckCallViaJS(expected, t.function_index(), args, num_args);
}
}
@@ -430,8 +431,6 @@ void RunJSSelectAlignTest(ExecutionTier tier, int num_args, int num_params) {
WasmFunctionCompiler& t = r.NewFunction(&sig);
t.Build(&code[0], &code[end]);
- Handle<JSFunction> jsfunc = r.builder().WrapCode(t.function_index());
-
Handle<Object> args[] = {
factory->NewNumber(inputs.arg_d(0)),
factory->NewNumber(inputs.arg_d(1)),
@@ -447,7 +446,7 @@ void RunJSSelectAlignTest(ExecutionTier tier, int num_args, int num_params) {
double nan = std::numeric_limits<double>::quiet_NaN();
double expected = which < num_args ? inputs.arg_d(which) : nan;
- EXPECT_CALL(expected, jsfunc, args, num_args);
+ r.CheckCallViaJS(expected, t.function_index(), args, num_args);
}
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 2503ec57fd..d23bdc133f 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -854,8 +854,7 @@ TEST(Run_WasmModule_Reclaim_Memory) {
Handle<JSArrayBuffer> buffer;
for (int i = 0; i < 256; ++i) {
HandleScope scope(isolate);
- CHECK(NewArrayBuffer(isolate, kWasmPageSize, SharedFlag::kNotShared)
- .ToHandle(&buffer));
+ CHECK(NewArrayBuffer(isolate, kWasmPageSize).ToHandle(&buffer));
}
}
#endif
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index ed8bdf7281..fa27e983af 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -158,7 +158,8 @@ int UnsignedGreaterEqual(T a, T b) {
template <typename T>
T LogicalShiftLeft(T a, int shift) {
- return a << shift;
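+ // Shift in the unsigned domain to avoid undefined behavior on negative
+ // values.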
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return static_cast<UnsignedT>(a) << shift;
}
template <typename T>
@@ -407,8 +408,8 @@ WASM_SIMD_TEST(F32x4Splat) {
WASM_SIMD_CHECK_SPLAT_F32x4(simd, lane_val), WASM_RETURN1(WASM_ONE));
FOR_FLOAT32_INPUTS(i) {
- if (SkipFPExpectedValue(*i)) continue;
- CHECK_EQ(1, r.Call(*i));
+ if (SkipFPExpectedValue(i)) continue;
+ CHECK_EQ(1, r.Call(i));
}
}
@@ -485,8 +486,8 @@ WASM_SIMD_COMPILED_TEST(F32x4ConvertI32x4) {
WASM_RETURN1(WASM_ONE));
FOR_INT32_INPUTS(i) {
- CHECK_EQ(1, r.Call(*i, static_cast<float>(*i),
- static_cast<float>(static_cast<uint32_t>(*i))));
+ CHECK_EQ(1, r.Call(i, static_cast<float>(i),
+ static_cast<float>(static_cast<uint32_t>(i))));
}
}
@@ -504,11 +505,11 @@ void RunF32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_RETURN1(WASM_ONE));
FOR_FLOAT32_INPUTS(i) {
- if (SkipFPValue(*i)) continue;
- float expected = expected_op(*i);
+ if (SkipFPValue(i)) continue;
+ float expected = expected_op(i);
if (SkipFPExpectedValue(expected)) continue;
float abs_error = std::abs(expected) * error;
- CHECK_EQ(1, r.Call(*i, expected - abs_error, expected + abs_error));
+ CHECK_EQ(1, r.Call(i, expected - abs_error, expected + abs_error));
}
}
@@ -546,12 +547,12 @@ void RunF32x4BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SIMD_CHECK_SPLAT_F32x4(simd1, expected), WASM_RETURN1(WASM_ONE));
FOR_FLOAT32_INPUTS(i) {
- if (SkipFPValue(*i)) continue;
+ if (SkipFPValue(i)) continue;
FOR_FLOAT32_INPUTS(j) {
- if (SkipFPValue(*j)) continue;
- float expected = expected_op(*i, *j);
+ if (SkipFPValue(j)) continue;
+ float expected = expected_op(i, j);
if (SkipFPExpectedValue(expected)) continue;
- CHECK_EQ(1, r.Call(*i, *j, expected));
+ CHECK_EQ(1, r.Call(i, j, expected));
}
}
}
@@ -587,12 +588,12 @@ void RunF32x4CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SIMD_CHECK_SPLAT4(I32x4, simd1, I32, expected), WASM_ONE);
FOR_FLOAT32_INPUTS(i) {
- if (SkipFPValue(*i)) continue;
+ if (SkipFPValue(i)) continue;
FOR_FLOAT32_INPUTS(j) {
- if (SkipFPValue(*j)) continue;
- float diff = *i - *j;
+ if (SkipFPValue(j)) continue;
+ float diff = i - j;
if (SkipFPExpectedValue(diff)) continue;
- CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j)));
+ CHECK_EQ(1, r.Call(i, j, expected_op(i, j)));
}
}
}
@@ -639,7 +640,7 @@ WASM_SIMD_TEST(I32x4Splat) {
WASM_SET_LOCAL(simd, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(lane_val))),
WASM_SIMD_CHECK_SPLAT4(I32x4, simd, I32, lane_val), WASM_ONE);
- FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(i)); }
}
WASM_SIMD_TEST(I32x4ReplaceLane) {
@@ -676,7 +677,7 @@ WASM_SIMD_TEST(I16x8Splat) {
WASM_SET_LOCAL(simd, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(lane_val))),
WASM_SIMD_CHECK_SPLAT8(I16x8, simd, I32, lane_val), WASM_ONE);
- FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i)); }
+ FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(i)); }
}
WASM_SIMD_TEST(I16x8ReplaceLane) {
@@ -736,7 +737,7 @@ WASM_SIMD_TEST(I8x16Splat) {
WASM_SET_LOCAL(simd, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(lane_val))),
WASM_SIMD_CHECK_SPLAT8(I8x16, simd, I32, lane_val), WASM_ONE);
- FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(*i)); }
+ FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(i)); }
}
WASM_SIMD_TEST(I8x16ReplaceLane) {
@@ -874,10 +875,10 @@ WASM_SIMD_TEST(I32x4ConvertF32x4) {
WASM_SIMD_CHECK_SPLAT4(I32x4, simd2, I32, expected_unsigned), WASM_ONE);
FOR_FLOAT32_INPUTS(i) {
- if (SkipFPValue(*i)) continue;
- int32_t signed_value = ConvertToInt(*i, false);
- int32_t unsigned_value = ConvertToInt(*i, true);
- CHECK_EQ(1, r.Call(*i, signed_value, unsigned_value));
+ if (SkipFPValue(i)) continue;
+ int32_t signed_value = ConvertToInt(i, false);
+ int32_t unsigned_value = ConvertToInt(i, true);
+ CHECK_EQ(1, r.Call(i, signed_value, unsigned_value));
}
}
@@ -915,10 +916,9 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
WASM_ONE);
FOR_INT16_INPUTS(i) {
- int32_t unpacked_signed = static_cast<int32_t>(Widen<int16_t>(*i));
- int32_t unpacked_unsigned =
- static_cast<int32_t>(UnsignedWiden<int16_t>(*i));
- CHECK_EQ(1, r.Call(*i, unpacked_signed, unpacked_unsigned, 0));
+ int32_t unpacked_signed = static_cast<int32_t>(Widen<int16_t>(i));
+ int32_t unpacked_unsigned = static_cast<int32_t>(UnsignedWiden<int16_t>(i));
+ CHECK_EQ(1, r.Call(i, unpacked_signed, unpacked_unsigned, 0));
}
}
@@ -932,7 +932,7 @@ void RunI32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
WASM_SIMD_CHECK_SPLAT4(I32x4, simd, I32, expected), WASM_ONE);
- FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(i, expected_op(i))); }
}
WASM_SIMD_TEST(I32x4Neg) {
@@ -959,7 +959,7 @@ void RunI32x4BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SIMD_CHECK_SPLAT4(I32x4, simd1, I32, expected), WASM_ONE);
FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
+ FOR_INT32_INPUTS(j) { CHECK_EQ(1, r.Call(i, j, expected_op(i, j))); }
}
}
@@ -1023,7 +1023,7 @@ void RunI32x4CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SIMD_CHECK_SPLAT4(I32x4, simd1, I32, expected), WASM_ONE);
FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
+ FOR_INT32_INPUTS(j) { CHECK_EQ(1, r.Call(i, j, expected_op(i, j))); }
}
}
@@ -1084,7 +1084,7 @@ void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
simd, WASM_SIMD_SHIFT_OP(simd_op, shift, WASM_GET_LOCAL(simd))),
WASM_SIMD_CHECK_SPLAT4(I32x4, simd, I32, expected), WASM_ONE);
- FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(i, expected_op(i, shift))); }
}
}
@@ -1140,9 +1140,9 @@ WASM_SIMD_TEST(I16x8ConvertI8x16) {
WASM_ONE);
FOR_INT8_INPUTS(i) {
- int32_t unpacked_signed = static_cast<int32_t>(Widen<int8_t>(*i));
- int32_t unpacked_unsigned = static_cast<int32_t>(UnsignedWiden<int8_t>(*i));
- CHECK_EQ(1, r.Call(*i, unpacked_signed, unpacked_unsigned, 0));
+ int32_t unpacked_signed = static_cast<int32_t>(Widen<int8_t>(i));
+ int32_t unpacked_unsigned = static_cast<int32_t>(UnsignedWiden<int8_t>(i));
+ CHECK_EQ(1, r.Call(i, unpacked_signed, unpacked_unsigned, 0));
}
}
@@ -1156,7 +1156,7 @@ void RunI16x8UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
WASM_SIMD_CHECK_SPLAT8(I16x8, simd, I32, expected), WASM_ONE);
- FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
+ FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(i, expected_op(i))); }
}
WASM_SIMD_TEST(I16x8Neg) {
@@ -1196,15 +1196,15 @@ WASM_SIMD_TEST(I16x8ConvertI32x4) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
// packed signed values
- int32_t ps_a = Narrow<int16_t>(*i);
- int32_t ps_b = Narrow<int16_t>(*j);
+ int32_t ps_a = Narrow<int16_t>(i);
+ int32_t ps_b = Narrow<int16_t>(j);
// packed unsigned values
- int32_t pu_a = UnsignedNarrow<int16_t>(*i);
- int32_t pu_b = UnsignedNarrow<int16_t>(*j);
+ int32_t pu_a = UnsignedNarrow<int16_t>(i);
+ int32_t pu_b = UnsignedNarrow<int16_t>(j);
// Sign-extend here, since ExtractLane sign extends.
if (pu_a & 0x8000) pu_a |= 0xFFFF0000;
if (pu_b & 0x8000) pu_b |= 0xFFFF0000;
- CHECK_EQ(1, r.Call(*i, *j, ps_a, ps_b, pu_a, pu_b));
+ CHECK_EQ(1, r.Call(i, j, ps_a, ps_b, pu_a, pu_b));
}
}
}
@@ -1224,7 +1224,7 @@ void RunI16x8BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SIMD_CHECK_SPLAT8(I16x8, simd1, I32, expected), WASM_ONE);
FOR_INT16_INPUTS(i) {
- FOR_INT16_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
+ FOR_INT16_INPUTS(j) { CHECK_EQ(1, r.Call(i, j, expected_op(i, j))); }
}
}
@@ -1296,7 +1296,7 @@ void RunI16x8CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SIMD_CHECK_SPLAT8(I16x8, simd1, I32, expected), WASM_ONE);
FOR_INT16_INPUTS(i) {
- FOR_INT16_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
+ FOR_INT16_INPUTS(j) { CHECK_EQ(1, r.Call(i, j, expected_op(i, j))); }
}
}
@@ -1357,7 +1357,7 @@ void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
simd, WASM_SIMD_SHIFT_OP(simd_op, shift, WASM_GET_LOCAL(simd))),
WASM_SIMD_CHECK_SPLAT8(I16x8, simd, I32, expected), WASM_ONE);
- FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
+ FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(i, expected_op(i, shift))); }
}
}
@@ -1386,7 +1386,7 @@ void RunI8x16UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
WASM_SIMD_CHECK_SPLAT16(I8x16, simd, I32, expected), WASM_ONE);
- FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
+ FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(i, expected_op(i))); }
}
WASM_SIMD_TEST(I8x16Neg) {
@@ -1428,15 +1428,15 @@ WASM_SIMD_TEST(I8x16ConvertI16x8) {
FOR_INT16_INPUTS(i) {
FOR_INT16_INPUTS(j) {
// packed signed values
- int32_t ps_a = Narrow<int8_t>(*i);
- int32_t ps_b = Narrow<int8_t>(*j);
+ int32_t ps_a = Narrow<int8_t>(i);
+ int32_t ps_b = Narrow<int8_t>(j);
// packed unsigned values
- int32_t pu_a = UnsignedNarrow<int8_t>(*i);
- int32_t pu_b = UnsignedNarrow<int8_t>(*j);
+ int32_t pu_a = UnsignedNarrow<int8_t>(i);
+ int32_t pu_b = UnsignedNarrow<int8_t>(j);
// Sign-extend here, since ExtractLane sign extends.
if (pu_a & 0x80) pu_a |= 0xFFFFFF00;
if (pu_b & 0x80) pu_b |= 0xFFFFFF00;
- CHECK_EQ(1, r.Call(*i, *j, ps_a, ps_b, pu_a, pu_b));
+ CHECK_EQ(1, r.Call(i, j, ps_a, ps_b, pu_a, pu_b));
}
}
}
@@ -1456,7 +1456,7 @@ void RunI8x16BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SIMD_CHECK_SPLAT16(I8x16, simd1, I32, expected), WASM_ONE);
FOR_INT8_INPUTS(i) {
- FOR_INT8_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
+ FOR_INT8_INPUTS(j) { CHECK_EQ(1, r.Call(i, j, expected_op(i, j))); }
}
}
@@ -1523,7 +1523,7 @@ void RunI8x16CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WASM_SIMD_CHECK_SPLAT16(I8x16, simd1, I32, expected), WASM_ONE);
FOR_INT8_INPUTS(i) {
- FOR_INT8_INPUTS(j) { CHECK_EQ(1, r.Call(*i, *j, expected_op(*i, *j))); }
+ FOR_INT8_INPUTS(j) { CHECK_EQ(1, r.Call(i, j, expected_op(i, j))); }
}
}
@@ -1589,7 +1589,7 @@ void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
simd, WASM_SIMD_SHIFT_OP(simd_op, shift, WASM_GET_LOCAL(simd))),
WASM_SIMD_CHECK_SPLAT16(I8x16, simd, I32, expected), WASM_ONE);
- FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
+ FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(i, expected_op(i, shift))); }
}
}
@@ -2334,7 +2334,7 @@ WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(4))));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i;
+ int32_t expected = i;
r.builder().WriteMemory(&memory[1], expected);
CHECK_EQ(expected, r.Call());
}
@@ -2356,9 +2356,9 @@ WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
DCHECK_EQ(1, r.Call(5)); \
DCHECK_EQ(0, r.Call(0)); \
}
-WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff);
-WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff);
-WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff);
+WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff)
+WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff)
+WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff)
#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max) \
WASM_SIMD_TEST_TURBOFAN(S##format##AllTrue) { \
@@ -2372,9 +2372,9 @@ WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff);
DCHECK_EQ(0, r.Call(21)); \
DCHECK_EQ(0, r.Call(0)); \
}
-WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff);
-WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff);
-WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff);
+WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff)
+WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff)
+WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff)
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST_TURBOFAN(BitSelect) {
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 7e6ba47448..ef481bc929 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -38,7 +38,7 @@ WASM_EXEC_TEST(Int32Const) {
WASM_EXEC_TEST(Int32Const_many) {
FOR_INT32_INPUTS(i) {
WasmRunner<int32_t> r(execution_tier);
- const int32_t kExpectedValue = *i;
+ const int32_t kExpectedValue = i;
// return(kExpectedValue)
BUILD(r, WASM_I32V(kExpectedValue));
CHECK_EQ(kExpectedValue, r.Call());
@@ -57,21 +57,21 @@ WASM_EXEC_TEST(Int32Param0) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// return(local[0])
BUILD(r, WASM_GET_LOCAL(0));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Int32Param0_fallthru) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// local[0]
BUILD(r, WASM_GET_LOCAL(0));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Int32Param1) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
// local[1]
BUILD(r, WASM_GET_LOCAL(1));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(-111, *i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(-111, i)); }
}
WASM_EXEC_TEST(Int32Add) {
@@ -85,14 +85,14 @@ WASM_EXEC_TEST(Int32Add_P) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// p0 + 13
BUILD(r, WASM_I32_ADD(WASM_I32V_1(13), WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(base::AddWithWraparound(*i, 13), r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(base::AddWithWraparound(i, 13), r.Call(i)); }
}
WASM_EXEC_TEST(Int32Add_P_fallthru) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// p0 + 13
BUILD(r, WASM_I32_ADD(WASM_I32V_1(13), WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(base::AddWithWraparound(*i, 13), r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(base::AddWithWraparound(i, 13), r.Call(i)); }
}
static void RunInt32AddTest(ExecutionTier execution_tier, const byte* code,
@@ -104,9 +104,9 @@ static void RunInt32AddTest(ExecutionTier execution_tier, const byte* code,
r.Build(code, code + size);
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) +
- static_cast<uint32_t>(*j));
- CHECK_EQ(expected, r.Call(*i, *j));
+ int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(i) +
+ static_cast<uint32_t>(j));
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
@@ -168,8 +168,8 @@ static void TestInt32Binop(ExecutionTier execution_tier, WasmOpcode opcode,
FOR_INT32_INPUTS(j) {
WasmRunner<ctype> r(execution_tier);
// Apply {opcode} on two constants.
- BUILD(r, WASM_BINOP(opcode, WASM_I32V(*i), WASM_I32V(*j)));
- CHECK_EQ(expected(*i, *j), r.Call());
+ BUILD(r, WASM_BINOP(opcode, WASM_I32V(i), WASM_I32V(j)));
+ CHECK_EQ(expected(i, j), r.Call());
}
}
{
@@ -178,7 +178,7 @@ static void TestInt32Binop(ExecutionTier execution_tier, WasmOpcode opcode,
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- CHECK_EQ(expected(*i, *j), r.Call(*i, *j));
+ CHECK_EQ(expected(i, j), r.Call(i, j));
}
}
}
@@ -204,7 +204,7 @@ WASM_I32_BINOP_TEST(RemU, uint32_t, b == 0 ? 0xDEADBEEF : a % b)
WASM_I32_BINOP_TEST(And, int32_t, a& b)
WASM_I32_BINOP_TEST(Ior, int32_t, a | b)
WASM_I32_BINOP_TEST(Xor, int32_t, a ^ b)
-WASM_I32_BINOP_TEST(Shl, int32_t, a << (b & 0x1F))
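+// {base::ShlWithWraparound} masks the shift count and performs the shift
+// without undefined behavior on signed overflow.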
+WASM_I32_BINOP_TEST(Shl, int32_t, base::ShlWithWraparound(a, b))
WASM_I32_BINOP_TEST(ShrU, uint32_t, a >> (b & 0x1F))
WASM_I32_BINOP_TEST(ShrS, int32_t, a >> (b & 0x1F))
WASM_I32_BINOP_TEST(Ror, uint32_t, (a >> (b & 0x1F)) | (a << ((32 - b) & 0x1F)))
@@ -392,11 +392,11 @@ WASM_EXEC_TEST(Int32AsmjsDivS_byzero_const) {
BUILD(r, WASM_I32_ASMJS_DIVS(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
FOR_INT32_INPUTS(i) {
if (denom == 0) {
- CHECK_EQ(0, r.Call(*i));
- } else if (denom == -1 && *i == std::numeric_limits<int32_t>::min()) {
- CHECK_EQ(std::numeric_limits<int32_t>::min(), r.Call(*i));
+ CHECK_EQ(0, r.Call(i));
+ } else if (denom == -1 && i == std::numeric_limits<int32_t>::min()) {
+ CHECK_EQ(std::numeric_limits<int32_t>::min(), r.Call(i));
} else {
- CHECK_EQ(*i / denom, r.Call(*i));
+ CHECK_EQ(i / denom, r.Call(i));
}
}
}
@@ -409,11 +409,11 @@ WASM_EXEC_TEST(Int32AsmjsRemS_byzero_const) {
BUILD(r, WASM_I32_ASMJS_REMS(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
FOR_INT32_INPUTS(i) {
if (denom == 0) {
- CHECK_EQ(0, r.Call(*i));
- } else if (denom == -1 && *i == std::numeric_limits<int32_t>::min()) {
- CHECK_EQ(0, r.Call(*i));
+ CHECK_EQ(0, r.Call(i));
+ } else if (denom == -1 && i == std::numeric_limits<int32_t>::min()) {
+ CHECK_EQ(0, r.Call(i));
} else {
- CHECK_EQ(*i % denom, r.Call(*i));
+ CHECK_EQ(i % denom, r.Call(i));
}
}
}
@@ -605,8 +605,7 @@ WASM_EXEC_TEST(Float32Neg) {
BUILD(r, WASM_F32_NEG(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
- CHECK_EQ(0x80000000,
- bit_cast<uint32_t>(*i) ^ bit_cast<uint32_t>(r.Call(*i)));
+ CHECK_EQ(0x80000000, bit_cast<uint32_t>(i) ^ bit_cast<uint32_t>(r.Call(i)));
}
}
@@ -616,7 +615,7 @@ WASM_EXEC_TEST(Float64Neg) {
FOR_FLOAT64_INPUTS(i) {
CHECK_EQ(0x8000000000000000,
- bit_cast<uint64_t>(*i) ^ bit_cast<uint64_t>(r.Call(*i)));
+ bit_cast<uint64_t>(i) ^ bit_cast<uint64_t>(r.Call(i)));
}
}
@@ -627,36 +626,36 @@ WASM_EXEC_TEST(IfElse_P) {
WASM_I32V_1(11), // --
WASM_I32V_1(22))); // --
FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 11 : 22;
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = i ? 11 : 22;
+ CHECK_EQ(expected, r.Call(i));
}
}
WASM_EXEC_TEST(If_empty1) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, kExprEnd, WASM_GET_LOCAL(1));
- FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 9, *i)); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i - 9, i)); }
}
WASM_EXEC_TEST(IfElse_empty1) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, kExprElse, kExprEnd,
WASM_GET_LOCAL(1));
- FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 8, *i)); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i - 8, i)); }
}
WASM_EXEC_TEST(IfElse_empty2) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, WASM_NOP, kExprElse,
kExprEnd, WASM_GET_LOCAL(1));
- FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 7, *i)); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i - 7, i)); }
}
WASM_EXEC_TEST(IfElse_empty3) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, kExprElse, WASM_NOP,
kExprEnd, WASM_GET_LOCAL(1));
- FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 6, *i)); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i - 6, i)); }
}
WASM_EXEC_TEST(If_chain1) {
@@ -664,7 +663,7 @@ WASM_EXEC_TEST(If_chain1) {
// if (p0) 13; if (p0) 14; 15
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_NOP),
WASM_IF(WASM_GET_LOCAL(0), WASM_NOP), WASM_I32V_1(15));
- FOR_INT32_INPUTS(i) { CHECK_EQ(15, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(15, r.Call(i)); }
}
WASM_EXEC_TEST(If_chain_set) {
@@ -674,8 +673,8 @@ WASM_EXEC_TEST(If_chain_set) {
WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(1, WASM_I32V_2(74))),
WASM_GET_LOCAL(1));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 74 : *i;
- CHECK_EQ(expected, r.Call(*i, *i));
+ int32_t expected = i ? 74 : i;
+ CHECK_EQ(expected, r.Call(i, i));
}
}
@@ -716,7 +715,7 @@ WASM_EXEC_TEST(Return_I32) {
BUILD(r, RET(WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Return_F32) {
@@ -725,7 +724,7 @@ WASM_EXEC_TEST(Return_F32) {
BUILD(r, RET(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
- float expect = *i;
+ float expect = i;
float result = r.Call(expect);
if (std::isnan(expect)) {
CHECK(std::isnan(result));
@@ -741,7 +740,7 @@ WASM_EXEC_TEST(Return_F64) {
BUILD(r, RET(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
- double expect = *i;
+ double expect = i;
double result = r.Call(expect);
if (std::isnan(expect)) {
CHECK(std::isnan(result));
@@ -764,8 +763,8 @@ WASM_EXEC_TEST(Select) {
// return select(11, 22, a);
BUILD(r, WASM_SELECT(WASM_I32V_1(11), WASM_I32V_1(22), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 11 : 22;
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = i ? 11 : 22;
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -776,7 +775,7 @@ WASM_EXEC_TEST(Select_strict1) {
WASM_TEE_LOCAL(0, WASM_I32V_1(1)),
WASM_TEE_LOCAL(0, WASM_I32V_1(2))),
WASM_DROP, WASM_GET_LOCAL(0));
- FOR_INT32_INPUTS(i) { CHECK_EQ(2, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(2, r.Call(i)); }
}
WASM_EXEC_TEST(Select_strict2) {
@@ -787,8 +786,8 @@ WASM_EXEC_TEST(Select_strict2) {
BUILD(r, WASM_SELECT(WASM_TEE_LOCAL(1, WASM_I32V_1(5)),
WASM_TEE_LOCAL(2, WASM_I32V_1(6)), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 5 : 6;
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = i ? 5 : 6;
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -802,7 +801,7 @@ WASM_EXEC_TEST(Select_strict3) {
WASM_TEE_LOCAL(0, WASM_GET_LOCAL(1))));
FOR_INT32_INPUTS(i) {
int32_t expected = 5;
- CHECK_EQ(expected, r.Call(*i));
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -811,7 +810,7 @@ WASM_EXEC_TEST(BrIf_strict) {
BUILD(r, WASM_BLOCK_I(WASM_BRV_IF(0, WASM_GET_LOCAL(0),
WASM_TEE_LOCAL(0, WASM_I32V_2(99)))));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Br_height) {
@@ -839,7 +838,7 @@ WASM_EXEC_TEST(BrTable0a) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))),
WASM_I32V_2(91));
- FOR_INT32_INPUTS(i) { CHECK_EQ(91, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(91, r.Call(i)); }
}
WASM_EXEC_TEST(BrTable0b) {
@@ -847,7 +846,7 @@ WASM_EXEC_TEST(BrTable0b) {
BUILD(r,
B1(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(0)))),
WASM_I32V_2(92));
- FOR_INT32_INPUTS(i) { CHECK_EQ(92, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(92, r.Call(i)); }
}
WASM_EXEC_TEST(BrTable0c) {
@@ -858,15 +857,15 @@ WASM_EXEC_TEST(BrTable0c) {
RET_I8(76))),
WASM_I32V_2(77));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i == 0 ? 76 : 77;
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = i == 0 ? 76 : 77;
+ CHECK_EQ(expected, r.Call(i));
}
}
WASM_EXEC_TEST(BrTable1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0))), RET_I8(93));
- FOR_INT32_INPUTS(i) { CHECK_EQ(93, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(93, r.Call(i)); }
}
WASM_EXEC_TEST(BrTable_loop) {
@@ -1021,7 +1020,7 @@ WASM_EXEC_TEST(F32ReinterpretI32) {
WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i;
+ int32_t expected = i;
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -1037,7 +1036,7 @@ WASM_EXEC_TEST(I32ReinterpretF32) {
WASM_I32V_2(107));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i;
+ int32_t expected = i;
CHECK_EQ(107, r.Call(expected));
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
@@ -1079,7 +1078,7 @@ WASM_EXEC_TEST(LoadStoreLoad) {
WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i;
+ int32_t expected = i;
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -1170,61 +1169,61 @@ WASM_EXEC_TEST(VoidReturn2) {
WASM_EXEC_TEST(BrEmpty) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BRV(0, WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(BrIfEmpty) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_empty) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, kExprBlock, kLocalVoid, kExprEnd, WASM_GET_LOCAL(0));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_empty_br1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(WASM_BR(0)), WASM_GET_LOCAL(0));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_empty_brif1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_ZERO)), WASM_GET_LOCAL(0));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_empty_brif2) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_GET_LOCAL(1))), WASM_GET_LOCAL(0));
- FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i, *i + 1)); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i, i + 1)); }
}
WASM_EXEC_TEST(Block_i) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_f) {
WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_BLOCK_F(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_d) {
WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_BLOCK_D(WASM_GET_LOCAL(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Block_br2) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0))));
- FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, static_cast<uint32_t>(r.Call(*i))); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(i, static_cast<uint32_t>(r.Call(i))); }
}
WASM_EXEC_TEST(Block_If_P) {
@@ -1235,51 +1234,51 @@ WASM_EXEC_TEST(Block_If_P) {
WASM_BRV(1, WASM_I32V_1(51))), // --
WASM_I32V_1(52))); // --
FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 51 : 52;
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = i ? 51 : 52;
+ CHECK_EQ(expected, r.Call(i));
}
}
WASM_EXEC_TEST(Loop_empty) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, kExprLoop, kLocalVoid, kExprEnd, WASM_GET_LOCAL(0));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_i) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_LOOP_I(WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_f) {
WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_LOOP_F(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_d) {
WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_LOOP_D(WASM_GET_LOCAL(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_empty_br1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(WASM_LOOP(WASM_BR(1))), WASM_GET_LOCAL(0));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_empty_brif1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(WASM_LOOP(WASM_BR_IF(1, WASM_ZERO))), WASM_GET_LOCAL(0));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
WASM_EXEC_TEST(Loop_empty_brif2) {
WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_LOOP_I(WASM_BRV_IF(1, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
- FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i, *i + 1)); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(i, r.Call(i, i + 1)); }
}
WASM_EXEC_TEST(Loop_empty_brif3) {
@@ -1288,8 +1287,8 @@ WASM_EXEC_TEST(Loop_empty_brif3) {
WASM_GET_LOCAL(1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- CHECK_EQ(*i, r.Call(0, *i, *j));
- CHECK_EQ(*j, r.Call(1, *i, *j));
+ CHECK_EQ(i, r.Call(0, i, j));
+ CHECK_EQ(j, r.Call(1, i, j));
}
}
}
@@ -1299,8 +1298,8 @@ WASM_EXEC_TEST(Block_BrIf_P) {
BUILD(r, WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I32V_1(51), WASM_GET_LOCAL(0)),
WASM_I32V_1(52)));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 51 : 52;
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = i ? 51 : 52;
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -1313,8 +1312,8 @@ WASM_EXEC_TEST(Block_IfElse_P_assign) {
WASM_SET_LOCAL(0, WASM_I32V_2(72))), // --
WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 71 : 72;
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = i ? 71 : 72;
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -1327,8 +1326,8 @@ WASM_EXEC_TEST(Block_IfElse_P_return) {
RET_I8(82)), // --
WASM_ZERO); // --
FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 81 : 82;
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = i ? 81 : 82;
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -1338,8 +1337,8 @@ WASM_EXEC_TEST(Block_If_P_assign) {
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_I32V_1(61))),
WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 61 : *i;
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = i ? 61 : i;
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -1357,8 +1356,8 @@ WASM_EXEC_TEST(ExprIf_P) {
WASM_I32V_1(11), // --
WASM_I32V_1(22))); // --
FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 11 : 22;
- CHECK_EQ(expected, r.Call(*i));
+ int32_t expected = i ? 11 : 22;
+ CHECK_EQ(expected, r.Call(i));
}
}
@@ -2039,7 +2038,7 @@ WASM_EXEC_TEST(Int32LoadInt16_signext) {
BUILD(r, WASM_LOAD_MEM(MachineType::Int16(), WASM_GET_LOCAL(0)));
for (int i = 0; i < kNumBytes; i += 2) {
- int32_t expected = memory[i] | (static_cast<int8_t>(memory[i + 1]) << 8);
+ int32_t expected = static_cast<int16_t>(memory[i] | (memory[i + 1] << 8));
CHECK_EQ(expected, r.Call(i));
}
}
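The rewritten expectation sign-extends through an int16_t cast instead of left-shifting a negative int8_t, which is undefined behavior before C++20. A self-contained sketch of the same computation:

    // Assemble the little-endian halfword as unsigned, then let the int16_t
    // cast do the sign extension: bytes {0x80, 0xFF} yield -128.
    int32_t LoadInt16SignExtend(const uint8_t* p) {
      uint16_t raw = static_cast<uint16_t>(p[0] | (p[1] << 8));
      return static_cast<int16_t>(raw);
    }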
@@ -2275,9 +2274,9 @@ WASM_EXEC_TEST(Call_Int32Add) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) +
- static_cast<uint32_t>(*j));
- CHECK_EQ(expected, r.Call(*i, *j));
+ int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(i) +
+ static_cast<uint32_t>(j));
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
@@ -2294,7 +2293,7 @@ WASM_EXEC_TEST(Call_Float32Sub) {
WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i - *j, r.Call(*i, *j)); }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(i - j, r.Call(i, j)); }
}
}
@@ -2312,9 +2311,9 @@ WASM_EXEC_TEST(Call_Float64Sub) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
- r.builder().WriteMemory(&memory[0], *i);
- r.builder().WriteMemory(&memory[1], *j);
- double expected = *i - *j;
+ r.builder().WriteMemory(&memory[0], i);
+ r.builder().WriteMemory(&memory[1], j);
+ double expected = i - j;
CHECK_EQ(107, r.Call());
if (expected != expected) {
@@ -2445,9 +2444,9 @@ WASM_EXEC_TEST(MultiReturnSub) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*j) -
- static_cast<uint32_t>(*i));
- CHECK_EQ(expected, r.Call(*i, *j));
+ int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(j) -
+ static_cast<uint32_t>(i));
+ CHECK_EQ(expected, r.Call(i, j));
}
}
}
@@ -2779,56 +2778,56 @@ WASM_EXEC_TEST(F32Floor) {
WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_FLOOR(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(floorf(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(floorf(i), r.Call(i)); }
}
WASM_EXEC_TEST(F32Ceil) {
WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_CEIL(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(ceilf(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(ceilf(i), r.Call(i)); }
}
WASM_EXEC_TEST(F32Trunc) {
WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_TRUNC(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(truncf(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(truncf(i), r.Call(i)); }
}
WASM_EXEC_TEST(F32NearestInt) {
WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_NEARESTINT(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(nearbyintf(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(nearbyintf(i), r.Call(i)); }
}
WASM_EXEC_TEST(F64Floor) {
WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_F64_FLOOR(WASM_GET_LOCAL(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(floor(*i), r.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(floor(i), r.Call(i)); }
}
WASM_EXEC_TEST(F64Ceil) {
WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_F64_CEIL(WASM_GET_LOCAL(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ceil(*i), r.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ceil(i), r.Call(i)); }
}
WASM_EXEC_TEST(F64Trunc) {
WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_F64_TRUNC(WASM_GET_LOCAL(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(trunc(*i), r.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(trunc(i), r.Call(i)); }
}
WASM_EXEC_TEST(F64NearestInt) {
WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_F64_NEARESTINT(WASM_GET_LOCAL(0)));
- FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(nearbyint(*i), r.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(nearbyint(i), r.Call(i)); }
}
WASM_EXEC_TEST(F32Min) {
@@ -2836,7 +2835,7 @@ WASM_EXEC_TEST(F32Min) {
BUILD(r, WASM_F32_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) { CHECK_DOUBLE_EQ(JSMin(*i, *j), r.Call(*i, *j)); }
+ FOR_FLOAT32_INPUTS(j) { CHECK_DOUBLE_EQ(JSMin(i, j), r.Call(i, j)); }
}
}
@@ -2852,7 +2851,7 @@ WASM_EXEC_TEST(F64Min) {
BUILD(r, WASM_F64_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(JSMin(*i, *j), r.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(JSMin(i, j), r.Call(i, j)); }
}
}
@@ -2868,7 +2867,7 @@ WASM_EXEC_TEST(F32Max) {
BUILD(r, WASM_F32_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(JSMax(*i, *j), r.Call(*i, *j)); }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(JSMax(i, j), r.Call(i, j)); }
}
}
@@ -2885,8 +2884,8 @@ WASM_EXEC_TEST(F64Max) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
- double result = r.Call(*i, *j);
- CHECK_DOUBLE_EQ(JSMax(*i, *j), result);
+ double result = r.Call(i, j);
+ CHECK_DOUBLE_EQ(JSMax(i, j), result);
}
}
}
@@ -2903,10 +2902,10 @@ WASM_EXEC_TEST(I32SConvertF32) {
BUILD(r, WASM_I32_SCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
- if (is_inbounds<int32_t>(*i)) {
- CHECK_EQ(static_cast<int32_t>(*i), r.Call(*i));
+ if (is_inbounds<int32_t>(i)) {
+ CHECK_EQ(static_cast<int32_t>(i), r.Call(i));
} else {
- CHECK_TRAP32(r.Call(*i));
+ CHECK_TRAP32(r.Call(i));
}
}
}
@@ -2918,12 +2917,12 @@ WASM_EXEC_TEST(I32SConvertSatF32) {
FOR_FLOAT32_INPUTS(i) {
int32_t expected =
- is_inbounds<int32_t>(*i)
- ? static_cast<int32_t>(*i)
- : std::isnan(*i) ? 0
- : *i < 0.0 ? std::numeric_limits<int32_t>::min()
- : std::numeric_limits<int32_t>::max();
- int32_t found = r.Call(*i);
+ is_inbounds<int32_t>(i)
+ ? static_cast<int32_t>(i)
+ : std::isnan(i) ? 0
+ : i < 0.0 ? std::numeric_limits<int32_t>::min()
+ : std::numeric_limits<int32_t>::max();
+ int32_t found = r.Call(i);
CHECK_EQ(expected, found);
}
}
@@ -2933,10 +2932,10 @@ WASM_EXEC_TEST(I32SConvertF64) {
BUILD(r, WASM_I32_SCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
- if (is_inbounds<int32_t>(*i)) {
- CHECK_EQ(static_cast<int32_t>(*i), r.Call(*i));
+ if (is_inbounds<int32_t>(i)) {
+ CHECK_EQ(static_cast<int32_t>(i), r.Call(i));
} else {
- CHECK_TRAP32(r.Call(*i));
+ CHECK_TRAP32(r.Call(i));
}
}
}
@@ -2947,12 +2946,12 @@ WASM_EXEC_TEST(I32SConvertSatF64) {
BUILD(r, WASM_I32_SCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
int32_t expected =
- is_inbounds<int32_t>(*i)
- ? static_cast<int32_t>(*i)
- : std::isnan(*i) ? 0
- : *i < 0.0 ? std::numeric_limits<int32_t>::min()
- : std::numeric_limits<int32_t>::max();
- int32_t found = r.Call(*i);
+ is_inbounds<int32_t>(i)
+ ? static_cast<int32_t>(i)
+ : std::isnan(i) ? 0
+ : i < 0.0 ? std::numeric_limits<int32_t>::min()
+ : std::numeric_limits<int32_t>::max();
+ int32_t found = r.Call(i);
CHECK_EQ(expected, found);
}
}
@@ -2961,10 +2960,10 @@ WASM_EXEC_TEST(I32UConvertF32) {
WasmRunner<uint32_t, float> r(execution_tier);
BUILD(r, WASM_I32_UCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
- if (is_inbounds<uint32_t>(*i)) {
- CHECK_EQ(static_cast<uint32_t>(*i), r.Call(*i));
+ if (is_inbounds<uint32_t>(i)) {
+ CHECK_EQ(static_cast<uint32_t>(i), r.Call(i));
} else {
- CHECK_TRAP32(r.Call(*i));
+ CHECK_TRAP32(r.Call(i));
}
}
}
@@ -2975,12 +2974,12 @@ WASM_EXEC_TEST(I32UConvertSatF32) {
BUILD(r, WASM_I32_UCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
int32_t expected =
- is_inbounds<uint32_t>(*i)
- ? static_cast<uint32_t>(*i)
- : std::isnan(*i) ? 0
- : *i < 0.0 ? std::numeric_limits<uint32_t>::min()
- : std::numeric_limits<uint32_t>::max();
- int32_t found = r.Call(*i);
+ is_inbounds<uint32_t>(i)
+ ? static_cast<uint32_t>(i)
+ : std::isnan(i) ? 0
+ : i < 0.0 ? std::numeric_limits<uint32_t>::min()
+ : std::numeric_limits<uint32_t>::max();
+ int32_t found = r.Call(i);
CHECK_EQ(expected, found);
}
}
@@ -2989,10 +2988,10 @@ WASM_EXEC_TEST(I32UConvertF64) {
WasmRunner<uint32_t, double> r(execution_tier);
BUILD(r, WASM_I32_UCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
- if (is_inbounds<uint32_t>(*i)) {
- CHECK_EQ(static_cast<uint32_t>(*i), r.Call(*i));
+ if (is_inbounds<uint32_t>(i)) {
+ CHECK_EQ(static_cast<uint32_t>(i), r.Call(i));
} else {
- CHECK_TRAP32(r.Call(*i));
+ CHECK_TRAP32(r.Call(i));
}
}
}
@@ -3003,12 +3002,12 @@ WASM_EXEC_TEST(I32UConvertSatF64) {
BUILD(r, WASM_I32_UCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
int32_t expected =
- is_inbounds<uint32_t>(*i)
- ? static_cast<uint32_t>(*i)
- : std::isnan(*i) ? 0
- : *i < 0.0 ? std::numeric_limits<uint32_t>::min()
- : std::numeric_limits<uint32_t>::max();
- int32_t found = r.Call(*i);
+ is_inbounds<uint32_t>(i)
+ ? static_cast<uint32_t>(i)
+ : std::isnan(i) ? 0
+ : i < 0.0 ? std::numeric_limits<uint32_t>::min()
+ : std::numeric_limits<uint32_t>::max();
+ int32_t found = r.Call(i);
CHECK_EQ(expected, found);
}
}
@@ -3018,7 +3017,7 @@ WASM_EXEC_TEST(F64CopySign) {
BUILD(r, WASM_F64_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(copysign(*i, *j), r.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(copysign(i, j), r.Call(i, j)); }
}
}
@@ -3027,7 +3026,7 @@ WASM_EXEC_TEST(F32CopySign) {
BUILD(r, WASM_F32_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(copysignf(*i, *j), r.Call(*i, *j)); }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(copysignf(i, j), r.Call(i, j)); }
}
}
@@ -3307,9 +3306,11 @@ WASM_EXEC_TEST(I32MulOnDifferentRegisters) {
}
WASM_EXEC_TEST(I32ShlOnDifferentRegisters) {
- BinOpOnDifferentRegisters<int32_t>(
- execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32Shl,
- [](int32_t lhs, int32_t rhs, bool* trap) { return lhs << (rhs & 31); });
+ BinOpOnDifferentRegisters<int32_t>(execution_tier, kWasmI32,
+ ArrayVector(kSome32BitInputs), kExprI32Shl,
+ [](int32_t lhs, int32_t rhs, bool* trap) {
+ return base::ShlWithWraparound(lhs, rhs);
+ });
}
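The raw `lhs << (rhs & 31)` is undefined for negative lhs before C++20; a sketch of what a wraparound-shift helper such as base::ShlWithWraparound plausibly does (illustrative only, needs <type_traits>):

    template <typename T>
    T ShlWithWraparound(T a, T b) {
      using U = typename std::make_unsigned<T>::type;
      constexpr U kMask = sizeof(T) * 8 - 1;  // 31 for int32_t, 63 for int64_t
      // Shift in the unsigned domain, then cast back (two's-complement wrap).
      return static_cast<T>(static_cast<U>(a) << (static_cast<U>(b) & kMask));
    }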
WASM_EXEC_TEST(I32ShrSOnDifferentRegisters) {
@@ -3383,9 +3384,11 @@ WASM_EXEC_TEST(I64MulOnDifferentRegisters) {
}
WASM_EXEC_TEST(I64ShlOnDifferentRegisters) {
- BinOpOnDifferentRegisters<int64_t>(
- execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64Shl,
- [](int64_t lhs, int64_t rhs, bool* trap) { return lhs << (rhs & 63); });
+ BinOpOnDifferentRegisters<int64_t>(execution_tier, kWasmI64,
+ ArrayVector(kSome64BitInputs), kExprI64Shl,
+ [](int64_t lhs, int64_t rhs, bool* trap) {
+ return base::ShlWithWraparound(lhs, rhs);
+ });
}
WASM_EXEC_TEST(I64ShrSOnDifferentRegisters) {
@@ -3470,10 +3473,9 @@ TEST(Liftoff_tier_up) {
memcpy(buffer.get(), sub_code->instructions().start(), sub_size);
desc.buffer = buffer.get();
desc.instr_size = static_cast<int>(sub_size);
- WasmCode* code = native_module->AddCode(
- add.function_index(), desc, 0, 0, 0, {}, OwnedVector<byte>(),
- WasmCode::kFunction, WasmCode::kOther);
- native_module->PublishCode(code);
+ native_module->AddCode(add.function_index(), desc, 0, 0, {},
+ OwnedVector<byte>(), WasmCode::kFunction,
+ WasmCode::kOther);
// Second run should now execute {sub}.
CHECK_EQ(4, r.Call(11, 7));
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 43ba7dfea1..3ef62d869f 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -59,14 +59,14 @@ class MockPlatform final : public TestPlatform {
void PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) override {
- UNREACHABLE();
- };
+ tasks_.push(std::move(task));
+ }
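The mock now treats delayed tasks as immediately runnable: the delay is dropped and the task joins the same queue that ExecuteTasks() drains, so streaming-compilation tests that post delayed work can still run to completion.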
void PostIdleTask(std::unique_ptr<IdleTask> task) override {
UNREACHABLE();
}
- bool IdleTasksEnabled() override { return false; };
+ bool IdleTasksEnabled() override { return false; }
void ExecuteTasks() {
while (!tasks_.empty()) {
diff --git a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
index ba189a57ca..68366dc2df 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
@@ -18,11 +18,10 @@ namespace wasm {
namespace test_wasm_import_wrapper_cache {
std::unique_ptr<NativeModule> NewModule(Isolate* isolate) {
- WasmCodeManager* manager = isolate->wasm_engine()->code_manager();
std::shared_ptr<WasmModule> module(new WasmModule);
bool can_request_more = false;
size_t size = 16384;
- auto native_module = manager->NewNativeModule(
+ auto native_module = isolate->wasm_engine()->NewNativeModule(
isolate, kAllWasmFeatures, size, can_request_more, std::move(module));
native_module->SetRuntimeStubs(isolate);
return native_module;
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
index 1349ce2d17..211a79978e 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
@@ -48,7 +48,7 @@ class ArgPassingHelper {
runner.Build(outer_code.data(), outer_code.data() + outer_code.size());
int funcs_to_redirect[] = {static_cast<int>(inner_compiler.function_index())};
- runner.builder().Link();
+ runner.builder().SetExecutable();
WasmDebugInfo::RedirectToInterpreter(debug_info_,
ArrayVector(funcs_to_redirect));
main_fun_wrapper_ = runner.builder().WrapCode(runner.function_index());
@@ -105,7 +105,7 @@ TEST(TestArgumentPassing_int32) {
return base::AddWithWraparound(base::MulWithWraparound(2, a), 1);
});
- FOR_INT32_INPUTS(v) { helper.CheckCall(*v); }
+ FOR_INT32_INPUTS(v) { helper.CheckCall(v); }
}
// Pass int64_t, return double.
@@ -124,17 +124,17 @@ TEST(TestArgumentPassing_double_int64) {
WASM_CALL_FUNCTION0(f2.function_index())},
[](int32_t a, int32_t b) {
int64_t a64 = static_cast<int64_t>(a) & 0xFFFFFFFF;
- int64_t b64 = static_cast<int64_t>(b) << 32;
+ int64_t b64 = static_cast<uint64_t>(static_cast<int64_t>(b)) << 32;
return static_cast<double>(a64 | b64);
});
FOR_INT32_INPUTS(v1) {
- FOR_INT32_INPUTS(v2) { helper.CheckCall(*v1, *v2); }
+ FOR_INT32_INPUTS(v2) { helper.CheckCall(v1, v2); }
}
FOR_INT64_INPUTS(v) {
- int32_t v1 = static_cast<int32_t>(*v);
- int32_t v2 = static_cast<int32_t>(*v >> 32);
+ int32_t v1 = static_cast<int32_t>(v);
+ int32_t v2 = static_cast<int32_t>(v >> 32);
helper.CheckCall(v1, v2);
helper.CheckCall(v2, v1);
}
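The widened cast in the expectation above serves the same purpose as the wraparound helpers: left-shifting a negative int64_t by 32 is undefined, while the detour through uint64_t is fully defined. In isolation:

    // b is an int32_t parameter.
    int64_t bad = static_cast<int64_t>(b) << 32;                         // UB if b < 0
    uint64_t ok = static_cast<uint64_t>(static_cast<int64_t>(b)) << 32;  // defined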
@@ -175,7 +175,7 @@ TEST(TestArgumentPassing_float_double) {
WASM_GET_LOCAL(0), WASM_CALL_FUNCTION0(f2.function_index())},
[](float f) { return 2. * static_cast<double>(f) + 1.; });
- FOR_FLOAT32_INPUTS(f) { helper.CheckCall(*f); }
+ FOR_FLOAT32_INPUTS(f) { helper.CheckCall(f); }
}
// Pass two doubles, return double.
@@ -192,7 +192,7 @@ TEST(TestArgumentPassing_double_double) {
[](double a, double b) { return a + b; });
FOR_FLOAT64_INPUTS(d1) {
- FOR_FLOAT64_INPUTS(d2) { helper.CheckCall(*d1, *d2); }
+ FOR_FLOAT64_INPUTS(d2) { helper.CheckCall(d1, d2); }
}
}
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index f95760569f..ad57b458c6 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -6,6 +6,7 @@
#include "src/assembler-inl.h"
#include "src/code-tracer.h"
+#include "src/heap/heap-inl.h"
#include "src/wasm/graph-builder-interface.h"
#include "src/wasm/wasm-import-wrapper-cache-inl.h"
#include "src/wasm/wasm-memory.h"
@@ -57,7 +58,7 @@ TestingModuleBuilder::TestingModuleBuilder(
}
}
-byte* TestingModuleBuilder::AddMemory(uint32_t size) {
+byte* TestingModuleBuilder::AddMemory(uint32_t size, SharedFlag shared) {
CHECK(!test_module_->has_memory);
CHECK_NULL(mem_start_);
CHECK_EQ(0, mem_size_);
@@ -65,9 +66,16 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size) {
DCHECK_IMPLIES(test_module_->origin == kWasmOrigin,
size % kWasmPageSize == 0);
test_module_->has_memory = true;
+ uint32_t max_size =
+ (test_module_->maximum_pages != 0) ? test_module_->maximum_pages : size;
uint32_t alloc_size = RoundUp(size, kWasmPageSize);
Handle<JSArrayBuffer> new_buffer;
- CHECK(NewArrayBuffer(isolate_, alloc_size).ToHandle(&new_buffer));
+ if (shared == SharedFlag::kShared) {
+ CHECK(NewSharedArrayBuffer(isolate_, alloc_size, max_size)
+ .ToHandle(&new_buffer));
+ } else {
+ CHECK(NewArrayBuffer(isolate_, alloc_size).ToHandle(&new_buffer));
+ }
CHECK(!new_buffer.is_null());
mem_start_ = reinterpret_cast<byte*>(new_buffer->backing_store());
mem_size_ = size;
@@ -75,9 +83,8 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size) {
memset(mem_start_, 0, size);
// Create the WasmMemoryObject.
- Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
- isolate_, new_buffer,
- (test_module_->maximum_pages != 0) ? test_module_->maximum_pages : -1);
+ Handle<WasmMemoryObject> memory_object =
+ WasmMemoryObject::New(isolate_, new_buffer, max_size);
instance_object_->set_memory_object(*memory_object);
WasmMemoryObject::AddInstance(isolate_, memory_object, instance_object_);
// TODO(wasm): Delete the following two lines when test-run-wasm will use a
@@ -119,8 +126,7 @@ uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, const char* name,
}
Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
- // Wrap the code so it can be called as a JS function.
- Link();
+ SetExecutable();
FunctionSig* sig = test_module_->functions[index].sig;
MaybeHandle<Code> maybe_ret_code =
compiler::CompileJSToWasmWrapper(isolate_, sig, false);
@@ -140,6 +146,14 @@ Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
new_arr->set(old_arr->length(), *ret_code);
module_object->set_export_wrappers(*new_arr);
+ if (interpreter_) {
+ // Patch the jump table to call the interpreter for this function. This is
+ // only needed for functions with a wrapper. Other functions never get
+ // called through the jump table.
+ wasm::WasmCode* wasm_new_code = compiler::CompileWasmInterpreterEntry(
+ isolate_->wasm_engine(), native_module_, index, sig);
+ native_module_->PublishInterpreterEntry(wasm_new_code, index);
+ }
return ret;
}
@@ -181,12 +195,26 @@ uint32_t TestingModuleBuilder::AddBytes(Vector<const byte> bytes) {
uint32_t bytes_offset = old_size ? old_size : 1;
size_t new_size = bytes_offset + bytes.size();
OwnedVector<uint8_t> new_bytes = OwnedVector<uint8_t>::New(new_size);
- memcpy(new_bytes.start(), old_bytes.start(), old_size);
+ if (old_size > 0) {
+ memcpy(new_bytes.start(), old_bytes.start(), old_size);
+ }
memcpy(new_bytes.start() + bytes_offset, bytes.start(), bytes.length());
native_module_->SetWireBytes(std::move(new_bytes));
return bytes_offset;
}
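The new guard is not cosmetic: before any bytes have been added, `old_bytes.start()` is null, and passing a null pointer to memcpy is undefined behavior even when the length is zero.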
+uint32_t TestingModuleBuilder::AddException(FunctionSig* sig) {
+ DCHECK_EQ(0, sig->return_count());
+ uint32_t index = static_cast<uint32_t>(test_module_->exceptions.size());
+ test_module_->exceptions.push_back(WasmException{sig});
+ Handle<WasmExceptionTag> tag = WasmExceptionTag::New(isolate_, index);
+ Handle<FixedArray> table(instance_object_->exceptions_table(), isolate_);
+ table = isolate_->factory()->CopyFixedArrayAndGrow(table, 1);
+ instance_object_->set_exceptions_table(*table);
+ table->set(index, *tag);
+ return index;
+}
+
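A hypothetical use of the new helper, assuming a TestSignatures instance for the void signature as in other exception tests:

    TestSignatures sigs;
    // Register a parameterless exception; the index feeds WASM_THROW.
    uint32_t except = r.builder().AddException(sigs.v_v());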
CompilationEnv TestingModuleBuilder::CreateCompilationEnv() {
return {
test_module_ptr_,
@@ -219,6 +247,7 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
native_module_->ReserveCodeTableForTesting(kMaxFunctions);
auto instance = WasmInstanceObject::New(isolate_, module_object);
+ instance->set_exceptions_table(*isolate_->factory()->empty_fixed_array());
instance->set_globals_start(globals_data_);
return instance;
}
@@ -393,15 +422,9 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
static_cast<uint32_t>(len)};
if (interpreter_) {
- // Add the code to the interpreter.
+ // Add the code to the interpreter; do not generate compiled code.
interpreter_->SetFunctionCodeForTesting(function_, start, end);
- }
-
- // TODO(wasm): tests that go through JS depend on having a compiled version
- // of each function, even if the execution tier is the interpreter. Fix.
- auto tier = builder_->execution_tier();
- if (tier == ExecutionTier::kInterpreter) {
- tier = ExecutionTier::kOptimized;
+ return;
}
Vector<const uint8_t> wire_bytes = builder_->instance_object()
@@ -419,7 +442,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
NativeModule* native_module =
builder_->instance_object()->module_object()->native_module();
WasmCompilationUnit unit(isolate()->wasm_engine(), function_->func_index,
- tier);
+ builder_->execution_tier());
WasmFeatures unused_detected_features;
WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage(),
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index af575fff77..d52d7bac76 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -39,6 +39,7 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/call-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
#include "test/common/wasm/flag-utils.h"
namespace v8 {
@@ -87,7 +88,7 @@ class TestingModuleBuilder {
void ChangeOriginToAsmjs() { test_module_->origin = kAsmJsOrigin; }
- byte* AddMemory(uint32_t size);
+ byte* AddMemory(uint32_t size, SharedFlag shared = SharedFlag::kNotShared);
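With the new default parameter, a test can opt into shared memory; a hypothetical call site:

    // One wasm page of shared memory, growable up to the module's declared
    // maximum (see the AddMemory change in wasm-run-utils.cc above).
    r.builder().AddMemory(kWasmPageSize, SharedFlag::kShared);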
size_t CodeTableLength() const { return native_module_->num_functions(); }
@@ -175,6 +176,7 @@ class TestingModuleBuilder {
enum FunctionType { kImport, kWasm };
uint32_t AddFunction(FunctionSig* sig, const char* name, FunctionType type);
+ // Wrap the code so it can be called as a JS function.
Handle<JSFunction> WrapCode(uint32_t index);
void AddIndirectFunctionTable(const uint16_t* function_indexes,
@@ -184,6 +186,8 @@ class TestingModuleBuilder {
uint32_t AddBytes(Vector<const byte> bytes);
+ uint32_t AddException(FunctionSig* sig);
+
WasmFunction* GetFunctionAt(int index) {
return &test_module_->functions[index];
}
@@ -201,11 +205,8 @@ class TestingModuleBuilder {
Address globals_start() const {
return reinterpret_cast<Address>(globals_data_);
}
- void Link() {
- if (linked_) return;
- linked_ = true;
- native_module_->SetExecutable(true);
- }
+
+ void SetExecutable() { native_module_->SetExecutable(true); }
CompilationEnv CreateCompilationEnv();
@@ -228,7 +229,6 @@ class TestingModuleBuilder {
ExecutionTier execution_tier_;
Handle<WasmInstanceObject> instance_object_;
NativeModule* native_module_ = nullptr;
- bool linked_ = false;
RuntimeExceptionSupport runtime_exception_support_;
LowerSimd lower_simd_;
@@ -450,7 +450,7 @@ class WasmRunner : public WasmRunnerBase {
wrapper_.SetInnerCode(builder_.GetFunctionCode(0));
wrapper_.SetInstance(builder_.instance_object());
- builder_.Link();
+ builder_.SetExecutable();
Handle<Code> wrapper_code = wrapper_.GetWrapperCode();
compiler::CodeRunner<int32_t> runner(CcTest::InitIsolateOnce(),
wrapper_code, wrapper_.signature());
@@ -474,7 +474,9 @@ class WasmRunner : public WasmRunnerBase {
thread->Reset();
std::array<WasmValue, sizeof...(p)> args{{WasmValue(p)...}};
thread->InitFrame(function(), args.data());
- if (thread->Run() == WasmInterpreter::FINISHED) {
+ thread->Run();
+ CHECK_GT(thread->NumInterpretedCalls(), 0);
+ if (thread->state() == WasmInterpreter::FINISHED) {
WasmValue val = thread->GetReturnValue();
possible_nondeterminism_ |= thread->PossibleNondeterminism();
return val.to<ReturnType>();
@@ -488,7 +490,45 @@ class WasmRunner : public WasmRunnerBase {
}
}
+ void CheckCallViaJS(double expected, uint32_t function_index,
+ Handle<Object>* buffer, int count) {
+ Isolate* isolate = builder_.isolate();
+ if (jsfuncs_.size() <= function_index) {
+ jsfuncs_.resize(function_index + 1);
+ }
+ if (jsfuncs_[function_index].is_null()) {
+ jsfuncs_[function_index] = builder_.WrapCode(function_index);
+ }
+ Handle<JSFunction> jsfunc = jsfuncs_[function_index];
+ Handle<Object> global(isolate->context()->global_object(), isolate);
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate, jsfunc, global, count, buffer);
+
+ CHECK(!retval.is_null());
+ Handle<Object> result = retval.ToHandleChecked();
+ if (result->IsSmi()) {
+ CHECK_EQ(expected, Smi::ToInt(*result));
+ } else {
+ CHECK(result->IsHeapNumber());
+ CHECK_DOUBLE_EQ(expected, HeapNumber::cast(*result)->value());
+ }
+
+ if (builder_.interpret()) {
+ CHECK_GT(builder_.interpreter()->GetThread(0)->NumInterpretedCalls(), 0);
+ }
+ }
+
+ void CheckCallViaJS(double expected, ParamTypes... p) {
+ Isolate* isolate = builder_.isolate();
+ uint32_t function_index = function()->func_index;
+ Handle<Object> buffer[] = {isolate->factory()->NewNumber(p)...};
+ CheckCallViaJS(expected, function_index, buffer, sizeof...(p));
+ }
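A sketch of the new helper in use; expectations are doubles, so one signature covers every wasm number type:

    // Hypothetical: check an i32 add function through its JS wrapper.
    WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
    BUILD(r, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
    r.CheckCallViaJS(3, 1, 2);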
+
Handle<Code> GetWrapperCode() { return wrapper_.GetWrapperCode(); }
+
+ private:
+ std::vector<Handle<JSFunction>> jsfuncs_;
};
// A macro to define tests that run in different engine configurations.
diff --git a/deps/v8/test/common/assembler-tester.h b/deps/v8/test/common/assembler-tester.h
index 5861acd71e..4b3499b149 100644
--- a/deps/v8/test/common/assembler-tester.h
+++ b/deps/v8/test/common/assembler-tester.h
@@ -6,6 +6,7 @@
#define V8_TEST_COMMON_ASSEMBLER_TESTER_H_
#include "src/assembler.h"
+#include "src/code-desc.h"
namespace v8 {
namespace internal {
@@ -45,7 +46,7 @@ class TestingAssemblerBuffer : public AssemblerBuffer {
// some older ARM kernels there is a bug which causes an access error on
// cache flush instructions to trigger access error on non-writable memory.
// See https://bugs.chromium.org/p/v8/issues/detail?id=8157
- Assembler::FlushICache(buffer_, size_);
+ FlushInstructionCache(buffer_, size_);
bool result = SetPermissions(GetPlatformPageAllocator(), buffer_, size_,
v8::PageAllocator::kReadExecute);
diff --git a/deps/v8/test/cctest/types-fuzz.h b/deps/v8/test/common/types-fuzz.h
index b6b5bf2dc5..f539ed9701 100644
--- a/deps/v8/test/cctest/types-fuzz.h
+++ b/deps/v8/test/common/types-fuzz.h
@@ -45,13 +45,13 @@ class Types {
name = Type::name(); \
types.push_back(name);
PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
- #undef DECLARE_TYPE
+#undef DECLARE_TYPE
SignedSmall = Type::SignedSmall();
UnsignedSmall = Type::UnsignedSmall();
- object_map = isolate->factory()->NewMap(
- JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ object_map =
+ isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
smi = handle(Smi::FromInt(666), isolate);
boxed_smi = isolate->factory()->NewHeapNumber(666);
@@ -161,9 +161,9 @@ class Types {
Type Fuzz(int depth = 4) {
switch (rng_->NextInt(depth == 0 ? 3 : 20)) {
case 0: { // bitset
- #define COUNT_BITSET_TYPES(type, value) + 1
+#define COUNT_BITSET_TYPES(type, value) +1
int n = 0 PROPER_BITSET_TYPE_LIST(COUNT_BITSET_TYPES);
- #undef COUNT_BITSET_TYPES
+#undef COUNT_BITSET_TYPES
// Pick a bunch of named bitsets and return their intersection.
Type result = Type::Any();
for (int i = 0, m = 1 + rng_->NextInt(3); i < m; ++i) {
@@ -179,7 +179,7 @@ class Types {
} \
}
PROPER_BITSET_TYPE_LIST(PICK_BITSET_TYPE)
- #undef PICK_BITSET_TYPE
+#undef PICK_BITSET_TYPE
}
return result;
}
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 17045ac325..8b0aab79a9 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -135,6 +135,10 @@
#define WASM_IF_ELSE_X(index, cond, tstmt, fstmt) \
cond, kExprIf, static_cast<byte>(index), tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_TRY_CATCH_T(t, trystmt, catchstmt) \
+ kExprTry, static_cast<byte>(ValueTypes::ValueTypeCodeFor(t)), trystmt, \
+ kExprCatch, catchstmt, kExprEnd
+
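A sketch of the macro in use, assuming an exception index `except` obtained from AddException; the catch arm drops the caught exception value and yields a fallback:

    BUILD(r, WASM_TRY_CATCH_T(kWasmI32,
                              WASM_STMTS(WASM_I32V(0), WASM_THROW(except)),
                              WASM_STMTS(WASM_DROP, WASM_I32V(1))));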
#define WASM_SELECT(tval, fval, cond) tval, fval, cond, kExprSelect
#define WASM_RETURN0 kExprReturn
@@ -154,10 +158,12 @@
#define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
#define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
+#define WASM_THROW(index) kExprThrow, static_cast<byte>(index)
+
//------------------------------------------------------------------------------
// Misc expressions.
//------------------------------------------------------------------------------
-#define WASM_ID(...) __VA_ARGS__
+#define WASM_STMTS(...) __VA_ARGS__
#define WASM_ZERO kExprI32Const, 0
#define WASM_ONE kExprI32Const, 1
@@ -348,6 +354,10 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_GET_GLOBAL(index) kExprGetGlobal, static_cast<byte>(index)
#define WASM_SET_GLOBAL(index, val) \
val, kExprSetGlobal, static_cast<byte>(index)
+#define WASM_GET_TABLE(table_index, index) \
+ index, kExprGetTable, static_cast<byte>(table_index)
+#define WASM_SET_TABLE(table_index, index, val) \
+ index, val, kExprSetTable, static_cast<byte>(table_index)
#define WASM_LOAD_MEM(type, index) \
index, \
static_cast<byte>(v8::internal::wasm::LoadStoreOpcodeOf(type, false)), \
@@ -377,6 +387,11 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_CALL_FUNCTION(index, ...) \
__VA_ARGS__, kExprCallFunction, static_cast<byte>(index)
+#define WASM_RETURN_CALL_FUNCTION0(index) \
+ kExprReturnCall, static_cast<byte>(index)
+#define WASM_RETURN_CALL_FUNCTION(index, ...) \
+ __VA_ARGS__, kExprReturnCall, static_cast<byte>(index)
+
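A sketch of a tail call, assuming a callee `add` created via r.NewFunction and the return-call proposal enabled:

    // Forward both locals to {add} without growing the stack.
    BUILD(r, WASM_RETURN_CALL_FUNCTION(add.function_index(),
                                       WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));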
#define TABLE_ZERO 0
// TODO(titzer): change usages of these macros to put func last.
@@ -395,6 +410,12 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_CALL_INDIRECTN(arity, index, func, ...) \
__VA_ARGS__, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
+#define WASM_RETURN_CALL_INDIRECT0(index, func) \
+ func, kExprReturnCallIndirect, static_cast<byte>(index), TABLE_ZERO
+#define WASM_RETURN_CALL_INDIRECT(index, func, ...) \
+ __VA_ARGS__, func, kExprReturnCallIndirect, static_cast<byte>(index), \
+ TABLE_ZERO
+
#define WASM_NOT(x) x, kExprI32Eqz
#define WASM_SEQ(...) __VA_ARGS__
@@ -585,17 +606,17 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define MEMORY_ZERO 0
#define WASM_MEMORY_INIT(seg, dst, src, size) \
- dst, src, size, WASM_NUMERIC_OP(kExprMemoryInit), MEMORY_ZERO, U32V_1(seg)
-#define WASM_MEMORY_DROP(seg) WASM_NUMERIC_OP(kExprMemoryDrop), U32V_1(seg)
+ dst, src, size, WASM_NUMERIC_OP(kExprMemoryInit), U32V_1(seg), MEMORY_ZERO
+#define WASM_DATA_DROP(seg) WASM_NUMERIC_OP(kExprDataDrop), U32V_1(seg)
#define WASM_MEMORY_COPY(dst, src, size) \
- dst, src, size, WASM_NUMERIC_OP(kExprMemoryCopy), MEMORY_ZERO
+ dst, src, size, WASM_NUMERIC_OP(kExprMemoryCopy), MEMORY_ZERO, MEMORY_ZERO
#define WASM_MEMORY_FILL(dst, val, size) \
dst, val, size, WASM_NUMERIC_OP(kExprMemoryFill), MEMORY_ZERO
#define WASM_TABLE_INIT(seg, dst, src, size) \
- dst, src, size, WASM_NUMERIC_OP(kExprTableInit), TABLE_ZERO, U32V_1(seg)
-#define WASM_TABLE_DROP(seg) WASM_NUMERIC_OP(kExprTableDrop), U32V_1(seg)
+ dst, src, size, WASM_NUMERIC_OP(kExprTableInit), U32V_1(seg), TABLE_ZERO
+#define WASM_ELEM_DROP(seg) WASM_NUMERIC_OP(kExprElemDrop), U32V_1(seg)
#define WASM_TABLE_COPY(dst, src, size) \
- dst, src, size, WASM_NUMERIC_OP(kExprTableCopy), TABLE_ZERO
+ dst, src, size, WASM_NUMERIC_OP(kExprTableCopy), TABLE_ZERO, TABLE_ZERO
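The reordered immediates track the bulk-memory proposal encoding: memory.init and table.init now encode the segment index before the memory/table index, the drop instructions are renamed to data.drop/elem.drop, and memory.copy/table.copy carry two index immediates (destination, then source).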
//------------------------------------------------------------------------------
// Memory Operations.
@@ -649,6 +670,8 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_ATOMICS_STORE_OP(op, x, y, representation) \
x, y, WASM_ATOMICS_OP(op), \
static_cast<byte>(ElementSizeLog2Of(representation)), ZERO_OFFSET
+#define WASM_ATOMICS_WAIT(op, index, value, timeout, offset) \
+ index, value, timeout, WASM_ATOMICS_OP(op), ZERO_ALIGNMENT, offset
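A hypothetical expansion of the new atomics macro, waiting on a shared-memory address (index and expected value are i32, the timeout is an i64 nanosecond count):

    BUILD(r, WASM_ATOMICS_WAIT(kExprI32AtomicWait, WASM_GET_LOCAL(0),
                               WASM_GET_LOCAL(1), WASM_GET_LOCAL(2), 0));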
//------------------------------------------------------------------------------
// Sign Extension Operations.
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 15e71b017d..45428cbc75 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -72,7 +72,10 @@ bool InterpretWasmModuleForTesting(Isolate* isolate,
size_t param_count = signature->parameter_count();
std::unique_ptr<WasmValue[]> arguments(new WasmValue[param_count]);
- memcpy(arguments.get(), args, std::min(param_count, argc));
+ size_t arg_count = std::min(param_count, argc);
+ if (arg_count > 0) {
+ memcpy(arguments.get(), args, arg_count);
+ }
// Fill the parameters up with default values.
for (size_t i = argc; i < param_count; ++i) {
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-dead-function-fails.js b/deps/v8/test/debugger/debug/debug-evaluate-dead-function-fails.js
new file mode 100644
index 0000000000..ffa2916511
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-evaluate-dead-function-fails.js
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-always-opt --no-stress-opt
+
+Debug = debug.Debug
+
+var exception = null;
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ // Evaluating the live function should succeed.
+ assertEquals(exec_state.frame(0).evaluate("live()").value(), 1);
+ // Evaluating the dead function should fail.
+ assertThrows(()=>exec_state.frame(0).evaluate("dead()"), ReferenceError);
+ } catch (e) {
+ exception = e;
+ print(e + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+(function() {
+ "use strict";
+ function live() { return 1; }
+ function dead() { return 2; }
+ // Use 'live' to make it non-dead.
+ live;
+ debugger;
+})();
+
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-modify-catch-block-scope.js b/deps/v8/test/debugger/debug/debug-evaluate-modify-catch-block-scope.js
index 656399b0ae..deb0d42a33 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-modify-catch-block-scope.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-modify-catch-block-scope.js
@@ -33,6 +33,8 @@ Debug.setListener(listener);
a *= 2;
e *= 2;
}
+ // Make sure bar is 'used' so that it is visible to the debugger.
+ bar;
debugger;
assertEquals(5, a);
assertEquals(7, e);
diff --git a/deps/v8/test/debugger/debug/debug-optimize.js b/deps/v8/test/debugger/debug/debug-optimize.js
index f296816aa2..7ee65e29f6 100644
--- a/deps/v8/test/debugger/debug/debug-optimize.js
+++ b/deps/v8/test/debugger/debug/debug-optimize.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --opt --no-always-opt
+// Flags: --opt --no-always-opt --turbo-inlining
var Debug = debug.Debug;
diff --git a/deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js b/deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js
index 8a5eaea8d2..1ada64d64e 100644
--- a/deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js
+++ b/deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js
@@ -26,84 +26,84 @@ function listener(event, exec_state, event_data, data) {
Debug.setListener(listener);
-var id = x => x; // B11 B12 B42 B43
+var id = x => x; // B9 B10 B36 B37
function test() {
debugger; // B0
function fx1([
- a, // B3
- b // B4
- ]) { // B2
- assertEquals([1, 2], [a, b]); // B5
- } // B6
+ a, // B2
+ b // B3
+ ]) {
+ assertEquals([1, 2], [a, b]); // B4
+ } // B5
fx1([1, 2, 3]); // B1
function f2([
- a, // B9
- b = id(3) // B10
- ]) { // B8
- assertEquals([4, 3], [a, b]); // B13
- } // B14
- f2([4]); // B7
+ a, // B7
+ b = id(3) // B8
+ ]) {
+ assertEquals([4, 3], [a, b]); // B11
+ } // B12
+ f2([4]); // B6
function f3({
- x: a, // B17
- y: b // B18
- }) { // B16
- assertEquals([5, 6], [a, b]); // B19
- } // B20
- f3({y: 6, x: 5}); // B15
+ x: a, // B14
+ y: b // B15
+ }) {
+ assertEquals([5, 6], [a, b]); // B16
+ } // B17
+ f3({y: 6, x: 5}); // B13
function f4([
- a, // B23
+ a, // B19
{
- b, // B24
- c, // B25
+ b, // B20
+ c, // B21
}
- ]) { // B22
- assertEquals([2, 4, 6], [a, b, c]); // B26
- } // B27
- f4([2, {c: 6, b: 4}]); // B21
+ ]) {
+ assertEquals([2, 4, 6], [a, b, c]); // B22
+ } // B23
+ f4([2, {c: 6, b: 4}]); // B18
function f5([
{
- a, // B30
- b = 7 // B31
+ a, // B25
+ b = 7 // B26
},
- c = 3 // B32
- ] = [{a:1}]) { // B29
- assertEquals([1, 7, 3], [a, b, c]); // B33
- } // B34
- f5(); // B28
+ c = 3 // B27
+ ] = [{a:1}]) {
+ assertEquals([1, 7, 3], [a, b, c]); // B28
+ } // B29
+ f5(); // B24
- var name = "x"; // B35
+ var name = "x"; // B30
function f6({
- [id(name)]: a, // B40 B41
- b = a // B44
- }) { // B39
- assertEquals([9, 9], [a, b]); // B45
- } // B46
- var o6 = {}; // B36
- o6[name] = 9; // B37
- f6(o6); // B38
+ [id(name)]: a, // B34 B35
+ b = a // B38
+ }) {
+ assertEquals([9, 9], [a, b]); // B39
+ } // B40
+ var o6 = {}; // B31
+ o6[name] = 9; // B32
+ f6(o6); // B33
try {
- throw [3, 4]; // B47
+ throw [3, 4]; // B41
} catch ([
- a, // B49
- b, // B50
- c = 6 // B51
- ]) { // B48
- assertEquals([3, 4, 6], [a, b, c]); // B52
+ a, // B42
+ b, // B43
+ c = 6 // B44
+ ]) {
+ assertEquals([3, 4, 6], [a, b, c]); // B45
}
var {
- x: a, // B54
- y: b = 9 // B55
- } = { x: 4 }; // B53
- assertEquals([4, 9], [a, b]); // B56
-} // B57
+ x: a, // B47
+ y: b = 9 // B48
+ } = { x: 4 }; // B46
+ assertEquals([4, 9], [a, b]); // B49
+} // B50
test();
-Debug.setListener(null); // B58
+Debug.setListener(null); // B51
assertNull(exception);
diff --git a/deps/v8/test/debugger/debug/es6/debug-stepin-default-parameters.js b/deps/v8/test/debugger/debug/es6/debug-stepin-default-parameters.js
index cfc2f77e17..9f8eda5a68 100644
--- a/deps/v8/test/debugger/debug/es6/debug-stepin-default-parameters.js
+++ b/deps/v8/test/debugger/debug/es6/debug-stepin-default-parameters.js
@@ -41,5 +41,5 @@ Debug.setListener(null); // c
assertNull(exception);
assertEquals("default", result);
-assertEquals(["a0","b13","f31b13","f18b13","d2f18b13","d19f18b13","g14b13","c0"],
+assertEquals(["a0","b13","f18b13","d2f18b13","d19f18b13","g14b13","c0"],
log);
diff --git a/deps/v8/test/debugger/debug/regress-3225.js b/deps/v8/test/debugger/debug/regress-3225.js
index 454ff6e7a4..4ba6777207 100644
--- a/deps/v8/test/debugger/debug/regress-3225.js
+++ b/deps/v8/test/debugger/debug/regress-3225.js
@@ -32,6 +32,8 @@ Debug.setListener(listener);
function* generator(a, b) {
function set_a_to_5() { a = 5 }
+ // Make sure set_a_to_5 is 'used' so that it is visible to the debugger.
+ set_a_to_5;
var b = 3; // Shadows a parameter.
debugger;
yield a;
diff --git a/deps/v8/test/debugger/debug/regress/regress-1170187.js b/deps/v8/test/debugger/debug/regress/regress-1170187.js
index 832b9b753e..e41cc7ebad 100644
--- a/deps/v8/test/debugger/debug/regress/regress-1170187.js
+++ b/deps/v8/test/debugger/debug/regress/regress-1170187.js
@@ -73,7 +73,13 @@ Debug.setListener(listener);
// Call a function with local variables, passing a different number of
// parameters than the number of arguments.
-(function(x,y){var a,b,c; debugger; return 3})()
+(function(x,y){
+ var a,b,c;
+ // Make sure a, b, and c are used.
+ a,b,c;
+ debugger;
+ return 3
+})()
// Make sure that the debug event listener was invoked (again).
assertTrue(listenerCalled);
diff --git a/deps/v8/test/debugger/debug/regress/regress-crbug-323936.js b/deps/v8/test/debugger/debug/regress/regress-crbug-323936.js
index 391b095a2b..17ffce1cc5 100644
--- a/deps/v8/test/debugger/debug/regress/regress-crbug-323936.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-323936.js
@@ -37,11 +37,16 @@ function f(e, x) {
// and 'e' binds to the exception.
function write_0(v) { e = v }
function write_1(v) { x = v }
+ // Make sure write_0 and write_1 are 'used' so that they are visible to the
+ // debugger.
+ write_0, write_1;
debugger;
assertEquals("foo", e); // overwritten by the debugger
}
assertEquals("argument", e); // debugger did not overwrite
function write_2(v) { e = v }
+ // Make sure write_2 is 'used' so that it is visible to the debugger.
+ write_2;
debugger;
assertEquals("bar", e);
assertEquals("modified", x);
diff --git a/deps/v8/test/debugger/debug/wasm/frame-inspection.js b/deps/v8/test/debugger/debug/wasm/frame-inspection.js
index b91a466a10..882b7e38b0 100644
--- a/deps/v8/test/debugger/debug/wasm/frame-inspection.js
+++ b/deps/v8/test/debugger/debug/wasm/frame-inspection.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
Debug = debug.Debug
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index 4f86b05769..668928137b 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -124,10 +124,25 @@
}], # 'arch == s390 or arch == s390x'
##############################################################################
-['lite_mode', {
+['lite_mode or variant == jitless', {
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
'debug/wasm/*': [SKIP],
+ 'regress/regress-crbug-840288': [SKIP],
'wasm-*': [SKIP],
-}], # lite_mode
+}], # lite_mode or variant == jitless
+
+##############################################################################
+['variant == jitless', {
+ # https://crbug.com/v8/7777
+ 'debug/debug-compile-optimized': [SKIP],
+ 'debug/debug-optimize': [SKIP],
+ 'debug/lazy-deopt-then-flush-bytecode': [SKIP],
+ 'debug/regress/regress-crbug-633999': [SKIP],
+}], # variant == jitless
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
]
diff --git a/deps/v8/test/debugger/regress/regress-crbug-840288.js b/deps/v8/test/debugger/regress/regress-crbug-840288.js
index 013fec9f05..ff08fd90d0 100644
--- a/deps/v8/test/debugger/regress/regress-crbug-840288.js
+++ b/deps/v8/test/debugger/regress/regress-crbug-840288.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
Debug = debug.Debug
diff --git a/deps/v8/test/debugger/testcfg.py b/deps/v8/test/debugger/testcfg.py
index 61893e9bbd..eebb578fb7 100644
--- a/deps/v8/test/debugger/testcfg.py
+++ b/deps/v8/test/debugger/testcfg.py
@@ -11,22 +11,16 @@ from testrunner.objects import testcase
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
+
+class TestLoader(testsuite.JSTestLoader):
+ @property
+ def excluded_files(self):
+ return {"test-api.js"}
+
+
class TestSuite(testsuite.TestSuite):
- def ListTests(self):
- tests = []
- for dirname, dirs, files in os.walk(self.root):
- for dotted in [x for x in dirs if x.startswith('.')]:
- dirs.remove(dotted)
- dirs.sort()
- files.sort()
- for filename in files:
- if (filename.endswith(".js") and filename != "test-api.js"):
- fullpath = os.path.join(dirname, filename)
- relpath = fullpath[len(self.root) + 1 : -3]
- testname = relpath.replace(os.path.sep, "/")
- test = self._create_test(testname)
- tests.append(test)
- return tests
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/fuzzer/fuzzer.status b/deps/v8/test/fuzzer/fuzzer.status
index 30bf257088..5a79a0171f 100644
--- a/deps/v8/test/fuzzer/fuzzer.status
+++ b/deps/v8/test/fuzzer/fuzzer.status
@@ -5,11 +5,17 @@
[
##############################################################################
-['lite_mode', {
+['lite_mode or variant == jitless', {
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
'multi_return/*': [SKIP],
'wasm_async/*': [SKIP],
+ 'wasm_code/*': [SKIP],
'wasm_compile/*': [SKIP],
-}], # lite_mode
+}], # lite_mode or variant == jitless
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
]
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index 30b0020425..5db11f2168 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -142,7 +142,7 @@ std::unique_ptr<wasm::NativeModule> AllocateNativeModule(i::Isolate* isolate,
// We have to add the code object to a NativeModule, because the
// WasmCallDescriptor assumes that code is on the native heap and not
// within a code object.
- return isolate->wasm_engine()->code_manager()->NewNativeModule(
+ return isolate->wasm_engine()->NewNativeModule(
isolate, i::wasm::kAllWasmFeatures, code_size, false, std::move(module));
}
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index 95085885d5..f30ee07499 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -7,26 +7,35 @@ import os
from testrunner.local import testsuite
from testrunner.objects import testcase
+SUB_TESTS = [
+ 'json',
+ 'parser',
+ 'regexp_builtins',
+ 'regexp',
+ 'multi_return',
+ 'wasm',
+ 'wasm_async',
+ 'wasm_code',
+ 'wasm_compile',
+]
class VariantsGenerator(testsuite.VariantsGenerator):
def _get_variants(self, test):
return self._standard_variant
+class TestLoader(testsuite.GenericTestLoader):
+ @property
+ def test_dirs(self):
+ return SUB_TESTS
+
+ def _to_relpath(self, abspath, _):
+ return os.path.relpath(abspath, self.suite.root)
+
+
class TestSuite(testsuite.TestSuite):
- SUB_TESTS = ( 'json', 'parser', 'regexp_builtins', 'regexp', 'multi_return', 'wasm',
- 'wasm_async', 'wasm_code', 'wasm_compile')
-
- def ListTests(self):
- tests = []
- for subtest in TestSuite.SUB_TESTS:
- for fname in os.listdir(os.path.join(self.root, subtest)):
- if not os.path.isfile(os.path.join(self.root, subtest, fname)):
- continue
- test = self._create_test('%s/%s' % (subtest, fname))
- tests.append(test)
- tests.sort()
- return tests
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
@@ -37,7 +46,7 @@ class TestSuite(testsuite.TestSuite):
class TestCase(testcase.TestCase):
def _get_files_params(self):
- suite, name = self.path.split('/')
+ suite, name = self.path.split(os.path.sep)
return [os.path.join(self.suite.root, suite, name)]
def _get_variant_flags(self):
@@ -50,7 +59,7 @@ class TestCase(testcase.TestCase):
return []
def get_shell(self):
- group, _ = self.path.split('/', 1)
+ group, _ = self.path.split(os.path.sep, 1)
return 'v8_simple_%s_fuzzer' % group
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 48c94be426..034ef731d8 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -139,7 +139,6 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
"can be\n"
"// found in the LICENSE file.\n"
"\n"
- "load('test/mjsunit/wasm/wasm-constants.js');\n"
"load('test/mjsunit/wasm/wasm-module-builder.js');\n"
"\n"
"(function() {\n"
diff --git a/deps/v8/test/inspector/BUILD.gn b/deps/v8/test/inspector/BUILD.gn
index f83c7d044d..f0ca9fd693 100644
--- a/deps/v8/test/inspector/BUILD.gn
+++ b/deps/v8/test/inspector/BUILD.gn
@@ -24,6 +24,7 @@ v8_executable("inspector-test") {
"../..:v8",
"../..:v8_libbase",
"../..:v8_libplatform",
+ "../../src/inspector:inspector_test_headers",
"//build/win:default_exe_manifest",
]
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm-expected.txt b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm-expected.txt
new file mode 100644
index 0000000000..ad1d9db168
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm-expected.txt
@@ -0,0 +1,4 @@
+Test that console profiles contain wasm function names.
+Compiling wasm.
+Running fib with increasing input until it shows up in the profile.
+Found fib in profile.
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
new file mode 100644
index 0000000000..dc96406d4a
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
@@ -0,0 +1,80 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Test that console profiles contain wasm function names.');
+
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// Add fibonacci function.
+var builder = new WasmModuleBuilder();
+builder.addFunction('fib', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ kExprI32Const, 2,
+ kExprI32LeS, // i < 2 ?
+ kExprBrIf, 0, // --> return i
+ kExprI32Const, 1, kExprI32Sub, // i - 1
+ kExprCallFunction, 0, // fib(i - 1)
+ kExprGetLocal, 0, kExprI32Const, 2, kExprI32Sub, // i - 2
+ kExprCallFunction, 0, // fib(i - 2)
+ kExprI32Add
+ ])
+ .exportFunc();
+let module_bytes = builder.toArray();
+
+function compile(bytes) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (var i = 0; i < bytes.length; i++) {
+ view[i] = bytes[i] | 0;
+ }
+ let module = new WebAssembly.Module(buffer);
+ let instance = new WebAssembly.Instance(module);
+ return instance;
+}
+
+function checkError(message) {
+ if (message.error) {
+ InspectorTest.log("Error: ");
+ InspectorTest.logMessage(message);
+ InspectorTest.completeTest();
+ }
+}
+
+(async function test() {
+ Protocol.Profiler.enable();
+ checkError(await Protocol.Profiler.start());
+ let found_fib_in_profile = false;
+ let finished_profiles = 0;
+ Protocol.Profiler.onConsoleProfileFinished(e => {
+ ++finished_profiles;
+ if (e.params.profile.nodes.some(n => n.callFrame.functionName === 'fib'))
+ found_fib_in_profile = true;
+ });
+ InspectorTest.log('Compiling wasm.');
+ checkError(await Protocol.Runtime.evaluate({
+ expression: 'const instance = (' + compile + ')(' +
+ JSON.stringify(module_bytes) + ');'
+ }));
+ InspectorTest.log(
+ 'Running fib with increasing input until it shows up in the profile.');
+ for (let i = 1; !found_fib_in_profile; ++i) {
+ checkError(await Protocol.Runtime.evaluate(
+ {expression: 'console.profile(\'profile\');'}));
+ checkError(await Protocol.Runtime.evaluate(
+ {expression: 'instance.exports.fib(' + i + ');'}));
+ checkError(await Protocol.Runtime.evaluate(
+ {expression: 'console.profileEnd(\'profile\');'}));
+ if (finished_profiles != i) {
+ InspectorTest.log(
+ 'Missing consoleProfileFinished message (expected ' + i + ', got ' +
+ finished_profiles + ')');
+ }
+ }
+ InspectorTest.log('Found fib in profile.');
+ InspectorTest.completeTest();
+})().catch(e => InspectorTest.log('caught: ' + e));
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module-expected.txt b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module-expected.txt
index 863220ca6a..78f2adb412 100644
--- a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module-expected.txt
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module-expected.txt
@@ -2,7 +2,7 @@ Tests evaluateOnCallFrame in module.
Running test: testTotal
foo1 (module1:7:2)
-foo2 (module2:6:9)
+foo2 (module2:7:9)
(anonymous) (module3:4:0)
local:foo1
[
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module.js b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module.js
index e656646cda..3547e9f75d 100644
--- a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module.js
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-in-module.js
@@ -14,11 +14,14 @@ export function foo1() {
let g1 = 2;
debugger;
return a1 + b1 + c1 + g1;
-}`;
+};
+export default 42;
+`;
var module2 = `
import { foo1 } from 'module1';
let a2 = 20;
+export * as mod1 from 'module1';
export let b2 = 21;
export function foo2() {
let c2 = 22;
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values-expected.txt b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values-expected.txt
new file mode 100644
index 0000000000..dfca60e0c2
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values-expected.txt
@@ -0,0 +1,354 @@
+Tests that exercise various result types from Debugger.evaluateOnCallFrame
+
+Running test: testCreateFunction
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : string
+ value : SUCCESS
+ }
+ }
+}
+
+Running test: testNumericValue
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : -578.28
+ type : number
+ value : -578.28
+ }
+ }
+}
+
+Running test: testUnserializableValues
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : NaN
+ type : number
+ unserializableValue : NaN
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : Infinity
+ type : number
+ unserializableValue : Infinity
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : -Infinity
+ type : number
+ unserializableValue : -Infinity
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : -0
+ type : number
+ unserializableValue : -0
+ }
+ }
+}
+
+Running test: testBooleanValue
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : boolean
+ value : true
+ }
+ }
+}
+
+Running test: testObject
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+
+Running test: testConsoleLog
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ description : 42
+ type : number
+ value : 42
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : log
+ }
+}
+
+Running test: testSymbol
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : Symbol(foo)
+ objectId : <objectId>
+ type : symbol
+ }
+ }
+}
+
+Running test: testSymbolReturnByValueError
+{
+ error : {
+ code : -32000
+ message : Object couldn't be returned by value
+ }
+ id : <messageId>
+}
+
+Running test: testPromiseResolveReturnByVal
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : {
+ }
+ }
+ }
+}
+
+Running test: testPromiseResolve
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Promise
+ description : Promise
+ objectId : <objectId>
+ subtype : promise
+ type : object
+ }
+ }
+}
+
+Running test: testReleaseObject
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+
+Running test: testReleaseObjectInvalid
+ReleaseObject with invalid params.
+{
+ error : {
+ code : -32602
+ data : objectId: string value expected
+ message : Invalid parameters
+ }
+ id : <messageId>
+}
+
+Running test: testObjectGroups
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+
+Running test: testReleaseObjectGroupInvalid
+ReleaseObjectGroup with invalid params
+{
+ error : {
+ code : -32602
+ data : objectGroup: string value expected
+ message : Invalid parameters
+ }
+ id : <messageId>
+}
+
+Running test: testEvaluateSyntaxError
+{
+ className : SyntaxError
+ description : SyntaxError: Unexpected token ] at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+
+Running test: testEvaluateReferenceError
+{
+ className : ReferenceError
+ description : ReferenceError: totalRandomNotRealVariable789 is not defined at eval (eval at <anonymous> (:1:1), <anonymous>:1:1) at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+}
+
+Running test: testCallFrameIdTypeError
+{
+ error : {
+ code : -32602
+ data : callFrameId: string value expected
+ message : Invalid parameters
+ }
+ id : <messageId>
+}
+
+Running test: testCallFrameIdInvalidInput
+Testing evaluateOnCallFrame with non-existent callFrameId
+{
+ error : {
+ code : -32000
+ message : Invalid call frame id
+ }
+ id : <messageId>
+}
+
+Running test: testNullExpression
+{
+ error : {
+ code : -32602
+ data : expression: string value expected
+ message : Invalid parameters
+ }
+ id : <messageId>
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values.js b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values.js
new file mode 100644
index 0000000000..e0cc5344b7
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values.js
@@ -0,0 +1,131 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {Protocol} = InspectorTest.start(
+ `Tests that exercise various result types from Debugger.evaluateOnCallFrame`);
+
+(async function test(){
+ await Protocol.Debugger.enable();
+ await Protocol.Runtime.enable();
+ Protocol.Runtime.evaluate({expression: "debugger;"});
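+  // The pause event's top frame supplies the callFrameId used by every test below.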
+ const {params:{callFrames:[{callFrameId}]}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.runAsyncTestSuite([
+ async function testCreateFunction() {
+      await evalAndLog('function testFunc() {return "SUCCESS";}; testFunc();', callFrameId, /*returnByValue*/ true);
+ },
+ async function testNumericValue() {
+ await evalAndLog('-578.28', callFrameId);
+ },
+ async function testUnserializableValues() {
+ const unserializableExpressions = ['NaN', 'Infinity', '-Infinity', '-0'];
+ for (const expression of unserializableExpressions)
+ await evalAndLog(expression, callFrameId);
+ },
+ async function testBooleanValue() {
+ await evalAndLog('Infinity > 0', callFrameId);
+ },
+ async function testObject() {
+ await evalAndLog('({ })', callFrameId);
+ },
+ async function testConsoleLog() {
+ Protocol.Debugger.evaluateOnCallFrame({ expression: `console.log(42)`, callFrameId });
+ const result = await Protocol.Runtime.onceConsoleAPICalled();
+ InspectorTest.logMessage(result);
+ },
+ async function testSymbol() {
+ await evalAndLog(`const symbolTest = Symbol('foo'); symbolTest;`, callFrameId);
+ },
+ async function testSymbolReturnByValueError() {
+ await evalAndLog(`const symbolTest = Symbol('foo'); symbolTest;`, callFrameId, /*returnByValue*/ true);
+ },
+ async function testPromiseResolveReturnByVal() {
+ await evalAndLog('Promise.resolve(239)', callFrameId, /*returnByValue*/ true);
+ },
+ async function testPromiseResolve() {
+ await evalAndLog('Promise.resolve(239)', callFrameId);
+ },
+ async function testReleaseObject() {
+      await Protocol.Runtime.evaluate({ expression: 'var a = {x:3};' });
+      await Protocol.Runtime.evaluate({ expression: 'var b = {x:4};' });
+      const ids = [];
+      let result = await Protocol.Runtime.evaluate({ expression: 'a' });
+      const id1 = result.result.result.objectId;
+      ids.push(id1);
+      result = await Protocol.Runtime.evaluate({ expression: 'b' });
+      const id2 = result.result.result.objectId;
+      ids.push(id2);
+
+ // Call Function on both objects and log:
+ await objectGroupHelper(ids);
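+      // After releaseObject, callFunctionOn with that id should fail with 'Could not find object with given id'.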
+ Protocol.Runtime.releaseObject({ objectId: id1 });
+ await objectGroupHelper(ids);
+ Protocol.Runtime.releaseObject({ objectId: id2 });
+ await objectGroupHelper(ids);
+ },
+ async function testReleaseObjectInvalid() {
+ const releaseObjectResult = await Protocol.Runtime.releaseObject({});
+ InspectorTest.log('ReleaseObject with invalid params.');
+ InspectorTest.logMessage(releaseObjectResult);
+ },
+ async function testObjectGroups() {
+      await Protocol.Runtime.evaluate({ expression: 'var a = {x:3};' });
+      await Protocol.Runtime.evaluate({ expression: 'var b = {x:4};' });
+      const ids = [];
+      let result = await Protocol.Runtime.evaluate({ expression: 'a', objectGroup: 'a' });
+      const id1 = result.result.result.objectId;
+      ids.push(id1);
+      result = await Protocol.Runtime.evaluate({ expression: 'b', objectGroup: 'b' });
+      const id2 = result.result.result.objectId;
+      ids.push(id2);
+
+ // Call Function on both objects and log:
+ await objectGroupHelper(ids);
+ Protocol.Runtime.releaseObjectGroup({ objectGroup: 'a' });
+ await objectGroupHelper(ids);
+ Protocol.Runtime.releaseObjectGroup({ objectGroup: 'b' });
+ await objectGroupHelper(ids);
+ },
+ async function testReleaseObjectGroupInvalid() {
+ const releaseObjectGroupResult = await Protocol.Runtime.releaseObjectGroup({});
+ InspectorTest.log('ReleaseObjectGroup with invalid params');
+ InspectorTest.logMessage(releaseObjectGroupResult);
+ },
+ async function testEvaluateSyntaxError() {
+ const result = await Protocol.Debugger.evaluateOnCallFrame({ expression: `[]]`, callFrameId });
+ InspectorTest.logMessage(result.result.exceptionDetails.exception);
+ },
+ async function testEvaluateReferenceError() {
+ const result = await Protocol.Debugger.evaluateOnCallFrame({ expression: `totalRandomNotRealVariable789`, callFrameId });
+ InspectorTest.logMessage(result.result.exceptionDetails.exception);
+ },
+ async function testCallFrameIdTypeError() {
+ const result = await Protocol.Debugger.evaluateOnCallFrame({ expression: `console.log(42)`, callFrameId: {} });
+ InspectorTest.logMessage(result);
+ },
+ async function testCallFrameIdInvalidInput() {
+ InspectorTest.log('Testing evaluateOnCallFrame with non-existent callFrameId');
+ const result = await Protocol.Debugger.evaluateOnCallFrame({ expression: `console.log(42)`, callFrameId: '1234' });
+ InspectorTest.logMessage(result);
+ },
+ async function testNullExpression() {
+ await evalAndLog(null, callFrameId, /*returnByValue*/ true);
+ }
+ ]);
+
+ async function evalAndLog(expression, callFrameId, returnByValue) {
+ const result = await Protocol.Debugger.evaluateOnCallFrame({ expression, callFrameId, returnByValue });
+ InspectorTest.logMessage(result);
+ }
+
+  // Helper that calls a function on each object id in objectIds and logs the result.
+  async function objectGroupHelper(objectIds) {
+    for (const objectId of objectIds) {
+      const result = await Protocol.Runtime.callFunctionOn({ objectId, functionDeclaration: 'function(){ return this;}' });
+      InspectorTest.logMessage(result);
+    }
+  }
+})();
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
index b8dfd1ce3d..5ce24aa768 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
@@ -267,7 +267,11 @@ function foo6() { Promise.resolve().then(() => 42^) }
Running test: arrowFunctionReturn
#() => #239#
#
-function foo() { function boo() { #return 239# } #}
+function foo() { function boo() { return 239 } #}
+#
+function foo() { function boo() { #return 239# }; #boo #}
+#
+function foo() { let boo = #function() { #return 239# }; #}
#
#() => { #239 #}
#
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
index c3659a0240..5f52be7d5b 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
@@ -135,6 +135,8 @@ function foo6() { Promise.resolve().then(() => 42) }
checkSource('() => 239\n', { lineNumber: 0, columnNumber: 0 })
.then(() => checkSource('function foo() { function boo() { return 239 } }\n', { lineNumber: 0, columnNumber: 0 }))
+ .then(() => checkSource('function foo() { function boo() { return 239 }; boo }\n', { lineNumber: 0, columnNumber: 0 }))
+ .then(() => checkSource('function foo() { let boo = function() { return 239 }; }\n', { lineNumber: 0, columnNumber: 0 }))
.then(() => checkSource('() => { 239 }\n', { lineNumber: 0, columnNumber: 0 }))
.then(() => checkSource('function foo() { 239 }\n', { lineNumber: 0, columnNumber: 0 }))
// TODO(kozyatinskiy): lineNumber for return position should be only 9, not 8.
diff --git a/deps/v8/test/inspector/debugger/get-properties-paused-expected.txt b/deps/v8/test/inspector/debugger/get-properties-paused-expected.txt
new file mode 100644
index 0000000000..ad655f5dba
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-properties-paused-expected.txt
@@ -0,0 +1,89 @@
+Checks Runtime.getProperties method while debugger is paused.
+
+Running test: testObject5
+ __proto__ own object undefined
+ foo own string cat
+Internal properties
+ [[PrimitiveValue]] number 5
+
+Running test: testNotOwn
+ __defineGetter__ inherited function undefined
+ __defineSetter__ inherited function undefined
+ __lookupGetter__ inherited function undefined
+ __lookupSetter__ inherited function undefined
+ __proto__ inherited no value, getter, setter
+ a own number 2
+ b own no value, getter, setter
+ c inherited number 4
+ constructor inherited function undefined
+ d inherited no value, getter
+ hasOwnProperty inherited function undefined
+ isPrototypeOf inherited function undefined
+ propertyIsEnumerable inherited function undefined
+ toLocaleString inherited function undefined
+ toString inherited function undefined
+ valueOf inherited function undefined
+
+Running test: testAccessorsOnly
+ b own no value, getter, setter
+ d own no value, setter
+
+Running test: testArray
+ 0 own string red
+ 1 own string green
+ 2 own string blue
+ __proto__ own object undefined
+ length own number 3
+
+Running test: testBound
+ __proto__ own function undefined
+ length own number 0
+ name own string bound Number
+Internal properties
+ [[BoundArgs]] object undefined
+ [[BoundThis]] object undefined
+ [[TargetFunction]] function undefined
+
+Running test: testObjectThrowsLength
+ __proto__ own object undefined
+ length own no value, getter
+
+Running test: testTypedArrayWithoutLength
+ __proto__ own object undefined
+
+Running test: testArrayBuffer
+[[Int8Array]]
+ 0 own number 1
+ 1 own number 1
+ 2 own number 1
+ 3 own number 1
+ 4 own number 1
+ 5 own number 1
+ 6 own number 1
+ 7 own number 1
+ __proto__ own object undefined
+[[Uint8Array]]
+ 0 own number 1
+ 1 own number 1
+ 2 own number 1
+ 3 own number 1
+ 4 own number 1
+ 5 own number 1
+ 6 own number 1
+ 7 own number 1
+ __proto__ own object undefined
+[[Int16Array]]
+ 0 own number 257
+ 1 own number 257
+ 2 own number 257
+ 3 own number 257
+ __proto__ own object undefined
+[[Int32Array]]
+ 0 own number 16843009
+ 1 own number 16843009
+ __proto__ own object undefined
+
+Running test: testArrayBufferWithBrokenUintCtor
+ [[Int8Array]] own object undefined
+ [[Uint8Array]] own object undefined
+ __proto__ own object undefined
diff --git a/deps/v8/test/inspector/debugger/get-properties-paused.js b/deps/v8/test/inspector/debugger/get-properties-paused.js
new file mode 100644
index 0000000000..4c1e7b9429
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-properties-paused.js
@@ -0,0 +1,109 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let { Protocol } = InspectorTest.start('Checks Runtime.getProperties method while debugger is paused.');
+
+(async function test() {
+ await Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({ expression: 'debugger;' });
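+  // Pause on the 'debugger;' statement and keep the top frame's callFrameId; evaluateToObjectId below evaluates on that frame.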
+ const callFrameId = (await Protocol.Debugger.oncePaused()).params.callFrames[0].callFrameId;
+
+ InspectorTest.runAsyncTestSuite([
+ function testObject5() {
+ return logExpressionProperties(`(function(){var r = Object(5); r.foo = 'cat';return r;})()`);
+ },
+
+ function testNotOwn() {
+ return logExpressionProperties(`({ a: 2, set b(_) {}, get b() {return 5;}, __proto__: { a: 3, c: 4, get d() {return 6;} }})`, { ownProperties: false });
+ },
+
+ function testAccessorsOnly() {
+ return logExpressionProperties(`({ a: 2, set b(_) {}, get b() {return 5;}, c: 'c', set d(_){} })`, { ownProperties: true, accessorPropertiesOnly: true });
+ },
+
+ function testArray() {
+ return logExpressionProperties(`['red', 'green', 'blue']`);
+ },
+
+ function testBound() {
+ return logExpressionProperties('Number.bind({}, 5)');
+ },
+
+ function testObjectThrowsLength() {
+ return logExpressionProperties(`({get length() { throw 'Length called'; }})`);
+ },
+
+ function testTypedArrayWithoutLength() {
+ return logExpressionProperties('({__proto__: Uint8Array.prototype})');
+ },
+
+ async function testArrayBuffer() {
+ let objectId = await evaluateToObjectId('new Uint8Array([1, 1, 1, 1, 1, 1, 1, 1]).buffer');
+ let props = await Protocol.Runtime.getProperties({ objectId, ownProperties: true });
+ for (let prop of props.result.result) {
+ if (prop.name === '__proto__')
+ continue;
+ InspectorTest.log(prop.name);
+ await logGetPropertiesResult(prop.value.objectId);
+ }
+ },
+
+ async function testArrayBufferWithBrokenUintCtor() {
+ await evaluateToObjectId(`(function() {
+ this.uint8array_old = this.Uint8Array;
+ this.Uint8Array = 42;
+ })()`);
+ await logExpressionProperties('new Int8Array([1, 1, 1, 1, 1, 1, 1]).buffer');
+ await evaluateToObjectId(`(function() {
+ this.Uint8Array = this.uint8array_old;
+ delete this.uint8array_old;
+ })()`);
+ }
+ ]);
+
+ async function logExpressionProperties(expression, flags) {
+ const objectId = await evaluateToObjectId(expression);
+ return await logGetPropertiesResult(objectId, flags);
+ }
+
+ async function evaluateToObjectId(expression) {
+ return (await Protocol.Debugger.evaluateOnCallFrame({ expression, callFrameId })).result.result.objectId;
+ }
+
+ async function logGetPropertiesResult(objectId, flags = { ownProperties: true }) {
+ function hasGetterSetter(property, fieldName) {
+ var v = property[fieldName];
+ if (!v) return false;
+ return v.type !== "undefined"
+ }
+
+ flags.objectId = objectId;
+ let props = await Protocol.Runtime.getProperties(flags);
+ var propertyArray = props.result.result;
+ propertyArray.sort(NamedThingComparator);
+ for (var i = 0; i < propertyArray.length; i++) {
+ var p = propertyArray[i];
+ var v = p.value;
+ var own = p.isOwn ? "own" : "inherited";
+ if (v)
+ InspectorTest.log(` ${p.name} ${own} ${v.type} ${v.value}`);
+ else
+ InspectorTest.log(` ${p.name} ${own} no value${(hasGetterSetter(p, "get") ? ", getter" : "")}${(hasGetterSetter(p, "set") ? ", setter" : "")}`);
+ }
+ var internalPropertyArray = props.result.internalProperties;
+ if (internalPropertyArray) {
+ InspectorTest.log('Internal properties');
+ internalPropertyArray.sort(NamedThingComparator);
+ for (var i = 0; i < internalPropertyArray.length; i++) {
+ var p = internalPropertyArray[i];
+ var v = p.value;
+ InspectorTest.log(` ${p.name} ${v.type} ${v.value}`);
+ }
+ }
+
+ function NamedThingComparator(o1, o2) {
+ return o1.name === o2.name ? 0 : (o1.name < o2.name ? -1 : 1);
+ }
+ }
+})();
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
index 8f62c754f3..2334213124 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
@@ -259,3 +259,39 @@ expression: Promise.resolve(42)
value : 42
}
+Running test: privateNames
+expression: new class { #foo = 1; #bar = 2; baz = 3;}
+{
+ name : #foo
+ type : number
+ value : 1
+}
+{
+ name : #bar
+ type : number
+ value : 2
+}
+{
+ name : baz
+ type : number
+ value : 3
+}
+
+expression: new class extends class { #baz = 3; } { #foo = 1; #bar = 2; }
+{
+ name : #baz
+ type : number
+ value : 3
+}
+{
+ name : #foo
+ type : number
+ value : 1
+}
+{
+ name : #bar
+ type : number
+ value : 2
+}
+
+expression: new class extends class { constructor() { return new Proxy({}, {}); } } { #foo = 1; #bar = 2; }
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
index 5b1cc3b8a2..cfbdba816b 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
@@ -1,6 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --harmony-class-fields
let {session, contextGroup, Protocol} = InspectorTest.start("Check internal properties reported in object preview.");
@@ -72,6 +74,14 @@ InspectorTest.runTestSuite([
Protocol.Runtime.evaluate({ expression: "Array.prototype.__defineGetter__(\"0\",() => { throw new Error() }) "})
.then(() => checkExpression("Promise.resolve(42)"))
.then(next);
+ },
+
+ function privateNames(next)
+ {
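+    // Instance private fields (#foo, #bar) should show up in the preview next to public fields.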
+ checkExpression("new class { #foo = 1; #bar = 2; baz = 3;}")
+ .then(() => checkExpression("new class extends class { #baz = 3; } { #foo = 1; #bar = 2; }"))
+ .then(() => checkExpression("new class extends class { constructor() { return new Proxy({}, {}); } } { #foo = 1; #bar = 2; }"))
+ .then(next);
}
]);
diff --git a/deps/v8/test/inspector/debugger/set-async-call-stack-depth-expected.txt b/deps/v8/test/inspector/debugger/set-async-call-stack-depth-expected.txt
index 98fccebe68..1b92806a9d 100644
--- a/deps/v8/test/inspector/debugger/set-async-call-stack-depth-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-async-call-stack-depth-expected.txt
@@ -1,37 +1,37 @@
Checks that we report not more than maxDepth call chains.
Running test: testPaused
-Actual call chain length: 8
-setAsyncCallStackDepth(maxDepth): 16
-reported: 1
-
-Actual call chain length: 8
+Actual call chain length: 4
setAsyncCallStackDepth(maxDepth): 8
-reported: 1
+reported: 4
+
+Actual call chain length: 4
+setAsyncCallStackDepth(maxDepth): 4
+reported: 4
-Actual call chain length: 8
-setAsyncCallStackDepth(maxDepth): 7
-reported: 1
+Actual call chain length: 4
+setAsyncCallStackDepth(maxDepth): 3
+reported: 3
-Actual call chain length: 8
+Actual call chain length: 4
setAsyncCallStackDepth(maxDepth): 0
reported: 0
Running test: testConsoleTrace
-Actual call chain length: 8
-setAsyncCallStackDepth(maxDepth): 16
-reported: 1
-
-Actual call chain length: 8
+Actual call chain length: 4
setAsyncCallStackDepth(maxDepth): 8
-reported: 1
+reported: 4
+
+Actual call chain length: 4
+setAsyncCallStackDepth(maxDepth): 4
+reported: 4
-Actual call chain length: 8
-setAsyncCallStackDepth(maxDepth): 7
-reported: 1
+Actual call chain length: 4
+setAsyncCallStackDepth(maxDepth): 3
+reported: 3
-Actual call chain length: 8
+Actual call chain length: 4
setAsyncCallStackDepth(maxDepth): 0
reported: 0
diff --git a/deps/v8/test/inspector/debugger/set-async-call-stack-depth.js b/deps/v8/test/inspector/debugger/set-async-call-stack-depth.js
index 0c7567f499..32d86879ac 100644
--- a/deps/v8/test/inspector/debugger/set-async-call-stack-depth.js
+++ b/deps/v8/test/inspector/debugger/set-async-call-stack-depth.js
@@ -2,57 +2,70 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(kozyatinskiy): fix or remove it later with new stack traces it's almost
-// imposible to hit limit.
let {session, contextGroup, Protocol} = InspectorTest.start('Checks that we report not more than maxDepth call chains.');
contextGroup.addScript(`
-function promisesChain(num) {
- var p = Promise.resolve();
- for (var i = 0; i < num - 1; ++i) {
- p = p.then(() => 42);
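+// asyncChain schedules four chained setTimeout hops; the final callback either hits
+// 'debugger;' or calls console.trace(42), depending on breakAtEnd.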
+function asyncChain(breakAtEnd) {
+ function asyncOpNested() {
+ setTimeout(asyncOpNested1, 0);
}
- return p;
+ function asyncOpNested1() {
+ setTimeout(asyncOpNested2, 0);
+ }
+ function asyncOpNested2() {
+ setTimeout(asyncOpNested3, 0);
+ }
+ function asyncOpNested3() {
+ setTimeout(asyncOpNested4, 0);
+ }
+ function asyncOpNested4() {
+ if (breakAtEnd) {
+ debugger;
+ } else {
+ console.trace(42);
+ }
+ }
+ asyncOpNested();
}
`);
Protocol.Debugger.enable();
InspectorTest.runAsyncTestSuite([
async function testPaused() {
- let callback = '() => { debugger; }';
- startTest({ generated: 8, limit: 16, callback});
+ const breakAtEnd = true;
+ startTest({ limit: 8, breakAtEnd });
dumpCaptured((await Protocol.Debugger.oncePaused()).params.asyncStackTrace);
await Protocol.Debugger.resume();
- startTest({ generated: 8, limit: 8, callback});
+ startTest({ limit: 4, breakAtEnd });
dumpCaptured((await Protocol.Debugger.oncePaused()).params.asyncStackTrace);
await Protocol.Debugger.resume();
- startTest({ generated: 8, limit: 7, callback});
+ startTest({ limit: 3, breakAtEnd });
dumpCaptured((await Protocol.Debugger.oncePaused()).params.asyncStackTrace);
await Protocol.Debugger.resume();
- startTest({ generated: 8, limit: 0, callback});
+ startTest({ limit: 0, breakAtEnd });
dumpCaptured((await Protocol.Debugger.oncePaused()).params.asyncStackTrace);
await Protocol.Debugger.resume();
},
async function testConsoleTrace() {
await Protocol.Runtime.enable();
- let callback = '() => { console.trace(42); }';
- startTest({ generated: 8, limit: 16, callback});
+ const breakAtEnd = false;
+ startTest({ limit: 8, breakAtEnd});
let msg = await Protocol.Runtime.onceConsoleAPICalled();
dumpCaptured(msg.params.stackTrace.parent);
- startTest({ generated: 8, limit: 8, callback});
+ startTest({ limit: 4, breakAtEnd});
msg = await Protocol.Runtime.onceConsoleAPICalled();
dumpCaptured(msg.params.stackTrace.parent);
- startTest({ generated: 8, limit: 7, callback});
+ startTest({ limit: 3, breakAtEnd});
msg = await Protocol.Runtime.onceConsoleAPICalled();
dumpCaptured(msg.params.stackTrace.parent);
- startTest({ generated: 8, limit: 0, callback});
+ startTest({ limit: 0, breakAtEnd});
msg = await Protocol.Runtime.onceConsoleAPICalled();
dumpCaptured(msg.params.stackTrace.parent);
@@ -61,12 +74,12 @@ InspectorTest.runAsyncTestSuite([
]);
function startTest(params) {
- InspectorTest.log('Actual call chain length: ' + params.generated);
- InspectorTest.log('setAsyncCallStackDepth(maxDepth): ' + params.limit);
+ InspectorTest.log('Actual call chain length: 4');
+ InspectorTest.log(`setAsyncCallStackDepth(maxDepth): ${params.limit}`);
Protocol.Debugger.setAsyncCallStackDepth({maxDepth: params.limit});
Protocol.Runtime.evaluate({expression:
- `promisesChain(${params.generated}).then(${params.callback})`});
+ `asyncChain(${params.breakAtEnd})`});
}
function dumpCaptured(stack) {
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-url-regex-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-url-regex-expected.txt
new file mode 100644
index 0000000000..0696b5bd42
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-url-regex-expected.txt
@@ -0,0 +1,30 @@
+Tests for calling setBreakpoint with urlRegex
+
+Running test: testSetBreakpointByUrlRegex
+[
+ [0] : {
+ columnNumber : 2
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+]
+Successfully paused during eval of: 'test()'
+Successfully completed eval of: 'test()'
+
+Running test: testSetBreakpointByUrlWithConditions
+[
+ [0] : {
+ columnNumber : 2
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+]
+Successfully completed eval of: 'test()'
+[
+ [0] : {
+ columnNumber : 2
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+]
+Successfully paused during eval of: 'test()' \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-url-regex.js b/deps/v8/test/inspector/debugger/set-breakpoint-url-regex.js
new file mode 100644
index 0000000000..046f047a2f
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-url-regex.js
@@ -0,0 +1,57 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const { contextGroup, Protocol } = InspectorTest.start(
+ `Tests for calling setBreakpoint with urlRegex`);
+
+(async function test(){
+ await Protocol.Debugger.enable();
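+  // The sourceURL comment names this script 'some-kind-of-test.js', which the urlRegex '.*of-test.js' below matches.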
+ Protocol.Runtime.evaluate({expression: `
+function test() {
+ return 42;
+}
+//# sourceURL=some-kind-of-test.js`});
+ await Protocol.Debugger.onceScriptParsed();
+
+ InspectorTest.runAsyncTestSuite([
+ async function testSetBreakpointByUrlRegex() {
+ const result = await Protocol.Debugger.setBreakpointByUrl({ lineNumber: 2, urlRegex: '.*of-test.js' });
+ InspectorTest.logMessage(result.result.locations);
+ await expectBreakInEval('test()');
+ await Protocol.Debugger.removeBreakpoint({ breakpointId: result.result.breakpointId });
+ await expectNoBreakInEval('test()');
+ },
+ async function testSetBreakpointByUrlWithConditions() {
+ // Test Condition false
+ let result = await Protocol.Debugger.setBreakpointByUrl({ lineNumber: 2, urlRegex: '.*of-test.js', condition: 'false' });
+ InspectorTest.logMessage(result.result.locations);
+ await expectNoBreakInEval('test()');
+ await Protocol.Debugger.removeBreakpoint({ breakpointId: result.result.breakpointId });
+
+ // Test condition true
+ result = await Protocol.Debugger.setBreakpointByUrl({ lineNumber: 2, urlRegex: '.*of-test.js', condition: 'true' });
+ InspectorTest.logMessage(result.result.locations);
+ await expectBreakInEval('test()');
+ await Protocol.Debugger.removeBreakpoint({ breakpointId: result.result.breakpointId });
+ },
+ ]);
+
+  // Evaluates an expression and returns once evaluation completes.
+  // Used to validate that the breakpoint is not hit within the evaluated expression.
+ async function expectNoBreakInEval(expression) {
+ await Protocol.Runtime.evaluate({expression});
+ InspectorTest.log(`Successfully completed eval of: '${expression}'`);
+ }
+
+  // Evaluates an expression and returns once a paused event is received and the
+  // debugger is resumed.
+  // Used to validate that the breakpoint is hit within the evaluated expression.
+ async function expectBreakInEval(expression) {
+ Protocol.Runtime.evaluate({expression});
+ await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`Successfully paused during eval of: '${expression}'`);
+ await Protocol.Debugger.resume();
+ }
+
+})();
diff --git a/deps/v8/test/inspector/debugger/set-variable-value-expected.txt b/deps/v8/test/inspector/debugger/set-variable-value-expected.txt
new file mode 100644
index 0000000000..33bdfd1706
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-variable-value-expected.txt
@@ -0,0 +1,260 @@
+Tests that exercise Debugger.setVariableValue
+
+Running test: testSetVariableValueMain
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 5
+ type : number
+ value : 5
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 10
+ type : number
+ value : 10
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : NaN
+ type : number
+ unserializableValue : NaN
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : boolean
+ value : true
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : a
+ value : {
+ description : 3
+ type : number
+ value : 3
+ }
+ writable : true
+ }
+ [1] : {
+ configurable : true
+ enumerable : false
+ isOwn : true
+ name : __proto__
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ writable : true
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Array
+ description : Array(3)
+ objectId : <objectId>
+ subtype : array
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ type : string
+ value : 1
+ }
+ writable : true
+ }
+ [1] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 1
+ value : {
+ type : string
+ value : 2
+ }
+ writable : true
+ }
+ [2] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 2
+ value : {
+ type : string
+ value : 3
+ }
+ writable : true
+ }
+ [3] : {
+ configurable : false
+ enumerable : false
+ isOwn : true
+ name : length
+ value : {
+ description : 3
+ type : number
+ value : 3
+ }
+ writable : true
+ }
+ [4] : {
+ configurable : true
+ enumerable : false
+ isOwn : true
+ name : __proto__
+ value : {
+ className : Array
+ description : Array(0)
+ objectId : <objectId>
+ subtype : array
+ type : object
+ }
+ writable : true
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : b
+ value : {
+ description : 4
+ type : number
+ value : 4
+ }
+ writable : true
+ }
+ [1] : {
+ configurable : true
+ enumerable : false
+ isOwn : true
+ name : __proto__
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ writable : true
+ }
+ ]
+ }
+}
+
+Running test: testInvalidFrame
+setVariableValue with invalid callFrameId
+{
+ error : {
+ code : -32000
+ message : Invalid call frame id
+ }
+ id : <messageId>
+}
+setVariableValue with invalid scopeNumber
+{
+ error : {
+ code : -32602
+ data : scopeNumber: integer value expected
+ message : Invalid parameters
+ }
+ id : <messageId>
+}
+setVariableValue with invalid scopeNumber
+{
+ error : {
+ code : -32000
+ message : Could not find scope with given number
+ }
+ id : <messageId>
+}
+setVariableValue with invalid variableName
+{
+ error : {
+ code : -32603
+ message : Internal error
+ }
+ id : <messageId>
+}
+
+Running test: testNewValueErrors
+setVariableValue with invalid unserializableValue
+{
+ error : {
+ code : -32000
+ message : Couldn't parse value object in call argument
+ }
+ id : <messageId>
+}
+setVariableValue with invalid objectId
+{
+ error : {
+ code : -32602
+ data : newValue.objectId: string value expected
+ message : Invalid parameters
+ }
+ id : <messageId>
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/set-variable-value.js b/deps/v8/test/inspector/debugger/set-variable-value.js
new file mode 100644
index 0000000000..48bde89fbf
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-variable-value.js
@@ -0,0 +1,90 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-always-opt
+
+const { contextGroup, Protocol } = InspectorTest.start(
+ `Tests that exercise Debugger.setVariableValue`);
+
+(async function test(){
+ await Protocol.Debugger.enable();
+ await Protocol.Runtime.enable();
+ contextGroup.addInlineScript(`
+ function test() {
+ let num = 5;
+ let obj = {b: 4};
+ let bool = true;
+ let set_breakpoint_here = true;
+ debugger;
+ }
+ `, 'test.js');
+ Protocol.Runtime.evaluate({expression: "test();"});
+ const {params:{callFrames:[{callFrameId}]}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.runAsyncTestSuite([
+ async function testSetVariableValueMain() {
+ // Set value to a Number
+ let result = await Protocol.Debugger.evaluateOnCallFrame({ expression: 'num', callFrameId });
+ InspectorTest.logMessage(result);
+ await Protocol.Debugger.setVariableValue({ scopeNumber: 0, variableName: 'num', newValue: { value: 10 }, callFrameId });
+ result = await Protocol.Debugger.evaluateOnCallFrame({ expression: 'num', callFrameId });
+ InspectorTest.logMessage(result);
+
+ // Set Value to NaN
+ await Protocol.Debugger.setVariableValue({ scopeNumber: 0, variableName: 'num', newValue: { unserializableValue: 'NaN' }, callFrameId });
+ result = await Protocol.Debugger.evaluateOnCallFrame({ expression: 'num', callFrameId });
+ InspectorTest.logMessage(result);
+
+ // Set Value to boolean:true
+ await Protocol.Debugger.setVariableValue({ scopeNumber: 0, variableName: 'num', newValue: { value: true }, callFrameId });
+ result = await Protocol.Debugger.evaluateOnCallFrame({ expression: 'num', callFrameId });
+ InspectorTest.logMessage(result);
+
+ // Set Value to a new object
+ await Protocol.Debugger.setVariableValue({ scopeNumber: 0, variableName: 'num', newValue: { value: { a: 3 } }, callFrameId });
+ result = await Protocol.Debugger.evaluateOnCallFrame({ expression: 'num', callFrameId });
+ InspectorTest.logMessage(result);
+ let props = await Protocol.Runtime.getProperties({ objectId: result.result.result.objectId, ownProperties: true });
+ InspectorTest.logMessage(props);
+
+ // Set Value to new Array
+ await Protocol.Debugger.setVariableValue({ scopeNumber: 0, variableName: 'num', newValue: { value: ['1', '2', '3'] }, callFrameId });
+ result = await Protocol.Debugger.evaluateOnCallFrame({ expression: 'num', callFrameId });
+ InspectorTest.logMessage(result);
+ props = await Protocol.Runtime.getProperties({ objectId: result.result.result.objectId, ownProperties: true });
+ InspectorTest.logMessage(props);
+
+ // Set Value to existing object with objectId
+      result = await Protocol.Debugger.evaluateOnCallFrame({ expression: 'obj', callFrameId });
+      let objectId = result.result.result.objectId;
+      await Protocol.Debugger.setVariableValue({ scopeNumber: 0, variableName: 'num', newValue: { objectId }, callFrameId });
+      result = await Protocol.Debugger.evaluateOnCallFrame({ expression: 'num', callFrameId });
+ InspectorTest.logMessage(result);
+ props = await Protocol.Runtime.getProperties({ objectId: result.result.result.objectId, ownProperties: true });
+ InspectorTest.logMessage(props);
+ },
+ async function testInvalidFrame() {
+ let result = await Protocol.Debugger.setVariableValue({ scopeNumber: 0, variableName: 'num', newValue: { unserializableValue: 'NaN' }, callFrameId: 'fakeCallFrame' });
+ InspectorTest.log('setVariableValue with invalid callFrameId');
+ InspectorTest.logMessage(result);
+ result = await Protocol.Debugger.setVariableValue({ scopeNumber: 'invalidScopeType', variableName: 'num', newValue: { unserializableValue: 'NaN' }, callFrameId });
+      InspectorTest.log('setVariableValue with invalid scopeNumber');
+ InspectorTest.logMessage(result);
+ result = await Protocol.Debugger.setVariableValue({ scopeNumber: 1000, variableName: 'num', newValue: { unserializableValue: 'NaN' }, callFrameId });
+ InspectorTest.log('setVariableValue with invalid scopeNumber');
+ InspectorTest.logMessage(result);
+ result = await Protocol.Debugger.setVariableValue({ scopeNumber: 0, variableName: 'FakeObjectName', newValue: { unserializableValue: 'NaN' }, callFrameId });
+ InspectorTest.log('setVariableValue with invalid variableName');
+ InspectorTest.logMessage(result);
+ },
+ async function testNewValueErrors() {
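+      // newValue is a Runtime.CallArgument: set one of value, unserializableValue, or objectId.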
+ let result = await Protocol.Debugger.setVariableValue({ scopeNumber: 0, variableName: 'num', newValue: { unserializableValue: 'not unserializable value' }, callFrameId });
+ InspectorTest.log('setVariableValue with invalid unserializableValue');
+ InspectorTest.logMessage(result);
+ result = await Protocol.Debugger.setVariableValue({ scopeNumber: 0, variableName: 'num', newValue: { objectId: 2000 }, callFrameId });
+ InspectorTest.log('setVariableValue with invalid objectId');
+ InspectorTest.logMessage(result);
+ }
+ ]);
+
+})();
diff --git a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
index 4e7d9e9815..a070334980 100644
--- a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
+++ b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
@@ -10,6 +10,7 @@ function testFunction()
var o = 0;
function f() { return 1; }
function g() { o = 2; return o; }
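+  // Reference f and g so they stay context-allocated and remain visible to debug-evaluate.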
+ f,g;
debugger;
}
//# sourceURL=foo.js`);
diff --git a/deps/v8/test/inspector/debugger/step-snapshot-expected.txt b/deps/v8/test/inspector/debugger/step-snapshot-expected.txt
index 0d8d039538..f75a184252 100644
--- a/deps/v8/test/inspector/debugger/step-snapshot-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-snapshot-expected.txt
@@ -11,12 +11,6 @@ paused
}
paused
-function c(f#, ...args) { return f(...args); }
-
-paused
-function c(f, ...args#) { return f(...args); }
-
-paused
function c(f, ...args) { #return f(...args); }
paused
diff --git a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js
index ea2a116be5..f0c20b3955 100644
--- a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js
+++ b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js
@@ -6,7 +6,6 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Tests breakable locations in wasm');
-utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/inspector/debugger/wasm-imports.js b/deps/v8/test/inspector/debugger/wasm-imports.js
index dbe96ce671..0d83917fa9 100644
--- a/deps/v8/test/inspector/debugger/wasm-imports.js
+++ b/deps/v8/test/inspector/debugger/wasm-imports.js
@@ -4,7 +4,6 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Tests imports in wasm');
-utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
// Build two modules A and B. A defines function func, which contains a
diff --git a/deps/v8/test/inspector/debugger/wasm-reset-context-group.js b/deps/v8/test/inspector/debugger/wasm-reset-context-group.js
index be347c0736..c1353da86b 100644
--- a/deps/v8/test/inspector/debugger/wasm-reset-context-group.js
+++ b/deps/v8/test/inspector/debugger/wasm-reset-context-group.js
@@ -4,7 +4,6 @@
InspectorTest.log('Checks resetting context group with wasm.');
-utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info.js b/deps/v8/test/inspector/debugger/wasm-scope-info.js
index 290bd58412..f7a0df497f 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info.js
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info.js
@@ -30,7 +30,6 @@ async function printPauseLocationsAndContinue() {
}
async function instantiateWasm() {
- utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts.js b/deps/v8/test/inspector/debugger/wasm-scripts.js
index 0993f11b53..04e5ec88c1 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts.js
+++ b/deps/v8/test/inspector/debugger/wasm-scripts.js
@@ -6,7 +6,6 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Tests how wasm scripts are reported');
-utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
// Add two empty functions. Both should be registered as individual scripts at
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
index 76a831392f..a9b676f8a7 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
@@ -5,7 +5,6 @@
const {session, contextGroup, Protocol} =
InspectorTest.start('Tests stepping through wasm scripts.');
-utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/inspector/debugger/wasm-source.js b/deps/v8/test/inspector/debugger/wasm-source.js
index bf7bab735c..6c5b14c4be 100644
--- a/deps/v8/test/inspector/debugger/wasm-source.js
+++ b/deps/v8/test/inspector/debugger/wasm-source.js
@@ -6,7 +6,6 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Tests how wasm scripts report the source');
-utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/inspector/debugger/wasm-stack.js b/deps/v8/test/inspector/debugger/wasm-stack.js
index 0234dabe79..0e2ff0fa8e 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack.js
+++ b/deps/v8/test/inspector/debugger/wasm-stack.js
@@ -6,7 +6,6 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Tests call stack in wasm scripts');
-utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
index 7732e1396e..1c9ec95577 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
@@ -5,7 +5,6 @@
let {session, contextGroup, Protocol} =
InspectorTest.start('Tests stepping through wasm scripts with source maps');
-utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping.js b/deps/v8/test/inspector/debugger/wasm-stepping.js
index d3a2c64048..0fda6b73be 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping.js
@@ -4,7 +4,6 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Tests stepping through wasm scripts');
-utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index b4f04ce7b0..4b7c312785 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -35,7 +35,7 @@
}], # variant != default
##############################################################################
-['lite_mode', {
+['lite_mode or variant == jitless', {
# Lite mode does not allocate feedback vector.
'type-profiler/type-profile-start-stop': [SKIP],
'type-profiler/type-profile': [SKIP],
@@ -48,7 +48,15 @@
'debugger/asm-js-breakpoint-before-exec': [SKIP],
'debugger/asm-js-breakpoint-during-exec': [SKIP],
'debugger/wasm-*': [SKIP],
-}], # 'lite_mode'
+ 'cpu-profiler/console-profile-wasm': [SKIP],
+}], # 'lite_mode or variant == jitless'
+
+##############################################################################
+['variant == jitless', {
+ # https://crbug.com/v8/7777
+ 'cpu-profiler/coverage': [SKIP],
+ 'cpu-profiler/coverage-block': [SKIP],
+}], # variant == jitless
##############################################################################
['(arch == arm or arch == arm64) and simulator_run', {
@@ -73,4 +81,9 @@
'debugger/pause-on-oom': [SKIP],
}],
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
+
]
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 2a5f8e1c84..3cdd1f968c 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -111,6 +111,7 @@ v8::Local<v8::Context> IsolateData::GetContext(int context_group_id) {
}
void IsolateData::ResetContextGroup(int context_group_id) {
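+  // v8::SealHandleScope makes local handle creation in this scope an error,
+  // checking that inspector entry points allocate no handles on this isolate.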
+ v8::SealHandleScope seal_handle_scope(isolate());
inspector_->resetContextGroup(context_group_id);
}
@@ -149,6 +150,7 @@ v8::MaybeLocal<v8::Module> IsolateData::ModuleResolveCallback(
int IsolateData::ConnectSession(int context_group_id,
const v8_inspector::StringView& state,
v8_inspector::V8Inspector::Channel* channel) {
+ v8::SealHandleScope seal_handle_scope(isolate());
int session_id = ++last_session_id_;
sessions_[session_id] = inspector_->connect(context_group_id, channel, state);
context_group_by_session_[sessions_[session_id].get()] = context_group_id;
@@ -157,6 +159,7 @@ int IsolateData::ConnectSession(int context_group_id,
std::unique_ptr<v8_inspector::StringBuffer> IsolateData::DisconnectSession(
int session_id) {
+ v8::SealHandleScope seal_handle_scope(isolate());
auto it = sessions_.find(session_id);
CHECK(it != sessions_.end());
context_group_by_session_.erase(it->second.get());
@@ -167,6 +170,7 @@ std::unique_ptr<v8_inspector::StringBuffer> IsolateData::DisconnectSession(
void IsolateData::SendMessage(int session_id,
const v8_inspector::StringView& message) {
+ v8::SealHandleScope seal_handle_scope(isolate());
auto it = sessions_.find(session_id);
if (it != sessions_.end()) it->second->dispatchProtocolMessage(message);
}
@@ -174,6 +178,7 @@ void IsolateData::SendMessage(int session_id,
void IsolateData::BreakProgram(int context_group_id,
const v8_inspector::StringView& reason,
const v8_inspector::StringView& details) {
+ v8::SealHandleScope seal_handle_scope(isolate());
for (int session_id : GetSessionIds(context_group_id)) {
auto it = sessions_.find(session_id);
if (it != sessions_.end()) it->second->breakProgram(reason, details);
@@ -183,6 +188,7 @@ void IsolateData::BreakProgram(int context_group_id,
void IsolateData::SchedulePauseOnNextStatement(
int context_group_id, const v8_inspector::StringView& reason,
const v8_inspector::StringView& details) {
+ v8::SealHandleScope seal_handle_scope(isolate());
for (int session_id : GetSessionIds(context_group_id)) {
auto it = sessions_.find(session_id);
if (it != sessions_.end())
@@ -191,6 +197,7 @@ void IsolateData::SchedulePauseOnNextStatement(
}
void IsolateData::CancelPauseOnNextStatement(int context_group_id) {
+ v8::SealHandleScope seal_handle_scope(isolate());
for (int session_id : GetSessionIds(context_group_id)) {
auto it = sessions_.find(session_id);
if (it != sessions_.end()) it->second->cancelPauseOnNextStatement();
@@ -199,34 +206,41 @@ void IsolateData::CancelPauseOnNextStatement(int context_group_id) {
void IsolateData::AsyncTaskScheduled(const v8_inspector::StringView& name,
void* task, bool recurring) {
+ v8::SealHandleScope seal_handle_scope(isolate());
inspector_->asyncTaskScheduled(name, task, recurring);
}
void IsolateData::AsyncTaskStarted(void* task) {
+ v8::SealHandleScope seal_handle_scope(isolate());
inspector_->asyncTaskStarted(task);
}
void IsolateData::AsyncTaskFinished(void* task) {
+ v8::SealHandleScope seal_handle_scope(isolate());
inspector_->asyncTaskFinished(task);
}
v8_inspector::V8StackTraceId IsolateData::StoreCurrentStackTrace(
const v8_inspector::StringView& description) {
+ v8::SealHandleScope seal_handle_scope(isolate());
return inspector_->storeCurrentStackTrace(description);
}
void IsolateData::ExternalAsyncTaskStarted(
const v8_inspector::V8StackTraceId& parent) {
+ v8::SealHandleScope seal_handle_scope(isolate());
inspector_->externalAsyncTaskStarted(parent);
}
void IsolateData::ExternalAsyncTaskFinished(
const v8_inspector::V8StackTraceId& parent) {
+ v8::SealHandleScope seal_handle_scope(isolate());
inspector_->externalAsyncTaskFinished(parent);
}
void IsolateData::AddInspectedObject(int session_id,
v8::Local<v8::Value> object) {
+ v8::SealHandleScope seal_handle_scope(isolate());
auto it = sessions_.find(session_id);
if (it == sessions_.end()) return;
std::unique_ptr<Inspectable> inspectable(
@@ -235,10 +249,12 @@ void IsolateData::AddInspectedObject(int session_id,
}
void IsolateData::SetMaxAsyncTaskStacksForTest(int limit) {
+ v8::SealHandleScope seal_handle_scope(isolate());
v8_inspector::SetMaxAsyncTaskStacksForTest(inspector_.get(), limit);
}
void IsolateData::DumpAsyncTaskStacksStateForTest() {
+ v8::SealHandleScope seal_handle_scope(isolate());
v8_inspector::DumpAsyncTaskStacksStateForTest(inspector_.get());
}
@@ -275,6 +291,7 @@ int IsolateData::HandleMessage(v8::Local<v8::Message> message,
}
v8_inspector::StringView url(url_string.start(), url_string.length());
+ v8::SealHandleScope seal_handle_scope(isolate);
return inspector->exceptionThrown(
context, message_text, exception, detailed_message, url, line_number,
column_number, inspector->createStackTrace(stack), script_id);
@@ -303,6 +320,7 @@ void IsolateData::PromiseRejectHandler(v8::PromiseRejectMessage data) {
if (!id->IsInt32()) return;
v8_inspector::V8Inspector* inspector =
IsolateData::FromContext(context)->inspector_.get();
+ v8::SealHandleScope seal_handle_scope(isolate);
const char* reason_str = "Handler added to rejected promise";
inspector->exceptionRevoked(
context, id.As<v8::Int32>()->Value(),
@@ -327,10 +345,12 @@ void IsolateData::FireContextCreated(v8::Local<v8::Context> context,
v8_inspector::V8ContextInfo info(context, context_group_id,
v8_inspector::StringView());
info.hasMemoryOnConsole = true;
+ v8::SealHandleScope seal_handle_scope(isolate());
inspector_->contextCreated(info);
}
void IsolateData::FireContextDestroyed(v8::Local<v8::Context> context) {
+ v8::SealHandleScope seal_handle_scope(isolate());
inspector_->contextDestroyed(context);
}
@@ -406,10 +426,14 @@ v8::MaybeLocal<v8::Value> IsolateData::memoryInfo(v8::Isolate* isolate,
}
void IsolateData::runMessageLoopOnPause(int) {
+ v8::SealHandleScope seal_handle_scope(isolate());
task_runner_->RunMessageLoop(true);
}
-void IsolateData::quitMessageLoopOnPause() { task_runner_->QuitMessageLoop(); }
+void IsolateData::quitMessageLoopOnPause() {
+ v8::SealHandleScope seal_handle_scope(isolate());
+ task_runner_->QuitMessageLoop();
+}
void IsolateData::consoleAPIMessage(int contextGroupId,
v8::Isolate::MessageErrorLevel level,
diff --git a/deps/v8/test/inspector/runtime/exceptionthrown-on-connect-expected.txt b/deps/v8/test/inspector/runtime/exceptionthrown-on-connect-expected.txt
new file mode 100644
index 0000000000..4bcde813fc
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/exceptionthrown-on-connect-expected.txt
@@ -0,0 +1,46 @@
+Tests that Runtime throws exceptions after enabling domain on scripts with errors.
+Enabling Runtime Domain.
+{
+ method : Runtime.exceptionThrown
+ params : {
+ exceptionDetails : {
+ columnNumber : 12
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token ;
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ executionContextId : <executionContextId>
+ lineNumber : 1
+ scriptId : <scriptId>
+ text : Uncaught SyntaxError: Unexpected token ;
+ url : syntaxError.js
+ }
+ timestamp : <timestamp>
+ }
+}
+{
+ method : Runtime.exceptionThrown
+ params : {
+ exceptionDetails : {
+ columnNumber : 12
+ exception : {
+ className : ReferenceError
+ description : ReferenceError: y is not defined at referenceError.js:2:13
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ executionContextId : <executionContextId>
+ lineNumber : 1
+ scriptId : <scriptId>
+ text : Uncaught ReferenceError: y is not defined
+ url : referenceError.js
+ }
+ timestamp : <timestamp>
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/exceptionthrown-on-connect.js b/deps/v8/test/inspector/runtime/exceptionthrown-on-connect.js
new file mode 100644
index 0000000000..583459c312
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/exceptionthrown-on-connect.js
@@ -0,0 +1,25 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {Protocol, contextGroup} = InspectorTest.start(
+ `Tests that Runtime throws exceptions after enabling domain on scripts with errors.`);
+
+(async function test(){
+ // Log all exceptions thrown
+ Protocol.Runtime.onExceptionThrown(exception => {
+ InspectorTest.logMessage(exception);
+ });
+ // Add scripts with syntax and reference errors
+ contextGroup.addScript(
+ `
+ var x = ;
+ //# sourceURL=syntaxError.js`);
+ contextGroup.addScript(
+ `
+ var x = y;
+ //# sourceURL=referenceError.js`);
+ InspectorTest.log('Enabling Runtime Domain.');
+ await Protocol.Runtime.enable();
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/runtime/release-object-expected.txt b/deps/v8/test/inspector/runtime/release-object-expected.txt
new file mode 100644
index 0000000000..4c479c7558
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/release-object-expected.txt
@@ -0,0 +1,157 @@
+Tests that Runtime can properly release objects and object groups.
+
+Running test: testReleaseObject
+Evaluating 'var a = {x:3};'
+Evaluating 'var b = {x:4};'
+Evaluate 'this' for object a
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+Evaluate 'this' for object b
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+Release "a"
+Evaluate 'this' for object a
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+Evaluate 'this' for object b
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+Release "b"
+Evaluate 'this' for object a
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+Evaluate 'this' for object b
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+
+Running test: testReleaseObjectInvalid
+ReleaseObject with invalid params.
+{
+ error : {
+ code : -32602
+ data : objectId: string value expected
+ message : Invalid parameters
+ }
+ id : <messageId>
+}
+
+Running test: testObjectGroups
+Evaluating 'var a = {x:3};'
+Evaluating 'var b = {x:4};'
+Evaluate "a" in objectGroup "x"
+Evaluate "b" in objectGroup "y"
+Evaluate 'this' for object a
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+Evaluate 'this' for object b
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+Release objectGroup "x"
+Evaluate 'this' for object a
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+Evaluate 'this' for object b
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+Release objectGroup "y"
+Evaluate 'this' for object a
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+Evaluate 'this' for object b
+{
+ error : {
+ code : -32000
+ message : Could not find object with given id
+ }
+ id : <messageId>
+}
+
+Running test: testReleaseObjectGroupInvalid
+ReleaseObjectGroup with invalid params
+{
+ error : {
+ code : -32602
+ data : objectGroup: string value expected
+ message : Invalid parameters
+ }
+ id : <messageId>
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/runtime/release-object.js b/deps/v8/test/inspector/runtime/release-object.js
new file mode 100644
index 0000000000..ae388ff9c4
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/release-object.js
@@ -0,0 +1,79 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {Protocol} = InspectorTest.start(
+ `Tests that Runtime can properly release objects and object groups.`);
+
+(async function test(){
+ await Protocol.Runtime.enable();
+ InspectorTest.runAsyncTestSuite([
+ async function testReleaseObject() {
+ await logAndEvaluate('var a = {x:3};');
+ await logAndEvaluate('var b = {x:4};');
+ const ids = [];
+ let result = await Protocol.Runtime.evaluate({ expression: 'a' });
+ const id1 = result.result.result.objectId;
+ ids.push({id: id1, name: 'a'});
+ result = await Protocol.Runtime.evaluate({ expression: 'b' });
+ const id2 = result.result.result.objectId;
+ ids.push({id: id2, name: 'b'});
+
+ // Call Function on both objects and log:
+ await objectGroupHelper(ids);
+ InspectorTest.log('Release "a"');
+ Protocol.Runtime.releaseObject({ objectId: id1 });
+ await objectGroupHelper(ids);
+ InspectorTest.log('Release "b"');
+ Protocol.Runtime.releaseObject({ objectId: id2 });
+ await objectGroupHelper(ids);
+ },
+ async function testReleaseObjectInvalid() {
+ const releaseObjectResult = await Protocol.Runtime.releaseObject({});
+ InspectorTest.log('ReleaseObject with invalid params.');
+ InspectorTest.logMessage(releaseObjectResult);
+ },
+ async function testObjectGroups() {
+ await logAndEvaluate('var a = {x:3};');
+ await logAndEvaluate('var b = {x:4};');
+ const ids = [];
+ InspectorTest.log('Evaluate "a" in objectGroup "x"');
+ let result = await Protocol.Runtime.evaluate({ expression: 'a', objectGroup: 'x' });
+ const id1 = result.result.result.objectId;
+ ids.push({id: id1, name: 'a'});
+ InspectorTest.log('Evaluate "b" in objectGroup "y"');
+ result = await Protocol.Runtime.evaluate({ expression: 'b', objectGroup: 'y' });
+ const id2 = result.result.result.objectId;
+ ids.push({id: id2, name: 'b'});
+
+ // Call Function on both objects and log:
+ await objectGroupHelper(ids);
+ InspectorTest.log('Release objectGroup "x"');
+ Protocol.Runtime.releaseObjectGroup({ objectGroup: 'x' });
+ await objectGroupHelper(ids);
+ InspectorTest.log('Release objectGroup "y"');
+ Protocol.Runtime.releaseObjectGroup({ objectGroup: 'y' });
+ await objectGroupHelper(ids);
+ },
+ async function testReleaseObjectGroupInvalid() {
+ const releaseObjectGroupResult = await Protocol.Runtime.releaseObjectGroup({});
+ InspectorTest.log('ReleaseObjectGroup with invalid params');
+ InspectorTest.logMessage(releaseObjectGroupResult);
+ }
+ ]);
+
+ // Helper to log and evaluate an expression
+ async function logAndEvaluate(expression) {
+ InspectorTest.logMessage(`Evaluating '${expression}'`);
+ await Protocol.Runtime.evaluate({ expression });
+ }
+
+ // Helper that calls a function on every object in objectIds and logs the result.
+ async function objectGroupHelper(objectIds) {
+ for (const { id, name } of objectIds) {
+ InspectorTest.log(`Evaluate 'this' for object ${name}`);
+ const result = await Protocol.Runtime.callFunctionOn({ objectId: id, functionDeclaration: 'function(){ return this;}' });
+ InspectorTest.logMessage(result);
+ }
+ }
+})();
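
The contract the test pins down: once `Runtime.releaseObject` or `Runtime.releaseObjectGroup` runs, the remote object id is invalid, and any later `callFunctionOn` against it answers with error -32000. A compressed sketch under the same harness (a sketch only, with illustrative values):

    const { result } = await Protocol.Runtime.evaluate(
        { expression: '({x: 3})', objectGroup: 'g' });
    await Protocol.Runtime.releaseObjectGroup({ objectGroup: 'g' });
    // Now answers: { error: { code: -32000,
    //                         message: 'Could not find object with given id' } }
    await Protocol.Runtime.callFunctionOn({
      objectId: result.result.objectId,
      functionDeclaration: 'function() { return this; }'
    });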
diff --git a/deps/v8/test/inspector/testcfg.py b/deps/v8/test/inspector/testcfg.py
index 9660147624..e07aa9e616 100644
--- a/deps/v8/test/inspector/testcfg.py
+++ b/deps/v8/test/inspector/testcfg.py
@@ -13,25 +13,19 @@ PROTOCOL_TEST_JS = "protocol-test.js"
EXPECTED_SUFFIX = "-expected.txt"
RESOURCES_FOLDER = "resources"
+class TestLoader(testsuite.JSTestLoader):
+ @property
+ def excluded_files(self):
+ return {PROTOCOL_TEST_JS}
+
+ @property
+ def excluded_dirs(self):
+ return {RESOURCES_FOLDER}
+
+
class TestSuite(testsuite.TestSuite):
- def ListTests(self):
- tests = []
- for dirname, dirs, files in os.walk(
- os.path.join(self.root), followlinks=True):
- for dotted in [x for x in dirs if x.startswith('.')]:
- dirs.remove(dotted)
- if dirname.endswith(os.path.sep + RESOURCES_FOLDER):
- continue
- dirs.sort()
- files.sort()
- for filename in files:
- if filename.endswith(".js") and filename != PROTOCOL_TEST_JS:
- fullpath = os.path.join(dirname, filename)
- relpath = fullpath[len(self.root) + 1 : -3]
- testname = relpath.replace(os.path.sep, "/")
- test = self._create_test(testname)
- tests.append(test)
- return tests
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/intl/assert.js b/deps/v8/test/intl/assert.js
index c11e7c0bbf..a6367a8cf2 100644
--- a/deps/v8/test/intl/assert.js
+++ b/deps/v8/test/intl/assert.js
@@ -200,15 +200,68 @@ function assertInstanceof(obj, type) {
}
}
-
/**
* Split a BCP 47 language tag into locale and extension.
*/
function splitLanguageTag(tag) {
- var extRe = /(-[0-9A-Za-z](-[0-9A-Za-z]{2,8})+)+$/;
- var match = %regexp_internal_match(extRe, tag);
- if (match) {
- return { locale: tag.slice(0, match.index), extension: match[0] };
+ // Search for the beginning of one or more extension tags, each of which
+ // contains a singleton tag followed by one or more subtags. The equivalent
+ // regexp is: /(-[0-9A-Za-z](-[0-9A-Za-z]{2,8})+)+$/. For example, in
+ // 'de-DE-u-co-phonebk' the matched extension tags are '-u-co-phonebk'.
+ //
+ // The below is a mini-parser that reads backwards from the end of the string.
+
+ function charCode(char) { return char.charCodeAt(0); }
+ function isAlphaNumeric(code) {
+ return (charCode("0") <= code && code <= charCode("9")) ||
+ (charCode("A") <= code && code <= charCode("Z")) ||
+ (charCode("a") <= code && code <= charCode("z"));
+ }
+
+ const MATCH_SUBTAG = 0;
+ const MATCH_SINGLETON_OR_SUBTAG = 1;
+ let state = MATCH_SUBTAG;
+
+ const MINIMUM_TAG_LENGTH = 2;
+ const MAXIMUM_TAG_LENGTH = 8;
+ let currentTagLength = 0;
+
+ // -1 signifies failure, a non-negative integer is the start index of the
+ // extension tag.
+ let extensionTagStartIndex = -1;
+
+ for (let i = tag.length - 1; i >= 0; i--) {
+ const currentCharCode = tag.charCodeAt(i);
+ if (currentCharCode == charCode("-")) {
+ if (state == MATCH_SINGLETON_OR_SUBTAG && currentTagLength == 1) {
+ // Found the singleton tag, the match succeeded.
+ // Save the matched index, and reset the state. After this point, we
+ // definitely have a match, but we may still find another extension tag
+ // sequence.
+ extensionTagStartIndex = i;
+ state = MATCH_SUBTAG;
+ currentTagLength = 0;
+ } else if (MINIMUM_TAG_LENGTH <= currentTagLength &&
+ currentTagLength <= MAXIMUM_TAG_LENGTH) {
+ // Found a valid subtag.
+ state = MATCH_SINGLETON_OR_SUBTAG;
+ currentTagLength = 0;
+ } else {
+ // Invalid subtag (too short or too long).
+ break;
+ }
+ } else if (isAlphaNumeric(currentCharCode)) {
+ // An alphanumeric character is potentially part of a tag.
+ currentTagLength++;
+ } else {
+ // Any other character is invalid.
+ break;
+ }
+ }
+
+ if (extensionTagStartIndex != -1) {
+ return { locale: tag.substring(0, extensionTagStartIndex),
+ extension: tag.substring(extensionTagStartIndex) };
}
return { locale: tag, extension: '' };
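
To make the rewritten splitLanguageTag concrete, here is its intended behavior on the example from the comment above (an illustration, not part of the test file):

    splitLanguageTag('de-DE-u-co-phonebk');
    // => { locale: 'de-DE', extension: '-u-co-phonebk' }
    splitLanguageTag('en');
    // => { locale: 'en', extension: '' }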
diff --git a/deps/v8/test/intl/bigint/tolocalestring.js b/deps/v8/test/intl/bigint/tolocalestring.js
new file mode 100644
index 0000000000..d0b6792ea8
--- /dev/null
+++ b/deps/v8/test/intl/bigint/tolocalestring.js
@@ -0,0 +1,61 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-bigint
+
+var locales = [
+ "en", // "1,234,567,890,123,456"
+ "de", // "1.234.567.890.123.456"
+ "fr", // "1ā€Æ234ā€Æ567ā€Æ890ā€Æ123ā€Æ456"
+ "hi", // "1,23,45,67,89,01,23,456"
+ "fa", // "Ū±Ł¬Ū²Ū³Ū“Ł¬ŪµŪ¶Ū·Ł¬ŪøŪ¹Ū°Ł¬Ū±Ū²Ū³Ł¬Ū“ŪµŪ¶"
+ "th-u-nu-thai", // "ą¹‘,ą¹’ą¹“ą¹”,ą¹•ą¹–ą¹—,ą¹˜ą¹™ą¹,ą¹‘ą¹’ą¹“,ą¹”ą¹•ą¹–"
+];
+
+var data = [
+ Number.MAX_SAFE_INTEGER,
+ -Number.MAX_SAFE_INTEGER,
+ Math.floor(Number.MAX_SAFE_INTEGER / 2),
+ 0,
+ // -0, // this case is currently broken.
+];
+
+for (var locale of locales) {
+ let nf = new Intl.NumberFormat(locale);
+
+ let percentOption = {style: "percent"};
+ let nfPercent = new Intl.NumberFormat(locale, percentOption);
+ for (var n of data) {
+ let bigint = BigInt(n);
+ // Test that NumberFormat given a Number matches
+ // BigInt.prototype.toLocaleString().
+ assertEquals(nf.format(n), bigint.toLocaleString(locale));
+
+ // Test that NumberFormat output is the same whether the value is a Number or a BigInt.
+ assertEquals(nf.format(n), nf.format(bigint));
+
+ // Test formatToParts
+ assertEquals(nf.formatToParts(n), nf.formatToParts(bigint));
+
+ // Test output with options:
+ // NumberFormat given a Number should match
+ // BigInt.prototype.toLocaleString() with the same options.
+ assertEquals(nfPercent.format(n),
+ bigint.toLocaleString(locale, percentOption));
+
+ // Test that NumberFormat output is the same whether the value is a Number or a BigInt.
+ assertEquals(nfPercent.format(n), nfPercent.format(bigint));
+ assertEquals(nfPercent.formatToParts(n), nfPercent.formatToParts(bigint));
+ }
+
+ // Test very big BigInt
+ let veryBigInt = BigInt(Number.MAX_SAFE_INTEGER) *
+ BigInt(Number.MAX_SAFE_INTEGER) *
+ BigInt(Number.MAX_SAFE_INTEGER);
+ assertEquals(nf.format(veryBigInt), veryBigInt.toLocaleString(locale));
+ // Its output should differ from toString().
+ assertFalse(veryBigInt.toLocaleString(locale) == veryBigInt.toString());
+ assertTrue(veryBigInt.toLocaleString(locale).length >
+ veryBigInt.toString().length);
+}
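
For a concrete sense of what the assertions compare, applying the groupings from the comments at the top of the file to Number.MAX_SAFE_INTEGER gives (illustrative, assuming current CLDR data):

    BigInt(Number.MAX_SAFE_INTEGER).toLocaleString('en'); // "9,007,199,254,740,991"
    BigInt(Number.MAX_SAFE_INTEGER).toLocaleString('de'); // "9.007.199.254.740.991"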
diff --git a/deps/v8/test/intl/break-iterator/subclass.js b/deps/v8/test/intl/break-iterator/subclass.js
index b5ffe61a48..4358721027 100644
--- a/deps/v8/test/intl/break-iterator/subclass.js
+++ b/deps/v8/test/intl/break-iterator/subclass.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-list-format
-
var locales = ["tlh", "id", "en"];
var input = "foo and bar";
var refBreakIterator = new Intl.v8BreakIterator(locales);
diff --git a/deps/v8/test/intl/date-format/check-hc-option.js b/deps/v8/test/intl/date-format/check-hc-option.js
index 276bfe6a23..7a1e917816 100644
--- a/deps/v8/test/intl/date-format/check-hc-option.js
+++ b/deps/v8/test/intl/date-format/check-hc-option.js
@@ -26,7 +26,8 @@ let locales = [
];
invalid_hc.forEach(function(hc) {
- let df = new Intl.DateTimeFormat(["en-u-hc-" + hc + "-fo-obar"]);
+ let df = new Intl.DateTimeFormat(
+ ["en-u-hc-" + hc + "-fo-obar"], {hour: "2-digit"});
assertEquals("en", df.resolvedOptions().locale);
}
);
@@ -34,7 +35,8 @@ invalid_hc.forEach(function(hc) {
valid_hc.forEach(function(hc) {
locales.forEach(function(base) {
let l = base + "-u-hc-" + hc;
- let df = new Intl.DateTimeFormat([l + "-fo-obar"]);
+ let df = new Intl.DateTimeFormat(
+ [l + "-fo-obar"], {hour: "2-digit"});
assertEquals(l, df.resolvedOptions().locale);
});
}
diff --git a/deps/v8/test/intl/date-format/constructor-date-style-order.js b/deps/v8/test/intl/date-format/constructor-date-style-order.js
new file mode 100644
index 0000000000..8e601b48d3
--- /dev/null
+++ b/deps/v8/test/intl/date-format/constructor-date-style-order.js
@@ -0,0 +1,108 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-datetime-style
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+let weekday = new Array();
+let year = new Array();
+let month = new Array();
+let day = new Array();
+let hour = new Array();
+let minute = new Array();
+let second = new Array();
+let localeMatcher = new Array();
+let hour12 = new Array();
+let hourCycle = new Array();
+let dateStyle = new Array();
+let timeStyle = new Array();
+let timeZone = new Array();
+let era = new Array();
+let timeZoneName = new Array();
+let formatMatcher = new Array();
+
+new Intl.DateTimeFormat(['en-US'], {
+ get weekday() {
+ weekday.push(++getCount);
+ },
+ get year() {
+ year.push(++getCount);
+ },
+ get month() {
+ month.push(++getCount);
+ },
+ get day() {
+ day.push(++getCount);
+ },
+ get hour() {
+ hour.push(++getCount);
+ },
+ get minute() {
+ minute.push(++getCount);
+ },
+ get second() {
+ second.push(++getCount);
+ },
+ get localeMatcher() {
+ localeMatcher.push(++getCount);
+ },
+ get hour12() {
+ hour12.push(++getCount);
+ },
+ get hourCycle() {
+ hourCycle.push(++getCount);
+ },
+ get timeZone() {
+ timeZone.push(++getCount);
+ },
+ get dateStyle() {
+ dateStyle.push(++getCount);
+ return "full";
+ },
+ get timeStyle() {
+ timeStyle.push(++getCount);
+ },
+ get era() {
+ era.push(++getCount);
+ },
+ get timeZoneName() {
+ timeZoneName.push(++getCount);
+ },
+ get formatMatcher() {
+ formatMatcher.push(++getCount);
+ }
+});
+
+assertEquals(1, weekday.length);
+assertEquals(1, weekday[0]);
+assertEquals(1, year.length);
+assertEquals(2, year[0]);
+assertEquals(1, month.length);
+assertEquals(3, month[0]);
+assertEquals(1, day.length);
+assertEquals(4, day[0]);
+assertEquals(1, hour.length);
+assertEquals(5, hour[0]);
+assertEquals(1, minute.length);
+assertEquals(6, minute[0]);
+assertEquals(1, second.length);
+assertEquals(7, second[0]);
+assertEquals(1, localeMatcher.length);
+assertEquals(8, localeMatcher[0]);
+assertEquals(1, hour12.length);
+assertEquals(9, hour12[0]);
+assertEquals(1, hourCycle.length);
+assertEquals(10, hourCycle[0]);
+assertEquals(1, timeZone.length);
+assertEquals(11, timeZone[0]);
+assertEquals(1, dateStyle.length);
+assertEquals(12, dateStyle[0]);
+assertEquals(1, timeStyle.length);
+assertEquals(13, timeStyle[0]);
+assertEquals(0, era.length);
+assertEquals(0, timeZoneName.length);
+assertEquals(0, formatMatcher.length);
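
The probing pattern this test and its siblings share: every option is defined as a getter that records a shared counter, so the assertions pin down the exact order in which the Intl.DateTimeFormat constructor reads its options. Reduced to its core (a sketch, not part of the patch):

    const order = [];
    new Intl.DateTimeFormat('en', {
      get timeZone()  { order.push('timeZone'); },
      get dateStyle() { order.push('dateStyle'); return 'full'; },
    });
    // order is now ['timeZone', 'dateStyle'], matching the read order asserted above.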
diff --git a/deps/v8/test/intl/date-format/constructor-date-time-style-order.js b/deps/v8/test/intl/date-format/constructor-date-time-style-order.js
new file mode 100644
index 0000000000..d4d114662f
--- /dev/null
+++ b/deps/v8/test/intl/date-format/constructor-date-time-style-order.js
@@ -0,0 +1,109 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-datetime-style
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+let weekday = new Array();
+let year = new Array();
+let month = new Array();
+let day = new Array();
+let hour = new Array();
+let minute = new Array();
+let second = new Array();
+let localeMatcher = new Array();
+let hour12 = new Array();
+let hourCycle = new Array();
+let dateStyle = new Array();
+let timeStyle = new Array();
+let timeZone = new Array();
+let era = new Array();
+let timeZoneName = new Array();
+let formatMatcher = new Array();
+
+new Intl.DateTimeFormat(['en-US'], {
+ get weekday() {
+ weekday.push(++getCount);
+ },
+ get year() {
+ year.push(++getCount);
+ },
+ get month() {
+ month.push(++getCount);
+ },
+ get day() {
+ day.push(++getCount);
+ },
+ get hour() {
+ hour.push(++getCount);
+ },
+ get minute() {
+ minute.push(++getCount);
+ },
+ get second() {
+ second.push(++getCount);
+ },
+ get localeMatcher() {
+ localeMatcher.push(++getCount);
+ },
+ get hour12() {
+ hour12.push(++getCount);
+ },
+ get hourCycle() {
+ hourCycle.push(++getCount);
+ },
+ get timeZone() {
+ timeZone.push(++getCount);
+ },
+ get dateStyle() {
+ dateStyle.push(++getCount);
+ return "full";
+ },
+ get timeStyle() {
+ timeStyle.push(++getCount);
+ return "full";
+ },
+ get era() {
+ era.push(++getCount);
+ },
+ get timeZoneName() {
+ timeZoneName.push(++getCount);
+ },
+ get formatMatcher() {
+ formatMatcher.push(++getCount);
+ }
+});
+
+assertEquals(1, weekday.length);
+assertEquals(1, weekday[0]);
+assertEquals(1, year.length);
+assertEquals(2, year[0]);
+assertEquals(1, month.length);
+assertEquals(3, month[0]);
+assertEquals(1, day.length);
+assertEquals(4, day[0]);
+assertEquals(1, hour.length);
+assertEquals(5, hour[0]);
+assertEquals(1, minute.length);
+assertEquals(6, minute[0]);
+assertEquals(1, second.length);
+assertEquals(7, second[0]);
+assertEquals(1, localeMatcher.length);
+assertEquals(8, localeMatcher[0]);
+assertEquals(1, hour12.length);
+assertEquals(9, hour12[0]);
+assertEquals(1, hourCycle.length);
+assertEquals(10, hourCycle[0]);
+assertEquals(1, timeZone.length);
+assertEquals(11, timeZone[0]);
+assertEquals(1, dateStyle.length);
+assertEquals(12, dateStyle[0]);
+assertEquals(1, timeStyle.length);
+assertEquals(13, timeStyle[0]);
+assertEquals(0, era.length);
+assertEquals(0, timeZoneName.length);
+assertEquals(0, formatMatcher.length);
diff --git a/deps/v8/test/intl/date-format/constructor-date-time-style.js b/deps/v8/test/intl/date-format/constructor-date-time-style.js
new file mode 100644
index 0000000000..f4bc40b396
--- /dev/null
+++ b/deps/v8/test/intl/date-format/constructor-date-time-style.js
@@ -0,0 +1,33 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-datetime-style
+
+var validStyle = ["full", "long", "medium", "short", undefined];
+var invalidStyle = ["narrow", "numeric"];
+
+validStyle.forEach(function(dateStyle) {
+ validStyle.forEach(function(timeStyle) {
+ assertDoesNotThrow(() =>
+ new Intl.DateTimeFormat("en", {dateStyle, timeStyle}));
+ });
+
+ invalidStyle.forEach(function(timeStyle) {
+ assertThrows(() =>
+ new Intl.DateTimeFormat("en", {dateStyle, timeStyle}), RangeError);
+ });
+}
+);
+
+invalidStyle.forEach(function(dateStyle) {
+ validStyle.forEach(function(timeStyle) {
+ assertThrows(() =>
+ new Intl.DateTimeFormat("en", {dateStyle, timeStyle}), RangeError);
+ });
+ invalidStyle.forEach(function(timeStyle) {
+ assertThrows(() =>
+ new Intl.DateTimeFormat("en", {dateStyle, timeStyle}), RangeError);
+ });
+}
+);
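
In other words, 'narrow' and 'numeric' are rejected for either style option, regardless of what the companion option holds. The two ends of the matrix (illustrative):

    new Intl.DateTimeFormat('en', { dateStyle: 'full', timeStyle: 'short' }); // ok
    new Intl.DateTimeFormat('en', { dateStyle: 'narrow' }); // throws RangeError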
diff --git a/deps/v8/test/intl/date-format/constructor-no-style-order.js b/deps/v8/test/intl/date-format/constructor-no-style-order.js
new file mode 100644
index 0000000000..bd4bc4cc37
--- /dev/null
+++ b/deps/v8/test/intl/date-format/constructor-no-style-order.js
@@ -0,0 +1,114 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-datetime-style
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+let weekday = new Array();
+let year = new Array();
+let month = new Array();
+let day = new Array();
+let hour = new Array();
+let minute = new Array();
+let second = new Array();
+let localeMatcher = new Array();
+let hour12 = new Array();
+let hourCycle = new Array();
+let dateStyle = new Array();
+let timeStyle = new Array();
+let timeZone = new Array();
+let era = new Array();
+let timeZoneName = new Array();
+let formatMatcher = new Array();
+
+new Intl.DateTimeFormat(['en-US'], {
+ get weekday() {
+ weekday.push(++getCount);
+ },
+ get year() {
+ year.push(++getCount);
+ },
+ get month() {
+ month.push(++getCount);
+ },
+ get day() {
+ day.push(++getCount);
+ },
+ get hour() {
+ hour.push(++getCount);
+ },
+ get minute() {
+ minute.push(++getCount);
+ },
+ get second() {
+ second.push(++getCount);
+ },
+ get localeMatcher() {
+ localeMatcher.push(++getCount);
+ },
+ get hour12() {
+ hour12.push(++getCount);
+ },
+ get hourCycle() {
+ hourCycle.push(++getCount);
+ },
+ get timeZone() {
+ timeZone.push(++getCount);
+ },
+ get dateStyle() {
+ dateStyle.push(++getCount);
+ },
+ get timeStyle() {
+ timeStyle.push(++getCount);
+ },
+ get era() {
+ era.push(++getCount);
+ },
+ get timeZoneName() {
+ timeZoneName.push(++getCount);
+ },
+ get formatMatcher() {
+ formatMatcher.push(++getCount);
+ }
+});
+
+assertEquals(2, weekday.length);
+assertEquals(1, weekday[0]);
+assertEquals(1, year.length);
+assertEquals(2, year[0]);
+assertEquals(1, month.length);
+assertEquals(3, month[0]);
+assertEquals(1, day.length);
+assertEquals(4, day[0]);
+assertEquals(2, hour.length);
+assertEquals(5, hour[0]);
+assertEquals(2, minute.length);
+assertEquals(6, minute[0]);
+assertEquals(2, second.length);
+assertEquals(7, second[0]);
+assertEquals(1, localeMatcher.length);
+assertEquals(8, localeMatcher[0]);
+assertEquals(1, hour12.length);
+assertEquals(9, hour12[0]);
+assertEquals(1, hourCycle.length);
+assertEquals(10, hourCycle[0]);
+assertEquals(1, timeZone.length);
+assertEquals(11, timeZone[0]);
+assertEquals(1, dateStyle.length);
+assertEquals(12, dateStyle[0]);
+assertEquals(1, timeStyle.length);
+assertEquals(13, timeStyle[0]);
+assertEquals(14, weekday[1]);
+assertEquals(1, era.length);
+assertEquals(15, era[0]);
+assertEquals(16, hour[1]);
+assertEquals(17, minute[1]);
+assertEquals(18, second[1]);
+assertEquals(1, timeZoneName.length);
+assertEquals(19, timeZoneName[0]);
+assertEquals(1, formatMatcher.length);
+assertEquals(20, formatMatcher[0]);
diff --git a/deps/v8/test/intl/date-format/constructor-time-style-order.js b/deps/v8/test/intl/date-format/constructor-time-style-order.js
new file mode 100644
index 0000000000..d35f21a196
--- /dev/null
+++ b/deps/v8/test/intl/date-format/constructor-time-style-order.js
@@ -0,0 +1,108 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-datetime-style
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+let weekday = new Array();
+let year = new Array();
+let month = new Array();
+let day = new Array();
+let hour = new Array();
+let minute = new Array();
+let second = new Array();
+let localeMatcher = new Array();
+let hour12 = new Array();
+let hourCycle = new Array();
+let dateStyle = new Array();
+let timeStyle = new Array();
+let timeZone = new Array();
+let era = new Array();
+let timeZoneName = new Array();
+let formatMatcher = new Array();
+
+new Intl.DateTimeFormat(['en-US'], {
+ get weekday() {
+ weekday.push(++getCount);
+ },
+ get year() {
+ year.push(++getCount);
+ },
+ get month() {
+ month.push(++getCount);
+ },
+ get day() {
+ day.push(++getCount);
+ },
+ get hour() {
+ hour.push(++getCount);
+ },
+ get minute() {
+ minute.push(++getCount);
+ },
+ get second() {
+ second.push(++getCount);
+ },
+ get localeMatcher() {
+ localeMatcher.push(++getCount);
+ },
+ get hour12() {
+ hour12.push(++getCount);
+ },
+ get hourCycle() {
+ hourCycle.push(++getCount);
+ },
+ get timeZone() {
+ timeZone.push(++getCount);
+ },
+ get dateStyle() {
+ dateStyle.push(++getCount);
+ },
+ get timeStyle() {
+ timeStyle.push(++getCount);
+ return "full";
+ },
+ get era() {
+ era.push(++getCount);
+ },
+ get timeZoneName() {
+ timeZoneName.push(++getCount);
+ },
+ get formatMatcher() {
+ formatMatcher.push(++getCount);
+ }
+});
+
+assertEquals(1, weekday.length);
+assertEquals(1, weekday[0]);
+assertEquals(1, year.length);
+assertEquals(2, year[0]);
+assertEquals(1, month.length);
+assertEquals(3, month[0]);
+assertEquals(1, day.length);
+assertEquals(4, day[0]);
+assertEquals(1, hour.length);
+assertEquals(5, hour[0]);
+assertEquals(1, minute.length);
+assertEquals(6, minute[0]);
+assertEquals(1, second.length);
+assertEquals(7, second[0]);
+assertEquals(1, localeMatcher.length);
+assertEquals(8, localeMatcher[0]);
+assertEquals(1, hour12.length);
+assertEquals(9, hour12[0]);
+assertEquals(1, hourCycle.length);
+assertEquals(10, hourCycle[0]);
+assertEquals(1, timeZone.length);
+assertEquals(11, timeZone[0]);
+assertEquals(1, dateStyle.length);
+assertEquals(12, dateStyle[0]);
+assertEquals(1, timeStyle.length);
+assertEquals(13, timeStyle[0]);
+assertEquals(0, era.length);
+assertEquals(0, timeZoneName.length);
+assertEquals(0, formatMatcher.length);
diff --git a/deps/v8/test/intl/date-format/property-override-date-style.js b/deps/v8/test/intl/date-format/property-override-date-style.js
new file mode 100644
index 0000000000..67d9bc5361
--- /dev/null
+++ b/deps/v8/test/intl/date-format/property-override-date-style.js
@@ -0,0 +1,54 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-datetime-style
+
+// Checks for security holes introduced by overriding properties on built-in prototypes.
+// For example:
+// Object.defineProperty(Array.prototype, 'locale', {
+// set: function(value) {
+// throw new Error('blah');
+// },
+// configurable: true,
+// enumerable: false
+// });
+//
+// would throw in case of (JS) x.locale = 'us' or (C++) x->Set('locale', 'us').
+//
+// First get supported properties.
+// Some of the properties are optional, so we request them.
+var properties = [];
+var options = Intl.DateTimeFormat(
+ 'en-US', {dateStyle: 'full'}).resolvedOptions();
+for (var prop in options) {
+ if (options.hasOwnProperty(prop)) {
+ properties.push(prop);
+ }
+}
+
+// In the order of Table 6 of
+// ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions
+var expectedProperties = [
+ 'locale',
+ 'calendar',
+ 'numberingSystem',
+ 'timeZone',
+ 'hourCycle',
+ 'hour12',
+ 'weekday',
+ 'year',
+ 'month',
+ 'day',
+ 'dateStyle',
+];
+
+assertEquals(expectedProperties.length, properties.length);
+
+properties.forEach(function(prop) {
+ assertFalse(expectedProperties.indexOf(prop) === -1);
+});
+
+taintProperties(properties);
+
+var locale = Intl.DateTimeFormat().resolvedOptions().locale;
diff --git a/deps/v8/test/intl/date-format/property-override-date-time-style.js b/deps/v8/test/intl/date-format/property-override-date-time-style.js
new file mode 100644
index 0000000000..f51d6f31a6
--- /dev/null
+++ b/deps/v8/test/intl/date-format/property-override-date-time-style.js
@@ -0,0 +1,59 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-datetime-style
+
+// Checks for security holes introduced by overriding properties on built-in prototypes.
+// For example:
+// Object.defineProperty(Array.prototype, 'locale', {
+// set: function(value) {
+// throw new Error('blah');
+// },
+// configurable: true,
+// enumerable: false
+// });
+//
+// would throw in case of (JS) x.locale = 'us' or (C++) x->Set('locale', 'us').
+
+// First get supported properties.
+// Some of the properties are optional, so we request them.
+var properties = [];
+var options = Intl.DateTimeFormat(
+ 'en-US', {dateStyle: 'full', timeStyle: 'full'}).resolvedOptions();
+for (var prop in options) {
+ if (options.hasOwnProperty(prop)) {
+ properties.push(prop);
+ }
+}
+
+// In the order of Table 6 of
+// ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions
+var expectedProperties = [
+ 'locale',
+ 'calendar',
+ 'numberingSystem',
+ 'timeZone',
+ 'hourCycle',
+ 'hour12',
+ 'weekday',
+ 'year',
+ 'month',
+ 'day',
+ 'hour',
+ 'minute',
+ 'second',
+ 'timeZoneName',
+ 'dateStyle',
+ 'timeStyle',
+];
+
+assertEquals(expectedProperties.length, properties.length);
+
+properties.forEach(function(prop) {
+ assertFalse(expectedProperties.indexOf(prop) === -1);
+});
+
+taintProperties(properties);
+
+var locale = Intl.DateTimeFormat().resolvedOptions().locale;
diff --git a/deps/v8/test/intl/date-format/property-override-time-style.js b/deps/v8/test/intl/date-format/property-override-time-style.js
new file mode 100644
index 0000000000..1b93ac633f
--- /dev/null
+++ b/deps/v8/test/intl/date-format/property-override-time-style.js
@@ -0,0 +1,54 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-datetime-style
+
+// Checks for security holes introduced by overriding properties on built-in prototypes.
+// For example:
+// Object.defineProperty(Array.prototype, 'locale', {
+// set: function(value) {
+// throw new Error('blah');
+// },
+// configurable: true,
+// enumerable: false
+// });
+//
+// would throw in case of (JS) x.locale = 'us' or (C++) x->Set('locale', 'us').
+
+// First get supported properties.
+// Some of the properties are optional, so we request them.
+var properties = [];
+var options = Intl.DateTimeFormat(
+ 'en-US', {timeStyle: 'full'}).resolvedOptions();
+for (var prop in options) {
+ if (options.hasOwnProperty(prop)) {
+ properties.push(prop);
+ }
+}
+
+// In the order of Table 6 of
+// ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions
+var expectedProperties = [
+ 'locale',
+ 'calendar',
+ 'numberingSystem',
+ 'timeZone',
+ 'hourCycle',
+ 'hour12',
+ 'hour',
+ 'minute',
+ 'second',
+ 'timeZoneName',
+ 'timeStyle',
+];
+
+assertEquals(expectedProperties.length, properties.length);
+
+properties.forEach(function(prop) {
+ assertFalse(expectedProperties.indexOf(prop) === -1);
+});
+
+taintProperties(properties);
+
+var locale = Intl.DateTimeFormat().resolvedOptions().locale;
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index 83e546db76..53f42e4b66 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -46,6 +46,9 @@
# Unable to change locale on Windows:
'default_locale': [SKIP],
+
+ # Unable to change locale and TZ on Windows:
+ 'regress-7770': [SKIP],
}], # 'system == windows'
['system == android', {
@@ -56,5 +59,12 @@
'relative-time-format/default-locale-fr-CA': [SKIP],
'relative-time-format/default-locale-pt-BR': [SKIP],
'default_locale': [SKIP],
+ # Unable to change locale and TZ on Android:
+ 'regress-7770': [SKIP],
}], # 'system == android'
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
]
diff --git a/deps/v8/test/intl/list-format/constructor.js b/deps/v8/test/intl/list-format/constructor.js
index d730516c9c..05ddf932a9 100644
--- a/deps/v8/test/intl/list-format/constructor.js
+++ b/deps/v8/test/intl/list-format/constructor.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-list-format
-
// ListFormat constructor can't be called as function.
assertThrows(() => Intl.ListFormat(['sr']), TypeError);
diff --git a/deps/v8/test/intl/list-format/format-en.js b/deps/v8/test/intl/list-format/format-en.js
index d628537990..5aa16d2933 100644
--- a/deps/v8/test/intl/list-format/format-en.js
+++ b/deps/v8/test/intl/list-format/format-en.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-list-format
-
// The following tests are not part of the conformance suite; they are just
// some output in English to verify the format returns something reasonable.
// It may change when we update the CLDR data.
diff --git a/deps/v8/test/intl/list-format/format-to-parts.js b/deps/v8/test/intl/list-format/format-to-parts.js
index 64eac823ba..038fb761ea 100644
--- a/deps/v8/test/intl/list-format/format-to-parts.js
+++ b/deps/v8/test/intl/list-format/format-to-parts.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-list-format
-
function assertListFormat(listFormat, input) {
var result;
try {
diff --git a/deps/v8/test/intl/list-format/format.js b/deps/v8/test/intl/list-format/format.js
index fef05c38e0..a10e9a092e 100644
--- a/deps/v8/test/intl/list-format/format.js
+++ b/deps/v8/test/intl/list-format/format.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-list-format
-
function assertListFormat(listFormat, input) {
try {
let result = listFormat.format(input);
diff --git a/deps/v8/test/intl/list-format/formatToParts-zh.js b/deps/v8/test/intl/list-format/formatToParts-zh.js
index a7204b0b29..1279c86204 100644
--- a/deps/v8/test/intl/list-format/formatToParts-zh.js
+++ b/deps/v8/test/intl/list-format/formatToParts-zh.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-list-format
-
// The following tests are not part of the conformance suite; they are just
// some output in Chinese to verify the format returns something reasonable.
// It may change when we update the CLDR data.
diff --git a/deps/v8/test/intl/list-format/resolved-options.js b/deps/v8/test/intl/list-format/resolved-options.js
index b5662718e5..42687990f9 100644
--- a/deps/v8/test/intl/list-format/resolved-options.js
+++ b/deps/v8/test/intl/list-format/resolved-options.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-list-format
-
let listFormat = new Intl.ListFormat();
// The default style is 'long'
assertEquals('long', listFormat.resolvedOptions().style);
diff --git a/deps/v8/test/intl/list-format/supported-locale.js b/deps/v8/test/intl/list-format/supported-locale.js
index 1eac25d618..0aebeb86a1 100644
--- a/deps/v8/test/intl/list-format/supported-locale.js
+++ b/deps/v8/test/intl/list-format/supported-locale.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-list-format
assertEquals(typeof Intl.ListFormat.supportedLocalesOf, "function",
"Intl.ListFormat.supportedLocalesOf should be a function");
diff --git a/deps/v8/test/intl/regress-7770.js b/deps/v8/test/intl/regress-7770.js
new file mode 100644
index 0000000000..2e7c2ce22d
--- /dev/null
+++ b/deps/v8/test/intl/regress-7770.js
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Environment Variables: TZ=Indian/Kerguelen LANG=uk
+assertEquals(
+ "Fri Feb 01 2019 00:00:00 GMT+0500 (Š·Š° чŠ°ŃŠ¾Š¼ Š½Š° Š¤Ń€Š°Š½Ń†ŃƒŠ·ŃŒŠŗŠøх ŠŸŃ–Š²Š“ŠµŠ½Š½Šøх і ŠŠ½Ń‚Š°Ń€ŠŗтŠøчŠ½Šøх тŠµŃ€ŠøтŠ¾Ń€Ń–ŃŃ…)",
+ new Date(2019, 1,1).toString());
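
A small reading aid for the assertion: the Date month argument is zero-based, so the constructed value is Feb 1, 2019, rendered with TZ=Indian/Kerguelen (GMT+5) and the uk locale:

    new Date(2019, 1, 1).getMonth(); // 1, i.e. February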
diff --git a/deps/v8/test/intl/regress-8030.js b/deps/v8/test/intl/regress-8030.js
index eac6b84f81..cf0e1aa2a9 100644
--- a/deps/v8/test/intl/regress-8030.js
+++ b/deps/v8/test/intl/regress-8030.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format
-
var locales = ["tlh", "id", "en"];
var referenceRelativeTimeFormat = new Intl.RelativeTimeFormat(locales);
var referenceFormatted = referenceRelativeTimeFormat.format(3, "day");
diff --git a/deps/v8/test/intl/regress-8031.js b/deps/v8/test/intl/regress-8031.js
index 0898026d99..513ef025fe 100644
--- a/deps/v8/test/intl/regress-8031.js
+++ b/deps/v8/test/intl/regress-8031.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-list-format
-
var locales = ["tlh", "id", "en"];
var input = ["a", "b", "c"];
var referenceListFormat = new Intl.ListFormat(locales);
diff --git a/deps/v8/test/intl/regress-930304.js b/deps/v8/test/intl/regress-930304.js
new file mode 100644
index 0000000000..85bcfea76e
--- /dev/null
+++ b/deps/v8/test/intl/regress-930304.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertDoesNotThrow(() => Intl.DateTimeFormat('en-u-nu-ethi'));
diff --git a/deps/v8/test/intl/relative-time-format/constructor.js b/deps/v8/test/intl/relative-time-format/constructor.js
index ba03e1dd70..f1a4057426 100644
--- a/deps/v8/test/intl/relative-time-format/constructor.js
+++ b/deps/v8/test/intl/relative-time-format/constructor.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format
-
// RelativeTimeFormat constructor can't be called as function.
assertThrows(() => Intl.RelativeTimeFormat('sr'), TypeError);
diff --git a/deps/v8/test/intl/relative-time-format/default-locale-fr-CA.js b/deps/v8/test/intl/relative-time-format/default-locale-fr-CA.js
index 32f64ee02d..9f24329b50 100644
--- a/deps/v8/test/intl/relative-time-format/default-locale-fr-CA.js
+++ b/deps/v8/test/intl/relative-time-format/default-locale-fr-CA.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format --harmony-locale
// Environment Variables: LC_ALL=fr_CA
assertEquals(
'fr-CA',
diff --git a/deps/v8/test/intl/relative-time-format/default-locale-pt-BR.js b/deps/v8/test/intl/relative-time-format/default-locale-pt-BR.js
index 89f7aa14f0..ea66b6a0e5 100644
--- a/deps/v8/test/intl/relative-time-format/default-locale-pt-BR.js
+++ b/deps/v8/test/intl/relative-time-format/default-locale-pt-BR.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format --harmony-locale
// Environment Variables: LC_ALL=pt_BR
assertEquals(
'pt-BR',
diff --git a/deps/v8/test/intl/relative-time-format/format-en.js b/deps/v8/test/intl/relative-time-format/format-en.js
index 2af755dcbf..a365749f0a 100644
--- a/deps/v8/test/intl/relative-time-format/format-en.js
+++ b/deps/v8/test/intl/relative-time-format/format-en.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format
-
// The following tests are not part of the conformance suite; they are just
// some output in English to verify the format returns something reasonable.
// It may change when we update the CLDR data.
diff --git a/deps/v8/test/intl/relative-time-format/format-to-parts-en.js b/deps/v8/test/intl/relative-time-format/format-to-parts-en.js
index 689059f4cd..7c2076b312 100644
--- a/deps/v8/test/intl/relative-time-format/format-to-parts-en.js
+++ b/deps/v8/test/intl/relative-time-format/format-to-parts-en.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format
-
// The following tests are not part of the conformance suite; they are just
// some output in English to verify the format returns something reasonable.
// It may change when we update the CLDR data.
diff --git a/deps/v8/test/intl/relative-time-format/format-to-parts-plural.js b/deps/v8/test/intl/relative-time-format/format-to-parts-plural.js
index 7e5e1b79a6..bd70f75421 100644
--- a/deps/v8/test/intl/relative-time-format/format-to-parts-plural.js
+++ b/deps/v8/test/intl/relative-time-format/format-to-parts-plural.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format
-
// Check plural w/ formatToParts
// http://tc39.github.io/proposal-intl-relative-time/
diff --git a/deps/v8/test/intl/relative-time-format/format-to-parts.js b/deps/v8/test/intl/relative-time-format/format-to-parts.js
index 071c4468c0..ccc9170225 100644
--- a/deps/v8/test/intl/relative-time-format/format-to-parts.js
+++ b/deps/v8/test/intl/relative-time-format/format-to-parts.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format
-
// Make sure that RelativeTimeFormat exposes all required properties. Those
// not specified should have the value undefined.
// http://tc39.github.io/proposal-intl-relative-time/
diff --git a/deps/v8/test/intl/relative-time-format/format.js b/deps/v8/test/intl/relative-time-format/format.js
index 769358423d..e458ad728d 100644
--- a/deps/v8/test/intl/relative-time-format/format.js
+++ b/deps/v8/test/intl/relative-time-format/format.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format
-
// Make sure that RelativeTimeFormat exposes all required properties. Those
// not specified should have the value undefined.
// http://tc39.github.io/proposal-intl-relative-time/
diff --git a/deps/v8/test/intl/relative-time-format/resolved-options-nu.js b/deps/v8/test/intl/relative-time-format/resolved-options-nu.js
index fb1fa72a93..a01cb5d9c4 100644
--- a/deps/v8/test/intl/relative-time-format/resolved-options-nu.js
+++ b/deps/v8/test/intl/relative-time-format/resolved-options-nu.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format
-
// For locale default the numberingSystem to 'latn'
assertEquals(
"latn",
diff --git a/deps/v8/test/intl/relative-time-format/resolved-options.js b/deps/v8/test/intl/relative-time-format/resolved-options.js
index 391b83ae0a..1caa4f86c9 100644
--- a/deps/v8/test/intl/relative-time-format/resolved-options.js
+++ b/deps/v8/test/intl/relative-time-format/resolved-options.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format
-
let rtf = new Intl.RelativeTimeFormat();
// Test 1.4.5 Intl.RelativeTimeFormat.prototype.resolvedOptions ()
// The default style is 'long'
diff --git a/deps/v8/test/intl/relative-time-format/supported-locale.js b/deps/v8/test/intl/relative-time-format/supported-locale.js
index b24cfb27af..5c177b4777 100644
--- a/deps/v8/test/intl/relative-time-format/supported-locale.js
+++ b/deps/v8/test/intl/relative-time-format/supported-locale.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-relative-time-format
assertEquals(typeof Intl.RelativeTimeFormat.supportedLocalesOf, "function",
"Intl.RelativeTimeFormat.supportedLocalesOf should be a function");
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 8cf26f1a61..66da4c77b5 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -33,23 +33,16 @@ from testrunner.objects import testcase
ENV_PATTERN = re.compile(r"//\s+Environment Variables:(.*)")
+
+class TestLoader(testsuite.JSTestLoader):
+ @property
+ def excluded_files(self):
+ return {"assert.js", "utils.js"}
+
+
class TestSuite(testsuite.TestSuite):
- def ListTests(self):
- tests = []
- for dirname, dirs, files in os.walk(self.root):
- for dotted in [x for x in dirs if x.startswith('.')]:
- dirs.remove(dotted)
- dirs.sort()
- files.sort()
- for filename in files:
- if (filename.endswith(".js") and filename != "assert.js" and
- filename != "utils.js"):
- fullpath = os.path.join(dirname, filename)
- relpath = fullpath[len(self.root) + 1 : -3]
- testname = relpath.replace(os.path.sep, "/")
- test = self._create_test(testname)
- tests.append(test)
- return tests
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/js-perf-test/ArrayInOperator/run.js b/deps/v8/test/js-perf-test/ArrayInOperator/run.js
new file mode 100644
index 0000000000..db63584499
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayInOperator/run.js
@@ -0,0 +1,245 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Compare the `in` operator on different types of arrays.
+
+const size = 1e5;
+let packed_smi = [];
+let packed_double = [];
+let packed_elements = [];
+let holey_smi = new Array(size);
+let holey_double = new Array(size);
+let holey_elements = new Array(size);
+let sparse_smi = new Array(size);
+let sparse_double = new Array(size);
+let sparse_elements = new Array(size);
+let typed_uint8 = new Uint8Array(size);
+let typed_int32 = new Int32Array(size);
+let typed_float = new Float64Array(size);
+
+for (let i = 0; i < size; ++i) {
+ packed_smi[i] = i;
+ packed_double[i] = i + 0.1;
+ packed_elements[i] = "" + i;
+ holey_smi[i] = i;
+ holey_double[i] = i + 0.1;
+ holey_elements[i] = "" + i;
+ typed_uint8[i] = i % 0x100;
+ typed_int32[i] = i;
+ typed_float[i] = i + 0.1;
+}
+
+let sparse = 0;
+for (let i = 0; i < size; i += 100) {
+ ++sparse;
+ sparse_smi[i] = i;
+ sparse_double[i] = i + 0.1;
+ sparse_elements[i] = "" + i;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Packed SMI
+// ----------------------------------------------------------------------------
+
+function PackedSMI() {
+ let cnt = 0;
+ let ary = packed_smi;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != size) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Packed Double
+// ----------------------------------------------------------------------------
+
+function PackedDouble() {
+ let cnt = 0;
+ let ary = packed_double;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != size) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Packed Elements
+// ----------------------------------------------------------------------------
+
+function PackedElements() {
+ let cnt = 0;
+ let ary = packed_elements;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != size) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Holey SMI
+// ----------------------------------------------------------------------------
+
+function HoleySMI() {
+ let cnt = 0;
+ let ary = holey_smi;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != size) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Holey Double
+// ----------------------------------------------------------------------------
+
+function HoleyDouble() {
+ let cnt = 0;
+ let ary = holey_double;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != size) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Holey Elements
+// ----------------------------------------------------------------------------
+
+function HoleyElements() {
+ let cnt = 0;
+ let ary = holey_elements;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != size) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Sparse SMI
+// ----------------------------------------------------------------------------
+
+function SparseSMI() {
+ let cnt = 0;
+ let ary = sparse_smi;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != sparse) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Sparse Double
+// ----------------------------------------------------------------------------
+
+function SparseDouble() {
+ let cnt = 0;
+ let ary = sparse_double;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != sparse) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Sparse Elements
+// ----------------------------------------------------------------------------
+
+function SparseElements() {
+ let cnt = 0;
+ let ary = sparse_elements;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != sparse) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Typed Uint8
+// ----------------------------------------------------------------------------
+
+function TypedUint8() {
+ let cnt = 0;
+ let ary = typed_uint8;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != size) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Typed Int32
+// ----------------------------------------------------------------------------
+
+function TypedInt32() {
+ let cnt = 0;
+ let ary = typed_int32;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != size) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Typed Float64
+// ----------------------------------------------------------------------------
+
+function TypedFloat64() {
+ let cnt = 0;
+ let ary = typed_float;
+ for (let i = 0; i < ary.length; ++i) {
+ if (i in ary) ++cnt;
+ }
+
+ if (cnt != size) throw 666;
+}
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayInOperator(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 5, f) ]);
+}
+
+CreateBenchmark('PackedSMI', PackedSMI);
+CreateBenchmark('PackedDouble', PackedDouble);
+CreateBenchmark('PackedElements', PackedElements);
+CreateBenchmark('HoleySMI', HoleySMI);
+CreateBenchmark('HoleyDouble', HoleyDouble);
+CreateBenchmark('HoleyElements', HoleyElements);
+CreateBenchmark('SparseSMI', SparseSMI);
+CreateBenchmark('SparseDouble', SparseDouble);
+CreateBenchmark('SparseElements', SparseElements);
+CreateBenchmark('TypedUint8', TypedUint8);
+CreateBenchmark('TypedInt32', TypedInt32);
+CreateBenchmark('TypedFloat64', TypedFloat64);
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = true;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
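
Background for the Holey and Sparse variants above: the `in` operator reports index presence, not value, so holes inside an array's length yield false. A tiny illustration:

    const packed = [10, 20, 30];
    const holey = [10, , 30]; // index 1 is a hole
    1 in packed; // true
    1 in holey;  // false: the property is absent, not undefined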
diff --git a/deps/v8/test/js-perf-test/Intl/constructor.js b/deps/v8/test/js-perf-test/Intl/constructor.js
new file mode 100644
index 0000000000..e5b3a86694
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Intl/constructor.js
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+function NewIntlCollator() {
+ let obj = new Intl.Collator();
+}
+createSuite('NewIntlCollator', 100, NewIntlCollator, ()=>{});
+
+function NewIntlDateTimeFormat() {
+ let obj = new Intl.DateTimeFormat();
+}
+createSuite('NewIntlDateTimeFormat', 100, NewIntlDateTimeFormat, ()=>{});
+
+function NewIntlNumberFormat() {
+ let obj = new Intl.NumberFormat();
+}
+createSuite('NewIntlNumberFormat', 100, NewIntlNumberFormat, ()=>{});
+
+function NewIntlPluralRules() {
+ let obj = new Intl.PluralRules();
+}
+createSuite('NewIntlPluralRules', 100, NewIntlPluralRules, ()=>{});
+
+function NewIntlListFormat() {
+ let obj = new Intl.ListFormat();
+}
+createSuite('NewIntlListFormat', 100, NewIntlListFormat, ()=>{});
+
+function NewIntlRelativeTimeFormat() {
+ let obj = new Intl.RelativeTimeFormat();
+}
+createSuite('NewIntlRelativeTimeFormat', 100, NewIntlRelativeTimeFormat, ()=>{});
diff --git a/deps/v8/test/js-perf-test/Intl/run.js b/deps/v8/test/js-perf-test/Intl/run.js
new file mode 100644
index 0000000000..61ac92a267
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Intl/run.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+load('../base.js');
+load('constructor.js');
+
+function PrintResult(name, result) {
+ console.log(name + '-Intl(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+}
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index 88f39b9fa8..84f5a9304b 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -483,6 +483,20 @@
]
},
{
+ "name": "StringNormalize",
+ "main": "run.js",
+ "resources": [ "string-normalize.js" ],
+ "test_flags": [ "string-normalize" ],
+ "results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
+ "run_count": 1,
+ "tests": [
+ {"name": "StringNormalize"},
+ {"name": "StringNormalizeNFD"},
+ {"name": "StringNormalizeNFKC"},
+ {"name": "StringNormalizeNFKD"}
+ ]
+ },
+ {
"name": "StringLocaleCompare",
"main": "run.js",
"resources": [ "string-localeCompare.js" ],
@@ -494,6 +508,20 @@
]
},
{
+ "name": "StringToLocaleCase",
+ "main": "run.js",
+ "resources": [ "string-toLocaleCase.js" ],
+ "test_flags": [ "string-toLocaleCase" ],
+ "results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
+ "run_count": 1,
+ "tests": [
+ {"name": "StringToLocaleLowerCase"},
+ {"name": "StringToLocaleUpperCase"},
+ {"name": "StringToLocaleLowerCaseTR"},
+ {"name": "StringToLocaleUpperCaseTR"}
+ ]
+ },
+ {
"name": "StringMatchAll",
"main": "run.js",
"resources": [ "string-matchall.js" ],
@@ -760,6 +788,20 @@
"test_flags": ["construct-all-typedarrays"]
},
{
+ "name": "FilterNoSpecies",
+ "main": "run.js",
+ "resources": ["filter-nospecies.js"],
+ "test_flags": ["filter-nospecies"],
+ "results_regexp": "^TypedArrays\\-%s\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Uint8Array"},
+ {"name": "Uint16Array"},
+ {"name": "Uint32Array"},
+ {"name": "Float32Array"},
+ {"name": "Float64Array"}
+ ]
+ },
+ {
"name": "JoinBigIntTypes",
"main": "run.js",
"resources": ["base.js", "join.js", "join-bigint.js"],
@@ -994,6 +1036,27 @@
]
},
{
+ "name": "ArrayInOperator",
+ "path": ["ArrayInOperator"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayInOperator\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "PackedSMI"},
+ {"name": "PackedDouble"},
+ {"name": "PackedElements"},
+ {"name": "HoleySMI"},
+ {"name": "HoleyDouble"},
+ {"name": "HoleyElements"},
+ {"name": "SparseSMI"},
+ {"name": "SparseDouble"},
+ {"name": "SparseElements"},
+ {"name": "TypedUint8"},
+ {"name": "TypedInt32"},
+ {"name": "TypedFloat64"}
+ ]
+ },
+ {
"name": "ArraySort",
"path": ["ArraySort"],
"main": "run.js",
@@ -1305,6 +1368,22 @@
]
},
{
+ "name": "Intl",
+ "path": ["Intl"],
+ "main": "run.js",
+ "resources": [ "constructor.js" ],
+ "flags": [],
+ "results_regexp": "^%s\\-Intl\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "NewIntlCollator"},
+ {"name": "NewIntlDateTimeFormat"},
+ {"name": "NewIntlNumberFormat"},
+ {"name": "NewIntlPluralRules"},
+ {"name": "NewIntlListFormat"},
+ {"name": "NewIntlRelativeTimeFormat"}
+ ]
+ },
+ {
"name": "Inspector",
"path": ["Inspector"],
"main": "run.js",
@@ -1358,6 +1437,20 @@
]
},
{
+ "name": "ObjectFreeze",
+ "path": ["ObjectFreeze"],
+ "main": "run.js",
+ "flags": [],
+ "resources": [
+ "tagged-template.js"
+ ],
+ "results_regexp": "^%s\\-Numbers\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "TaggedTemplate"},
+ {"name": "TaggedTemplateLoose"}
+ ]
+ },
+ {
"name": "TurboFan",
"path": ["TurboFan"],
"main": "run.js",
diff --git a/deps/v8/test/js-perf-test/ObjectFreeze/run.js b/deps/v8/test/js-perf-test/ObjectFreeze/run.js
new file mode 100644
index 0000000000..63eb1d69ec
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ObjectFreeze/run.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+load('../base.js');
+load('tagged-template.js');
+
+function PrintResult(name, result) {
+ console.log(name);
+ console.log(name + '-Numbers(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+}
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/ObjectFreeze/tagged-template.js b/deps/v8/test/js-perf-test/ObjectFreeze/tagged-template.js
new file mode 100644
index 0000000000..83fbb8aa47
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ObjectFreeze/tagged-template.js
@@ -0,0 +1,65 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function tag(strings, ...values) {
+ let a = 0;
+ for (let i = 0; i < strings.length; ++i) a += strings[i].length;
+ return a;
+}
+
+function driver(n) {
+ let result = 0;
+ for (let i = 0; i < n; ++i) {
+ result += tag`${"Hello"} ${"cruel"} ${"slow"} ${"world"}!\n`;
+ result += tag`${"Why"} ${"is"} ${"this"} ${"so"} ${"damn"} ${"slow"}?!\n`;
+ }
+ return result;
+}
+
+function TaggedTemplate() {
+ driver(1e4);
+}
+
+function TaggedTemplateWarmUp() {
+ driver(1e1);
+ driver(1e2);
+ driver(1e3);
+}
+
+createSuite('TaggedTemplate', 10, TaggedTemplate, TaggedTemplateWarmUp);
+
+var _templateObject = _taggedTemplateLiteralLoose(
+ ["", " ", " ", " ", "!\n"],
+ ["", " ", " ", " ", "!\\n"]
+),
+_templateObject2 = _taggedTemplateLiteralLoose(
+ ["", " ", " ", " ", " ", " ", "?!\n"],
+ ["", " ", " ", " ", " ", " ", "?!\\n"]
+);
+
+function _taggedTemplateLiteralLoose(strings, raw) {
+ strings.raw = raw;
+ return strings;
+}
+
+function driverLoose(n) {
+ var result = 0;
+ for (var i = 0; i < n; ++i) {
+ result += tag(_templateObject, "Hello", "cruel", "slow", "world");
+ result += tag(_templateObject2, "Why", "is", "this", "so", "damn", "slow");
+ }
+ return result;
+}
+
+function TaggedTemplateLoose() {
+ driverLoose(1e4);
+}
+
+function TaggedTemplateLooseWarmUp() {
+ driverLoose(1e1);
+ driverLoose(1e2);
+ driverLoose(1e3);
+}
+
+createSuite('TaggedTemplateLoose', 10, TaggedTemplateLoose, TaggedTemplateLooseWarmUp);
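
The two suites compare the native tagged-template path against the Babel-style "loose" transform above. The native path benefits from the spec requirement that the template strings object be cached per call site and frozen, which is what ties this benchmark to the ObjectFreeze work. A minimal sketch of that observable behavior:

    function tagId(strings) { return strings; }
    function callSite() { return tagId`a${1}b`; }
    const s1 = callSite();
    const s2 = callSite();
    console.log(s1 === s2);           // true: one template object per call site
    console.log(Object.isFrozen(s1)); // true: the cached object is frozen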
diff --git a/deps/v8/test/js-perf-test/Proxies/proxies.js b/deps/v8/test/js-perf-test/Proxies/proxies.js
index 79d064e931..6d5e808449 100644
--- a/deps/v8/test/js-perf-test/Proxies/proxies.js
+++ b/deps/v8/test/js-perf-test/Proxies/proxies.js
@@ -373,7 +373,8 @@ newBenchmark("SetStringWithTrap", {
setup() {
p = new Proxy(obj, {
set: function(target, propertyKey, value, receiver) {
- target[propertyKey] = SOME_OTHER_NUMBER
+ target[propertyKey] = SOME_OTHER_NUMBER;
+ return true;
}
});
},
@@ -412,7 +413,8 @@ newBenchmark("SetIndexWithTrap", {
setup() {
p = new Proxy(obj, {
set: function(target, propertyKey, value, receiver) {
- target[propertyKey] = SOME_OTHER_NUMBER
+ target[propertyKey] = SOME_OTHER_NUMBER;
+ return true;
}
});
},
@@ -450,7 +452,8 @@ newBenchmark("SetSymbolWithTrap", {
setup() {
p = new Proxy(obj, {
set: function(target, propertyKey, value, receiver) {
- target[propertyKey] = SOME_OTHER_NUMBER
+ target[propertyKey] = SOME_OTHER_NUMBER;
+ return true;
}
});
},
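
The added `return true;` matters because a `set` trap that returns a falsy value signals a failed assignment, which throws a TypeError in strict mode code. A minimal illustration, independent of the benchmark harness:

    'use strict';
    const broken = new Proxy({}, { set(t, k, v) { t[k] = v; } }); // implicitly returns undefined
    try { broken.x = 1; } catch (e) { console.log(e instanceof TypeError); } // true
    const fixed = new Proxy({}, { set(t, k, v) { t[k] = v; return true; } });
    fixed.x = 1; // succeeds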
diff --git a/deps/v8/test/js-perf-test/Strings/string-normalize.js b/deps/v8/test/js-perf-test/Strings/string-normalize.js
new file mode 100644
index 0000000000..21a8a590c9
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Strings/string-normalize.js
@@ -0,0 +1,38 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('StringNormalize', [5], [
+ new Benchmark('StringNormalize', false, false, 0,
+ StringNormalize),
+]);
+new BenchmarkSuite('StringNormalizeNFD', [5], [
+ new Benchmark('StringNormalizeNFD', false, false, 0,
+ StringNormalizeNFD),
+]);
+new BenchmarkSuite('StringNormalizeNFKC', [5], [
+ new Benchmark('StringNormalizeNFKC', false, false, 0,
+ StringNormalizeNFKC),
+]);
+new BenchmarkSuite('StringNormalizeNFKD', [5], [
+ new Benchmark('StringNormalizeNFKD', false, false, 0,
+ StringNormalizeNFKD),
+]);
+
+const shortString = "àèìòùáéíóúäëïöüÿâêîôûãõñ";
+
+function StringNormalize() {
+ return shortString.normalize();
+}
+
+function StringNormalizeNFD() {
+ return shortString.normalize("NFD");
+}
+
+function StringNormalizeNFKC() {
+ return shortString.normalize("NFKC");
+}
+
+function StringNormalizeNFKD() {
+ return shortString.normalize("NFKD");
+}
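
The four normalization forms differ in whether characters are composed or decomposed and whether compatibility equivalents are folded; for instance, an accented letter expands under NFD. A quick sketch:

    const composed = "\u00E9";                    // "é" as one code point (NFC)
    const decomposed = composed.normalize("NFD"); // "e" + U+0301 combining acute
    console.log(composed.length, decomposed.length);        // 1 2
    console.log(composed === decomposed.normalize("NFC"));  // true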
diff --git a/deps/v8/test/js-perf-test/Strings/string-toLocaleCase.js b/deps/v8/test/js-perf-test/Strings/string-toLocaleCase.js
new file mode 100644
index 0000000000..67919fda06
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Strings/string-toLocaleCase.js
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('StringToLocaleUpperCaseTR', [5], [
+ new Benchmark('StringToLocaleUpperCaseTR', false, false, 0,
+ StringToLocaleUpperCaseTR)
+]);
+new BenchmarkSuite('StringToLocaleLowerCaseTR', [5], [
+ new Benchmark('StringToLocaleLowerCaseTR', false, false, 0,
+ StringToLocaleLowerCaseTR),
+]);
+new BenchmarkSuite('StringToLocaleUpperCase', [5], [
+ new Benchmark('StringToLocaleUpperCase', false, false, 0,
+ StringToLocaleUpperCase)
+]);
+new BenchmarkSuite('StringToLocaleLowerCase', [5], [
+ new Benchmark('StringToLocaleLowerCase', false, false, 0,
+ StringToLocaleLowerCase),
+]);
+
+var shortString = "Îñţérñåţîöñåļîžåţîöñ Ļöçåļîžåţîöñ החןןם שםוןמ δρεεκ ισ φθν 一二三";
+
+function StringToLocaleUpperCase() {
+ return shortString.toLocaleUpperCase();
+}
+function StringToLocaleLowerCase() {
+ return shortString.toLocaleLowerCase();
+}
+function StringToLocaleUpperCaseTR() {
+ return shortString.toLocaleUpperCase(["tr"]);
+}
+function StringToLocaleLowerCaseTR() {
+ return shortString.toLocaleLowerCase(["tr"]);
+}
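
The TR variants exist because Turkish casing is the classic case where locale-sensitive and locale-insensitive results diverge (dotted vs. dotless i), forcing the full ICU path:

    console.log("i".toUpperCase());           // "I" (locale-insensitive)
    console.log("i".toLocaleUpperCase("tr")); // "İ" (dotted capital I)
    console.log("I".toLocaleLowerCase("tr")); // "ı" (dotless lowercase i)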
diff --git a/deps/v8/test/js-perf-test/TypedArrays/filter-nospecies.js b/deps/v8/test/js-perf-test/TypedArrays/filter-nospecies.js
new file mode 100644
index 0000000000..180fe5b2ad
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/filter-nospecies.js
@@ -0,0 +1,55 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const SIZE = 1024;
+let input;
+let output;
+
+function CreateSetup(TAConstructor) {
+ return () => {
+ // Create a typed array holding the number sequence 0 to SIZE-1.
+ const values = Array.from({ length: SIZE }).map((_, i) =>
+ TAConstructor === BigUint64Array ? BigInt(i) : i
+ );
+ input = new TAConstructor(values);
+ };
+}
+
+// Creates a run function that is unpolluted by IC feedback.
+function CreateRun() {
+ // Filters out every other (odd-indexed) element.
+ return new Function(`
+ output = input.filter((el, i) => el < SIZE && (i % 2) === 0);
+ `);
+}
+
+function isOutputInvalid() {
+ if (output.length !== input.length / 2) return true;
+
+ // Verifies that every other (odd-indexed) element has been filtered out.
+ for (let i = 0; i < SIZE / 2; i++) {
+ if (output[i] !== input[i * 2]) return true;
+ }
+}
+
+function TearDown() {
+ if (isOutputInvalid()) throw new TypeError(`Unexpected result!\n${output}`);
+
+ input = void 0;
+ output = void 0;
+}
+
+createSuite(
+ 'Uint8Array', 1000, CreateRun(), CreateSetup(Uint8Array), TearDown);
+createSuite(
+ 'Uint16Array', 1000, CreateRun(), CreateSetup(Uint16Array), TearDown);
+createSuite(
+ 'Uint32Array', 1000, CreateRun(), CreateSetup(Uint32Array), TearDown);
+createSuite(
+ 'Float32Array', 1000, CreateRun(), CreateSetup(Float32Array), TearDown);
+createSuite(
+ 'Float64Array', 1000, CreateRun(), CreateSetup(Float64Array), TearDown);
+createSuite(
+ 'BigUint64Array', 1000, CreateRun(), CreateSetup(BigUint64Array),
+ TearDown);
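
"NoSpecies" refers to the fast path taken when `Symbol.species` is untouched; otherwise `%TypedArray%.prototype.filter` must construct its result through the species constructor. A sketch of the species machinery the benchmark deliberately avoids:

    class MyU8 extends Uint8Array {
      static get [Symbol.species]() { return Uint8Array; } // redirect result construction
    }
    const arr = new MyU8([1, 2, 3, 4]);
    const evens = arr.filter(x => x % 2 === 0);
    console.log(evens instanceof MyU8);       // false: species was consulted
    console.log(evens instanceof Uint8Array); // true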
diff --git a/deps/v8/test/message/asm-function-undefined.out b/deps/v8/test/message/asm-function-undefined.out
index ad11ee66fe..f2f47f8d31 100644
--- a/deps/v8/test/message/asm-function-undefined.out
+++ b/deps/v8/test/message/asm-function-undefined.out
@@ -2,4 +2,4 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:12: Invalid asm.js: Undefined function
+*%(basename)s:13: Invalid asm.js: Undefined function
diff --git a/deps/v8/test/message/asm-table-undefined.out b/deps/v8/test/message/asm-table-undefined.out
index 447968535c..c94f45d2a3 100644
--- a/deps/v8/test/message/asm-table-undefined.out
+++ b/deps/v8/test/message/asm-table-undefined.out
@@ -2,4 +2,4 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:13: Invalid asm.js: Undefined function table
+*%(basename)s:14: Invalid asm.js: Undefined function table
diff --git a/deps/v8/test/message/fail/computed-prop-fni.js b/deps/v8/test/message/fail/computed-prop-fni.js
new file mode 100644
index 0000000000..a331f56850
--- /dev/null
+++ b/deps/v8/test/message/fail/computed-prop-fni.js
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let a = { b: {} };
+let foo = "b";
+a[foo].c = () => { throw Error(); };
+let fn = a.b.c;
+fn();
diff --git a/deps/v8/test/message/fail/computed-prop-fni.out b/deps/v8/test/message/fail/computed-prop-fni.out
new file mode 100644
index 0000000000..da637c09fb
--- /dev/null
+++ b/deps/v8/test/message/fail/computed-prop-fni.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: Error
+a[foo].c = () => { throw Error(); };
+ ^
+Error
+ at a.<computed>.c (*%(basename)s:7:26)
+ at *%(basename)s:9:1
diff --git a/deps/v8/test/message/fail/default-parameter-tdz-arrow.out b/deps/v8/test/message/fail/default-parameter-tdz-arrow.out
index 7d5f894ef5..a17bcaa05f 100644
--- a/deps/v8/test/message/fail/default-parameter-tdz-arrow.out
+++ b/deps/v8/test/message/fail/default-parameter-tdz-arrow.out
@@ -1,6 +1,6 @@
-*%(basename)s:7: ReferenceError: a is not defined
+*%(basename)s:7: ReferenceError: Cannot access 'a' before initialization
((a=-a) => { })();
^
-ReferenceError: a is not defined
+ReferenceError: Cannot access 'a' before initialization
at *%(basename)s:7:6
at *%(basename)s:7:16
diff --git a/deps/v8/test/message/fail/default-parameter-tdz.out b/deps/v8/test/message/fail/default-parameter-tdz.out
index 8a6d56abae..08d606d63a 100644
--- a/deps/v8/test/message/fail/default-parameter-tdz.out
+++ b/deps/v8/test/message/fail/default-parameter-tdz.out
@@ -1,6 +1,6 @@
-*%(basename)s:7: ReferenceError: a is not defined
+*%(basename)s:7: ReferenceError: Cannot access 'a' before initialization
(function(a=+a) { })();
^
-ReferenceError: a is not defined
+ReferenceError: Cannot access 'a' before initialization
at *%(basename)s:7:14
at *%(basename)s:7:21
diff --git a/deps/v8/test/message/fail/destructuring-object-private-name.js b/deps/v8/test/message/fail/destructuring-object-private-name.js
new file mode 100644
index 0000000000..3e30bd2321
--- /dev/null
+++ b/deps/v8/test/message/fail/destructuring-object-private-name.js
@@ -0,0 +1,13 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-class-fields
+
+class Foo {
+ #x = 1;
+ destructureX() {
+ const { #x: x } = this;
+ return x;
+ }
+}
diff --git a/deps/v8/test/message/fail/destructuring-object-private-name.out b/deps/v8/test/message/fail/destructuring-object-private-name.out
new file mode 100644
index 0000000000..83b6b8eb80
--- /dev/null
+++ b/deps/v8/test/message/fail/destructuring-object-private-name.out
@@ -0,0 +1,4 @@
+*%(basename)s:10: SyntaxError: Unexpected identifier
+ const { #x: x } = this;
+ ^^
+SyntaxError: Unexpected identifier
diff --git a/deps/v8/test/message/fail/json-stringify-circular-ellipsis.js b/deps/v8/test/message/fail/json-stringify-circular-ellipsis.js
new file mode 100644
index 0000000000..30f3e6e9e8
--- /dev/null
+++ b/deps/v8/test/message/fail/json-stringify-circular-ellipsis.js
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class Outer {
+ constructor(o) { this.x = o; }
+}
+
+class Inner {
+ constructor(o) { this.y = o; }
+}
+
+class ArrayHolder {
+ constructor(o) {
+ this.array = [];
+ this.array[1] = o;
+ }
+}
+
+const root = {};
+root.first = new Outer(
+ new ArrayHolder(
+ new Inner(root)
+ )
+);
+
+JSON.stringify(root);
diff --git a/deps/v8/test/message/fail/json-stringify-circular-ellipsis.out b/deps/v8/test/message/fail/json-stringify-circular-ellipsis.out
new file mode 100644
index 0000000000..c288ee95ea
--- /dev/null
+++ b/deps/v8/test/message/fail/json-stringify-circular-ellipsis.out
@@ -0,0 +1,18 @@
+*%(basename)s:27: TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ | property 'first' -> object with constructor 'Outer'
+ | property 'x' -> object with constructor 'ArrayHolder'
+ | ...
+ | index 1 -> object with constructor 'Inner'
+ --- property 'y' closes the circle
+JSON.stringify(root);
+ ^
+TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ | property 'first' -> object with constructor 'Outer'
+ | property 'x' -> object with constructor 'ArrayHolder'
+ | ...
+ | index 1 -> object with constructor 'Inner'
+ --- property 'y' closes the circle
+ at JSON.stringify (<anonymous>)
+ at *%(basename)s:27:6
diff --git a/deps/v8/test/message/fail/json-stringify-circular-max-display-depth.js b/deps/v8/test/message/fail/json-stringify-circular-max-display-depth.js
new file mode 100644
index 0000000000..762b5916da
--- /dev/null
+++ b/deps/v8/test/message/fail/json-stringify-circular-max-display-depth.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class Outer {
+ constructor(o) { this.x = o; }
+}
+
+class ArrayHolder {
+ constructor(o) {
+ this.array = [];
+ this.array[1] = o;
+ }
+}
+
+const root = {};
+root.first = new Outer(
+ new ArrayHolder(root)
+);
+
+JSON.stringify(root);
diff --git a/deps/v8/test/message/fail/json-stringify-circular-max-display-depth.out b/deps/v8/test/message/fail/json-stringify-circular-max-display-depth.out
new file mode 100644
index 0000000000..bf81266dd5
--- /dev/null
+++ b/deps/v8/test/message/fail/json-stringify-circular-max-display-depth.out
@@ -0,0 +1,16 @@
+*%(basename)s:21: TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ | property 'first' -> object with constructor 'Outer'
+ | property 'x' -> object with constructor 'ArrayHolder'
+ | property 'array' -> object with constructor 'Array'
+ --- index 1 closes the circle
+JSON.stringify(root);
+ ^
+TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ | property 'first' -> object with constructor 'Outer'
+ | property 'x' -> object with constructor 'ArrayHolder'
+ | property 'array' -> object with constructor 'Array'
+ --- index 1 closes the circle
+ at JSON.stringify (<anonymous>)
+ at *%(basename)s:21:6
diff --git a/deps/v8/test/message/fail/json-stringify-circular-proxy.js b/deps/v8/test/message/fail/json-stringify-circular-proxy.js
new file mode 100644
index 0000000000..b3488155d6
--- /dev/null
+++ b/deps/v8/test/message/fail/json-stringify-circular-proxy.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class Outer {
+ constructor(o) { this.x = o; }
+}
+
+class Inner {
+ constructor(o) { this.y = o; }
+}
+
+class ArrayHolder {
+ constructor(o) {
+ this.array = [];
+ this.array[1] = o;
+ }
+}
+
+const root = {};
+const outer = new Outer(
+ new ArrayHolder(
+ new Inner(root)
+ )
+);
+root.first = new Proxy(outer, outer);
+
+JSON.stringify(root);
diff --git a/deps/v8/test/message/fail/json-stringify-circular-proxy.out b/deps/v8/test/message/fail/json-stringify-circular-proxy.out
new file mode 100644
index 0000000000..6004cfb42d
--- /dev/null
+++ b/deps/v8/test/message/fail/json-stringify-circular-proxy.out
@@ -0,0 +1,18 @@
+*%(basename)s:28: TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ | property 'first' -> object with constructor 'Object'
+ | property 'x' -> object with constructor 'ArrayHolder'
+ | ...
+ | index 1 -> object with constructor 'Inner'
+ --- property 'y' closes the circle
+JSON.stringify(root);
+ ^
+TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ | property 'first' -> object with constructor 'Object'
+ | property 'x' -> object with constructor 'ArrayHolder'
+ | ...
+ | index 1 -> object with constructor 'Inner'
+ --- property 'y' closes the circle
+ at JSON.stringify (<anonymous>)
+ at *%(basename)s:28:6
diff --git a/deps/v8/test/message/fail/json-stringify-circular-substructure.js b/deps/v8/test/message/fail/json-stringify-circular-substructure.js
new file mode 100644
index 0000000000..af512fdb94
--- /dev/null
+++ b/deps/v8/test/message/fail/json-stringify-circular-substructure.js
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const object = {};
+object.substructure = {};
+object.substructure.key = object.substructure;
+
+JSON.stringify(object);
diff --git a/deps/v8/test/message/fail/json-stringify-circular-substructure.out b/deps/v8/test/message/fail/json-stringify-circular-substructure.out
new file mode 100644
index 0000000000..7633ea24e8
--- /dev/null
+++ b/deps/v8/test/message/fail/json-stringify-circular-substructure.out
@@ -0,0 +1,10 @@
+*%(basename)s:9: TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ --- property 'key' closes the circle
+JSON.stringify(object);
+ ^
+TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ --- property 'key' closes the circle
+ at JSON.stringify (<anonymous>)
+ at *%(basename)s:9:6
diff --git a/deps/v8/test/message/fail/json-stringify-circular.js b/deps/v8/test/message/fail/json-stringify-circular.js
new file mode 100644
index 0000000000..88efbb68d0
--- /dev/null
+++ b/deps/v8/test/message/fail/json-stringify-circular.js
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const object = {};
+object.key = object;
+
+JSON.stringify(object);
diff --git a/deps/v8/test/message/fail/json-stringify-circular.out b/deps/v8/test/message/fail/json-stringify-circular.out
new file mode 100644
index 0000000000..bfea54ef00
--- /dev/null
+++ b/deps/v8/test/message/fail/json-stringify-circular.out
@@ -0,0 +1,10 @@
+*%(basename)s:8: TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ --- property 'key' closes the circle
+JSON.stringify(object);
+ ^
+TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ --- property 'key' closes the circle
+ at JSON.stringify (<anonymous>)
+ at *%(basename)s:8:6
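
These .out expectations capture V8's improved diagnostics, which now print the property path that closes the cycle. When a cycle is expected, a common userland workaround is a replacer that cuts it; a minimal sketch (the "[Circular]" marker is an arbitrary choice):

    function safeStringify(value) {
      const seen = new WeakSet();
      return JSON.stringify(value, (key, val) => {
        if (typeof val === "object" && val !== null) {
          if (seen.has(val)) return "[Circular]"; // break the cycle instead of throwing
          seen.add(val);
        }
        return val;
      });
    }
    const obj = {};
    obj.key = obj;
    console.log(safeStringify(obj)); // {"key":"[Circular]"}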
diff --git a/deps/v8/test/message/fail/list-format-style-narrow.js b/deps/v8/test/message/fail/list-format-style-narrow.js
index f9af8ff4a7..9b731441ed 100644
--- a/deps/v8/test/message/fail/list-format-style-narrow.js
+++ b/deps/v8/test/message/fail/list-format-style-narrow.js
@@ -1,7 +1,4 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-intl-list-format
-
new Intl.ListFormat("en", {style: 'narrow'})
diff --git a/deps/v8/test/message/fail/list-format-style-narrow.out b/deps/v8/test/message/fail/list-format-style-narrow.out
index b762f8d664..90047338c0 100644
--- a/deps/v8/test/message/fail/list-format-style-narrow.out
+++ b/deps/v8/test/message/fail/list-format-style-narrow.out
@@ -1,8 +1,8 @@
-*%(basename)s:7: RangeError: When style is 'narrow', 'unit' is the only allowed value for the type option.
+*%(basename)s:4: RangeError: When style is 'narrow', 'unit' is the only allowed value for the type option.
new Intl.ListFormat("en", {style: 'narrow'})
^
RangeError: When style is 'narrow', 'unit' is the only allowed value for the type option.
at new ListFormat (<anonymous>)
- at *%(basename)s:7:1
+ at *%(basename)s:4:1
diff --git a/deps/v8/test/message/fail/wasm-function-name.js b/deps/v8/test/message/fail/wasm-function-name.js
index 0573db02e4..f946358c27 100644
--- a/deps/v8/test/message/fail/wasm-function-name.js
+++ b/deps/v8/test/message/fail/wasm-function-name.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/message/fail/wasm-function-name.out b/deps/v8/test/message/fail/wasm-function-name.out
index 00626c01f3..ff9b04eaab 100644
--- a/deps/v8/test/message/fail/wasm-function-name.out
+++ b/deps/v8/test/message/fail/wasm-function-name.out
@@ -1,5 +1,5 @@
wasm-function[0]:1: RuntimeError: unreachable
RuntimeError: unreachable
at main (wasm-function[0]:1)
- at *%(basename)s:12:31
+ at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-module-and-function-name.js b/deps/v8/test/message/fail/wasm-module-and-function-name.js
index cab3252427..b1832eb925 100644
--- a/deps/v8/test/message/fail/wasm-module-and-function-name.js
+++ b/deps/v8/test/message/fail/wasm-module-and-function-name.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/message/fail/wasm-module-and-function-name.out b/deps/v8/test/message/fail/wasm-module-and-function-name.out
index 42ba7b077c..0bff25cfd0 100644
--- a/deps/v8/test/message/fail/wasm-module-and-function-name.out
+++ b/deps/v8/test/message/fail/wasm-module-and-function-name.out
@@ -1,4 +1,4 @@
wasm-function[0]:1: RuntimeError: unreachable
RuntimeError: unreachable
at test-module.main (wasm-function[0]:1)
- at *%(basename)s:13:31
+ at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-module-name.js b/deps/v8/test/message/fail/wasm-module-name.js
index 1e32a5d437..c872c32cb8 100644
--- a/deps/v8/test/message/fail/wasm-module-name.js
+++ b/deps/v8/test/message/fail/wasm-module-name.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/message/fail/wasm-module-name.out b/deps/v8/test/message/fail/wasm-module-name.out
index bc3a6c01a5..9163088efe 100644
--- a/deps/v8/test/message/fail/wasm-module-name.out
+++ b/deps/v8/test/message/fail/wasm-module-name.out
@@ -1,5 +1,5 @@
wasm-function[0]:1: RuntimeError: unreachable
RuntimeError: unreachable
at test-module (wasm-function[0]:1)
- at *%(basename)s:15:31
+ at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-no-name.js b/deps/v8/test/message/fail/wasm-no-name.js
index 121a7cbfe4..83ab942c84 100644
--- a/deps/v8/test/message/fail/wasm-no-name.js
+++ b/deps/v8/test/message/fail/wasm-no-name.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/message/fail/wasm-no-name.out b/deps/v8/test/message/fail/wasm-no-name.out
index f6b9f8d032..90d068d557 100644
--- a/deps/v8/test/message/fail/wasm-no-name.out
+++ b/deps/v8/test/message/fail/wasm-no-name.out
@@ -1,5 +1,5 @@
wasm-function[0]:1: RuntimeError: unreachable
RuntimeError: unreachable
at wasm-function[0]:1
- at *%(basename)s:14:31
+ at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-trap.js b/deps/v8/test/message/fail/wasm-trap.js
index 53013a7d22..6fdb582dd0 100644
--- a/deps/v8/test/message/fail/wasm-trap.js
+++ b/deps/v8/test/message/fail/wasm-trap.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/message/fail/wasm-trap.out b/deps/v8/test/message/fail/wasm-trap.out
index 33d6309d13..41c017f100 100644
--- a/deps/v8/test/message/fail/wasm-trap.out
+++ b/deps/v8/test/message/fail/wasm-trap.out
@@ -1,5 +1,5 @@
wasm-function[0]:5: RuntimeError: divide by zero
RuntimeError: divide by zero
at main (wasm-function[0]:5)
- at *%(basename)s:15:16
+ at *%(basename)s:{NUMBER}:16
diff --git a/deps/v8/test/message/fail/weak-refs-weakfactory1.js b/deps/v8/test/message/fail/weak-refs-finalizationgroup1.js
index 5359aee736..a97abb3f8b 100644
--- a/deps/v8/test/message/fail/weak-refs-weakfactory1.js
+++ b/deps/v8/test/message/fail/weak-refs-finalizationgroup1.js
@@ -4,4 +4,4 @@
// Flags: --harmony-weak-refs
-let wf = new WeakFactory();
+let fg = new FinalizationGroup();
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationgroup1.out b/deps/v8/test/message/fail/weak-refs-finalizationgroup1.out
new file mode 100644
index 0000000000..ddaa32328f
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-finalizationgroup1.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: TypeError: FinalizationGroup: cleanup must be callable
+let fg = new FinalizationGroup();
+ ^
+TypeError: FinalizationGroup: cleanup must be callable
+ at new FinalizationGroup (<anonymous>)
+ at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-weakfactory2.js b/deps/v8/test/message/fail/weak-refs-finalizationgroup2.js
index fabb7f1e41..87a6183de9 100644
--- a/deps/v8/test/message/fail/weak-refs-weakfactory2.js
+++ b/deps/v8/test/message/fail/weak-refs-finalizationgroup2.js
@@ -4,4 +4,4 @@
// Flags: --harmony-weak-refs
-let wf = new WeakFactory({});
+let fg = new FinalizationGroup({});
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationgroup2.out b/deps/v8/test/message/fail/weak-refs-finalizationgroup2.out
new file mode 100644
index 0000000000..799199aff8
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-finalizationgroup2.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: TypeError: FinalizationGroup: cleanup must be callable
+let fg = new FinalizationGroup({});
+ ^
+TypeError: FinalizationGroup: cleanup must be callable
+ at new FinalizationGroup (<anonymous>)
+ at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-makecell1.out b/deps/v8/test/message/fail/weak-refs-makecell1.out
deleted file mode 100644
index 5c74c1f7fa..0000000000
--- a/deps/v8/test/message/fail/weak-refs-makecell1.out
+++ /dev/null
@@ -1,6 +0,0 @@
-*%(basename)s:8: TypeError: WeakFactory.prototype.makeCell: target must be an object
-wf.makeCell(1);
- ^
-TypeError: WeakFactory.prototype.makeCell: target must be an object
- at WeakFactory.makeCell (<anonymous>)
- at *%(basename)s:8:4
diff --git a/deps/v8/test/message/fail/weak-refs-makecell2.out b/deps/v8/test/message/fail/weak-refs-makecell2.out
deleted file mode 100644
index 2ea8033183..0000000000
--- a/deps/v8/test/message/fail/weak-refs-makecell2.out
+++ /dev/null
@@ -1,6 +0,0 @@
-*%(basename)s:9: TypeError: WeakFactory.prototype.makeCell: target and holdings must not be same
-wf.makeCell(o, o);
- ^
-TypeError: WeakFactory.prototype.makeCell: target and holdings must not be same
- at WeakFactory.makeCell (<anonymous>)
- at *%(basename)s:9:4
diff --git a/deps/v8/test/message/fail/weak-refs-makecell1.js b/deps/v8/test/message/fail/weak-refs-register1.js
index 416fcca255..a90e4aa47c 100644
--- a/deps/v8/test/message/fail/weak-refs-makecell1.js
+++ b/deps/v8/test/message/fail/weak-refs-register1.js
@@ -4,5 +4,5 @@
// Flags: --harmony-weak-refs
-let wf = new WeakFactory(() => {});
-wf.makeCell(1);
+let fg = new FinalizationGroup(() => {});
+fg.register(1);
diff --git a/deps/v8/test/message/fail/weak-refs-register1.out b/deps/v8/test/message/fail/weak-refs-register1.out
new file mode 100644
index 0000000000..96983664c2
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-register1.out
@@ -0,0 +1,6 @@
+*%(basename)s:8: TypeError: FinalizationGroup.prototype.register: target must be an object
+fg.register(1);
+ ^
+TypeError: FinalizationGroup.prototype.register: target must be an object
+ at FinalizationGroup.register (<anonymous>)
+ at *%(basename)s:8:4
diff --git a/deps/v8/test/message/fail/weak-refs-makecell2.js b/deps/v8/test/message/fail/weak-refs-register2.js
index df0352554f..8934a46511 100644
--- a/deps/v8/test/message/fail/weak-refs-makecell2.js
+++ b/deps/v8/test/message/fail/weak-refs-register2.js
@@ -4,6 +4,6 @@
// Flags: --harmony-weak-refs
-let wf = new WeakFactory(() => {});
+let fg = new FinalizationGroup(() => {});
let o = {};
-wf.makeCell(o, o);
+fg.register(o, o);
diff --git a/deps/v8/test/message/fail/weak-refs-register2.out b/deps/v8/test/message/fail/weak-refs-register2.out
new file mode 100644
index 0000000000..c7b9e10909
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-register2.out
@@ -0,0 +1,6 @@
+*%(basename)s:9: TypeError: FinalizationGroup.prototype.register: target and holdings must not be same
+fg.register(o, o);
+ ^
+TypeError: FinalizationGroup.prototype.register: target and holdings must not be same
+ at FinalizationGroup.register (<anonymous>)
+ at *%(basename)s:9:4
diff --git a/deps/v8/test/message/fail/weak-refs-weakfactory1.out b/deps/v8/test/message/fail/weak-refs-weakfactory1.out
deleted file mode 100644
index e865df3053..0000000000
--- a/deps/v8/test/message/fail/weak-refs-weakfactory1.out
+++ /dev/null
@@ -1,6 +0,0 @@
-*%(basename)s:7: TypeError: WeakFactory: cleanup must be callable
-let wf = new WeakFactory();
- ^
-TypeError: WeakFactory: cleanup must be callable
- at new WeakFactory (<anonymous>)
- at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-weakfactory2.out b/deps/v8/test/message/fail/weak-refs-weakfactory2.out
deleted file mode 100644
index 7a6ee459b3..0000000000
--- a/deps/v8/test/message/fail/weak-refs-weakfactory2.out
+++ /dev/null
@@ -1,6 +0,0 @@
-*%(basename)s:7: TypeError: WeakFactory: cleanup must be callable
-let wf = new WeakFactory({});
- ^
-TypeError: WeakFactory: cleanup must be callable
- at new WeakFactory (<anonymous>)
- at *%(basename)s:7:10
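
These renames track the weak-refs proposal's move from WeakFactory/makeCell to FinalizationGroup/register (the API was later renamed again, to FinalizationRegistry). A sketch of the shape being tested, assuming a build run with --harmony-weak-refs:

    let fg = new FinalizationGroup(iterator => {       // cleanup must be callable
      for (const holdings of iterator) console.log("collected:", holdings);
    });
    let target = {};
    fg.register(target, "some holdings"); // target must be an object and
                                          // must differ from holdings
    target = null; // after GC, the cleanup callback may eventually run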
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index cc604a5a3b..25c87b5e5c 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -54,13 +54,19 @@
}], # no_i18n == True
##############################################################################
-['lite_mode', {
+['lite_mode or variant == jitless', {
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'mjsunit/fail/assert-promise-result-wasm-compile-fail': [SKIP],
'fail/wasm-*': [SKIP],
'wasm-*': [SKIP],
# Test output requires --validate-asm, which is disabled in jitless mode.
'asm-*': [SKIP],
-}], # lite_mode
+}], # lite_mode or variant == jitless
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
]
diff --git a/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.js b/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.js
new file mode 100644
index 0000000000..ebfa83d042
--- /dev/null
+++ b/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.js
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test wasm compilation explicitly, since this creates a promise which is only
+// resolved later, i.e. the message queue becomes empty in between.
+// The important part here is that d8 exits with a non-zero exit code.
+
+load('test/mjsunit/mjsunit.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+assertPromiseResult((async function test() {
+ let ok_buffer = (() => {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction(undefined, kSig_i_v).addBody([kExprI32Const, 42]);
+ return builder.toBuffer();
+ })();
+ let bad_buffer = new ArrayBuffer(0);
+ let kNumCompiles = 3;
+
+ // Three compilations of the OK module should succeed.
+ for (var i = 0; i < kNumCompiles; ++i) {
+ await WebAssembly.compile(ok_buffer);
+ }
+
+ // Three compilations of the bad module should fail.
+ for (var i = 0; i < kNumCompiles; ++i) {
+ await WebAssembly.compile(bad_buffer);
+ }
+})());
diff --git a/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.out b/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.out
new file mode 100644
index 0000000000..ecee922cbc
--- /dev/null
+++ b/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.out
@@ -0,0 +1,9 @@
+test/mjsunit/mjsunit.js:{NUMBER}: CompileError: WebAssembly.compile(): BufferSource argument is empty
+ throw concatenateErrors(stack, e);
+ ^
+Error
+ at assertPromiseResult (test/mjsunit/mjsunit.js:{NUMBER}:{NUMBER})
+ at *%(basename)s:{NUMBER}:1
+
+CompileError: WebAssembly.compile(): BufferSource argument is empty
+ at test (*%(basename)s:{NUMBER}:23)
diff --git a/deps/v8/test/message/regress/fail/regress-900383.out b/deps/v8/test/message/regress/fail/regress-900383.out
index 490ca03a80..f718b4f46d 100644
--- a/deps/v8/test/message/regress/fail/regress-900383.out
+++ b/deps/v8/test/message/regress/fail/regress-900383.out
@@ -1,4 +1,4 @@
*%(basename)s:8: SyntaxError: Identifier '*default*' has already been declared
export default x = 1;
- ^
+ ^^^^^^^^^^^^^
SyntaxError: Identifier '*default*' has already been declared
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index e27a3ed2a2..74c26b8525 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -39,21 +39,8 @@ MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
class TestSuite(testsuite.TestSuite):
- def ListTests(self):
- tests = []
- for dirname, dirs, files in os.walk(self.root):
- for dotted in [x for x in dirs if x.startswith('.')]:
- dirs.remove(dotted)
- dirs.sort()
- files.sort()
- for filename in files:
- if filename.endswith(".js"):
- fullpath = os.path.join(dirname, filename)
- relpath = fullpath[len(self.root) + 1 : -3]
- testname = relpath.replace(os.path.sep, "/")
- test = self._create_test(testname)
- tests.append(test)
- return tests
+ def _test_loader_class(self):
+ return testsuite.JSTestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/message/wasm-finish-compilation.js b/deps/v8/test/message/wasm-finish-compilation.js
index ca0f952bc1..0144b2c786 100644
--- a/deps/v8/test/message/wasm-finish-compilation.js
+++ b/deps/v8/test/message/wasm-finish-compilation.js
@@ -4,7 +4,6 @@
// Flags: --no-stress-opt
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// Test that d8 does not terminate until wasm compilation has finished and the
diff --git a/deps/v8/test/message/wasm-function-name-async.js b/deps/v8/test/message/wasm-function-name-async.js
index ddff21ee21..3b80a4d27d 100644
--- a/deps/v8/test/message/wasm-function-name-async.js
+++ b/deps/v8/test/message/wasm-function-name-async.js
@@ -5,7 +5,6 @@
// Flags: --expose-wasm --no-stress-opt
load('test/mjsunit/mjsunit.js');
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/message/wasm-function-name-async.out b/deps/v8/test/message/wasm-function-name-async.out
index 4627c7fcf3..b025f650c6 100644
--- a/deps/v8/test/message/wasm-function-name-async.out
+++ b/deps/v8/test/message/wasm-function-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at main (wasm-function[0]:1)
- at *%(basename)s:16:27
+ at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-function-name-streaming.out b/deps/v8/test/message/wasm-function-name-streaming.out
index 2e33b0808b..f5dde3dd87 100644
--- a/deps/v8/test/message/wasm-function-name-streaming.out
+++ b/deps/v8/test/message/wasm-function-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at main (wasm-function[0]:1)
- at test/message/wasm-function-name-async.js:16:27
+ at test/message/wasm-function-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-and-function-name-async.js b/deps/v8/test/message/wasm-module-and-function-name-async.js
index fa29cab216..d84d67da6e 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-async.js
+++ b/deps/v8/test/message/wasm-module-and-function-name-async.js
@@ -5,7 +5,6 @@
// Flags: --expose-wasm --no-stress-opt
load('test/mjsunit/mjsunit.js');
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/message/wasm-module-and-function-name-async.out b/deps/v8/test/message/wasm-module-and-function-name-async.out
index d1d62dd5be..e1ca097e64 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-async.out
+++ b/deps/v8/test/message/wasm-module-and-function-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at test-module.main (wasm-function[0]:1)
- at *%(basename)s:17:27
+ at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-and-function-name-streaming.out b/deps/v8/test/message/wasm-module-and-function-name-streaming.out
index fc7360383a..4afaa66bbe 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-streaming.out
+++ b/deps/v8/test/message/wasm-module-and-function-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at test-module.main (wasm-function[0]:1)
- at test/message/wasm-module-and-function-name-async.js:17:27
+ at test/message/wasm-module-and-function-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-name-async.js b/deps/v8/test/message/wasm-module-name-async.js
index d24357965b..a0d501fc25 100644
--- a/deps/v8/test/message/wasm-module-name-async.js
+++ b/deps/v8/test/message/wasm-module-name-async.js
@@ -5,7 +5,6 @@
// Flags: --expose-wasm --no-stress-opt
load('test/mjsunit/mjsunit.js');
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/message/wasm-module-name-async.out b/deps/v8/test/message/wasm-module-name-async.out
index 6301dba480..9163b31277 100644
--- a/deps/v8/test/message/wasm-module-name-async.out
+++ b/deps/v8/test/message/wasm-module-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at test-module (wasm-function[0]:1)
- at *%(basename)s:19:27
+ at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-name-streaming.out b/deps/v8/test/message/wasm-module-name-streaming.out
index e16c7ad373..98fba539db 100644
--- a/deps/v8/test/message/wasm-module-name-streaming.out
+++ b/deps/v8/test/message/wasm-module-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at test-module (wasm-function[0]:1)
- at test/message/wasm-module-name-async.js:19:27
+ at test/message/wasm-module-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-no-name-async.js b/deps/v8/test/message/wasm-no-name-async.js
index f60ba35133..0078fbd46e 100644
--- a/deps/v8/test/message/wasm-no-name-async.js
+++ b/deps/v8/test/message/wasm-no-name-async.js
@@ -5,7 +5,6 @@
// Flags: --expose-wasm --no-stress-opt
load('test/mjsunit/mjsunit.js');
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/message/wasm-no-name-async.out b/deps/v8/test/message/wasm-no-name-async.out
index 0a299aaaed..4c622a7583 100644
--- a/deps/v8/test/message/wasm-no-name-async.out
+++ b/deps/v8/test/message/wasm-no-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at wasm-function[0]:1
- at *%(basename)s:18:27
+ at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-no-name-streaming.out b/deps/v8/test/message/wasm-no-name-streaming.out
index e4bcb2f48e..59e070b0b8 100644
--- a/deps/v8/test/message/wasm-no-name-streaming.out
+++ b/deps/v8/test/message/wasm-no-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at wasm-function[0]:1
- at test/message/wasm-no-name-async.js:18:27
+ at test/message/wasm-no-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-trace-memory.js b/deps/v8/test/message/wasm-trace-memory.js
index 53c46073ec..23425f4ddb 100644
--- a/deps/v8/test/message/wasm-trace-memory.js
+++ b/deps/v8/test/message/wasm-trace-memory.js
@@ -5,7 +5,6 @@
// Flags: --no-stress-opt --trace-wasm-memory --no-liftoff --no-future
// Flags: --no-wasm-tier-up
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/arguments.js b/deps/v8/test/mjsunit/arguments.js
index ad12540c6d..6f04115c8b 100644
--- a/deps/v8/test/mjsunit/arguments.js
+++ b/deps/v8/test/mjsunit/arguments.js
@@ -366,3 +366,14 @@ assertEquals(117, arg_set(0xFFFFFFFF));
f7(1,2,3,4,5,6,7);
f7(1,2,3,4,5,6,7,8);
})();
+
+(function testArgumentsHole() {
+ function f(a) {
+ arguments[3] = 1;
+ return arguments[2];
+ };
+
+ assertEquals(undefined, f(1));
+ assertEquals(undefined, f(1));
+ assertEquals(undefined, f(1));
+})();
diff --git a/deps/v8/test/mjsunit/array-constructor-feedback.js b/deps/v8/test/mjsunit/array-constructor-feedback.js
index 44d132820c..aa6fbbed54 100644
--- a/deps/v8/test/mjsunit/array-constructor-feedback.js
+++ b/deps/v8/test/mjsunit/array-constructor-feedback.js
@@ -92,40 +92,6 @@ function assertKind(expected, obj, name_opt) {
})();
-// Test: Ensure that inlined array calls in crankshaft learn from deopts
-// based on the move to a dictionary for the array.
-(function() {
- function bar(len) {
- return new Array(len);
- }
- a = bar(10);
- a[0] = "a string";
- a = bar(10);
- assertKind(elements_kind.fast, a);
- %OptimizeFunctionOnNextCall(bar);
- a = bar(10);
- assertKind(elements_kind.fast, a);
- assertOptimized(bar);
- bar(10000);
- assertOptimized(bar);
-
- function barn(one, two, three) {
- return new Array(one, two, three);
- }
-
- a = barn(1, 2, 3);
- a[1] = "a string";
- a = barn(1, 2, 3);
- assertKind(elements_kind.fast, a);
- %OptimizeFunctionOnNextCall(barn);
- a = barn(1, 2, 3);
- assertKind(elements_kind.fast, a);
- assertOptimized(barn);
- a = barn(1, "oops", 3);
- assertOptimized(barn);
-})();
-
-
// Test: When a method with array constructor is crankshafted, the type
// feedback for elements kind is baked in. Verify that transitions don't
// change it anymore
diff --git a/deps/v8/test/mjsunit/array-push5.js b/deps/v8/test/mjsunit/array-push5.js
index 9961ff98c3..88ebd3a4a2 100644
--- a/deps/v8/test/mjsunit/array-push5.js
+++ b/deps/v8/test/mjsunit/array-push5.js
@@ -14,6 +14,7 @@ my_array_proto.__proto__ = [].__proto__;
function push_wrapper_2(array, value) {
array.push(value);
}
+%PrepareFunctionForOptimization(push_wrapper_2);
array = [];
array.__proto__ = my_array_proto;
push_wrapper_2(array, 66);
diff --git a/deps/v8/test/mjsunit/array-reduce.js b/deps/v8/test/mjsunit/array-reduce.js
index dd7c378847..f9e8aeee22 100644
--- a/deps/v8/test/mjsunit/array-reduce.js
+++ b/deps/v8/test/mjsunit/array-reduce.js
@@ -509,6 +509,13 @@ testReduce("reduce", "ArrayManipulationShort", 3,
[1, 2, 1, [1, 2], 3],
], arr, manipulator, 0);
+var arr = [1, 2, 3, 4];
+testReduce("reduceRight", "RightArrayManipulationShort", 7,
+ [[0, 4, 3, [1, 2, 3, 4], 4],
+ [4, 2, 1, [1, 2], 6],
+ [6, 1, 0, [1], 7],
+ ], arr, manipulator, 0);
+
var arr = [1, 2, 3, 4, 5];
testReduce("reduce", "ArrayManipulationLonger", 10,
[[0, 1, 0, [1, 2, 3, 4, 5], 1],
diff --git a/deps/v8/test/mjsunit/code-coverage-ad-hoc.js b/deps/v8/test/mjsunit/code-coverage-ad-hoc.js
index 184c7d52b7..b6ae4620ea 100644
--- a/deps/v8/test/mjsunit/code-coverage-ad-hoc.js
+++ b/deps/v8/test/mjsunit/code-coverage-ad-hoc.js
@@ -57,3 +57,19 @@ fib(5);
[{"start":0,"end":80,"count":1},
{"start":0,"end":72,"count":1}]
);
+
+TestCoverageNoGC(
+"https://crbug.com/927464",
+`
+!function f() { // 0000
+ function unused() { nop(); } // 0100
+ nop(); // 0150
+}(); // 0200
+`,
+[{"start":0,"end":199,"count":1},
+ {"start":1,"end":151,"count":1}
+ // The unused function is unfortunately not marked as unused in best-effort
+ // code coverage, as the information about its source range is discarded
+ // entirely.
+]
+);
diff --git a/deps/v8/test/mjsunit/code-coverage-block-opt.js b/deps/v8/test/mjsunit/code-coverage-block-opt.js
index ee21ff6a80..99f0a55e74 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-opt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-opt.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt --opt
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-bytecode --turbo-inlining
// Files: test/mjsunit/code-coverage-utils.js
if (isNeverOptimizeLiteMode()) {
diff --git a/deps/v8/test/mjsunit/code-coverage-precise.js b/deps/v8/test/mjsunit/code-coverage-precise.js
index 2593ed64a0..3c70408174 100644
--- a/deps/v8/test/mjsunit/code-coverage-precise.js
+++ b/deps/v8/test/mjsunit/code-coverage-precise.js
@@ -50,4 +50,17 @@ for (var i = 0; i < 10; i++) {
[{"start":0,"end":63,"count":1},{"start":41,"end":48,"count":5}]
);
+TestCoverage(
+"https://crbug.com/927464",
+`
+!function f() { // 0000
+ function unused() { nop(); } // 0100
+ nop(); // 0150
+}(); // 0200
+`,
+[{"start":0,"end":199,"count":1},
+ {"start":1,"end":151,"count":1},
+ {"start":52,"end":80,"count":0}]
+);
+
%DebugTogglePreciseCoverage(false);
diff --git a/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js b/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
index 1026b68342..1388de7c10 100644
--- a/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
+++ b/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
@@ -11,6 +11,7 @@
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -24,6 +25,7 @@
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -37,6 +39,7 @@
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -50,6 +53,7 @@
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -63,6 +67,7 @@
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -76,6 +81,7 @@
function foo(a) { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
@@ -96,6 +102,7 @@
function foo(a) { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
@@ -116,6 +123,7 @@
function foo(a) { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
@@ -136,6 +144,7 @@
function foo(a) { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
@@ -159,6 +168,7 @@
function foo(a) { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
diff --git a/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js b/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js
index c1057e1d1b..2a7c9643ec 100644
--- a/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js
+++ b/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js
@@ -11,6 +11,7 @@
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -24,6 +25,7 @@
function foo() { return a != b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -38,6 +40,7 @@
function foo(a) { return a == b; }
// Warmup
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
@@ -52,6 +55,7 @@
assertUnoptimized(foo);
// Make sure TurboFan learns the new feedback
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertFalse(foo("a"));
assertOptimized(foo);
@@ -65,6 +69,7 @@
function foo(a) { return a != b; }
// Warmup
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(b));
assertTrue(foo(a));
assertFalse(foo(b));
@@ -78,6 +83,7 @@
assertUnoptimized(foo);
// Make sure TurboFan learns the new feedback
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertTrue(foo("a"));
assertOptimized(foo);
@@ -91,6 +97,7 @@
function foo(a, b) { return a == b; }
// Warmup
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b, b));
assertFalse(foo(a, b));
assertTrue(foo(a, a));
@@ -104,6 +111,7 @@
assertUnoptimized(foo);
// Make sure TurboFan learns the new feedback
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertFalse(foo("a", b));
assertOptimized(foo);
@@ -116,6 +124,7 @@
function foo(a, b) { return a != b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(b, b));
assertTrue(foo(a, b));
assertFalse(foo(a, a));
@@ -129,6 +138,7 @@
assertUnoptimized(foo);
// Make sure TurboFan learns the new feedback
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertTrue(foo("a", b));
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js b/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js
index 1e1bb6ba2d..ad866aa7be 100644
--- a/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js
+++ b/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js
@@ -13,6 +13,7 @@ const undetectable = %GetUndetectable();
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -26,6 +27,7 @@ const undetectable = %GetUndetectable();
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -39,6 +41,7 @@ const undetectable = %GetUndetectable();
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -52,6 +55,7 @@ const undetectable = %GetUndetectable();
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -65,6 +69,7 @@ const undetectable = %GetUndetectable();
function foo() { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -78,6 +83,7 @@ const undetectable = %GetUndetectable();
function foo(a) { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
@@ -98,6 +104,7 @@ const undetectable = %GetUndetectable();
function foo(a, b) { return a == b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b, b));
assertFalse(foo(a, b));
assertTrue(foo(a, a));
diff --git a/deps/v8/test/mjsunit/compiler/accessor-exceptions1.js b/deps/v8/test/mjsunit/compiler/accessor-exceptions1.js
index 716d229aba..42b2f11cb3 100644
--- a/deps/v8/test/mjsunit/compiler/accessor-exceptions1.js
+++ b/deps/v8/test/mjsunit/compiler/accessor-exceptions1.js
@@ -15,6 +15,7 @@ function foo(o) {
return x;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(7, foo(o));
assertEquals(7, foo(o));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/accessor-exceptions2.js b/deps/v8/test/mjsunit/compiler/accessor-exceptions2.js
index ed6e3e21c0..2d784e63e5 100644
--- a/deps/v8/test/mjsunit/compiler/accessor-exceptions2.js
+++ b/deps/v8/test/mjsunit/compiler/accessor-exceptions2.js
@@ -15,6 +15,7 @@ function foo(o) {
return x;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(7, foo(o));
assertEquals(7, foo(o));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/alloc-object-huge.js b/deps/v8/test/mjsunit/compiler/alloc-object-huge.js
index 74b600cc7d..1333b64145 100644
--- a/deps/v8/test/mjsunit/compiler/alloc-object-huge.js
+++ b/deps/v8/test/mjsunit/compiler/alloc-object-huge.js
@@ -36,6 +36,7 @@
function test() {
return new huge();
}
+%PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/compiler/alloc-object.js b/deps/v8/test/mjsunit/compiler/alloc-object.js
index 8d19816536..78e9c51398 100644
--- a/deps/v8/test/mjsunit/compiler/alloc-object.js
+++ b/deps/v8/test/mjsunit/compiler/alloc-object.js
@@ -37,6 +37,7 @@ function test_helper(construct, a, b) {
function test(construct) {
%DeoptimizeFunction(test);
+ %PrepareFunctionForOptimization(test_helper);
test_helper(construct, 0, 0);
test_helper(construct, 0, 0);
%OptimizeFunctionOnNextCall(test_helper);
diff --git a/deps/v8/test/mjsunit/compiler/arguments-object.js b/deps/v8/test/mjsunit/compiler/arguments-object.js
new file mode 100644
index 0000000000..4562c08826
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/arguments-object.js
@@ -0,0 +1,171 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --noturbo-inlining
+
+// Ensure that the arguments object in a sloppy mode function works
+// properly when the function is called directly from optimized code.
+(function() {
+ function g() { return arguments; }
+ function f() { return g(1, 2, 3); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(g(1, 2, 3), f());
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(g(1, 2, 3), f());
+ %PrepareFunctionForOptimization(g);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(g(1, 2, 3), f());
+})();
+
+// Ensure that the arguments object in a strict mode function works
+// properly when the function is called directly from optimized code.
+(function() {
+ "use strict";
+ function g() { return arguments; }
+ function f() { return g(1, 2, 3); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(g(1, 2, 3), f());
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(g(1, 2, 3), f());
+ %PrepareFunctionForOptimization(g);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(g(1, 2, 3), f());
+})();
+
+// Ensure that the arguments object in a sloppy mode function works
+// properly when the function is called directly from optimized code
+// and the access to "arguments" is hidden inside eval().
+(function() {
+ function g() { return eval("arguments"); }
+ function f() { return g(1, 2, 3); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(g(1, 2, 3), f());
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(g(1, 2, 3), f());
+ %PrepareFunctionForOptimization(g);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(g(1, 2, 3), f());
+})();
+
+// Ensure that the arguments object in a strict mode function works
+// properly when the function is called directly from optimized code
+// and the access to "arguments" is hidden inside eval().
+(function() {
+ "use strict";
+ function g() { return eval("arguments"); }
+ function f() { return g(1, 2, 3); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(g(1, 2, 3), f());
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(g(1, 2, 3), f());
+ %PrepareFunctionForOptimization(g);
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(g(1, 2, 3), f());
+})();
+
+// Ensure that the `Function.arguments` accessor does the
+// right thing in sloppy mode functions called directly
+// from optimized code.
+(function() {
+ function h() { return g.arguments; }
+ function g() { return h(); }
+ function f() { return g(1, 2, 3); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(g(1, 2, 3), f());
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(g(1, 2, 3), f());
+ %PrepareFunctionForOptimization(g);
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(g(1, 2, 3), f());
+})();
+(function() {
+ function h() { return g.arguments; }
+ function g() { return h(); }
+ function f() { "use strict"; return g(1, 2, 3); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(g(1, 2, 3), f());
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(g(1, 2, 3), f());
+ %PrepareFunctionForOptimization(g);
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(g(1, 2, 3), f());
+})();
+(function() {
+ function h() { "use strict"; return g.arguments; }
+ function g() { return h(); }
+ function f() { return g(1, 2, 3); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(g(1, 2, 3), f());
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(g(1, 2, 3), f());
+ %PrepareFunctionForOptimization(g);
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(g(1, 2, 3), f());
+})();
+(function() {
+ function h() { "use strict"; return g.arguments; }
+ function g() { return h(); }
+ function f() { "use strict"; return g(1, 2, 3); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(g(1, 2, 3), f());
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(g(1, 2, 3), f());
+ %PrepareFunctionForOptimization(g);
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(g(1, 2, 3), f());
+})();
+
+// Ensure that `Function.arguments` works properly in
+// combination with the `Function.caller` property.
+(function() {
+ function h() { return h.caller.arguments; }
+ function g() { return h(); }
+ function f() { return g(1, 2, 3); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(g(1, 2, 3), f());
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(g(1, 2, 3), f());
+ %PrepareFunctionForOptimization(g);
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(g(1, 2, 3), f());
+})();
+(function() {
+ function h() { return h.caller.arguments; }
+ function g() { return h(); }
+ function f() { "use strict"; return g(1, 2, 3); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(g(1, 2, 3), f());
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(g(1, 2, 3), f());
+ %PrepareFunctionForOptimization(g);
+ assertEquals(g(1, 2, 3), f());
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(g(1, 2, 3), f());
+})();
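
The new arguments-object.js test above distinguishes mapped (sloppy) from unmapped (strict) arguments objects and checks both when the callee is reached from optimized code. The language-level difference it relies on is observable without any natives; this sketch is plain JavaScript:

  // Sloppy mode: the arguments object aliases the formal parameters.
  function sloppy(a) { arguments[0] = 42; return a; }
  // Strict mode: the arguments object is a detached snapshot.
  function strict(a) { "use strict"; arguments[0] = 42; return a; }
  sloppy(1);  // returns 42: the write through arguments changed `a`
  strict(1);  // returns 1: the write did not affect `a`
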
diff --git a/deps/v8/test/mjsunit/compiler/array-buffer-is-view.js b/deps/v8/test/mjsunit/compiler/array-buffer-is-view.js
index b56763b5b2..f2e5c30101 100644
--- a/deps/v8/test/mjsunit/compiler/array-buffer-is-view.js
+++ b/deps/v8/test/mjsunit/compiler/array-buffer-is-view.js
@@ -15,6 +15,7 @@
return ArrayBuffer.isView({x}.x);
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(Symbol()));
assertFalse(foo("some string"));
assertFalse(foo(new Object()));
@@ -41,6 +42,7 @@
return ArrayBuffer.isView(x);
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(1));
assertFalse(foo(1.1));
assertFalse(foo(Symbol()));
diff --git a/deps/v8/test/mjsunit/compiler/array-constructor.js b/deps/v8/test/mjsunit/compiler/array-constructor.js
index eef242714b..b45f5c4965 100644
--- a/deps/v8/test/mjsunit/compiler/array-constructor.js
+++ b/deps/v8/test/mjsunit/compiler/array-constructor.js
@@ -8,6 +8,7 @@
(() => {
function foo(x) { return Array(!!x); }
+ %PrepareFunctionForOptimization(foo);
assertEquals([true], foo(true));
assertEquals([false], foo(false));
%OptimizeFunctionOnNextCall(foo);
@@ -19,6 +20,7 @@
(() => {
function foo(x) { return new Array(!!x); }
+ %PrepareFunctionForOptimization(foo);
assertEquals([true], foo(true));
assertEquals([false], foo(false));
%OptimizeFunctionOnNextCall(foo);
@@ -30,6 +32,7 @@
(() => {
function foo(x) { return Array("" + x); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(["a"], foo("a"));
assertEquals(["b"], foo("b"));
%OptimizeFunctionOnNextCall(foo);
@@ -41,6 +44,7 @@
(() => {
function foo(x) { return new Array("" + x); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(["a"], foo("a"));
assertEquals(["b"], foo("b"));
%OptimizeFunctionOnNextCall(foo);
@@ -52,6 +56,7 @@
(() => {
function foo() { return Array(2); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo().length);
assertEquals(2, foo().length);
%OptimizeFunctionOnNextCall(foo);
@@ -62,6 +67,7 @@
(() => {
function foo() { return new Array(2); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo().length);
assertEquals(2, foo().length);
%OptimizeFunctionOnNextCall(foo);
@@ -72,6 +78,7 @@
(() => {
function foo(x, y, z) { return Array(x, y, z); }
+ %PrepareFunctionForOptimization(foo);
assertEquals([1, 2, 3], foo(1, 2, 3));
assertEquals([1, 2, 3], foo(1, 2, 3));
%OptimizeFunctionOnNextCall(foo);
@@ -82,6 +89,7 @@
(() => {
function foo(x, y, z) { return new Array(x, y, z); }
+ %PrepareFunctionForOptimization(foo);
assertEquals([1, 2, 3], foo(1, 2, 3));
assertEquals([1, 2, 3], foo(1, 2, 3));
%OptimizeFunctionOnNextCall(foo);
@@ -92,6 +100,7 @@
(() => {
function foo(x) { try { return new Array(x) } catch (e) { return e } }
+ %PrepareFunctionForOptimization(foo);
assertEquals([], foo(0));
assertEquals([], foo(0));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/array-every.js b/deps/v8/test/mjsunit/compiler/array-every.js
index 5064bd557d..a3f609e4df 100644
--- a/deps/v8/test/mjsunit/compiler/array-every.js
+++ b/deps/v8/test/mjsunit/compiler/array-every.js
@@ -10,6 +10,7 @@
return a.every(x => x === o.x);
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo([3, 3, 3], {x:3}));
assertFalse(foo([3, 3, 2], {x:3}));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/array-find.js b/deps/v8/test/mjsunit/compiler/array-find.js
index 419a758ac7..94fb2cb5df 100644
--- a/deps/v8/test/mjsunit/compiler/array-find.js
+++ b/deps/v8/test/mjsunit/compiler/array-find.js
@@ -10,6 +10,7 @@
return a.find(x => x === o.x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(3, foo([1, 2, 3], {x:3}));
assertEquals(undefined, foo([0, 1, 2], {x:3}));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/array-findindex.js b/deps/v8/test/mjsunit/compiler/array-findindex.js
index 583f553ce4..881078f24b 100644
--- a/deps/v8/test/mjsunit/compiler/array-findindex.js
+++ b/deps/v8/test/mjsunit/compiler/array-findindex.js
@@ -10,6 +10,7 @@
return a.findIndex(x => x === o.x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo([1, 2, 3], {x:3}));
assertEquals(-1, foo([0, 1, 2], {x:3}));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/array-is-array.js b/deps/v8/test/mjsunit/compiler/array-is-array.js
index 37c916ddac..f41120b029 100644
--- a/deps/v8/test/mjsunit/compiler/array-is-array.js
+++ b/deps/v8/test/mjsunit/compiler/array-is-array.js
@@ -11,6 +11,7 @@
return Array.isArray([]);
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -24,6 +25,7 @@
return Array.isArray(new Proxy([], {}));
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -37,6 +39,7 @@
return Array.isArray({});
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -50,6 +53,7 @@
return Array.isArray(new Proxy({}, {}));
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -63,6 +67,7 @@
return Array.isArray(x);
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo({}));
assertFalse(foo(new Proxy({}, {})));
assertTrue(foo([]));
@@ -97,6 +102,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo([]), TypeError);
assertInstanceof(foo({}), TypeError);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/array-length.js b/deps/v8/test/mjsunit/compiler/array-length.js
index 462a1e7739..10be42a439 100644
--- a/deps/v8/test/mjsunit/compiler/array-length.js
+++ b/deps/v8/test/mjsunit/compiler/array-length.js
@@ -38,11 +38,15 @@ function Test(a0, a2, a5) {
var a0 = [];
var a2 = [1,2];
var a5 = [1,2,3,4,5];
+%PrepareFunctionForOptimization(ArrayLength);
for (var i = 0; i < 5; i++) Test(a0, a2, a5);
%OptimizeFunctionOnNextCall(ArrayLength);
+Test(a0, a2, a5);
+%PrepareFunctionForOptimization(Test);
%OptimizeFunctionOnNextCall(Test);
Test(a0, a2, a5);
assertEquals("undefined", typeof(ArrayLength(0)));
+%PrepareFunctionForOptimization(Test);
for (var i = 0; i < 5; i++) Test(a0, a2, a5);
%OptimizeFunctionOnNextCall(Test);
Test(a0, a2, a5);
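
The array-length.js hunk above shows a wrinkle of the new contract that recurs throughout this diff (constructor-inlining.js and dead-string-add-warm.js below do the same): %PrepareFunctionForOptimization must be called again before each further %OptimizeFunctionOnNextCall, for instance after the function has deoptimized. A sketch of that cycle, using an illustrative property load that deopts on an unexpected object shape:

  function get(o) { return o.x; }          // illustrative
  %PrepareFunctionForOptimization(get);
  get({x: 1}); get({x: 2});                // monomorphic feedback
  %OptimizeFunctionOnNextCall(get);
  get({x: 3});                             // optimized
  get({y: 0, x: 4});                       // unexpected map: may deoptimize
  %PrepareFunctionForOptimization(get);    // prepare again before re-requesting
  %OptimizeFunctionOnNextCall(get);
  get({x: 5});                             // optimized again
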
diff --git a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
index c26aeda7dc..8f386d06ef 100644
--- a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
+++ b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
@@ -20,6 +20,7 @@ function runTest(f, message, mkICTraining, deoptArg) {
let t2 = t();
let t3 = t();
+ %PrepareFunctionForOptimization(f);
for (let a of t1) {
f(a.arr, () => a.el);
}
@@ -45,6 +46,7 @@ function runTest(f, message, mkICTraining, deoptArg) {
message_optimized = message + " should have been unoptimized"
f(a1.arr, () => a1.el);
assertUnoptimized(f, undefined, message_unoptimized);
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
// No speculation should protect against further deopts.
f(a2.arr, () => a2.el);
diff --git a/deps/v8/test/mjsunit/compiler/array-push-1.js b/deps/v8/test/mjsunit/compiler/array-push-1.js
index 58afd6ffe6..3851745e6f 100644
--- a/deps/v8/test/mjsunit/compiler/array-push-1.js
+++ b/deps/v8/test/mjsunit/compiler/array-push-1.js
@@ -10,6 +10,7 @@
return a.push();
}
+ %PrepareFunctionForOptimization(push0);
assertEquals(0, push0([]));
assertEquals(1, push0([1]));
%OptimizeFunctionOnNextCall(push0);
@@ -19,6 +20,7 @@
return a.push(1);
}
+ %PrepareFunctionForOptimization(push1);
assertEquals(1, push1([]));
assertEquals(2, push1([1]));
%OptimizeFunctionOnNextCall(push1);
@@ -28,6 +30,7 @@
return a.push(1, 2);
}
+ %PrepareFunctionForOptimization(push2);
assertEquals(2, push2([]));
assertEquals(3, push2([1]));
%OptimizeFunctionOnNextCall(push2);
@@ -37,6 +40,7 @@
return a.push(1, 2, 3);
}
+ %PrepareFunctionForOptimization(push3);
assertEquals(3, push3([]));
assertEquals(4, push3([1]));
%OptimizeFunctionOnNextCall(push3);
@@ -49,6 +53,7 @@
return a.push();
}
+ %PrepareFunctionForOptimization(push0);
assertEquals(1, push0(new Array(1)));
assertEquals(2, push0(new Array(2)));
%OptimizeFunctionOnNextCall(push0);
@@ -58,6 +63,7 @@
return a.push(1);
}
+ %PrepareFunctionForOptimization(push1);
assertEquals(2, push1(new Array(1)));
assertEquals(3, push1(new Array(2)));
%OptimizeFunctionOnNextCall(push1);
@@ -67,6 +73,7 @@
return a.push(1, 2);
}
+ %PrepareFunctionForOptimization(push2);
assertEquals(3, push2(new Array(1)));
assertEquals(4, push2(new Array(2)));
%OptimizeFunctionOnNextCall(push2);
@@ -76,6 +83,7 @@
return a.push(1, 2, 3);
}
+ %PrepareFunctionForOptimization(push3);
assertEquals(4, push3(new Array(1)));
assertEquals(5, push3(new Array(2)));
%OptimizeFunctionOnNextCall(push3);
@@ -88,6 +96,7 @@
return a.push();
}
+ %PrepareFunctionForOptimization(push0);
assertEquals(1, push0([1.1]));
assertEquals(2, push0([1.1, 2.2]));
%OptimizeFunctionOnNextCall(push0);
@@ -97,6 +106,7 @@
return a.push(1.1);
}
+ %PrepareFunctionForOptimization(push1);
assertEquals(2, push1([1.1]));
assertEquals(3, push1([1.1, 2.2]));
%OptimizeFunctionOnNextCall(push1);
@@ -106,6 +116,7 @@
return a.push(1.1, 2.2);
}
+ %PrepareFunctionForOptimization(push2);
assertEquals(3, push2([1.1]));
assertEquals(4, push2([1.1, 2.2]));
%OptimizeFunctionOnNextCall(push2);
@@ -115,6 +126,7 @@
return a.push(1.1, 2.2, 3.3);
}
+ %PrepareFunctionForOptimization(push3);
assertEquals(4, push3([1.1]));
assertEquals(5, push3([1.1, 2.2]));
%OptimizeFunctionOnNextCall(push3);
@@ -127,6 +139,7 @@
return a.push();
}
+ %PrepareFunctionForOptimization(push0);
assertEquals(2, push0([, 1.1]));
assertEquals(3, push0([, 1.1, 2.2]));
%OptimizeFunctionOnNextCall(push0);
@@ -136,6 +149,7 @@
return a.push(1.1);
}
+ %PrepareFunctionForOptimization(push1);
assertEquals(3, push1([, 1.1]));
assertEquals(4, push1([, 1.1, 2.2]));
%OptimizeFunctionOnNextCall(push1);
@@ -145,6 +159,7 @@
return a.push(1.1, 2.2);
}
+ %PrepareFunctionForOptimization(push2);
assertEquals(4, push2([, 1.1]));
assertEquals(5, push2([, 1.1, 2.2]));
%OptimizeFunctionOnNextCall(push2);
@@ -154,6 +169,7 @@
return a.push(1.1, 2.2, 3.3);
}
+ %PrepareFunctionForOptimization(push3);
assertEquals(5, push3([, 1.1]));
assertEquals(6, push3([, 1.1, 2.2]));
%OptimizeFunctionOnNextCall(push3);
@@ -166,6 +182,7 @@
return a.push();
}
+ %PrepareFunctionForOptimization(push0);
assertEquals(1, push0(['1']));
assertEquals(2, push0(['1', '2']));
%OptimizeFunctionOnNextCall(push0);
@@ -175,6 +192,7 @@
return a.push('1');
}
+ %PrepareFunctionForOptimization(push1);
assertEquals(2, push1(['1']));
assertEquals(3, push1(['1', '2']));
%OptimizeFunctionOnNextCall(push1);
@@ -184,6 +202,7 @@
return a.push('1', '2');
}
+ %PrepareFunctionForOptimization(push2);
assertEquals(3, push2(['1']));
assertEquals(4, push2(['1', '2']));
%OptimizeFunctionOnNextCall(push2);
@@ -193,6 +212,7 @@
return a.push('1', '2', '3');
}
+ %PrepareFunctionForOptimization(push3);
assertEquals(4, push3(['1']));
assertEquals(5, push3(['1', '2']));
%OptimizeFunctionOnNextCall(push3);
@@ -205,6 +225,7 @@
return a.push();
}
+ %PrepareFunctionForOptimization(push0);
assertEquals(2, push0([, '1']));
assertEquals(3, push0([, '1', '2']));
%OptimizeFunctionOnNextCall(push0);
@@ -214,6 +235,7 @@
return a.push('1');
}
+ %PrepareFunctionForOptimization(push1);
assertEquals(3, push1([, '1']));
assertEquals(4, push1([, '1', '2']));
%OptimizeFunctionOnNextCall(push1);
@@ -223,6 +245,7 @@
return a.push('1', '2');
}
+ %PrepareFunctionForOptimization(push2);
assertEquals(4, push2([, '1']));
assertEquals(5, push2([, '1', '2']));
%OptimizeFunctionOnNextCall(push2);
@@ -232,6 +255,7 @@
return a.push('1', '2', '3');
}
+ %PrepareFunctionForOptimization(push3);
assertEquals(5, push3([, '1']));
assertEquals(6, push3([, '1', '2']));
%OptimizeFunctionOnNextCall(push3);
diff --git a/deps/v8/test/mjsunit/compiler/array-push-2.js b/deps/v8/test/mjsunit/compiler/array-push-2.js
index cb18d71d63..a99e1d065e 100644
--- a/deps/v8/test/mjsunit/compiler/array-push-2.js
+++ b/deps/v8/test/mjsunit/compiler/array-push-2.js
@@ -8,6 +8,7 @@
(function() {
const a = [];
const foo = (x, y) => a.push(x, y);
+ %PrepareFunctionForOptimization(foo);
foo(1, 2);
foo(3, 4);
%OptimizeFunctionOnNextCall(foo);
@@ -17,6 +18,7 @@
(function() {
const a = [];
const foo = (x, y) => a.push(x, y);
+ %PrepareFunctionForOptimization(foo);
foo(1, 2);
foo(3, 4);
%OptimizeFunctionOnNextCall(foo);
@@ -28,6 +30,7 @@
(function() {
const a = [];
const foo = (x, y) => a.push(x, y);
+ %PrepareFunctionForOptimization(foo);
foo(1, 2);
foo(3, 4);
%OptimizeFunctionOnNextCall(foo);
@@ -37,6 +40,7 @@
(function() {
const a = [];
const foo = (x, y) => a.push(x, y);
+ %PrepareFunctionForOptimization(foo);
foo(1, 2);
foo(3, 4);
%OptimizeFunctionOnNextCall(foo);
@@ -48,6 +52,7 @@
(function() {
const a = [0.5];
const foo = (x, y) => a.push(x, y);
+ %PrepareFunctionForOptimization(foo);
foo(1, 2);
foo(3, 4);
%OptimizeFunctionOnNextCall(foo);
@@ -57,6 +62,7 @@
(function() {
const a = [0.5];
const foo = (x, y) => a.push(x, y);
+ %PrepareFunctionForOptimization(foo);
foo(1, 2);
foo(3, 4);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/array-push-3.js b/deps/v8/test/mjsunit/compiler/array-push-3.js
index f648eb9ed9..3fa59d11ff 100644
--- a/deps/v8/test/mjsunit/compiler/array-push-3.js
+++ b/deps/v8/test/mjsunit/compiler/array-push-3.js
@@ -9,6 +9,7 @@
const a = [];
const bar = x => { a.push(x); return x; };
const foo = x => a.push(bar(x), bar(x));
+ %PrepareFunctionForOptimization(foo);
foo(1);
foo(2);
%OptimizeFunctionOnNextCall(foo);
@@ -22,6 +23,7 @@
const a = [];
const bar = x => { a.push(y); return x; }
const foo = x => a.push(bar(x), bar(x));
+ %PrepareFunctionForOptimization(foo);
foo(1);
y = 2;
foo(2);
@@ -38,6 +40,7 @@
const a = [0.5];
const bar = x => { a.push(y); return x; }
const foo = x => a.push(bar(x), bar(x));
+ %PrepareFunctionForOptimization(foo);
foo(1);
y = 2;
foo(2);
diff --git a/deps/v8/test/mjsunit/compiler/array-slice-clone.js b/deps/v8/test/mjsunit/compiler/array-slice-clone.js
index 610fa17c1a..de5d29f541 100644
--- a/deps/v8/test/mjsunit/compiler/array-slice-clone.js
+++ b/deps/v8/test/mjsunit/compiler/array-slice-clone.js
@@ -20,12 +20,16 @@
return arr.slice(0);
}
+ %PrepareFunctionForOptimization(slice0);
+
assertEquals(arr, slice());
assertFalse(arr === slice());
assertEquals(slice(), slice0());
assertEquals(slice0(), slice());
%OptimizeFunctionOnNextCall(slice0);
+ assertEquals(slice(), slice0());
+ %PrepareFunctionForOptimization(slice);
%OptimizeFunctionOnNextCall(slice);
assertEquals(slice(), slice0());
@@ -41,6 +45,8 @@
return arr.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
assertEquals(arr, slice());
assertEquals(slice(), arr);
@@ -60,6 +66,8 @@
return arr.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
assertEquals(arr, slice());
assertEquals(slice(), arr);
@@ -71,6 +79,7 @@
arr.push(7.2);
slice();
+ %PrepareFunctionForOptimization(slice);
%OptimizeFunctionOnNextCall(slice);
// Trigger opt again
slice();
@@ -92,6 +101,8 @@
class MyArray extends Array {};
array.constructor = MyArray;
+ %PrepareFunctionForOptimization(slice);
+
slice(); slice();
%OptimizeFunctionOnNextCall(slice);
@@ -106,6 +117,8 @@
return array.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
slice(); slice();
%OptimizeFunctionOnNextCall(slice);
@@ -128,6 +141,8 @@
return arr.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
slice(); slice();
arr.foo = 6.2;
@@ -155,6 +170,8 @@
return arr.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
slice(iarr); slice(darr);
slice(iarr); slice(darr);
@@ -182,6 +199,8 @@
return array.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
assertEquals(slice(),array);
slice();
@@ -205,6 +224,8 @@
return x.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
slice(); slice();
%OptimizeFunctionOnNextCall(slice);
@@ -221,6 +242,8 @@
return array.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
assertEquals(slice(),array);
slice();
@@ -238,6 +261,8 @@
return array.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
assertEquals(slice(),array);
slice();
@@ -259,6 +284,8 @@
return arr.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
// make array's map is_prototype_map()
var x = {__proto__ : array};
@@ -284,6 +311,8 @@
return array.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
assertEquals(slice(),array);
slice();
@@ -305,6 +334,8 @@
return array.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
assertEquals(slice(),array);
slice();
@@ -328,6 +359,8 @@
return array.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
assertEquals(slice(),array);
slice();
@@ -348,6 +381,8 @@
return array.slice();
}
+ %PrepareFunctionForOptimization(slice);
+
assertEquals(slice(),array);
slice();
diff --git a/deps/v8/test/mjsunit/compiler/array-some.js b/deps/v8/test/mjsunit/compiler/array-some.js
index 411a5881de..c667211490 100644
--- a/deps/v8/test/mjsunit/compiler/array-some.js
+++ b/deps/v8/test/mjsunit/compiler/array-some.js
@@ -10,6 +10,7 @@
return a.some(x => x === o.x);
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo([1, 2, 3], {x:3}));
assertFalse(foo([0, 1, 2], {x:3}));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/array-species.js b/deps/v8/test/mjsunit/compiler/array-species.js
index d1e8a1d180..37c14b588a 100644
--- a/deps/v8/test/mjsunit/compiler/array-species.js
+++ b/deps/v8/test/mjsunit/compiler/array-species.js
@@ -12,6 +12,8 @@ function f() {
let y;
+%PrepareFunctionForOptimization(f);
+
y = f();
assertFalse(y instanceof Foo);
assertInstanceof(y, Array);
diff --git a/deps/v8/test/mjsunit/compiler/array-subclass.js b/deps/v8/test/mjsunit/compiler/array-subclass.js
index d20b669661..b85551c362 100644
--- a/deps/v8/test/mjsunit/compiler/array-subclass.js
+++ b/deps/v8/test/mjsunit/compiler/array-subclass.js
@@ -10,6 +10,7 @@
function foo() { return new A; }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(0, foo().length);
assertInstanceof(foo(), A);
@@ -26,6 +27,7 @@
function foo() { return new A(L); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(L, foo().length);
assertInstanceof(foo(), A);
@@ -42,6 +44,7 @@
function foo() { return new A(L); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(L, foo().length);
assertInstanceof(foo(), A);
@@ -57,6 +60,7 @@
function foo() { return new A(true); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(1, foo().length);
assertEquals(true, foo()[0]);
@@ -75,6 +79,7 @@
function foo() { return new A(""); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(1, foo().length);
assertEquals("", foo()[0]);
@@ -94,6 +99,7 @@
function foo() { return new A(O); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(1, foo().length);
assertSame(O, foo()[0]);
@@ -112,6 +118,7 @@
function foo() { return new A(1, 2, 3); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(3, foo().length);
assertEquals(1, foo()[0]);
@@ -131,6 +138,7 @@
function foo() { return new A(1.1, 2.2, 3.3); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(3, foo().length);
assertEquals(1.1, foo()[0]);
@@ -150,6 +158,7 @@
function foo() { return new A("a", "b", "c", "d"); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(4, foo().length);
assertEquals("a", foo()[0]);
@@ -176,6 +185,7 @@
function foo() { return new A; }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(0, foo().length);
assertEquals(1, foo().bar);
@@ -200,6 +210,7 @@
function foo() { return new A(L); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(L, foo().length);
assertEquals(1, foo().bar);
@@ -224,6 +235,7 @@
function foo() { return new A(L); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(L, foo().length);
assertEquals(1, foo().bar);
@@ -247,6 +259,7 @@
function foo() { return new A(true); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(1, foo().length);
assertEquals(true, foo()[0]);
@@ -273,6 +286,7 @@
function foo() { return new A(""); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(1, foo().length);
assertEquals("", foo()[0]);
@@ -300,6 +314,7 @@
function foo() { return new A(O); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(1, foo().length);
assertSame(O, foo()[0]);
@@ -326,6 +341,7 @@
function foo() { return new A(1, 2, 3); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(3, foo().length);
assertEquals(1, foo()[0]);
@@ -352,6 +368,7 @@
function foo() { return new A(1.1, 2.2, 3.3); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(3, foo().length);
assertEquals(1.1, foo()[0]);
@@ -378,6 +395,7 @@
function foo() { return new A("a", "b", "c", "d"); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertEquals(4, foo().length);
assertEquals("a", foo()[0]);
diff --git a/deps/v8/test/mjsunit/compiler/assignment-deopt.js b/deps/v8/test/mjsunit/compiler/assignment-deopt.js
index 2b00625417..175a344f99 100644
--- a/deps/v8/test/mjsunit/compiler/assignment-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/assignment-deopt.js
@@ -42,6 +42,8 @@ assertEquals(2.1, o.x);
// Test deopt with count operation on named property.
function assign2(p) { p.x += 1 }
+%PrepareFunctionForOptimization(assign2);
+
o.x = "42";
assign2(o);
assertEquals("421", o.x);
@@ -60,6 +62,8 @@ assertEquals(max_smi + 10, o.x);
// Test deopt with count operation on keyed property.
function assign3(a, b) { a[b] += 1; }
+%PrepareFunctionForOptimization(assign3);
+
o = ["42"];
assign3(o, 0);
assertEquals("421", o[0]);
@@ -83,12 +87,14 @@ o[0] = 0;
for(var i = 0; i < 5; i++) {
assign3(o, 0);
}
+%PrepareFunctionForOptimization(assign3);
%OptimizeFunctionOnNextCall(assign3);
assign3(o, 0);
assign3(o, 1);
// Test bailout with count operation in a value context.
function assign5(x,y) { return (x += 1) + y; }
+%PrepareFunctionForOptimization(assign5);
for (var i = 0; i < 5; ++i) assertEquals(4, assign5(2, 1));
%OptimizeFunctionOnNextCall(assign5);
assertEquals(4, assign5(2, 1));
@@ -97,6 +103,7 @@ assertEquals(4.1, assign5(2, 1.1));
assertEquals(4.1, assign5(2.1, 1));
function assign7(o,y) { return (o.x += 1) + y; }
+%PrepareFunctionForOptimization(assign7);
o = {x:0};
for (var i = 0; i < 5; ++i) {
o.x = 42;
@@ -112,6 +119,7 @@ o.x = 42.1;
assertEquals(44.1, assign7(o, 1));
function assign9(o,y) { return (o[0] += 1) + y; }
+%PrepareFunctionForOptimization(assign9);
q = [0];
for (var i = 0; i < 5; ++i) {
q[0] = 42;
@@ -128,6 +136,7 @@ assertEquals(44.1, assign9(q, 1));
// Test deopt because of a failed map check on the load.
function assign10(p) { return p.x += 1 }
+%PrepareFunctionForOptimization(assign10);
var g1 = {x:0};
var g2 = {y:0, x:42};
for (var i = 0; i < 5; ++i) {
@@ -148,6 +157,7 @@ assertEquals(43, g2.x);
o = {x:0};
var g3 = { valueOf: function() { o.y = "bar"; return 42; }};
function assign11(p) { return p.x += 1; }
+%PrepareFunctionForOptimization(assign11);
for (var i = 0; i < 5; i++) {
o.x = "a";
@@ -165,6 +175,7 @@ assertEquals("bar", o.y);
o = [0];
var g4 = { valueOf: function() { o.y = "bar"; return 42; }};
function assign12(p) { return p[0] += 1; }
+%PrepareFunctionForOptimization(assign12);
for (var i = 0; i < 5; i++) {
o[0] = "a";
diff --git a/deps/v8/test/mjsunit/compiler/boolean-protototype.js b/deps/v8/test/mjsunit/compiler/boolean-protototype.js
index 5e940d75ae..cc84eb89ea 100644
--- a/deps/v8/test/mjsunit/compiler/boolean-protototype.js
+++ b/deps/v8/test/mjsunit/compiler/boolean-protototype.js
@@ -7,6 +7,7 @@
function test1(s) {
return s.toString;
}
+%PrepareFunctionForOptimization(test1);
assertSame(test1(false), Boolean.prototype.toString);
assertSame(test1(true), Boolean.prototype.toString);
%OptimizeFunctionOnNextCall(test1);
@@ -16,6 +17,7 @@ assertSame(test1(true), Boolean.prototype.toString);
function test2(s) {
return s.valueOf;
}
+%PrepareFunctionForOptimization(test2);
assertSame(test2(false), Boolean.prototype.valueOf);
assertSame(test2(true), Boolean.prototype.valueOf);
%OptimizeFunctionOnNextCall(test2);
@@ -26,6 +28,7 @@ Boolean.prototype.foo = 42;
function test3(s) {
return s["foo"];
}
+%PrepareFunctionForOptimization(test3);
assertEquals(test3(false), 42);
assertEquals(test3(true), 42);
%OptimizeFunctionOnNextCall(test3);
@@ -36,6 +39,7 @@ Boolean.prototype.bar = function bar() { "use strict"; return this; }
function test4(s) {
return s.bar();
}
+%PrepareFunctionForOptimization(test4);
assertEquals(test4(false), false);
assertEquals(test4(true), true);
%OptimizeFunctionOnNextCall(test4);
diff --git a/deps/v8/test/mjsunit/compiler/capture-context.js b/deps/v8/test/mjsunit/compiler/capture-context.js
index dae6c42f32..615f12efb3 100644
--- a/deps/v8/test/mjsunit/compiler/capture-context.js
+++ b/deps/v8/test/mjsunit/compiler/capture-context.js
@@ -12,5 +12,6 @@ var foo = (function() {
var baz = (function (s) { return foo(s) });
+%PrepareFunctionForOptimization(baz);
%OptimizeFunctionOnNextCall(baz);
assertEquals(42 + 12, baz(12));
diff --git a/deps/v8/test/mjsunit/compiler/compare-map-elim.js b/deps/v8/test/mjsunit/compiler/compare-map-elim.js
index 288d4811a6..89d9cd8926 100644
--- a/deps/v8/test/mjsunit/compiler/compare-map-elim.js
+++ b/deps/v8/test/mjsunit/compiler/compare-map-elim.js
@@ -38,6 +38,8 @@ function x(z) {
return z.f();
}
+%PrepareFunctionForOptimization(x);
+
x(a);
x(b);
x(a);
diff --git a/deps/v8/test/mjsunit/compiler/compare-map-elim2.js b/deps/v8/test/mjsunit/compiler/compare-map-elim2.js
index 0c0540ccab..cbe841b987 100644
--- a/deps/v8/test/mjsunit/compiler/compare-map-elim2.js
+++ b/deps/v8/test/mjsunit/compiler/compare-map-elim2.js
@@ -33,6 +33,8 @@ function test_empty() {
return { value: o.value };
}
+ %PrepareFunctionForOptimization(foo);
+
function Base() {
this.v_ = 5;
}
@@ -63,6 +65,8 @@ function test_narrow1() {
return { value: o.value };
}
+ %PrepareFunctionForOptimization(foo);
+
function Base() {
this.v_ = 5;
}
@@ -97,6 +101,8 @@ function test_narrow2() {
return { value: o.value(flag) };
}
+ %PrepareFunctionForOptimization(foo);
+
function Base() {
this.v_ = 5;
}
diff --git a/deps/v8/test/mjsunit/compiler/compare-objeq-elim.js b/deps/v8/test/mjsunit/compiler/compare-objeq-elim.js
index 4492df45c3..faace8307a 100644
--- a/deps/v8/test/mjsunit/compiler/compare-objeq-elim.js
+++ b/deps/v8/test/mjsunit/compiler/compare-objeq-elim.js
@@ -62,6 +62,8 @@ function F4(a, b) {
%NeverOptimizeFunction(test);
function test(f, a, b) {
+ %PrepareFunctionForOptimization(f);
+
f(a, a);
f(a, b);
f(b, a);
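
compare-objeq-elim.js moves the preparation into the shared driver: the harness function test is pinned with %NeverOptimizeFunction so that only the function handed to it gets compiled, and it now prepares that function itself before exercising it. The same helper-driven shape appears again below in the dataview-*.js warmup helpers. A sketch of the pattern, with illustrative names:

  function driver(f) {
    %PrepareFunctionForOptimization(f);  // prepare the function under test
    f(1); f(2);                          // warm up with representative inputs
    %OptimizeFunctionOnNextCall(f);
    f(3);                                // runs optimized code
  }
  %NeverOptimizeFunction(driver);        // keep the driver itself interpreted
  driver(x => x + 1);
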
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
index 5d3c73b4e6..eca898c966 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
@@ -47,6 +47,7 @@ function add_field(obj) {
obj.c = 0;
obj.c = 3;
}
+%PrepareFunctionForOptimization(add_field);
var obj1 = new_object();
var obj2 = new_object();
add_field(obj1);
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js b/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
index 0a016ac3ca..df8d629f32 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
@@ -36,6 +36,8 @@ if (!%IsConcurrentRecompilationSupported()) {
function f(foo) { return foo.bar(); }
+%PrepareFunctionForOptimization(f);
+
var o = {};
o.__proto__ = { __proto__: { bar: function() { return 1; } } };
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js b/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
index 1ab022611c..9db0374c6d 100644
--- a/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
@@ -9,6 +9,7 @@
(function() {
const a = [1, 2, 3];
const foo = () => a[0];
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -24,6 +25,7 @@
(function() {
const a = [1, 2, 3];
const foo = () => a[0];
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-tostring.js b/deps/v8/test/mjsunit/compiler/constant-fold-tostring.js
index e9c6d916ce..e1a66a6694 100644
--- a/deps/v8/test/mjsunit/compiler/constant-fold-tostring.js
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-tostring.js
@@ -7,6 +7,7 @@
// Check that constant-folding of ToString operations works properly for NaN.
(function() {
const foo = () => `${NaN}`;
+ %PrepareFunctionForOptimization(foo);
assertEquals("NaN", foo());
assertEquals("NaN", foo());
%OptimizeFunctionOnNextCall(foo);
@@ -16,6 +17,7 @@
// Check that constant-folding of ToString operations works properly for 0/-0.
(function() {
const foo = x => `${x ? 0 : -0}`;
+ %PrepareFunctionForOptimization(foo);
assertEquals("0", foo(true));
assertEquals("0", foo(false));
assertEquals("0", foo(true));
diff --git a/deps/v8/test/mjsunit/compiler/constructor-inlining.js b/deps/v8/test/mjsunit/compiler/constructor-inlining.js
index 05b5762a53..1064997a8b 100644
--- a/deps/v8/test/mjsunit/compiler/constructor-inlining.js
+++ b/deps/v8/test/mjsunit/compiler/constructor-inlining.js
@@ -77,17 +77,22 @@ function testConstructorInlining(){
}
assertEquals(a, new Derived(true, 5, a));
+ %PrepareFunctionForOptimization(Derived);
%OptimizeFunctionOnNextCall(Derived);
assertEquals(b, new DerivedDeoptCreate(true, a, b));
+ %PrepareFunctionForOptimization(Derived);
%OptimizeFunctionOnNextCall(Derived);
assertEquals(a, new DerivedDeoptCreate(true, a, undefined));
+ %PrepareFunctionForOptimization(Derived);
%OptimizeFunctionOnNextCall(Derived);
assertEquals(5, new DerivedDeoptCreate(false, 5, 7).x);
+ %PrepareFunctionForOptimization(Derived);
%OptimizeFunctionOnNextCall(Derived);
assertEquals(7, new DerivedDeoptCreate(false, 5, 7).y);
}
testConstructorInlining();
+%PrepareFunctionForOptimization(testConstructorInlining);
%OptimizeFunctionOnNextCall(testConstructorInlining);
testConstructorInlining();
@@ -95,6 +100,7 @@ var last = undefined;
for(var i = 0; deopt_at < 0; ++i) {
deopt_at = i;
counter = 0;
+ %PrepareFunctionForOptimization(testConstructorInlining);
%OptimizeFunctionOnNextCall(testConstructorInlining);
testConstructorInlining();
if (last !== undefined) {
diff --git a/deps/v8/test/mjsunit/compiler/context-sensitivity.js b/deps/v8/test/mjsunit/compiler/context-sensitivity.js
index 1f0f1f274a..e404628482 100644
--- a/deps/v8/test/mjsunit/compiler/context-sensitivity.js
+++ b/deps/v8/test/mjsunit/compiler/context-sensitivity.js
@@ -17,6 +17,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y + x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(0));
assertEquals(2, foo(object1));
assertThrows(() => foo(thrower));
@@ -36,6 +37,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y - x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(0));
assertEquals(0, foo(object1));
assertThrows(() => foo(thrower));
@@ -55,6 +57,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y * x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(object1));
assertThrows(() => foo(thrower));
@@ -74,6 +77,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y / x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(object1));
assertThrows(() => foo(thrower));
@@ -93,6 +97,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y % x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(1));
assertEquals(0, foo(object1));
assertThrows(() => foo(thrower));
@@ -112,6 +117,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y ** x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(object1));
assertThrows(() => foo(thrower));
@@ -131,6 +137,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y | x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(object1));
assertThrows(() => foo(thrower));
@@ -150,6 +157,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y & x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(object1));
assertThrows(() => foo(thrower));
@@ -169,6 +177,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y ^ x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(1));
assertEquals(0, foo(object1));
assertThrows(() => foo(thrower));
@@ -188,6 +197,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y << x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(1));
assertEquals(2, foo(object1));
assertThrows(() => foo(thrower));
@@ -207,6 +217,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y >> x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(1));
assertEquals(0, foo(object1));
assertThrows(() => foo(thrower));
@@ -226,6 +237,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y >>> x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(1));
assertEquals(0, foo(object1));
assertThrows(() => foo(thrower));
@@ -245,6 +257,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y == x);
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(0));
assertTrue(foo(object1));
assertThrows(() => foo(thrower));
@@ -264,6 +277,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y < x);
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(0));
assertFalse(foo(object1));
assertThrows(() => foo(thrower));
@@ -283,6 +297,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => x > y);
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(0));
assertFalse(foo(object1));
assertThrows(() => foo(thrower));
@@ -302,6 +317,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => y <= x);
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(0));
assertTrue(foo(object1));
assertThrows(() => foo(thrower));
@@ -321,6 +337,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(y => x >= y);
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(0));
assertTrue(foo(object1));
assertThrows(() => foo(thrower));
@@ -340,6 +357,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(o => o instanceof c);
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(Object));
assertFalse(foo(Array));
assertThrows(() => foo({[Symbol.hasInstance]() { throw new Error(); }}));
@@ -359,6 +377,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(() => ~x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(-1));
assertEquals(~1, foo(object1));
assertThrows(() => foo(thrower));
@@ -378,6 +397,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(() => -x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(-1));
assertEquals(-1, foo(object1));
assertThrows(() => foo(thrower));
@@ -397,6 +417,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(() => ++x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(0));
assertEquals(2, foo(object1));
assertThrows(() => foo(thrower));
@@ -416,6 +437,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(() => --x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(2));
assertEquals(0, foo(object1));
assertThrows(() => foo(thrower));
@@ -436,6 +458,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(() => arguments)[0];
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0, 1));
assertEquals(1, foo(1, 2));
assertEquals(undefined, foo());
@@ -455,6 +478,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
return bar(() => args)[0];
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0, 1));
assertEquals(1, foo(1, 2));
assertEquals(undefined, foo());
@@ -491,6 +515,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
});
}
+ %PrepareFunctionForOptimization(foo);
assertEquals('Some value', foo('Another value'));
assertEquals('Another value', actualValue);
assertEquals('Another value', foo('Some value'));
@@ -516,6 +541,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
});
}
+ %PrepareFunctionForOptimization(foo);
assertEquals('abc', foo({a: 1, b: 2, c: 3}));
assertEquals('ab', foo(Object.create({a: 1, b: 2})));
%OptimizeFunctionOnNextCall(foo);
@@ -542,6 +568,7 @@ const thrower = {[Symbol.toPrimitive]() { throw new Error(); }};
});
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(6, foo(1, 2, 3));
assertEquals("abc", foo("a", "b", "c"));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/count-deopt.js b/deps/v8/test/mjsunit/compiler/count-deopt.js
index 415dadc0cf..c0977caefa 100644
--- a/deps/v8/test/mjsunit/compiler/count-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/count-deopt.js
@@ -42,6 +42,8 @@ assertEquals(2.1, o.x);
// Test deopt with count operation on named property.
function inc2(p) { p.x++ }
+%PrepareFunctionForOptimization(inc2);
+
o.x = "42";
inc2(o);
assertEquals(43, o.x);
@@ -60,6 +62,8 @@ assertEquals(max_smi + 10, o.x);
// Test deopt with count operation on keyed property.
function inc3(a, b) { a[b]++; }
+%PrepareFunctionForOptimization(inc3);
+
o = ["42"];
inc3(o, 0);
assertEquals(43, o[0]);
@@ -83,18 +87,21 @@ o[0] = 0;
for(var i = 0; i < 5; i++) {
inc3(o, 0);
}
+%PrepareFunctionForOptimization(inc3);
%OptimizeFunctionOnNextCall(inc3);
inc3(o, 0);
inc3(o, 1);
// Test bailout with count operation in a value context.
function inc4(x,y) { return (x++) + y; }
+%PrepareFunctionForOptimization(inc4);
for (var i = 0; i < 5; ++i) assertEquals(3, inc4(2, 1));
%OptimizeFunctionOnNextCall(inc4);
inc4(2, 1);
assertEquals(3.1, inc4(2, 1.1));
function inc5(x,y) { return (++x) + y; }
+%PrepareFunctionForOptimization(inc5);
for (var i = 0; i < 5; ++i) assertEquals(4, inc5(2, 1));
%OptimizeFunctionOnNextCall(inc5);
assertEquals(4, inc5(2, 1));
@@ -102,6 +109,7 @@ assertEquals(4.1, inc5(2, 1.1));
assertEquals(4.1, inc5(2.1, 1));
function inc6(o,y) { return (o.x++) + y; }
+%PrepareFunctionForOptimization(inc6);
o = {x:0};
for (var i = 0; i < 5; ++i) {
o.x = 42;
@@ -116,6 +124,7 @@ o.x = 42.1;
assertEquals(43.1, inc6(o, 1));
function inc7(o,y) { return (++o.x) + y; }
+%PrepareFunctionForOptimization(inc7);
o = {x:0};
for (var i = 0; i < 5; ++i) {
o.x = 42;
@@ -130,6 +139,7 @@ o.x = 42.1;
assertEquals(44.1, inc7(o, 1));
function inc8(o,y) { return (o[0]++) + y; }
+%PrepareFunctionForOptimization(inc8);
var q = [0];
for (var i = 0; i < 5; ++i) {
q[0] = 42;
@@ -144,6 +154,7 @@ q[0] = 42.1;
assertEquals(43.1, inc8(q, 1));
function inc9(o,y) { return (++o[0]) + y; }
+%PrepareFunctionForOptimization(inc9);
q = [0];
for (var i = 0; i < 5; ++i) {
q[0] = 42;
@@ -159,6 +170,7 @@ assertEquals(44.1, inc9(q, 1));
// Test deopt because of a failed map check.
function inc10(p) { return p.x++ }
+%PrepareFunctionForOptimization(inc10);
var g1 = {x:0};
var g2 = {y:0, x:42}
for (var i = 0; i < 5; ++i) {
diff --git a/deps/v8/test/mjsunit/compiler/dataview-constant.js b/deps/v8/test/mjsunit/compiler/dataview-constant.js
index f5f0b5e955..5a93ca8560 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-constant.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-constant.js
@@ -16,6 +16,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(0));
assertEquals(24, foo(1));
assertEquals(43, foo(0));
@@ -37,6 +38,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(0));
assertEquals(24, foo(1));
assertEquals(43, foo(0));
@@ -58,6 +60,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(0));
assertEquals(24, foo(2));
assertEquals(43, foo(0));
@@ -79,6 +82,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(0));
assertEquals(24, foo(2));
assertEquals(43, foo(0));
@@ -100,6 +104,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(0));
assertEquals(24, foo(4));
assertEquals(43, foo(0));
@@ -121,6 +126,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(0));
assertEquals(24, foo(4));
assertEquals(43, foo(0));
@@ -142,6 +148,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(0));
assertEquals(24, foo(4));
assertEquals(43, foo(0));
@@ -163,6 +170,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(0));
assertEquals(24, foo(8));
assertEquals(43, foo(0));
diff --git a/deps/v8/test/mjsunit/compiler/dataview-deopt.js b/deps/v8/test/mjsunit/compiler/dataview-deopt.js
index b19de30e5d..cf8132d3ee 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-deopt.js
@@ -16,6 +16,7 @@ function readUint8(offset) {
}
function warmupRead(f) {
+ %PrepareFunctionForOptimization(f);
f(0);
f(1);
%OptimizeFunctionOnNextCall(f);
@@ -40,6 +41,7 @@ function writeUint8(offset, value) {
}
function warmupWrite(f) {
+ %PrepareFunctionForOptimization(f);
f(0, 0);
f(0, 1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/dataview-get.js b/deps/v8/test/mjsunit/compiler/dataview-get.js
index 09094399df..6bfad2d832 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-get.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-get.js
@@ -54,6 +54,7 @@ function readFloat64(offset, little_endian) {
}
function warmup(f) {
+ %PrepareFunctionForOptimization(f);
f(0);
f(1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/dataview-neutered.js b/deps/v8/test/mjsunit/compiler/dataview-neutered.js
index ef485c69db..b5fe3102c2 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-neutered.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-neutered.js
@@ -16,6 +16,7 @@
return dv.getInt8(0);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(dv));
assertEquals(0, foo(dv));
%OptimizeFunctionOnNextCall(foo);
@@ -24,6 +25,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv), TypeError);
assertOptimized(foo);
@@ -38,6 +40,7 @@
return dv.getUint8(0);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(dv));
assertEquals(0, foo(dv));
%OptimizeFunctionOnNextCall(foo);
@@ -46,6 +49,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv), TypeError);
assertOptimized(foo);
@@ -60,6 +64,7 @@
return dv.getInt16(0, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(dv));
assertEquals(0, foo(dv));
%OptimizeFunctionOnNextCall(foo);
@@ -68,6 +73,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv), TypeError);
assertOptimized(foo);
@@ -82,6 +88,7 @@
return dv.getUint16(0, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(dv));
assertEquals(0, foo(dv));
%OptimizeFunctionOnNextCall(foo);
@@ -90,6 +97,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv), TypeError);
assertOptimized(foo);
@@ -104,6 +112,7 @@
return dv.getInt32(0, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(dv));
assertEquals(0, foo(dv));
%OptimizeFunctionOnNextCall(foo);
@@ -112,6 +121,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv), TypeError);
assertOptimized(foo);
@@ -126,6 +136,7 @@
return dv.getUint32(0, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(dv));
assertEquals(0, foo(dv));
%OptimizeFunctionOnNextCall(foo);
@@ -134,6 +145,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv), TypeError);
assertOptimized(foo);
@@ -148,6 +160,7 @@
return dv.getFloat32(0, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(dv));
assertEquals(0, foo(dv));
%OptimizeFunctionOnNextCall(foo);
@@ -156,6 +169,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv), TypeError);
assertOptimized(foo);
@@ -170,6 +184,7 @@
return dv.getFloat64(0, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(dv));
assertEquals(0, foo(dv));
%OptimizeFunctionOnNextCall(foo);
@@ -178,6 +193,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv), TypeError);
assertOptimized(foo);
@@ -192,6 +208,7 @@
return dv.setInt8(0, x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(dv, 1));
assertEquals(1, dv.getInt8(0));
assertEquals(undefined, foo(dv, 2));
@@ -202,6 +219,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv, 5), TypeError);
assertOptimized(foo);
@@ -216,6 +234,7 @@
return dv.setUint8(0, x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(dv, 1));
assertEquals(1, dv.getUint8(0));
assertEquals(undefined, foo(dv, 2));
@@ -226,6 +245,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv, 5), TypeError);
assertOptimized(foo);
@@ -240,6 +260,7 @@
return dv.setInt16(0, x, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(dv, 1));
assertEquals(1, dv.getInt16(0, true));
assertEquals(undefined, foo(dv, 2));
@@ -250,6 +271,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv, 5), TypeError);
assertOptimized(foo);
@@ -264,6 +286,7 @@
return dv.setUint16(0, x, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(dv, 1));
assertEquals(1, dv.getUint16(0, true));
assertEquals(undefined, foo(dv, 2));
@@ -274,6 +297,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv, 5), TypeError);
assertOptimized(foo);
@@ -288,6 +312,7 @@
return dv.setInt32(0, x, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(dv, 1));
assertEquals(1, dv.getInt32(0, true));
assertEquals(undefined, foo(dv, 2));
@@ -298,6 +323,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv, 5), TypeError);
assertOptimized(foo);
@@ -312,6 +338,7 @@
return dv.setUint32(0, x, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(dv, 1));
assertEquals(1, dv.getUint32(0, true));
assertEquals(undefined, foo(dv, 2));
@@ -322,6 +349,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv, 5), TypeError);
assertOptimized(foo);
@@ -336,6 +364,7 @@
return dv.setFloat32(0, x, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(dv, 1));
assertEquals(1, dv.getFloat32(0, true));
assertEquals(undefined, foo(dv, 2));
@@ -346,6 +375,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv, 5), TypeError);
assertOptimized(foo);
@@ -360,6 +390,7 @@
return dv.setFloat64(0, x, true);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(dv, 1));
assertEquals(1, dv.getFloat64(0, true));
assertEquals(undefined, foo(dv, 2));
@@ -370,6 +401,7 @@
%ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(() => foo(dv, 5), TypeError);
assertOptimized(foo);
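
Each closure in dataview-neutered.js walks the same detach cycle: optimize an accessor, detach the backing ArrayBuffer, observe that the access now throws a TypeError and deoptimizes the function, then prepare and re-optimize so that even the throwing path runs as optimized code (assertThrows and assertOptimized are mjsunit harness helpers). A condensed sketch of one such cycle, with an illustrative accessor:

  const ab = new ArrayBuffer(8);
  const dv = new DataView(ab);
  function load(v) { return v.getInt8(0); }  // illustrative accessor
  %PrepareFunctionForOptimization(load);
  load(dv); load(dv);
  %OptimizeFunctionOnNextCall(load);
  load(dv);                                  // optimized fast path
  %ArrayBufferDetach(ab);                    // detach the buffer under the view
  try { load(dv); } catch (e) { /* TypeError; load also deoptimizes */ }
  %PrepareFunctionForOptimization(load);     // required again after the deopt
  %OptimizeFunctionOnNextCall(load);
  try { load(dv); } catch (e) { /* TypeError again, now from optimized code */ }
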
diff --git a/deps/v8/test/mjsunit/compiler/dataview-nonconstant.js b/deps/v8/test/mjsunit/compiler/dataview-nonconstant.js
index 0420660c83..7d05ea4ed8 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-nonconstant.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-nonconstant.js
@@ -16,6 +16,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(dv, 0));
assertEquals(24, foo(dv, 1));
assertEquals(43, foo(dv, 0));
@@ -37,6 +38,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(dv, 0));
assertEquals(24, foo(dv, 1));
assertEquals(43, foo(dv, 0));
@@ -58,6 +60,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(dv, 0));
assertEquals(24, foo(dv, 2));
assertEquals(43, foo(dv, 0));
@@ -79,6 +82,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(dv, 0));
assertEquals(24, foo(dv, 2));
assertEquals(43, foo(dv, 0));
@@ -100,6 +104,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(dv, 0));
assertEquals(24, foo(dv, 4));
assertEquals(43, foo(dv, 0));
@@ -121,6 +126,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(dv, 0));
assertEquals(24, foo(dv, 4));
assertEquals(43, foo(dv, 0));
@@ -142,6 +148,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(dv, 0));
assertEquals(24, foo(dv, 4));
assertEquals(43, foo(dv, 0));
@@ -163,6 +170,7 @@
return x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo(dv, 0));
assertEquals(24, foo(dv, 8));
assertEquals(43, foo(dv, 0));
diff --git a/deps/v8/test/mjsunit/compiler/dataview-set.js b/deps/v8/test/mjsunit/compiler/dataview-set.js
index 4759597881..8cd3bf12eb 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-set.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-set.js
@@ -44,6 +44,7 @@ function writeFloat64(offset, value, little_endian) {
}
function warmup(f) {
+ %PrepareFunctionForOptimization(f);
f(0, 0);
f(0, 1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/dead-code6.js b/deps/v8/test/mjsunit/compiler/dead-code6.js
index ec9b8433dd..d36818fac0 100644
--- a/deps/v8/test/mjsunit/compiler/dead-code6.js
+++ b/deps/v8/test/mjsunit/compiler/dead-code6.js
@@ -57,16 +57,19 @@ function dead3(a, b) {
return a;
}
+%PrepareFunctionForOptimization(dead1);
assertTrue(dead1(33, 32) == 33);
assertTrue(dead1(33, 32) == 33);
%OptimizeFunctionOnNextCall(dead1);
assertTrue(dead1(33, 32) == 33);
+%PrepareFunctionForOptimization(dead2);
assertTrue(dead2(34, 11) == 34);
assertTrue(dead2(34, 11) == 34);
%OptimizeFunctionOnNextCall(dead2);
assertTrue(dead2(34, 11) == 34);
+%PrepareFunctionForOptimization(dead3);
assertTrue(dead3(35, 12) == 35);
assertTrue(dead3(35, 12) == 35);
%OptimizeFunctionOnNextCall(dead3);
diff --git a/deps/v8/test/mjsunit/compiler/dead-loops-neg.js b/deps/v8/test/mjsunit/compiler/dead-loops-neg.js
index dbf500b48e..67acd90fa8 100644
--- a/deps/v8/test/mjsunit/compiler/dead-loops-neg.js
+++ b/deps/v8/test/mjsunit/compiler/dead-loops-neg.js
@@ -83,6 +83,7 @@ var params_loops = [loop6, loop7, loop8];
for (var i = 0; i < no_params_loops.length; i++) {
var f = no_params_loops[i];
+ %PrepareFunctionForOptimization(f);
f();
f();
f();
@@ -92,6 +93,7 @@ for (var i = 0; i < no_params_loops.length; i++) {
for (var i = 0; i < params_loops.length; i++) {
var f = params_loops[i];
+ %PrepareFunctionForOptimization(f);
f(3);
f(7);
f(11);
diff --git a/deps/v8/test/mjsunit/compiler/dead-loops.js b/deps/v8/test/mjsunit/compiler/dead-loops.js
index 2301b129dd..878f90841a 100644
--- a/deps/v8/test/mjsunit/compiler/dead-loops.js
+++ b/deps/v8/test/mjsunit/compiler/dead-loops.js
@@ -80,6 +80,7 @@ var loops = [loop1, loop2, loop3, loop4, loop5, loop6, loop7, loop8];
for (var i = 0; i < loops.length; i++) {
var f = loops[i];
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/dead-string-add-warm.js b/deps/v8/test/mjsunit/compiler/dead-string-add-warm.js
index c211ebddc5..4abe641b90 100644
--- a/deps/v8/test/mjsunit/compiler/dead-string-add-warm.js
+++ b/deps/v8/test/mjsunit/compiler/dead-string-add-warm.js
@@ -68,9 +68,14 @@ function run() {
assertEquals("1", dead3("true", 0));
}
+%PrepareFunctionForOptimization(dead1);
run();
run();
%OptimizeFunctionOnNextCall(dead1);
+run();
+%PrepareFunctionForOptimization(dead2);
%OptimizeFunctionOnNextCall(dead2);
+run();
+%PrepareFunctionForOptimization(dead3);
%OptimizeFunctionOnNextCall(dead3);
run();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors1.js b/deps/v8/test/mjsunit/compiler/deopt-accessors1.js
index 3589258656..a1cbb88210 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-accessors1.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors1.js
@@ -21,6 +21,7 @@ function foo(o) {
return o.x++;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(1, foo(o));
assertEquals(2, foo(o));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors2.js b/deps/v8/test/mjsunit/compiler/deopt-accessors2.js
index 74d41397bf..580d2872bb 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-accessors2.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors2.js
@@ -21,6 +21,7 @@ function foo(o) {
return ++o.x;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(2, foo(o));
assertEquals(3, foo(o));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors3.js b/deps/v8/test/mjsunit/compiler/deopt-accessors3.js
index 035cf2b359..33f7607400 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-accessors3.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors3.js
@@ -22,6 +22,7 @@ function foo(o) {
return o[x]++;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(1, foo(o));
assertEquals(2, foo(o));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors4.js b/deps/v8/test/mjsunit/compiler/deopt-accessors4.js
index 5a8453f237..2c2b98af4b 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-accessors4.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors4.js
@@ -22,6 +22,7 @@ function foo(o) {
return ++o[x];
}
+%PrepareFunctionForOptimization(foo);
assertEquals(2, foo(o));
assertEquals(3, foo(o));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors5.js b/deps/v8/test/mjsunit/compiler/deopt-accessors5.js
index bf71585e25..2334c9bdc0 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-accessors5.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors5.js
@@ -17,6 +17,7 @@ function test() {
assertEquals(1, o.q = 1);
}
+%PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors6.js b/deps/v8/test/mjsunit/compiler/deopt-accessors6.js
index 784123ae6c..059402e8ed 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-accessors6.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors6.js
@@ -18,6 +18,7 @@ function test() {
assertEquals(1, o[q] = 1);
}
+%PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors7.js b/deps/v8/test/mjsunit/compiler/deopt-accessors7.js
index 8c7d7a1e3c..a40c9a5156 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-accessors7.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors7.js
@@ -21,6 +21,7 @@ function foo(o, x) {
return bar(1, (o[x], 2), 3);
}
+%PrepareFunctionForOptimization(foo);
assertEquals(4, foo(o, "v"));
assertEquals(4, foo(o, "v"));
assertEquals(4, foo(o, "x"));
diff --git a/deps/v8/test/mjsunit/compiler/deopt-args.js b/deps/v8/test/mjsunit/compiler/deopt-args.js
index 17c397c152..867346515e 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-args.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-args.js
@@ -35,6 +35,7 @@ function f(a,b,c) {
return 42;
}
+%PrepareFunctionForOptimization(g);
var object = { };
object.f = f;
for (var i = 0; i < 5; i++) {
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js b/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js
index b737b17ed0..68158e05e3 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-builtins.js
@@ -13,10 +13,12 @@
function g() {
[1,2,3].forEach(f);
}
+ %PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
g();
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
assertOptimized(g);
@@ -31,10 +33,12 @@
function g() {
[1,2,3].find(f);
}
+ %PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
g();
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
assertOptimized(g);
@@ -48,10 +52,12 @@
function g() {
[1,2,3].map(f);
}
+ %PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
g();
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
assertOptimized(g);
@@ -65,10 +71,12 @@
function g() {
[1,2,3].filter(f);
}
+ %PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
g();
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
assertOptimized(g);
@@ -84,10 +92,12 @@
function g() {
[1,2,3].forEach(f);
}
+ %PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
g();
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
assertOptimized(g);
@@ -102,10 +112,12 @@
function g() {
[1,2,3].find(f);
}
+ %PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
g();
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
assertOptimized(g);
@@ -119,10 +131,12 @@
function g() {
[1,2,3].map(f);
}
+ %PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
g();
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
assertOptimized(g);
@@ -136,11 +150,13 @@
function g() {
[1,2,3].filter(f);
}
+ %PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
g();
g();
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
g();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-iterator-prototype-next.js b/deps/v8/test/mjsunit/compiler/deopt-array-iterator-prototype-next.js
index b75f3185ac..430bb80dba 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-iterator-prototype-next.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-iterator-prototype-next.js
@@ -13,6 +13,7 @@
return array[Symbol.iterator]().next();
}
+ %PrepareFunctionForOptimization(next);
assertEquals(next().value, undefined);
assertEquals(next().value, undefined);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-every.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-every.js
index 0ebf126fa6..693d75d220 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-every.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-every.js
@@ -15,6 +15,7 @@
return array.every(v => v > 0);
}
+ %PrepareFunctionForOptimization(every);
every(); every();
%OptimizeFunctionOnNextCall(every);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-filter.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-filter.js
index fcae939eb8..232a3c2bf2 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-filter.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-filter.js
@@ -15,6 +15,7 @@
return array.filter(v => v > 0);
}
+ %PrepareFunctionForOptimization(filter);
filter(); filter();
%OptimizeFunctionOnNextCall(filter);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-find.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-find.js
index e8b5b9bd1b..c12b977027 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-find.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-find.js
@@ -15,6 +15,7 @@
return array.find(v => v > 0);
}
+ %PrepareFunctionForOptimization(find);
find(); find();
%OptimizeFunctionOnNextCall(find);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-findindex.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-findindex.js
index 37866a4506..893774f1ee 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-findindex.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-findindex.js
@@ -15,6 +15,7 @@
return array.findIndex(v => v > 0);
}
+ %PrepareFunctionForOptimization(findIndex);
findIndex(); findIndex();
%OptimizeFunctionOnNextCall(findIndex);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-foreach.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-foreach.js
index 6db9078e1b..1d0a808243 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-foreach.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-foreach.js
@@ -15,6 +15,7 @@
function forEach() {
array.forEach(increment);
}
+ %PrepareFunctionForOptimization(forEach);
forEach(); forEach();
%OptimizeFunctionOnNextCall(forEach);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-includes.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-includes.js
index 77aedfe1e6..5931840a82 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-includes.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-includes.js
@@ -16,6 +16,7 @@
return arr.includes(val);
}
+ %PrepareFunctionForOptimization(includes);
assertTrue(includes(iarr, 0)); assertTrue(includes(darr, 0));
assertTrue(includes(iarr, 2)); assertTrue(includes(darr, 2));
@@ -33,6 +34,7 @@
return arr.includes(val);
}
+ %PrepareFunctionForOptimization(includes);
assertTrue(includes(iarr, 0));
assertTrue(includes(iarr, 2));
@@ -55,6 +57,7 @@
iarr.__proto__ = [2];
// get feedback
+ %PrepareFunctionForOptimization(includes);
assertFalse(includes(iarr, 0));
assertTrue(includes(iarr, 2));
@@ -72,6 +75,7 @@
return arr.includes(val);
}
+ %PrepareFunctionForOptimization(includes);
assertFalse(includes(iarr, 2));
assertTrue(includes(iarr, 3));
@@ -92,6 +96,7 @@
return array.includes(val);
}
+ %PrepareFunctionForOptimization(includes);
includes(6); includes(6);
%OptimizeFunctionOnNextCall(includes);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-indexof.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-indexof.js
index 78cf60507c..e5e2752048 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-indexof.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-indexof.js
@@ -16,6 +16,7 @@
return arr.indexOf(val);
}
+ %PrepareFunctionForOptimization(indexOf);
assertEquals(0, indexOf(iarr, 0));
assertEquals(0, indexOf(darr, 0));
assertEquals(2, indexOf(iarr, 2));
@@ -36,6 +37,7 @@
return arr.indexOf(val);
}
+ %PrepareFunctionForOptimization(indexOf);
assertEquals(0, indexOf(iarr, 0));
assertEquals(2, indexOf(iarr, 2));
@@ -55,6 +57,7 @@
return arr.indexOf(val);
}
+ %PrepareFunctionForOptimization(indexOf);
iarr.__proto__ = [2];
assertEquals(-1, indexOf(iarr, 0));
assertEquals(0, indexOf(iarr, 2));
@@ -73,6 +76,7 @@
return arr.indexOf(val);
}
+ %PrepareFunctionForOptimization(indexOf);
assertEquals(-1, indexOf(iarr, 2));
assertEquals(1, indexOf(iarr, 3));
@@ -93,6 +97,7 @@
return array.indexOf(val);
}
+ %PrepareFunctionForOptimization(indexOf);
indexOf(6); indexOf(6);
%OptimizeFunctionOnNextCall(indexOf);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-lastindexof.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-lastindexof.js
index d47926cd3d..0def5efb1e 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-lastindexof.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-lastindexof.js
@@ -15,6 +15,7 @@
return array.lastIndexOf(val);
}
+ %PrepareFunctionForOptimization(lastIndexOf);
lastIndexOf(6); lastIndexOf(6);
%OptimizeFunctionOnNextCall(lastIndexOf);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-map.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-map.js
index 29c7d32174..836df344f4 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-map.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-map.js
@@ -15,6 +15,7 @@
return array.map(x => x + 1);
}
+ %PrepareFunctionForOptimization(map);
map(); map();
%OptimizeFunctionOnNextCall(map);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-pop.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-pop.js
index 6d0fe068e4..aa578a09c8 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-pop.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-pop.js
@@ -15,6 +15,8 @@
return array.pop();
}
+ %PrepareFunctionForOptimization(pop);
+
assertEquals(pop(), undefined);
assertEquals(pop(), undefined);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-reduce.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-reduce.js
index d2ba0db0f8..44a83ec728 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-reduce.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-reduce.js
@@ -16,6 +16,7 @@
array.reduce(accumulate);
}
+ %PrepareFunctionForOptimization(reduce);
reduce(); reduce();
%OptimizeFunctionOnNextCall(reduce);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-slice.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-slice.js
index 6b2d5fa22d..0e75f74f08 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-slice.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-slice.js
@@ -15,6 +15,7 @@
return array.slice();
}
+ %PrepareFunctionForOptimization(slice);
slice(); slice();
%OptimizeFunctionOnNextCall(slice);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-some.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-some.js
index d9604d0c76..9636e94edd 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-some.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-some.js
@@ -15,6 +15,7 @@
return array.some(v => v > 0);
}
+ %PrepareFunctionForOptimization(some);
some(); some();
%OptimizeFunctionOnNextCall(some);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-push.js b/deps/v8/test/mjsunit/compiler/deopt-array-push.js
index e34d99a325..d13538c532 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-array-push.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-push.js
@@ -7,10 +7,12 @@
(function test() {
function foo(a) { a.push(a.length = 2); }
+ %PrepareFunctionForOptimization(foo);
foo([1]);
foo([1]);
%OptimizeFunctionOnNextCall(foo);
foo([1]);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo([1]);
assertOptimized(foo);
@@ -19,10 +21,12 @@
(function testElementTypeCheckSmi() {
function foo(a) { a.push('a'); }
+ %PrepareFunctionForOptimization(foo);
foo([1]);
foo([1]);
%OptimizeFunctionOnNextCall(foo);
foo([1]);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo([1]);
assertOptimized(foo);
@@ -31,10 +35,12 @@
(function testElementTypeCheckDouble() {
function foo(a) { a.push('a'); }
+ %PrepareFunctionForOptimization(foo);
foo([0.3413312]);
foo([0.3413312]);
%OptimizeFunctionOnNextCall(foo);
foo([0.3413312]);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo([0.3413312]);
assertOptimized(foo);
@@ -44,10 +50,12 @@
%NeverOptimizeFunction(bar);
function foo(a) { a.push(bar(a)); }
+ %PrepareFunctionForOptimization(foo);
foo(["1"]);
foo(["1"]);
%OptimizeFunctionOnNextCall(foo);
foo(["1"]);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo(["1"]);
assertOptimized(foo);
@@ -56,10 +64,12 @@
(function test() {
function foo(a) { a.push(a.length = 2); }
+ %PrepareFunctionForOptimization(foo);
foo([0.34234]);
foo([0.34234]);
%OptimizeFunctionOnNextCall(foo);
foo([0.34234]);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo([0.34234]);
assertOptimized(foo);
@@ -70,28 +80,47 @@
function foo(a) { a.push(1); }
+ %PrepareFunctionForOptimization(foo);
foo(new Array(N));
foo(new Array(N));
%OptimizeFunctionOnNextCall(foo);
foo(new Array(N));
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo(new Array(N));
assertOptimized(foo);
})();
(function test() {
- function mkArray() {
- const N = 128 * 1024;
+ // Conservative array lengths for slow and fast mode.
+ const kFastModeLength = 1024;
+ const kSlowModeLength = 512 * 1024;
+ function mkArray(length) {
let a = [0.1];
- a.length = N;
+ a.length = length;
return a;
}
function foo(a) { a.push(0.23441233123); }
- foo(mkArray());
- foo(mkArray());
+
+
+ // 1. Optimize foo to handle fast mode arrays.
+ %PrepareFunctionForOptimization(foo);
+ foo(mkArray(kFastModeLength));
+ foo(mkArray(kFastModeLength));
%OptimizeFunctionOnNextCall(foo);
- foo(mkArray());
+ foo(mkArray(kFastModeLength));
+ assertOptimized(foo);
+
+ // Prepare foo to be re-optimized, ensuring its bytecode / feedback vector
+ // doesn't get flushed after deoptimization.
+ %PrepareFunctionForOptimization(foo);
+
+ // 2. Given a slow mode array, foo will deopt.
+ foo(mkArray(kSlowModeLength));
+
+ // 3. Optimize foo again.
%OptimizeFunctionOnNextCall(foo);
- foo(mkArray());
+ foo(mkArray(kSlowModeLength));
+ // 4. It should stay optimized.
assertOptimized(foo);
})();
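The two constants encode an elements-kind boundary: extending [0.1] to kSlowModeLength leaves the backing store so sparse that V8 normalizes the array to dictionary (slow) elements, while kFastModeLength keeps it on the fast path, so push() exercises both code paths and the deopt in between. A hedged illustration; %HasDictionaryElements is a d8 natives helper, and the exact sparseness threshold is an implementation detail that may shift between V8 versions:

    let fast = [0.1];
    fast.length = 1024;          // conservatively still fast (holey) elements
    let slow = [0.1];
    slow.length = 512 * 1024;    // sparse enough to go dictionary (slow) mode
    // assertTrue(%HasDictionaryElements(slow));
    // assertFalse(%HasDictionaryElements(fast));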
diff --git a/deps/v8/test/mjsunit/compiler/deopt-bool.js b/deps/v8/test/mjsunit/compiler/deopt-bool.js
index 13a4a97034..2bd4d3e39c 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-bool.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-bool.js
@@ -15,6 +15,8 @@ function foo(a, b) {
return passed;
}
+%PrepareFunctionForOptimization(foo);
+
assertTrue(foo(3, 4));
assertTrue(foo(3, 4));
assertFalse(foo(3.1, 4));
diff --git a/deps/v8/test/mjsunit/compiler/deopt-bool2.js b/deps/v8/test/mjsunit/compiler/deopt-bool2.js
index 4d1c41e323..0967112360 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-bool2.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-bool2.js
@@ -20,6 +20,8 @@ function foo(expected, x) {
return passed;
}
+%PrepareFunctionForOptimization(foo);
+
assertTrue(foo([0,1], [0,1]));
assertTrue(foo([0,2], [0,2]));
assertFalse(foo([0,2.25], [0,2.75]));
diff --git a/deps/v8/test/mjsunit/compiler/deopt-closure.js b/deps/v8/test/mjsunit/compiler/deopt-closure.js
index 2ce531faf0..ef2b500a66 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-closure.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-closure.js
@@ -12,6 +12,7 @@
}
return g();
}
+ %PrepareFunctionForOptimization(f);
assertEquals(24, f(23));
assertEquals(43, f(42));
%OptimizeFunctionOnNextCall(f);
@@ -26,6 +27,7 @@
}
return g();
}
+ %PrepareFunctionForOptimization(f);
assertEquals(24, f(23));
assertEquals(43, f(42));
%OptimizeFunctionOnNextCall(f);
@@ -40,6 +42,7 @@
}
return new g();
}
+ %PrepareFunctionForOptimization(f);
assertEquals({ val: 24 }, f(23));
assertEquals({ val: 43 }, f(42));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-during-eval-lookup.js b/deps/v8/test/mjsunit/compiler/deopt-during-eval-lookup.js
index 1df04bbad8..786a460fe4 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-during-eval-lookup.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-during-eval-lookup.js
@@ -43,5 +43,6 @@ function f() {
return eval("200");
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(100, f());
diff --git a/deps/v8/test/mjsunit/compiler/deopt-eager-and-lazy.js b/deps/v8/test/mjsunit/compiler/deopt-eager-and-lazy.js
index daea40a950..cab060d31d 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-eager-and-lazy.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-eager-and-lazy.js
@@ -19,9 +19,12 @@ function h(o) {
return o.x;
}
+%PrepareFunctionForOptimization(g);
g({x : 1});
g({x : 2});
%OptimizeFunctionOnNextCall(g);
-%OptimizeFunctionOnNextCall(h);
g({x : 3});
+%PrepareFunctionForOptimization(h);
+%OptimizeFunctionOnNextCall(h);
+g({x : 4});
g({y : 1, x : 3});
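The closing call passes an object whose map differs from the {x}-only maps the optimized code was specialized on, which is what forces the eager deoptimization this test is about. A hedged, generic illustration of that trigger (read is a placeholder, not a function from this test):

    function read(o) { return o.x; }

    %PrepareFunctionForOptimization(read);
    read({x: 1});
    read({x: 2});          // feedback: monomorphic on the {x} map
    %OptimizeFunctionOnNextCall(read);
    read({x: 3});          // optimized against that single map
    read({y: 1, x: 3});    // different map: eager deopt at the map check
    assertUnoptimized(read);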
diff --git a/deps/v8/test/mjsunit/compiler/deopt-eager-var-mutation-ite.js b/deps/v8/test/mjsunit/compiler/deopt-eager-var-mutation-ite.js
index 17ce86ff4a..3b1698eb5e 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-eager-var-mutation-ite.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-eager-var-mutation-ite.js
@@ -22,6 +22,7 @@ function g(b) {
} // It should trigger an eager deoptimization when b=true.
}
+%PrepareFunctionForOptimization(f);
f(false); f(false);
%OptimizeFunctionOnNextCall(f);
f(false);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-eager-with-freeze.js b/deps/v8/test/mjsunit/compiler/deopt-eager-with-freeze.js
index 538176d2e8..6596c85185 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-eager-with-freeze.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-eager-with-freeze.js
@@ -12,6 +12,7 @@ function useObject(obj) {
return obj.f;
}
+%PrepareFunctionForOptimization(useObject);
var o = {f: 1, g: 2}
assertEquals(useObject(o), 2);
assertEquals(useObject(o), 4);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-followed-by-gc.js b/deps/v8/test/mjsunit/compiler/deopt-followed-by-gc.js
index a5ea622dfc..fb4922191b 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-followed-by-gc.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-followed-by-gc.js
@@ -17,6 +17,7 @@ function deopt() {
}
%NeverOptimizeFunction(deopt);
+%PrepareFunctionForOptimization(opt_me);
opt_me();
opt_me();
%OptimizeFunctionOnNextCall(opt_me);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-inlined-from-call.js b/deps/v8/test/mjsunit/compiler/deopt-inlined-from-call.js
index 8fa8af5a9b..deff3e71a7 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-inlined-from-call.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-inlined-from-call.js
@@ -44,6 +44,7 @@ Array.prototype.f = function() {
return g([].f.call({}), deopt + 1, called);
}
+ %PrepareFunctionForOptimization(f);
called = f(0, called);
called = f(0, called);
%OptimizeFunctionOnNextCall(f);
@@ -65,6 +66,7 @@ Array.prototype.f = function() {
return [].pop.call(a1) + b.value;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(7, f(obj));
assertEquals(6, f(obj));
%OptimizeFunctionOnNextCall(f);
@@ -84,6 +86,7 @@ Array.prototype.f = function() {
return [].shift.call(a2) + b.value;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(4, f(obj));
assertEquals(5, f(obj));
%OptimizeFunctionOnNextCall(f);
@@ -102,6 +105,7 @@ Array.prototype.f = function() {
return [].push.call(a3, b.value);
}
+ %PrepareFunctionForOptimization(f);
assertEquals(5, f(obj));
assertEquals(6, f(obj));
%OptimizeFunctionOnNextCall(f);
@@ -121,6 +125,7 @@ Array.prototype.f = function() {
return [].indexOf.call(a4, b.value);
}
+ %PrepareFunctionForOptimization(f);
f(obj);
f(obj);
%OptimizeFunctionOnNextCall(f);
@@ -141,6 +146,7 @@ Array.prototype.f = function() {
return [].lastIndexOf.call(a5, b.value);
}
+ %PrepareFunctionForOptimization(f);
f(obj);
f(obj);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-lazy-freeze.js b/deps/v8/test/mjsunit/compiler/deopt-lazy-freeze.js
index 592fca8ad2..b7cdd37f7e 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-lazy-freeze.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-lazy-freeze.js
@@ -21,6 +21,7 @@ function setAndUseObject() {
return obj.f;
}
+%PrepareFunctionForOptimization(setAndUseObject);
assertEquals(setAndUseObject(), 2);
assertEquals(setAndUseObject(), 4);
%OptimizeFunctionOnNextCall(setAndUseObject);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-lazy-shape-mutation.js b/deps/v8/test/mjsunit/compiler/deopt-lazy-shape-mutation.js
index df4878e9cf..d9dc6d995a 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-lazy-shape-mutation.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-lazy-shape-mutation.js
@@ -17,6 +17,7 @@ function f() {
return o.x;
}
+%PrepareFunctionForOptimization(f);
f(); f();
%OptimizeFunctionOnNextCall(f);
b = true;
diff --git a/deps/v8/test/mjsunit/compiler/deopt-lazy-var-mutation.js b/deps/v8/test/mjsunit/compiler/deopt-lazy-var-mutation.js
index 93886f2ad7..17f3c392e4 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-lazy-var-mutation.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-lazy-var-mutation.js
@@ -19,6 +19,7 @@ function foo(a) {
}
var o = 1;
+%PrepareFunctionForOptimization(foo);
assertEquals(foo(o), 2);
assertEquals(foo(o), 2);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-many-lazy.js b/deps/v8/test/mjsunit/compiler/deopt-many-lazy.js
index e8a0b04419..1b24bfc767 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-many-lazy.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-many-lazy.js
@@ -23,11 +23,19 @@ function b() {
%DeoptimizeFunction(a);
}
+%PrepareFunctionForOptimization(f);
f(); f();
+%OptimizeFunctionOnNextCall(f);
+f();
+%PrepareFunctionForOptimization(a);
a(); a();
+%OptimizeFunctionOnNextCall(a);
+a();
for(var i = 0; i < 5; i++) {
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
- %OptimizeFunctionOnNextCall(a);
f();
+ %PrepareFunctionForOptimization(a);
+ %OptimizeFunctionOnNextCall(a);
a();
}
diff --git a/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js b/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
index 0b19df8a1c..493bd8ec23 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
@@ -36,6 +36,7 @@ function f(a) {
// accumulator holding an unboxed double which needs to be materialized.
global = Math.sqrt(a);
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f(0.25);
assertEquals(0.5, global);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-now-lazy.js b/deps/v8/test/mjsunit/compiler/deopt-now-lazy.js
index 6a86bcaf4d..b5d11acbec 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-now-lazy.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-now-lazy.js
@@ -8,5 +8,6 @@ function f() {
%DeoptimizeNow();
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js b/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js
index 6f19d45bb8..7634474f84 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js
@@ -7,6 +7,7 @@
(function() {
function foo(x, y) { return x << y; }
+ %PrepareFunctionForOptimization(foo);
foo(1.1, 0.1);
foo(0.1, 1.1);
foo(true, false);
@@ -32,6 +33,7 @@
(function() {
function foo(x, y) { return x >> y; }
+ %PrepareFunctionForOptimization(foo);
foo(1.1, 0.1);
foo(0.1, 1.1);
foo(true, false);
@@ -57,6 +59,7 @@
(function() {
function foo(x, y) { return x >>> y; }
+ %PrepareFunctionForOptimization(foo);
foo(1.1, 0.1);
foo(0.1, 1.1);
foo(true, false);
@@ -82,6 +85,7 @@
(function() {
function foo(x, y) { return x ^ y; }
+ %PrepareFunctionForOptimization(foo);
foo(1.1, 0.1);
foo(0.1, 1.1);
foo(true, false);
@@ -107,6 +111,7 @@
(function() {
function foo(x, y) { return x | y; }
+ %PrepareFunctionForOptimization(foo);
foo(1.1, 0.1);
foo(0.1, 1.1);
foo(true, false);
@@ -132,6 +137,7 @@
(function() {
function foo(x, y) { return x & y; }
+ %PrepareFunctionForOptimization(foo);
foo(1.1, 0.1);
foo(0.1, 1.1);
foo(true, false);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-simple-eager.js b/deps/v8/test/mjsunit/compiler/deopt-simple-eager.js
index 9928647c8a..9944b72019 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-simple-eager.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-simple-eager.js
@@ -10,6 +10,7 @@ function f(o) {
return o.x;
}
+%PrepareFunctionForOptimization(f);
assertEquals(f({x : 2}), 2);
assertEquals(f({x : 2}), 2);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-simple-lazy.js b/deps/v8/test/mjsunit/compiler/deopt-simple-lazy.js
index e3721a3159..e32fc2af48 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-simple-lazy.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-simple-lazy.js
@@ -14,6 +14,7 @@ function g() {
%DeoptimizeFunction(f);
}
+%PrepareFunctionForOptimization(f);
f(); f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-simple-try-catch.js b/deps/v8/test/mjsunit/compiler/deopt-simple-try-catch.js
index b0e382e38c..6a1ca40c37 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-simple-try-catch.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-simple-try-catch.js
@@ -22,6 +22,7 @@ function f() {
}
}
+%PrepareFunctionForOptimization(f);
assertEquals(f(), 43);
assertEquals(f(), 43);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-soft-simple.js b/deps/v8/test/mjsunit/compiler/deopt-soft-simple.js
index eaa1e3c695..a32f6104ae 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-soft-simple.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-soft-simple.js
@@ -17,5 +17,6 @@ function f() {
return o.x;
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js b/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
index e5ec075aa9..a7edbbc27d 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
@@ -9,10 +9,12 @@ var s = "12345";
(function() {
function foo() { return s[5]; }
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
assertOptimized(foo);
@@ -21,10 +23,12 @@ var s = "12345";
(function() {
function foo(i) { return s[i]; }
+ %PrepareFunctionForOptimization(foo);
foo(0);
foo(1);
%OptimizeFunctionOnNextCall(foo);
foo(5);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo(5);
assertOptimized(foo);
@@ -33,10 +37,12 @@ var s = "12345";
(function() {
function foo(s) { return s[5]; }
+ %PrepareFunctionForOptimization(foo);
foo(s);
foo(s);
%OptimizeFunctionOnNextCall(foo);
foo(s);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo(s);
assertOptimized(foo);
@@ -45,10 +51,12 @@ var s = "12345";
(function() {
function foo(s, i) { return s[i]; }
+ %PrepareFunctionForOptimization(foo);
foo(s, 0);
foo(s, 1);
%OptimizeFunctionOnNextCall(foo);
foo(s, 5);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo(s, 5);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-tonumber-binop.js b/deps/v8/test/mjsunit/compiler/deopt-tonumber-binop.js
index c93ef9dfd5..3af7078cbc 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-tonumber-binop.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-tonumber-binop.js
@@ -25,6 +25,7 @@ var o = { toString : function() {
return "1";
} };
+%PrepareFunctionForOptimization(f);
counter = 0;
assertEquals(1, f(deopt, o));
assertEquals(2, counter);
@@ -34,6 +35,7 @@ counter = 0;
assertEquals(-1, f(o, deopt));
assertEquals(2, counter);
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
counter = 0;
assertEquals(0, f(deopt, deopt));
diff --git a/deps/v8/test/mjsunit/compiler/deopt-tonumber-compare.js b/deps/v8/test/mjsunit/compiler/deopt-tonumber-compare.js
index 9a7e992ada..64b2f74f34 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-tonumber-compare.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-tonumber-compare.js
@@ -29,14 +29,16 @@ function deopt(f) {
};
}
+%PrepareFunctionForOptimization(f);
assertEquals(false, f(deopt(f)));
assertEquals(1, counter);
-assertEquals(true, g(deopt(g)));
-assertEquals(2, counter);
-
%OptimizeFunctionOnNextCall(f);
assertEquals(false, f(deopt(f)));
+assertEquals(2, counter);
+
+%PrepareFunctionForOptimization(g);
+assertEquals(true, g(deopt(g)));
assertEquals(3, counter);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-tonumber-shift.js b/deps/v8/test/mjsunit/compiler/deopt-tonumber-shift.js
index bb4d1d5c1c..a4483e9598 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-tonumber-shift.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-tonumber-shift.js
@@ -25,6 +25,7 @@ var o = { toString : function() {
return "1";
} };
+%PrepareFunctionForOptimization(f);
counter = 0;
assertEquals(4, f(deopt, o));
assertEquals(2, counter);
@@ -34,6 +35,7 @@ counter = 0;
assertEquals(4, f(o, deopt));
assertEquals(2, counter);
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
counter = 0;
assertEquals(8, f(deopt, deopt));
diff --git a/deps/v8/test/mjsunit/compiler/deopt-twice-on-call.js b/deps/v8/test/mjsunit/compiler/deopt-twice-on-call.js
index f30b7d0aff..d591d9c9cf 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-twice-on-call.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-twice-on-call.js
@@ -17,6 +17,7 @@ function h() {
%DeoptimizeFunction(f);
}
+%PrepareFunctionForOptimization(f);
f(); f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-twice.js b/deps/v8/test/mjsunit/compiler/deopt-twice.js
index 1ec9c9fab8..929cca65ca 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-twice.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-twice.js
@@ -13,6 +13,7 @@ function g() {
%DeoptimizeFunction(f);
}
+%PrepareFunctionForOptimization(f);
f(); f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/deoptimize-lazy-weak.js b/deps/v8/test/mjsunit/compiler/deoptimize-lazy-weak.js
index c774089098..e698654778 100644
--- a/deps/v8/test/mjsunit/compiler/deoptimize-lazy-weak.js
+++ b/deps/v8/test/mjsunit/compiler/deoptimize-lazy-weak.js
@@ -38,10 +38,14 @@ function f() {
}
shouldDeopt = false;
+%PrepareFunctionForOptimization(dummy_opt);
f();
f();
-
-%OptimizeFunctionOnNextCall(f);
%OptimizeFunctionOnNextCall(dummy_opt);
+f();
+%PrepareFunctionForOptimization(f);
+f();
+%OptimizeFunctionOnNextCall(f);
+
shouldDeopt = true;
assertEquals(2, f());
diff --git a/deps/v8/test/mjsunit/compiler/division-by-constant.js b/deps/v8/test/mjsunit/compiler/division-by-constant.js
index 212bdb24b9..b37d9f9975 100644
--- a/deps/v8/test/mjsunit/compiler/division-by-constant.js
+++ b/deps/v8/test/mjsunit/compiler/division-by-constant.js
@@ -107,6 +107,7 @@ function TestDivisionLike(ref, construct, values, divisor) {
var OptFun = new Function("dividend", construct(divisor));
// Warm up type feedback.
+ %PrepareFunctionForOptimization(OptFun);
OptFun(7);
OptFun(11);
%OptimizeFunctionOnNextCall(OptFun);
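Functions constructed at runtime with new Function need the same preparation as statically declared ones; the prepare call has to target the freshly built OptFun before its two warm-up dividends. A hedged sketch with an illustrative body (the real bodies come from construct(divisor)):

    var OptFun = new Function("dividend", "return (dividend / 3) | 0;");

    %PrepareFunctionForOptimization(OptFun);
    OptFun(7);                           // warm up type feedback
    OptFun(11);
    %OptimizeFunctionOnNextCall(OptFun);
    OptFun(42);                          // runs the optimized division code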
diff --git a/deps/v8/test/mjsunit/compiler/dont-constant-fold-deopting-checks.js b/deps/v8/test/mjsunit/compiler/dont-constant-fold-deopting-checks.js
index 02bd8d9a25..e211f7fd8f 100644
--- a/deps/v8/test/mjsunit/compiler/dont-constant-fold-deopting-checks.js
+++ b/deps/v8/test/mjsunit/compiler/dont-constant-fold-deopting-checks.js
@@ -6,5 +6,6 @@
function bar(a) { a[0](true); }
function foo(a) { return bar(1); }
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(function() {bar([foo])}, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/dont-flush-code-marked-for-opt.js b/deps/v8/test/mjsunit/compiler/dont-flush-code-marked-for-opt.js
new file mode 100644
index 0000000000..582f5940b5
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dont-flush-code-marked-for-opt.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --opt --allow-natives-syntax --expose-gc --stress-flush-bytecode
+
+function foo(a) {}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+gc();
+foo();
+assertOptimized(foo);
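The new test pins down the interaction between --stress-flush-bytecode and a pending optimization request: %PrepareFunctionForOptimization marks foo so that the gc() between the request and the optimizing call cannot flush its bytecode and feedback vector, and assertOptimized confirms the request survived. A hedged sketch of the failure mode being guarded against, i.e. the same sequence without the prepare call (bar is hypothetical):

    function bar() {}

    bar(); bar();
    %OptimizeFunctionOnNextCall(bar);  // request recorded on the function
    gc();                              // stress flushing may now drop the
                                       // bytecode, and the request with it
    bar();                             // lazily recompiled, no longer marked
    // assertOptimized(bar) could fail here; the prepare call prevents this.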
diff --git a/deps/v8/test/mjsunit/compiler/double-array-to-global.js b/deps/v8/test/mjsunit/compiler/double-array-to-global.js
index e221d90358..4dc1c9b7bf 100644
--- a/deps/v8/test/mjsunit/compiler/double-array-to-global.js
+++ b/deps/v8/test/mjsunit/compiler/double-array-to-global.js
@@ -11,6 +11,7 @@ function foo(a) {
b = a[i];
}
}
+%PrepareFunctionForOptimization(foo);
foo(a);
foo(a);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/eager-deopt-simple.js b/deps/v8/test/mjsunit/compiler/eager-deopt-simple.js
index 067400cfc6..c7a39cbedf 100644
--- a/deps/v8/test/mjsunit/compiler/eager-deopt-simple.js
+++ b/deps/v8/test/mjsunit/compiler/eager-deopt-simple.js
@@ -12,6 +12,7 @@ function f() {
return g(1, (%_DeoptimizeNow(), 2), 3);
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-1.js b/deps/v8/test/mjsunit/compiler/escape-analysis-1.js
index f05040bd02..f1b53a5cad 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-1.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-1.js
@@ -36,6 +36,7 @@ function g() {
return f(1,2,3);
}
+%PrepareFunctionForOptimization(g);
assertEquals(3, g());
assertEquals(3, g());
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-11.js b/deps/v8/test/mjsunit/compiler/escape-analysis-11.js
index e922fc15c8..5a136ee24e 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-11.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-11.js
@@ -13,6 +13,7 @@ function f() {
g();
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-12.js b/deps/v8/test/mjsunit/compiler/escape-analysis-12.js
index 16029a0ad3..d253d0d948 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-12.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-12.js
@@ -11,6 +11,7 @@ function f() {
x.b = 1;
0 <= y.b;
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-13.js b/deps/v8/test/mjsunit/compiler/escape-analysis-13.js
index 5f281aaaa4..b3de53a502 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-13.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-13.js
@@ -11,6 +11,7 @@ function f() {
assertEquals("a", x.a);
assertEquals("b", x.b);
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-15.js b/deps/v8/test/mjsunit/compiler/escape-analysis-15.js
index 1960d74892..510caa0782 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-15.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-15.js
@@ -12,6 +12,7 @@ function f(i) {
o2.a = o1;
if (i == 4) return o3;
}
+%PrepareFunctionForOptimization(f);
for (var i = 0; i < 100; ++i){
f(i%5)
}
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-16.js b/deps/v8/test/mjsunit/compiler/escape-analysis-16.js
index 4cd9ae43fd..bda6f82844 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-16.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-16.js
@@ -12,6 +12,7 @@ function foo(){
}
}
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo)
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-17.js b/deps/v8/test/mjsunit/compiler/escape-analysis-17.js
index 5709d47129..aca0d32735 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-17.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-17.js
@@ -21,6 +21,7 @@ function foo() {
}
return x;
}
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-18.js b/deps/v8/test/mjsunit/compiler/escape-analysis-18.js
index f2ff08299f..49719480f6 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-18.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-18.js
@@ -16,6 +16,7 @@ function foo(array) {
return bar(array);
}
+%PrepareFunctionForOptimization(foo);
let array = [,.5,];
foo(array);
foo(array);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-2.js b/deps/v8/test/mjsunit/compiler/escape-analysis-2.js
index 49f440e856..b62d7e278c 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-2.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-2.js
@@ -38,6 +38,7 @@ function g(a) {
return f(a,1,2,3);
}
+%PrepareFunctionForOptimization(g);
assertEquals(4, g());
assertEquals(4, g());
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-3.js b/deps/v8/test/mjsunit/compiler/escape-analysis-3.js
index b92d1c3876..193bf7cacb 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-3.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-3.js
@@ -37,6 +37,7 @@ function g() {
return arguments[f(1,2)];
}
+%PrepareFunctionForOptimization(g);
assertEquals(6, g(4,5,6));
assertEquals(6, g(4,5,6));
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-4.js b/deps/v8/test/mjsunit/compiler/escape-analysis-4.js
index ef9f95fd36..576f299e4f 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-4.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-4.js
@@ -41,6 +41,7 @@ function g() {
return "" + f(1,2,3) + " " + h(4,5,6);
}
+%PrepareFunctionForOptimization(g);
assertEquals("3 [object Arguments]", g());
assertEquals("3 [object Arguments]", g());
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-5.js b/deps/v8/test/mjsunit/compiler/escape-analysis-5.js
index 54b5e82958..69d0fdf575 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-5.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-5.js
@@ -41,6 +41,7 @@ function h(x) {
assertEquals("[object Arguments]", ""+x)
}
+%PrepareFunctionForOptimization(g);
assertEquals(4, g(h));
assertEquals(4, g(h));
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-6.js b/deps/v8/test/mjsunit/compiler/escape-analysis-6.js
index c36e7d956e..9d7ae92c65 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-6.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-6.js
@@ -41,6 +41,7 @@ function g() {
return x.length;
}
+%PrepareFunctionForOptimization(g);
assertEquals(5, g());
assertEquals(5, g());
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-7.js b/deps/v8/test/mjsunit/compiler/escape-analysis-7.js
index cfa30cbeb4..581bdf2c34 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-7.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-7.js
@@ -43,6 +43,7 @@ function g(a) {
return o.x;
}
+%PrepareFunctionForOptimization(g);
assertEquals(5, g(true));
assertEquals(7, g(false));
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-8.js b/deps/v8/test/mjsunit/compiler/escape-analysis-8.js
index d9c6d254ef..77d0492318 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-8.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-8.js
@@ -42,6 +42,7 @@ function g() {
return o.x;
}
+%PrepareFunctionForOptimization(g);
assertEquals(5, g());
assertEquals(5, g());
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-9.js b/deps/v8/test/mjsunit/compiler/escape-analysis-9.js
index 0b8f75c576..4e0c487008 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-9.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-9.js
@@ -43,6 +43,7 @@ function g(a) {
return o[0];
}
+%PrepareFunctionForOptimization(g);
assertEquals(7, g());
assertEquals(7, g());
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js b/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js
index 65e736c706..4659a39597 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js
@@ -52,6 +52,7 @@
assertEquals(5, r.y.z);
}
+ %PrepareFunctionForOptimization(f);
f(); f(); f();
%OptimizeFunctionOnNextCall(f);
f();
@@ -81,6 +82,7 @@
assertEquals(3, r.i.y.z);
}
+ %PrepareFunctionForOptimization(f);
f(); f(); f();
%OptimizeFunctionOnNextCall(f);
f();
@@ -113,9 +115,12 @@
assertEquals(3, l.y.z)
}
+ %PrepareFunctionForOptimization(f);
f(); f(); f();
%OptimizeFunctionOnNextCall(f);
- f(); f();
+ f();
+ %PrepareFunctionForOptimization(f);
+ f();
%OptimizeFunctionOnNextCall(f);
f(); f();
})();
@@ -147,10 +152,13 @@
assertEquals(3, l.y.z)
}
+ %PrepareFunctionForOptimization(f);
%NeverOptimizeFunction(i);
f(); f(); f();
%OptimizeFunctionOnNextCall(f);
- f(); f();
+ f();
+ %PrepareFunctionForOptimization(f);
+ f();
%OptimizeFunctionOnNextCall(f);
f(); f();
})();
@@ -179,9 +187,12 @@
assertEquals(7, k.t.u)
}
+ %PrepareFunctionForOptimization(f);
f(); f(); f();
%OptimizeFunctionOnNextCall(f);
- f(); f();
+ f();
+ %PrepareFunctionForOptimization(f);
+ f();
%OptimizeFunctionOnNextCall(f);
f(); f();
})();
@@ -206,6 +217,7 @@
return a + b + c;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(4, f(1, 2));
assertEquals(5, f(2, 1));
%OptimizeFunctionOnNextCall(f);
@@ -232,6 +244,7 @@
return a + b + c;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(4, f(1, 2));
assertEquals(5, f(2, 1));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-array.js b/deps/v8/test/mjsunit/compiler/escape-analysis-array.js
index 2c44fa8c9b..3696c7cfc3 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-array.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-array.js
@@ -11,6 +11,7 @@
return a[i];
}
+ %PrepareFunctionForOptimization(f);
assertEquals("first", f(0));
assertEquals("first", f(0));
%OptimizeFunctionOnNextCall(f);
@@ -24,6 +25,7 @@
return a[i];
}
+ %PrepareFunctionForOptimization(f);
assertEquals("first", f(0));
assertEquals("second", f(1));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-cycle.js b/deps/v8/test/mjsunit/compiler/escape-analysis-cycle.js
index ee3a4a721c..8a233c3af5 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-cycle.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-cycle.js
@@ -16,6 +16,7 @@ function f() {
return o.c.a.c.a.c.a.c.b;
}
+%PrepareFunctionForOptimization(f);
assertEquals(42, f());
assertEquals(42, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-1.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-1.js
index 7337264b85..2570414cab 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-1.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-1.js
@@ -40,6 +40,7 @@
assertEquals(2, o1[1]);
assertEquals(3, o1[2]);
}
+ %PrepareFunctionForOptimization(func);
func(false);
func(false);
%OptimizeFunctionOnNextCall(func);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-2.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-2.js
index 306f3e7410..5c0ac97436 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-2.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-2.js
@@ -43,6 +43,7 @@
assertEquals(4, o2[0]);
assertEquals(o1, o2[1]);
}
+ %PrepareFunctionForOptimization(func);
func();
func();
%OptimizeFunctionOnNextCall(func);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-3.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-3.js
index 9999e53178..e560f16301 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-3.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-3.js
@@ -44,6 +44,7 @@
assertEquals(4, o2[0]);
assertEquals(o1, o2[1]);
}
+ %PrepareFunctionForOptimization(func);
func();
func();
%OptimizeFunctionOnNextCall(func);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-4.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-4.js
index c80765706c..99e4e5c205 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-4.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-4.js
@@ -50,6 +50,7 @@
assertEquals(o1, o2.a);
assertEquals(4, o2.b);
}
+ %PrepareFunctionForOptimization(func);
func();
func();
%OptimizeFunctionOnNextCall(func);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-5.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-5.js
index e70f0b1221..dde500f299 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-5.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-5.js
@@ -35,6 +35,7 @@ function f() {
return x[0];
}
+%PrepareFunctionForOptimization(f);
assertEquals(f(), 23.1234);
assertEquals(f(), 23.1234);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-6.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-6.js
index 6a776286db..da8aa376f2 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-6.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-6.js
@@ -10,6 +10,7 @@ function f(x) {
return a.length;
}
+%PrepareFunctionForOptimization(f);
assertEquals(3, f());
assertEquals(3, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-framestate-use-at-branchpoint.js b/deps/v8/test/mjsunit/compiler/escape-analysis-framestate-use-at-branchpoint.js
index c86674e52e..cd6bfb0d3a 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-framestate-use-at-branchpoint.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-framestate-use-at-branchpoint.js
@@ -13,6 +13,7 @@ function foo() {
function bar() {i};
return o.x;
}
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-materialize.js b/deps/v8/test/mjsunit/compiler/escape-analysis-materialize.js
index 6ad3a53218..b0dd2d655a 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-materialize.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-materialize.js
@@ -10,6 +10,7 @@
%_DeoptimizeNow();
return a.length;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(3, f());
assertEquals(3, f());
%OptimizeFunctionOnNextCall(f);
@@ -22,6 +23,7 @@
%_DeoptimizeNow();
return fun.length;
}
+ %PrepareFunctionForOptimization(g);
assertEquals(2, g());
assertEquals(2, g());
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type-2.js b/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type-2.js
index 2c2135b6da..a2d0936ffa 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type-2.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type-2.js
@@ -29,6 +29,7 @@ function f(x) {
return o.a + 1;
}
+%PrepareFunctionForOptimization(f);
f(0,0);
f(1,0);
f(2,0);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js b/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js
index 9d033b9640..90adbe8e7a 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js
@@ -16,6 +16,7 @@ function f(x) {
return res;
}
+%PrepareFunctionForOptimization(f);
f(0);
f(1);
f(0);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-replacement.js b/deps/v8/test/mjsunit/compiler/escape-analysis-replacement.js
index ffe95e30aa..caa77a148e 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-replacement.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-replacement.js
@@ -33,6 +33,7 @@ function foo(x){
return c.c;
}
+%PrepareFunctionForOptimization(foo);
foo(true);
foo(false);
foo(true);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js b/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js
index e6cc9f1ed2..7a7d1225c0 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js
@@ -50,6 +50,7 @@ function test(value_track, value_break) {
var deopt = { deopt:false };
// Warm-up field tracking to a certain representation.
+ %PrepareFunctionForOptimization(access);
access(value_track, value_track(), deopt);
access(value_track, value_track(), deopt);
%OptimizeFunctionOnNextCall(access);
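The "warm-up field tracking" comment refers to V8's in-object field representation tracking: a field starts out tracked as Smi and is generalized to Double and then Tagged as incompatible values are stored, and this test deliberately warms a field to one representation before breaking it. A hedged sketch of that generalization (Box is a placeholder type, not from the test):

    function Box(v) { this.v = v; }

    var a = new Box(1);      // field 'v' tracked as Smi
    var b = new Box(1.5);    // map generalizes the field to Double
    var c = new Box("s");    // generalizes again to Tagged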
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-rest-parameters.js b/deps/v8/test/mjsunit/compiler/escape-analysis-rest-parameters.js
index 2ac1253a18..320465db82 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-rest-parameters.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-rest-parameters.js
@@ -23,6 +23,7 @@
return a + b + c;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(4, f(1, 2));
assertEquals(5, f(2, 1));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-type-none-in-object-state.js b/deps/v8/test/mjsunit/compiler/escape-analysis-type-none-in-object-state.js
index aee6485eed..1692e4a56c 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-type-none-in-object-state.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-type-none-in-object-state.js
@@ -18,6 +18,7 @@ function g(x) {
f(""+x);
}
+%PrepareFunctionForOptimization(g);
g("x"); g("x");
%OptimizeFunctionOnNextCall(g);
g("x");
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-typeguard.js b/deps/v8/test/mjsunit/compiler/escape-analysis-typeguard.js
index 8746a81f9e..6d6cff1137 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-typeguard.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-typeguard.js
@@ -15,6 +15,7 @@ function foo(){
assertEquals(7, z.a);
}
+%PrepareFunctionForOptimization(foo);
foo();
foo();
foo();
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis.js b/deps/v8/test/mjsunit/compiler/escape-analysis.js
index 111ed634fd..6b4fc96ddf 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis.js
@@ -42,6 +42,7 @@
}
assertEquals(expected, object.a);
}
+ %PrepareFunctionForOptimization(join);
join(true, 1); join(true, 1);
join(false, 2); join(false, 2);
%OptimizeFunctionOnNextCall(join);
@@ -65,6 +66,7 @@
assertEquals(45, object.a);
assertEquals(23, object.b);
}
+ %PrepareFunctionForOptimization(loop);
loop(); loop();
%OptimizeFunctionOnNextCall(loop);
loop(); loop();
@@ -99,6 +101,7 @@
assertEquals(54, object.b);
assertEquals(23, object.c);
}
+ %PrepareFunctionForOptimization(nested);
nested(); nested();
%OptimizeFunctionOnNextCall(nested);
nested(); nested();
@@ -126,6 +129,7 @@
assertEquals(3.0, o2.d.c);
assertEquals(4.5, o2.e);
}
+ %PrepareFunctionForOptimization(func);
func(); func();
%OptimizeFunctionOnNextCall(func);
func(); func();
@@ -167,6 +171,7 @@
assertEquals(5.9, o.e);
assertEquals(6.7, o.g);
}
+ %PrepareFunctionForOptimization(func);
func(); func();
%OptimizeFunctionOnNextCall(func);
func(); func();
@@ -193,6 +198,7 @@
x: { get:getter, set:setter },
y: { get:getter, set:setter }
});
+ %PrepareFunctionForOptimization(check);
check(23, 42); check(23, 42);
%OptimizeFunctionOnNextCall(check);
check(23, 42); check(23, 42);
@@ -261,6 +267,7 @@
return o.z;
}
}
+ %PrepareFunctionForOptimization(oob);
assertEquals(3, oob(cons1, false));
assertEquals(3, oob(cons1, false));
assertEquals(7, oob(cons2, true));
@@ -295,6 +302,7 @@
assertEquals(99, o1.x);
assertEquals(99, o2.b.x);
}
+ %PrepareFunctionForOptimization(deep);
deep(); deep();
%OptimizeFunctionOnNextCall(deep);
deep(); deep();
@@ -333,6 +341,7 @@
o3.b.x = 1;
assertEquals(1, o1.x);
}
+ %PrepareFunctionForOptimization(deep);
deep(false); deep(false);
%OptimizeFunctionOnNextCall(deep);
deep(false); deep(false);
@@ -358,6 +367,7 @@
assertEquals(3, r.x.y);
}
+ %PrepareFunctionForOptimization(f);
f(); f(); f();
%OptimizeFunctionOnNextCall(f);
f();
@@ -374,6 +384,7 @@
assertEquals(111, l2.x.y);
}
+ %PrepareFunctionForOptimization(f);
f(); f(); f();
%OptimizeFunctionOnNextCall(f);
f();
@@ -392,6 +403,7 @@
assertEquals(0, dummy.d);
}
+ %PrepareFunctionForOptimization(f);
f(); f(); f();
%OptimizeFunctionOnNextCall(f);
f();
@@ -410,6 +422,7 @@
deopt.deopt
assertEquals(x, o.x);
}
+ %PrepareFunctionForOptimization(field);
field(1); field(2);
%OptimizeFunctionOnNextCall(field);
field(3); field(4);
@@ -430,6 +443,7 @@
deopt.deopt
assertEquals(x, o.x);
}
+ %PrepareFunctionForOptimization(field);
field({}); field({});
%OptimizeFunctionOnNextCall(field);
field({}); field({});
diff --git a/deps/v8/test/mjsunit/compiler/eval-introduced-closure.js b/deps/v8/test/mjsunit/compiler/eval-introduced-closure.js
index 550c7c30ee..9b2d89685d 100644
--- a/deps/v8/test/mjsunit/compiler/eval-introduced-closure.js
+++ b/deps/v8/test/mjsunit/compiler/eval-introduced-closure.js
@@ -42,6 +42,7 @@ function do_eval(str) {
}
var eval_f = do_eval('(' + f + ')');
+%PrepareFunctionForOptimization(eval_f);
for (var i = 0; i < 5; i++) assertEquals(27, eval_f());
%OptimizeFunctionOnNextCall(eval_f);
assertEquals(27, eval_f());
@@ -53,6 +54,7 @@ function do_eval_local(str) {
}
eval_f = do_eval_local('(' + f + ')');
+%PrepareFunctionForOptimization(eval_f);
for (var i = 0; i < 5; i++) assertEquals(42, eval_f());
%OptimizeFunctionOnNextCall(eval_f);
assertEquals(42, eval_f());
@@ -65,6 +67,7 @@ function do_eval_with_other_eval_call(str) {
}
eval_f = do_eval_with_other_eval_call('(' + f + ')');
+%PrepareFunctionForOptimization(eval_f);
for (var i = 0; i < 5; i++) assertEquals(27, eval_f());
%OptimizeFunctionOnNextCall(eval_f);
assertEquals(27, eval_f());
@@ -72,6 +75,7 @@ assertEquals(27, eval_f());
function test_non_strict_outer_eval() {
function strict_eval(str) { "use strict"; return eval(str); }
var eval_f = strict_eval('(' + f + ')');
+ %PrepareFunctionForOptimization(eval_f);
for (var i = 0; i < 5; i++) assertEquals(27, eval_f());
%OptimizeFunctionOnNextCall(eval_f);
assertEquals(27, eval_f());
@@ -85,6 +89,7 @@ function test_strict_outer_eval() {
"use strict";
function strict_eval(str) { "use strict"; return eval(str); }
var eval_f = strict_eval('(' + f + ')');
+ %PrepareFunctionForOptimization(eval_f);
for (var i = 0; i < 5; i++) assertEquals(27, eval_f());
%OptimizeFunctionOnNextCall(eval_f);
assertEquals(27, eval_f());
diff --git a/deps/v8/test/mjsunit/compiler/feedback-after-throw.js b/deps/v8/test/mjsunit/compiler/feedback-after-throw.js
index 891e315c5c..595d4c17cf 100644
--- a/deps/v8/test/mjsunit/compiler/feedback-after-throw.js
+++ b/deps/v8/test/mjsunit/compiler/feedback-after-throw.js
@@ -32,6 +32,7 @@ function foo() {
return 1 > 5;
};
+%PrepareFunctionForOptimization(foo);
try { foo() } catch(e) {}
try { foo() } catch(e) {}
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/for-in-1.js b/deps/v8/test/mjsunit/compiler/for-in-1.js
index 80add89609..f7cfa409a6 100644
--- a/deps/v8/test/mjsunit/compiler/for-in-1.js
+++ b/deps/v8/test/mjsunit/compiler/for-in-1.js
@@ -12,6 +12,7 @@ function foo(o) {
}
var a = [];
+%PrepareFunctionForOptimization(foo);
assertEquals("", foo(a));
assertEquals("", foo(a));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/for-in-2.js b/deps/v8/test/mjsunit/compiler/for-in-2.js
index a586aff94e..d4b0397a50 100644
--- a/deps/v8/test/mjsunit/compiler/for-in-2.js
+++ b/deps/v8/test/mjsunit/compiler/for-in-2.js
@@ -12,6 +12,7 @@ function foo(o) {
}
var a = [];
+%PrepareFunctionForOptimization(foo);
assertEquals("", foo(a));
assertEquals("", foo(a));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/for-in-3.js b/deps/v8/test/mjsunit/compiler/for-in-3.js
index 80f3fa50c9..0f43a7569e 100644
--- a/deps/v8/test/mjsunit/compiler/for-in-3.js
+++ b/deps/v8/test/mjsunit/compiler/for-in-3.js
@@ -12,6 +12,7 @@ function foo(o) {
}
var o = {};
+%PrepareFunctionForOptimization(foo);
assertEquals("", foo(o));
assertEquals("", foo(o));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/for-in-4.js b/deps/v8/test/mjsunit/compiler/for-in-4.js
index d15c3484dd..af5c0c8400 100644
--- a/deps/v8/test/mjsunit/compiler/for-in-4.js
+++ b/deps/v8/test/mjsunit/compiler/for-in-4.js
@@ -12,6 +12,7 @@ function foo(o) {
}
var a = [];
+%PrepareFunctionForOptimization(foo);
assertEquals("", foo(a));
assertEquals("", foo(a));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/for-in-5.js b/deps/v8/test/mjsunit/compiler/for-in-5.js
index 8f469ab1b3..71c5d8822a 100644
--- a/deps/v8/test/mjsunit/compiler/for-in-5.js
+++ b/deps/v8/test/mjsunit/compiler/for-in-5.js
@@ -11,6 +11,7 @@ function foo(o) {
return s;
}
var o = {a:1, b:2, c:3};
+%PrepareFunctionForOptimization(foo);
assertEquals("abc", foo(o));
assertEquals("abc", foo(o));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/function-apply.js b/deps/v8/test/mjsunit/compiler/function-apply.js
index f9440dede1..70309ce2df 100644
--- a/deps/v8/test/mjsunit/compiler/function-apply.js
+++ b/deps/v8/test/mjsunit/compiler/function-apply.js
@@ -10,6 +10,7 @@
function bar() { return this; }
function foo() { return bar.apply(this, null); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo.call(42));
assertEquals(42, foo.call(42));
%OptimizeFunctionOnNextCall(foo);
@@ -20,6 +21,7 @@
function bar() { return this; }
function foo() { return bar.apply(this, undefined); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo.call(42));
assertEquals(42, foo.call(42));
%OptimizeFunctionOnNextCall(foo);
@@ -37,6 +39,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -52,6 +55,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -64,6 +68,7 @@
function bar() { return this; }
function foo() { return bar.apply(); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo());
assertEquals(undefined, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -74,6 +79,7 @@
function bar() { return this; }
function foo() { return bar.apply(this); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo.call(42));
assertEquals(42, foo.call(42));
%OptimizeFunctionOnNextCall(foo);
@@ -84,6 +90,7 @@
function bar() { return this; }
function foo() { return bar.apply(this, arguments, this); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo.call(42));
assertEquals(42, foo.call(42));
%OptimizeFunctionOnNextCall(foo);
@@ -100,6 +107,7 @@
return Function.prototype.apply.call(undefined, this, dummy);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -114,6 +122,7 @@
return Function.prototype.apply.call(null, this, dummy);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -128,6 +137,7 @@
return Function.prototype.apply.call(null, this, dummy);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/function-bind.js b/deps/v8/test/mjsunit/compiler/function-bind.js
index cc9451e0d5..c760507a17 100644
--- a/deps/v8/test/mjsunit/compiler/function-bind.js
+++ b/deps/v8/test/mjsunit/compiler/function-bind.js
@@ -12,6 +12,7 @@
return bar.bind(x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0)());
assertEquals(1, foo(1)());
%OptimizeFunctionOnNextCall(foo);
@@ -26,6 +27,7 @@
return bar.bind(undefined, x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0)());
assertEquals(1, foo(1)());
%OptimizeFunctionOnNextCall(foo);
@@ -39,6 +41,7 @@
return bar.bind(undefined, x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0)());
assertEquals(1, foo(1)());
%OptimizeFunctionOnNextCall(foo);
@@ -53,6 +56,7 @@
return bar.bind(undefined, x, y);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0, 0)());
assertEquals(2, foo(1, 1)());
%OptimizeFunctionOnNextCall(foo);
@@ -68,6 +72,7 @@
return bar.bind(undefined, x, y);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0, 0)());
assertEquals(2, foo(1, 1)());
%OptimizeFunctionOnNextCall(foo);
@@ -81,6 +86,7 @@
function foo(g) { return bar(g.bind(null, 2)); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(3, foo((x, y) => x + y));
assertEquals(1, foo((x, y) => x - y));
%OptimizeFunctionOnNextCall(foo);
@@ -93,6 +99,7 @@
function foo(a) { return a.map(add.bind(null, 1)); }
+ %PrepareFunctionForOptimization(foo);
assertEquals([1, 2, 3], foo([0, 1, 2]));
assertEquals([2, 3, 4], foo([1, 2, 3]));
%OptimizeFunctionOnNextCall(foo);
@@ -106,6 +113,7 @@
function foo(inc) { return inc(1); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(inc));
assertEquals(2, foo(inc));
%OptimizeFunctionOnNextCall(foo);
@@ -118,6 +126,7 @@
function foo() { return new B; }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertInstanceof(foo(), B);
%OptimizeFunctionOnNextCall(foo);
@@ -137,6 +146,7 @@
function foo(z) { return new B(z); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(3).x);
assertEquals(2, foo(3).y);
assertEquals(3, foo(3).z);
@@ -154,6 +164,7 @@
return new B;
}
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertInstanceof(foo(), A);
%OptimizeFunctionOnNextCall(foo);
@@ -174,6 +185,7 @@
return new B(z);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(3).x);
assertEquals(2, foo(3).y);
assertEquals(3, foo(3).z);
@@ -191,6 +203,7 @@
return new B;
}
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(B), A);
assertInstanceof(foo(B), A);
%OptimizeFunctionOnNextCall(foo);
@@ -211,6 +224,7 @@
return new B(z);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(B, 3).x);
assertEquals(2, foo(B, 3).y);
assertEquals(3, foo(B, 3).z);
@@ -231,6 +245,7 @@
};
const B = C.__proto__ = A.bind(null, 1);
+ %PrepareFunctionForOptimization(C);
assertInstanceof(new C(), A);
assertInstanceof(new C(), B);
assertInstanceof(new C(), C);
@@ -253,6 +268,7 @@
return bar(B)
}
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(B), A);
assertInstanceof(foo(B), A);
%OptimizeFunctionOnNextCall(foo);
@@ -276,6 +292,7 @@
return bar(B, z);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(B, 3).x);
assertEquals(2, foo(B, 3).y);
assertEquals(3, foo(B, 3).z);
diff --git a/deps/v8/test/mjsunit/compiler/function-caller.js b/deps/v8/test/mjsunit/compiler/function-caller.js
index 1192e680cb..931e31f2fe 100644
--- a/deps/v8/test/mjsunit/compiler/function-caller.js
+++ b/deps/v8/test/mjsunit/compiler/function-caller.js
@@ -18,6 +18,7 @@
(function caller() { g() })();
}
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/global-delete.js b/deps/v8/test/mjsunit/compiler/global-delete.js
index c32fda6dfa..d87a5970a0 100644
--- a/deps/v8/test/mjsunit/compiler/global-delete.js
+++ b/deps/v8/test/mjsunit/compiler/global-delete.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function test(expected, f) {
+ %PrepareFunctionForOptimization(f);
assertEquals(expected, f());
assertEquals(expected, f());
%OptimizeFunctionOnNextCall(f);
@@ -13,6 +14,7 @@ function test(expected, f) {
}
function testThrows(f) {
+ %PrepareFunctionForOptimization(f);
assertThrows(f);
assertThrows(f);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/global-var-delete.js b/deps/v8/test/mjsunit/compiler/global-var-delete.js
index a7ea9ea4b1..bd9cf58d62 100644
--- a/deps/v8/test/mjsunit/compiler/global-var-delete.js
+++ b/deps/v8/test/mjsunit/compiler/global-var-delete.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function test(expected, f) {
+ %PrepareFunctionForOptimization(f);
assertEquals(expected, f());
assertEquals(expected, f());
%OptimizeFunctionOnNextCall(f);
@@ -13,6 +14,7 @@ function test(expected, f) {
}
function testThrows(f) {
+ %PrepareFunctionForOptimization(f);
assertThrows(f);
assertThrows(f);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/increment-typefeedback.js b/deps/v8/test/mjsunit/compiler/increment-typefeedback.js
index 53e5ed678f..eeab4257ae 100644
--- a/deps/v8/test/mjsunit/compiler/increment-typefeedback.js
+++ b/deps/v8/test/mjsunit/compiler/increment-typefeedback.js
@@ -32,6 +32,7 @@ function f(x) {
return x;
}
+%PrepareFunctionForOptimization(f);
f(0.5);
f(0.5);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/inline-accessors1.js b/deps/v8/test/mjsunit/compiler/inline-accessors1.js
index daa01ec022..3cfbab5dc9 100644
--- a/deps/v8/test/mjsunit/compiler/inline-accessors1.js
+++ b/deps/v8/test/mjsunit/compiler/inline-accessors1.js
@@ -20,6 +20,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(o));
assertEquals(1, foo(o));
%OptimizeFunctionOnNextCall(foo);
@@ -43,6 +44,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(o));
assertEquals(1, foo(o));
%OptimizeFunctionOnNextCall(foo);
@@ -70,6 +72,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(o));
assertEquals(0, foo(o));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/inline-accessors2.js b/deps/v8/test/mjsunit/compiler/inline-accessors2.js
index b3985bf9dc..c13cf83e5d 100644
--- a/deps/v8/test/mjsunit/compiler/inline-accessors2.js
+++ b/deps/v8/test/mjsunit/compiler/inline-accessors2.js
@@ -36,6 +36,7 @@ function TestInlinedGetter(context, obj, expected) {
forceDeopt = { deopt: 0 };
accessorCallCount = 0;
+ %PrepareFunctionForOptimization(context);
assertEquals(expected, context(obj));
assertEquals(1, accessorCallCount);
@@ -194,6 +195,7 @@ function TestInlinedSetter(context, obj, value, expected) {
accessorCallCount = 0;
setterArgument = value;
+ %PrepareFunctionForOptimization(context);
assertEquals(expected, context(obj, value));
assertEquals(value, setterValue);
assertEquals(1, accessorCallCount);
diff --git a/deps/v8/test/mjsunit/compiler/inline-arguments.js b/deps/v8/test/mjsunit/compiler/inline-arguments.js
index 13f4a33e7b..4ee823f651 100644
--- a/deps/v8/test/mjsunit/compiler/inline-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/inline-arguments.js
@@ -46,6 +46,7 @@ A.prototype.Z = function () {
};
var a = new A();
+%PrepareFunctionForOptimization(a.Z);
a.Z(4,5,6);
a.Z(4,5,6);
%OptimizeFunctionOnNextCall(a.Z);
@@ -75,6 +76,7 @@ function F4() {
return F31(1);
}
+%PrepareFunctionForOptimization(F4);
F4(1);
F4(1);
F4(1);
@@ -108,6 +110,7 @@ F4(1);
adapt4to2(1, 2, 3, 4);
}
+ %PrepareFunctionForOptimization(test_adaptation);
test_adaptation();
test_adaptation();
%OptimizeFunctionOnNextCall(test_adaptation);
@@ -146,6 +149,7 @@ function toarr2(marker, a, b, c) {
function test_toarr(toarr) {
var marker = { x: 0 };
+ %PrepareFunctionForOptimization(toarr);
assertArrayEquals([3, 2, 1], toarr(marker, 2, 4, 6));
assertArrayEquals([3, 2, 1], toarr(marker, 2, 4, 6));
%OptimizeFunctionOnNextCall(toarr);
@@ -175,7 +179,10 @@ test_toarr(toarr2);
return inner(x, y);
}
+ %PrepareFunctionForOptimization(outer);
%OptimizeFunctionOnNextCall(outer);
+ assertEquals(2, outer(1, 2));
+ %PrepareFunctionForOptimization(inner);
%OptimizeFunctionOnNextCall(inner);
assertEquals(2, outer(1, 2));
})();
@@ -197,6 +204,7 @@ test_toarr(toarr2);
return inner(x, y);
}
+ %PrepareFunctionForOptimization(outer);
assertEquals(2, outer(1, 2));
assertEquals(2, outer(1, 2));
assertEquals(2, outer(1, 2));
@@ -242,6 +250,7 @@ test_toarr(toarr2);
}
}
+ %PrepareFunctionForOptimization(outer);
for (var step = 0; step < 4; step++) {
if (outerCount == 1) outer(10);
if (outerCount == 2) outer(10, 11);
@@ -302,6 +311,7 @@ test_toarr(toarr2);
);
}
+ %PrepareFunctionForOptimization(outer);
outer();
outer();
%OptimizeFunctionOnNextCall(outer);
diff --git a/deps/v8/test/mjsunit/compiler/inline-arity-mismatch.js b/deps/v8/test/mjsunit/compiler/inline-arity-mismatch.js
index 4a61fa3a62..1a942d9903 100644
--- a/deps/v8/test/mjsunit/compiler/inline-arity-mismatch.js
+++ b/deps/v8/test/mjsunit/compiler/inline-arity-mismatch.js
@@ -48,13 +48,16 @@ function h2(a, b) {
var o = {x: 2};
+%PrepareFunctionForOptimization(h1);
assertEquals(4, h1(o, o));
assertEquals(4, h1(o, o));
+%OptimizeFunctionOnNextCall(h1);
+assertEquals(4, h1(o, o));
+
+%PrepareFunctionForOptimization(h2);
assertEquals(4, h2(o, o));
assertEquals(4, h2(o, o));
-%OptimizeFunctionOnNextCall(h1);
%OptimizeFunctionOnNextCall(h2);
-assertEquals(4, h1(o, o));
assertEquals(4, h2(o, o));
var u = {y:0, x:1};
diff --git a/deps/v8/test/mjsunit/compiler/inline-closures.js b/deps/v8/test/mjsunit/compiler/inline-closures.js
index 69161e505e..3ee75842fa 100644
--- a/deps/v8/test/mjsunit/compiler/inline-closures.js
+++ b/deps/v8/test/mjsunit/compiler/inline-closures.js
@@ -42,6 +42,7 @@ object.f = mkClosure('g');
object.g = mkClosure('h');
object.h = mkClosure('x');
+%PrepareFunctionForOptimization(object.f);
assertSame(1, object.f({value:1}));
assertSame(2, object.f({value:2}));
%OptimizeFunctionOnNextCall(object.f);
diff --git a/deps/v8/test/mjsunit/compiler/inline-compare.js b/deps/v8/test/mjsunit/compiler/inline-compare.js
index d97dce2e96..2d9c7a2e4c 100644
--- a/deps/v8/test/mjsunit/compiler/inline-compare.js
+++ b/deps/v8/test/mjsunit/compiler/inline-compare.js
@@ -42,6 +42,7 @@ function TestInlineCompare(o) {
}
}
+%PrepareFunctionForOptimization(TestInlineCompare);
var o = {};
o.f = function() { return 0 === 1; };
for (var i = 0; i < 5; i++) TestInlineCompare(o);
diff --git a/deps/v8/test/mjsunit/compiler/inline-construct.js b/deps/v8/test/mjsunit/compiler/inline-construct.js
index 111c0f32ad..4c5c9978d2 100644
--- a/deps/v8/test/mjsunit/compiler/inline-construct.js
+++ b/deps/v8/test/mjsunit/compiler/inline-construct.js
@@ -35,6 +35,7 @@ function TestInlinedConstructor(constructor, closure) {
var noDeopt = { deopt:0 };
var forceDeopt = { /*empty*/ };
+ %PrepareFunctionForOptimization(closure);
result = closure(constructor, 11, noDeopt, counter);
assertEquals(11, result);
assertEquals(1, counter.value);
@@ -119,6 +120,7 @@ function f_too_many(a, b, c) {
var obj = new c_too_many(a, b, c);
return obj.x;
}
+%PrepareFunctionForOptimization(f_too_many);
assertEquals(23, f_too_many(11, 12, 1));
assertEquals(42, f_too_many(23, 19, 1));
%OptimizeFunctionOnNextCall(f_too_many);
@@ -135,6 +137,7 @@ function f_too_few(a) {
var obj = new c_too_few(a);
return obj.x;
}
+%PrepareFunctionForOptimization(f_too_few);
assertEquals(12, f_too_few(11));
assertEquals(24, f_too_few(23));
%OptimizeFunctionOnNextCall(f_too_few);
diff --git a/deps/v8/test/mjsunit/compiler/inline-context-deopt.js b/deps/v8/test/mjsunit/compiler/inline-context-deopt.js
index ef134ad970..4b20719341 100644
--- a/deps/v8/test/mjsunit/compiler/inline-context-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/inline-context-deopt.js
@@ -14,5 +14,6 @@ function bar(s, t) {
return foo(s);
}
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertEquals(13, bar(1, 2));
diff --git a/deps/v8/test/mjsunit/compiler/inline-context-slots.js b/deps/v8/test/mjsunit/compiler/inline-context-slots.js
index d0e907b1e5..a78d28e07b 100644
--- a/deps/v8/test/mjsunit/compiler/inline-context-slots.js
+++ b/deps/v8/test/mjsunit/compiler/inline-context-slots.js
@@ -43,6 +43,7 @@
return this.max();
}
var f = new F();
+ %PrepareFunctionForOptimization(f.run);
for (var i=0; i<5; i++) f.run();
%OptimizeFunctionOnNextCall(f.run);
assertEquals(10, f.run());
diff --git a/deps/v8/test/mjsunit/compiler/inline-exception-1.js b/deps/v8/test/mjsunit/compiler/inline-exception-1.js
index 8da6845c3b..b8b650bb38 100644
--- a/deps/v8/test/mjsunit/compiler/inline-exception-1.js
+++ b/deps/v8/test/mjsunit/compiler/inline-exception-1.js
@@ -30,6 +30,7 @@ function warmUp(f) {
}
function resetOptAndAssertResultEquals(expected, f) {
+ %PrepareFunctionForOptimization(f);
warmUp(f);
resetState();
// %DebugPrint(f);
@@ -39,6 +40,7 @@ function resetOptAndAssertResultEquals(expected, f) {
}
function resetOptAndAssertThrowsWith(expected, f) {
+ %PrepareFunctionForOptimization(f);
warmUp(f);
resetState();
// %DebugPrint(f);
diff --git a/deps/v8/test/mjsunit/compiler/inline-exception-2.js b/deps/v8/test/mjsunit/compiler/inline-exception-2.js
index 6dbc7a478b..945be2de88 100644
--- a/deps/v8/test/mjsunit/compiler/inline-exception-2.js
+++ b/deps/v8/test/mjsunit/compiler/inline-exception-2.js
@@ -30,6 +30,7 @@ function warmUp(f) {
}
function resetOptAndAssertResultEquals(expected, f) {
+ %PrepareFunctionForOptimization(f);
warmUp(f);
resetState();
// %DebugPrint(f);
@@ -39,6 +40,7 @@ function resetOptAndAssertResultEquals(expected, f) {
}
function resetOptAndAssertThrowsWith(expected, f) {
+ %PrepareFunctionForOptimization(f);
warmUp(f);
resetState();
// %DebugPrint(f);
diff --git a/deps/v8/test/mjsunit/compiler/inline-function-apply.js b/deps/v8/test/mjsunit/compiler/inline-function-apply.js
index 12238e1c53..8ccb8165a7 100644
--- a/deps/v8/test/mjsunit/compiler/inline-function-apply.js
+++ b/deps/v8/test/mjsunit/compiler/inline-function-apply.js
@@ -68,6 +68,7 @@
return r;
}
+ %PrepareFunctionForOptimization(A);
assertEquals("ABC", A(1,2));
assertEquals("ABC", A(1,2));
%OptimizeFunctionOnNextCall(A);
diff --git a/deps/v8/test/mjsunit/compiler/inline-global-access.js b/deps/v8/test/mjsunit/compiler/inline-global-access.js
index b52652a764..da6b25e75e 100644
--- a/deps/v8/test/mjsunit/compiler/inline-global-access.js
+++ b/deps/v8/test/mjsunit/compiler/inline-global-access.js
@@ -45,6 +45,7 @@ function TestInlineGlobalLoad(o) {
}
}
+%PrepareFunctionForOptimization(TestInlineGlobalLoad);
var o = {};
o.f = function() { return GLOBAL; };
for (var i = 0; i < 5; i++) TestInlineGlobalLoad(o);
diff --git a/deps/v8/test/mjsunit/compiler/inline-literals.js b/deps/v8/test/mjsunit/compiler/inline-literals.js
index 448799669e..7718ae0112 100644
--- a/deps/v8/test/mjsunit/compiler/inline-literals.js
+++ b/deps/v8/test/mjsunit/compiler/inline-literals.js
@@ -43,6 +43,7 @@ function TestArrayLiteral(a, b, c) {
assertEquals(expected, result, "TestArrayLiteral");
}
+%PrepareFunctionForOptimization(TestArrayLiteral);
TestArrayLiteral(1, 2, 3);
TestArrayLiteral(1, 2, 3);
%OptimizeFunctionOnNextCall(TestArrayLiteral);
@@ -63,6 +64,7 @@ function TestObjectLiteral(a, b, c) {
assertEquals(expected, result, "TestObjectLiteral");
}
+%PrepareFunctionForOptimization(TestObjectLiteral);
TestObjectLiteral(1, 2, 3);
TestObjectLiteral(1, 2, 3);
%OptimizeFunctionOnNextCall(TestObjectLiteral);
@@ -82,6 +84,7 @@ function TestRegExpLiteral(s, x, y, expected) {
assertEquals(expected, result, "TestRegExpLiteral");
}
+%PrepareFunctionForOptimization(TestRegExpLiteral);
TestRegExpLiteral("a-", "reg", "exp", "regexp-");
TestRegExpLiteral("-b", "reg", "exp", "-expreg");
%OptimizeFunctionOnNextCall(TestRegExpLiteral);
@@ -103,6 +106,7 @@ function TestFunctionLiteral(a, b, c, expected) {
assertEquals(expected, result, "TestFunctionLiteral");
}
+%PrepareFunctionForOptimization(TestFunctionLiteral);
TestFunctionLiteral(1, 2, 3, 6);
TestFunctionLiteral(4, 5, 6, 15);
%OptimizeFunctionOnNextCall(TestFunctionLiteral);
diff --git a/deps/v8/test/mjsunit/compiler/inline-omit-arguments-deopt.js b/deps/v8/test/mjsunit/compiler/inline-omit-arguments-deopt.js
index 65bfce715d..8e909bff16 100644
--- a/deps/v8/test/mjsunit/compiler/inline-omit-arguments-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/inline-omit-arguments-deopt.js
@@ -15,5 +15,6 @@ function baz() {
return foo.arguments.length == 1 && foo.arguments[0] == 11;
}
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertEquals(true, bar(12, 14));
diff --git a/deps/v8/test/mjsunit/compiler/inline-omit-arguments-object.js b/deps/v8/test/mjsunit/compiler/inline-omit-arguments-object.js
index 342b78cac7..2154a1ba69 100644
--- a/deps/v8/test/mjsunit/compiler/inline-omit-arguments-object.js
+++ b/deps/v8/test/mjsunit/compiler/inline-omit-arguments-object.js
@@ -10,5 +10,6 @@ function foo(s, t) {
return args.length == 1 && args[0] == 11;
}
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(true, foo(11));
diff --git a/deps/v8/test/mjsunit/compiler/inline-omit-arguments.js b/deps/v8/test/mjsunit/compiler/inline-omit-arguments.js
index d72e45303b..9b14993064 100644
--- a/deps/v8/test/mjsunit/compiler/inline-omit-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/inline-omit-arguments.js
@@ -8,5 +8,6 @@ var x = 42;
function bar(s, t, u, v) { return x + s; }
function foo(s, t) { return bar(s); }
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(42 + 12, foo(12));
diff --git a/deps/v8/test/mjsunit/compiler/inline-param.js b/deps/v8/test/mjsunit/compiler/inline-param.js
index 8fa80088fe..6c46161189 100644
--- a/deps/v8/test/mjsunit/compiler/inline-param.js
+++ b/deps/v8/test/mjsunit/compiler/inline-param.js
@@ -41,6 +41,7 @@ function TestInlineOneParam(o, p) {
}
}
+%PrepareFunctionForOptimization(TestInlineOneParam);
var obj = {x:42};
var o1 = {};
o1.f = function(o) { return o.x; };
@@ -78,6 +79,7 @@ function TestInlineTwoParams(o, p) {
}
}
+%PrepareFunctionForOptimization(TestInlineTwoParams);
var o2 = {};
o2.h = function(i, j) { return i < j; };
for (var i = 0; i < 5; i++) TestInlineTwoParams(o2, 42);
diff --git a/deps/v8/test/mjsunit/compiler/inline-surplus-arguments-deopt.js b/deps/v8/test/mjsunit/compiler/inline-surplus-arguments-deopt.js
index dfbdd8d06b..190c686813 100644
--- a/deps/v8/test/mjsunit/compiler/inline-surplus-arguments-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/inline-surplus-arguments-deopt.js
@@ -16,5 +16,6 @@ function baz() {
foo.arguments[2] == 15;
}
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertEquals(true, bar(12, 14));
diff --git a/deps/v8/test/mjsunit/compiler/inline-surplus-arguments-object.js b/deps/v8/test/mjsunit/compiler/inline-surplus-arguments-object.js
index fec77af0c0..83bfbf5dd6 100644
--- a/deps/v8/test/mjsunit/compiler/inline-surplus-arguments-object.js
+++ b/deps/v8/test/mjsunit/compiler/inline-surplus-arguments-object.js
@@ -13,5 +13,6 @@ function bar(s, t) {
args[2] == 13;
}
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertEquals(true, bar(11, 12));
diff --git a/deps/v8/test/mjsunit/compiler/inline-surplus-arguments.js b/deps/v8/test/mjsunit/compiler/inline-surplus-arguments.js
index c912acaa3f..b75c5df61d 100644
--- a/deps/v8/test/mjsunit/compiler/inline-surplus-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/inline-surplus-arguments.js
@@ -8,5 +8,6 @@ var x = 42;
function foo(s) { return x + s; }
function bar(s, t) { return foo(s, t, 13); }
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertEquals(42 + 12, bar(12));
diff --git a/deps/v8/test/mjsunit/compiler/inline-throw.js b/deps/v8/test/mjsunit/compiler/inline-throw.js
index 099b9d3351..a3e31abdc9 100644
--- a/deps/v8/test/mjsunit/compiler/inline-throw.js
+++ b/deps/v8/test/mjsunit/compiler/inline-throw.js
@@ -42,6 +42,7 @@ function g(x) {
return f(x);
}
+%PrepareFunctionForOptimization(g);
for (var i = 0; i < 5; i++) g(0);
%OptimizeFunctionOnNextCall(g);
assertEquals(true, g(0));
@@ -57,6 +58,7 @@ function h(x) {
return f(x) ? "yes" : "no";
}
+%PrepareFunctionForOptimization(h);
for (var i = 0; i < 5; i++) h(0);
%OptimizeFunctionOnNextCall(h);
assertEquals("yes", h(0));
diff --git a/deps/v8/test/mjsunit/compiler/inline-two.js b/deps/v8/test/mjsunit/compiler/inline-two.js
index 68372a979e..21571091dc 100644
--- a/deps/v8/test/mjsunit/compiler/inline-two.js
+++ b/deps/v8/test/mjsunit/compiler/inline-two.js
@@ -41,6 +41,7 @@ function TestInlineX(o) {
}
}
+%PrepareFunctionForOptimization(TestInlineX);
var o2 = {};
o2.size = function() { return 42; }
o2.g = function() { return this.size(); };
@@ -64,6 +65,7 @@ function TestInlineX2(o) {
}
}
+%PrepareFunctionForOptimization(TestInlineX2);
var obj = {}
obj.foo = function() { return 42; }
var o3 = {};
@@ -89,6 +91,7 @@ function TestInlineFG(o) {
}
}
+%PrepareFunctionForOptimization(TestInlineFG);
var obj = {}
obj.g = function() { return 42; }
var o3 = {};
diff --git a/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter1.js b/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter1.js
index 8eb1c308a3..3dfcf03222 100644
--- a/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter1.js
+++ b/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter1.js
@@ -10,6 +10,7 @@ function foo(a) {
var a = new Array(4);
+%PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(a));
assertEquals(undefined, foo(a));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter2.js b/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter2.js
index 8ae642619e..78e2b53eee 100644
--- a/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter2.js
+++ b/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter2.js
@@ -15,6 +15,7 @@ var a = new Array(4);
var o = {}
o.__defineGetter__(0, function() { return 1; });
+%PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(a));
assertEquals(undefined, foo(a));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js b/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js
index 08cbdbef8c..7fe6cd5535 100644
--- a/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js
+++ b/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js
@@ -10,6 +10,7 @@
var x = {};
var a = [x,x,];
+ %PrepareFunctionForOptimization(foo);
assertEquals(x, foo(a));
assertEquals(x, foo(a));
%OptimizeFunctionOnNextCall(foo);
@@ -23,6 +24,7 @@
var x = 0;
var a = [x,x,];
+ %PrepareFunctionForOptimization(foo);
assertEquals(x, foo(a));
assertEquals(x, foo(a));
%OptimizeFunctionOnNextCall(foo);
@@ -36,6 +38,7 @@
var x = 0;
var a = [x,x,x];
+ %PrepareFunctionForOptimization(foo);
assertEquals(x, foo(a));
assertEquals(x, foo(a));
%OptimizeFunctionOnNextCall(foo);
@@ -49,6 +52,7 @@
var x = {};
var a = [x,x,x];
+ %PrepareFunctionForOptimization(foo);
assertEquals(x, foo(a));
assertEquals(x, foo(a));
%OptimizeFunctionOnNextCall(foo);
@@ -61,6 +65,7 @@
var a = [,,];
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(a));
assertEquals(undefined, foo(a));
%OptimizeFunctionOnNextCall(foo);
@@ -75,6 +80,7 @@
var a = [1, 2, 3];
+ %PrepareFunctionForOptimization(foo);
assertEquals(3, foo(a));
assertEquals(2, foo(a));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/inlined-call-mapcheck.js b/deps/v8/test/mjsunit/compiler/inlined-call-mapcheck.js
index 1f7b2dad87..3a02e701da 100644
--- a/deps/v8/test/mjsunit/compiler/inlined-call-mapcheck.js
+++ b/deps/v8/test/mjsunit/compiler/inlined-call-mapcheck.js
@@ -36,6 +36,7 @@
function g() {}
+ %PrepareFunctionForOptimization(f);
f(g);
f(g);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/inlined-call.js b/deps/v8/test/mjsunit/compiler/inlined-call.js
index 772dcbee8b..0c8134aa1e 100644
--- a/deps/v8/test/mjsunit/compiler/inlined-call.js
+++ b/deps/v8/test/mjsunit/compiler/inlined-call.js
@@ -69,6 +69,7 @@ function unshiftsArray(num) {
[].unshift.call(array, num);
}
+%PrepareFunctionForOptimization(unshiftsArray);
unshiftsArray(50);
unshiftsArray(60);
%OptimizeFunctionOnNextCall(unshiftsArray);
@@ -93,6 +94,7 @@ function callNoArgs() {
[].fun.call();
}
+%PrepareFunctionForOptimization(callNoArgs);
callNoArgs();
callNoArgs();
assertEquals(this, funRecv);
@@ -109,6 +111,7 @@ function callStrictNoArgs() {
[].funStrict.call();
}
+%PrepareFunctionForOptimization(callStrictNoArgs);
callStrictNoArgs();
callStrictNoArgs();
assertEquals(undefined, funStrictRecv);
@@ -125,6 +128,7 @@ function callManyArgs() {
[].manyArgs.call(0, 1, 2, 3, 4, 5);
}
+%PrepareFunctionForOptimization(callManyArgs);
callManyArgs();
callManyArgs();
%OptimizeFunctionOnNextCall(callManyArgs);
@@ -139,6 +143,7 @@ function callManyArgsSloppy() {
[].manyArgsSloppy.call(null, 1, 2, 3, 4, 5);
}
+%PrepareFunctionForOptimization(callManyArgsSloppy);
callManyArgsSloppy();
callManyArgsSloppy();
%OptimizeFunctionOnNextCall(callManyArgsSloppy);
@@ -154,6 +159,7 @@ function callBuiltinIndirectly() {
return "".charCodeAt.call(str, 3);
}
+%PrepareFunctionForOptimization(callBuiltinIndirectly);
callBuiltinIndirectly();
callBuiltinIndirectly();
%OptimizeFunctionOnNextCall(callBuiltinIndirectly);
@@ -174,12 +180,14 @@ function callInlined(num) {
return callInlineableBuiltinIndirectlyWhileInlined(num);
}
+%PrepareFunctionForOptimization(callInlineableBuiltinIndirectlyWhileInlined);
callInlineableBuiltinIndirectlyWhileInlined(1);
callInlineableBuiltinIndirectlyWhileInlined(2);
%OptimizeFunctionOnNextCall(callInlineableBuiltinIndirectlyWhileInlined);
callInlineableBuiltinIndirectlyWhileInlined(3);
assertOptimized(callInlineableBuiltinIndirectlyWhileInlined);
+%PrepareFunctionForOptimization(callInlined);
callInlined(1);
callInlined(2);
%OptimizeFunctionOnNextCall(callInlined);
diff --git a/deps/v8/test/mjsunit/compiler/instance-of-overridden-has-instance.js b/deps/v8/test/mjsunit/compiler/instance-of-overridden-has-instance.js
index 49c8899e69..c82fedf280 100644
--- a/deps/v8/test/mjsunit/compiler/instance-of-overridden-has-instance.js
+++ b/deps/v8/test/mjsunit/compiler/instance-of-overridden-has-instance.js
@@ -13,6 +13,7 @@
return {} instanceof C;
}
+ %PrepareFunctionForOptimization(f);
assertTrue(f());
assertTrue(f());
%OptimizeFunctionOnNextCall(f);
@@ -35,6 +36,7 @@
return f(b, C);
}
+ %PrepareFunctionForOptimization(g);
assertFalse(f(true, Number));
assertFalse(f(true, Number));
assertFalse(g(false));
@@ -57,6 +59,7 @@
return f(b, C);
}
+ %PrepareFunctionForOptimization(g);
assertFalse(f(true, Number));
assertFalse(f(true, Number));
assertFalse(g(false));
@@ -76,6 +79,7 @@
return {} instanceof C;
}
+ %PrepareFunctionForOptimization(f);
assertTrue(f());
assertTrue(f());
%OptimizeFunctionOnNextCall(f);
@@ -97,6 +101,7 @@
return {} instanceof C;
}
+ %PrepareFunctionForOptimization(f);
assertFalse(f());
assertFalse(f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/instanceof-opt1.js b/deps/v8/test/mjsunit/compiler/instanceof-opt1.js
index 6ffdfc0ab1..0549222565 100644
--- a/deps/v8/test/mjsunit/compiler/instanceof-opt1.js
+++ b/deps/v8/test/mjsunit/compiler/instanceof-opt1.js
@@ -12,6 +12,7 @@ var Foo = {
// OrdinaryHasInstance call inside Function.prototype[@@hasInstance].
function foo() { return 1 instanceof Foo; }
+%PrepareFunctionForOptimization(foo);
assertEquals(false, foo());
assertEquals(false, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/instanceof-opt2.js b/deps/v8/test/mjsunit/compiler/instanceof-opt2.js
index 6f57faf1fc..bddff35535 100644
--- a/deps/v8/test/mjsunit/compiler/instanceof-opt2.js
+++ b/deps/v8/test/mjsunit/compiler/instanceof-opt2.js
@@ -10,6 +10,7 @@ function Foo() {}
// OrdinaryHasInstance call inside Function.prototype[@@hasInstance].
function foo() { return 1 instanceof Foo; }
+%PrepareFunctionForOptimization(foo);
assertEquals(false, foo());
assertEquals(false, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/instanceof-opt3.js b/deps/v8/test/mjsunit/compiler/instanceof-opt3.js
index 3317d8acf4..439f39b02c 100644
--- a/deps/v8/test/mjsunit/compiler/instanceof-opt3.js
+++ b/deps/v8/test/mjsunit/compiler/instanceof-opt3.js
@@ -11,6 +11,7 @@ var Foo = Bar.bind(null);
// OrdinaryHasInstance call inside Function.prototype[@@hasInstance].
function foo() { return 1 instanceof Foo; }
+%PrepareFunctionForOptimization(foo);
assertEquals(false, foo());
assertEquals(false, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/instanceof.js b/deps/v8/test/mjsunit/compiler/instanceof.js
index 67127cf88e..ad0d2c1786 100644
--- a/deps/v8/test/mjsunit/compiler/instanceof.js
+++ b/deps/v8/test/mjsunit/compiler/instanceof.js
@@ -42,6 +42,7 @@ F.__proto__ = null;
(function() {
function foo(o) { return o instanceof A; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(a));
assertTrue(foo(a));
assertTrue(foo(new A()));
@@ -59,6 +60,7 @@ F.__proto__ = null;
}
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(a));
assertTrue(foo(a));
assertTrue(foo(new A()));
@@ -72,6 +74,7 @@ F.__proto__ = null;
(function() {
function foo(o) { return o instanceof B; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(a));
assertFalse(foo(a));
assertFalse(foo(new A()));
@@ -83,6 +86,7 @@ F.__proto__ = null;
(function() {
function foo(o) { return o instanceof C; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(a));
assertTrue(foo(a));
assertTrue(foo(new A()));
@@ -94,6 +98,7 @@ F.__proto__ = null;
(function() {
function foo(o) { return o instanceof D; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(a));
assertTrue(foo(a));
assertFalse(foo(new A()));
@@ -111,6 +116,7 @@ F.__proto__ = null;
}
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(a));
assertTrue(foo(new A()));
%OptimizeFunctionOnNextCall(foo);
@@ -123,6 +129,7 @@ F.__proto__ = null;
return o instanceof F;
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(a));
assertFalse(foo(new A()));
assertTrue(foo(new F()));
@@ -138,6 +145,7 @@ F.__proto__ = null;
return a instanceof A;
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -155,6 +163,7 @@ F.__proto__ = null;
makeFoo();
const foo = makeFoo();
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(new B));
assertFalse(foo(new A));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/instanceof2.js b/deps/v8/test/mjsunit/compiler/instanceof2.js
index ca006e3046..3d5b904243 100644
--- a/deps/v8/test/mjsunit/compiler/instanceof2.js
+++ b/deps/v8/test/mjsunit/compiler/instanceof2.js
@@ -15,6 +15,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -30,6 +31,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertTrue(IsGoo(goo));
assertTrue(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -45,6 +47,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertThrows(_ => IsGoo(goo), TypeError);
assertThrows(_ => IsGoo(goo), TypeError);
%OptimizeFunctionOnNextCall(IsGoo);
@@ -59,6 +62,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -75,6 +79,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -94,6 +99,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertTrue(IsGoo(goo));
assertTrue(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -109,6 +115,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -124,6 +131,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertThrows(_ => IsGoo(goo), TypeError);
assertThrows(_ => IsGoo(goo), TypeError);
%OptimizeFunctionOnNextCall(IsGoo);
@@ -138,6 +146,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertTrue(IsGoo(goo));
assertTrue(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -154,6 +163,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertTrue(IsGoo(goo));
assertTrue(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -171,6 +181,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertThrows(_ => IsGoo(goo), TypeError);
assertThrows(_ => IsGoo(goo), TypeError);
%OptimizeFunctionOnNextCall(IsGoo);
@@ -189,6 +200,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -207,6 +219,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertThrows(_ => IsGoo(goo), TypeError);
assertThrows(_ => IsGoo(goo), TypeError);
%OptimizeFunctionOnNextCall(IsGoo);
@@ -225,6 +238,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
diff --git a/deps/v8/test/mjsunit/compiler/instanceof3.js b/deps/v8/test/mjsunit/compiler/instanceof3.js
index e390c42092..085ad61387 100644
--- a/deps/v8/test/mjsunit/compiler/instanceof3.js
+++ b/deps/v8/test/mjsunit/compiler/instanceof3.js
@@ -15,6 +15,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -30,6 +31,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertTrue(IsGoo(goo));
assertTrue(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -45,6 +47,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertThrows(_ => IsGoo(goo), TypeError);
assertThrows(_ => IsGoo(goo), TypeError);
%OptimizeFunctionOnNextCall(IsGoo);
@@ -59,6 +62,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -75,6 +79,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -94,6 +99,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertTrue(IsGoo(goo));
assertTrue(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -109,6 +115,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -124,6 +131,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertThrows(_ => IsGoo(goo), TypeError);
assertThrows(_ => IsGoo(goo), TypeError);
%OptimizeFunctionOnNextCall(IsGoo);
@@ -138,6 +146,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertTrue(IsGoo(goo));
assertTrue(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -154,6 +163,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertTrue(IsGoo(goo));
assertTrue(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -171,6 +181,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertThrows(_ => IsGoo(goo), TypeError);
assertThrows(_ => IsGoo(goo), TypeError);
%OptimizeFunctionOnNextCall(IsGoo);
@@ -189,6 +200,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
@@ -207,6 +219,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertThrows(_ => IsGoo(goo), TypeError);
assertThrows(_ => IsGoo(goo), TypeError);
%OptimizeFunctionOnNextCall(IsGoo);
@@ -225,6 +238,7 @@
return x instanceof Goo;
}
+ %PrepareFunctionForOptimization(IsGoo);
assertFalse(IsGoo(goo));
assertFalse(IsGoo(goo));
%OptimizeFunctionOnNextCall(IsGoo);
diff --git a/deps/v8/test/mjsunit/compiler/int64.js b/deps/v8/test/mjsunit/compiler/int64.js
index b2c53913da..a69df8dc5d 100644
--- a/deps/v8/test/mjsunit/compiler/int64.js
+++ b/deps/v8/test/mjsunit/compiler/int64.js
@@ -11,6 +11,7 @@
return i + 1;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0x000000001, foo(false));
assertEquals(0x000000001, foo(false));
assertEquals(0x100000000, foo(true));
@@ -27,6 +28,7 @@
return i + 1;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0x000000000, foo(false));
assertEquals(0x000000000, foo(false));
assertEquals(0x100000000, foo(true));
@@ -45,6 +47,7 @@
return a[0];
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0x0FFFFFFFF, foo({x:0}));
assertEquals(0x100000000, foo({x:1}));
%OptimizeFunctionOnNextCall(foo);
@@ -57,6 +60,7 @@
return {x: Math.floor((o.x + 11123456789) + -11123456788)}.x;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo({x:0}));
assertEquals(2, foo({x:1}));
%OptimizeFunctionOnNextCall(foo);
@@ -70,6 +74,7 @@
return a[i];
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo([1], 0xFFFFFFFF));
assertEquals(2, foo([2], 0xFFFFFFFF));
%OptimizeFunctionOnNextCall(foo);
@@ -84,6 +89,7 @@
return i;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0xFFFFFFFF));
assertEquals(0, foo(0xFFFFFFFF));
%OptimizeFunctionOnNextCall(foo);
@@ -97,6 +103,7 @@
return dv.getInt8(i, true);
}
+ %PrepareFunctionForOptimization(foo);
const dv = new DataView(new ArrayBuffer(10));
dv.setFloat32(0, 8, true);
dv.setFloat32(4, 9, true);
@@ -117,6 +124,7 @@
return dv.getInt8(i, true);
}
+ %PrepareFunctionForOptimization(foo);
const dv = new DataView(new ArrayBuffer(18));
dv.setFloat64(0, 16, true);
dv.setFloat64(8, 17, true);
diff --git a/deps/v8/test/mjsunit/compiler/integral32-add-sub.js b/deps/v8/test/mjsunit/compiler/integral32-add-sub.js
index 16515d3990..4704ba7da8 100644
--- a/deps/v8/test/mjsunit/compiler/integral32-add-sub.js
+++ b/deps/v8/test/mjsunit/compiler/integral32-add-sub.js
@@ -11,6 +11,7 @@
return x + y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(-2147483648, foo(0));
assertEquals(0, foo(2147483648));
assertEquals(2147483647, foo(4294967295));
@@ -31,6 +32,7 @@
return x - y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(-2147483648, foo(0));
assertEquals(0, foo(2147483648));
assertEquals(2147483647, foo(4294967295));
@@ -51,6 +53,7 @@
return x + y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2147483648, foo(0));
assertEquals(0, foo(-2147483648));
assertEquals(4294967295, foo(2147483647));
@@ -71,6 +74,7 @@
return x - y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2147483648, foo(0));
assertEquals(0, foo(-2147483648));
assertEquals(4294967295, foo(2147483647));
@@ -91,6 +95,7 @@
return x + y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2147483647, foo(2147483647));
assertEquals(-2147483648, foo(-2147483648));
assertEquals(0, foo(0));
@@ -111,6 +116,7 @@
return y - z;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2147483647, foo(-1));
assertEquals(2147483648, foo(0));
assertEquals(2147483649, foo(1));
diff --git a/deps/v8/test/mjsunit/compiler/lazy-const-lookup.js b/deps/v8/test/mjsunit/compiler/lazy-const-lookup.js
index ff4558e7ef..ce147d72ac 100644
--- a/deps/v8/test/mjsunit/compiler/lazy-const-lookup.js
+++ b/deps/v8/test/mjsunit/compiler/lazy-const-lookup.js
@@ -32,6 +32,7 @@ function outer() {
function inner() {
return x;
}
+ %PrepareFunctionForOptimization(inner);
inner();
%OptimizeFunctionOnNextCall(inner);
inner();
diff --git a/deps/v8/test/mjsunit/compiler/lazy-deopt-async-function-resolve.js b/deps/v8/test/mjsunit/compiler/lazy-deopt-async-function-resolve.js
index faa5e63239..9b37c5c266 100644
--- a/deps/v8/test/mjsunit/compiler/lazy-deopt-async-function-resolve.js
+++ b/deps/v8/test/mjsunit/compiler/lazy-deopt-async-function-resolve.js
@@ -13,6 +13,7 @@
}
assertPromiseResult((async () => {
+ %PrepareFunctionForOptimization(foo);
await foo(1);
await foo(2);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/lazy-deopt-in-literal.js b/deps/v8/test/mjsunit/compiler/lazy-deopt-in-literal.js
index 0a1481c571..16ed234fcc 100644
--- a/deps/v8/test/mjsunit/compiler/lazy-deopt-in-literal.js
+++ b/deps/v8/test/mjsunit/compiler/lazy-deopt-in-literal.js
@@ -13,6 +13,7 @@ function fun3() {
return r[113];
}
+%PrepareFunctionForOptimization(fun3);
fun3();
fun3();
%OptimizeFunctionOnNextCall(fun3);
diff --git a/deps/v8/test/mjsunit/compiler/literals-optimized.js b/deps/v8/test/mjsunit/compiler/literals-optimized.js
index 049e21a3a5..4e086e8e24 100644
--- a/deps/v8/test/mjsunit/compiler/literals-optimized.js
+++ b/deps/v8/test/mjsunit/compiler/literals-optimized.js
@@ -30,6 +30,7 @@
// Test optimized versions of array and object literals.
function TestOptimizedLiteral(create, verify) {
+ %PrepareFunctionForOptimization(create);
verify(create(1, 2, 3), 1, 2, 3);
verify(create(3, 5, 7), 3, 5, 7);
%OptimizeFunctionOnNextCall(create);
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-global.js b/deps/v8/test/mjsunit/compiler/load-elimination-global.js
index 9caaa9f718..ed3cd70908 100644
--- a/deps/v8/test/mjsunit/compiler/load-elimination-global.js
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-global.js
@@ -171,6 +171,7 @@ function test_store_store() {
}
function test(x, f) {
+ %PrepareFunctionForOptimization(f);
X = true;
assertEquals(x, f());
assertEquals(x, f());
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-osr.js b/deps/v8/test/mjsunit/compiler/load-elimination-osr.js
index a57fe173ee..159c647c6c 100644
--- a/deps/v8/test/mjsunit/compiler/load-elimination-osr.js
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-osr.js
@@ -58,6 +58,7 @@ function foo_hot(x, limit) {
return o.y;
}
+%PrepareFunctionForOptimization(foo_hot);
assertEquals(22, foo_hot(11, 1));
assertEquals(24, foo_hot(12, 1));
%OptimizeFunctionOnNextCall(foo_hot);
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-params.js b/deps/v8/test/mjsunit/compiler/load-elimination-params.js
index 13a4a8596d..cdf55b1158 100644
--- a/deps/v8/test/mjsunit/compiler/load-elimination-params.js
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-params.js
@@ -43,6 +43,8 @@ function test_params1(a, b) {
return i + j + k + l;
}
+%PrepareFunctionForOptimization(test_params1);
+
assertEquals(14, test_params1(new B(3, 4), new B(4, 5)));
assertEquals(110, test_params1(new B(11, 7), new B(44, 8)));
@@ -63,6 +65,8 @@ function test_params2(a, b) {
return i + j + k + l;
}
+%PrepareFunctionForOptimization(test_params2);
+
assertEquals(14, test_params2(3, 4));
assertEquals(110, test_params2(11, 44));
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination.js b/deps/v8/test/mjsunit/compiler/load-elimination.js
index 9bf8564308..b4b8a187da 100644
--- a/deps/v8/test/mjsunit/compiler/load-elimination.js
+++ b/deps/v8/test/mjsunit/compiler/load-elimination.js
@@ -141,6 +141,7 @@ function test_store_store() {
}
function test(x, f) {
+ %PrepareFunctionForOptimization(f);
assertEquals(x, f());
assertEquals(x, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/materialize-dictionary-properties.js b/deps/v8/test/mjsunit/compiler/materialize-dictionary-properties.js
index 5838a83979..bf2a0922ab 100644
--- a/deps/v8/test/mjsunit/compiler/materialize-dictionary-properties.js
+++ b/deps/v8/test/mjsunit/compiler/materialize-dictionary-properties.js
@@ -12,6 +12,7 @@ function f() {
return o ? 1 : 0;
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/materialize-mutable-heap-number.js b/deps/v8/test/mjsunit/compiler/materialize-mutable-heap-number.js
index b6b99afcf4..c8f85aa0ad 100644
--- a/deps/v8/test/mjsunit/compiler/materialize-mutable-heap-number.js
+++ b/deps/v8/test/mjsunit/compiler/materialize-mutable-heap-number.js
@@ -16,6 +16,7 @@ function f() {
return o.x + 0.25;
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/math-ceil.js b/deps/v8/test/mjsunit/compiler/math-ceil.js
index f91348b4d2..8caa184e45 100644
--- a/deps/v8/test/mjsunit/compiler/math-ceil.js
+++ b/deps/v8/test/mjsunit/compiler/math-ceil.js
@@ -14,6 +14,7 @@
return Object.is(-0, Math.ceil(x));
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(1.5));
assertTrue(foo(0.5));
%OptimizeFunctionOnNextCall(foo);
@@ -31,6 +32,7 @@
return Object.is(NaN, Math.ceil(x));
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(1.5));
assertTrue(foo(NaN));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/math-floor-global.js b/deps/v8/test/mjsunit/compiler/math-floor-global.js
index 9ee649cb2d..e5761e33e6 100644
--- a/deps/v8/test/mjsunit/compiler/math-floor-global.js
+++ b/deps/v8/test/mjsunit/compiler/math-floor-global.js
@@ -34,6 +34,7 @@ var test_id = 0;
function testFloor(expect, input) {
var test = new Function('n',
'"' + (test_id++) + '";return flo(n)');
+ %PrepareFunctionForOptimization(test);
assertEquals(expect, test(input));
assertEquals(expect, test(input));
assertEquals(expect, test(input));
@@ -59,6 +60,7 @@ function test() {
function ifloor(x) {
return 1 / Math.floor(x);
}
+ %PrepareFunctionForOptimization(ifloor);
assertEquals(-Infinity, ifloor(-0));
assertEquals(-Infinity, ifloor(-0));
assertEquals(-Infinity, ifloor(-0));
@@ -155,6 +157,7 @@ function floorsum(i, n) {
}
return ret;
}
+%PrepareFunctionForOptimization(floorsum);
assertEquals(-0, floorsum(1, -0));
%OptimizeFunctionOnNextCall(floorsum);
// The optimized function will deopt. Run it with enough iterations to try
diff --git a/deps/v8/test/mjsunit/compiler/math-floor-local.js b/deps/v8/test/mjsunit/compiler/math-floor-local.js
index 5ebe90b705..7444363d5f 100644
--- a/deps/v8/test/mjsunit/compiler/math-floor-local.js
+++ b/deps/v8/test/mjsunit/compiler/math-floor-local.js
@@ -34,6 +34,7 @@ function testFloor(expect, input) {
var test = new Function('n',
'"' + (test_id++) +
'";var f = Math.floor; return f(n)');
+ %PrepareFunctionForOptimization(test);
assertEquals(expect, test(input));
assertEquals(expect, test(input));
assertEquals(expect, test(input));
@@ -59,6 +60,7 @@ function test() {
function ifloor(x) {
return 1 / Math.floor(x);
}
+ %PrepareFunctionForOptimization(ifloor);
assertEquals(-Infinity, ifloor(-0));
assertEquals(-Infinity, ifloor(-0));
assertEquals(-Infinity, ifloor(-0));
@@ -155,6 +157,7 @@ function floorsum(i, n) {
}
return ret;
}
+%PrepareFunctionForOptimization(floorsum);
assertEquals(-0, floorsum(1, -0));
%OptimizeFunctionOnNextCall(floorsum);
// The optimized function will deopt. Run it with enough iterations to try
diff --git a/deps/v8/test/mjsunit/compiler/math-imul.js b/deps/v8/test/mjsunit/compiler/math-imul.js
index 1de18a6a2d..bb3d61a9d7 100644
--- a/deps/v8/test/mjsunit/compiler/math-imul.js
+++ b/deps/v8/test/mjsunit/compiler/math-imul.js
@@ -8,6 +8,7 @@
(function() {
function foo() { return Math.imul(); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo());
assertEquals(0, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -18,6 +19,7 @@
(function() {
function foo(x) { return Math.imul(x); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(1));
assertEquals(0, foo(2));
%OptimizeFunctionOnNextCall(foo);
@@ -28,11 +30,13 @@
(function() {
function foo(x, y) { return Math.imul(x, y); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(null, 1));
assertEquals(0, foo(2, undefined));
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(null, 1));
assertEquals(0, foo(2, undefined));
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(null, 1));
assertEquals(0, foo(2, undefined));
@@ -43,6 +47,7 @@
(function() {
function foo(x, y) { return Math.imul(x|0, y|0); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1, 1));
assertEquals(2, foo(2, 1));
%OptimizeFunctionOnNextCall(foo);
@@ -55,6 +60,7 @@
(function() {
function foo(x, y) { return Math.imul(x>>>0, y>>>0); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1, 1));
assertEquals(2, foo(2, 1));
%OptimizeFunctionOnNextCall(foo);
@@ -67,6 +73,7 @@
(function() {
function foo(x, y) { return Math.imul(x, y); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1.1, 1.1));
assertEquals(2, foo(2.1, 1.1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/math-max.js b/deps/v8/test/mjsunit/compiler/math-max.js
index 350bdfba88..ce3d43dde2 100644
--- a/deps/v8/test/mjsunit/compiler/math-max.js
+++ b/deps/v8/test/mjsunit/compiler/math-max.js
@@ -14,6 +14,7 @@
return Object.is(-0, Math.max(1, x))
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(-0));
assertFalse(foo(-1));
%OptimizeFunctionOnNextCall(foo);
@@ -30,6 +31,7 @@
return Object.is(-0, Math.max(0, x))
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(-0));
assertFalse(foo(-1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/math-min.js b/deps/v8/test/mjsunit/compiler/math-min.js
index 882103984d..0fedb493a6 100644
--- a/deps/v8/test/mjsunit/compiler/math-min.js
+++ b/deps/v8/test/mjsunit/compiler/math-min.js
@@ -14,6 +14,7 @@
return Object.is(-0, Math.min(-1, x))
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(-0));
assertFalse(foo(-1));
%OptimizeFunctionOnNextCall(foo);
@@ -30,6 +31,7 @@
return Object.is(+0, Math.min(-0, x))
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(-0));
assertFalse(foo(-1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/math-mul.js b/deps/v8/test/mjsunit/compiler/math-mul.js
index a391b445fe..0ac620d82c 100644
--- a/deps/v8/test/mjsunit/compiler/math-mul.js
+++ b/deps/v8/test/mjsunit/compiler/math-mul.js
@@ -7,12 +7,14 @@
// For TurboFan, make sure we can eliminate the -0 return value check
// by recognizing a constant value.
function gotaconstant(y) { return 15 * y; }
+%PrepareFunctionForOptimization(gotaconstant);
assertEquals(45, gotaconstant(3));
gotaconstant(3);
%OptimizeFunctionOnNextCall(gotaconstant);
gotaconstant(3);
function gotaconstant_truncated(x, y) { return x * y | 0; }
+%PrepareFunctionForOptimization(gotaconstant_truncated);
assertEquals(45, gotaconstant_truncated(3, 15));
gotaconstant_truncated(3, 15);
%OptimizeFunctionOnNextCall(gotaconstant_truncated);
@@ -20,6 +22,7 @@ gotaconstant_truncated(3, 15);
function test(x, y) { return x * y; }
+%PrepareFunctionForOptimization(test);
assertEquals(12, test(3, 4));
assertEquals(16, test(4, 4));
@@ -39,6 +42,7 @@ assertEquals(SMI_MAX + SMI_MAX + SMI_MAX, test(SMI_MAX, 3));
// Verify that strength reduction will reduce the -0 check quite a bit
// if we have a negative integer constant.
function negtest(y) { return -3 * y; }
+%PrepareFunctionForOptimization(negtest);
assertEquals(-12, negtest(4));
assertEquals(-12, negtest(4));
%OptimizeFunctionOnNextCall(negtest);
diff --git a/deps/v8/test/mjsunit/compiler/math-round.js b/deps/v8/test/mjsunit/compiler/math-round.js
index c42bf8f2a0..c0715c977b 100644
--- a/deps/v8/test/mjsunit/compiler/math-round.js
+++ b/deps/v8/test/mjsunit/compiler/math-round.js
@@ -14,6 +14,7 @@
return Object.is(-0, Math.round(x));
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(1.5));
assertTrue(foo(0.5));
%OptimizeFunctionOnNextCall(foo);
@@ -31,6 +32,7 @@
return Object.is(NaN, Math.round(x));
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(1.5));
assertTrue(foo(NaN));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/math-sign.js b/deps/v8/test/mjsunit/compiler/math-sign.js
index 0fff0982df..8923f8ddf2 100644
--- a/deps/v8/test/mjsunit/compiler/math-sign.js
+++ b/deps/v8/test/mjsunit/compiler/math-sign.js
@@ -9,6 +9,7 @@ function signInt32(i) {
return Math.sign(i);
}
+%PrepareFunctionForOptimization(signInt32);
signInt32(0);
signInt32(2);
%OptimizeFunctionOnNextCall(signInt32);
@@ -27,6 +28,7 @@ function signFloat64(i) {
return Math.sign(+i);
}
+%PrepareFunctionForOptimization(signFloat64);
signFloat64(0.1);
signFloat64(-0.1);
%OptimizeFunctionOnNextCall(signFloat64);
diff --git a/deps/v8/test/mjsunit/compiler/math-trunc.js b/deps/v8/test/mjsunit/compiler/math-trunc.js
index e5cc523bc0..955bd5fa2c 100644
--- a/deps/v8/test/mjsunit/compiler/math-trunc.js
+++ b/deps/v8/test/mjsunit/compiler/math-trunc.js
@@ -14,6 +14,7 @@
return Object.is(-0, Math.trunc(x));
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(1.5));
assertTrue(foo(0.5));
%OptimizeFunctionOnNextCall(foo);
@@ -31,6 +32,7 @@
return Object.is(NaN, Math.trunc(x));
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(1.5));
assertTrue(foo(NaN));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/minus-zero.js b/deps/v8/test/mjsunit/compiler/minus-zero.js
index 23639dfcc3..b984bb4dfc 100644
--- a/deps/v8/test/mjsunit/compiler/minus-zero.js
+++ b/deps/v8/test/mjsunit/compiler/minus-zero.js
@@ -31,6 +31,7 @@ function add(x, y) {
return x + y;
}
+%PrepareFunctionForOptimization(add);
assertEquals(0, add(0, 0));
assertEquals(0, add(0, 0));
%OptimizeFunctionOnNextCall(add);
@@ -41,6 +42,7 @@ function testsin() {
assertEquals(-0, Math.sin(-0));
}
+%PrepareFunctionForOptimization(testsin);
testsin();
testsin();
%OptimizeFunctionOnNextCall(testsin);
@@ -51,6 +53,7 @@ function testfloor() {
assertEquals(-0, Math.floor(-0));
}
+%PrepareFunctionForOptimization(testfloor);
testfloor();
testfloor();
%OptimizeFunctionOnNextCall(testfloor);
@@ -63,6 +66,7 @@ function add(a, b) {
return a + b;
}
+%PrepareFunctionForOptimization(add);
assertEquals(1, 1/add(double_one, 0));
assertEquals(1, 1/add(0, double_one));
%OptimizeFunctionOnNextCall(add);
diff --git a/deps/v8/test/mjsunit/compiler/mul-div-52bit.js b/deps/v8/test/mjsunit/compiler/mul-div-52bit.js
index 46a5d05a9f..c74be69978 100644
--- a/deps/v8/test/mjsunit/compiler/mul-div-52bit.js
+++ b/deps/v8/test/mjsunit/compiler/mul-div-52bit.js
@@ -46,6 +46,7 @@ function nonPowerOfTwoDiv(a, b) {
}
function test(fn, a, b, sets) {
+ %PrepareFunctionForOptimization(fn);
const expected = fn(a, b);
fn(1, 2);
fn(0, 0);
diff --git a/deps/v8/test/mjsunit/compiler/multiply-add.js b/deps/v8/test/mjsunit/compiler/multiply-add.js
index 2b4304e845..faac9455c1 100644
--- a/deps/v8/test/mjsunit/compiler/multiply-add.js
+++ b/deps/v8/test/mjsunit/compiler/multiply-add.js
@@ -40,6 +40,7 @@ function h(a, b, c, d) {
return a * b + c * d;
}
+%PrepareFunctionForOptimization(f);
assertEquals(5, f(1, 2, 3));
assertEquals(5, f(1, 2, 3));
%OptimizeFunctionOnNextCall(f);
@@ -47,23 +48,28 @@ assertEquals(5, f(1, 2, 3));
assertEquals("2foo", f(1, 2, "foo"));
assertEquals(5.41, f(1.1, 2.1, 3.1));
assertEquals(5.41, f(1.1, 2.1, 3.1));
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(5.41, f(1.1, 2.1, 3.1));
+%PrepareFunctionForOptimization(g);
assertEquals(7, g(1, 2, 3));
assertEquals(7, g(1, 2, 3));
%OptimizeFunctionOnNextCall(g);
assertEquals(7, g(1, 2, 3));
assertEquals(8.36, g(1.1, 2.2, 3.3));
assertEquals(8.36, g(1.1, 2.2, 3.3));
+%PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
assertEquals(8.36, g(1.1, 2.2, 3.3));
+%PrepareFunctionForOptimization(h);
assertEquals(14, h(1, 2, 3, 4));
assertEquals(14, h(1, 2, 3, 4));
%OptimizeFunctionOnNextCall(h);
assertEquals(14, h(1, 2, 3, 4));
assertEquals(15.02, h(1.1, 2.1, 3.1, 4.1));
assertEquals(15.02, h(1.1, 2.1, 3.1, 4.1));
+%PrepareFunctionForOptimization(h);
%OptimizeFunctionOnNextCall(h);
assertEquals(15.02, h(1.1, 2.1, 3.1, 4.1));
diff --git a/deps/v8/test/mjsunit/compiler/multiply-sub.js b/deps/v8/test/mjsunit/compiler/multiply-sub.js
index 4793181d47..c9a803e951 100644
--- a/deps/v8/test/mjsunit/compiler/multiply-sub.js
+++ b/deps/v8/test/mjsunit/compiler/multiply-sub.js
@@ -40,16 +40,19 @@ function h(a, b, c, d) {
return a * b - c * d;
}
+%PrepareFunctionForOptimization(f);
assertEquals(-5.41, f(1.1, 2.1, 3.1));
assertEquals(-5.41, f(1.1, 2.1, 3.1));
%OptimizeFunctionOnNextCall(f);
assertEquals(-5.41, f(1.1, 2.1, 3.1));
+%PrepareFunctionForOptimization(g);
assertEquals(8.36, g(2.2, 3.3, -1.1));
assertEquals(8.36, g(2.2, 3.3, -1.1));
%OptimizeFunctionOnNextCall(g);
assertEquals(8.36, g(2.2, 3.3, -1.1));
+%PrepareFunctionForOptimization(h);
assertEquals(-1.5, h(1.5, 3.0, 12, 0.5));
assertEquals(-1.5, h(1.5, 3.0, 12, 0.5));
%OptimizeFunctionOnNextCall(h);
diff --git a/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js b/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js
index 7f4db56483..4d7505a968 100644
--- a/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js
+++ b/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --opt --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt --turbo-inlining
if (isNeverOptimizeLiteMode()) {
print("Warning: skipping test that requires optimization in Lite mode.");
@@ -42,6 +42,7 @@ function f() {
function g() {
f();
}
+%PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
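
Besides the mechanical %PrepareFunctionForOptimization insertion, this file also gains --turbo-inlining on its Flags line. The test optimizes g, whose body only calls f, so the behavior under test presumably only becomes observable once f is inlined into g. A hypothetical reduced shape (f's body is a stand-in; the real test reads a context slot that may be the hole):

// Flags: --allow-natives-syntax --opt --no-always-opt --turbo-inlining
function f() { return 42; }         // stand-in for the hole-check body
function g() { return f(); }        // f must be inlined into g
%PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
assertEquals(42, g());
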
diff --git a/deps/v8/test/mjsunit/compiler/native-context-specialization-string-concat.js b/deps/v8/test/mjsunit/compiler/native-context-specialization-string-concat.js
index 24fe4b1753..0df3835c5b 100644
--- a/deps/v8/test/mjsunit/compiler/native-context-specialization-string-concat.js
+++ b/deps/v8/test/mjsunit/compiler/native-context-specialization-string-concat.js
@@ -27,6 +27,7 @@ function test(arg) {
d = '"' + foo + arg + bar + '"';
}
+%PrepareFunctionForOptimization(test);
test('boo');
%OptimizeFunctionOnNextCall(test);
test('baa');
diff --git a/deps/v8/test/mjsunit/compiler/new-cons-string.js b/deps/v8/test/mjsunit/compiler/new-cons-string.js
index 7f6da7262a..0d5470ddac 100644
--- a/deps/v8/test/mjsunit/compiler/new-cons-string.js
+++ b/deps/v8/test/mjsunit/compiler/new-cons-string.js
@@ -9,6 +9,7 @@
return "abcdefghijklm" + s;
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(isOneByteString(foo("0")));
assertTrue(isOneByteString(foo("0")));
%OptimizeFunctionOnNextCall(foo);
@@ -20,6 +21,7 @@
return s + "abcdefghijklm";
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(isOneByteString(foo("0")));
assertTrue(isOneByteString(foo("0")));
%OptimizeFunctionOnNextCall(foo);
@@ -31,6 +33,7 @@
return "abcdefghijklm" + s;
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(isOneByteString(foo("\u1234")));
assertFalse(isOneByteString(foo("\u1234")));
%OptimizeFunctionOnNextCall(foo);
@@ -42,6 +45,7 @@
return s + "abcdefghijklm";
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(isOneByteString(foo("\u1234")));
assertFalse(isOneByteString(foo("\u1234")));
%OptimizeFunctionOnNextCall(foo);
@@ -53,6 +57,7 @@
return "abcdefghijkl\u1234" + s;
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(isOneByteString(foo("0")));
assertFalse(isOneByteString(foo("0")));
%OptimizeFunctionOnNextCall(foo);
@@ -64,6 +69,7 @@
return s + "abcdefghijkl\u1234";
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(isOneByteString(foo("0")));
assertFalse(isOneByteString(foo("0")));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-abs.js b/deps/v8/test/mjsunit/compiler/number-abs.js
index 9eb8ab5bb5..8e976132cf 100644
--- a/deps/v8/test/mjsunit/compiler/number-abs.js
+++ b/deps/v8/test/mjsunit/compiler/number-abs.js
@@ -13,6 +13,7 @@
return 1 / Math.abs(x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(Infinity, foo(-0));
assertEquals(Infinity, foo(-0));
%OptimizeFunctionOnNextCall(foo);
@@ -26,6 +27,7 @@
return Math.abs(x * -2);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(-1));
assertEquals(4, foo(-2));
%OptimizeFunctionOnNextCall(foo);
@@ -45,6 +47,7 @@
return Math.abs(Math.max(x * -2, 0));
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(-1));
assertEquals(4, foo(-2));
%OptimizeFunctionOnNextCall(foo);
@@ -64,6 +67,7 @@
return Math.abs(Math.min(x * -2, 2 ** 32));
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(-1));
assertEquals(4, foo(-2));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-add.js b/deps/v8/test/mjsunit/compiler/number-add.js
index 61e6495c52..ba5ab172e0 100644
--- a/deps/v8/test/mjsunit/compiler/number-add.js
+++ b/deps/v8/test/mjsunit/compiler/number-add.js
@@ -26,6 +26,7 @@
return foo(x, -1);
}
+ %PrepareFunctionForOptimization(bar);
assertEquals(0, bar(1));
assertEquals(1, bar(2));
%OptimizeFunctionOnNextCall(bar);
@@ -55,6 +56,7 @@
return baz(1) | 0;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo());
assertEquals(2, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-ceil.js b/deps/v8/test/mjsunit/compiler/number-ceil.js
index ce87cd0fc0..a82dd4cf34 100644
--- a/deps/v8/test/mjsunit/compiler/number-ceil.js
+++ b/deps/v8/test/mjsunit/compiler/number-ceil.js
@@ -10,6 +10,7 @@
return Math.abs(Math.ceil(x * -2));
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(1));
assertEquals(4, foo(2));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js b/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js
index 33abf6b913..8ca710a5ef 100644
--- a/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js
+++ b/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt
+// Flags: --allow-natives-syntax --opt --turbo-inlining
// Test that SpeculativeNumberEqual[SignedSmall] properly passes the
// kIdentifyZeros truncation.
@@ -12,6 +12,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0, 1));
assertEquals(1, foo(1, 1));
assertEquals(1, foo(1, 2));
@@ -39,6 +40,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0, 1));
assertEquals(1, foo(1, 1));
assertEquals(1, foo(1, 2));
@@ -61,6 +63,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(1, -1));
assertEquals(1, foo(1, 1));
assertEquals(1, foo(1, 2));
@@ -88,6 +91,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(1, -1));
assertEquals(1, foo(1, 1));
assertEquals(1, foo(1, 2));
@@ -110,6 +114,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0, 1));
assertEquals(1, foo(1, 1));
assertEquals(1, foo(1, 2));
@@ -137,6 +142,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0, 1));
assertEquals(1, foo(1, 1));
assertEquals(1, foo(1, 2));
diff --git a/deps/v8/test/mjsunit/compiler/number-constructor-deopt.js b/deps/v8/test/mjsunit/compiler/number-constructor-deopt.js
index edffc9ec53..33a13bd5bc 100644
--- a/deps/v8/test/mjsunit/compiler/number-constructor-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/number-constructor-deopt.js
@@ -25,6 +25,7 @@ function f() {
}
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/number-divide.js b/deps/v8/test/mjsunit/compiler/number-divide.js
index c4cc8fa881..1c7710c1f8 100644
--- a/deps/v8/test/mjsunit/compiler/number-divide.js
+++ b/deps/v8/test/mjsunit/compiler/number-divide.js
@@ -21,6 +21,7 @@
return bar(x) | 0;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(2));
assertEquals(2, foo(3));
@@ -50,6 +51,7 @@
return bar(x) | 0;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(2));
assertEquals(2, foo(3));
@@ -68,6 +70,7 @@
function foo(x) { return (x | 0) / 2; }
// Warmup with proper int32 divisions.
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(2));
assertEquals(2, foo(4));
%OptimizeFunctionOnNextCall(foo);
@@ -79,6 +82,7 @@
assertUnoptimized(foo);
// Try again with the new feedback, and now it should stay optimized.
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(4, foo(8));
assertOptimized(foo);
@@ -92,6 +96,7 @@
function foo(x, y) { return x / y; }
// Warmup with proper int32 divisions.
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(4, 2));
assertEquals(2, foo(8, 4));
%OptimizeFunctionOnNextCall(foo);
@@ -103,6 +108,7 @@
assertUnoptimized(foo);
// Try again with the new feedback, and now it should stay optimized.
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(2, foo(2, 1));
assertOptimized(foo);
@@ -116,6 +122,7 @@
function foo(x, y) { return x / y; }
// Warmup with proper int32 divisions.
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(4, 2));
assertEquals(2, foo(8, 4));
%OptimizeFunctionOnNextCall(foo);
@@ -127,6 +134,7 @@
assertUnoptimized(foo);
// Try again with the new feedback, and now it should stay optimized.
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(2, foo(2, 1));
assertOptimized(foo);
@@ -140,6 +148,7 @@
function foo(x, y) { return x / y; }
// Warmup with proper int32 divisions.
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(4, 2));
assertEquals(2, foo(8, 4));
%OptimizeFunctionOnNextCall(foo);
@@ -151,6 +160,7 @@
assertUnoptimized(foo);
// Try again with the new feedback, and now it should stay optimized.
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(2, foo(2, 1));
assertOptimized(foo);
@@ -164,6 +174,7 @@
function foo(s) { return s.length / 2; }
// Warmup with proper uint32 divisions.
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo("ab".repeat(1)));
assertEquals(2, foo("ab".repeat(2)));
%OptimizeFunctionOnNextCall(foo);
@@ -175,6 +186,7 @@
assertUnoptimized(foo);
// Try again with the new feedback, and now it should stay optimized.
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(4, foo("ab".repeat(4)));
assertOptimized(foo);
@@ -188,6 +200,7 @@
function foo(x, y) { return (x >>> 0) / (y >>> 0); }
// Warmup with proper uint32 divisions.
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(4, 2));
assertEquals(2, foo(8, 4));
%OptimizeFunctionOnNextCall(foo);
@@ -199,6 +212,7 @@
assertUnoptimized(foo);
// Try again with the new feedback, and now it should stay optimized.
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(2, foo(2, 1));
assertOptimized(foo);
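
number-divide.js also shows the second recurring change in this patch: wherever a test deliberately triggers a deoptimization (assertUnoptimized) and then re-optimizes, a fresh %PrepareFunctionForOptimization is inserted before the second %OptimizeFunctionOnNextCall. Condensed from the hunks above (the exact deopt trigger varies per case):

function foo(x, y) { return x / y; }

%PrepareFunctionForOptimization(foo);
assertEquals(2, foo(4, 2));            // warm up with proper int32 divisions
assertEquals(2, foo(8, 4));
%OptimizeFunctionOnNextCall(foo);
assertEquals(2, foo(4, 2));
assertEquals(0.5, foo(1, 2));          // non-integer result invalidates the feedback
assertUnoptimized(foo);

%PrepareFunctionForOptimization(foo);  // required again after the deopt
%OptimizeFunctionOnNextCall(foo);
assertEquals(2, foo(2, 1));
assertOptimized(foo);
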
diff --git a/deps/v8/test/mjsunit/compiler/number-floor.js b/deps/v8/test/mjsunit/compiler/number-floor.js
index 180b89e559..ede0730ada 100644
--- a/deps/v8/test/mjsunit/compiler/number-floor.js
+++ b/deps/v8/test/mjsunit/compiler/number-floor.js
@@ -10,6 +10,7 @@
return Math.abs(Math.floor(x * -2));
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(1));
assertEquals(4, foo(2));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-isfinite-inl.js b/deps/v8/test/mjsunit/compiler/number-isfinite-inl.js
index 2948fe0953..54e6b2c0f8 100644
--- a/deps/v8/test/mjsunit/compiler/number-isfinite-inl.js
+++ b/deps/v8/test/mjsunit/compiler/number-isfinite-inl.js
@@ -22,6 +22,7 @@ function test(f) {
assertFalse(Number.isFinite(-1 / 0));
}
+%PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
@@ -48,6 +49,7 @@ function test2(f) {
assertFalse(Number.isFinite(-1 / 0));
}
+%PrepareFunctionForOptimization(test2);
test2();
test2();
%OptimizeFunctionOnNextCall(test2);
diff --git a/deps/v8/test/mjsunit/compiler/number-isfinite.js b/deps/v8/test/mjsunit/compiler/number-isfinite.js
index 03493ce69e..b28be7a3d7 100644
--- a/deps/v8/test/mjsunit/compiler/number-isfinite.js
+++ b/deps/v8/test/mjsunit/compiler/number-isfinite.js
@@ -26,6 +26,7 @@ function f(x) {
return Number.isFinite(+x);
}
+%PrepareFunctionForOptimization(f);
test(f);
test(f);
%OptimizeFunctionOnNextCall(f);
@@ -56,6 +57,7 @@ function f2(x) {
return Number.isFinite(x);
}
+%PrepareFunctionForOptimization(f2);
test2(f2);
test2(f2);
%OptimizeFunctionOnNextCall(f2);
diff --git a/deps/v8/test/mjsunit/compiler/number-isinteger-inl.js b/deps/v8/test/mjsunit/compiler/number-isinteger-inl.js
index 8379fb8147..6f3bbd6e87 100644
--- a/deps/v8/test/mjsunit/compiler/number-isinteger-inl.js
+++ b/deps/v8/test/mjsunit/compiler/number-isinteger-inl.js
@@ -23,6 +23,7 @@ function test() {
assertFalse(Number.isInteger(Number.EPSILON));
}
+%PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
@@ -48,6 +49,7 @@ function test2() {
assertFalse(Number.isInteger(Number.EPSILON));
}
+%PrepareFunctionForOptimization(test2);
test2();
test2();
%OptimizeFunctionOnNextCall(test2);
diff --git a/deps/v8/test/mjsunit/compiler/number-isinteger.js b/deps/v8/test/mjsunit/compiler/number-isinteger.js
index aae172ea06..b3a45d9094 100644
--- a/deps/v8/test/mjsunit/compiler/number-isinteger.js
+++ b/deps/v8/test/mjsunit/compiler/number-isinteger.js
@@ -27,6 +27,7 @@ function f(x) {
return Number.isInteger(+x);
}
+%PrepareFunctionForOptimization(f);
test(f);
test(f);
%OptimizeFunctionOnNextCall(f);
@@ -56,6 +57,7 @@ function f2(x) {
return Number.isInteger(x);
}
+%PrepareFunctionForOptimization(f2);
test2(f2);
test2(f2);
%OptimizeFunctionOnNextCall(f2);
diff --git a/deps/v8/test/mjsunit/compiler/number-isnan.js b/deps/v8/test/mjsunit/compiler/number-isnan.js
index fb6bb6d741..aaa9acb909 100644
--- a/deps/v8/test/mjsunit/compiler/number-isnan.js
+++ b/deps/v8/test/mjsunit/compiler/number-isnan.js
@@ -22,6 +22,7 @@ function f(x) {
return Number.isNaN(+x);
}
+%PrepareFunctionForOptimization(f);
test(f);
test(f);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/number-issafeinteger.js b/deps/v8/test/mjsunit/compiler/number-issafeinteger.js
index b705e95ed5..26ac38ea04 100644
--- a/deps/v8/test/mjsunit/compiler/number-issafeinteger.js
+++ b/deps/v8/test/mjsunit/compiler/number-issafeinteger.js
@@ -43,6 +43,7 @@ function test(f) {
// Check that the NumberIsSafeInteger simplified operator in
// TurboFan does the right thing.
function NumberIsSafeInteger(x) { return Number.isSafeInteger(+x); }
+%PrepareFunctionForOptimization(NumberIsSafeInteger);
test(NumberIsSafeInteger);
test(NumberIsSafeInteger);
%OptimizeFunctionOnNextCall(NumberIsSafeInteger);
@@ -52,6 +53,7 @@ test(NumberIsSafeInteger);
// TurboFan does the right thing as well (i.e. when TurboFan
// is not able to tell statically that the inputs are numbers).
function ObjectIsSafeInteger(x) { return Number.isSafeInteger(x); }
+%PrepareFunctionForOptimization(ObjectIsSafeInteger);
test(ObjectIsSafeInteger);
test(ObjectIsSafeInteger);
%OptimizeFunctionOnNextCall(ObjectIsSafeInteger);
diff --git a/deps/v8/test/mjsunit/compiler/number-max.js b/deps/v8/test/mjsunit/compiler/number-max.js
index 0e9b84fb39..35a29faccf 100644
--- a/deps/v8/test/mjsunit/compiler/number-max.js
+++ b/deps/v8/test/mjsunit/compiler/number-max.js
@@ -11,6 +11,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(2));
assertEquals(1, foo(-1));
%OptimizeFunctionOnNextCall(foo);
@@ -29,6 +30,7 @@
return Math.max(x - 1, x + 1);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(-Math.pow(2, 31) + 1, foo(-Math.pow(2, 31)));
assertEquals(Math.pow(2, 31), foo(Math.pow(2, 31) - 1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-min.js b/deps/v8/test/mjsunit/compiler/number-min.js
index 6c7c62d773..da66026df7 100644
--- a/deps/v8/test/mjsunit/compiler/number-min.js
+++ b/deps/v8/test/mjsunit/compiler/number-min.js
@@ -11,6 +11,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(1));
assertEquals(1, foo(2));
%OptimizeFunctionOnNextCall(foo);
@@ -29,6 +30,7 @@
return Math.min(x - 1, x + 1);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(-Math.pow(2, 31) - 1, foo(-Math.pow(2, 31)));
assertEquals(Math.pow(2, 31) - 2, foo(Math.pow(2, 31) - 1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-modulus.js b/deps/v8/test/mjsunit/compiler/number-modulus.js
index 0925aa0da3..e8f86e7fda 100644
--- a/deps/v8/test/mjsunit/compiler/number-modulus.js
+++ b/deps/v8/test/mjsunit/compiler/number-modulus.js
@@ -13,6 +13,7 @@
return (x * -2) % (2 ** 32) === 0;
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(2));
assertFalse(foo(1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-multiply.js b/deps/v8/test/mjsunit/compiler/number-multiply.js
index 5b644974ec..07e1f846ad 100644
--- a/deps/v8/test/mjsunit/compiler/number-multiply.js
+++ b/deps/v8/test/mjsunit/compiler/number-multiply.js
@@ -15,6 +15,7 @@
return Object.is(-0, bar(-1e-308));
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -27,6 +28,7 @@
return 0 * Math.round(x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0.1));
assertEquals(-0, foo(-0.1));
assertEquals(NaN, foo(NaN));
@@ -49,6 +51,7 @@
return Math.min(x * y, 0);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo(1, 0));
assertEquals(-0, foo(1, -0));
assertEquals(NaN, foo(NaN, -0));
diff --git a/deps/v8/test/mjsunit/compiler/number-round.js b/deps/v8/test/mjsunit/compiler/number-round.js
index 9aec7f7a12..4a9eb9acbf 100644
--- a/deps/v8/test/mjsunit/compiler/number-round.js
+++ b/deps/v8/test/mjsunit/compiler/number-round.js
@@ -10,6 +10,7 @@
return Math.abs(Math.round(x * -2));
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(1));
assertEquals(4, foo(2));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-subtract.js b/deps/v8/test/mjsunit/compiler/number-subtract.js
index cb3e1c7e70..56f028c1a3 100644
--- a/deps/v8/test/mjsunit/compiler/number-subtract.js
+++ b/deps/v8/test/mjsunit/compiler/number-subtract.js
@@ -27,6 +27,7 @@
return baz(42) | 0;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(0, foo());
assertEquals(0, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-toboolean.js b/deps/v8/test/mjsunit/compiler/number-toboolean.js
index 02b30b3ed6..372031cdd0 100644
--- a/deps/v8/test/mjsunit/compiler/number-toboolean.js
+++ b/deps/v8/test/mjsunit/compiler/number-toboolean.js
@@ -12,6 +12,7 @@
return 0;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(2));
%OptimizeFunctionOnNextCall(foo);
@@ -33,6 +34,7 @@
return 0;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(-1));
assertEquals(1, foo(-2));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/number-trunc.js b/deps/v8/test/mjsunit/compiler/number-trunc.js
index aa7d02c20f..1cc005d558 100644
--- a/deps/v8/test/mjsunit/compiler/number-trunc.js
+++ b/deps/v8/test/mjsunit/compiler/number-trunc.js
@@ -10,6 +10,7 @@
return Math.abs(Math.trunc(x * -2));
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(2, foo(1));
assertEquals(4, foo(2));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/object-constructor.js b/deps/v8/test/mjsunit/compiler/object-constructor.js
index 162416fd57..71e51ee2ab 100644
--- a/deps/v8/test/mjsunit/compiler/object-constructor.js
+++ b/deps/v8/test/mjsunit/compiler/object-constructor.js
@@ -14,6 +14,7 @@
function foo(a) {
return Object(a.bar)();
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(new A));
assertEquals(undefined, foo(new A));
%OptimizeFunctionOnNextCall(foo);
@@ -25,6 +26,7 @@
function foo() {
return Object("a");
}
+ %PrepareFunctionForOptimization(foo);
assertEquals('object', typeof foo());
assertEquals('object', typeof foo());
%OptimizeFunctionOnNextCall(foo);
@@ -41,6 +43,7 @@
function foo() {
return new A(1, 2, 3);
}
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertInstanceof(foo(), Object);
assertInstanceof(foo(), A);
diff --git a/deps/v8/test/mjsunit/compiler/object-create.js b/deps/v8/test/mjsunit/compiler/object-create.js
index bd366fe0b0..253024a675 100644
--- a/deps/v8/test/mjsunit/compiler/object-create.js
+++ b/deps/v8/test/mjsunit/compiler/object-create.js
@@ -6,6 +6,7 @@
(function TestWithNullPrototype() {
function f() { return Object.create(null); }
+ %PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
assertEquals(undefined, f().foo);
@@ -14,6 +15,7 @@
(function TestWithCustomPrototype() {
const x = {foo: 42}; // This must be defined here for context specialization.
function f() { return Object.create(x); }
+ %PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
assertEquals(42, f().foo);
@@ -21,6 +23,7 @@
(function TestWithObjectPrototype() {
function f() { return Object.create(Object.prototype); }
+ %PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
assertEquals("[object Object]", f().toString());
diff --git a/deps/v8/test/mjsunit/compiler/object-getprototypeof.js b/deps/v8/test/mjsunit/compiler/object-getprototypeof.js
index ac172dbeb2..8360a8df91 100644
--- a/deps/v8/test/mjsunit/compiler/object-getprototypeof.js
+++ b/deps/v8/test/mjsunit/compiler/object-getprototypeof.js
@@ -9,6 +9,7 @@ var object = Object.create(prototype);
function foo() { return Object.getPrototypeOf(object); }
+%PrepareFunctionForOptimization(foo);
assertSame(prototype, foo());
assertSame(prototype, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/object-is.js b/deps/v8/test/mjsunit/compiler/object-is.js
index f89b73e9d8..1d7e927894 100644
--- a/deps/v8/test/mjsunit/compiler/object-is.js
+++ b/deps/v8/test/mjsunit/compiler/object-is.js
@@ -6,6 +6,7 @@
(function() {
function foo(o) { return Object.is(o, -0); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(-0));
assertFalse(foo(0));
assertFalse(foo(NaN));
@@ -23,6 +24,7 @@
(function() {
function foo(o) { return Object.is(-0, o); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(-0));
assertFalse(foo(0));
assertFalse(foo(NaN));
@@ -40,6 +42,7 @@
(function() {
function foo(o) { return Object.is(+o, -0); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(-0));
assertFalse(foo(0));
assertFalse(foo(NaN));
@@ -51,6 +54,7 @@
(function() {
function foo(o) { return Object.is(-0, +o); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(-0));
assertFalse(foo(0));
assertFalse(foo(NaN));
@@ -62,6 +66,7 @@
(function() {
function foo(o) { return Object.is(o, NaN); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(-0));
assertFalse(foo(0));
assertTrue(foo(NaN));
@@ -79,6 +84,7 @@
(function() {
function foo(o) { return Object.is(NaN, o); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(-0));
assertFalse(foo(0));
assertTrue(foo(NaN));
@@ -96,6 +102,7 @@
(function() {
function foo(o) { return Object.is(+o, NaN); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(-0));
assertFalse(foo(0));
assertTrue(foo(NaN));
@@ -107,6 +114,7 @@
(function() {
function foo(o) { return Object.is(NaN, +o); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(-0));
assertFalse(foo(0));
assertTrue(foo(NaN));
@@ -118,6 +126,7 @@
(function() {
function foo(o) { return Object.is(`${o}`, "foo"); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo("bar"));
assertTrue(foo("foo"));
%OptimizeFunctionOnNextCall(foo);
@@ -127,6 +136,7 @@
(function() {
function foo(o) { return Object.is(String(o), "foo"); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo("bar"));
assertTrue(foo("foo"));
%OptimizeFunctionOnNextCall(foo);
@@ -136,6 +146,7 @@
(function() {
function foo(o) { return Object.is(o, o); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(-0));
assertTrue(foo(0));
assertTrue(foo(NaN));
@@ -153,6 +164,7 @@
(function() {
function foo(o) { return Object.is(o|0, 0); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(0));
assertTrue(foo(-0));
assertTrue(foo(NaN));
@@ -167,6 +179,7 @@
(function() {
const s = Symbol();
function foo() { return Object.is(s, Symbol()); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/object-isprototypeof.js b/deps/v8/test/mjsunit/compiler/object-isprototypeof.js
index 284a4387d6..d8e3c3e796 100644
--- a/deps/v8/test/mjsunit/compiler/object-isprototypeof.js
+++ b/deps/v8/test/mjsunit/compiler/object-isprototypeof.js
@@ -8,6 +8,7 @@
(function() {
function foo(x, y) { return Object.prototype.isPrototypeOf.call(x, y); }
+ %PrepareFunctionForOptimization(foo);
assertThrows(() => foo(null, {}));
assertThrows(() => foo(undefined, {}));
assertThrows(() => foo(null, []));
@@ -43,6 +44,7 @@
function foo(x) { return A.prototype.isPrototypeOf(x); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(0));
assertFalse(foo(""));
assertFalse(foo(null));
@@ -74,6 +76,7 @@
function foo() { return A.prototype.isPrototypeOf(0); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -86,6 +89,7 @@
function foo() { return A.prototype.isPrototypeOf(null); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -98,6 +102,7 @@
function foo() { return A.prototype.isPrototypeOf(undefined); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -112,6 +117,7 @@
function foo() { return A.prototype.isPrototypeOf(a); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -124,6 +130,7 @@
function foo() { return A.prototype.isPrototypeOf(a); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -136,6 +143,7 @@
function foo() { return Array.prototype.isPrototypeOf(a); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -146,6 +154,7 @@
function foo() { return Object.prototype.isPrototypeOf(a); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js b/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js
index eb8df4b50c..843b19775a 100644
--- a/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js
+++ b/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js
@@ -8,6 +8,7 @@ function foo() {
with ({ value:"fooed" }) { return value; }
}
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals("fooed", foo());
assertOptimized(foo);
@@ -16,6 +17,7 @@ function bar() {
with ({ value:"bared" }) { return value; }
}
+%PrepareFunctionForOptimization(bar);
assertEquals("bared", bar());
%OptimizeFunctionOnNextCall(bar);
assertEquals("bared", bar());
diff --git a/deps/v8/test/mjsunit/compiler/opt-next-call.js b/deps/v8/test/mjsunit/compiler/opt-next-call.js
index 2878efefe9..2e92dba26d 100644
--- a/deps/v8/test/mjsunit/compiler/opt-next-call.js
+++ b/deps/v8/test/mjsunit/compiler/opt-next-call.js
@@ -8,6 +8,7 @@ function foo() {
return "fooed";
}
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals("fooed", foo());
assertOptimized(foo);
@@ -16,6 +17,7 @@ function bar() {
return "bared";
}
+%PrepareFunctionForOptimization(bar);
assertEquals("bared", bar());
%OptimizeFunctionOnNextCall(bar);
assertEquals("bared", bar());
diff --git a/deps/v8/test/mjsunit/compiler/optimize-bitnot.js b/deps/v8/test/mjsunit/compiler/optimize-bitnot.js
index 28315a4fe2..e129f8c069 100644
--- a/deps/v8/test/mjsunit/compiler/optimize-bitnot.js
+++ b/deps/v8/test/mjsunit/compiler/optimize-bitnot.js
@@ -31,6 +31,7 @@ function f(x) {
return ~~x;
}
+%PrepareFunctionForOptimization(f);
f(42);
f(42);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-closures.js b/deps/v8/test/mjsunit/compiler/optimized-closures.js
index 499e4d5e24..48d7816bcf 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-closures.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-closures.js
@@ -40,7 +40,8 @@ function f() {
return 42;
}
return x + y + h(y);
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(0);
%OptimizeFunctionOnNextCall(g);
a[i] = g(i);
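
optimized-closures.js is the one spot where the new call lands inside a loop body rather than at top level: judging by the a[i] indexing, g is a fresh closure on each iteration, and each instance is prepared before being optimized. A condensed sketch of that shape (the closure body is illustrative):

function f() {
  var a = [];
  for (var i = 0; i < 3; i++) {
    var g = function(x) { return x + 1; };  // new closure each iteration
    %PrepareFunctionForOptimization(g);     // prepare this instance
    g(0);
    %OptimizeFunctionOnNextCall(g);
    a[i] = g(i);
  }
  return a;
}
f();
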
diff --git a/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js b/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js
index 6e08e4a57f..5144e68768 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js
@@ -6,6 +6,7 @@
var a = new Float32Array(1);
function len(a) { return a.length; }
+%PrepareFunctionForOptimization(len);
assertEquals(1, len(a));
assertEquals(1, len(a));
%OptimizeFunctionOnNextCall(len);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js b/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js
index 7d48d09c68..116a6246de 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js
@@ -6,6 +6,7 @@
var a = new Float64Array(1);
function len(a) { return a.length; }
+%PrepareFunctionForOptimization(len);
assertEquals(1, len(a));
assertEquals(1, len(a));
%OptimizeFunctionOnNextCall(len);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-for-in.js b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
index ca17ee6a75..5af7caaef2 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-for-in.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
@@ -174,6 +174,7 @@ function m(t, deopt) {
function tryFunction(result, mkT, f) {
+ %PrepareFunctionForOptimization(f);
var d = {deopt: false};
assertEquals(result, f(mkT(), d));
assertEquals(result, f(mkT(), d));
diff --git a/deps/v8/test/mjsunit/compiler/optimized-function-calls.js b/deps/v8/test/mjsunit/compiler/optimized-function-calls.js
index c3e69d71f5..8f04538c53 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-function-calls.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-function-calls.js
@@ -48,6 +48,7 @@ delete object.x;
function call_f(o) {
return o.f();
}
+%PrepareFunctionForOptimization(call_f);
for (var i = 0; i < 5; i++) call_f(object);
%OptimizeFunctionOnNextCall(call_f);
call_f(object);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-instanceof-1.js b/deps/v8/test/mjsunit/compiler/optimized-instanceof-1.js
index 242b4be772..76e7c86ad9 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-instanceof-1.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-instanceof-1.js
@@ -13,5 +13,6 @@ F[Symbol.hasInstance] = function(v) { return true };
Object.setPrototypeOf(F, proto);
function foo(x) { return x instanceof F };
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertTrue(foo(1));
diff --git a/deps/v8/test/mjsunit/compiler/optimized-instanceof-2.js b/deps/v8/test/mjsunit/compiler/optimized-instanceof-2.js
index 38a35b73f1..603f2d5200 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-instanceof-2.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-instanceof-2.js
@@ -8,6 +8,7 @@ function F() {}
var f = new F
function foo(x) { return x instanceof F };
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertFalse(foo(1));
diff --git a/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js b/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js
index 00bf8d12a4..ba27f1d387 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js
@@ -6,6 +6,7 @@
var a = new Int32Array(1);
function len(a) { return a.length; }
+%PrepareFunctionForOptimization(len);
assertEquals(1, len(a));
assertEquals(1, len(a));
%OptimizeFunctionOnNextCall(len);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js b/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js
index 3a88ed7d25..3d2f43292b 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js
@@ -6,6 +6,7 @@
var a = new Uint32Array(1);
function len(a) { return a.length; }
+%PrepareFunctionForOptimization(len);
assertEquals(1, len(a));
assertEquals(1, len(a));
%OptimizeFunctionOnNextCall(len);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-with.js b/deps/v8/test/mjsunit/compiler/optimized-with.js
index 9bc8713722..dc0351908b 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-with.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-with.js
@@ -13,6 +13,7 @@
return e
}
}
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f({ x:23 }));
assertEquals(42, f({ x:42 }));
assertInstanceof(f(null), TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/osr-sar.js b/deps/v8/test/mjsunit/compiler/osr-sar.js
index 02684f088c..66e8809307 100644
--- a/deps/v8/test/mjsunit/compiler/osr-sar.js
+++ b/deps/v8/test/mjsunit/compiler/osr-sar.js
@@ -39,6 +39,7 @@ function test() {
}
var K3 = 0x80000000;
+ %PrepareFunctionForOptimization(SarShr);
assertEquals(-2, SarShr(K3 | 0));
assertEquals(-2, SarShr(K3 | 0));
%OptimizeFunctionOnNextCall(SarShr);
diff --git a/deps/v8/test/mjsunit/compiler/phi-representations.js b/deps/v8/test/mjsunit/compiler/phi-representations.js
index 6d11bb0d8e..5598d7caf8 100644
--- a/deps/v8/test/mjsunit/compiler/phi-representations.js
+++ b/deps/v8/test/mjsunit/compiler/phi-representations.js
@@ -36,6 +36,7 @@ function ar() {
return (r - r);
}
+%PrepareFunctionForOptimization(ar);
assertEquals(0, ar());
assertEquals(0, ar());
%OptimizeFunctionOnNextCall(ar);
@@ -50,6 +51,7 @@ function ar2() {
return (r - r);
}
+%PrepareFunctionForOptimization(ar2);
assertEquals(0, ar2());
assertEquals(0, ar2());
%OptimizeFunctionOnNextCall(ar2);
diff --git a/deps/v8/test/mjsunit/compiler/pic.js b/deps/v8/test/mjsunit/compiler/pic.js
index f5b136ce91..e85e6ef273 100644
--- a/deps/v8/test/mjsunit/compiler/pic.js
+++ b/deps/v8/test/mjsunit/compiler/pic.js
@@ -47,6 +47,8 @@ function Test(o) {
assertEquals(99, CallF(o));
}
+%PrepareFunctionForOptimization(Test);
+
// Create a bunch of objects with different layouts.
var o1 = { x: 0, y: 1 };
var o2 = { y: 1, x: 0 };
diff --git a/deps/v8/test/mjsunit/compiler/polymorphic-symbols.js b/deps/v8/test/mjsunit/compiler/polymorphic-symbols.js
index e954d50fa8..72d42c318b 100644
--- a/deps/v8/test/mjsunit/compiler/polymorphic-symbols.js
+++ b/deps/v8/test/mjsunit/compiler/polymorphic-symbols.js
@@ -13,6 +13,7 @@
{[symbol]: 3, d: 4}
];
function foo(o) { return o[symbol]; }
+ %PrepareFunctionForOptimization(foo);
for (let i = 0; i < OBJS.length; ++i) {
assertEquals(i, foo(OBJS[i]));
assertEquals(i, foo(OBJS[i]));
@@ -33,6 +34,7 @@
{[symbol]: 3, d: 4}
];
function foo(o) { o[symbol] = o; }
+ %PrepareFunctionForOptimization(foo);
for (let i = 0; i < OBJS.length; ++i) {
foo(OBJS[i]);
foo(OBJS[i]);
diff --git a/deps/v8/test/mjsunit/compiler/promise-capability-default-closures.js b/deps/v8/test/mjsunit/compiler/promise-capability-default-closures.js
index f4d8cd4e5d..3208e54d5b 100644
--- a/deps/v8/test/mjsunit/compiler/promise-capability-default-closures.js
+++ b/deps/v8/test/mjsunit/compiler/promise-capability-default-closures.js
@@ -8,6 +8,7 @@
var resolve, value;
(new Promise(r => resolve = r)).then(v => value = v);
function foo() { resolve(1); }
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -19,6 +20,7 @@
var reject, value;
(new Promise((_, r) => reject = r)).catch(v => value = v);
function foo() { reject(1); }
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -29,6 +31,7 @@
(function() {
var value;
function foo(x) { return new Promise((resolve, reject) => resolve(x)); }
+ %PrepareFunctionForOptimization(foo);
foo(1);
foo(1);
%OptimizeFunctionOnNextCall(foo);
@@ -39,6 +42,7 @@
(function() {
var value;
function foo(x) { return new Promise((resolve, reject) => reject(x)); }
+ %PrepareFunctionForOptimization(foo);
foo(1);
foo(1);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-constructor.js b/deps/v8/test/mjsunit/compiler/promise-constructor.js
index ee069fbca2..7cbae22705 100644
--- a/deps/v8/test/mjsunit/compiler/promise-constructor.js
+++ b/deps/v8/test/mjsunit/compiler/promise-constructor.js
@@ -17,6 +17,7 @@ failWithMessage = (msg) => %AbortJS(msg);
return {resolve, reject, promise};
}
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -29,6 +30,7 @@ failWithMessage = (msg) => %AbortJS(msg);
return new Promise(1);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -42,6 +44,7 @@ failWithMessage = (msg) => %AbortJS(msg);
return new Promise(1);
}
+ %PrepareFunctionForOptimization(foo);
let threw;
try {
threw = false;
@@ -105,6 +108,7 @@ failWithMessage = (msg) => %AbortJS(msg);
assertInstanceof(p, Promise);
}
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -129,6 +133,7 @@ failWithMessage = (msg) => %AbortJS(msg);
assertInstanceof(p, Promise);
}
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -154,6 +159,7 @@ failWithMessage = (msg) => %AbortJS(msg);
assertInstanceof(p, Promise);
}
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -183,6 +189,7 @@ failWithMessage = (msg) => %AbortJS(msg);
}
%NeverOptimizeFunction(bar);
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -199,6 +206,7 @@ failWithMessage = (msg) => %AbortJS(msg);
function foo() {
promise = new Promise(bar);
}
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%NeverOptimizeFunction(bar);
@@ -218,6 +226,7 @@ failWithMessage = (msg) => %AbortJS(msg);
function foo() {
promise = new Promise(bar);
}
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
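
promise-constructor.js combines the new intrinsic with %NeverOptimizeFunction: the executor bar stays interpreted while the wrapping foo is prepared and optimized, isolating the Promise construction path under test. Condensed (bar's body is illustrative):

function bar(resolve, reject) { resolve(1); }
function foo() { return new Promise(bar); }

%NeverOptimizeFunction(bar);           // keep the executor interpreted
%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
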
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-1.js b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-1.js
index d3bd0b8543..5dcacc4cd9 100644
--- a/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-1.js
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-1.js
@@ -8,6 +8,7 @@ function foo(p) { return p.catch(x => x); }
const a = Promise.resolve(1);
+%PrepareFunctionForOptimization(foo);
foo(a);
foo(a);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-2.js b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-2.js
index 0d3f34db28..3281c69519 100644
--- a/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-2.js
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-2.js
@@ -8,6 +8,7 @@ function foo(p) { return p.catch(x => x); }
const a = Promise.resolve(1);
+%PrepareFunctionForOptimization(foo);
foo(a);
foo(a);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-catch-subclass.js b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-subclass.js
index 5aadaada81..2d37f2b225 100644
--- a/deps/v8/test/mjsunit/compiler/promise-prototype-catch-subclass.js
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-subclass.js
@@ -21,6 +21,7 @@ class MyPromise extends Promise {
const a = MyPromise.resolve(1);
+%PrepareFunctionForOptimization(foo);
assertTrue(foo(a));
assertTrue(foo(a));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-catch.js b/deps/v8/test/mjsunit/compiler/promise-prototype-catch.js
index eae343fd2e..5d8080b9d3 100644
--- a/deps/v8/test/mjsunit/compiler/promise-prototype-catch.js
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-catch.js
@@ -6,6 +6,7 @@
(function() {
function foo(p) { return p.catch(); }
+ %PrepareFunctionForOptimization(foo);
foo(Promise.resolve(1));
foo(Promise.resolve(1));
%OptimizeFunctionOnNextCall(foo);
@@ -14,6 +15,7 @@
(function() {
function foo(p) { return p.catch(foo); }
+ %PrepareFunctionForOptimization(foo);
foo(Promise.resolve(1));
foo(Promise.resolve(1));
%OptimizeFunctionOnNextCall(foo);
@@ -22,6 +24,7 @@
(function() {
function foo(p) { return p.catch(foo, undefined); }
+ %PrepareFunctionForOptimization(foo);
foo(Promise.resolve(1));
foo(Promise.resolve(1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-1.js b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-1.js
index a6987d446f..ff469d4c89 100644
--- a/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-1.js
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-1.js
@@ -8,6 +8,7 @@ function foo(p) { return p.finally(x => x); }
const a = Promise.resolve(1);
+%PrepareFunctionForOptimization(foo);
foo(a);
foo(a);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-2.js b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-2.js
index 5bad54a61d..493539ee01 100644
--- a/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-2.js
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-2.js
@@ -8,6 +8,7 @@ function foo(p) { return p.finally(x => x); }
const a = Promise.resolve(1);
+%PrepareFunctionForOptimization(foo);
foo(a);
foo(a);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-finally-subclass.js b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-subclass.js
index ff5657f6cb..7a1fd9c508 100644
--- a/deps/v8/test/mjsunit/compiler/promise-prototype-finally-subclass.js
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-subclass.js
@@ -21,6 +21,7 @@ class MyPromise extends Promise {
const a = MyPromise.resolve(1);
+%PrepareFunctionForOptimization(foo);
assertTrue(foo(a));
assertTrue(foo(a));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-finally.js b/deps/v8/test/mjsunit/compiler/promise-prototype-finally.js
index 6060f7b857..64af086e18 100644
--- a/deps/v8/test/mjsunit/compiler/promise-prototype-finally.js
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-finally.js
@@ -7,6 +7,7 @@
(function() {
const p = Promise.resolve(1);
function foo(p) { return p.finally(); }
+ %PrepareFunctionForOptimization(foo);
foo(p);
foo(p);
%OptimizeFunctionOnNextCall(foo);
@@ -16,6 +17,7 @@
(function() {
const p = Promise.resolve(1);
function foo(p) { return p.finally(x => x); }
+ %PrepareFunctionForOptimization(foo);
foo(p);
foo(p);
%OptimizeFunctionOnNextCall(foo);
@@ -25,6 +27,7 @@
(function() {
const p = Promise.resolve(1);
function foo(p, f) { return p.finally(f); }
+ %PrepareFunctionForOptimization(foo);
foo(p, x => x);
foo(p, x => x);
%OptimizeFunctionOnNextCall(foo);
@@ -34,6 +37,7 @@
(function() {
const p = Promise.resolve(1);
function foo(p, f) { return p.finally(f).finally(f); }
+ %PrepareFunctionForOptimization(foo);
foo(p, x => x);
foo(p, x => x);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-then.js b/deps/v8/test/mjsunit/compiler/promise-prototype-then.js
index caf77708b6..3df23531b1 100644
--- a/deps/v8/test/mjsunit/compiler/promise-prototype-then.js
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-then.js
@@ -7,6 +7,7 @@
(function() {
const p = Promise.resolve(1);
function foo(p) { return p.then(); }
+ %PrepareFunctionForOptimization(foo);
foo(p);
foo(p);
%OptimizeFunctionOnNextCall(foo);
@@ -16,6 +17,7 @@
(function() {
const p = Promise.resolve(1);
function foo(p) { return p.then(x => x); }
+ %PrepareFunctionForOptimization(foo);
foo(p);
foo(p);
%OptimizeFunctionOnNextCall(foo);
@@ -25,6 +27,7 @@
(function() {
const p = Promise.resolve(1);
function foo(p) { return p.then(x => x, y => y); }
+ %PrepareFunctionForOptimization(foo);
foo(p);
foo(p);
%OptimizeFunctionOnNextCall(foo);
@@ -34,6 +37,7 @@
(function() {
const p = Promise.resolve(1);
function foo(p, f) { return p.then(f, f); }
+ %PrepareFunctionForOptimization(foo);
foo(p, x => x);
foo(p, x => x);
%OptimizeFunctionOnNextCall(foo);
@@ -43,6 +47,7 @@
(function() {
const p = Promise.resolve(1);
function foo(p, f) { return p.then(f, f).then(f, f); }
+ %PrepareFunctionForOptimization(foo);
foo(p, x => x);
foo(p, x => x);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
index 7acd891b9b..f01dcaffcd 100644
--- a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
+++ b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
@@ -18,6 +18,7 @@
return Promise.resolve(a);
}
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), Promise);
assertInstanceof(foo(), Promise);
%OptimizeFunctionOnNextCall(foo);
@@ -46,6 +47,7 @@
return a;
}
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), Promise);
assertInstanceof(foo(), Promise);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-resolve.js b/deps/v8/test/mjsunit/compiler/promise-resolve.js
index 13cb0fa0a3..5ac84db0e1 100644
--- a/deps/v8/test/mjsunit/compiler/promise-resolve.js
+++ b/deps/v8/test/mjsunit/compiler/promise-resolve.js
@@ -10,6 +10,7 @@ function assertFulfilledWith(expected, thenable) {
(function() {
function foo() { return Promise.resolve(); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith(undefined, foo());
assertFulfilledWith(undefined, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -18,6 +19,7 @@ function assertFulfilledWith(expected, thenable) {
(function() {
function foo(x) { return Promise.resolve(x); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith(3, foo(3));
assertFulfilledWith(3, foo(3));
%OptimizeFunctionOnNextCall(foo);
@@ -26,6 +28,7 @@ function assertFulfilledWith(expected, thenable) {
(function() {
function foo(x, y) { return Promise.resolve(x, y); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith(1, foo(1, 0));
assertFulfilledWith(2, foo(2, 1));
%OptimizeFunctionOnNextCall(foo);
@@ -34,6 +37,7 @@ function assertFulfilledWith(expected, thenable) {
(function() {
function foo(x) { return Promise.resolve({x}); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith({x:1}, foo(1));
assertFulfilledWith({x:2}, foo(2));
%OptimizeFunctionOnNextCall(foo);
@@ -42,6 +46,7 @@ function assertFulfilledWith(expected, thenable) {
(function() {
function foo(x) { return Promise.resolve(Promise.resolve(x)); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith(null, foo(null));
assertFulfilledWith('a', foo('a'));
%OptimizeFunctionOnNextCall(foo);
@@ -55,6 +60,7 @@ function assertFulfilledWith(expected, thenable) {
}
};
function foo() { return Promise.resolve(thenable); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith(1, foo());
assertFulfilledWith(1, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -66,6 +72,7 @@ function assertFulfilledWith(expected, thenable) {
(function() {
function foo() { return MyPromise.resolve(); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith(undefined, foo());
assertFulfilledWith(undefined, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -74,6 +81,7 @@ function assertFulfilledWith(expected, thenable) {
(function() {
function foo(x) { return MyPromise.resolve(x); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith(3, foo(3));
assertFulfilledWith(3, foo(3));
%OptimizeFunctionOnNextCall(foo);
@@ -82,6 +90,7 @@ function assertFulfilledWith(expected, thenable) {
(function() {
function foo(x, y) { return MyPromise.resolve(x, y); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith(1, foo(1, 0));
assertFulfilledWith(2, foo(2, 1));
%OptimizeFunctionOnNextCall(foo);
@@ -90,6 +99,7 @@ function assertFulfilledWith(expected, thenable) {
(function() {
function foo(x) { return MyPromise.resolve({x}); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith({x:1}, foo(1));
assertFulfilledWith({x:2}, foo(2));
%OptimizeFunctionOnNextCall(foo);
@@ -98,6 +108,7 @@ function assertFulfilledWith(expected, thenable) {
(function() {
function foo(x) { return MyPromise.resolve(Promise.resolve(x)); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith(null, foo(null));
assertFulfilledWith('a', foo('a'));
%OptimizeFunctionOnNextCall(foo);
@@ -111,6 +122,7 @@ function assertFulfilledWith(expected, thenable) {
}
};
function foo() { return MyPromise.resolve(thenable); }
+ %PrepareFunctionForOptimization(foo);
assertFulfilledWith(1, foo());
assertFulfilledWith(1, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/promise-species.js b/deps/v8/test/mjsunit/compiler/promise-species.js
index f029e3aad5..17497a30e1 100644
--- a/deps/v8/test/mjsunit/compiler/promise-species.js
+++ b/deps/v8/test/mjsunit/compiler/promise-species.js
@@ -10,6 +10,8 @@ function f() {
return new Promise(r => 88).then(x => 88);
}
+%PrepareFunctionForOptimization(f);
+
let y;
y = f();
diff --git a/deps/v8/test/mjsunit/compiler/property-calls.js b/deps/v8/test/mjsunit/compiler/property-calls.js
index ad5ca81bfd..508cf37016 100644
--- a/deps/v8/test/mjsunit/compiler/property-calls.js
+++ b/deps/v8/test/mjsunit/compiler/property-calls.js
@@ -30,6 +30,7 @@
function f(o) { return o.g(); }
function g() { return 42; }
+%PrepareFunctionForOptimization(f);
var object = { };
object.g = g;
for (var i = 0; i < 5; i++) f(object);
diff --git a/deps/v8/test/mjsunit/compiler/property-refs.js b/deps/v8/test/mjsunit/compiler/property-refs.js
index 6f1f19f0a6..09fdca3407 100644
--- a/deps/v8/test/mjsunit/compiler/property-refs.js
+++ b/deps/v8/test/mjsunit/compiler/property-refs.js
@@ -47,6 +47,7 @@ function LoadXY(x, y) {
return Load(object);
}
+%PrepareFunctionForOptimization(LoadXY);
for (var i = 0; i < 5; i++) LoadXY(i, i);
%OptimizeFunctionOnNextCall(LoadXY);
LoadXY(6, 6);
diff --git a/deps/v8/test/mjsunit/compiler/property-static.js b/deps/v8/test/mjsunit/compiler/property-static.js
index 07021340cd..51990af2b7 100644
--- a/deps/v8/test/mjsunit/compiler/property-static.js
+++ b/deps/v8/test/mjsunit/compiler/property-static.js
@@ -46,6 +46,7 @@ Object.prototype.load.call({ A:0, B:0, C:0, D:0, E:0, F:0, property:15 });
return object.load();
}
+ %PrepareFunctionForOptimization(f);
assertSame(1, f(1));
assertSame(2, f(2));
%OptimizeFunctionOnNextCall(f);
@@ -62,6 +63,7 @@ Object.prototype.load.call({ A:0, B:0, C:0, D:0, E:0, F:0, property:15 });
return object.load();
}
+ %PrepareFunctionForOptimization(f);
assertSame(1, f(1));
assertSame(2, f(2));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/property-stores.js b/deps/v8/test/mjsunit/compiler/property-stores.js
index 4ffac07ad0..e593676a61 100644
--- a/deps/v8/test/mjsunit/compiler/property-stores.js
+++ b/deps/v8/test/mjsunit/compiler/property-stores.js
@@ -35,16 +35,19 @@ var obj = {x: 0,
h: function() { this.x = a; }};
var i;
+%PrepareFunctionForOptimization(obj.f);
for (i = 0; i < 5; i++) { obj.f(); }
%OptimizeFunctionOnNextCall(obj.f);
obj.f();
assertEquals(7, obj.x);
+%PrepareFunctionForOptimization(obj.g);
for (i = 0; i < 5; i++) { obj.g(); }
%OptimizeFunctionOnNextCall(obj.g);
obj.g();
assertEquals(43, obj.x);
+%PrepareFunctionForOptimization(obj.h);
for (i = 0; i < 5; i++) { obj.h(); }
%OptimizeFunctionOnNextCall(obj.h);
obj.h();
diff --git a/deps/v8/test/mjsunit/compiler/proto-chain-constant.js b/deps/v8/test/mjsunit/compiler/proto-chain-constant.js
index 0d9e3b0e1e..4eedd4ced9 100644
--- a/deps/v8/test/mjsunit/compiler/proto-chain-constant.js
+++ b/deps/v8/test/mjsunit/compiler/proto-chain-constant.js
@@ -38,6 +38,7 @@ var obj0 = c(obj1, { f0: { value: function() { return 0; }, writable: true }});
function get4(obj) { return obj.f4; }
+%PrepareFunctionForOptimization(get4);
assertEquals(4, get4(obj0)());
assertEquals(4, get4(obj0)());
%OptimizeFunctionOnNextCall(get4);
@@ -47,6 +48,7 @@ assertEquals(5, get4(obj0)());
function get3(obj) { return obj.f3; }
+%PrepareFunctionForOptimization(get3);
assertEquals(3, get3(obj0)());
assertEquals(3, get3(obj0)());
%OptimizeFunctionOnNextCall(get3);
diff --git a/deps/v8/test/mjsunit/compiler/proto-chain-load.js b/deps/v8/test/mjsunit/compiler/proto-chain-load.js
index 60c6431d2b..3454033d26 100644
--- a/deps/v8/test/mjsunit/compiler/proto-chain-load.js
+++ b/deps/v8/test/mjsunit/compiler/proto-chain-load.js
@@ -37,6 +37,7 @@ var obj0 = Object.create(obj1, { f0: {value: 0} });
function get4(obj) { return obj.f4; }
+%PrepareFunctionForOptimization(get4);
assertEquals(4, get4(obj0));
assertEquals(4, get4(obj0));
%OptimizeFunctionOnNextCall(get4);
diff --git a/deps/v8/test/mjsunit/compiler/receiver-conversion.js b/deps/v8/test/mjsunit/compiler/receiver-conversion.js
index c3f807a422..c4f8bf9c4a 100644
--- a/deps/v8/test/mjsunit/compiler/receiver-conversion.js
+++ b/deps/v8/test/mjsunit/compiler/receiver-conversion.js
@@ -10,6 +10,7 @@
var global = this;
function test(outer, inner, check) {
+ %PrepareFunctionForOptimization(outer);
check(outer());
check(outer());
%OptimizeFunctionOnNextCall(outer);
diff --git a/deps/v8/test/mjsunit/compiler/recursive-deopt.js b/deps/v8/test/mjsunit/compiler/recursive-deopt.js
index c921ade65a..6942a1d3a8 100644
--- a/deps/v8/test/mjsunit/compiler/recursive-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/recursive-deopt.js
@@ -35,6 +35,7 @@ function f(n) {
return f(n - 1) << one;
}
+%PrepareFunctionForOptimization(f);
var one = 1;
for (var i = 0; i < 5; i++) assertEquals(1 << 5, f(4));
diff --git a/deps/v8/test/mjsunit/compiler/redundancy-elimination.js b/deps/v8/test/mjsunit/compiler/redundancy-elimination.js
index 1e5185adb7..dc01fb4a05 100644
--- a/deps/v8/test/mjsunit/compiler/redundancy-elimination.js
+++ b/deps/v8/test/mjsunit/compiler/redundancy-elimination.js
@@ -19,6 +19,7 @@
return x + y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(3, foo([1, 2], 0));
assertEquals(3, foo([1, 2], 0));
%OptimizeFunctionOnNextCall(foo);
@@ -40,6 +41,7 @@
return x + y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(3, foo([1, 2], 0));
assertEquals(3, foo([1, 2], 0));
%OptimizeFunctionOnNextCall(foo);
@@ -61,6 +63,7 @@
return x + y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(3, foo([1, 2], 1));
assertEquals(3, foo([1, 2], 1));
%OptimizeFunctionOnNextCall(foo);
@@ -82,6 +85,7 @@
return x + y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(3, foo([1, 2], 1));
assertEquals(3, foo([1, 2], 1));
%OptimizeFunctionOnNextCall(foo);
@@ -97,6 +101,7 @@
return x + y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo([1, 2], 0));
assertEquals(1, foo([1, 2], 0));
%OptimizeFunctionOnNextCall(foo);
@@ -112,6 +117,7 @@
return x + y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(3, foo([1, 2], 0));
assertEquals(3, foo([1, 2], 0));
%OptimizeFunctionOnNextCall(foo);
@@ -127,6 +133,7 @@
return x + y;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(3, foo([1, 2], 1));
assertEquals(3, foo([1, 2], 1));
%OptimizeFunctionOnNextCall(foo);
@@ -142,6 +149,7 @@
return i;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo([1, 2], 0));
assertEquals(1, foo([1, 2], 1));
%OptimizeFunctionOnNextCall(foo);
@@ -162,6 +170,7 @@
return i;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo([1, 2], 0));
assertEquals(1, foo([1, 2], 1));
%OptimizeFunctionOnNextCall(foo);
@@ -182,6 +191,7 @@
return i;
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo([1, 2], 0));
assertEquals(1, foo([1, 2], 1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/reflect-apply.js b/deps/v8/test/mjsunit/compiler/reflect-apply.js
index fb8f201a72..4f431f3b28 100644
--- a/deps/v8/test/mjsunit/compiler/reflect-apply.js
+++ b/deps/v8/test/mjsunit/compiler/reflect-apply.js
@@ -10,6 +10,7 @@
function bar() { return this; }
function foo() { return Reflect.apply(bar); }
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo);
assertThrows(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -20,6 +21,7 @@
function bar() { return this; }
function foo() { return Reflect.apply(bar, this); }
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo);
assertThrows(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -30,6 +32,7 @@
function bar() { return this; }
function foo() { return Reflect.apply(bar, this, arguments, this); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(42, foo.call(42));
assertEquals(42, foo.call(42));
%OptimizeFunctionOnNextCall(foo);
@@ -47,6 +50,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -62,6 +66,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -78,6 +83,7 @@
return Reflect.apply(undefined, this, dummy);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -92,6 +98,7 @@
return Reflect.apply(null, this, dummy);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -106,6 +113,7 @@
return Reflect.apply(null, this, dummy);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/reflect-construct.js b/deps/v8/test/mjsunit/compiler/reflect-construct.js
index fb70ff4412..cf52856ebc 100644
--- a/deps/v8/test/mjsunit/compiler/reflect-construct.js
+++ b/deps/v8/test/mjsunit/compiler/reflect-construct.js
@@ -10,6 +10,7 @@
function A() {}
function foo() { return Reflect.construct(A); }
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo);
assertThrows(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -20,6 +21,7 @@
function A(x) { this.x = x; }
function foo() { return Reflect.construct(A, arguments); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertInstanceof(foo(), A);
assertEquals(1, foo(1).x);
@@ -32,6 +34,7 @@
function A(x) { this.x = x; }
function foo() { return Reflect.construct(A, arguments, A, A); }
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), A);
assertInstanceof(foo(), A);
assertEquals(1, foo(1).x);
@@ -51,6 +54,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -66,6 +70,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -81,6 +86,7 @@
return Reflect.construct(undefined, dummy, undefined);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -95,6 +101,7 @@
return Reflect.construct(undefined, dummy);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -109,6 +116,7 @@
return Reflect.construct(null, dummy, null);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -122,6 +130,7 @@
return Reflect.construct(null, dummy);
}
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/reflect-get.js b/deps/v8/test/mjsunit/compiler/reflect-get.js
index 0c329e497e..b414ccb2bc 100644
--- a/deps/v8/test/mjsunit/compiler/reflect-get.js
+++ b/deps/v8/test/mjsunit/compiler/reflect-get.js
@@ -9,6 +9,7 @@
"use strict";
function foo() { return Reflect.get(); }
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo);
assertThrows(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -18,6 +19,7 @@
"use strict";
function foo(o) { return Reflect.get(o); }
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo({}));
assertEquals(undefined, foo({}));
%OptimizeFunctionOnNextCall(foo);
@@ -27,6 +29,7 @@
"use strict";
function foo(o) { return Reflect.get(o); }
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo.bind(undefined, 1));
assertThrows(foo.bind(undefined, undefined));
%OptimizeFunctionOnNextCall(foo);
@@ -45,6 +48,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(10, foo());
assertEquals(10, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -61,6 +65,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo({[Symbol.toPrimitive]() { throw new Error(); }}));
assertEquals(1, foo({[Symbol.toPrimitive]() { throw new Error(); }}));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/reflect-getprototypeof.js b/deps/v8/test/mjsunit/compiler/reflect-getprototypeof.js
index a5ea89140e..5001158bee 100644
--- a/deps/v8/test/mjsunit/compiler/reflect-getprototypeof.js
+++ b/deps/v8/test/mjsunit/compiler/reflect-getprototypeof.js
@@ -9,6 +9,7 @@ var object = Object.create(prototype);
function foo() { return Reflect.getPrototypeOf(object); }
+%PrepareFunctionForOptimization(foo);
assertSame(prototype, foo());
assertSame(prototype, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/reflect-has.js b/deps/v8/test/mjsunit/compiler/reflect-has.js
index 2f9ee1b66a..955ff26980 100644
--- a/deps/v8/test/mjsunit/compiler/reflect-has.js
+++ b/deps/v8/test/mjsunit/compiler/reflect-has.js
@@ -9,6 +9,7 @@
"use strict";
function foo() { return Reflect.has(); }
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo);
assertThrows(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -18,6 +19,7 @@
"use strict";
function foo(o) { return Reflect.has(o); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo({}));
assertFalse(foo({}));
%OptimizeFunctionOnNextCall(foo);
@@ -27,6 +29,7 @@
"use strict";
function foo(o) { return Reflect.has(o); }
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo.bind(undefined, 1));
assertThrows(foo.bind(undefined, undefined));
%OptimizeFunctionOnNextCall(foo);
@@ -44,6 +47,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
@@ -60,6 +64,7 @@
}
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo({[Symbol.toPrimitive]() { throw new Error(); }}));
assertEquals(1, foo({[Symbol.toPrimitive]() { throw new Error(); }}));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-106351.js b/deps/v8/test/mjsunit/compiler/regress-106351.js
index 2a67a055d3..025802962e 100644
--- a/deps/v8/test/mjsunit/compiler/regress-106351.js
+++ b/deps/v8/test/mjsunit/compiler/regress-106351.js
@@ -33,6 +33,7 @@ function test(x) {
assertEquals(0.5, v);
}
+%PrepareFunctionForOptimization(test);
for (var i = 0; i < 5; ++i) test(0.5);
%OptimizeFunctionOnNextCall(test);
test(0.5);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1085.js b/deps/v8/test/mjsunit/compiler/regress-1085.js
index 533cf59c9c..c969ddf644 100644
--- a/deps/v8/test/mjsunit/compiler/regress-1085.js
+++ b/deps/v8/test/mjsunit/compiler/regress-1085.js
@@ -31,6 +31,7 @@
// This test relies on specific type feedback for Math.min.
function f(x) { return 1 / Math.min(1, x); }
+%PrepareFunctionForOptimization(f);
for (var i = 0; i < 5; ++i) f(1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1394.js b/deps/v8/test/mjsunit/compiler/regress-1394.js
index fbf435731f..94bff2745c 100644
--- a/deps/v8/test/mjsunit/compiler/regress-1394.js
+++ b/deps/v8/test/mjsunit/compiler/regress-1394.js
@@ -50,6 +50,8 @@ function f(x) {
return ret;
};
+%PrepareFunctionForOptimization(f);
+
for (var i = 0; i < 3; i++) assertEquals(i, f(i));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-177883.js b/deps/v8/test/mjsunit/compiler/regress-177883.js
index d5af584951..6636efa2e8 100644
--- a/deps/v8/test/mjsunit/compiler/regress-177883.js
+++ b/deps/v8/test/mjsunit/compiler/regress-177883.js
@@ -171,6 +171,7 @@
HEAPF32[i] = 1.0;
}
+ %PrepareFunctionForOptimization(__ZNK4Math7frustum10clipstatusERKNS_4bboxE);
__ZNK4Math7frustum10clipstatusERKNS_4bboxE(0, 0);
__ZNK4Math7frustum10clipstatusERKNS_4bboxE(0, 0);
__ZNK4Math7frustum10clipstatusERKNS_4bboxE(0, 0);
diff --git a/deps/v8/test/mjsunit/compiler/regress-3218915.js b/deps/v8/test/mjsunit/compiler/regress-3218915.js
index dfce815afb..abb47dc157 100644
--- a/deps/v8/test/mjsunit/compiler/regress-3218915.js
+++ b/deps/v8/test/mjsunit/compiler/regress-3218915.js
@@ -42,6 +42,7 @@ function observe(x, y) { try {} finally {} return x; }
function test(x) { return observe(this, ((0, side_effect()), x + 1)); }
// Run test enough times to get it optimized.
+%PrepareFunctionForOptimization(test);
for (var i = 0; i < 5; ++i) test(0);
%OptimizeFunctionOnNextCall(test);
test(0);
diff --git a/deps/v8/test/mjsunit/compiler/regress-411262.js b/deps/v8/test/mjsunit/compiler/regress-411262.js
index ffbfe2e823..320e3b7f36 100644
--- a/deps/v8/test/mjsunit/compiler/regress-411262.js
+++ b/deps/v8/test/mjsunit/compiler/regress-411262.js
@@ -33,5 +33,6 @@ function f() {
b.apply(this, arguments);
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-4207.js b/deps/v8/test/mjsunit/compiler/regress-4207.js
index c4ab5a7837..48f26a884b 100644
--- a/deps/v8/test/mjsunit/compiler/regress-4207.js
+++ b/deps/v8/test/mjsunit/compiler/regress-4207.js
@@ -5,11 +5,13 @@
// Flags: --allow-natives-syntax
function bar() { return 0/0 && 1; }
+%PrepareFunctionForOptimization(bar);
assertEquals(NaN, bar());
%OptimizeFunctionOnNextCall(bar);
assertEquals(NaN, bar());
function foo() { return 0/0 || 1; }
+%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-1.js b/deps/v8/test/mjsunit/compiler/regress-4389-1.js
index adb37165db..5f77d54bb7 100644
--- a/deps/v8/test/mjsunit/compiler/regress-4389-1.js
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-1.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function foo(x) { Math.fround(x); }
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(2);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-2.js b/deps/v8/test/mjsunit/compiler/regress-4389-2.js
index edfcf7a8f5..2bfc6e64e0 100644
--- a/deps/v8/test/mjsunit/compiler/regress-4389-2.js
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-2.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function foo(x) { Math.sqrt(x); }
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(2);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-3.js b/deps/v8/test/mjsunit/compiler/regress-4389-3.js
index f4dbc48670..a4eb335140 100644
--- a/deps/v8/test/mjsunit/compiler/regress-4389-3.js
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-3.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function foo(x) { Math.floor(x); }
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(2);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-4.js b/deps/v8/test/mjsunit/compiler/regress-4389-4.js
index 2b9b1493eb..600cfa8c04 100644
--- a/deps/v8/test/mjsunit/compiler/regress-4389-4.js
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-4.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function foo(x) { Math.round(x); }
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(2);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-5.js b/deps/v8/test/mjsunit/compiler/regress-4389-5.js
index e72a3c38ea..ab373b6a0c 100644
--- a/deps/v8/test/mjsunit/compiler/regress-4389-5.js
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-5.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function foo(x) { Math.abs(x); }
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(2);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-6.js b/deps/v8/test/mjsunit/compiler/regress-4389-6.js
index 72a8856525..c51ae9fea3 100644
--- a/deps/v8/test/mjsunit/compiler/regress-4389-6.js
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-6.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function foo(x) { Math.log(x); }
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(2);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-4413-1.js b/deps/v8/test/mjsunit/compiler/regress-4413-1.js
index 569823530f..10ef1f0114 100644
--- a/deps/v8/test/mjsunit/compiler/regress-4413-1.js
+++ b/deps/v8/test/mjsunit/compiler/regress-4413-1.js
@@ -11,5 +11,6 @@ var foo = (function(stdlib) {
return foo;
})(this);
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-4470-1.js b/deps/v8/test/mjsunit/compiler/regress-4470-1.js
index 91d26b7212..f737a3992a 100644
--- a/deps/v8/test/mjsunit/compiler/regress-4470-1.js
+++ b/deps/v8/test/mjsunit/compiler/regress-4470-1.js
@@ -9,6 +9,7 @@ Foo.prototype.x = 0;
function foo(f) {
f.x = 1;
}
+%PrepareFunctionForOptimization(foo);
foo(new Foo);
foo(new Foo);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-491578.js b/deps/v8/test/mjsunit/compiler/regress-491578.js
index c27570456c..9f915caa48 100644
--- a/deps/v8/test/mjsunit/compiler/regress-491578.js
+++ b/deps/v8/test/mjsunit/compiler/regress-491578.js
@@ -11,5 +11,6 @@ function foo(x) {
f();
}
}
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-5074.js b/deps/v8/test/mjsunit/compiler/regress-5074.js
index 903b54ad98..25d69ef0de 100644
--- a/deps/v8/test/mjsunit/compiler/regress-5074.js
+++ b/deps/v8/test/mjsunit/compiler/regress-5074.js
@@ -12,6 +12,7 @@ function foo(a, b) {
return x + b;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(2.1, foo(1, 2));
assertEquals(2.1, foo(1, 2));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-5100.js b/deps/v8/test/mjsunit/compiler/regress-5100.js
index 694cd8a75b..07fffd0c2b 100644
--- a/deps/v8/test/mjsunit/compiler/regress-5100.js
+++ b/deps/v8/test/mjsunit/compiler/regress-5100.js
@@ -14,6 +14,7 @@ a["undefined"] = "undefined";
(function() {
function f(x) { return a[x]; }
+ %PrepareFunctionForOptimization(f);
assertEquals(0, f(0));
assertEquals(0, f(0));
%OptimizeFunctionOnNextCall(f);
@@ -24,6 +25,7 @@ a["undefined"] = "undefined";
(function() {
function f( x) { return a[x]; }
+ %PrepareFunctionForOptimization(f);
assertEquals(0, f(0));
assertEquals(0, f(0));
%OptimizeFunctionOnNextCall(f);
@@ -34,6 +36,7 @@ a["undefined"] = "undefined";
(function() {
function f( x) { return a[x]; }
+ %PrepareFunctionForOptimization(f);
assertEquals(0, f(0));
assertEquals(0, f(0));
%OptimizeFunctionOnNextCall(f);
@@ -44,6 +47,7 @@ a["undefined"] = "undefined";
(function() {
function f( x) { return a[x]; }
+ %PrepareFunctionForOptimization(f);
assertEquals(0, f(0));
assertEquals(0, f(0));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-5129.js b/deps/v8/test/mjsunit/compiler/regress-5129.js
index 1d100ab34c..001d7fe61d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-5129.js
+++ b/deps/v8/test/mjsunit/compiler/regress-5129.js
@@ -11,5 +11,6 @@ function foo($a,$b) {
return ($sub|0) < 0;
}
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertTrue(foo(0x7fffffff,-1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-5158.js b/deps/v8/test/mjsunit/compiler/regress-5158.js
index ead5f4ed9d..9387807cf8 100644
--- a/deps/v8/test/mjsunit/compiler/regress-5158.js
+++ b/deps/v8/test/mjsunit/compiler/regress-5158.js
@@ -9,6 +9,7 @@ function foo(x) {
return (x > 0) ? x : 0 - x;
}
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(-1);
foo(0);
diff --git a/deps/v8/test/mjsunit/compiler/regress-5278.js b/deps/v8/test/mjsunit/compiler/regress-5278.js
index 25b1fb03d5..ea657565a9 100644
--- a/deps/v8/test/mjsunit/compiler/regress-5278.js
+++ b/deps/v8/test/mjsunit/compiler/regress-5278.js
@@ -7,6 +7,7 @@
function foo(a, b) {
return a % b;
}
+%PrepareFunctionForOptimization(foo);
foo(2, 1);
foo(2, 1);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-5320.js b/deps/v8/test/mjsunit/compiler/regress-5320.js
index e2fa65de31..add57fd5be 100644
--- a/deps/v8/test/mjsunit/compiler/regress-5320.js
+++ b/deps/v8/test/mjsunit/compiler/regress-5320.js
@@ -28,6 +28,7 @@
// Flags: --allow-natives-syntax --opt
function OptimizeTruncatingBinaryOp(func) {
+ %PrepareFunctionForOptimization(func);
func(42, -2);
func(31, undefined);
%OptimizeFunctionOnNextCall(func);
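Where a test drives several functions through the same warm-up cycle, the preparation call moves into the shared helper instead, as in OptimizeTruncatingBinaryOp above. A sketch of that helper shape, under the same d8 assumptions (the helper name is illustrative):

function warmAndOptimize(fn, a, b) {
  %PrepareFunctionForOptimization(fn);
  fn(a, b);                     // gather feedback
  fn(a, b);
  %OptimizeFunctionOnNextCall(fn);
  fn(a, b);                     // first call after the optimization request
}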
diff --git a/deps/v8/test/mjsunit/compiler/regress-5538.js b/deps/v8/test/mjsunit/compiler/regress-5538.js
index 7e4c25d3bc..2878b40b21 100644
--- a/deps/v8/test/mjsunit/compiler/regress-5538.js
+++ b/deps/v8/test/mjsunit/compiler/regress-5538.js
@@ -10,6 +10,7 @@
return Number.parseInt(x + 1);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(0));
assertEquals(2, foo(1));
%OptimizeFunctionOnNextCall(foo);
@@ -22,6 +23,7 @@
return Number.parseInt(x + 1, 0);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(0));
assertEquals(2, foo(1));
%OptimizeFunctionOnNextCall(foo);
@@ -34,6 +36,7 @@
return Number.parseInt(x + 1, 10);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(0));
assertEquals(2, foo(1));
%OptimizeFunctionOnNextCall(foo);
@@ -46,6 +49,7 @@
return Number.parseInt(x + 1, undefined);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(1, foo(0));
assertEquals(2, foo(1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-600593.js b/deps/v8/test/mjsunit/compiler/regress-600593.js
index c93f2ab800..463f5bab47 100644
--- a/deps/v8/test/mjsunit/compiler/regress-600593.js
+++ b/deps/v8/test/mjsunit/compiler/regress-600593.js
@@ -15,6 +15,7 @@ function Error() {
return arguments.length;
}
+%PrepareFunctionForOptimization(f);
assertThrows(function() { f(true); });
assertThrows(function() { f(false); });
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-621147.js b/deps/v8/test/mjsunit/compiler/regress-621147.js
index 0a5a221c40..4c25b0c1b6 100644
--- a/deps/v8/test/mjsunit/compiler/regress-621147.js
+++ b/deps/v8/test/mjsunit/compiler/regress-621147.js
@@ -16,6 +16,8 @@ function test3(a) {
a[0] = 1;
}
+%PrepareFunctionForOptimization(test2);
+
test(0);
var smi_array = [1,2];
diff --git a/deps/v8/test/mjsunit/compiler/regress-621423.js b/deps/v8/test/mjsunit/compiler/regress-621423.js
index 962176ffbf..8bc32310ab 100644
--- a/deps/v8/test/mjsunit/compiler/regress-621423.js
+++ b/deps/v8/test/mjsunit/compiler/regress-621423.js
@@ -16,6 +16,7 @@ function f() {
g(a);
}
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-626986.js b/deps/v8/test/mjsunit/compiler/regress-626986.js
index 5e02918423..7ca1d964d9 100644
--- a/deps/v8/test/mjsunit/compiler/regress-626986.js
+++ b/deps/v8/test/mjsunit/compiler/regress-626986.js
@@ -14,6 +14,8 @@ function f(o, x) {
o.f = x;
}
+%PrepareFunctionForOptimization(f);
+
f(o, g);
f(o, g);
f(o, g);
diff --git a/deps/v8/test/mjsunit/compiler/regress-628403.js b/deps/v8/test/mjsunit/compiler/regress-628403.js
index 4096ac32ae..7086d8b51b 100644
--- a/deps/v8/test/mjsunit/compiler/regress-628403.js
+++ b/deps/v8/test/mjsunit/compiler/regress-628403.js
@@ -20,6 +20,7 @@ function f(a) {
}
%NeverOptimizeFunction(g);
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-633497.js b/deps/v8/test/mjsunit/compiler/regress-633497.js
index 8bf358af00..0e3288e02a 100644
--- a/deps/v8/test/mjsunit/compiler/regress-633497.js
+++ b/deps/v8/test/mjsunit/compiler/regress-633497.js
@@ -23,6 +23,7 @@ function f(a) {
return +x;
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-638132.js b/deps/v8/test/mjsunit/compiler/regress-638132.js
index 1b94feb125..b4524926f6 100644
--- a/deps/v8/test/mjsunit/compiler/regress-638132.js
+++ b/deps/v8/test/mjsunit/compiler/regress-638132.js
@@ -19,6 +19,7 @@ function f(b) {
g(1, 2);
g(1, 2);
+%PrepareFunctionForOptimization(f);
f(0);
f(0);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-639210.js b/deps/v8/test/mjsunit/compiler/regress-639210.js
index 50303fb9d6..a852a07a44 100644
--- a/deps/v8/test/mjsunit/compiler/regress-639210.js
+++ b/deps/v8/test/mjsunit/compiler/regress-639210.js
@@ -31,8 +31,10 @@ var m = (function m() {
m.init();
+%PrepareFunctionForOptimization(m.load);
%OptimizeFunctionOnNextCall(m.load);
assertEquals(2, m.load());
+%PrepareFunctionForOptimization(m.store);
%OptimizeFunctionOnNextCall(m.store);
assertEquals(0.1, m.store(1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-644048.js b/deps/v8/test/mjsunit/compiler/regress-644048.js
index ee2dd6edef..3aeb0e3b5f 100644
--- a/deps/v8/test/mjsunit/compiler/regress-644048.js
+++ b/deps/v8/test/mjsunit/compiler/regress-644048.js
@@ -10,6 +10,7 @@ function foo(x) {
: x) | 0
}
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(2);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-664117.js b/deps/v8/test/mjsunit/compiler/regress-664117.js
index cf00591340..3346db4dc8 100644
--- a/deps/v8/test/mjsunit/compiler/regress-664117.js
+++ b/deps/v8/test/mjsunit/compiler/regress-664117.js
@@ -8,6 +8,7 @@ function foo() {
return v.length + 1;
}
+%PrepareFunctionForOptimization(foo);
var v = [];
foo();
v.length = 0xFFFFFFFF;
diff --git a/deps/v8/test/mjsunit/compiler/regress-664490.js b/deps/v8/test/mjsunit/compiler/regress-664490.js
index 94094c7362..69a8bd4af1 100644
--- a/deps/v8/test/mjsunit/compiler/regress-664490.js
+++ b/deps/v8/test/mjsunit/compiler/regress-664490.js
@@ -14,5 +14,6 @@ function f() {
foo(undefined == 0);
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-665680.js b/deps/v8/test/mjsunit/compiler/regress-665680.js
index b014bee9ac..12713da670 100644
--- a/deps/v8/test/mjsunit/compiler/regress-665680.js
+++ b/deps/v8/test/mjsunit/compiler/regress-665680.js
@@ -13,6 +13,7 @@ var invalidAsmFunction = (function() {
}
})();
+%PrepareFunctionForOptimization(invalidAsmFunction);
invalidAsmFunction();
%OptimizeFunctionOnNextCall(invalidAsmFunction);
invalidAsmFunction();
diff --git a/deps/v8/test/mjsunit/compiler/regress-668760.js b/deps/v8/test/mjsunit/compiler/regress-668760.js
index 58294becae..6cb0133088 100644
--- a/deps/v8/test/mjsunit/compiler/regress-668760.js
+++ b/deps/v8/test/mjsunit/compiler/regress-668760.js
@@ -22,6 +22,7 @@ function deopt() {
this.__defineGetter__("o", deopt );
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-671574.js b/deps/v8/test/mjsunit/compiler/regress-671574.js
index fad03f0a28..b9a6861320 100644
--- a/deps/v8/test/mjsunit/compiler/regress-671574.js
+++ b/deps/v8/test/mjsunit/compiler/regress-671574.js
@@ -17,5 +17,6 @@ function f() {
} catch(e) {"Caught: " + e; }
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-675704.js b/deps/v8/test/mjsunit/compiler/regress-675704.js
index 788f92e530..f3c30e33ac 100644
--- a/deps/v8/test/mjsunit/compiler/regress-675704.js
+++ b/deps/v8/test/mjsunit/compiler/regress-675704.js
@@ -20,6 +20,7 @@ function g(x) {
}
}
+%PrepareFunctionForOptimization(g);
g(false);
g(false);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/regress-700883.js b/deps/v8/test/mjsunit/compiler/regress-700883.js
index 41440f3a3f..d9722a26d3 100644
--- a/deps/v8/test/mjsunit/compiler/regress-700883.js
+++ b/deps/v8/test/mjsunit/compiler/regress-700883.js
@@ -18,6 +18,7 @@ function foo(x) {
return min(y, x);
}
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-7121.js b/deps/v8/test/mjsunit/compiler/regress-7121.js
index bdf3133bb8..0cf0dd3a51 100644
--- a/deps/v8/test/mjsunit/compiler/regress-7121.js
+++ b/deps/v8/test/mjsunit/compiler/regress-7121.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function foo() { %_ToLength(42n) }
+%PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-713367.js b/deps/v8/test/mjsunit/compiler/regress-713367.js
index 1bf0a04493..b4a61499d3 100644
--- a/deps/v8/test/mjsunit/compiler/regress-713367.js
+++ b/deps/v8/test/mjsunit/compiler/regress-713367.js
@@ -27,4 +27,5 @@ function f() {
}
}
+%PrepareFunctionForOptimization(f);
assertThrowsEquals(f, 42);
diff --git a/deps/v8/test/mjsunit/compiler/regress-714483.js b/deps/v8/test/mjsunit/compiler/regress-714483.js
index 7fc8868706..748c28f1bc 100644
--- a/deps/v8/test/mjsunit/compiler/regress-714483.js
+++ b/deps/v8/test/mjsunit/compiler/regress-714483.js
@@ -17,6 +17,7 @@ function foo(o) {
return o.f();
}
+%PrepareFunctionForOptimization(foo);
foo(o1);
try { foo(o2); } catch(e) {}
foo(o1);
diff --git a/deps/v8/test/mjsunit/compiler/regress-715651.js b/deps/v8/test/mjsunit/compiler/regress-715651.js
index a75adc8ae7..32ab1da93d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-715651.js
+++ b/deps/v8/test/mjsunit/compiler/regress-715651.js
@@ -32,6 +32,7 @@ function g(o) {
return o.y;
}
+%PrepareFunctionForOptimization(g);
g(h());
g(h());
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/regress-726554.js b/deps/v8/test/mjsunit/compiler/regress-726554.js
index afd81936a5..f5338aa8ac 100644
--- a/deps/v8/test/mjsunit/compiler/regress-726554.js
+++ b/deps/v8/test/mjsunit/compiler/regress-726554.js
@@ -21,6 +21,7 @@ b[1] = 3.5;
h(b, [1073741823, 2147483648, -12]);
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-731495.js b/deps/v8/test/mjsunit/compiler/regress-731495.js
index 7e0d850efb..d6fbfebaff 100644
--- a/deps/v8/test/mjsunit/compiler/regress-731495.js
+++ b/deps/v8/test/mjsunit/compiler/regress-731495.js
@@ -10,6 +10,7 @@ function foo() {
return global;
};
+%PrepareFunctionForOptimization(foo);
assertEquals(foo(), "bar");
%OptimizeFunctionOnNextCall(foo);
assertEquals(foo(), "bar");
diff --git a/deps/v8/test/mjsunit/compiler/regress-733181.js b/deps/v8/test/mjsunit/compiler/regress-733181.js
index 0a76ab70ea..fad95ce3dc 100644
--- a/deps/v8/test/mjsunit/compiler/regress-733181.js
+++ b/deps/v8/test/mjsunit/compiler/regress-733181.js
@@ -8,6 +8,7 @@ function l(s) {
return ("xxxxxxxxxxxxxxxxxxxxxxx" + s).toLowerCase();
}
+%PrepareFunctionForOptimization(l);
l("abcd");
l("abcd");
%OptimizeFunctionOnNextCall(l);
@@ -17,6 +18,7 @@ function u(s) {
return ("xxxxxxxxxxxxxxxxxxxxxxx" + s).toUpperCase();
}
+%PrepareFunctionForOptimization(u);
u("abcd");
u("abcd");
%OptimizeFunctionOnNextCall(u);
diff --git a/deps/v8/test/mjsunit/compiler/regress-736567.js b/deps/v8/test/mjsunit/compiler/regress-736567.js
index 84c6dce1ff..c42b569d94 100644
--- a/deps/v8/test/mjsunit/compiler/regress-736567.js
+++ b/deps/v8/test/mjsunit/compiler/regress-736567.js
@@ -15,6 +15,7 @@ function g() {
f(0, "s");
}
+%PrepareFunctionForOptimization(g);
assertThrows(g);
%OptimizeFunctionOnNextCall(g);
assertThrows(g);
diff --git a/deps/v8/test/mjsunit/compiler/regress-739902.js b/deps/v8/test/mjsunit/compiler/regress-739902.js
index 8f94995840..cf8c306207 100644
--- a/deps/v8/test/mjsunit/compiler/regress-739902.js
+++ b/deps/v8/test/mjsunit/compiler/regress-739902.js
@@ -11,6 +11,7 @@
var e = 0x41000001;
+ %PrepareFunctionForOptimization(f);
f(e);
%OptimizeFunctionOnNextCall(f);
assertEquals("A", f(e));
@@ -21,6 +22,7 @@
return (x >>> 24) & 0xffff;
};
+ %PrepareFunctionForOptimization(f);
f(1);
%OptimizeFunctionOnNextCall(f);
assertEquals(0, f(1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-758096.js b/deps/v8/test/mjsunit/compiler/regress-758096.js
index 1ed32c0263..88178cac21 100644
--- a/deps/v8/test/mjsunit/compiler/regress-758096.js
+++ b/deps/v8/test/mjsunit/compiler/regress-758096.js
@@ -16,6 +16,7 @@
return obj.f();
}
+ %PrepareFunctionForOptimization(f);
f(x);
f(y);
f(x);
@@ -45,6 +46,7 @@
return fg() + a;
}
+ %PrepareFunctionForOptimization(h);
h(0);
h(0);
h(1);
diff --git a/deps/v8/test/mjsunit/compiler/regress-758983.js b/deps/v8/test/mjsunit/compiler/regress-758983.js
index 45899c110b..93627e3dfd 100644
--- a/deps/v8/test/mjsunit/compiler/regress-758983.js
+++ b/deps/v8/test/mjsunit/compiler/regress-758983.js
@@ -13,6 +13,7 @@ function f(b) {
return r < 0;
}
+%PrepareFunctionForOptimization(f);
f(true);
f(true);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-761892.js b/deps/v8/test/mjsunit/compiler/regress-761892.js
index 5423c59c04..d4ce1223bb 100644
--- a/deps/v8/test/mjsunit/compiler/regress-761892.js
+++ b/deps/v8/test/mjsunit/compiler/regress-761892.js
@@ -9,6 +9,7 @@ function f(x) {
1.1!=(x||x0)
}
+%PrepareFunctionForOptimization(f);
f(1.1);
f(1.1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-762057.js b/deps/v8/test/mjsunit/compiler/regress-762057.js
index 4b5cab6ef5..41942f9616 100644
--- a/deps/v8/test/mjsunit/compiler/regress-762057.js
+++ b/deps/v8/test/mjsunit/compiler/regress-762057.js
@@ -14,6 +14,7 @@ function* foo() {
}
}
+%PrepareFunctionForOptimization(foo);
let gaga = foo();
gaga.next();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-772420.js b/deps/v8/test/mjsunit/compiler/regress-772420.js
index 4b58b10909..53ca7fbe65 100644
--- a/deps/v8/test/mjsunit/compiler/regress-772420.js
+++ b/deps/v8/test/mjsunit/compiler/regress-772420.js
@@ -22,6 +22,7 @@ function foo(arg) {
return value * undefined;
}
+%PrepareFunctionForOptimization(foo);
foo(3);
foo(3);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-772872.js b/deps/v8/test/mjsunit/compiler/regress-772872.js
index 345ace82bd..1e8d824ffd 100644
--- a/deps/v8/test/mjsunit/compiler/regress-772872.js
+++ b/deps/v8/test/mjsunit/compiler/regress-772872.js
@@ -7,6 +7,7 @@
function f() {
for (var x = 10; x > 5; x -= 16) {}
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-773954.js b/deps/v8/test/mjsunit/compiler/regress-773954.js
index b78a499d37..ad1d667d5d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-773954.js
+++ b/deps/v8/test/mjsunit/compiler/regress-773954.js
@@ -13,6 +13,7 @@ function f(o) {
return 5 + o.x++;
}
+%PrepareFunctionForOptimization(f);
try {
f(a);
f(b);
diff --git a/deps/v8/test/mjsunit/compiler/regress-780658.js b/deps/v8/test/mjsunit/compiler/regress-780658.js
index 57fdbbabed..8eade77e83 100644
--- a/deps/v8/test/mjsunit/compiler/regress-780658.js
+++ b/deps/v8/test/mjsunit/compiler/regress-780658.js
@@ -19,6 +19,7 @@ function with_tagged(x) {
return get1(l);
}
+%PrepareFunctionForOptimization(with_double);
with_double(.5);
with_tagged({});
with_double(.6);
diff --git a/deps/v8/test/mjsunit/compiler/regress-786521.js b/deps/v8/test/mjsunit/compiler/regress-786521.js
index 2b161270ed..ae354e364b 100644
--- a/deps/v8/test/mjsunit/compiler/regress-786521.js
+++ b/deps/v8/test/mjsunit/compiler/regress-786521.js
@@ -17,6 +17,7 @@ inlined(true, 1);
inlined(true, 2);
inlined(false, 1);
+%PrepareFunctionForOptimization(foo);
function foo(b) { inlined(b, "") }
foo(false); foo(false);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-788539.js b/deps/v8/test/mjsunit/compiler/regress-788539.js
index 889090cdd9..b0caa2f827 100644
--- a/deps/v8/test/mjsunit/compiler/regress-788539.js
+++ b/deps/v8/test/mjsunit/compiler/regress-788539.js
@@ -31,6 +31,7 @@ function f3(a) {
f2(new C().bar.call(), Object(), String);
}
+%PrepareFunctionForOptimization(f3);
f3(new Array(1));
f3(new Array(1));
%OptimizeFunctionOnNextCall(f3);
diff --git a/deps/v8/test/mjsunit/compiler/regress-793863.js b/deps/v8/test/mjsunit/compiler/regress-793863.js
index 883805dff6..a27888ec80 100644
--- a/deps/v8/test/mjsunit/compiler/regress-793863.js
+++ b/deps/v8/test/mjsunit/compiler/regress-793863.js
@@ -8,5 +8,6 @@ function f(a) {
return arguments[0];
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(undefined, f());
diff --git a/deps/v8/test/mjsunit/compiler/regress-796041.js b/deps/v8/test/mjsunit/compiler/regress-796041.js
index e2c2e11c0b..ac1d428b7c 100644
--- a/deps/v8/test/mjsunit/compiler/regress-796041.js
+++ b/deps/v8/test/mjsunit/compiler/regress-796041.js
@@ -29,6 +29,7 @@ function g(abort, a, b) {
return f(abort, "abc", a, b);
}
+%PrepareFunctionForOptimization(g);
g(true); g(true); g(true); g(true);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/regress-797596.js b/deps/v8/test/mjsunit/compiler/regress-797596.js
index 4e3594bdb1..54ac741d0b 100644
--- a/deps/v8/test/mjsunit/compiler/regress-797596.js
+++ b/deps/v8/test/mjsunit/compiler/regress-797596.js
@@ -8,6 +8,7 @@ function inferReceiverMapsInDeadCode() {
var obj = { func() {} };
gc();
function wrappedCode() { try { code(); } catch (e) {} }
+ %PrepareFunctionForOptimization(wrappedCode);
function code() {
obj.a;
try {
diff --git a/deps/v8/test/mjsunit/compiler/regress-799263.js b/deps/v8/test/mjsunit/compiler/regress-799263.js
index b6b1165329..a3c94a7e0a 100644
--- a/deps/v8/test/mjsunit/compiler/regress-799263.js
+++ b/deps/v8/test/mjsunit/compiler/regress-799263.js
@@ -16,6 +16,8 @@ function opt(a, b) {
b[0] = 9.431092e-317;
}
+%PrepareFunctionForOptimization(opt);
+
let arr1 = new Array(1);
arr1[0] = 'a';
opt(arr1, [0]);
diff --git a/deps/v8/test/mjsunit/compiler/regress-801097.js b/deps/v8/test/mjsunit/compiler/regress-801097.js
index d488ce4deb..655393a961 100644
--- a/deps/v8/test/mjsunit/compiler/regress-801097.js
+++ b/deps/v8/test/mjsunit/compiler/regress-801097.js
@@ -14,6 +14,7 @@ function GetFunction() {
}
var func = GetFunction();
+%PrepareFunctionForOptimization(func);
assertThrows("func();");
%OptimizeFunctionOnNextCall(func);
assertThrows("func()");
diff --git a/deps/v8/test/mjsunit/compiler/regress-817225.js b/deps/v8/test/mjsunit/compiler/regress-817225.js
index 22f0375605..9a2d67ef7b 100644
--- a/deps/v8/test/mjsunit/compiler/regress-817225.js
+++ b/deps/v8/test/mjsunit/compiler/regress-817225.js
@@ -22,6 +22,7 @@ inlined();
function optimized(abort, a, b) {
return inlined(abort, "abc", a, b);
}
+%PrepareFunctionForOptimization(optimized);
optimized(true);
%OptimizeFunctionOnNextCall(optimized);
optimized();
diff --git a/deps/v8/test/mjsunit/compiler/regress-8380.js b/deps/v8/test/mjsunit/compiler/regress-8380.js
index d0bf28571e..f82518532d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-8380.js
+++ b/deps/v8/test/mjsunit/compiler/regress-8380.js
@@ -13,6 +13,7 @@ function reduceLHS() {
}
}
+%PrepareFunctionForOptimization(reduceLHS);
reduceLHS();
%OptimizeFunctionOnNextCall(reduceLHS);
reduceLHS();
@@ -27,6 +28,7 @@ function reduceRHS() {
}
}
+%PrepareFunctionForOptimization(reduceRHS);
reduceRHS();
%OptimizeFunctionOnNextCall(reduceRHS);
reduceRHS();
diff --git a/deps/v8/test/mjsunit/compiler/regress-841117.js b/deps/v8/test/mjsunit/compiler/regress-841117.js
index a059922a6e..313bd3983c 100644
--- a/deps/v8/test/mjsunit/compiler/regress-841117.js
+++ b/deps/v8/test/mjsunit/compiler/regress-841117.js
@@ -6,6 +6,7 @@
var v = 1e9;
function f() { return Math.floor(v / 10); }
+%PrepareFunctionForOptimization(f);
assertEquals(1e8, f());
%OptimizeFunctionOnNextCall(f);
assertEquals(1e8, f());
diff --git a/deps/v8/test/mjsunit/compiler/regress-884052.js b/deps/v8/test/mjsunit/compiler/regress-884052.js
index babfcc3cea..937f97649a 100644
--- a/deps/v8/test/mjsunit/compiler/regress-884052.js
+++ b/deps/v8/test/mjsunit/compiler/regress-884052.js
@@ -11,6 +11,7 @@ function foo() {
}
}
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-888923.js b/deps/v8/test/mjsunit/compiler/regress-888923.js
index e352673b7d..5d3074d5a6 100644
--- a/deps/v8/test/mjsunit/compiler/regress-888923.js
+++ b/deps/v8/test/mjsunit/compiler/regress-888923.js
@@ -11,6 +11,7 @@
return o.y.a;
}
+ %PrepareFunctionForOptimization(f);
f({ x : 0, y : { a : 1 } });
f({ x : 0, y : { a : 2 } });
%OptimizeFunctionOnNextCall(f);
@@ -24,6 +25,7 @@
return o.x + a;
}
+ %PrepareFunctionForOptimization(f);
f({ x : 42, y : 21 });
f({ x : 42, y : 21 });
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-890620.js b/deps/v8/test/mjsunit/compiler/regress-890620.js
index f5fc7f4f65..77237774c3 100644
--- a/deps/v8/test/mjsunit/compiler/regress-890620.js
+++ b/deps/v8/test/mjsunit/compiler/regress-890620.js
@@ -19,6 +19,7 @@ function f() {
g();
}
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-895799.js b/deps/v8/test/mjsunit/compiler/regress-895799.js
index 4305b7427b..c45a85f77d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-895799.js
+++ b/deps/v8/test/mjsunit/compiler/regress-895799.js
@@ -19,6 +19,8 @@ class A extends C {
}
}
+%PrepareFunctionForOptimization(A);
+
var D = new Proxy(A, { get() { %DeoptimizeFunction(A); } });
try { Reflect.construct(A, [], D); } catch(e) {}
diff --git a/deps/v8/test/mjsunit/compiler/regress-905555-2.js b/deps/v8/test/mjsunit/compiler/regress-905555-2.js
index 5852c6dd43..f7e2a728c2 100644
--- a/deps/v8/test/mjsunit/compiler/regress-905555-2.js
+++ b/deps/v8/test/mjsunit/compiler/regress-905555-2.js
@@ -10,6 +10,7 @@ function boom(value) {
return global;
}
+%PrepareFunctionForOptimization(boom);
assertEquals(1, boom());
assertEquals(1, boom());
%OptimizeFunctionOnNextCall(boom, "concurrent");
diff --git a/deps/v8/test/mjsunit/compiler/regress-905555.js b/deps/v8/test/mjsunit/compiler/regress-905555.js
index bc7ba7428e..72ccf9aa1d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-905555.js
+++ b/deps/v8/test/mjsunit/compiler/regress-905555.js
@@ -10,6 +10,7 @@ function boom(value) {
return global;
}
+%PrepareFunctionForOptimization(boom);
assertEquals(1, boom());
assertEquals(1, boom());
%OptimizeFunctionOnNextCall(boom, "concurrent");
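The two regress-905555 tests pass a second argument to %OptimizeFunctionOnNextCall: "concurrent" requests recompilation on a background thread instead of synchronously, and the preparation call is still required first. A sketch, under the same d8 assumptions (names are illustrative):

function probe(x) { return x; }
%PrepareFunctionForOptimization(probe);
probe(1);
probe(2);
%OptimizeFunctionOnNextCall(probe, "concurrent");
probe(3);  // may still execute unoptimized code while the background compile runs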
diff --git a/deps/v8/test/mjsunit/compiler/regress-910838.js b/deps/v8/test/mjsunit/compiler/regress-910838.js
index 6e62a453e0..e67126735d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-910838.js
+++ b/deps/v8/test/mjsunit/compiler/regress-910838.js
@@ -14,6 +14,7 @@ function g(b, x) {
return f(b, 'abc', x);
}
+%PrepareFunctionForOptimization(g);
f(false, 0, 0);
g(true, 0);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/regress-913232.js b/deps/v8/test/mjsunit/compiler/regress-913232.js
index efd7fb8e5f..46acd817b3 100644
--- a/deps/v8/test/mjsunit/compiler/regress-913232.js
+++ b/deps/v8/test/mjsunit/compiler/regress-913232.js
@@ -10,5 +10,6 @@ function* E(b) {
}
}
+%PrepareFunctionForOptimization(E);
%OptimizeFunctionOnNextCall(E);
E();
diff --git a/deps/v8/test/mjsunit/compiler/regress-924151.js b/deps/v8/test/mjsunit/compiler/regress-924151.js
new file mode 100644
index 0000000000..6454a71032
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-924151.js
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g(code) {
+ try {
+ if (typeof code === 'function') {
+ +Symbol();
+ } else {
+ eval();
+ }
+ } catch (e) {
+ return;
+ }
+ dummy();
+}
+
+function f() {
+ g(g);
+}
+
+try { g(); } catch(e) {; }
+
+%PrepareFunctionForOptimization(f);
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-932392.js b/deps/v8/test/mjsunit/compiler/regress-932392.js
new file mode 100644
index 0000000000..c98686fca4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-932392.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function opt(flag){
+ ((flag||(Math.max(-0,0)))==0)
+}
+
+%PrepareFunctionForOptimization(opt);
+try{opt(false)}catch{}
+%OptimizeFunctionOnNextCall(opt)
+try{opt(false)}catch{}
diff --git a/deps/v8/test/mjsunit/compiler/regress-934175.js b/deps/v8/test/mjsunit/compiler/regress-934175.js
new file mode 100644
index 0000000000..af115862c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-934175.js
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function ShortcutEmptyStringAddRight() {
+ let ar = new Float32Array(1);
+ function opt(i){
+ return ar[i] + (NaN ? 0 : '');
+ }
+ %PrepareFunctionForOptimization(opt);
+ ar[0] = 42;
+ opt(1);
+ %OptimizeFunctionOnNextCall(opt);
+ assertEquals("42", opt(0));
+})();
+
+(function ShortcutiEmptyStringAddLeft() {
+ let ar = new Float32Array(1);
+ function opt(i){
+ return (NaN ? 0 : '') + ar[i];
+ }
+ %PrepareFunctionForOptimization(opt);
+ ar[0] = 42;
+ opt(1);
+ %OptimizeFunctionOnNextCall(opt);
+ assertEquals("42", opt(0));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-944062-1.js b/deps/v8/test/mjsunit/compiler/regress-944062-1.js
new file mode 100644
index 0000000000..268999c881
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-944062-1.js
@@ -0,0 +1,25 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const array = [42, 2.1]; // non-stable map (PACKED_DOUBLE)
+let b = false;
+
+function f() {
+ if (b) array[100000] = 4.2; // go to dictionary mode
+ return 42
+};
+%NeverOptimizeFunction(f);
+
+function includes() {
+ return array.includes(f());
+}
+
+assertTrue(includes());
+assertTrue(includes());
+%OptimizeFunctionOnNextCall(includes);
+assertTrue(includes());
+b = true;
+assertTrue(includes());
diff --git a/deps/v8/test/mjsunit/compiler/regress-944062-2.js b/deps/v8/test/mjsunit/compiler/regress-944062-2.js
new file mode 100644
index 0000000000..89f06b5452
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-944062-2.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function includes(key, array) {
+ // Transition to dictionary mode in the final invocation.
+ array.__defineSetter__(key, () => {});
+ // Will then read OOB.
+ return array.includes(1234);
+}
+includes("", []);
+includes("", []);
+%OptimizeFunctionOnNextCall(includes);
+includes("", []);
+includes("1235", []);
diff --git a/deps/v8/test/mjsunit/compiler/regress-arguments.js b/deps/v8/test/mjsunit/compiler/regress-arguments.js
index d32b435ff3..70ce05f628 100644
--- a/deps/v8/test/mjsunit/compiler/regress-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/regress-arguments.js
@@ -37,14 +37,19 @@ function h() { return f.apply(void 0, arguments); }
var foo = 42;
+%PrepareFunctionForOptimization(f);
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+%PrepareFunctionForOptimization(g);
for (var i = 0; i < 3; i++) assertEquals(42, g());
%OptimizeFunctionOnNextCall(g);
-%OptimizeFunctionOnNextCall(f);
assertEquals(42, g());
+%PrepareFunctionForOptimization(h);
for (var i = 0; i < 3; i++) assertEquals(42, h());
%OptimizeFunctionOnNextCall(h);
-%OptimizeFunctionOnNextCall(f);
assertEquals(42, h());
var G1 = 21;
@@ -58,7 +63,7 @@ function u() {
Number.prototype.foo = 42;
delete Number.prototype.foo;
+%PrepareFunctionForOptimization(u);
for (var i = 0; i < 3; i++) assertEquals(void 0, u());
%OptimizeFunctionOnNextCall(u);
-%OptimizeFunctionOnNextCall(f);
assertEquals(void 0, u());
diff --git a/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js b/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js
index d95d128a9d..2fdbbc3bce 100644
--- a/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js
+++ b/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js
@@ -47,8 +47,9 @@ function makeTagInfoJSON(n) {
return a;
}
-var expr = '([' + makeTagInfoJSON(128).join(', ') + '])'
+var expr = '([' + makeTagInfoJSON(128).join(', ') + '])';
+%PrepareFunctionForOptimization(withEval);
for (var n = 0; n < 5; n++) {
withEval(expr, function(a) { return a; });
}
diff --git a/deps/v8/test/mjsunit/compiler/regress-compare-negate.js b/deps/v8/test/mjsunit/compiler/regress-compare-negate.js
index e18d6a0f0e..ac420c9514 100644
--- a/deps/v8/test/mjsunit/compiler/regress-compare-negate.js
+++ b/deps/v8/test/mjsunit/compiler/regress-compare-negate.js
@@ -11,6 +11,7 @@ function CompareNegate(a,b) {
return a < (sub|0);
}
+%PrepareFunctionForOptimization(CompareNegate);
var x = CompareNegate(1,0x80000000);
%OptimizeFunctionOnNextCall(CompareNegate);
CompareNegate(1,0x80000000);
diff --git a/deps/v8/test/mjsunit/compiler/regress-const.js b/deps/v8/test/mjsunit/compiler/regress-const.js
index 5099c2f629..1c89368e6f 100644
--- a/deps/v8/test/mjsunit/compiler/regress-const.js
+++ b/deps/v8/test/mjsunit/compiler/regress-const.js
@@ -41,15 +41,20 @@ function g() {
return x;
}
+%PrepareFunctionForOptimization(f);
for (var i = 0; i < 5; i++) {
f();
- g();
}
-
%OptimizeFunctionOnNextCall(f);
-%OptimizeFunctionOnNextCall(g);
assertEquals(1, f());
+
+%PrepareFunctionForOptimization(g);
+for (var i = 0; i < 5; i++) {
+ g();
+}
+%OptimizeFunctionOnNextCall(g);
+
assertEquals(42, g());
@@ -59,6 +64,8 @@ function h(a, b) {
return r + X;
}
+%PrepareFunctionForOptimization(h);
+
for (var i = 0; i < 5; i++) h(1,2);
%OptimizeFunctionOnNextCall(h);
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-540593.js b/deps/v8/test/mjsunit/compiler/regress-crbug-540593.js
index ec68e85771..46c40cfa32 100644
--- a/deps/v8/test/mjsunit/compiler/regress-crbug-540593.js
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-540593.js
@@ -10,5 +10,6 @@ var __f_2 = (function(stdlib) {
function __f_2() { return __v_3(); }
return __f_2;
})(this);
+%PrepareFunctionForOptimization(__f_2);
%OptimizeFunctionOnNextCall(__f_2);
__f_2();
diff --git a/deps/v8/test/mjsunit/compiler/regress-dead-throw-inlining.js b/deps/v8/test/mjsunit/compiler/regress-dead-throw-inlining.js
index 097a20bc41..df5b61195a 100644
--- a/deps/v8/test/mjsunit/compiler/regress-dead-throw-inlining.js
+++ b/deps/v8/test/mjsunit/compiler/regress-dead-throw-inlining.js
@@ -7,6 +7,7 @@
function g() { if (false) throw 0; }
function f() { g(); }
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-escape-analysis-indirect.js b/deps/v8/test/mjsunit/compiler/regress-escape-analysis-indirect.js
index 01f3e13466..255a70a2a1 100644
--- a/deps/v8/test/mjsunit/compiler/regress-escape-analysis-indirect.js
+++ b/deps/v8/test/mjsunit/compiler/regress-escape-analysis-indirect.js
@@ -9,6 +9,7 @@ function f(apply) {
apply(function bogeyman() { value = 42 });
return value;
}
+%PrepareFunctionForOptimization(f);
function apply(fun) { fun() }
assertEquals(42, f(apply));
assertEquals(42, f(apply));
diff --git a/deps/v8/test/mjsunit/compiler/regress-f64-w32-change.js b/deps/v8/test/mjsunit/compiler/regress-f64-w32-change.js
index 834da290e0..71badcb362 100644
--- a/deps/v8/test/mjsunit/compiler/regress-f64-w32-change.js
+++ b/deps/v8/test/mjsunit/compiler/regress-f64-w32-change.js
@@ -19,5 +19,6 @@ var f = (function () {
return f;
})();
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(0, f(0, -1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-funarguments.js b/deps/v8/test/mjsunit/compiler/regress-funarguments.js
index c913bd9521..98c45aa916 100644
--- a/deps/v8/test/mjsunit/compiler/regress-funarguments.js
+++ b/deps/v8/test/mjsunit/compiler/regress-funarguments.js
@@ -62,16 +62,26 @@ function hej(x) {
return o.g(x, "z");
}
-function opt() {
+function opt_g() {
+ %PrepareFunctionForOptimization(o.g);
for (var k=0; k<2; k++) {
for (var i=0; i<5; i++) o.g(i, "g");
- for (var j=0; j<5; j++) hej(j);
}
%OptimizeFunctionOnNextCall(o.g);
+ o.g(0, "g");
+}
+
+function opt_hej() {
+ %PrepareFunctionForOptimization(hej);
+ for (var k=0; k<2; k++) {
+ for (var j=0; j<5; j++) hej(j);
+ }
%OptimizeFunctionOnNextCall(hej);
+ hej(0)
}
-opt();
+opt_g();
+opt_hej();
assertArrayEquals([0, "g"], o.g(0, "g"));
assertArrayEquals([1, "f"], o.g(1, "g"));
assertArrayEquals([0, "h"], hej(0));
@@ -79,7 +89,8 @@ assertArrayEquals([1, "f"], hej(1));
o = new B();
-opt();
+opt_g();
+opt_hej();
assertArrayEquals([0, "f"], o.g(0, "g"));
assertArrayEquals([1, "g"], o.g(1, "g"));
assertArrayEquals([0, "f"], hej(0));
diff --git a/deps/v8/test/mjsunit/compiler/regress-funcaller.js b/deps/v8/test/mjsunit/compiler/regress-funcaller.js
index 5c2a59720b..1cb7f962af 100644
--- a/deps/v8/test/mjsunit/compiler/regress-funcaller.js
+++ b/deps/v8/test/mjsunit/compiler/regress-funcaller.js
@@ -42,6 +42,7 @@ A.prototype.g = gee;
var o = new A();
+%PrepareFunctionForOptimization(o.g);
for (var i=0; i<5; i++) {
o.g(i);
}
@@ -56,6 +57,7 @@ function hej(x) {
return o.g(x);
}
+%PrepareFunctionForOptimization(hej);
for (var j=0; j<5; j++) {
hej(j);
}
@@ -70,6 +72,7 @@ function from_eval(x) {
return o.g(x);
}
+%PrepareFunctionForOptimization(from_eval);
for (var j=0; j<5; j++) {
from_eval(j);
}
diff --git a/deps/v8/test/mjsunit/compiler/regress-gvn.js b/deps/v8/test/mjsunit/compiler/regress-gvn.js
index 7055e34924..cafb778d38 100644
--- a/deps/v8/test/mjsunit/compiler/regress-gvn.js
+++ b/deps/v8/test/mjsunit/compiler/regress-gvn.js
@@ -37,6 +37,8 @@ function test(a) {
return a[0];
}
+%PrepareFunctionForOptimization(test);
+
var a = new Array();
var n = 100;
diff --git a/deps/v8/test/mjsunit/compiler/regress-inline-callfunctionstub.js b/deps/v8/test/mjsunit/compiler/regress-inline-callfunctionstub.js
index 33655346a8..1c391bb3ab 100644
--- a/deps/v8/test/mjsunit/compiler/regress-inline-callfunctionstub.js
+++ b/deps/v8/test/mjsunit/compiler/regress-inline-callfunctionstub.js
@@ -38,6 +38,7 @@ function main(func) {
}
}
+%PrepareFunctionForOptimization(main);
main(o.g);
main(o.g);
main(o.g);
diff --git a/deps/v8/test/mjsunit/compiler/regress-intoverflow.js b/deps/v8/test/mjsunit/compiler/regress-intoverflow.js
index 063a376148..6c0388be07 100644
--- a/deps/v8/test/mjsunit/compiler/regress-intoverflow.js
+++ b/deps/v8/test/mjsunit/compiler/regress-intoverflow.js
@@ -36,6 +36,7 @@ function testMul(a, b) {
}
}
+%PrepareFunctionForOptimization(testMul);
for (var i=0; i<5; i++) testMul(0,0);
%OptimizeFunctionOnNextCall(testMul);
assertEquals(4611686018427388000, testMul(-0x40000000, -0x40000000));
@@ -48,6 +49,7 @@ function testAdd(a, b) {
}
}
+%PrepareFunctionForOptimization(testAdd);
for (var i=0; i<5; i++) testAdd(0,0);
%OptimizeFunctionOnNextCall(testAdd);
assertEquals(-4294967296, testAdd(-0x40000000, -0x40000000));
@@ -62,6 +64,7 @@ function testSub(a, b) {
}
}
+%PrepareFunctionForOptimization(testSub);
for (var i=0; i<5; i++) testSub(0,0);
%OptimizeFunctionOnNextCall(testSub);
assertEquals(-2147483650, testSub(-0x40000000, 1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-lazy-deopt.js b/deps/v8/test/mjsunit/compiler/regress-lazy-deopt.js
index 766220763b..a0d777fb34 100644
--- a/deps/v8/test/mjsunit/compiler/regress-lazy-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/regress-lazy-deopt.js
@@ -41,6 +41,7 @@ function f(x, y) {
return x >> a[0];
}
+%PrepareFunctionForOptimization(f);
f(42);
f(42);
assertEquals(42, f(42));
diff --git a/deps/v8/test/mjsunit/compiler/regress-lbranch-double.js b/deps/v8/test/mjsunit/compiler/regress-lbranch-double.js
index dca6d5bace..304e9a95d8 100644
--- a/deps/v8/test/mjsunit/compiler/regress-lbranch-double.js
+++ b/deps/v8/test/mjsunit/compiler/regress-lbranch-double.js
@@ -34,6 +34,7 @@ function foo() {
return Math.sqrt(2.6415) ? 88 : 99;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(88, foo());
assertEquals(88, foo());
%OptimizeFunctionOnNextCall(foo)
diff --git a/deps/v8/test/mjsunit/compiler/regress-loadfield.js b/deps/v8/test/mjsunit/compiler/regress-loadfield.js
index a3da156e3b..f8a9891da8 100644
--- a/deps/v8/test/mjsunit/compiler/regress-loadfield.js
+++ b/deps/v8/test/mjsunit/compiler/regress-loadfield.js
@@ -46,6 +46,8 @@ function test(a) {
}
}
+%PrepareFunctionForOptimization(test);
+
// Create an object with fast backing store properties.
var a = {};
a.p1 = "";
diff --git a/deps/v8/test/mjsunit/compiler/regress-math-sign-nan-type.js b/deps/v8/test/mjsunit/compiler/regress-math-sign-nan-type.js
index e16eba8c5a..43f06bd0c6 100644
--- a/deps/v8/test/mjsunit/compiler/regress-math-sign-nan-type.js
+++ b/deps/v8/test/mjsunit/compiler/regress-math-sign-nan-type.js
@@ -8,6 +8,7 @@ function f(a) {
return Math.sign(+a) < 2;
}
+%PrepareFunctionForOptimization(f);
f(NaN);
f(NaN);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-number-is-hole-nan.js b/deps/v8/test/mjsunit/compiler/regress-number-is-hole-nan.js
index 368c837163..0b618a1003 100644
--- a/deps/v8/test/mjsunit/compiler/regress-number-is-hole-nan.js
+++ b/deps/v8/test/mjsunit/compiler/regress-number-is-hole-nan.js
@@ -8,6 +8,7 @@ var a = [, 2.121736758e-314];
function foo() { return a[1]; }
+%PrepareFunctionForOptimization(foo);
assertEquals(2.121736758e-314, foo());
assertEquals(2.121736758e-314, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-or.js b/deps/v8/test/mjsunit/compiler/regress-or.js
index 939f2c3ffa..e65a5546d9 100644
--- a/deps/v8/test/mjsunit/compiler/regress-or.js
+++ b/deps/v8/test/mjsunit/compiler/regress-or.js
@@ -38,6 +38,7 @@ function f1(x) {
function g1() { try { return 1; } finally {} }
+%PrepareFunctionForOptimization(f1);
for (var i = 0; i < 5; i++) f1(42);
%OptimizeFunctionOnNextCall(f1);
@@ -55,6 +56,7 @@ function f2(x) {
function g2() { try { return 0; } finally {} }
+%PrepareFunctionForOptimization(f2);
for (var i = 0; i < 5; i++) f2(42);
%OptimizeFunctionOnNextCall(f2);
diff --git a/deps/v8/test/mjsunit/compiler/regress-rep-change.js b/deps/v8/test/mjsunit/compiler/regress-rep-change.js
index c8a0983c44..a0a0add1b9 100644
--- a/deps/v8/test/mjsunit/compiler/regress-rep-change.js
+++ b/deps/v8/test/mjsunit/compiler/regress-rep-change.js
@@ -37,6 +37,8 @@ function test(start) {
for (var i = start; i < 10; i++) { }
}
+%PrepareFunctionForOptimization(test);
+
var n = 3;
for (var i = 0; i < n; ++i) {
diff --git a/deps/v8/test/mjsunit/compiler/regress-shared-deopt.js b/deps/v8/test/mjsunit/compiler/regress-shared-deopt.js
index 669e0e2f1d..f1479bac9e 100644
--- a/deps/v8/test/mjsunit/compiler/regress-shared-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/regress-shared-deopt.js
@@ -47,11 +47,13 @@ function test() {
}
}
+ %PrepareFunctionForOptimization(f4);
f4(9);
f4(11);
%OptimizeFunctionOnNextCall(f4);
f4(12);
+ %PrepareFunctionForOptimization(f5);
f5(9);
f5(11);
%OptimizeFunctionOnNextCall(f5);
diff --git a/deps/v8/test/mjsunit/compiler/regress-shift-left.js b/deps/v8/test/mjsunit/compiler/regress-shift-left.js
index 110e899729..ce9dde4e91 100644
--- a/deps/v8/test/mjsunit/compiler/regress-shift-left.js
+++ b/deps/v8/test/mjsunit/compiler/regress-shift-left.js
@@ -14,6 +14,7 @@
return 1 << tmp1;
}
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(512, f());
})();
@@ -36,6 +37,7 @@
return f;
})();
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(512, f());
})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-shift-right-logical.js b/deps/v8/test/mjsunit/compiler/regress-shift-right-logical.js
index f2be2ad52f..41ea33ae33 100644
--- a/deps/v8/test/mjsunit/compiler/regress-shift-right-logical.js
+++ b/deps/v8/test/mjsunit/compiler/regress-shift-right-logical.js
@@ -14,6 +14,7 @@
return 1 >>> tmp1;
}
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(0, f());
})();
@@ -36,6 +37,7 @@
return f;
})();
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(0, f());
})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-shift-right.js b/deps/v8/test/mjsunit/compiler/regress-shift-right.js
index 71bcb21f0e..45ee9ef77c 100644
--- a/deps/v8/test/mjsunit/compiler/regress-shift-right.js
+++ b/deps/v8/test/mjsunit/compiler/regress-shift-right.js
@@ -14,6 +14,7 @@
return 1 >> tmp1;
}
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(0, f());
})();
@@ -36,6 +37,7 @@
return f;
})();
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(0, f());
})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-stacktrace-methods.js b/deps/v8/test/mjsunit/compiler/regress-stacktrace-methods.js
index 4d2872793d..ff3fac7f7e 100644
--- a/deps/v8/test/mjsunit/compiler/regress-stacktrace-methods.js
+++ b/deps/v8/test/mjsunit/compiler/regress-stacktrace-methods.js
@@ -41,10 +41,16 @@ var o = new Hest();
var s = new Svin();
var v = 0;
+%PrepareFunctionForOptimization(Hest.prototype.one);
for (var i = 0; i < 5; i++) {
o.one(s);
}
%OptimizeFunctionOnNextCall(Hest.prototype.one);
+o.one(s);
+%PrepareFunctionForOptimization(Hest.prototype.three);
+for (var i = 0; i < 5; i++) {
+ o.one(s);
+}
%OptimizeFunctionOnNextCall(Hest.prototype.three);
o.one(s);
@@ -65,5 +71,5 @@ try {
assertTrue(stack.indexOf("38:56") != -1);
assertTrue(stack.indexOf("34:51") != -1);
assertTrue(stack.indexOf("36:38") != -1);
- assertTrue(stack.indexOf("54:5") != -1);
+ assertTrue(stack.indexOf("60:5") != -1);
}
diff --git a/deps/v8/test/mjsunit/compiler/regress-store-holey-double-array.js b/deps/v8/test/mjsunit/compiler/regress-store-holey-double-array.js
index 81231984e0..2daca73bbf 100644
--- a/deps/v8/test/mjsunit/compiler/regress-store-holey-double-array.js
+++ b/deps/v8/test/mjsunit/compiler/regress-store-holey-double-array.js
@@ -19,6 +19,7 @@
var a = [,0.1];
+ %PrepareFunctionForOptimization(g);
g(f64, a, 1);
g(f64, a, 1);
%OptimizeFunctionOnNextCall(g);
@@ -35,6 +36,8 @@
}
var a=[,0.1];
+
+ %PrepareFunctionForOptimization(g);
g(a, 1);
g(a, 1);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js b/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js
index 0d524d20fd..4ba83a2774 100644
--- a/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js
+++ b/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js
@@ -9,6 +9,7 @@ function f(x) {
return 1 + Number(s);
}
+%PrepareFunctionForOptimization(f);
f(0);
f(0);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-toint32.js b/deps/v8/test/mjsunit/compiler/regress-toint32.js
index 75892d4775..9840a33036 100644
--- a/deps/v8/test/mjsunit/compiler/regress-toint32.js
+++ b/deps/v8/test/mjsunit/compiler/regress-toint32.js
@@ -38,6 +38,7 @@ function f(x) {
return v;
}
+%PrepareFunctionForOptimization(f);
assertEquals(G, f(G));
assertEquals(G, f(G));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-truncate-number-or-undefined-to-float64.js b/deps/v8/test/mjsunit/compiler/regress-truncate-number-or-undefined-to-float64.js
index 1dc3042ea7..7cae7bcaef 100644
--- a/deps/v8/test/mjsunit/compiler/regress-truncate-number-or-undefined-to-float64.js
+++ b/deps/v8/test/mjsunit/compiler/regress-truncate-number-or-undefined-to-float64.js
@@ -13,6 +13,7 @@ function g(a, b) {
return +a;
}
+%PrepareFunctionForOptimization(g);
g(0);
g(0);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/regress-v8-5573.js b/deps/v8/test/mjsunit/compiler/regress-v8-5573.js
index 216b791a71..2ac503dbf4 100644
--- a/deps/v8/test/mjsunit/compiler/regress-v8-5573.js
+++ b/deps/v8/test/mjsunit/compiler/regress-v8-5573.js
@@ -11,5 +11,6 @@ function f() {
return !global;
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertTrue(f());
diff --git a/deps/v8/test/mjsunit/compiler/regress-v8-5756.js b/deps/v8/test/mjsunit/compiler/regress-v8-5756.js
index b7bfcda52b..f84b6f0179 100644
--- a/deps/v8/test/mjsunit/compiler/regress-v8-5756.js
+++ b/deps/v8/test/mjsunit/compiler/regress-v8-5756.js
@@ -22,6 +22,7 @@ function k() {
}
function f1() {
+ %PrepareFunctionForOptimization(k);
z.toString = k;
z.toString();
z.toString();
diff --git a/deps/v8/test/mjsunit/compiler/regress-v8-6077.js b/deps/v8/test/mjsunit/compiler/regress-v8-6077.js
index 0e469882cf..f46b2dba7b 100644
--- a/deps/v8/test/mjsunit/compiler/regress-v8-6077.js
+++ b/deps/v8/test/mjsunit/compiler/regress-v8-6077.js
@@ -63,6 +63,7 @@ for (var i = 0; i < f32.length; i++) {
s += i;
}
+%PrepareFunctionForOptimization(foo);
foo(f32, 0);
foo(f32, 0);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-v8-6631.js b/deps/v8/test/mjsunit/compiler/regress-v8-6631.js
index 58fe360f05..a28594a1ba 100644
--- a/deps/v8/test/mjsunit/compiler/regress-v8-6631.js
+++ b/deps/v8/test/mjsunit/compiler/regress-v8-6631.js
@@ -16,6 +16,7 @@ function f() {
return g(h({}))
};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-variable-liveness-let.js b/deps/v8/test/mjsunit/compiler/regress-variable-liveness-let.js
index 4c6b6936e5..81b55f7a2f 100644
--- a/deps/v8/test/mjsunit/compiler/regress-variable-liveness-let.js
+++ b/deps/v8/test/mjsunit/compiler/regress-variable-liveness-let.js
@@ -11,5 +11,6 @@ function f() {
let x = 23;
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-variable-liveness.js b/deps/v8/test/mjsunit/compiler/regress-variable-liveness.js
index e18741d96e..086275c2f5 100644
--- a/deps/v8/test/mjsunit/compiler/regress-variable-liveness.js
+++ b/deps/v8/test/mjsunit/compiler/regress-variable-liveness.js
@@ -17,6 +17,7 @@ function run() {
}
}
+%PrepareFunctionForOptimization(run);
assertEquals(void 0, run());
%OptimizeFunctionOnNextCall(run);
assertEquals(void 0, run());
diff --git a/deps/v8/test/mjsunit/compiler/regresss-933331.js b/deps/v8/test/mjsunit/compiler/regresss-933331.js
new file mode 100644
index 0000000000..a4af3a08f9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regresss-933331.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function opt(r, flag){
+ var x;
+ for(let i = 0; i < 2; i++){
+ r[2] = 0;
+    x = r[0] << (flag ? r[0] : flag);
+ }
+ return x;
+}
+
+ar = [3.1];
+%PrepareFunctionForOptimization(opt);
+opt(ar,1);
+opt(ar,1);
+%OptimizeFunctionOnNextCall(opt);
+assertEquals(24, opt(ar,1));
diff --git a/deps/v8/test/mjsunit/compiler/rest-parameters.js b/deps/v8/test/mjsunit/compiler/rest-parameters.js
index 45b8cb34df..425dc1e13a 100644
--- a/deps/v8/test/mjsunit/compiler/rest-parameters.js
+++ b/deps/v8/test/mjsunit/compiler/rest-parameters.js
@@ -14,6 +14,8 @@ function test(...rest) {
return [rest, f.apply(null, rest)];
}
+%PrepareFunctionForOptimization(test);
+
assertEquals(test(), [[], NaN]);
assertEquals(test(1), [[1], NaN])
assertEquals(test(1, 2), [[1,2], 3]);
@@ -37,6 +39,8 @@ function test(a, ...rest) {
return [rest, a, f.apply(null, rest)];
}
+%PrepareFunctionForOptimization(test);
+
assertEquals(test(), [[], undefined, NaN]);
assertEquals(test(1), [[], 1, NaN]);
assertEquals(test(1, 2), [[2], 1, NaN]);
diff --git a/deps/v8/test/mjsunit/compiler/rotate.js b/deps/v8/test/mjsunit/compiler/rotate.js
index 1c81e496ea..b49f4f9751 100644
--- a/deps/v8/test/mjsunit/compiler/rotate.js
+++ b/deps/v8/test/mjsunit/compiler/rotate.js
@@ -64,27 +64,32 @@ function ROR4(x, sa) {
return (x << (sa)) | (x >>> (32 - sa));
}
+%PrepareFunctionForOptimization(ROR);
assertEquals(1 << ((2 % 32)), ROR(1, 30));
assertEquals(1 << ((2 % 32)), ROR(1, 30));
%OptimizeFunctionOnNextCall(ROR);
assertEquals(1 << ((2 % 32)), ROR(1, 30));
+%PrepareFunctionForOptimization(ROR1);
assertEquals(0xF0000FFF | 0, ROR1(0x0000FFFF, 4));
assertEquals(0xF0000FFF | 0, ROR1(0x0000FFFF, 4));
%OptimizeFunctionOnNextCall(ROR1);
assertEquals(0xF0000FFF | 0, ROR1(0x0000FFFF, 4));
+%PrepareFunctionForOptimization(ROR1);
assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, 20));
assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, 20));
%OptimizeFunctionOnNextCall(ROR1);
assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, 20));
+%PrepareFunctionForOptimization(ROR1);
assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, Twenty()));
assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, Twenty()));
%OptimizeFunctionOnNextCall(ROR1);
assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, Twenty()));
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR1);
assertEquals(0xFFFFFFFF | 0, ROR1(0xFFFFFFFF, i));
assertEquals(0xFFFFFFFF | 0, ROR1(0xFFFFFFFF, i));
%OptimizeFunctionOnNextCall(ROR1);
@@ -92,6 +97,7 @@ for (var i = 0; i <= 100; i++) {
}
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR1);
assertEquals(-1, ROR1(-1, i));
assertEquals(-1, ROR1(-1, i));
%OptimizeFunctionOnNextCall(ROR1);
@@ -99,6 +105,7 @@ for (var i = 0; i <= 100; i++) {
}
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR1);
assertEquals(1 << (32 - (i % 32)), ROR1(1, i));
assertEquals(1 << (32 - (i % 32)), ROR1(1, i));
%OptimizeFunctionOnNextCall(ROR1);
@@ -106,6 +113,7 @@ for (var i = 0; i <= 100; i++) {
}
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR1);
assertEquals(1 << (32 - (i % 32)), ROR1(1.4, i));
assertEquals(1 << (32 - (i % 32)), ROR1(1.4, i));
%OptimizeFunctionOnNextCall(ROR1);
@@ -114,22 +122,26 @@ for (var i = 0; i <= 100; i++) {
+%PrepareFunctionForOptimization(ROR2);
assertEquals(0xF0000FFF | 0, ROR2(0x0000FFFF, 28));
assertEquals(0xF0000FFF | 0, ROR2(0x0000FFFF, 28));
%OptimizeFunctionOnNextCall(ROR2);
assertEquals(0xF0000FFF | 0, ROR2(0x0000FFFF, 28));
+%PrepareFunctionForOptimization(ROR2);
assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, 12));
assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, 12));
%OptimizeFunctionOnNextCall(ROR2);
assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, 12));
+%PrepareFunctionForOptimization(ROR2);
assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, Twelve()));
assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, Twelve()));
%OptimizeFunctionOnNextCall(ROR2);
assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, Twelve()));
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR2);
assertEquals(0xFFFFFFFF | 0, ROR2(0xFFFFFFFF, i));
assertEquals(0xFFFFFFFF | 0, ROR2(0xFFFFFFFF, i));
%OptimizeFunctionOnNextCall(ROR2);
@@ -137,6 +149,7 @@ for (var i = 0; i <= 100; i++) {
}
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR2);
assertEquals(-1, ROR2(-1, i));
assertEquals(-1, ROR2(-1, i));
%OptimizeFunctionOnNextCall(ROR2);
@@ -144,28 +157,33 @@ for (var i = 0; i <= 100; i++) {
}
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR2);
assertEquals(1 << ((i % 32)), ROR2(1, i));
assertEquals(1 << ((i % 32)), ROR2(1, i));
%OptimizeFunctionOnNextCall(ROR2);
assertEquals(1 << ((i % 32)), ROR2(1, i));
}
+%PrepareFunctionForOptimization(ROR3);
assertEquals(0xF0000FFF | 0, ROR3(0x0000FFFF, 4));
assertEquals(0xF0000FFF | 0, ROR3(0x0000FFFF, 4));
%OptimizeFunctionOnNextCall(ROR3);
assertEquals(0xF0000FFF | 0, ROR3(0x0000FFFF, 4));
+%PrepareFunctionForOptimization(ROR3);
assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, 20));
assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, 20));
%OptimizeFunctionOnNextCall(ROR3);
assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, 20));
+%PrepareFunctionForOptimization(ROR3);
assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, Twenty()));
assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, Twenty()));
%OptimizeFunctionOnNextCall(ROR3);
assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, Twenty()));
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR3);
assertEquals(0xFFFFFFFF | 0, ROR3(0xFFFFFFFF, i));
assertEquals(0xFFFFFFFF | 0, ROR3(0xFFFFFFFF, i));
%OptimizeFunctionOnNextCall(ROR3);
@@ -173,6 +191,7 @@ for (var i = 0; i <= 100; i++) {
}
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR3);
assertEquals(-1, ROR3(-1, i));
assertEquals(-1, ROR3(-1, i));
%OptimizeFunctionOnNextCall(ROR3);
@@ -180,28 +199,33 @@ for (var i = 0; i <= 100; i++) {
}
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR3);
assertEquals(1 << (32 - (i % 32)), ROR3(1, i));
assertEquals(1 << (32 - (i % 32)), ROR3(1, i));
%OptimizeFunctionOnNextCall(ROR3);
assertEquals(1 << (32 - (i % 32)), ROR3(1, i));
}
+%PrepareFunctionForOptimization(ROR4);
assertEquals(0xF0000FFF | 0, ROR4(0x0000FFFF, 28));
assertEquals(0xF0000FFF | 0, ROR4(0x0000FFFF, 28));
%OptimizeFunctionOnNextCall(ROR4);
assertEquals(0xF0000FFF | 0, ROR4(0x0000FFFF, 28));
+%PrepareFunctionForOptimization(ROR4);
assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, 12));
assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, 12));
%OptimizeFunctionOnNextCall(ROR4);
assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, 12));
+%PrepareFunctionForOptimization(ROR4);
assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, Twelve()));
assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, Twelve()));
%OptimizeFunctionOnNextCall(ROR4);
assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, Twelve()));
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR4);
assertEquals(0xFFFFFFFF | 0, ROR4(0xFFFFFFFF, i));
assertEquals(0xFFFFFFFF | 0, ROR4(0xFFFFFFFF, i));
%OptimizeFunctionOnNextCall(ROR4);
@@ -209,6 +233,7 @@ for (var i = 0; i <= 100; i++) {
}
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR4);
assertEquals(-1, ROR4(-1, i));
assertEquals(-1, ROR4(-1, i));
%OptimizeFunctionOnNextCall(ROR4);
@@ -216,6 +241,7 @@ for (var i = 0; i <= 100; i++) {
}
for (var i = 0; i <= 100; i++) {
+ %PrepareFunctionForOptimization(ROR4);
assertEquals(1 << ((i % 32)), ROR4(1, i));
assertEquals(1 << ((i % 32)), ROR4(1, i));
%OptimizeFunctionOnNextCall(ROR4);
@@ -260,48 +286,56 @@ function ROR4_sa40(x) {
}
// ROR1_sa20
+%PrepareFunctionForOptimization(ROR1_sa20);
assertEquals(ROR1(0x0000FFFF, 20), ROR1_sa20(0x0000FFFF));
assertEquals(ROR1(0x0000FFFF, 20), ROR1_sa20(0x0000FFFF));
%OptimizeFunctionOnNextCall(ROR1_sa20);
assertEquals(ROR1(0x0000FFFF, 20), ROR1_sa20(0x0000FFFF));
// ROR1_sa40
+%PrepareFunctionForOptimization(ROR1_sa40);
assertEquals(ROR1(0x0000FFFF, 40), ROR1_sa40(0x0000FFFF));
assertEquals(ROR1(0x0000FFFF, 40), ROR1_sa40(0x0000FFFF));
%OptimizeFunctionOnNextCall(ROR1_sa40);
assertEquals(ROR1(0x0000FFFF, 40), ROR1_sa40(0x0000FFFF));
// ROR2_sa20
+%PrepareFunctionForOptimization(ROR2_sa20);
assertEquals(ROR2(0xFFFFFFFF, 20), ROR2_sa20(0xFFFFFFFF));
assertEquals(ROR2(0xFFFFFFFF, 20), ROR2_sa20(0xFFFFFFFF));
%OptimizeFunctionOnNextCall(ROR2_sa20);
assertEquals(ROR2(0xFFFFFFFF, 20), ROR2_sa20(0xFFFFFFFF));
// ROR2_sa40
+%PrepareFunctionForOptimization(ROR2_sa40);
assertEquals(ROR2(0x0000FFFF, 40), ROR2_sa40(0x0000FFFF));
assertEquals(ROR2(0x0000FFFF, 40), ROR2_sa40(0x0000FFFF));
%OptimizeFunctionOnNextCall(ROR2_sa40);
assertEquals(ROR2(0x0000FFFF, 40), ROR2_sa40(0x0000FFFF));
// ROR3_sa20
+%PrepareFunctionForOptimization(ROR3_sa20);
assertEquals(ROR3(0x0000FFFF, 20), ROR3_sa20(0x0000FFFF));
assertEquals(ROR3(0x0000FFFF, 20), ROR3_sa20(0x0000FFFF));
%OptimizeFunctionOnNextCall(ROR3_sa20);
assertEquals(ROR3(0x0000FFFF, 20), ROR3_sa20(0x0000FFFF));
// ROR3_sa40
+%PrepareFunctionForOptimization(ROR3_sa40);
assertEquals(ROR3(0x0000FFFF, 40), ROR3_sa40(0x0000FFFF));
assertEquals(ROR3(0x0000FFFF, 40), ROR3_sa40(0x0000FFFF));
%OptimizeFunctionOnNextCall(ROR3_sa40);
assertEquals(ROR3(0x0000FFFF, 40), ROR3_sa40(0x0000FFFF));
// ROR4_sa20
+%PrepareFunctionForOptimization(ROR4_sa20);
assertEquals(ROR4(0x0000FFFF, 20), ROR4_sa20(0x0000FFFF));
assertEquals(ROR4(0x0000FFFF, 20), ROR4_sa20(0x0000FFFF));
%OptimizeFunctionOnNextCall(ROR4_sa20);
assertEquals(ROR4(0x0000FFFF, 20), ROR4_sa20(0x0000FFFF));
// ROR4_sa40
+%PrepareFunctionForOptimization(ROR4_sa40);
assertEquals(ROR4(0xFFFFFFFF, 40), ROR4_sa40(0xFFFFFFFF));
assertEquals(ROR4(0xFFFFFFFF, 40), ROR4_sa40(0xFFFFFFFF));
%OptimizeFunctionOnNextCall(ROR4_sa40);
diff --git a/deps/v8/test/mjsunit/compiler/shift-shr.js b/deps/v8/test/mjsunit/compiler/shift-shr.js
index c52ad43ac5..e2bf7abe52 100644
--- a/deps/v8/test/mjsunit/compiler/shift-shr.js
+++ b/deps/v8/test/mjsunit/compiler/shift-shr.js
@@ -19,6 +19,7 @@ function test_shr(left) {
return errors;
}
+%PrepareFunctionForOptimization(test_shr);
assertEquals(0, test_shr(1));
%OptimizeFunctionOnNextCall(test_shr);
for (var i = 5; i >= -5; i--) {
@@ -34,6 +35,7 @@ for (var i = 5; i >= -5; i--) {
return array[y];
}
+ %PrepareFunctionForOptimization(foo);
foo(111, true, new Array(42));
foo(111, true, new Array(42));
%OptimizeFunctionOnNextCall(foo);
@@ -48,6 +50,7 @@ for (var i = 5; i >= -5; i--) {
return array[y];
}
+ %PrepareFunctionForOptimization(foo);
foo(111, true, new Array(42));
foo(111, true, new Array(42));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/simple-deopt.js b/deps/v8/test/mjsunit/compiler/simple-deopt.js
index 7f985acc76..99b7ef3653 100644
--- a/deps/v8/test/mjsunit/compiler/simple-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/simple-deopt.js
@@ -61,6 +61,7 @@ obj.g = g;
function k(o) {
return o.g();
}
+%PrepareFunctionForOptimization(k);
for (var i = 0; i < 5; i++) k(obj);
%OptimizeFunctionOnNextCall(k);
k(obj);
@@ -92,6 +93,7 @@ assertEquals('lit[42]', LiteralToStack(42));
var str = "abc";
var r;
function CallCharAt(n) { return str.charAt(n); }
+%PrepareFunctionForOptimization(CallCharAt);
for (var i = 0; i < 5; i++) {
r = CallCharAt(0);
}
diff --git a/deps/v8/test/mjsunit/compiler/simple-inlining.js b/deps/v8/test/mjsunit/compiler/simple-inlining.js
index 8bd37eae20..3899cf0fc9 100644
--- a/deps/v8/test/mjsunit/compiler/simple-inlining.js
+++ b/deps/v8/test/mjsunit/compiler/simple-inlining.js
@@ -43,6 +43,7 @@ function TestInlineConstant(o) {
var o1 = {};
o1.f = function() { return 42; };
+%PrepareFunctionForOptimization(TestInlineConstant);
for (var i = 0; i < 5; i++) TestInlineConstant(o1);
%OptimizeFunctionOnNextCall(TestInlineConstant);
TestInlineConstant(o1);
@@ -65,6 +66,7 @@ function TestInlineThis(o) {
var o2 = {};
o2.g = function() { return this; };
+%PrepareFunctionForOptimization(TestInlineThis);
for (var i = 0; i < 5; i++) TestInlineThis(o2);
%OptimizeFunctionOnNextCall(TestInlineThis);
TestInlineThis(o2);
@@ -87,6 +89,7 @@ function TestInlineThisX(o) {
var o3 = {y:0,x:42};
o3.h = function() { return this.x; };
+%PrepareFunctionForOptimization(TestInlineThisX);
for (var i = 0; i < 5; i++) TestInlineThisX(o3);
%OptimizeFunctionOnNextCall(TestInlineThisX);
TestInlineThisX(o3);
@@ -109,6 +112,7 @@ function TestInlineThisXLength(o) {
var o4 = {x:[1,2,3]};
o4.h = function() { return this.x.length; };
+%PrepareFunctionForOptimization(TestInlineThisXLength);
for (var i = 0; i < 5; i++) TestInlineThisXLength(o4);
%OptimizeFunctionOnNextCall(TestInlineThisXLength);
TestInlineThisXLength(o4);
@@ -132,6 +136,7 @@ function TestInlineThisXY(o) {
var o6 = {y:42}
var o5 = {e:o6};
o5.h = function() { return this.e.y; };
+%PrepareFunctionForOptimization(TestInlineThisXY);
for (var i = 0; i < 5; i++) TestInlineThisXY(o5);
%OptimizeFunctionOnNextCall(TestInlineThisXY);
TestInlineThisXY(o5);
@@ -154,6 +159,7 @@ function TestInlineThisX0(o) {
var o7 = {x:[42,43,44]};
o7.foo = function() { return this.x[0]; };
+%PrepareFunctionForOptimization(TestInlineThisX0);
for (var i = 0; i < 5; i++) TestInlineThisX0(o7);
%OptimizeFunctionOnNextCall(TestInlineThisX0);
TestInlineThisX0(o7);
diff --git a/deps/v8/test/mjsunit/compiler/smi-stores-opt.js b/deps/v8/test/mjsunit/compiler/smi-stores-opt.js
index ca0923abc9..0ee7175396 100644
--- a/deps/v8/test/mjsunit/compiler/smi-stores-opt.js
+++ b/deps/v8/test/mjsunit/compiler/smi-stores-opt.js
@@ -43,6 +43,7 @@ function f() {
return [result, literal];
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/spread-call.js b/deps/v8/test/mjsunit/compiler/spread-call.js
index 1b7ae6f301..0a8527ed76 100644
--- a/deps/v8/test/mjsunit/compiler/spread-call.js
+++ b/deps/v8/test/mjsunit/compiler/spread-call.js
@@ -27,6 +27,7 @@ function tests() {
assertEquals(0, countArgs(...arguments));
}
+%PrepareFunctionForOptimization(tests);
tests();
tests();
%OptimizeFunctionOnNextCall(tests);
@@ -38,6 +39,7 @@ function testRest(...args) {
assertEquals(4, countArgs(1, ...args));
assertEquals(5, countArgs(1, 2, ...args));
}
+%PrepareFunctionForOptimization(testRest);
testRest(1, 2, 3);
testRest(1, 2, 3);
%OptimizeFunctionOnNextCall(testRest);
@@ -51,6 +53,7 @@ function testRestAndArgs(a, b, ...args) {
assertEquals(4, countArgs(1, a, b, ...args));
assertEquals(5, countArgs(1, 2, a, b, ...args));
}
+%PrepareFunctionForOptimization(testRestAndArgs);
testRestAndArgs(1, 2, 3);
testRestAndArgs(1, 2, 3);
%OptimizeFunctionOnNextCall(testRestAndArgs);
@@ -63,6 +66,7 @@ function testArgumentsStrict() {
assertEquals(4, countArgs(1, ...arguments));
assertEquals(5, countArgs(1, 2, ...arguments));
}
+%PrepareFunctionForOptimization(testArgumentsStrict);
testArgumentsStrict(1, 2, 3);
testArgumentsStrict(1, 2, 3);
%OptimizeFunctionOnNextCall(testArgumentsStrict);
@@ -74,6 +78,7 @@ function testArgumentsSloppy() {
assertEquals(4, countArgs(1, ...arguments));
assertEquals(5, countArgs(1, 2, ...arguments));
}
+%PrepareFunctionForOptimization(testArgumentsSloppy);
testArgumentsSloppy(1, 2, 3);
testArgumentsSloppy(1, 2, 3);
%OptimizeFunctionOnNextCall(testArgumentsSloppy);
diff --git a/deps/v8/test/mjsunit/compiler/store-elimination.js b/deps/v8/test/mjsunit/compiler/store-elimination.js
index 1806ed963f..7defdd0060 100644
--- a/deps/v8/test/mjsunit/compiler/store-elimination.js
+++ b/deps/v8/test/mjsunit/compiler/store-elimination.js
@@ -81,6 +81,7 @@ function killall() {
%NeverOptimizeFunction(killall);
function test(x, f) {
+ %PrepareFunctionForOptimization(f);
assertEquals(x, f());
assertEquals(x, f());
%OptimizeFunctionOnNextCall(f);
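Here the prepare call sits inside the shared test(x, f) driver rather than at top level: every closure handed to the driver needs its own prepare step immediately before its warm-up calls. A sketch of that driver shape, assuming d8 with --allow-natives-syntax and the mjsunit assertEquals helper (the driver and its arguments are illustrative, not from the original file):

function driver(expected, f) {
  %PrepareFunctionForOptimization(f);  // prepare each closure as it arrives
  assertEquals(expected, f());         // two warm-up calls collect feedback
  assertEquals(expected, f());
  %OptimizeFunctionOnNextCall(f);
  assertEquals(expected, f());         // optimized on this call
}

driver(42, function() { return 42; });
driver("x", function() { return "x"; });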
diff --git a/deps/v8/test/mjsunit/compiler/stress-deopt-count-1.js b/deps/v8/test/mjsunit/compiler/stress-deopt-count-1.js
index 834a873e75..5c0e7b7a44 100644
--- a/deps/v8/test/mjsunit/compiler/stress-deopt-count-1.js
+++ b/deps/v8/test/mjsunit/compiler/stress-deopt-count-1.js
@@ -10,6 +10,7 @@ function f(x) {
return x + 1;
}
+%PrepareFunctionForOptimization(f);
f(0);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/stress-deopt-count-2.js b/deps/v8/test/mjsunit/compiler/stress-deopt-count-2.js
index 641a9e8180..d91086a865 100644
--- a/deps/v8/test/mjsunit/compiler/stress-deopt-count-2.js
+++ b/deps/v8/test/mjsunit/compiler/stress-deopt-count-2.js
@@ -11,6 +11,7 @@ function f(x) {
return x + 1;
}
+%PrepareFunctionForOptimization(f);
f(1);
%OptimizeFunctionOnNextCall(f);
@@ -32,6 +33,7 @@ assertUnoptimized(f, undefined, undefined, false);
// stress_deopt_count == 6
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f(1);
assertOptimized(f, undefined, undefined, false);
diff --git a/deps/v8/test/mjsunit/compiler/strict-equal-number.js b/deps/v8/test/mjsunit/compiler/strict-equal-number.js
index 18cd52aa01..de98b8fe63 100644
--- a/deps/v8/test/mjsunit/compiler/strict-equal-number.js
+++ b/deps/v8/test/mjsunit/compiler/strict-equal-number.js
@@ -9,6 +9,7 @@
(function() {
function foo(x, y) { return x === y; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(0.1, 0.1));
assertTrue(foo(undefined, undefined));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/strict-equal-receiver.js b/deps/v8/test/mjsunit/compiler/strict-equal-receiver.js
index 1f38d79dfa..016e077cfb 100644
--- a/deps/v8/test/mjsunit/compiler/strict-equal-receiver.js
+++ b/deps/v8/test/mjsunit/compiler/strict-equal-receiver.js
@@ -11,6 +11,7 @@
function foo() { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -24,6 +25,7 @@
function foo() { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -37,6 +39,7 @@
function foo() { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -50,6 +53,7 @@
function foo(a) { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
@@ -66,6 +70,7 @@
function foo(a) { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
@@ -82,6 +87,7 @@
function foo(a) { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
@@ -98,6 +104,7 @@
function foo(a, b) { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b, b));
assertFalse(foo(a, b));
assertTrue(foo(a, a));
@@ -118,6 +125,7 @@
function foo(a, b) { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b, b));
assertFalse(foo(a, b));
assertTrue(foo(a, a));
@@ -138,6 +146,7 @@
function foo(a, b) { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b, b));
assertFalse(foo(a, b));
assertTrue(foo(a, a));
diff --git a/deps/v8/test/mjsunit/compiler/strict-equal-symbol.js b/deps/v8/test/mjsunit/compiler/strict-equal-symbol.js
index aee1ecfa60..2cbb8d2407 100644
--- a/deps/v8/test/mjsunit/compiler/strict-equal-symbol.js
+++ b/deps/v8/test/mjsunit/compiler/strict-equal-symbol.js
@@ -11,6 +11,7 @@
function foo() { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
@@ -24,6 +25,7 @@
function foo(a) { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b));
assertFalse(foo(a));
assertTrue(foo(b));
@@ -40,6 +42,7 @@
function foo(a, b) { return a === b; }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(b, b));
assertFalse(foo(a, b));
assertTrue(foo(a, a));
diff --git a/deps/v8/test/mjsunit/compiler/strict-recompile.js b/deps/v8/test/mjsunit/compiler/strict-recompile.js
index 96e8bcab78..acd3fb2d7b 100644
--- a/deps/v8/test/mjsunit/compiler/strict-recompile.js
+++ b/deps/v8/test/mjsunit/compiler/strict-recompile.js
@@ -46,6 +46,7 @@ function do_eval(str) {
}
var eval_foo = do_eval('(' + foo + ')');
+%PrepareFunctionForOptimization(eval_foo);
for (var i = 0; i < 5; i++) assertTrue(eval_foo());
%OptimizeFunctionOnNextCall(eval_foo);
assertTrue(eval_foo());
diff --git a/deps/v8/test/mjsunit/compiler/string-add-try-catch.js b/deps/v8/test/mjsunit/compiler/string-add-try-catch.js
index 5ae5b00d18..45eef993c6 100644
--- a/deps/v8/test/mjsunit/compiler/string-add-try-catch.js
+++ b/deps/v8/test/mjsunit/compiler/string-add-try-catch.js
@@ -18,6 +18,7 @@ var a = "a".repeat(%StringMaxLength());
}
}
+ %PrepareFunctionForOptimization(foo);
foo("a");
foo("a");
%OptimizeFunctionOnNextCall(foo);
@@ -34,6 +35,7 @@ var a = "a".repeat(%StringMaxLength());
}
}
+ %PrepareFunctionForOptimization(foo);
foo("a");
foo("a");
%OptimizeFunctionOnNextCall(foo);
@@ -50,6 +52,7 @@ var a = "a".repeat(%StringMaxLength());
}
}
+ %PrepareFunctionForOptimization(foo);
foo("a");
foo("a");
%OptimizeFunctionOnNextCall(foo);
@@ -72,6 +75,7 @@ var obj = {
}
}
+ %PrepareFunctionForOptimization(foo);
foo("a");
foo("a");
%OptimizeFunctionOnNextCall(foo);
@@ -88,6 +92,7 @@ var obj = {
}
}
+ %PrepareFunctionForOptimization(foo);
foo("a");
foo("a");
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/string-comparison-opt.js b/deps/v8/test/mjsunit/compiler/string-comparison-opt.js
index 6c884e13e6..3aa68e0a3e 100644
--- a/deps/v8/test/mjsunit/compiler/string-comparison-opt.js
+++ b/deps/v8/test/mjsunit/compiler/string-comparison-opt.js
@@ -8,6 +8,7 @@
function f(a) {
return a.charAt(1) == "";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aaa"));
%OptimizeFunctionOnNextCall(f);
assertEquals(false, f("aaa"));
@@ -17,6 +18,7 @@
function f(a) {
return a.charAt(1) < "";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aaa"));
%OptimizeFunctionOnNextCall(f);
assertEquals(false, f("aaa"));
@@ -26,6 +28,7 @@
function f(a) {
return a.charAt(1) <= "";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aaa"));
%OptimizeFunctionOnNextCall(f);
assertEquals(false, f("aaa"));
@@ -35,6 +38,7 @@
function f(a) {
return a.charAt(1) > "";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(true, f("aaa"));
%OptimizeFunctionOnNextCall(f);
assertEquals(true, f("aaa"));
@@ -44,6 +48,7 @@
function f(a) {
return a.charAt(1) >= "";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(true, f("aaa"));
%OptimizeFunctionOnNextCall(f);
assertEquals(true, f("aaa"));
@@ -54,6 +59,7 @@
function f(a) {
return a.charAt(1) == a.charAt(2);
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aab"));
assertEquals(true, f("aaa"));
assertEquals(false, f("acb"));
@@ -67,6 +73,7 @@
function f(a) {
return a.charAt(1) < a.charAt(2);
}
+ %PrepareFunctionForOptimization(f);
assertEquals(true, f("aab"));
assertEquals(false, f("aaa"));
assertEquals(false, f("acb"));
@@ -80,6 +87,7 @@
function f(a) {
return a.charAt(1) <= a.charAt(2);
}
+ %PrepareFunctionForOptimization(f);
assertEquals(true, f("aab"));
assertEquals(true, f("aaa"));
assertEquals(false, f("acb"));
@@ -93,6 +101,7 @@
function f(a) {
return a.charAt(1) > a.charAt(2);
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aab"));
assertEquals(false, f("aaa"));
assertEquals(true, f("acb"));
@@ -106,6 +115,7 @@
function f(a) {
return a.charAt(1) >= a.charAt(2);
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aab"));
assertEquals(true, f("aaa"));
assertEquals(true, f("acb"));
@@ -120,6 +130,7 @@
function f(a) {
return a.charAt(1) == "b";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aaa"));
assertEquals(true, f("bbb"));
assertEquals(false, f("ccc"));
@@ -133,6 +144,7 @@
function f(a) {
return a.charAt(1) == "bb";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aaa"));
%OptimizeFunctionOnNextCall(f);
assertEquals(false, f("aaa"));
@@ -143,6 +155,7 @@
function f(a) {
return a.charAt(1) < "b";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(true, f("aaa"));
assertEquals(false, f("bbb"));
assertEquals(false, f("ccc"));
@@ -156,6 +169,7 @@
function f(a) {
return a.charAt(1) < "bb";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(true, f("aaa"));
assertEquals(true, f("bbb"));
assertEquals(false, f("ccc"));
@@ -170,6 +184,7 @@
function f(a) {
return a.charAt(1) <= "b";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(true, f("aaa"));
assertEquals(true, f("bbb"));
assertEquals(false, f("ccc"));
@@ -183,6 +198,7 @@
function f(a) {
return a.charAt(1) <= "bb";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(true, f("aaa"));
assertEquals(true, f("bbb"));
assertEquals(false, f("ccc"));
@@ -197,6 +213,7 @@
function f(a) {
return a.charAt(1) > "b";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aaa"));
assertEquals(false, f("bbb"));
assertEquals(true, f("ccc"));
@@ -210,6 +227,7 @@
function f(a) {
return a.charAt(1) > "bb";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aaa"));
assertEquals(false, f("bbb"));
assertEquals(true, f("ccc"));
@@ -224,6 +242,7 @@
function f(a) {
return a.charAt(1) >= "b";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aaa"));
assertEquals(true, f("bbb"));
assertEquals(true, f("ccc"));
@@ -237,6 +256,7 @@
function f(a) {
return a.charAt(1) >= "bb";
}
+ %PrepareFunctionForOptimization(f);
assertEquals(false, f("aaa"));
assertEquals(false, f("bbb"));
assertEquals(true, f("ccc"));
diff --git a/deps/v8/test/mjsunit/compiler/string-concat-deopt.js b/deps/v8/test/mjsunit/compiler/string-concat-deopt.js
index 9043b00488..d2c522dbca 100644
--- a/deps/v8/test/mjsunit/compiler/string-concat-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/string-concat-deopt.js
@@ -9,6 +9,7 @@
return "abc".concat();
}
+ %PrepareFunctionForOptimization(f);
assertEquals("abc", f());
assertEquals("abc", f());
%OptimizeFunctionOnNextCall(f);
@@ -20,6 +21,7 @@
return "abc".concat(a);
}
+ %PrepareFunctionForOptimization(f);
assertEquals("abcde", f("de"));
assertEquals("abcde", f("de"));
%OptimizeFunctionOnNextCall(f);
@@ -30,6 +32,8 @@
function f(a) {
return "abc".concat(a);
}
+
+ %PrepareFunctionForOptimization(f);
assertEquals("abcde", f("de"));
assertEquals("abcde", f("de"));
%OptimizeFunctionOnNextCall(f);
@@ -41,6 +45,7 @@
return "abc".concat(a);
}
+ %PrepareFunctionForOptimization(f);
assertEquals("abcde", f("de"));
assertEquals("abcde", f("de"));
%OptimizeFunctionOnNextCall(f);
@@ -54,6 +59,7 @@
return "ab".concat("c");
}
+ %PrepareFunctionForOptimization(f);
assertEquals("abc", f());
assertEquals("abc", f());
%OptimizeFunctionOnNextCall(f);
@@ -65,6 +71,7 @@
return "ab".concat("c", a);
}
+ %PrepareFunctionForOptimization(f);
assertEquals("abcde", f("de"));
assertEquals("abcde", f("de"));
%OptimizeFunctionOnNextCall(f);
@@ -75,6 +82,8 @@
function f(a) {
return "ab".concat("c", a);
}
+
+ %PrepareFunctionForOptimization(f);
assertEquals("abcde", f("de"));
assertEquals("abcde", f("de"));
%OptimizeFunctionOnNextCall(f);
@@ -86,6 +95,7 @@
return "ab".concat("c", a);
}
+ %PrepareFunctionForOptimization(f);
assertEquals("abcde", f("de"));
assertEquals("abcde", f("de"));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/string-concat-try-catch.js b/deps/v8/test/mjsunit/compiler/string-concat-try-catch.js
index d85a891934..382ae30b85 100644
--- a/deps/v8/test/mjsunit/compiler/string-concat-try-catch.js
+++ b/deps/v8/test/mjsunit/compiler/string-concat-try-catch.js
@@ -15,6 +15,7 @@ var a = "a".repeat(%StringMaxLength());
}
}
+ %PrepareFunctionForOptimization(foo);
foo("a");
foo("a");
// Optimize with string length protector check.
@@ -22,6 +23,7 @@ var a = "a".repeat(%StringMaxLength());
foo("a");
assertInstanceof(foo(a), RangeError);
// Optimize without string length protector check.
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo("a");
assertInstanceof(foo(a), RangeError);
diff --git a/deps/v8/test/mjsunit/compiler/string-concat-yield.js b/deps/v8/test/mjsunit/compiler/string-concat-yield.js
index d6611d2d36..5b73fea94a 100644
--- a/deps/v8/test/mjsunit/compiler/string-concat-yield.js
+++ b/deps/v8/test/mjsunit/compiler/string-concat-yield.js
@@ -9,6 +9,7 @@ function* foo() {
return f;
}
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
var gen = foo();
assertEquals('yielded', gen.next('unused').value);
diff --git a/deps/v8/test/mjsunit/compiler/string-from-code-point.js b/deps/v8/test/mjsunit/compiler/string-from-code-point.js
index 165ea0c234..2d4686a0bf 100644
--- a/deps/v8/test/mjsunit/compiler/string-from-code-point.js
+++ b/deps/v8/test/mjsunit/compiler/string-from-code-point.js
@@ -10,6 +10,7 @@
return String.fromCodePoint(x);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals("\u0000", foo(0));
assertEquals("\u0000", foo(-0));
%OptimizeFunctionOnNextCall(foo);
@@ -17,6 +18,10 @@
assertEquals("\u0000", foo(-0));
assertOptimized(foo);
+  // Prepare foo to be re-optimized, ensuring its bytecode / feedback vector
+ // doesn't get flushed after deoptimization.
+ %PrepareFunctionForOptimization(foo);
+
// Now passing anything outside the valid code point
// range should invalidate the optimized code.
assertThrows(_ => foo(-1));
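The comment in this hunk names the subtlety behind several of these re-prepare calls: after a deoptimization, V8 may flush a function's bytecode and feedback vector, so a test that optimizes the same function twice must prepare it again before the deopting call. A sketch of the full cycle, assuming d8 with --allow-natives-syntax (the deopt trigger is illustrative):

function foo(x) { return x + 1; }

%PrepareFunctionForOptimization(foo);
foo(1); foo(2);                        // collect small-integer feedback
%OptimizeFunctionOnNextCall(foo);
foo(3);                                // runs the optimized code
%PrepareFunctionForOptimization(foo);  // re-prepare so the deopt below cannot flush feedback
foo("a");                              // string argument misses the smi speculation and deopts
%OptimizeFunctionOnNextCall(foo);
foo(4);                                // foo can be optimized again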
diff --git a/deps/v8/test/mjsunit/compiler/string-length.js b/deps/v8/test/mjsunit/compiler/string-length.js
index 855a1a6b71..718bcfb4da 100644
--- a/deps/v8/test/mjsunit/compiler/string-length.js
+++ b/deps/v8/test/mjsunit/compiler/string-length.js
@@ -13,6 +13,7 @@ function id(x) { return x; }
function f1(x) {
return x.length;
}
+%PrepareFunctionForOptimization(f1);
assertEquals(0, f1(""));
assertEquals(1, f1("a"));
%OptimizeFunctionOnNextCall(f1);
@@ -23,6 +24,7 @@ function f2(x, y, z) {
x = x ? "" + y : "" + z;
return x.length;
}
+%PrepareFunctionForOptimization(f2);
assertEquals(0, f2(true, "", "a"));
assertEquals(1, f2(false, "", "a"));
%OptimizeFunctionOnNextCall(f2);
diff --git a/deps/v8/test/mjsunit/compiler/string-slice.js b/deps/v8/test/mjsunit/compiler/string-slice.js
index 6c3274753e..5d5defbee0 100644
--- a/deps/v8/test/mjsunit/compiler/string-slice.js
+++ b/deps/v8/test/mjsunit/compiler/string-slice.js
@@ -7,6 +7,7 @@
(function() {
function foo(s) { return s.slice(-1); }
+ %PrepareFunctionForOptimization(foo);
assertEquals('', foo(''));
assertEquals('a', foo('a'));
assertEquals('b', foo('ab'));
@@ -21,6 +22,7 @@
(function() {
function foo(s) { return s.slice(-1, undefined); }
+ %PrepareFunctionForOptimization(foo);
assertEquals('', foo(''));
assertEquals('a', foo('a'));
assertEquals('b', foo('ab'));
diff --git a/deps/v8/test/mjsunit/compiler/switch-bailout.js b/deps/v8/test/mjsunit/compiler/switch-bailout.js
index 084074e0b0..71aa946b14 100644
--- a/deps/v8/test/mjsunit/compiler/switch-bailout.js
+++ b/deps/v8/test/mjsunit/compiler/switch-bailout.js
@@ -37,6 +37,7 @@ function f(x) {
return 99;
}
+%PrepareFunctionForOptimization(f);
for (var i = 0; i < 5; i++) f("foo");
%OptimizeFunctionOnNextCall(f);
f("foo");
diff --git a/deps/v8/test/mjsunit/compiler/symbol-protototype.js b/deps/v8/test/mjsunit/compiler/symbol-protototype.js
index 9a707e8a08..b1b2ac6527 100644
--- a/deps/v8/test/mjsunit/compiler/symbol-protototype.js
+++ b/deps/v8/test/mjsunit/compiler/symbol-protototype.js
@@ -7,6 +7,7 @@
function test1(s) {
return s.toString;
}
+%PrepareFunctionForOptimization(test1);
assertSame(test1(Symbol()), Symbol.prototype.toString);
assertSame(test1(Symbol()), Symbol.prototype.toString);
%OptimizeFunctionOnNextCall(test1);
@@ -15,6 +16,7 @@ assertSame(test1(Symbol()), Symbol.prototype.toString);
function test2(s) {
return s.valueOf;
}
+%PrepareFunctionForOptimization(test2);
assertSame(test2(Symbol()), Symbol.prototype.valueOf);
assertSame(test2(Symbol()), Symbol.prototype.valueOf);
%OptimizeFunctionOnNextCall(test2);
@@ -24,6 +26,7 @@ Symbol.prototype.foo = 1;
function test3(s) {
return s["foo"];
}
+%PrepareFunctionForOptimization(test3);
assertEquals(test3(Symbol()), 1);
assertEquals(test3(Symbol()), 1);
%OptimizeFunctionOnNextCall(test3);
@@ -33,6 +36,7 @@ Symbol.prototype.bar = function() { "use strict"; return this; }
function test4(s) {
return s.bar();
}
+%PrepareFunctionForOptimization(test4);
var s = Symbol("foo");
assertEquals(test4(s), s);
assertEquals(test4(s), s);
diff --git a/deps/v8/test/mjsunit/compiler/try-binop.js b/deps/v8/test/mjsunit/compiler/try-binop.js
index 2132ad2c00..f762eedf8f 100644
--- a/deps/v8/test/mjsunit/compiler/try-binop.js
+++ b/deps/v8/test/mjsunit/compiler/try-binop.js
@@ -14,6 +14,7 @@ function mult_left_plain(x) {
}
}
+%PrepareFunctionForOptimization(mult_left_plain);
%OptimizeFunctionOnNextCall(mult_left_plain);
assertEquals("boom", mult_left_plain(boom));
assertEquals(46, mult_left_plain(23));
@@ -26,6 +27,7 @@ function mult_right_plain(x) {
}
}
+%PrepareFunctionForOptimization(mult_right_plain);
%OptimizeFunctionOnNextCall(mult_right_plain);
assertEquals("boom", mult_right_plain(boom));
assertEquals(69, mult_right_plain(23));
@@ -38,6 +40,7 @@ function mult_none_plain(x,y) {
}
}
+%PrepareFunctionForOptimization(mult_none_plain);
%OptimizeFunctionOnNextCall(mult_none_plain);
assertEquals("boom", mult_none_plain(boom, boom));
assertEquals("boom", mult_none_plain(boom, 2));
diff --git a/deps/v8/test/mjsunit/compiler/try-catch-deopt.js b/deps/v8/test/mjsunit/compiler/try-catch-deopt.js
index 2b6372cf28..fb685c0a76 100644
--- a/deps/v8/test/mjsunit/compiler/try-catch-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/try-catch-deopt.js
@@ -20,6 +20,7 @@
}
}
+ %PrepareFunctionForOptimization(f);
assertEquals(43, f());
assertEquals(43, f());
%NeverOptimizeFunction(g);
@@ -50,6 +51,7 @@
return b;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(43, f());
assertEquals(43, f());
%NeverOptimizeFunction(g);
@@ -80,6 +82,7 @@
return h() + 1;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(44, f());
assertEquals(44, f());
%NeverOptimizeFunction(g);
@@ -109,6 +112,7 @@
return a;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(43, f());
assertEquals(43, f());
%NeverOptimizeFunction(g);
@@ -135,6 +139,7 @@
}
return x;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f(0));
assertEquals(24, f(1));
%OptimizeFunctionOnNextCall(f);
@@ -161,6 +166,7 @@
}
return x;
}
+ %PrepareFunctionForOptimization(f);
assertThrows(function() { f(0) });
assertThrows(function() { f(1) });
%OptimizeFunctionOnNextCall(f);
@@ -190,6 +196,7 @@
}
return x;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f(0));
assertEquals(24, f(1));
%OptimizeFunctionOnNextCall(f);
@@ -217,6 +224,7 @@
}
return x;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f(0));
assertEquals(24, f(1));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/try-context.js b/deps/v8/test/mjsunit/compiler/try-context.js
index 4e6d9b028c..50dd923187 100644
--- a/deps/v8/test/mjsunit/compiler/try-context.js
+++ b/deps/v8/test/mjsunit/compiler/try-context.js
@@ -17,6 +17,7 @@
}
return x;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f(0));
assertEquals(24, f(1));
%OptimizeFunctionOnNextCall(f);
@@ -37,6 +38,7 @@
}
return x;
}
+ %PrepareFunctionForOptimization(f);
assertThrows(function() { f(0) });
assertThrows(function() { f(1) });
%OptimizeFunctionOnNextCall(f);
@@ -60,6 +62,7 @@
}
return x;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f(0));
assertEquals(24, f(1));
%OptimizeFunctionOnNextCall(f);
@@ -81,6 +84,7 @@
}
return x;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f(0));
assertEquals(24, f(1));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/try-deopt.js b/deps/v8/test/mjsunit/compiler/try-deopt.js
index a4a6eb0304..3ca341c5a1 100644
--- a/deps/v8/test/mjsunit/compiler/try-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/try-deopt.js
@@ -13,6 +13,7 @@ function DeoptFromTry(x) {
}
return x + 2;
}
+%PrepareFunctionForOptimization(DeoptFromTry);
%OptimizeFunctionOnNextCall(DeoptFromTry);
assertEquals(24, DeoptFromTry(23));
@@ -26,6 +27,7 @@ function DeoptFromCatch(x) {
}
return x + 2;
}
+%PrepareFunctionForOptimization(DeoptFromCatch);
%OptimizeFunctionOnNextCall(DeoptFromCatch);
assertEquals(24, DeoptFromCatch(23));
@@ -39,6 +41,7 @@ function DeoptFromFinally_Return(x) {
}
return x + 2;
}
+%PrepareFunctionForOptimization(DeoptFromFinally_Return);
%OptimizeFunctionOnNextCall(DeoptFromFinally_Return);
assertEquals(24, DeoptFromFinally_Return(23));
@@ -51,5 +54,6 @@ function DeoptFromFinally_ReThrow(x) {
}
return x + 2;
}
+%PrepareFunctionForOptimization(DeoptFromFinally_ReThrow);
%OptimizeFunctionOnNextCall(DeoptFromFinally_ReThrow);
assertThrows("DeoptFromFinally_ReThrow(new Error)", Error);
diff --git a/deps/v8/test/mjsunit/compiler/try-finally-deopt.js b/deps/v8/test/mjsunit/compiler/try-finally-deopt.js
index 455bf3477f..ee8da2eca0 100644
--- a/deps/v8/test/mjsunit/compiler/try-finally-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/try-finally-deopt.js
@@ -17,6 +17,7 @@
return global + a;
}
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -36,6 +37,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -59,6 +61,8 @@
return 1;
}
+
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -83,6 +87,8 @@
return 1;
}
+
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -106,6 +112,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(f);
try { f(); } catch(e) {}
try { f(); } catch(e) {}
%OptimizeFunctionOnNextCall(f);
@@ -133,6 +140,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(f);
try { f(); } catch(e) {}
try { f(); } catch(e) {}
%OptimizeFunctionOnNextCall(f);
@@ -157,6 +165,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -186,6 +195,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -211,6 +221,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -241,6 +252,7 @@
return 1;
}
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js b/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js
index 8875b8c0c3..52c836a84f 100644
--- a/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js
+++ b/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js
@@ -9,6 +9,7 @@
return a + b - c;
}
+ %PrepareFunctionForOptimization(f0);
assertEquals(4, f0(3, 2, 1));
assertEquals(4, f0(3, 2, 1));
%OptimizeFunctionOnNextCall(f0);
@@ -20,6 +21,7 @@
return a + b - c;
}
+ %PrepareFunctionForOptimization(f1);
assertEquals(4.5, f1(3.5, 2.5, 1.5));
assertEquals(4.5, f1(3.5, 2.5, 1.5));
%OptimizeFunctionOnNextCall(f1);
@@ -34,6 +36,7 @@
return (a >>> 0) + 1;
}
+ %PrepareFunctionForOptimization(f2);
assertEquals(1, f2(0));
assertEquals(1, f2(0));
%OptimizeFunctionOnNextCall(f2);
@@ -50,6 +53,7 @@
return x + b;
}
+ %PrepareFunctionForOptimization(f3);
assertEquals(1, f3(0, 1));
assertEquals(1, f3(0, 1));
%OptimizeFunctionOnNextCall(f3);
@@ -62,6 +66,7 @@
return a << b;
}
+ %PrepareFunctionForOptimization(f4);
assertEquals(24, f4(3, 3));
assertEquals(40, f4(5, 3));
%OptimizeFunctionOnNextCall(f4);
@@ -73,6 +78,7 @@
return a << b;
}
+ %PrepareFunctionForOptimization(f5);
assertEquals(24, f5(3.3, 3.4));
assertEquals(40, f5(5.1, 3.9));
%OptimizeFunctionOnNextCall(f5);
@@ -84,6 +90,7 @@
return a >> b;
}
+ %PrepareFunctionForOptimization(f6);
assertEquals(1, f6(8.3, 3.4));
assertEquals(-2, f6(-16.1, 3.9));
%OptimizeFunctionOnNextCall(f6);
@@ -95,6 +102,7 @@
return a >>> b;
}
+ %PrepareFunctionForOptimization(f7);
assertEquals(1, f7(8.3, 3.4));
assertEquals(536870910, f7(-16.1, 3.9));
%OptimizeFunctionOnNextCall(f7);
diff --git a/deps/v8/test/mjsunit/compiler/type-speculative-safe-integer-add.js b/deps/v8/test/mjsunit/compiler/type-speculative-safe-integer-add.js
index 459e2b4202..a45561b8c8 100644
--- a/deps/v8/test/mjsunit/compiler/type-speculative-safe-integer-add.js
+++ b/deps/v8/test/mjsunit/compiler/type-speculative-safe-integer-add.js
@@ -19,6 +19,7 @@
return y;
}
+ %PrepareFunctionForOptimization(forgetAboutMinus0);
forgetAboutMinus0(1);
assertEquals(Infinity, forgetAboutMinus0(1));
%OptimizeFunctionOnNextCall(forgetAboutMinus0);
@@ -43,6 +44,7 @@
return NumberAdd(f(x), 0);
}
+ %PrepareFunctionForOptimization(forgetAboutNaN);
forgetAboutNaN(false);
assertEquals(2, forgetAboutNaN(false));
%OptimizeFunctionOnNextCall(forgetAboutNaN);
diff --git a/deps/v8/test/mjsunit/compiler/typed-array-constructor.js b/deps/v8/test/mjsunit/compiler/typed-array-constructor.js
index 07d6a7ca4e..fac56632f3 100644
--- a/deps/v8/test/mjsunit/compiler/typed-array-constructor.js
+++ b/deps/v8/test/mjsunit/compiler/typed-array-constructor.js
@@ -11,6 +11,7 @@ const limit = %MaxSmi() + 1;
try { new Int8Array(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new Int8Array/.test(foo()));
assertTrue(/new Int8Array/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
@@ -22,6 +23,7 @@ const limit = %MaxSmi() + 1;
try { new Uint8Array(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new Uint8Array/.test(foo()));
assertTrue(/new Uint8Array/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
@@ -33,6 +35,7 @@ const limit = %MaxSmi() + 1;
try { new Uint8ClampedArray(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new Uint8ClampedArray/.test(foo()));
assertTrue(/new Uint8ClampedArray/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
@@ -44,6 +47,7 @@ const limit = %MaxSmi() + 1;
try { new Int16Array(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new Int16Array/.test(foo()));
assertTrue(/new Int16Array/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
@@ -55,6 +59,7 @@ const limit = %MaxSmi() + 1;
try { new Uint16Array(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new Uint16Array/.test(foo()));
assertTrue(/new Uint16Array/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
@@ -66,6 +71,7 @@ const limit = %MaxSmi() + 1;
try { new Int32Array(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new Int32Array/.test(foo()));
assertTrue(/new Int32Array/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
@@ -77,6 +83,7 @@ const limit = %MaxSmi() + 1;
try { new Uint32Array(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new Uint32Array/.test(foo()));
assertTrue(/new Uint32Array/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
@@ -88,6 +95,7 @@ const limit = %MaxSmi() + 1;
try { new Float32Array(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new Float32Array/.test(foo()));
assertTrue(/new Float32Array/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
@@ -100,6 +108,7 @@ const limit = %MaxSmi() + 1;
try { new Float64Array(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new Float64Array/.test(foo()));
assertTrue(/new Float64Array/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
@@ -111,6 +120,7 @@ const limit = %MaxSmi() + 1;
try { new BigInt64Array(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new BigInt64Array/.test(foo()));
assertTrue(/new BigInt64Array/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
@@ -122,6 +132,7 @@ const limit = %MaxSmi() + 1;
try { new BigUint64Array(limit); } catch (e) { return e.stack; }
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(/new BigUint64Array/.test(foo()));
assertTrue(/new BigUint64Array/.test(foo()));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/typedarray-prototype-tostringtag.js b/deps/v8/test/mjsunit/compiler/typedarray-prototype-tostringtag.js
index de4b302017..36575c7f59 100644
--- a/deps/v8/test/mjsunit/compiler/typedarray-prototype-tostringtag.js
+++ b/deps/v8/test/mjsunit/compiler/typedarray-prototype-tostringtag.js
@@ -24,6 +24,7 @@ const TypedArrayPrototype_toStringTag =
function foo(o) {
return TypedArrayPrototype_toStringTag.call(o);
}
+ %PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(1));
assertEquals(undefined, foo({}));
assertEquals(undefined, foo([]));
@@ -46,6 +47,7 @@ const TypedArrayPrototype_toStringTag =
return TypedArrayProto_toStringTag(value) !== undefined;
}
+ %PrepareFunctionForOptimization(isTypedArray);
assertFalse(isTypedArray(1));
assertFalse(isTypedArray({}));
assertFalse(isTypedArray([]));
@@ -70,6 +72,7 @@ const TypedArrayPrototype_toStringTag =
return TypedArrayProto_toStringTag(value) === 'Uint8Array';
}
+ %PrepareFunctionForOptimization(isUint8Array);
assertFalse(isUint8Array(1));
assertFalse(isUint8Array({}));
assertFalse(isUint8Array([]));
diff --git a/deps/v8/test/mjsunit/compiler/uint32.js b/deps/v8/test/mjsunit/compiler/uint32.js
index 3568e27f03..a39d405258 100644
--- a/deps/v8/test/mjsunit/compiler/uint32.js
+++ b/deps/v8/test/mjsunit/compiler/uint32.js
@@ -40,6 +40,7 @@ function ChangeI2T(arr, i) {
return uint32_array[i];
}
+%PrepareFunctionForOptimization(ChangeI2T);
assertEquals(K1, ChangeI2T(uint32_array, 0));
assertEquals(K2, ChangeI2T(uint32_array, 1));
%OptimizeFunctionOnNextCall(ChangeI2T);
@@ -60,6 +61,7 @@ function Deopt(obj, arr, i) {
return x;
}
+%PrepareFunctionForOptimization(Deopt);
assertEquals(K1, Deopt({x: 0}, uint32_array, 0));
assertEquals(K2, Deopt({x: 0}, uint32_array, 1));
%OptimizeFunctionOnNextCall(Deopt);
@@ -71,6 +73,7 @@ function ChangeI2D(arr) {
return arr[0] + arr[1];
}
+%PrepareFunctionForOptimization(ChangeI2D);
assertEquals(K1 + K2, ChangeI2D(uint32_array));
assertEquals(K1 + K2, ChangeI2D(uint32_array));
%OptimizeFunctionOnNextCall(ChangeI2D);
@@ -80,6 +83,7 @@ function ShrShr(val) {
return (val >>> 0) >>> 1;
}
+%PrepareFunctionForOptimization(ShrShr);
assertEquals(K1, ShrShr(K2 | 0));
assertEquals(K1, ShrShr(K2 | 0));
%OptimizeFunctionOnNextCall(ShrShr);
@@ -89,6 +93,7 @@ function SarShr(val) {
return val >> (-2 >>> 0);
}
+%PrepareFunctionForOptimization(SarShr);
var K3 = 0x80000000;
assertEquals(-2, SarShr(K3 | 0));
assertEquals(-2, SarShr(K3 | 0));
@@ -100,6 +105,7 @@ function Uint32Phi(a, b, c) {
return (i | 0);
}
+%PrepareFunctionForOptimization(Uint32Phi);
var K4 = 0x80000001;
assertEquals(K3 | 0, Uint32Phi(true, K3, K4));
assertEquals(K4 | 0, Uint32Phi(false, K3, K4));
@@ -114,6 +120,7 @@ function NonUint32Phi(a, b, c) {
return (i | 0);
}
+%PrepareFunctionForOptimization(NonUint32Phi);
assertEquals(K3 | 0, NonUint32Phi(true, K3, K4));
assertEquals(K4 | 0, NonUint32Phi(false, K3, K4));
assertEquals(K3 | 0, NonUint32Phi(true, K3, K4));
@@ -132,6 +139,7 @@ function PhiOfPhi(x) {
return (a | 0);
}
+%PrepareFunctionForOptimization(PhiOfPhi);
assertEquals(1, PhiOfPhi(1));
assertEquals(1, PhiOfPhi(1));
%OptimizeFunctionOnNextCall(PhiOfPhi);
@@ -147,6 +155,7 @@ function PhiOfPhiUnsafe(x) {
return a + a;
}
+%PrepareFunctionForOptimization(PhiOfPhiUnsafe);
assertEquals(2, PhiOfPhiUnsafe(1));
assertEquals(2, PhiOfPhiUnsafe(1));
%OptimizeFunctionOnNextCall(PhiOfPhiUnsafe);
@@ -166,6 +175,7 @@ function FillOldArrayWithHeapNumbers(N) {
}
}
+%PrepareFunctionForOptimization(FillOldArrayWithHeapNumbers);
FillOldArrayWithHeapNumbers(1);
FillOldArrayWithHeapNumbers(1);
%OptimizeFunctionOnNextCall(FillOldArrayWithHeapNumbers);
@@ -191,6 +201,7 @@ function Outer(v, f) {
return InnerWithArguments(v >>> 0, f);
}
+%PrepareFunctionForOptimization(Outer);
assertArrayEquals([0x0100, 0x01], Outer(0x0100, Pack));
assertArrayEquals([0x0100, 0x01], Outer(0x0100, Pack));
assertArrayEquals([0x0100, 0x01], Outer(0x0100, Pack));
diff --git a/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js b/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js
index 21cc5bbc36..63b74eaedb 100644
--- a/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js
+++ b/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js
@@ -9,6 +9,7 @@
a[0] = v & 0xff;
}
+ %PrepareFunctionForOptimization(foo);
var a = new Uint8ClampedArray(4);
foo(a, 1);
foo(a, 2);
@@ -23,6 +24,7 @@
a[0] = v >>> 0;
}
+ %PrepareFunctionForOptimization(foo);
var a = new Uint8ClampedArray(4);
foo(a, 1);
foo(a, 2);
@@ -37,6 +39,7 @@
a[0] = v | 0;
}
+ %PrepareFunctionForOptimization(foo);
var a = new Uint8ClampedArray(4);
foo(a, 1);
foo(a, 2);
@@ -54,6 +57,7 @@
a[0] = v;
}
+ %PrepareFunctionForOptimization(foo);
var a = new Uint8ClampedArray(4);
foo(a, 1);
foo(a, 2);
diff --git a/deps/v8/test/mjsunit/compiler/unsigned-min-max.js b/deps/v8/test/mjsunit/compiler/unsigned-min-max.js
index db91188628..e5e33aae1c 100644
--- a/deps/v8/test/mjsunit/compiler/unsigned-min-max.js
+++ b/deps/v8/test/mjsunit/compiler/unsigned-min-max.js
@@ -10,6 +10,7 @@ function umin(a, b) {
return Math.min(a, b);
}
+%PrepareFunctionForOptimization(umin);
umin(1, 1);
umin(2, 2);
%OptimizeFunctionOnNextCall(umin);
@@ -26,6 +27,7 @@ function umax(a, b) {
return Math.max(a, b);
}
+%PrepareFunctionForOptimization(umax);
umax(1, 1);
umax(2, 2);
%OptimizeFunctionOnNextCall(umax);
diff --git a/deps/v8/test/mjsunit/es6/for-each-in-catch.js b/deps/v8/test/mjsunit/es6/for-each-in-catch.js
index 674cddd047..b38013eeb3 100644
--- a/deps/v8/test/mjsunit/es6/for-each-in-catch.js
+++ b/deps/v8/test/mjsunit/es6/for-each-in-catch.js
@@ -5,25 +5,25 @@
function checkIsRedeclarationError(code) {
try {
eval(`
-checkIsRedeclarationError : {
- break checkIsRedeclarationError;
-${code}
-}
-`);
+ checkIsRedeclarationError: {
+ break checkIsRedeclarationError;
+ ${code}
+ }
+ `);
assertUnreachable();
- } catch(e) {
- assertInstanceof(e, SyntaxError );
- assertTrue( e.toString().indexOf("has already been declared") >= 0 );
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ assertTrue(e.toString().includes("has already been declared"));
}
}
function checkIsNotRedeclarationError(code) {
- assertDoesNotThrow(()=>eval(`
-checkIsNotRedeclarationError_label : {
- break checkIsNotRedeclarationError_label;
-${code}
-}
-`));
+ assertDoesNotThrow(() => eval(`
+ checkIsNotRedeclarationError_label: {
+ break checkIsNotRedeclarationError_label;
+ ${code}
+ }
+ `));
}
@@ -52,143 +52,145 @@ let not_var_e = [
'const {f:e}'
];
-// Check that `for (var ... of ...)` cannot redeclare a simple catch variable
-// but `for (var ... in ...)` can.
+// Check that both `for (var ... of ...)` and `for (var ... in ...)`
+// can redeclare a simple catch variable.
for (let binding of var_e) {
- checkIsRedeclarationError(`
-try {
- throw 0;
-} catch(e) {
- for (${binding} of []);
-}
-`);
+ checkIsNotRedeclarationError(`
+ try {
+ throw 0;
+ } catch (e) {
+ for (${binding} of []);
+ }
+ `);
checkIsNotRedeclarationError(`
-try {
- throw 0;
-} catch(e) {
- for (${binding} in []);
-}
-`);
+ try {
+ throw 0;
+ } catch (e) {
+ for (${binding} in []);
+ }
+ `);
}
-// Check that the above error occurs even for nested catches.
+// Check that the above applies even for nested catches.
for (let binding of var_e) {
- checkIsRedeclarationError(`
-try {
- throw 0;
-} catch(e) {
- try {
- throw 1;
- } catch(f) {
+ checkIsNotRedeclarationError(`
try {
- throw 2;
- } catch({}) {
- for (${binding} of []);
+ throw 0;
+ } catch (e) {
+ try {
+ throw 1;
+ } catch (f) {
+ try {
+ throw 2;
+ } catch ({}) {
+ for (${binding} of []);
+ }
+ }
}
- }
-}
-`);
+ `);
checkIsNotRedeclarationError(`
-try {
- throw 0;
-} catch(e) {
- try {
- throw 1;
- } catch(f) {
try {
- throw 2;
- } catch({}) {
- for (${binding} in []);
+ throw 0;
+ } catch (e) {
+ try {
+ throw 1;
+ } catch (f) {
+ try {
+ throw 2;
+ } catch ({}) {
+ for (${binding} in []);
+ }
+ }
}
- }
-}
-`);
+ `);
}
-// Check that the above error does not occur if a declaration scope is between
-// the catch and the loop.
+// Check that the above applies if a declaration scope is between the
+// catch and the loop.
for (let binding of var_e) {
checkIsNotRedeclarationError(`
-try {
- throw 0;
-} catch(e) {
- (()=>{for (${binding} of []);})();
-}
-`);
+ try {
+ throw 0;
+ } catch (e) {
+ (()=>{for (${binding} of []);})();
+ }
+ `);
checkIsNotRedeclarationError(`
-try {
- throw 0;
-} catch(e) {
- (function(){for (${binding} of []);})();
-}
-`);
+ try {
+ throw 0;
+ } catch (e) {
+ (function() {
+ for (${binding} of []);
+ })();
+ }
+ `);
}
// Check that there is no error when not declaring a var named e.
for (let binding of not_var_e) {
checkIsNotRedeclarationError(`
-try {
- throw 0;
-} catch(e) {
- for (${binding} of []);
-}
-`);
+ try {
+ throw 0;
+ } catch (e) {
+ for (${binding} of []);
+ }
+ `);
}
// Check that there is an error for both for-in and for-of when redeclaring
-// a non-simple catch parameter
+// a non-simple catch parameter.
for (let binding of var_e) {
checkIsRedeclarationError(`
-try {
- throw 0;
-} catch({e}) {
- for (${binding} of []);
-}
-`);
+ try {
+ throw 0;
+ } catch ({e}) {
+ for (${binding} of []);
+ }
+ `);
checkIsRedeclarationError(`
-try {
- throw 0;
-} catch({e}) {
- for (${binding} in []);
-}
-`);
+ try {
+ throw 0;
+ } catch ({e}) {
+ for (${binding} in []);
+ }
+ `);
}
// Check that the above error occurs even for nested catches.
for (let binding of var_e) {
checkIsRedeclarationError(`
-try {
- throw 0;
-} catch({e}) {
- try {
- throw 1;
- } catch(f) {
try {
- throw 2;
- } catch({}) {
- for (${binding} of []);
+ throw 0;
+ } catch ({e}) {
+ try {
+ throw 1;
+ } catch (f) {
+ try {
+ throw 2;
+ } catch ({}) {
+ for (${binding} of []);
+ }
+ }
}
- }
-}
-`);
+ `);
checkIsRedeclarationError(`
-try {
- throw 0;
-} catch({e}) {
- try {
- throw 1;
- } catch(f) {
try {
- throw 2;
- } catch({}) {
- for (${binding} in []);
+ throw 0;
+ } catch ({e}) {
+ try {
+ throw 1;
+ } catch (f) {
+ try {
+ throw 2;
+ } catch ({}) {
+ for (${binding} in []);
+ }
+ }
}
- }
-}
-`);
+ `);
}
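A sketch (not part of the patch) of the semantics the rewritten assertions encode: a simple catch parameter may now be redeclared by a var binding in both for-of and for-in, while a destructured catch parameter still may not.

try { throw 0; } catch (e) { for (var e of []) {} }       // allowed: simple parameter
try { throw 0; } catch (e) { for (var e in []) {} }       // allowed
// try { throw 0; } catch ({e}) { for (var e of []) {} }  // still a SyntaxError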
diff --git a/deps/v8/test/mjsunit/es6/proxies-ownkeys-clone.js b/deps/v8/test/mjsunit/es6/proxies-ownkeys-clone.js
new file mode 100644
index 0000000000..8fe9598fdb
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/proxies-ownkeys-clone.js
@@ -0,0 +1,25 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var p = new Proxy({a: 1, b: 2}, {
+ ownKeys() { return ['a', 'b']; }
+});
+
+// clone and return a
+function f(a) {
+ var y = {...a}
+ return y;
+}
+
+// Call with different maps to force it into megamorphic state
+f({a: 1, b: 2});
+f({a1: 1, b1: 3});
+f({a2: 1, b2: 3});
+f({a3: 1, b3: 4});
+f({a4: 1, b4: 5});
+
+// Test that y was initialized correctly in the slow path
+var clone = f(p);
+assertEquals(clone.a, 1);
+assertEquals(clone.b, 2);
diff --git a/deps/v8/test/mjsunit/es6/proxies-ownkeys.js b/deps/v8/test/mjsunit/es6/proxies-ownkeys.js
index 7cc0a87b68..3b9011acdc 100644
--- a/deps/v8/test/mjsunit/es6/proxies-ownkeys.js
+++ b/deps/v8/test/mjsunit/es6/proxies-ownkeys.js
@@ -54,9 +54,9 @@ assertEquals(["a", "b", "c"], Reflect.ownKeys(proxy));
keys.length = Math.pow(2, 33);
assertThrows("Reflect.ownKeys(proxy)", RangeError);
-// Check that we allow duplicated keys.
+// Check that we don't allow duplicated keys.
keys = ['a', 'a', 'a']
-assertEquals(keys, Reflect.ownKeys(proxy));
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
// Non-Name results throw.
keys = [1];
@@ -75,9 +75,9 @@ assertThrows("Reflect.ownKeys(proxy)", TypeError);
keys = ["nonconf"];
assertEquals(keys, Reflect.ownKeys(proxy));
-// Check that we allow duplicated keys.
+// Check that we don't allow duplicated keys.
keys = ['nonconf', 'nonconf', 'nonconf']
-assertEquals(keys, Reflect.ownKeys(proxy));
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
// Step 19a: The trap result must include all keys of a non-extensible target.
Object.preventExtensions(target);
@@ -89,6 +89,6 @@ assertEquals(keys, Reflect.ownKeys(proxy));
keys = ["nonconf", "target_one", "fantasy"];
assertThrows("Reflect.ownKeys(proxy)", TypeError);
-// Check that we allow duplicated keys.
+// Check that we don't allow duplicated keys.
keys = ['nonconf', 'target_one', 'nonconf', 'nonconf', 'target_one',]
-assertEquals(keys, Reflect.ownKeys(proxy));
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
diff --git a/deps/v8/test/mjsunit/es6/string-endswith.js b/deps/v8/test/mjsunit/es6/string-endswith.js
index b776ccc4ba..c9d5634393 100644
--- a/deps/v8/test/mjsunit/es6/string-endswith.js
+++ b/deps/v8/test/mjsunit/es6/string-endswith.js
@@ -417,3 +417,13 @@ assertThrows(function() {
}, TypeError);
re[Symbol.match] = false;
assertEquals(false, "".startsWith(re));
+
+let didThrow = false;
+try {
+ "".endsWith(/./);
+} catch (err) {
+ didThrow = true;
+ assertEquals(err.name, "TypeError");
+ assertEquals(err.message, "First argument to String.prototype.endsWith must not be a regular expression");
+}
+assertTrue(didThrow);
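For context, a hedged sketch of the guard the new assertions pin down: a RegExp argument throws, unless the object opts out of IsRegExp via Symbol.match.

const re = /c/;
// "abc".endsWith(re);     // TypeError: must not be a regular expression
re[Symbol.match] = false;  // no longer recognized as a RegExp
"abc".endsWith(re);        // re coerces to the string "/c/" -> false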
diff --git a/deps/v8/test/mjsunit/es6/typedarray-sort.js b/deps/v8/test/mjsunit/es6/typedarray-sort.js
index c5c4ff079a..7cd08b1258 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-sort.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-sort.js
@@ -69,6 +69,31 @@ for (var constructor of typedArrayConstructors) {
assertThrows(() => array.sort(), TypeError);
}
+// Check that TypedArray.p.sort is stable.
+for (let constructor of typedArrayConstructors) {
+ // Sort an array [0..kSize-1] modulo 4. If the sort is stable, the array
+ // will be partitioned into 4 parts, where each part has only increasing
+ // elements.
+ const kSize = 128;
+ const kModulo = 4;
+ const kRunSize = kSize / kModulo;
+
+ const template = Array.from({ length: kSize }, (_, i) => i);
+ const array = new constructor(template);
+
+ const compare = (a, b) => (b % kModulo) - (a % kModulo);
+ array.sort(compare);
+
+ function assertIncreasing(from) {
+ for (let i = from + 1; i < from + kRunSize; ++i) {
+ assertTrue(array[i - 1] < array[i]);
+ assertEquals(array[i - 1] % kModulo, array[i] % kModulo);
+ }
+ }
+
+ for (let i = 0; i < kModulo; ++i) assertIncreasing(i * kRunSize);
+}
+
// The following creates a test for each typed element kind, where the array
// to sort consists of some max/min/zero elements.
//
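A worked miniature of the partition check above, assuming kSize = 8 and kModulo = 4: the comparator orders by residue descending, and a stable sort must keep the original (increasing) order within each residue class.

const a = new Int32Array([0, 1, 2, 3, 4, 5, 6, 7]);
a.sort((x, y) => (y % 4) - (x % 4));
// Stable result: [3, 7, 2, 6, 1, 5, 0, 4] -- each run of length 2 is increasing.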
diff --git a/deps/v8/test/mjsunit/es8/object-entries.js b/deps/v8/test/mjsunit/es8/object-entries.js
index 51ce4692e4..f119cfc113 100644
--- a/deps/v8/test/mjsunit/es8/object-entries.js
+++ b/deps/v8/test/mjsunit/es8/object-entries.js
@@ -144,29 +144,11 @@ function TestOrderWithDuplicates(withWarmup) {
});
if (withWarmup) {
- for (const key in P) {}
+ for (const key in O) {};
+ try { for (const key in P) {} } catch {};
}
- log = [];
- assertEquals([
- ["a", 1],
- ["a", 1],
- ["456", 123],
- ["456", 123]
- ], Object.entries(P));
- assertEquals([
- "[[OwnPropertyKeys]]",
- "[[GetOwnProperty]](\"a\")",
- "[[Get]](\"a\")",
- "[[GetOwnProperty]](\"a\")",
- "[[Get]](\"a\")",
- "[[GetOwnProperty]](\"456\")",
- "[[Get]](\"456\")",
- "[[GetOwnProperty]](\"HIDDEN\")",
- "[[GetOwnProperty]](\"HIDDEN\")",
- "[[GetOwnProperty]](\"456\")",
- "[[Get]](\"456\")"
- ], log);
+ assertThrows(() => Object.entries(P), TypeError);
}
TestOrderWithDuplicates();
TestOrderWithDuplicates(true);
diff --git a/deps/v8/test/mjsunit/es8/object-get-own-property-descriptors.js b/deps/v8/test/mjsunit/es8/object-get-own-property-descriptors.js
index f88840dba4..0bd84bd6fe 100644
--- a/deps/v8/test/mjsunit/es8/object-get-own-property-descriptors.js
+++ b/deps/v8/test/mjsunit/es8/object-get-own-property-descriptors.js
@@ -193,21 +193,7 @@ function TestDuplicateKeys() {
defineProperty(target, name, desc) { assertUnreachable(); }
});
- var result = Object.getOwnPropertyDescriptors(P);
- assertEquals({
- "A": {
- "value": "VALUE",
- "writable": false,
- "enumerable": false,
- "configurable": true
- }
- }, result);
- assertTrue(result.hasOwnProperty("A"));
- assertEquals([
- "ownKeys()",
- "getOwnPropertyDescriptor(A)",
- "getOwnPropertyDescriptor(A)"
- ], log);
+ assertThrows(() => Object.getOwnPropertyDescriptors(P), TypeError);
}
TestDuplicateKeys();
diff --git a/deps/v8/test/mjsunit/es8/object-values.js b/deps/v8/test/mjsunit/es8/object-values.js
index 23fcaed1bc..b66e4af7d3 100644
--- a/deps/v8/test/mjsunit/es8/object-values.js
+++ b/deps/v8/test/mjsunit/es8/object-values.js
@@ -121,20 +121,7 @@ function TestOrderWithDuplicates() {
}
});
- assertEquals([1, 1, 123, 123], Object.values(P));
- assertEquals([
- "[[OwnPropertyKeys]]",
- "[[GetOwnProperty]](\"a\")",
- "[[Get]](\"a\")",
- "[[GetOwnProperty]](\"a\")",
- "[[Get]](\"a\")",
- "[[GetOwnProperty]](\"456\")",
- "[[Get]](\"456\")",
- "[[GetOwnProperty]](\"HIDDEN\")",
- "[[GetOwnProperty]](\"HIDDEN\")",
- "[[GetOwnProperty]](\"456\")",
- "[[Get]](\"456\")",
- ], log);
+ assertThrows(() => Object.values(P), TypeError);
}
TestOrderWithDuplicates();
diff --git a/deps/v8/test/mjsunit/for-of-in-catch-duplicate-decl.js b/deps/v8/test/mjsunit/for-of-in-catch-duplicate-decl.js
index ee0cda66d8..e1fdd43c94 100644
--- a/deps/v8/test/mjsunit/for-of-in-catch-duplicate-decl.js
+++ b/deps/v8/test/mjsunit/for-of-in-catch-duplicate-decl.js
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-assertThrows("try { } catch (e) { var e; for (var e of []) {} }")
+assertDoesNotThrow("try { } catch (e) { var e; for (var e of []) {} }")
diff --git a/deps/v8/test/mjsunit/harmony/array-flat-species.js b/deps/v8/test/mjsunit/harmony/array-flat-species.js
index d04f8a0875..7181c10bea 100644
--- a/deps/v8/test/mjsunit/harmony/array-flat-species.js
+++ b/deps/v8/test/mjsunit/harmony/array-flat-species.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-array-flat
-
{
class MyArray extends Array {
static get [Symbol.species]() {
diff --git a/deps/v8/test/mjsunit/harmony/array-flat.js b/deps/v8/test/mjsunit/harmony/array-flat.js
index 86571e8dce..9a291dc3b0 100644
--- a/deps/v8/test/mjsunit/harmony/array-flat.js
+++ b/deps/v8/test/mjsunit/harmony/array-flat.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-array-flat
-
assertEquals(Array.prototype.flat.length, 0);
assertEquals(Array.prototype.flat.name, 'flat');
diff --git a/deps/v8/test/mjsunit/harmony/array-flatMap-species.js b/deps/v8/test/mjsunit/harmony/array-flatMap-species.js
index d4159b4801..48f9bea2d0 100644
--- a/deps/v8/test/mjsunit/harmony/array-flatMap-species.js
+++ b/deps/v8/test/mjsunit/harmony/array-flatMap-species.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-array-flat
-
{
class MyArray extends Array {
static get [Symbol.species]() {
diff --git a/deps/v8/test/mjsunit/harmony/array-flatMap.js b/deps/v8/test/mjsunit/harmony/array-flatMap.js
index 9f0426fe7f..65a4025603 100644
--- a/deps/v8/test/mjsunit/harmony/array-flatMap.js
+++ b/deps/v8/test/mjsunit/harmony/array-flatMap.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-array-flat --allow-natives-syntax
+// Flags: --allow-natives-syntax
assertEquals(Array.prototype.flatMap.length, 1);
assertEquals(Array.prototype.flatMap.name, 'flatMap');
diff --git a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
index ef48cadcc6..ccb7c79f93 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
@@ -297,4 +297,8 @@
}{
assertThrows(() => BigInt.asUintN(3, 12), TypeError);
assertEquals(4n, BigInt.asUintN(3, "12"));
+}{
+ // crbug.com/936506
+ assertEquals(1n, BigInt.asUintN(15, 0x100000001n));
+ assertEquals(1n, BigInt.asUintN(15, 0x10000000000000001n));
}
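The crbug.com/936506 regression was in the truncation path: BigInt.asUintN(n, x) must equal x modulo 2^n even when x spans several 64-bit digits. A quick sanity sketch:

BigInt.asUintN(15, 0x100000001n);           // 1n: only the low 15 bits survive
0x10000000000000001n & ((1n << 15n) - 1n);  // 1n: the same mask written by hand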
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-8808.js b/deps/v8/test/mjsunit/harmony/regress/regress-8808.js
new file mode 100644
index 0000000000..8c63936382
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-8808.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-class-fields
+
+assertThrows(() => eval(`
+ class Foo {
+ #x = 1;
+ destructureX() {
+ const { #x: x } = this;
+ return x;
+ }
+ }
+`), SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/symbol-description.js b/deps/v8/test/mjsunit/harmony/symbol-description.js
index ccba44c978..39de1b065d 100644
--- a/deps/v8/test/mjsunit/harmony/symbol-description.js
+++ b/deps/v8/test/mjsunit/harmony/symbol-description.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-symbol-description
-
{
let desc = Object.getOwnPropertyDescriptor(Symbol.prototype, 'description');
assertEquals(desc.set, undefined);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
index 88fb020101..c1ec4070f4 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
@@ -4,151 +4,88 @@
// Flags: --harmony-weak-refs
-(function TestConstructWeakFactory() {
- let wf = new WeakFactory(() => {});
- assertEquals(wf.toString(), "[object WeakFactory]");
- assertNotSame(wf.__proto__, Object.prototype);
- assertSame(wf.__proto__.__proto__, Object.prototype);
+(function TestConstructFinalizationGroup() {
+ let fg = new FinalizationGroup(() => {});
+ assertEquals(fg.toString(), "[object FinalizationGroup]");
+ assertNotSame(fg.__proto__, Object.prototype);
+ assertSame(fg.__proto__.__proto__, Object.prototype);
})();
-(function TestWeakFactoryConstructorCallAsFunction() {
+(function TestFinalizationGroupConstructorCallAsFunction() {
let caught = false;
let message = "";
try {
- let f = WeakFactory(() => {});
+ let f = FinalizationGroup(() => {});
} catch (e) {
message = e.message;
caught = true;
} finally {
assertTrue(caught);
- assertEquals(message, "Constructor WeakFactory requires 'new'");
+ assertEquals(message, "Constructor FinalizationGroup requires 'new'");
}
})();
-(function TestConstructWeakFactoryCleanupNotCallable() {
- let message = "WeakFactory: cleanup must be callable";
- assertThrows(() => { let wf = new WeakFactory(); }, TypeError, message);
- assertThrows(() => { let wf = new WeakFactory(1); }, TypeError, message);
- assertThrows(() => { let wf = new WeakFactory(null); }, TypeError, message);
+(function TestConstructFinalizationGroupCleanupNotCallable() {
+ let message = "FinalizationGroup: cleanup must be callable";
+ assertThrows(() => { let fg = new FinalizationGroup(); }, TypeError, message);
+ assertThrows(() => { let fg = new FinalizationGroup(1); }, TypeError, message);
+ assertThrows(() => { let fg = new FinalizationGroup(null); }, TypeError, message);
})();
-(function TestConstructWeakFactoryWithCallableProxyAsCleanup() {
+(function TestConstructFinalizationGroupWithCallableProxyAsCleanup() {
let handler = {};
let obj = () => {};
let proxy = new Proxy(obj, handler);
- let wf = new WeakFactory(proxy);
+ let fg = new FinalizationGroup(proxy);
})();
-(function TestConstructWeakFactoryWithNonCallableProxyAsCleanup() {
- let message = "WeakFactory: cleanup must be callable";
+(function TestConstructFinalizationGroupWithNonCallableProxyAsCleanup() {
+ let message = "FinalizationGroup: cleanup must be callable";
let handler = {};
let obj = {};
let proxy = new Proxy(obj, handler);
- assertThrows(() => { let wf = new WeakFactory(proxy); }, TypeError, message);
+ assertThrows(() => { let fg = new FinalizationGroup(proxy); }, TypeError, message);
})();
-(function TestMakeCell() {
- let wf = new WeakFactory(() => {});
- let wc = wf.makeCell({});
- assertEquals(wc.toString(), "[object WeakCell]");
- assertNotSame(wc.__proto__, Object.prototype);
- assertSame(wc.__proto__.__proto__, Object.prototype);
- assertEquals(wc.holdings, undefined);
-
- let holdings_desc = Object.getOwnPropertyDescriptor(wc.__proto__, "holdings");
- assertEquals(true, holdings_desc.configurable);
- assertEquals(false, holdings_desc.enumerable);
- assertEquals("function", typeof holdings_desc.get);
- assertEquals(undefined, holdings_desc.set);
-
- let clear_desc = Object.getOwnPropertyDescriptor(wc.__proto__, "clear");
- assertEquals(true, clear_desc.configurable);
- assertEquals(false, clear_desc.enumerable);
- assertEquals("function", typeof clear_desc.value);
+(function TestRegisterWithNonObjectTarget() {
+ let fg = new FinalizationGroup(() => {});
+ let message = "FinalizationGroup.prototype.register: target must be an object";
+ assertThrows(() => fg.register(1, "holdings"), TypeError, message);
+ assertThrows(() => fg.register(false, "holdings"), TypeError, message);
+ assertThrows(() => fg.register("foo", "holdings"), TypeError, message);
+ assertThrows(() => fg.register(Symbol(), "holdings"), TypeError, message);
+ assertThrows(() => fg.register(null, "holdings"), TypeError, message);
+ assertThrows(() => fg.register(undefined, "holdings"), TypeError, message);
})();
-(function TestMakeCellWithHoldings() {
- let wf = new WeakFactory(() => {});
- let obj = {a: 1};
- let holdings = {b: 2};
- let wc = wf.makeCell(obj, holdings);
- assertSame(wc.holdings, holdings);
-})();
-
-(function TestMakeCellWithHoldingsSetHoldings() {
- let wf = new WeakFactory(() => {});
- let obj = {a: 1};
- let holdings = {b: 2};
- let wc = wf.makeCell(obj, holdings);
- assertSame(wc.holdings, holdings);
- wc.holdings = 5;
- assertSame(wc.holdings, holdings);
-})();
-
-(function TestMakeCellWithHoldingsSetHoldingsStrict() {
- "use strict";
- let wf = new WeakFactory(() => {});
- let obj = {a: 1};
- let holdings = {b: 2};
- let wc = wf.makeCell(obj, holdings);
- assertSame(wc.holdings, holdings);
- assertThrows(() => { wc.holdings = 5; }, TypeError);
- assertSame(wc.holdings, holdings);
-})();
-
-(function TestMakeCellWithNonObject() {
- let wf = new WeakFactory(() => {});
- let message = "WeakFactory.prototype.makeCell: target must be an object";
- assertThrows(() => wf.makeCell(), TypeError, message);
- assertThrows(() => wf.makeCell(1), TypeError, message);
- assertThrows(() => wf.makeCell(false), TypeError, message);
- assertThrows(() => wf.makeCell("foo"), TypeError, message);
- assertThrows(() => wf.makeCell(Symbol()), TypeError, message);
- assertThrows(() => wf.makeCell(null), TypeError, message);
- assertThrows(() => wf.makeCell(undefined), TypeError, message);
-})();
-
-(function TestMakeCellWithProxy() {
+(function TestRegisterWithProxy() {
let handler = {};
let obj = {};
let proxy = new Proxy(obj, handler);
- let wf = new WeakFactory(() => {});
- let wc = wf.makeCell(proxy);
+ let fg = new FinalizationGroup(() => {});
+ fg.register(proxy);
})();
-(function TestMakeCellTargetAndHoldingsSameValue() {
- let wf = new WeakFactory(() => {});
+(function TestRegisterTargetAndHoldingsSameValue() {
+ let fg = new FinalizationGroup(() => {});
let obj = {a: 1};
// SameValue(target, holdings) not ok
- assertThrows(() => wf.makeCell(obj, obj), TypeError,
- "WeakFactory.prototype.makeCell: target and holdings must not be same");
+ assertThrows(() => fg.register(obj, obj), TypeError,
+ "FinalizationGroup.prototype.register: target and holdings must not be same");
let holdings = {a: 1};
- let wc = wf.makeCell(obj, holdings);
+ fg.register(obj, holdings);
})();
-(function TestMakeCellWithoutWeakFactory() {
- assertThrows(() => WeakFactory.prototype.makeCell.call({}, {}), TypeError);
+(function TestRegisterWithoutFinalizationGroup() {
+ assertThrows(() => FinalizationGroup.prototype.register.call({}, {}, "holdings"), TypeError);
// Does not throw:
- let wf = new WeakFactory(() => {});
- WeakFactory.prototype.makeCell.call(wf, {});
+ let fg = new FinalizationGroup(() => {});
+ FinalizationGroup.prototype.register.call(fg, {}, "holdings");
})();
-(function TestHoldingsWithoutWeakCell() {
- let wf = new WeakFactory(() => {});
- let wc = wf.makeCell({});
- let holdings_getter = Object.getOwnPropertyDescriptor(wc.__proto__, "holdings").get;
- assertThrows(() => holdings_getter.call({}), TypeError);
- // Does not throw:
- holdings_getter.call(wc);
-})();
-
-(function TestClearWithoutWeakCell() {
- let wf = new WeakFactory(() => {});
- let wc = wf.makeCell({});
- let clear = Object.getOwnPropertyDescriptor(wc.__proto__, "clear").value;
- assertThrows(() => clear.call({}), TypeError);
- // Does not throw:
- clear.call(wc);
+(function TestUnregisterWithNonExistentKey() {
+ let fg = new FinalizationGroup(() => {});
+ fg.unregister({"k": "whatever"});
})();
(function TestWeakRefConstructor() {
@@ -194,21 +131,10 @@
let wr = new WeakRef(proxy);
})();
-(function TestCleanupSomeWithoutWeakFactory() {
- assertThrows(() => WeakFactory.prototype.cleanupSome.call({}), TypeError);
+(function TestCleanupSomeWithoutFinalizationGroup() {
+ assertThrows(() => FinalizationGroup.prototype.cleanupSome.call({}), TypeError);
// Does not throw:
- let wf = new WeakFactory(() => {});
- let rv = WeakFactory.prototype.cleanupSome.call(wf);
+ let fg = new FinalizationGroup(() => {});
+ let rv = FinalizationGroup.prototype.cleanupSome.call(fg);
assertEquals(undefined, rv);
})();
-
-(function TestDerefWithoutWeakRef() {
- let wf = new WeakFactory(() => {});
- let wc = wf.makeCell({});
- let wr = new WeakRef({});
- let deref = Object.getOwnPropertyDescriptor(wr.__proto__, "deref").value;
- assertThrows(() => deref.call({}), TypeError);
- assertThrows(() => deref.call(wc), TypeError);
- // Does not throw:
- deref.call(wr);
-})();
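A sketch of the renamed surface these tests now target (WeakFactory/WeakCell became FinalizationGroup in this V8 snapshot, still behind --harmony-weak-refs; the cleanup callback receives an iterator of holdings, not cells):

const fg = new FinalizationGroup(iter => {
  for (const holdings of iter) { /* one entry per reclaimed target */ }
});
const token = {};
fg.register({}, "holdings", token);  // target, holdings, optional unregister token
fg.unregister(token);                // cancels the pending finalization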
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-cells.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js
index f8e44c355c..20726284bb 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-cells.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js
@@ -7,48 +7,47 @@
let cleanup_call_count = 0;
let cleanup = function(iter) {
if (cleanup_call_count == 0) {
- // First call: iterate 2 of the 3 cells
- let cells = [];
- for (wc of iter) {
- cells.push(wc);
- // Don't iterate the rest of the cells
- if (cells.length == 2) {
+ // First call: iterate 2 of the 3 holdings
+ let holdings_list = [];
+ for (holdings of iter) {
+ holdings_list.push(holdings);
+ // Don't iterate the rest of the holdings
+ if (holdings_list.length == 2) {
break;
}
}
- assertEquals(cells.length, 2);
- assertTrue(cells[0].holdings < 3);
- assertTrue(cells[1].holdings < 3);
+ assertEquals(holdings_list.length, 2);
+ assertTrue(holdings_list[0] < 3);
+ assertTrue(holdings_list[1] < 3);
// Update call count only after the asserts; this ensures that the test
// fails even if the exceptions inside the cleanup function are swallowed.
cleanup_call_count++;
} else {
- // Second call: iterate one leftover cell and one new cell.
+ // Second call: iterate one leftover holdings and one new holdings.
assertEquals(1, cleanup_call_count);
- let cells = [];
- for (wc of iter) {
- cells.push(wc);
+ let holdings_list = [];
+ for (holdings of iter) {
+ holdings_list.push(holdings);
}
- assertEquals(cells.length, 2);
- assertTrue((cells[0].holdings < 3 && cells[1].holdings == 100) ||
- (cells[1].holdings < 3 && cells[0].holdings == 100));
+ assertEquals(holdings_list.length, 2);
+ assertTrue((holdings_list[0] < 3 && holdings_list[1] == 100) ||
+ (holdings_list[1] < 3 && holdings_list[0] == 100));
// Update call count only after the asserts; this ensures that the test
// fails even if the exceptions inside the cleanup function are swallowed.
cleanup_call_count++;
}
}
-let wf = new WeakFactory(cleanup);
-// Create 3 objects and WeakCells pointing to them. The objects need to be
-// inside a closure so that we can reliably kill them!
-let weak_cells = [];
+let fg = new FinalizationGroup(cleanup);
+// Create 3 objects and register them in the FinalizationGroup. The objects need
+// to be inside a closure so that we can reliably kill them!
(function() {
let objects = [];
for (let i = 0; i < 3; ++i) {
objects[i] = {a: i};
- weak_cells[i] = wf.makeCell(objects[i], i);
+ fg.register(objects[i], i);
}
gc();
@@ -58,14 +57,14 @@ let weak_cells = [];
objects = [];
})();
-// This GC will discover dirty WeakCells.
+// This GC will reclaim the targets.
gc();
assertEquals(0, cleanup_call_count);
let timeout_func_1 = function() {
assertEquals(1, cleanup_call_count);
- // Assert that the cleanup function won't be called unless new WeakCells appear.
+ // Assert that the cleanup function won't be called unless new targets appear.
setTimeout(timeout_func_2, 0);
}
@@ -74,9 +73,9 @@ setTimeout(timeout_func_1, 0);
let timeout_func_2 = function() {
assertEquals(1, cleanup_call_count);
- // Create a new WeakCells to be cleaned up.
+ // Create a new object and register it.
let obj = {};
- let wc = wf.makeCell(obj, 100);
+ let wc = fg.register(obj, 100);
obj = null;
gc();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
index 02f05ac8e2..97ab1dbd80 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
@@ -9,15 +9,15 @@ let r = Realm.create();
let cleanup = Realm.eval(r, "var stored_global; function cleanup() { stored_global = globalThis; } cleanup");
let realm_global_this = Realm.eval(r, "globalThis");
-let wf = new WeakFactory(cleanup);
+let fg = new FinalizationGroup(cleanup);
-// Create an object and a WeakCell pointing to it. The object needs to be inside
-// a closure so that we can reliably kill them!
+// Create an object and register it in the FinalizationGroup. The object needs
+// to be inside a closure so that we can reliably kill it!
let weak_cell;
(function() {
let object = {};
- weak_cell = wf.makeCell(object);
+ fg.register(object, {});
// object goes out of scope.
})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js
index 6a5bcfa821..c6b834e8fb 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js
@@ -19,17 +19,17 @@ let log = [];
let cleanup = (iter) => {
log.push("cleanup");
- for (wc of iter) { }
+ for (holdings of iter) { }
}
-let wf = new WeakFactory(cleanup);
+let fg = new FinalizationGroup(cleanup);
let o = null;
(function() {
// Use a closure here to avoid other references to o which might keep it alive
// (e.g., stack frames pointing to it).
o = {};
- wf.makeCell(o);
+ fg.register(o, {});
})();
let microtask_after_cleanup = () => {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
index 2e46830093..1d275a19aa 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
@@ -9,15 +9,15 @@ let r = Realm.create();
let cleanup = Realm.eval(r, "var stored_global; let cleanup = new Proxy(function() { stored_global = globalThis;}, {}); cleanup");
let realm_global_this = Realm.eval(r, "globalThis");
-let wf = new WeakFactory(cleanup);
+let fg = new FinalizationGroup(cleanup);
-// Create an object and a WeakCell pointing to it. The object needs to be inside
-// a closure so that we can reliably kill them!
+// Create an object and register it in the FinalizationGroup. The object needs
+// to be inside a closure so that we can reliably kill it!
let weak_cell;
(function() {
let object = {};
- weak_cell = wf.makeCell(object);
+ fg.register(object, "holdings");
// object goes out of scope.
})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-cleared-weakcell.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
index 631f43c012..0cef0a1af5 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-cleared-weakcell.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
@@ -5,31 +5,31 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_count = 0;
-let cleanup_cells = [];
+let cleanup_holdings = [];
let cleanup = function(iter) {
- for (wc of iter) {
- cleanup_cells.push(wc);
+ for (holdings of iter) {
+ cleanup_holdings.push(holdings);
}
++cleanup_count;
}
-let wf = new WeakFactory(cleanup);
-let weak_cell;
+let fg = new FinalizationGroup(cleanup);
+let key = {"k": "this is the key"};
(function() {
let o = {};
- weak_cell = wf.makeCell(o);
+ weak_cell = fg.register(o, "holdings", key);
- // cleanupSome won't do anything since there are no dirty WeakCells.
- wf.cleanupSome();
+ // cleanupSome won't do anything since there are no reclaimed targets.
+ fg.cleanupSome();
assertEquals(0, cleanup_count);
})();
// GC will detect the WeakCell as dirty.
gc();
-// Clear the WeakCell just before we would've called cleanupSome.
-weak_cell.clear();
+// Unregister the tracked object just before calling cleanupSome.
+fg.unregister(key);
-wf.cleanupSome();
+fg.cleanupSome();
assertEquals(0, cleanup_count);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-weakcell.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
index 84a946d390..1d3ceda3f2 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-weakcell.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
@@ -5,29 +5,28 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_count = 0;
-let cleanup_cells = [];
+let cleanup_holdings = [];
let cleanup = function(iter) {
- for (wc of iter) {
- cleanup_cells.push(wc);
+ for (holdings of iter) {
+ cleanup_holdings.push(holdings);
}
++cleanup_count;
}
-let wf = new WeakFactory(cleanup);
-let weak_cell;
+let fg = new FinalizationGroup(cleanup);
(function() {
let o = {};
- weak_cell = wf.makeCell(o);
+ fg.register(o, "holdings");
- // cleanupSome won't do anything since there are no dirty WeakCells.
- wf.cleanupSome();
+ // cleanupSome won't do anything since there are no reclaimed targets.
+ fg.cleanupSome();
assertEquals(0, cleanup_count);
})();
-// GC will detect the WeakCell as dirty.
+// GC will detect o as dead.
gc();
-wf.cleanupSome();
+fg.cleanupSome();
assertEquals(1, cleanup_count);
-assertEquals(1, cleanup_cells.length);
-assertEquals(weak_cell, cleanup_cells[0]);
+assertEquals(1, cleanup_holdings.length);
+assertEquals("holdings", cleanup_holdings[0]);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-called-twice.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-called-twice.js
deleted file mode 100644
index a5aa537ff2..0000000000
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clear-called-twice.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
-
-let cleanup_call_count = 0;
-let cleanup = function(iter) {
- ++cleanup_call_count;
-}
-
-let wf = new WeakFactory(cleanup);
-// Create an object and a WeakCell pointing to it. The object needs to be inside
-// a closure so that we can reliably kill them!
-let weak_cell;
-
-(function() {
- let object = {};
- weak_cell = wf.makeCell(object);
-
- // Clear the WeakCell before the GC has a chance to discover it.
- weak_cell.clear();
-
- // Call clear again (just to assert we handle this gracefully).
- weak_cell.clear();
-
- // object goes out of scope.
-})();
-
-// This GC will discover dirty WeakCells.
-gc();
-assertEquals(0, cleanup_call_count);
-
-// Assert that the cleanup function won't be called, since the WeakCell was cleared.
-let timeout_func = function() {
- assertEquals(0, cleanup_call_count);
-}
-
-setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-clears-factory-pointer.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-clears-factory-pointer.js
deleted file mode 100644
index 98410d5d0e..0000000000
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clear-clears-factory-pointer.js
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
-
-// Test that WeakCell.prototype.clear() also clears the WeakFactory pointer of
-// WeakCell. The only way to observe this is to assert that the WeakCell no
-// longer keeps its WeakFactory alive after clear() has been called.
-
-let weak_cell;
-let weak_cell_pointing_to_factory;
-
-let cleanup1_call_count = 0;
-let cleanup2_call_count = 0;
-
-let cleanup1 = function() {
- ++cleanup1_call_count;
-}
-
-let cleanup2 = function() {
- ++cleanup2_call_count;
-}
-
-let wf1 = new WeakFactory(cleanup1);
-
-(function(){
- let wf2 = new WeakFactory(cleanup2);
-
- (function() {
- let object = {};
- weak_cell = wf2.makeCell(object);
- // object goes out of scope.
- })();
-
- weak_cell_pointing_to_factory = wf1.makeCell(wf2);
- // wf goes out of scope
-})();
-
-weak_cell.clear();
-gc();
-
-// Assert that weak_cell_pointing_to_factory now got cleared.
-let timeout_func = function() {
- assertEquals(1, cleanup1_call_count);
- assertEquals(0, cleanup2_call_count);
-}
-
-setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup4.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup4.js
deleted file mode 100644
index 794f356119..0000000000
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup4.js
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
-
-let cleanup_call_count = 0;
-let cleanup_weak_cell_count = 0;
-let cleanup = function(iter) {
- for (wc of iter) {
- // See which WeakCell we're iterating over and clear the other one.
- if (wc == weak_cell1) {
- weak_cell2.clear();
- } else {
- assertSame(wc, weak_cell2);
- weak_cell1.clear();
- }
- ++cleanup_weak_cell_count;
- }
- ++cleanup_call_count;
-}
-
-let wf = new WeakFactory(cleanup);
-// Create an object and a WeakCell pointing to it. The object needs to be inside
-// a closure so that we can reliably kill them!
-let weak_cell1;
-let weak_cell2;
-
-(function() {
- let object1 = {};
- weak_cell1 = wf.makeCell(object1);
- let object2 = {};
- weak_cell2 = wf.makeCell(object2);
-
- // object1 and object2 go out of scope.
-})();
-
-// This GC will discover dirty WeakCells and schedule cleanup.
-gc();
-assertEquals(0, cleanup_call_count);
-
-// Assert that the cleanup function was called and iterated one WeakCell (but not the other one).
-let timeout_func = function() {
- assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_weak_cell_count);
-}
-
-setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-factory-keeps-weak-cells-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalization-group-keeps-holdings-alive.js
index 367cd9a9c0..ea35a2e63f 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weak-factory-keeps-weak-cells-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalization-group-keeps-holdings-alive.js
@@ -7,18 +7,19 @@
let cleanup_called = false;
let cleanup = function(iter) {
assertFalse(cleanup_called);
- let cells = [];
- for (wc of iter) {
- cells.push(wc);
+ let holdings_list = [];
+ for (holdings of iter) {
+ holdings_list.push(holdings);
}
- assertEquals(cells.length, 1);
- assertEquals(cells[0].holdings, "this is my cell");
+ assertEquals(holdings_list.length, 1);
+ assertEquals(holdings_list[0].a, "this is the holdings object");
cleanup_called = true;
}
-let wf = new WeakFactory(cleanup);
+let fg = new FinalizationGroup(cleanup);
let o1 = {};
-let wc1 = wf.makeCell(o1, "this is my cell");
+let holdings = {'a': 'this is the holdings object'};
+fg.register(o1, holdings);
gc();
assertFalse(cleanup_called);
@@ -26,9 +27,9 @@ assertFalse(cleanup_called);
// Drop the last references to o1.
o1 = null;
-// Drop the last reference to the WeakCell. The WeakFactory keeps it alive, so
-// the cleanup function will be called as normal.
-wc1 = null;
+// Drop the last reference to the holdings. The FinalizationGroup keeps it
+// alive, so the cleanup function will be called as normal.
+holdings = null;
gc();
assertFalse(cleanup_called);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakcell-and-weakref.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-and-weakref.js
index f6627be19e..bd66f1ce1d 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weakcell-and-weakref.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-and-weakref.js
@@ -7,27 +7,26 @@
let cleanup_called = false;
let cleanup = function(iter) {
assertFalse(cleanup_called);
- let cells = [];
- for (wc of iter) {
- cells.push(wc);
+ let holdings_list = [];
+ for (holdings of iter) {
+ holdings_list.push(holdings);
}
- assertEquals(1, cells.length);
- assertEquals(weak_cell, cells[0]);
+ assertEquals(1, holdings_list.length);
+ assertEquals("holdings", holdings_list[0]);
cleanup_called = true;
}
-let wf = new WeakFactory(cleanup);
+let fg = new FinalizationGroup(cleanup);
let weak_ref;
-let weak_cell;
(function() {
let o = {};
weak_ref = new WeakRef(o);
- weak_cell = wf.makeCell(o);
+ fg.register(o, "holdings");
})();
// Since the WeakRef was created during this turn, it is not cleared by GC. The
-// WeakCell is not cleared either, since the WeakRef keeps the target object
-// alive.
+// pointer inside the FinalizationGroup is not cleared either, since the WeakRef
+// keeps the target object alive.
gc();
(function() {
assertNotEquals(undefined, weak_ref.deref());
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/factory-scheduled-for-cleanup-multiple-times.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-scheduled-for-cleanup-multiple-times.js
index 2f3915478e..a1cff3aaa0 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/factory-scheduled-for-cleanup-multiple-times.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-scheduled-for-cleanup-multiple-times.js
@@ -6,66 +6,66 @@
// Flags: --no-stress-flush-bytecode
let cleanup0_call_count = 0;
-let cleanup0_weak_cell_count = 0;
+let cleanup0_holdings_count = 0;
let cleanup1_call_count = 0;
-let cleanup1_weak_cell_count = 0;
+let cleanup1_holdings_count = 0;
let cleanup0 = function(iter) {
- for (wc of iter) {
- ++cleanup0_weak_cell_count;
+ for (holdings of iter) {
+ ++cleanup0_holdings_count;
}
++cleanup0_call_count;
}
let cleanup1 = function(iter) {
- for (wc of iter) {
- ++cleanup1_weak_cell_count;
+ for (holdings of iter) {
+ ++cleanup1_holdings_count;
}
++cleanup1_call_count;
}
-let wf0 = new WeakFactory(cleanup0);
-let wf1 = new WeakFactory(cleanup1);
+let fg0 = new FinalizationGroup(cleanup0);
+let fg1 = new FinalizationGroup(cleanup1);
-// Create 1 WeakCell for each WeakFactory and kill the objects they point to.
+// Register 1 weak reference for each FinalizationGroup and kill the objects they point to.
(function() {
// The objects need to be inside a closure so that we can reliably kill them.
let objects = [];
objects[0] = {};
objects[1] = {};
- wf0.makeCell(objects[0]);
- wf1.makeCell(objects[1]);
+ fg0.register(objects[0], "holdings0-0");
+ fg1.register(objects[1], "holdings1-0");
// Drop the references to the objects.
objects = [];
- // Will schedule both wf0 and wf1 for cleanup.
+ // Will schedule both fg0 and fg1 for cleanup.
gc();
})();
// Before the cleanup task has a chance to run, do the same thing again, so both
-// factories are (again) scheduled for cleanup. This has to be a IIFE function
+// FinalizationGroups are (again) scheduled for cleanup. This has to be an IIFE
// (so that we can reliably kill the objects) so we cannot use the same function
// as before.
(function() {
let objects = [];
objects[0] = {};
objects[1] = {};
- wf0.makeCell(objects[0]);
- wf1.makeCell(objects[1]);
+ fg0.register(objects[0], "holdings0-1");
+ fg1.register(objects[1], "holdings1-1");
objects = [];
gc();
})();
let timeout_func = function() {
assertEquals(1, cleanup0_call_count);
- assertEquals(2, cleanup0_weak_cell_count);
+ assertEquals(2, cleanup0_holdings_count);
assertEquals(1, cleanup1_call_count);
- assertEquals(2, cleanup1_weak_cell_count);
+ assertEquals(2, cleanup1_holdings_count);
}
-// Give the cleanup task a chance to run. All WeakCells to cleanup will be
-// available during the same invocation of the cleanup function.
+// Give the cleanup task a chance to run. All holdings will be iterated during
+// the same invocation of the cleanup function.
setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/iterating-weak-cells.js b/deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js
index 9fef051122..73aac76378 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/iterating-weak-cells.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js
@@ -7,29 +7,25 @@
let cleanup_called = false;
let cleanup = function(iter) {
assertFalse(cleanup_called);
- let cells = [];
- for (wc of iter) {
- cells.push(wc);
+ let holdings_list = [];
+ for (holdings of iter) {
+ holdings_list.push(holdings);
}
- assertEquals(cells.length, 2);
- if (cells[0] == wc1) {
- assertEquals(cells[0].holdings, 1);
- assertEquals(cells[1], wc2);
- assertEquals(cells[1].holdings, 2);
+ assertEquals(holdings_list.length, 2);
+ if (holdings_list[0] == 1) {
+ assertEquals(holdings_list[1], 2);
} else {
- assertEquals(cells[0], wc2);
- assertEquals(cells[0].holdings, 2);
- assertEquals(cells[1], wc1);
- assertEquals(cells[1].holdings, 1);
+ assertEquals(holdings_list[0], 2);
+ assertEquals(holdings_list[1], 1);
}
cleanup_called = true;
}
-let wf = new WeakFactory(cleanup);
+let fg = new FinalizationGroup(cleanup);
let o1 = {};
let o2 = {};
-let wc1 = wf.makeCell(o1, 1);
-let wc2 = wf.makeCell(o2, 2);
+fg.register(o1, 1);
+fg.register(o2, 2);
gc();
assertFalse(cleanup_called);
@@ -37,8 +33,8 @@ assertFalse(cleanup_called);
// Drop the last references to o1 and o2.
o1 = null;
o2 = null;
-// GC will clear the WeakCells; the cleanup function will be called the next time
-// we enter the event loop.
+// GC will reclaim the target objects; the cleanup function will be called the
+// next time we enter the event loop.
gc();
assertFalse(cleanup_called);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-weak-factories.js b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
index 98a33df240..51e721401a 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-weak-factories.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
@@ -5,28 +5,26 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
-let cleanup_weak_cell_count = 0;
+let cleanup_holdings_count = 0;
let cleanup = function(iter) {
- for (wc of iter) {
- ++cleanup_weak_cell_count;
+ for (holdings of iter) {
+ ++cleanup_holdings_count;
}
++cleanup_call_count;
}
-let wf1 = new WeakFactory(cleanup);
-let wf2 = new WeakFactory(cleanup);
+let fg1 = new FinalizationGroup(cleanup);
+let fg2 = new FinalizationGroup(cleanup);
-// Create two objects and WeakCells pointing to them. The objects need to be inside
-// a closure so that we can reliably kill them!
-let weak_cell1;
-let weak_cell2;
+// Create two objects and register them in FinalizationGroups. The objects need
+// to be inside a closure so that we can reliably kill them!
(function() {
let object1 = {};
- weak_cell1 = wf1.makeCell(object1);
+ fg1.register(object1, "holdings1");
let object2 = {};
- weak_cell2 = wf2.makeCell(object2);
+ fg2.register(object2, "holdings2");
// object1 and object2 go out of scope.
})();
@@ -35,10 +33,10 @@ let weak_cell2;
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called and iterated the WeakCells.
+// Assert that the cleanup function was called and iterated the holdings.
let timeout_func = function() {
assertEquals(2, cleanup_call_count);
- assertEquals(2, cleanup_weak_cell_count);
+ assertEquals(2, cleanup_holdings_count);
}
setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
new file mode 100644
index 0000000000..ac3dc6041a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup_holdings_count = 0;
+let cleanup = function(iter) {
+ for (holdings of iter) {
+ assertEquals(holdings, undefined);
+ ++cleanup_holdings_count;
+ }
+ ++cleanup_call_count;
+}
+
+let fg = new FinalizationGroup(cleanup);
+
+// Create an object and register it in the FinalizationGroup. The object needs to be inside
+// a closure so that we can reliably kill it!
+
+(function() {
+ let object = {};
+ fg.register(object);
+
+ // object goes out of scope.
+})();
+
+// This GC will reclaim the target object and schedule cleanup.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function was called and iterated the holdings.
+let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ assertEquals(1, cleanup_holdings_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-after-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
index 3392d7fbb9..f6480f86b6 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clear-after-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
@@ -5,42 +5,42 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
-let cleanup_weak_cell_count = 0;
+let cleanup_holdings_count = 0;
let cleanup = function(iter) {
- for (wc of iter) {
- assertSame(wc, weak_cell);
- ++cleanup_weak_cell_count;
+ for (holdings of iter) {
+ assertEquals("holdings", holdings);
+ ++cleanup_holdings_count;
}
++cleanup_call_count;
}
-let wf = new WeakFactory(cleanup);
-// Create an object and a WeakCell pointing to it. The object needs to be inside
-// a closure so that we can reliably kill them!
-let weak_cell;
+let fg = new FinalizationGroup(cleanup);
+let key = {"k": "this is the key"};
+// Create an object and register it in the FinalizationGroup. The object needs
+// to be inside a closure so that we can reliably kill it!
(function() {
let object = {};
- weak_cell = wf.makeCell(object);
+ fg.register(object, "holdings", key);
// object goes out of scope.
})();
-// This GC will discover dirty WeakCells and schedule cleanup.
+// This GC will reclaim the target object and schedule cleanup.
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called and iterated the WeakCell.
+// Assert that the cleanup function was called and iterated the holdings.
let timeout_func = function() {
assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_weak_cell_count);
+ assertEquals(1, cleanup_holdings_count);
- // Clear an already iterated over WeakCell.
- weak_cell.clear();
+ // Unregister an already iterated over weak reference.
+ fg.unregister(key);
// Assert that it didn't do anything.
setTimeout(() => { assertEquals(1, cleanup_call_count); }, 0);
- setTimeout(() => { assertEquals(1, cleanup_weak_cell_count); }, 0);
+ setTimeout(() => { assertEquals(1, cleanup_holdings_count); }, 0);
}
setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-before-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
index 1fd0fbf3b0..10b8bc67ff 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clear-before-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
@@ -9,30 +9,27 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let wf = new WeakFactory(cleanup);
-// Create an object and a WeakCell pointing to it. The object needs to be inside
-// a closure so that we can reliably kill them!
-let weak_cell;
+let fg = new FinalizationGroup(cleanup);
+let key = {"k": "this is the key"};
+// Create an object and register it in the FinalizationGroup. The object needs
+// to be inside a closure so that we can reliably kill it!
(function() {
let object = {};
- weak_cell = wf.makeCell(object, "my holdings");
+ fg.register(object, "my holdings", key);
// Clear the WeakCell before the GC has a chance to discover it.
- let return_value = weak_cell.clear();
+ let return_value = fg.unregister(key);
assertEquals(undefined, return_value);
- // Assert holdings got cleared too.
- assertEquals(undefined, weak_cell.holdings);
-
// object goes out of scope.
})();
-// This GC will discover dirty WeakCells.
+// This GC will reclaim the target object.
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function won't be called, since the WeakCell was cleared.
+// Assert that the cleanup function won't be called, since we called unregister.
let timeout_func = function() {
assertEquals(0, cleanup_call_count);
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
new file mode 100644
index 0000000000..e6ea150027
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup = function(iter) {
+ ++cleanup_call_count;
+}
+
+let fg = new FinalizationGroup(cleanup);
+let key = {"k": "this is the key"};
+// Create an object and register it in the FinalizationGroup. The object needs
+// to be inside a closure so that we can reliably kill it!
+
+(function() {
+ let object = {};
+ fg.register(object, "holdings", key);
+
+ // Unregister before the GC has a chance to discover the object.
+ fg.unregister(key);
+
+ // Call unregister again (just to assert we handle this gracefully).
+ fg.unregister(key);
+
+ // object goes out of scope.
+})();
+
+// This GC will reclaim the target object.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function won't be called, since the weak reference
+// was unregistered.
+let timeout_func = function() {
+ assertEquals(0, cleanup_call_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup1.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js
index 6c06d7af74..aa9eab20ff 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup1.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js
@@ -5,37 +5,37 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
-let cleanup_weak_cell_count = 0;
+let cleanup_holdings_count = 0;
let cleanup = function(iter) {
- // Clear the WeakCell before we've iterated through it.
- weak_cell.clear();
+ // Unregister before we've iterated through the holdings.
+ fg.unregister(key);
for (wc of iter) {
- ++cleanup_weak_cell_count;
+ ++cleanup_holdings_count;
}
++cleanup_call_count;
}
-let wf = new WeakFactory(cleanup);
-// Create an object and a WeakCell pointing to it. The object needs to be inside
-// a closure so that we can reliably kill them!
-let weak_cell;
+let fg = new FinalizationGroup(cleanup);
+let key = {"k": "the key"};
+// Create an object and register it in the FinalizationGroup. The object needs
+// to be inside a closure so that we can reliably kill it!
(function() {
let object = {};
- weak_cell = wf.makeCell(object);
+ fg.register(object, "holdings", key);
// object goes out of scope.
})();
-// This GC will discover dirty WeakCells and schedule cleanup.
+// This GC will discover unretained targets and schedule cleanup.
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called, but didn't iterate any weak cells.
+// Assert that the cleanup function was called, but didn't iterate any holdings.
let timeout_func = function() {
assertEquals(1, cleanup_call_count);
- assertEquals(0, cleanup_weak_cell_count);
+ assertEquals(0, cleanup_holdings_count);
}
setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup2.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
index 0aab366f97..84ec3aaef8 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup2.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
@@ -5,24 +5,24 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
-let cleanup_weak_cell_count = 0;
+let cleanup_holdings_count = 0;
let cleanup = function(iter) {
- for (wc of iter) {
- assertSame(wc, weak_cell);
- wc.clear();
- ++cleanup_weak_cell_count;
+ for (holdings of iter) {
+ assertEquals(holdings, "holdings");
+ fg.unregister(key);
+ ++cleanup_holdings_count;
}
++cleanup_call_count;
}
-let wf = new WeakFactory(cleanup);
-// Create an object and a WeakCell pointing to it. The object needs to be inside
-// a closure so that we can reliably kill them!
-let weak_cell;
+let fg = new FinalizationGroup(cleanup);
+// Create an object and register it in the FinalizationGroup. The object needs
+// to be inside a closure so that we can reliably kill it!
+let key = {"k": "this is the key"};
(function() {
let object = {};
- weak_cell = wf.makeCell(object);
+ fg.register(object, "holdings", key);
// object goes out of scope.
})();
@@ -34,7 +34,7 @@ assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called and iterated the WeakCell.
+// Assert that the cleanup function was called and iterated the holdings.
let timeout_func = function() {
assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_weak_cell_count);
+ assertEquals(1, cleanup_holdings_count);
}
setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup3.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
index 9dcea5ded5..39706a7b9b 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup3.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
@@ -5,37 +5,38 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
-let cleanup_weak_cell_count = 0;
+let cleanup_holdings_count = 0;
let cleanup = function(iter) {
- for (wc of iter) {
- assertSame(wc, weak_cell);
- ++cleanup_weak_cell_count;
+ for (holdings of iter) {
+ assertEquals(holdings, "holdings");
+ ++cleanup_holdings_count;
}
- // Clear an already iterated over WeakCell.
- weak_cell.clear();
+ // Unregister an already iterated over weak reference.
+ fg.unregister(key);
++cleanup_call_count;
}
-let wf = new WeakFactory(cleanup);
-// Create an object and a WeakCell pointing to it. The object needs to be inside
-// a closure so that we can reliably kill them!
-let weak_cell;
+let fg = new FinalizationGroup(cleanup);
+let key = {"k": "this is the key"};
+
+// Create an object and register it in the FinalizationGroup. The object needs
+// to be inside a closure so that we can reliably kill it!
(function() {
let object = {};
- weak_cell = wf.makeCell(object);
+ fg.register(object, "holdings", key);
// object goes out of scope.
})();
-// This GC will discover dirty WeakCells and schedule cleanup.
+// This GC will reclaim the target object and schedule cleanup.
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called and iterated the WeakCell.
+// Assert that the cleanup function was called and iterated the holdings.
let timeout_func = function() {
assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_weak_cell_count);
+ assertEquals(1, cleanup_holdings_count);
}
setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js
new file mode 100644
index 0000000000..67ed227502
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup_holdings_count = 0;
+let cleanup = function(iter) {
+ for (holdings of iter) {
+ // See which target we're iterating over and unregister the other one.
+ if (holdings == 1) {
+ fg.unregister(key2);
+ } else {
+ assertSame(holdings, 2);
+ fg.unregister(key1);
+ }
+ ++cleanup_holdings_count;
+ }
+ ++cleanup_call_count;
+}
+
+let fg = new FinalizationGroup(cleanup);
+let key1 = {"k": "first key"};
+let key2 = {"k": "second key"};
+// Create two objects and register them in the FinalizationGroup. The objects
+// need to be inside a closure so that we can reliably kill them!
+
+(function() {
+ let object1 = {};
+ fg.register(object1, 1, key1);
+ let object2 = {};
+ fg.register(object2, 2, key2);
+
+ // object1 and object2 go out of scope.
+})();
+
+// This GC will reclaim target objects and schedule cleanup.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function was called and iterated one holdings value (but not the other).
+let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ assertEquals(1, cleanup_holdings_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
new file mode 100644
index 0000000000..748b7065c6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
@@ -0,0 +1,50 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup_holdings_count = 0;
+let cleanup = function(iter) {
+ for (holdings of iter) {
+ assertEquals("holdings2", holdings);
+ ++cleanup_holdings_count;
+ }
+ ++cleanup_call_count;
+}
+
+let fg = new FinalizationGroup(cleanup);
+let key1 = {"k": "key1"};
+let key2 = {"k": "key2"};
+// Create three objects and register them in the FinalizationGroup. The objects
+// need to be inside a closure so that we can reliably kill them!
+
+(function() {
+ let object1a = {};
+ fg.register(object1a, "holdings1a", key1);
+
+ let object1b = {};
+ fg.register(object1b, "holdings1b", key1);
+
+ let object2 = {};
+ fg.register(object2, "holdings2", key2);
+
+ // Unregister before the GC has a chance to discover the objects.
+ fg.unregister(key1);
+
+ // objects go out of scope.
+})();
+
+// This GC will reclaim the target objects.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function will be called only for the reference which
+// was not unregistered.
+let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ assertEquals(1, cleanup_holdings_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-when-cleanup-already-scheduled.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
index 159fb0b140..2466568397 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clear-when-cleanup-already-scheduled.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
@@ -9,14 +9,14 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let wf = new WeakFactory(cleanup);
-// Create an object and a WeakCell pointing to it. The object needs to be inside
-// a closure so that we can reliably kill them!
-let weak_cell;
+let key = {"k": "this is my key"};
+let fg = new FinalizationGroup(cleanup);
+// Create an object and register it in the FinalizationGroup. The object needs
+// to be inside a closure so that we can reliably kill it!
(function() {
let object = {};
- weak_cell = wf.makeCell(object);
+ fg.register(object, {}, key);
// object goes out of scope.
})();
@@ -25,10 +25,10 @@ let weak_cell;
gc();
assertEquals(0, cleanup_call_count);
-// Clear the WeakCell before cleanup has ran.
-weak_cell.clear();
+// Unregister the object from the FinalizationGroup before cleanup has run.
+fg.unregister(key);
-// Assert that the cleanup function won't be called, since the WeakCell was cleared.
+// Assert that the cleanup function won't be called.
let timeout_func = function() {
assertEquals(0, cleanup_call_count);
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
index eb365986d7..170a52df10 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
@@ -8,16 +8,17 @@ let cleanup_called = false;
let cleanup = function(iter) {
assertFalse(cleanup_called);
let result = iter.next();
- assertEquals(result.value, wc);
+ assertEquals(result.value, holdings);
assertFalse(result.done);
result = iter.next();
assertTrue(result.done);
cleanup_called = true;
}
-let wf = new WeakFactory(cleanup);
+let fg = new FinalizationGroup(cleanup);
let o = {};
-let wc = wf.makeCell(o);
+let holdings = {'h': 55};
+fg.register(o, holdings);
gc();
assertFalse(cleanup_called);
diff --git a/deps/v8/test/mjsunit/integrity-level-map-update.js b/deps/v8/test/mjsunit/integrity-level-map-update.js
new file mode 100644
index 0000000000..b4e066f7de
--- /dev/null
+++ b/deps/v8/test/mjsunit/integrity-level-map-update.js
@@ -0,0 +1,166 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function SealAndReconfigure() {
+ function C() { this.x = 1; this.y = 1; Object.seal(this); }
+
+ let c1 = new C();
+
+ c1.x = 0.1;
+
+ let c2 = new C();
+ let c3 = new C();
+ let c4 = new C();
+
+ // The objects c2, c3 and c4 should follow the same transition
+ // path that we reconfigured c1 to.
+ assertTrue(%HaveSameMap(c1, c2));
+ assertTrue(%HaveSameMap(c1, c3));
+ assertTrue(%HaveSameMap(c1, c4));
+
+ c2.x = 0.1;
+ c3.x = 0.1;
+ c4.x = 0.1;
+
+ assertTrue(%HaveSameMap(c1, c2));
+ assertTrue(%HaveSameMap(c1, c3));
+ assertTrue(%HaveSameMap(c1, c4));
+})();
+
+(function SealAndReconfigureWithIC() {
+ function C() { this.x = 1; this.y = 1; Object.seal(this); }
+
+ let c1 = new C();
+
+ function g(o) {
+ o.x = 0.1;
+ }
+
+ g(c1);
+
+ let c2 = new C();
+ let c3 = new C();
+ let c4 = new C();
+
+ // The objects c2, c3 and c4 should follow the same transition
+ // path that we reconfigured c1 to.
+ assertTrue(%HaveSameMap(c1, c2));
+ assertTrue(%HaveSameMap(c1, c3));
+ assertTrue(%HaveSameMap(c1, c4));
+
+ g(c2);
+ g(c3);
+ g(c4);
+
+ assertTrue(%HaveSameMap(c1, c2));
+ assertTrue(%HaveSameMap(c1, c3));
+ assertTrue(%HaveSameMap(c1, c4));
+})();
+
+(function SealReconfigureAndMigrateWithIC() {
+ function C() { this.x = 1; this.y = 1; Object.seal(this); }
+
+ let c1 = new C();
+ let c2 = new C();
+ let c3 = new C();
+ let c4 = new C();
+
+ function g(o) {
+ o.x = 0.1;
+ }
+
+ g(c1);
+
+ // Now c2, c3 and c4 have deprecated maps.
+ assertFalse(%HaveSameMap(c1, c2));
+ assertFalse(%HaveSameMap(c1, c3));
+ assertFalse(%HaveSameMap(c1, c4));
+
+ g(c2);
+ g(c3);
+ g(c4);
+
+ assertTrue(%HaveSameMap(c1, c2));
+ assertTrue(%HaveSameMap(c1, c3));
+ assertTrue(%HaveSameMap(c1, c4));
+})();
+
+(function SealReconfigureAndMigrateWithOptCode() {
+ function C() { this.x = 1; this.y = 1; Object.seal(this); }
+
+ let c1 = new C();
+ let c2 = new C();
+ let c3 = new C();
+ let c4 = new C();
+
+ function g(o) {
+ o.x = 0.1;
+ }
+
+ g(c1);
+ g(c2);
+ g(c3);
+ %OptimizeFunctionOnNextCall(g);
+ g(c4);
+
+ assertTrue(%HaveSameMap(c1, c2));
+ assertTrue(%HaveSameMap(c1, c3));
+ assertTrue(%HaveSameMap(c1, c4));
+})();
+
+(function PreventExtensionsAndReconfigure() {
+ function C() { this.x = 1; this.y = 1; Object.preventExtensions(this); }
+
+ let c1 = new C();
+
+ function g(o) {
+ o.x = 0.1;
+ }
+
+ g(c1);
+
+ let c2 = new C();
+ let c3 = new C();
+ let c4 = new C();
+
+ c2.x = 0.1;
+ c3.x = 0.1;
+ c4.x = 0.1;
+
+ assertTrue(%HaveSameMap(c1, c2));
+ assertTrue(%HaveSameMap(c1, c3));
+ assertTrue(%HaveSameMap(c1, c4));
+})();
+
+(function PreventExtensionsSealAndReconfigure() {
+ function C() {
+ this.x = 1;
+ this.y = 1;
+ Object.preventExtensions(this);
+ Object.seal(this);
+ }
+
+ let c1 = new C();
+
+ function g(o) {
+ o.x = 0.1;
+ }
+
+ g(c1);
+
+ let c2 = new C();
+ let c3 = new C();
+ let c4 = new C();
+
+ c2.x = 0.1;
+ c3.x = 0.1;
+ c4.x = 0.1;
+
+ // Ideally, all the objects would have the same map, but at the moment
+ // we shortcut the unnecessary integrity level transitions.
+ assertTrue(%HaveSameMap(c2, c3));
+ assertTrue(%HaveSameMap(c2, c4));
+})();
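
The new integrity-level test file is dense, but the invariant it pins down fits in a few lines (a condensed sketch using the same mjsunit assertions and --allow-natives-syntax runtime calls as the file itself):

    function C() { this.x = 1; this.y = 1; Object.seal(this); }

    let a = new C();
    let b = new C();
    a.x = 0.1;  // reconfigures x from an SMI field to a double field,
                // deprecating the old sealed map
    b.x = 0.1;  // migrating b must land on a's new map, not fork another
    assertTrue(%HaveSameMap(a, b));
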
diff --git a/deps/v8/test/mjsunit/keyed-has-ic-module-export.js b/deps/v8/test/mjsunit/keyed-has-ic-module-export.js
new file mode 100644
index 0000000000..5183157ac4
--- /dev/null
+++ b/deps/v8/test/mjsunit/keyed-has-ic-module-export.js
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+
+export var a = "A";
+export var b = "B";
+export var c = "C";
diff --git a/deps/v8/test/mjsunit/keyed-has-ic-module-import.js b/deps/v8/test/mjsunit/keyed-has-ic-module-import.js
new file mode 100644
index 0000000000..77a42925f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/keyed-has-ic-module-import.js
@@ -0,0 +1,70 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+// MODULE
+
+import * as mod from "keyed-has-ic-module-export.js";
+
+function testIn(obj, key) {
+ return key in obj;
+}
+
+function expectTrue(obj, key) {
+ assertTrue(testIn(obj, key));
+}
+
+function expectFalse(obj, key) {
+ assertFalse(testIn(obj, key));
+}
+
+var tests = {
+  monomorphicTrue: function() {
+ expectTrue(mod, "a");
+ expectTrue(mod, "a");
+ expectTrue(mod, "a");
+ },
+
+  polymorphicKeyTrue: function() {
+ expectTrue(mod, "a");
+ expectTrue(mod, "b");
+ expectTrue(mod, "c");
+ },
+
+ monomorphicFalse: function() {
+ expectFalse(mod, "d");
+ expectFalse(mod, "d");
+ expectFalse(mod, "d");
+ },
+
+ polymorphicKeyFalse: function() {
+ expectFalse(mod, "d");
+ expectFalse(mod, "e");
+ expectFalse(mod, "f");
+ },
+
+ polymorphicTrue: function() {
+ var o = {a: "A"};
+ expectTrue(mod, "a");
+ expectTrue(o, "a");
+ expectTrue(mod, "a");
+ expectTrue(o, "a");
+ },
+
+ polymorphicFalse: function() {
+ var o = {a: "A"};
+ expectFalse(mod, "d");
+ expectFalse(o, "d");
+ expectFalse(mod, "d");
+ expectFalse(o, "d");
+ }
+};
+
+for (let test in tests) {
+ %DeoptimizeFunction(testIn);
+ %ClearFunctionFeedback(testIn);
+ tests[test]();
+ %OptimizeFunctionOnNextCall(testIn);
+ tests[test]();
+}
diff --git a/deps/v8/test/mjsunit/keyed-has-ic.js b/deps/v8/test/mjsunit/keyed-has-ic.js
new file mode 100644
index 0000000000..9e6fe25cc8
--- /dev/null
+++ b/deps/v8/test/mjsunit/keyed-has-ic.js
@@ -0,0 +1,402 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function testIn(obj, key) {
+ return key in obj;
+}
+
+function expectTrue(obj, key) {
+ assertTrue(testIn(obj, key));
+}
+
+function expectFalse(obj, key) {
+ assertFalse(testIn(obj, key));
+}
+
+var tests = {
+ TestMonomorphicPackedSMIArray: function() {
+ var a = [0, 1, 2];
+ expectTrue(a, 0);
+ expectTrue(a, 1);
+ expectTrue(a, 2);
+ expectFalse(a, 3);
+ },
+
+  TestMonomorphicPackedArrayPrototypeProperty: function() {
+ var a = [0, 1, 2];
+
+ expectTrue(a, 0);
+ expectTrue(a, 1);
+ expectFalse(a, 3);
+ Array.prototype[3] = 3;
+ expectTrue(a, 3);
+
+ // ensure the prototype change doesn't affect later tests
+ delete Array.prototype[3];
+ assertFalse((3 in Array.prototype));
+ expectFalse(a, 3);
+ },
+
+ TestMonomorphicPackedDoubleArray: function() {
+ var a = [0.0, 1.1, 2.2];
+ expectTrue(a, 0);
+ expectTrue(a, 1);
+ expectTrue(a, 2);
+ expectFalse(a, 3);
+ },
+
+ TestMonomorphicPackedArray: function() {
+ var a = ["A", "B", {}];
+ expectTrue(a, 0);
+ expectTrue(a, 1);
+ expectTrue(a, 2);
+ expectFalse(a, 3);
+ },
+
+ TestMonomorphicHoleyArray: function() {
+ var a = [0, 1, 2];
+ a[4] = 4;
+
+ expectTrue(a, 0);
+ expectTrue(a, 1);
+ expectTrue(a, 2);
+ expectFalse(a, 3);
+ expectTrue(a, 4);
+ },
+
+ TestMonomorphicTypedArray: function() {
+ var a = new Int32Array(3);
+ expectTrue(a, 0);
+ expectTrue(a, 1);
+ expectTrue(a, 2);
+ expectFalse(a, 3);
+ expectFalse(a, 4);
+ },
+
+ TestPolymorphicPackedArrays: function() {
+ var a = [0, 1, 2];
+ var b = [0.0, 1.1, 2.2];
+ var c = ["A", "B", {}];
+ expectTrue(c, 0);
+ expectTrue(b, 1);
+ expectTrue(a, 2);
+ expectTrue(c, 1);
+ expectTrue(b, 2);
+ expectTrue(a, 0);
+ expectFalse(c, 3);
+ expectFalse(b, 4);
+ expectFalse(a, 5);
+ },
+
+ TestPolymorphicMixedArrays: function() {
+ var a = new Array(3); // holey SMI
+ var b = [0.0,1.1,2.2]; // packed double
+ var c = new Int8Array(3); // typed array
+
+ expectFalse(a, 0);
+ expectTrue(b, 1);
+ expectTrue(c, 2);
+ expectFalse(a, 1);
+ expectTrue(b, 2);
+ expectTrue(c, 0);
+ expectFalse(a, 3);
+ expectFalse(b, 4);
+ expectFalse(c, 5);
+ },
+
+ TestMegamorphicArrays: function() {
+    var a = [0,1,2,3]; // packed SMI
+ var b = new Array(3); // holey SMI
+ var c = [0.0,1.1,2.2]; // packed double
+    var d = ['a', 'b', 'c']; // packed
+ var e = new Int8Array(3); // typed array
+ var f = new Uint8Array(3); // typed array
+ var g = new Int32Array(3); // typed array
+
+ expectTrue(a, 0);
+ expectFalse(b, 1);
+ expectTrue(c, 2);
+ expectFalse(d, 3);
+ expectFalse(e, 4);
+ expectFalse(f, 5);
+ expectFalse(g, 6);
+ expectFalse(a, 5);
+ expectFalse(b, 4);
+ expectFalse(c, 3);
+ expectTrue(d, 2);
+ expectTrue(e, 1);
+ expectTrue(f, 0);
+ expectTrue(g, 0);
+ },
+
+ TestMonomorphicObject: function() {
+ var a = { a: "A", b: "B" };
+
+ expectTrue(a, 'a');
+ expectTrue(a, 'a');
+ expectTrue(a, 'a');
+ },
+
+ TestMonomorphicProxyHasPropertyNoTrap: function() {
+ var a = new Proxy({a: 'A'}, {});
+
+ expectTrue(a, 'a');
+ expectTrue(a, 'a');
+ expectTrue(a, 'a');
+ },
+
+ TestMonomorphicProxyNoPropertyNoTrap: function() {
+ var a = new Proxy({}, {});
+
+ expectFalse(a, 'a');
+ expectFalse(a, 'a');
+ expectFalse(a, 'a');
+ },
+
+ TestMonomorphicProxyHasPropertyHasTrap: function() {
+ var a = new Proxy({a: 'A'}, { has: function() {return false;}});
+
+ expectFalse(a, 'a');
+ expectFalse(a, 'a');
+ expectFalse(a, 'a');
+ },
+
+ TestMonomorphicProxyNoPropertyHasTrap: function() {
+ var a = new Proxy({}, { has: function() { return true; }});
+
+ expectTrue(a, 'a');
+ expectTrue(a, 'a');
+ expectTrue(a, 'a');
+ },
+
+ TestMonomorphicObjectPrototype: function() {
+ var a = { b: "B" };
+
+ expectFalse(a, 'a');
+ expectFalse(a, 'a');
+ expectFalse(a, 'a');
+ Object.prototype.a = 'A';
+ expectTrue(a, 'a');
+ delete Object.prototype.a;
+    assertFalse(('a' in Object.prototype));
+ expectFalse(a, 'a');
+ },
+
+ TestPolymorphicObject: function() {
+ var a = { a: "A" };
+ var b = { a: "A", b: "B" };
+ var c = { b: "B", c: "C" };
+
+ expectTrue(a, 'a');
+ expectTrue(a, 'a');
+ expectTrue(b, 'a');
+ expectFalse(c, 'a');
+ expectTrue(a, 'a');
+ expectTrue(b, 'a');
+ expectFalse(c, 'a');
+ },
+
+ TestMegamorphicObject: function() {
+ var a = { a: "A" };
+ var b = { a: "A", b: "B" };
+ var c = { b: "B", c: "C" };
+ var d = { b: "A", a: "B" };
+ var e = { e: "E", a: "A" };
+ var f = { f: "F", b: "B", c: "C" };
+
+ expectTrue(a, 'a');
+ expectTrue(a, 'a');
+ expectTrue(b, 'a');
+ expectFalse(c, 'a');
+ expectTrue(d, 'a');
+ expectTrue(e, 'a');
+ expectFalse(f, 'a');
+ expectTrue(a, 'a');
+ expectTrue(b, 'a');
+ expectFalse(c, 'a');
+ expectTrue(d, 'a');
+ expectTrue(e, 'a');
+ expectFalse(f, 'a');
+ },
+
+ TestPolymorphicKeys: function() {
+ var a = { a: "A", b: "B" };
+
+ expectTrue(a, 'a');
+ expectTrue(a, 'b');
+ expectFalse(a, 'c');
+ expectTrue(a, 'a');
+ expectTrue(a, 'b');
+ expectFalse(a, 'c');
+ expectTrue(a, 'a');
+ expectTrue(a, 'b');
+ expectFalse(a, 'c');
+ },
+
+ TestPolymorphicMixed: function() {
+ var a = { a: "A" };
+ var b = new Proxy({}, {});
+ var c = new Int32Array(3);
+
+ expectTrue(a, 'a');
+ expectTrue(a, 'a');
+ expectFalse(b, 'a');
+ expectFalse(c, 'a');
+ expectTrue(a, 'a');
+ expectFalse(b, 'a');
+ expectFalse(c, 'a');
+ },
+};
+
+for (let test in tests) {
+ %DeoptimizeFunction(testIn);
+ %ClearFunctionFeedback(testIn);
+ tests[test]();
+ %OptimizeFunctionOnNextCall(testIn);
+ tests[test]();
+}
+
+// test function prototypes.
+(function() {
+ var o = function() {};
+
+ var proto = function() {
+ assertTrue("prototype" in o);
+ o.prototype;
+ }
+
+ proto();
+ proto();
+ proto();
+})();
+
+// `in` is not allowed on string
+(function() {
+ function test() {
+ 0 in "string"
+ };
+
+ assertThrows(test, TypeError);
+})();
+
+// `in` is allowed on `this` even when `this` is a string
+(function() {
+ function test() {
+ assertTrue("length" in this);
+ };
+
+ test.call("");
+ test.call("");
+ test.call("");
+})();
+
+(function() {
+ var index = 0;
+ function test(i) {
+ return index in arguments;
+ };
+
+  assertFalse(test());
+  assertFalse(test());
+ assertTrue(test(0));
+ assertTrue(test(0,1));
+
+ index = 2;
+  assertFalse(test());
+ assertFalse(test(0));
+ assertFalse(test(0,1));
+ assertTrue(test(0,1,2));
+})();
+
+(function() {
+ function test(a) {
+ arguments[3] = 1;
+ return 2 in arguments;
+ };
+
+ assertFalse(test(1));
+ assertFalse(test(1));
+ assertFalse(test(1));
+})();
+
+(function() {
+ function test(o, k) {
+ try {
+ k in o;
+ } catch (e) {
+ return false;
+ }
+ return true;
+ }
+
+ var str = "string";
+ // this will place slow_stub in the IC for strings.
+ assertFalse(test(str, "length"));
+ assertFalse(test(str, "length"));
+
+  // this turns the cache polymorphic, and generates LoadElement
+ // handlers for everything in the cache. This test ensures that
+ // KeyedLoadIC::LoadElementHandler can handle seeing string maps.
+ var ary = [0,1,2,3];
+ assertTrue(test(ary, 1));
+ assertTrue(test(ary, 1));
+
+ assertFalse(test(str, 0));
+ assertFalse(test(str, 0));
+})();
+
+(function() {
+ function test(o, k) {
+ try {
+ k in o;
+ } catch (e) {
+ return false;
+ }
+ return true;
+ }
+
+ var str = "string";
+ assertFalse(test(str, "length"));
+ assertFalse(test(str, "length"));
+ assertFalse(test(str, "length"));
+})();
+
+(function() {
+ function test(o, k) {
+ try {
+ k in o;
+ } catch (e) {
+ return false;
+ }
+ return true;
+ }
+
+ var str = "string";
+ assertFalse(test(str, 0));
+ assertFalse(test(str, 0));
+ assertFalse(test(str, 0));
+})();
+
+(function() {
+ function test(o, k) {
+ try {
+ k in o;
+ } catch (e) {
+ return false;
+ }
+ return true;
+ }
+
+ var ary = [0,1,2,3];
+ assertTrue(test(ary, 1));
+ assertTrue(test(ary, 1));
+
+ var str = "string";
+ assertFalse(test(str, 0));
+ assertFalse(test(str, 0));
+ assertFalse(test(str, 0));
+})();
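
The last few cases above pin down how `in` interacts with string receivers, which is easy to misread: a string primitive on the right-hand side throws, but a boxed string is an ordinary receiver (sketch; the test.call("") cases pass because sloppy mode boxes `this` into a String wrapper):

    // Throws: the right-hand side of `in` must be an object.
    assertThrows(() => { 0 in "string"; }, TypeError);

    // Fine: a String wrapper object is an ordinary receiver.
    assertTrue("length" in new String("string"));

    // Fine in sloppy mode: `this` is boxed to a String wrapper.
    (function() { assertTrue("length" in this); }).call("");
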
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index d5c796228c..916a7d554f 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -126,13 +126,6 @@ test(function() {
[].join(o);
}, "Cannot convert object to primitive value", TypeError);
-// kCircularStructure
-test(function() {
- var o = {};
- o.o = o;
- JSON.stringify(o);
-}, "Converting circular structure to JSON", TypeError);
-
// kConstructorNotFunction
test(function() {
Map();
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 41f2caee7a..cedb51b64e 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -107,14 +107,21 @@ var assertNotNull;
// Assert that the passed function or eval code throws an exception.
// The optional second argument is an exception constructor that the
// thrown exception is checked against with "instanceof".
-// The optional third argument is a message type string that is compared
-// to the type property on the thrown exception.
+// The optional third argument is a message type string or RegExp object that is
+// compared to the message of the thrown exception.
var assertThrows;
// Assert that the passed function throws an exception.
// The exception is checked against the second argument using assertEquals.
var assertThrowsEquals;
+// Assert that the passed promise does not resolve, but eventually throws an
+// exception. The optional second argument is an exception constructor that the
+// thrown exception is checked against with "instanceof".
+// The optional third argument is a message type string or RegExp object that is
+// compared to the message of the thrown exception.
+var assertThrowsAsync;
+
// Assert that the passed function or eval code does not throw an exception.
var assertDoesNotThrow;
@@ -213,7 +220,7 @@ var prettyPrinted;
// TODO(neis): Remove try-catch once BigInts are enabled by default.
try {
BigIntPrototypeValueOf = BigInt.prototype.valueOf;
- } catch(e) {}
+ } catch (e) {}
function classOf(object) {
// Argument must not be null or undefined.
@@ -480,45 +487,68 @@ var prettyPrinted;
}
};
+ function executeCode(code) {
+ if (typeof code === 'function') return code();
+ if (typeof code === 'string') return eval(code);
+ failWithMessage(
+ 'Given code is neither function nor string, but ' + (typeof code) +
+ ': <' + prettyPrinted(code) + '>');
+ }
+
+ function checkException(e, type_opt, cause_opt) {
+ if (type_opt !== undefined) {
+ assertEquals('function', typeof type_opt);
+ assertInstanceof(e, type_opt);
+ }
+ if (RegExp !== undefined && cause_opt instanceof RegExp) {
+ assertMatches(cause_opt, e.message, 'Error message');
+ } else if (cause_opt !== undefined) {
+ assertEquals(cause_opt, e.message, 'Error message');
+ }
+ }
assertThrows = function assertThrows(code, type_opt, cause_opt) {
+ if (type_opt !== undefined && typeof type_opt !== 'function') {
+ failWithMessage(
+ 'invalid use of assertThrows, maybe you want assertThrowsEquals');
+ }
try {
- if (typeof code === 'function') {
- code();
- } else {
- eval(code);
- }
+ executeCode(code);
} catch (e) {
- if (typeof type_opt === 'function') {
- assertInstanceof(e, type_opt);
- } else if (type_opt !== void 0) {
- failWithMessage(
- 'invalid use of assertThrows, maybe you want assertThrowsEquals');
- }
- if (arguments.length >= 3) {
- if (cause_opt instanceof RegExp) {
- assertMatches(cause_opt, e.message, "Error message");
- } else {
- assertEquals(cause_opt, e.message, "Error message");
- }
- }
- // Success.
+ checkException(e, type_opt, cause_opt);
return;
}
- failWithMessage("Did not throw exception");
+ let msg = 'Did not throw exception';
+ if (type_opt !== undefined && type_opt.name !== undefined)
+ msg += ', expected ' + type_opt.name;
+ failWithMessage(msg);
};
-
assertThrowsEquals = function assertThrowsEquals(fun, val) {
try {
fun();
- } catch(e) {
+ } catch (e) {
assertSame(val, e);
return;
}
- failWithMessage("Did not throw exception");
+ failWithMessage('Did not throw exception, expected ' + prettyPrinted(val));
};
+ assertThrowsAsync = function assertThrowsAsync(promise, type_opt, cause_opt) {
+ if (type_opt !== undefined && typeof type_opt !== 'function') {
+ failWithMessage(
+        'invalid use of assertThrowsAsync, maybe you want assertThrowsEquals');
+ }
+ let msg = 'Promise did not throw exception';
+ if (type_opt !== undefined && type_opt.name !== undefined)
+ msg += ', expected ' + type_opt.name;
+ return assertPromiseResult(
+ promise,
+ // Use setTimeout to throw the error again to get out of the promise
+ // chain.
+ res => setTimeout(_ => fail('<throw>', res, msg), 0),
+ e => checkException(e, type_opt, cause_opt));
+ };
assertInstanceof = function assertInstanceof(obj, type) {
if (!(obj instanceof type)) {
@@ -533,15 +563,11 @@ var prettyPrinted;
}
};
-
- assertDoesNotThrow = function assertDoesNotThrow(code, name_opt) {
+ assertDoesNotThrow = function assertDoesNotThrow(code, name_opt) {
try {
- if (typeof code === 'function') {
- return code();
- } else {
- return eval(code);
- }
+ executeCode(code);
} catch (e) {
+ if (e instanceof MjsUnitAssertionError) throw e;
failWithMessage("threw an exception: " + (e.message || e));
}
};
@@ -584,13 +610,16 @@ var prettyPrinted;
}
assertPromiseResult = function(promise, success, fail) {
+ if (success !== undefined) assertEquals('function', typeof success);
+ if (fail !== undefined) assertEquals('function', typeof fail);
+ assertInstanceof(promise, Promise);
const stack = (new Error()).stack;
var test_promise = promise.then(
result => {
try {
if (--promiseTestCount == 0) testRunner.notifyDone();
- if (success) success(result);
+ if (success !== undefined) success(result);
} catch (e) {
// Use setTimeout to throw the error again to get out of the promise
// chain.
@@ -602,7 +631,7 @@ var prettyPrinted;
result => {
try {
if (--promiseTestCount == 0) testRunner.notifyDone();
- if (!fail) throw result;
+ if (fail === undefined) throw result;
fail(result);
} catch (e) {
// Use setTimeout to throw the error again to get out of the promise
@@ -667,7 +696,9 @@ var prettyPrinted;
// option is provided. Such tests must add --opt to flags comment.
assertFalse((opt_status & V8OptimizationStatus.kNeverOptimize) !== 0,
"test does not make sense with --no-opt");
- assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0, name_opt);
+ assertTrue(
+ (opt_status & V8OptimizationStatus.kIsFunction) !== 0,
+ 'should be a function: ' + name_opt);
if (skip_if_maybe_deopted &&
(opt_status & V8OptimizationStatus.kMaybeDeopted) !== 0) {
// When --deopt-every-n-times flag is specified it's no longer guaranteed
@@ -675,7 +706,9 @@ var prettyPrinted;
// to stress test the deoptimizer.
return;
}
- assertTrue((opt_status & V8OptimizationStatus.kOptimized) !== 0, name_opt);
+ assertTrue(
+ (opt_status & V8OptimizationStatus.kOptimized) !== 0,
+ 'should be optimized: ' + name_opt);
}
isNeverOptimizeLiteMode = function isNeverOptimizeLiteMode() {
@@ -772,7 +805,7 @@ var prettyPrinted;
return frame;
});
return "" + error.message + "\n" + ArrayPrototypeJoin.call(stack, "\n");
- } catch(e) {};
+ } catch (e) {};
return error.stack;
}
})();
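
The new assertThrowsAsync mirrors assertThrows for promises: it fails if the promise resolves, and otherwise routes the rejection through the same checkException helper. A hypothetical call site (not part of this diff) looks like:

    async function boom() { throw new RangeError("out of range"); }

    assertThrowsAsync(boom(), RangeError);                  // type only
    assertThrowsAsync(boom(), RangeError, "out of range");  // exact message
    assertThrowsAsync(boom(), RangeError, /range/);         // RegExp match
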
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 884c7cca92..da5f4d14f6 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -33,7 +33,6 @@
'harmony/modules-skip*': [SKIP],
'regress/modules-skip*': [SKIP],
'wasm/exceptions-utils': [SKIP],
- 'wasm/wasm-constants': [SKIP],
'wasm/wasm-module-builder': [SKIP],
# All tests in the bug directory are expected to fail.
@@ -78,6 +77,9 @@
# Too slow in debug mode and under turbofan.
'regress/regress-4595': [PASS, NO_VARIANTS, ['mode == debug', SKIP]],
+ # Too slow in debug mode, due to large allocations.
+ 'regress/regress-crbug-941743': [PASS, ['mode == debug', SKIP], ['(arch == arm or arch == arm64) and simulator_run == True', SKIP]],
+
##############################################################################
# Only RegExp stuff tested, no need for extensive optimizing compiler tests.
'regexp-global': [PASS, NO_VARIANTS],
@@ -323,7 +325,7 @@
}], # 'gc_stress == True'
##############################################################################
-['lite_mode', {
+['lite_mode or variant == jitless', {
# Skip tests not suitable for lite_mode.
# TODO(8596): We cache the templates in the feedback vector. In lite mode
@@ -343,15 +345,16 @@
'regress/regress-trap-allocation-memento': [SKIP],
'regress/regress-4121': [SKIP],
- # Slow tests without feedback vectors
- # TODO(mythria): Investigate why they are slow and either fix if
- # possible are update the reason why they are slow.
- 'spread-large-string': [SKIP],
- 'spread-large-array': [SKIP],
-
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'regress/regress-5888': [SKIP],
+ 'regress/regress-5911': [SKIP],
+ 'regress/regress-813440': [SKIP],
+ 'regress/regress-crbug-746835': [SKIP],
+ 'regress/regress-crbug-772056': [SKIP],
+ 'regress/regress-crbug-816961': [SKIP],
'regress/wasm/*': [SKIP],
'tools/compiler-trace-flags': [SKIP],
+ 'regress/regress-8947': [SKIP],
'wasm/*': [SKIP],
# Other tests that use asm / wasm / optimized code.
@@ -367,6 +370,7 @@
'regress/regress-6700': [SKIP],
'regress/regress-6838-2': [SKIP],
'regress/regress-6838-3': [SKIP],
+ 'regress/regress-crbug-934138': [SKIP],
# Timeouts in lite / jitless mode.
'asm/embenchen/*': [SKIP],
@@ -380,7 +384,157 @@
'regress/regress-crbug-721835': [SKIP],
'regress/regress-crbug-759327': [SKIP],
'regress/regress-crbug-898974': [SKIP],
-}], # 'lite_mode'
+}], # 'lite_mode or variant == jitless'
+
+##############################################################################
+['lite_mode', {
+ # TODO(v8:8510): Tests that currently fail with lazy source positions.
+ 'stack-traces-overflow': [SKIP],
+}], # lite_mode
+
+##############################################################################
+['variant == jitless', {
+ # https://crbug.com/v8/7777
+ 'array-literal-transitions': [SKIP],
+ 'array-push5': [SKIP],
+ 'array-shift4': [SKIP],
+ 'array-store-and-grow': [SKIP],
+ 'code-coverage-block-opt': [SKIP],
+ 'compiler/abstract-equal-receiver': [SKIP],
+ 'compiler/abstract-equal-symbol': [SKIP],
+ 'compiler/abstract-equal-undetectable': [SKIP],
+ 'compiler/array-buffer-is-view': [SKIP],
+ 'compiler/array-multiple-receiver-maps': [SKIP],
+ 'compiler/array-push-3': [SKIP],
+ 'compiler/array-slice-clone': [SKIP],
+ 'compiler/constant-fold-cow-array': [SKIP],
+ 'compiler/dataview-deopt': [SKIP],
+ 'compiler/dataview-get': [SKIP],
+ 'compiler/dataview-neutered': [SKIP],
+ 'compiler/dataview-set': [SKIP],
+ 'compiler/deopt-array-builtins': [SKIP],
+ 'compiler/deopt-array-push': [SKIP],
+ 'compiler/deopt-inlined-from-call': [SKIP],
+ 'compiler/deopt-numberoroddball-binop': [SKIP],
+ 'compiler/deopt-string-outofbounds': [SKIP],
+ 'compiler/dont-flush-code-marked-for-opt': [SKIP],
+ 'compiler/increment-typefeedback': [SKIP],
+ 'compiler/inlined-array-pop-opt': [SKIP],
+ 'compiler/inlined-call': [SKIP],
+ 'compiler/integral32-add-sub': [SKIP],
+ 'compiler/manual-concurrent-recompile': [SKIP],
+ 'compiler/math-imul': [SKIP],
+ 'compiler/native-context-specialization-hole-check': [SKIP],
+ 'compiler/number-abs': [SKIP],
+ 'compiler/number-ceil': [SKIP],
+ 'compiler/number-comparison-truncations': [SKIP],
+ 'compiler/number-divide': [SKIP],
+ 'compiler/number-floor': [SKIP],
+ 'compiler/number-max': [SKIP],
+ 'compiler/number-min': [SKIP],
+ 'compiler/number-modulus': [SKIP],
+ 'compiler/number-round': [SKIP],
+ 'compiler/number-toboolean': [SKIP],
+ 'compiler/number-trunc': [SKIP],
+ 'compiler/optimized-float32array-length': [SKIP],
+ 'compiler/optimized-float64array-length': [SKIP],
+ 'compiler/optimized-int32array-length': [SKIP],
+ 'compiler/optimized-uint32array-length': [SKIP],
+ 'compiler/opt-next-call': [SKIP],
+ 'compiler/opt-next-call-turbo': [SKIP],
+ 'compiler/promise-resolve-stable-maps': [SKIP],
+ 'compiler/redundancy-elimination': [SKIP],
+ 'compiler/regress-5320': [SKIP],
+ 'compiler/regress-compare-negate': [SKIP],
+ 'compiler/stress-deopt-count-1': [SKIP],
+ 'compiler/stress-deopt-count-2': [SKIP],
+ 'compiler/string-from-code-point': [SKIP],
+ 'compiler/uint8-clamped-array': [SKIP],
+ 'constant-folding-2': [SKIP],
+ 'default-nospec': [SKIP],
+ 'deopt-minus-zero': [SKIP],
+ 'deopt-recursive-eager-once': [SKIP],
+ 'deopt-recursive-lazy-once': [SKIP],
+ 'deopt-recursive-soft-once': [SKIP],
+ 'deopt-unlinked': [SKIP],
+ 'deopt-with-fp-regs': [SKIP],
+ 'deserialize-optimize-inner': [SKIP],
+ 'div-mul-minus-one': [SKIP],
+ 'elements-transition-hoisting': [SKIP],
+ 'ensure-growing-store-learns': [SKIP],
+ 'es6/array-iterator-turbo': [SKIP],
+ 'es6/block-let-crankshaft': [SKIP],
+ 'es6/block-let-crankshaft-sloppy': [SKIP],
+ 'es6/block-scoping': [SKIP],
+ 'es6/block-scoping-sloppy': [SKIP],
+ 'es6/collections-constructor-custom-iterator': [SKIP],
+ 'es6/collections-constructor-iterator-side-effect': [SKIP],
+ 'es6/collections-constructor-with-modified-array-prototype': [SKIP],
+ 'es6/collections-constructor-with-modified-protoype': [SKIP],
+ 'es6/map-constructor-entry-side-effect': [SKIP],
+ 'es6/map-constructor-entry-side-effect2': [SKIP],
+ 'es6/map-constructor-entry-side-effect3': [SKIP],
+ 'es6/map-constructor-entry-side-effect4': [SKIP],
+ 'field-type-tracking': [SKIP],
+ 'getters-on-elements': [SKIP],
+ 'ignition/throw-if-hole': [SKIP],
+ 'ignition/throw-if-not-hole': [SKIP],
+ 'ignition/throw-super-not-called': [SKIP],
+ 'keyed-load-hole-to-undefined': [SKIP],
+ 'keyed-load-with-string-key': [SKIP],
+ 'keyed-load-with-symbol-key': [SKIP],
+ 'math-deopt': [SKIP],
+ 'math-floor-of-div-minus-zero': [SKIP],
+ 'modules-turbo1': [SKIP],
+ 'never-optimize': [SKIP],
+ 'object-seal': [SKIP],
+ 'optimized-map': [SKIP],
+ 'regress/regress-2132': [SKIP],
+ 'regress/regress-2250': [SKIP],
+ 'regress/regress-2315': [SKIP],
+ 'regress/regress-2339': [SKIP],
+ 'regress/regress-2451': [SKIP],
+ 'regress/regress-252797': [SKIP],
+ 'regress/regress-2618': [SKIP],
+ 'regress/regress-3176': [SKIP],
+ 'regress/regress-3650-3': [SKIP],
+ 'regress/regress-3709': [SKIP],
+ 'regress/regress-385565': [SKIP],
+ 'regress/regress-4380': [SKIP],
+ 'regress/regress-5404': [SKIP],
+ 'regress/regress-5790': [SKIP],
+ 'regress/regress-5802': [SKIP],
+ 'regress/regress-6607-1': [SKIP],
+ 'regress/regress-6607-2': [SKIP],
+ 'regress/regress-6941': [SKIP],
+ 'regress/regress-6948': [SKIP],
+ 'regress/regress-6989': [SKIP],
+ 'regress/regress-6991': [SKIP],
+ 'regress/regress-7014-1': [SKIP],
+ 'regress/regress-7014-2': [SKIP],
+ 'regress/regress-7135': [SKIP],
+ 'regress/regress-7254': [SKIP],
+ 'regress/regress-7510': [SKIP],
+ 'regress/regress-794825': [SKIP],
+ 'regress/regress-8913': [SKIP],
+ 'regress/regress-crbug-554831': [SKIP],
+ 'regress/regress-crbug-587068': [SKIP],
+ 'regress/regress-crbug-594183': [SKIP],
+ 'regress/regress-crbug-882233-2': [SKIP],
+ 'regress/regress-embedded-cons-string': [SKIP],
+ 'regress/regress-map-invalidation-2': [SKIP],
+ 'regress/regress-param-local-type': [SKIP],
+ 'regress/regress-store-uncacheable': [SKIP],
+ 'regress/regress-v8-5697': [SKIP],
+ 'shared-function-tier-up-turbo': [SKIP],
+ 'shift-for-integer-div': [SKIP],
+ 'sin-cos': [SKIP],
+ 'smi-mul': [SKIP],
+ 'smi-mul-const': [SKIP],
+ 'string-deopt': [SKIP],
+ 'strong-rooted-literals': [SKIP],
+ 'unary-minus-deopt': [SKIP],
+}], # variant == jitless
##############################################################################
['byteorder == big', {
@@ -425,6 +579,9 @@
# BUG(v8:4016)
'regress/regress-crbug-467047': [SKIP],
+ # OOMing tests
+ 'regress/regress-500980': [SKIP],
+
# Slow tests.
'array-concat': [PASS, SLOW],
'array-indexing': [PASS, SLOW],
@@ -718,13 +875,13 @@
##############################################################################
['system == android', {
# Tests consistently failing on Android.
- # Unable to change locale on Android:
- 'icu-date-to-string': [FAIL],
- 'regress/regress-6288': [FAIL],
+  # Setting the locale via environment variables is unavailable on Android:
+ 'icu-date-to-string': [SKIP],
+ 'icu-date-lord-howe': [SKIP],
+ 'regress/regress-6288': [SKIP],
# OOM:
'regress/regress-748069': [FAIL],
'regress/regress-752764': [FAIL],
- 'regress/regress-779407': [FAIL],
# Flaky OOM:
'regress/regress-852258': [SKIP],
}], # 'system == android'
@@ -792,7 +949,6 @@
'compiler/native-context-specialization-hole-check': [PASS, FAIL],
'elements-transition-hoisting': [PASS, FAIL],
'es6/collections-constructor-custom-iterator': [PASS, FAIL],
- 'harmony/weakrefs/clear-clears-factory-pointer': [PASS, FAIL],
'ignition/throw-if-not-hole': [PASS, FAIL],
'keyed-load-with-symbol-key': [PASS, FAIL],
'object-seal': [PASS, FAIL],
@@ -1003,4 +1159,9 @@
'regress/regress-913844': [SKIP],
}],
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
+
]
diff --git a/deps/v8/test/mjsunit/object-freeze.js b/deps/v8/test/mjsunit/object-freeze.js
index 23f5af0f0c..8ff63b4fc0 100644
--- a/deps/v8/test/mjsunit/object-freeze.js
+++ b/deps/v8/test/mjsunit/object-freeze.js
@@ -396,3 +396,112 @@ assertTrue(Object.isFrozen(obj));
assertFalse(Object.getOwnPropertyDescriptor(args, "length").writable);
assertFalse(Object.getOwnPropertyDescriptor(args, "callee").writable);
})();
+
+// Test packed element array built-in functions with freeze.
+function testPackedFrozenArray1(obj) {
+ assertTrue(Object.isSealed(obj));
+ // Verify that the value can't be written
+  var obj1 = new Array(...obj);
+ var length = obj.length;
+ for (var i = 0; i < length-1; i++) {
+ obj[i] = 'new';
+ assertEquals(obj1[i], obj[i]);
+ }
+ // for symbol we cannot compare directly
+ assertTrue(typeof obj[length-1] == 'symbol');
+
+ // Verify that the length can't be written by builtins.
+ assertTrue(Array.isArray(obj));
+ assertThrows(function() { obj.pop(); }, TypeError);
+ assertThrows(function() { obj.push(); }, TypeError);
+ assertThrows(function() { obj.unshift(); }, TypeError);
+ assertThrows(function() { obj.copyWithin(0,0); }, TypeError);
+ assertThrows(function() { obj.fill(0); }, TypeError);
+ assertThrows(function() { obj.reverse(); }, TypeError);
+ assertThrows(function() { obj.sort(); }, TypeError);
+ assertThrows(function() { obj.splice(0); }, TypeError);
+ assertTrue(Object.isFrozen(obj));
+
+ // Verify search, filter, iterator
+ assertEquals(obj.lastIndexOf(1), 2);
+ assertEquals(obj.indexOf('a'), 4);
+ assertFalse(obj.includes(Symbol("test")));
+ assertEquals(obj.find(x => x==0), undefined);
+ assertEquals(obj.findIndex(x => x=='a'), 4);
+ assertTrue(obj.some(x => typeof x == 'symbol'));
+ assertFalse(obj.every(x => x == -1));
+ var filteredArray = obj.filter(e => typeof e == "symbol");
+ assertEquals(filteredArray.length, 1);
+ assertEquals(obj.map(x => x), obj);
+ var countPositiveNumber = 0;
+ obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+ });
+ assertEquals(countPositiveNumber, 1);
+ assertEquals(obj.length, obj.concat([]).length);
+ var iterator = obj.values();
+ assertEquals(iterator.next().value, undefined);
+ assertEquals(iterator.next().value, null);
+ var iterator = obj.keys();
+ assertEquals(iterator.next().value, 0);
+ assertEquals(iterator.next().value, 1);
+ var iterator = obj.entries();
+ assertEquals(iterator.next().value, [0, undefined]);
+ assertEquals(iterator.next().value, [1, null]);
+}
+
+obj = new Array(undefined, null, 1, -1, 'a', Symbol("test"));
+assertTrue(%HasPackedElements(obj));
+Object.freeze(obj);
+testPackedFrozenArray1(obj);
+
+// Verify change from sealed to frozen
+obj = new Array(undefined, null, 1, -1, 'a', Symbol("test"));
+assertTrue(%HasPackedElements(obj));
+Object.seal(obj);
+Object.freeze(obj);
+assertTrue(Object.isSealed(obj));
+testPackedFrozenArray1(obj);
+
+// Verify change from non-extensible to frozen
+obj = new Array(undefined, null, 1, -1, 'a', Symbol("test"));
+assertTrue(%HasPackedElements(obj));
+Object.preventExtensions(obj);
+Object.freeze(obj);
+assertTrue(Object.isSealed(obj));
+testPackedFrozenArray1(obj);
+
+// Verify flat, map, slice, flatMap, join, reduce, reduceRight for frozen packed array
+function testPackedFrozenArray2(arr) {
+ assertTrue(Object.isFrozen(arr));
+ assertTrue(Array.isArray(arr));
+ assertEquals(arr.map(x => [x]), [['a'], ['b'], ['c']]);
+ assertEquals(arr.flatMap(x => [x]), arr);
+ assertEquals(arr.flat(), arr);
+ assertEquals(arr.join('-'), "a-b-c");
+ const reducer = (accumulator, currentValue) => accumulator + currentValue;
+ assertEquals(arr.reduce(reducer), "abc");
+ assertEquals(arr.reduceRight(reducer), "cba");
+ assertEquals(arr.slice(0, 1), ['a']);
+}
+var arr1 = new Array('a', 'b', 'c');
+assertTrue(%HasPackedElements(arr1));
+Object.freeze(arr1);
+testPackedFrozenArray2(arr1);
+
+// Verify change from sealed to frozen
+var arr2 = new Array('a', 'b', 'c');
+assertTrue(%HasPackedElements(arr2));
+Object.seal(arr2);
+Object.freeze(arr2);
+testPackedFrozenArray2(arr2);
+
+// Verify change from non-extensible to frozen
+var arr2 = new Array('a', 'b', 'c');
+assertTrue(%HasPackedElements(arr2));
+Object.preventExtensions(arr2);
+Object.freeze(arr2);
+testPackedFrozenArray2(arr2);
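
The object-freeze.js additions reduce to one contract for frozen packed arrays, summarized in this sketch (sloppy mode, so plain element writes are dropped silently rather than throwing):

    var arr = Object.freeze(["a", "b", "c"]);

    arr[0] = "z";  // silently ignored (strict mode would throw)
    assertEquals("a", arr[0]);

    // Mutating builtins throw: length and elements are non-writable.
    assertThrows(function() { arr.push("d"); }, TypeError);
    assertThrows(function() { arr.sort(); }, TypeError);

    // Non-mutating builtins keep working and return fresh, mutable results.
    assertEquals(1, arr.indexOf("b"));
    assertEquals("a-b-c", arr.join("-"));
    assertEquals(["a"], arr.slice(0, 1));
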
diff --git a/deps/v8/test/mjsunit/object-get-own-property-names.js b/deps/v8/test/mjsunit/object-get-own-property-names.js
index aee6585680..7ce24fe5ce 100644
--- a/deps/v8/test/mjsunit/object-get-own-property-names.js
+++ b/deps/v8/test/mjsunit/object-get-own-property-names.js
@@ -28,6 +28,9 @@
// Test ES5 section 15.2.3.4 Object.getOwnPropertyNames.
// Check simple cases.
+var obj = {};
+assertEquals(0, Object.getOwnPropertyNames(obj).length);
+
var obj = { a: 1, b: 2};
var propertyNames = Object.getOwnPropertyNames(obj);
propertyNames.sort();
@@ -52,6 +55,13 @@ assertEquals("a", propertyNames[0]);
assertEquals("c", propertyNames[1]);
// Check that non-enumerable properties are being returned.
+var obj = {};
+Object.defineProperty(obj, 'x', {
+ value: 1,
+ enumerable: false
+});
+assertEquals(1, Object.getOwnPropertyNames(obj).length);
+
var propertyNames = Object.getOwnPropertyNames([1, 2]);
propertyNames.sort();
assertEquals(3, propertyNames.length);
diff --git a/deps/v8/test/mjsunit/object-keys.js b/deps/v8/test/mjsunit/object-keys.js
index d20556c905..847a673730 100644
--- a/deps/v8/test/mjsunit/object-keys.js
+++ b/deps/v8/test/mjsunit/object-keys.js
@@ -4,6 +4,31 @@
// Flags: --allow-natives-syntax
+// Ensure empty keys are handled properly
+(function() {
+ const a = {};
+ let k = Object.keys(a);
+ %HeapObjectVerify(k);
+ assertEquals(0, k.length);
+})();
+
+// Ensure non-enumerable keys are handled properly
+(function() {
+ const a = {};
+ Object.defineProperty(a, 'x', {
+ value: 1,
+ enumerable: false
+ });
+ let k = Object.keys(a);
+ %HeapObjectVerify(k);
+ assertEquals(0, k.length);
+
+ a.y = 2;
+ k = Object.keys(a);
+ %HeapObjectVerify(k);
+ assertEquals(1, k.length);
+})();
+
// Ensure that mutation of the Object.keys result doesn't affect the
// enumeration cache for fast-mode objects.
(function() {
diff --git a/deps/v8/test/mjsunit/object-prevent-extensions.js b/deps/v8/test/mjsunit/object-prevent-extensions.js
index 9f3091ebb4..9e8cd03a7a 100644
--- a/deps/v8/test/mjsunit/object-prevent-extensions.js
+++ b/deps/v8/test/mjsunit/object-prevent-extensions.js
@@ -160,3 +160,90 @@ assertFalse(Object.isExtensible(obj2));
assertFalse(Object.isSealed(obj));
assertFalse(Object.isSealed(obj2));
assertTrue(%HaveSameMap(obj, obj2));
+
+// Test packed element array built-in functions with preventExtensions.
+obj = new Array(undefined, null, 1, -1, 'a', Symbol("test"));
+assertTrue(%HasPackedElements(obj));
+Object.preventExtensions(obj);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+
+// Verify that the length can't be written by builtins.
+assertThrows(function() { obj.push(1); }, TypeError);
+assertThrows(function() { obj.unshift(1); }, TypeError);
+assertThrows(function() { obj.splice(0, 0, 1); }, TypeError);
+
+// Verify search, filter, iterator
+obj = new Array(undefined, null, 1, -1, 'a', Symbol("test"));
+assertTrue(%HasPackedElements(obj));
+Object.preventExtensions(obj);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+assertEquals(obj.lastIndexOf(1), 2);
+assertEquals(obj.indexOf('a'), 4);
+assertFalse(obj.includes(Symbol("test")));
+assertEquals(obj.find(x => x==0), undefined);
+assertEquals(obj.findIndex(x => x=='a'), 4);
+assertTrue(obj.some(x => typeof x == 'symbol'));
+assertFalse(obj.every(x => x == -1));
+var filteredArray = obj.filter(e => typeof e == "symbol");
+assertEquals(filteredArray.length, 1);
+assertEquals(obj.map(x => x), obj);
+var countPositiveNumber = 0;
+obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+});
+assertEquals(countPositiveNumber, 1);
+assertEquals(obj.length, obj.concat([]).length);
+var iterator = obj.values();
+assertEquals(iterator.next().value, undefined);
+assertEquals(iterator.next().value, null);
+var iterator = obj.keys();
+assertEquals(iterator.next().value, 0);
+assertEquals(iterator.next().value, 1);
+var iterator = obj.entries();
+assertEquals(iterator.next().value, [0, undefined]);
+assertEquals(iterator.next().value, [1, null]);
+
+// Verify that the value can be written
+var length = obj.length;
+for (var i = 0; i < length-1; i++) {
+ obj[i] = 'new';
+ assertEquals(obj[i], 'new');
+}
+
+// Verify flat, map, flatMap, join, reduce, reduceRight for non-extensible packed array
+var arr = ['a', 'b', 'c'];
+assertTrue(%HasPackedElements(arr));
+Object.preventExtensions(arr);
+assertFalse(Object.isSealed(arr));
+assertFalse(Object.isFrozen(arr));
+assertFalse(Object.isExtensible(arr));
+assertTrue(Array.isArray(arr));
+assertEquals(arr.map(x => [x]), [['a'], ['b'], ['c']]);
+assertEquals(arr.flatMap(x => [x]), arr);
+assertEquals(arr.flat(), arr);
+assertEquals(arr.join('-'), "a-b-c");
+const reducer = (accumulator, currentValue) => accumulator + currentValue;
+assertEquals(arr.reduce(reducer), "abc");
+assertEquals(arr.reduceRight(reducer), "cba");
+assertEquals(arr.slice(0, 1), ['a']);
+
+// Verify changing the content of a non-extensible packed array
+arr.sort();
+assertEquals(arr.join(''), "abc");
+arr.reverse();
+assertEquals(arr.join(''), "cba");
+arr.copyWithin(0, 1, 2);
+assertEquals(arr.join(''),"bba");
+arr.fill('d');
+assertEquals(arr.join(''), "ddd");
+arr.pop();
+assertEquals(arr.join(''), "dd");
diff --git a/deps/v8/test/mjsunit/object-seal.js b/deps/v8/test/mjsunit/object-seal.js
index 265e50abc3..51459908ca 100644
--- a/deps/v8/test/mjsunit/object-seal.js
+++ b/deps/v8/test/mjsunit/object-seal.js
@@ -153,7 +153,6 @@ arr[0] = 'foo';
// We should be able to overwrite the existing value.
assertEquals('foo', arr[0]);
-
// Test that isSealed returns the correct value even if configurable
// has been set to false on all properties manually and the extensible
// flag has also been set to false manually.
@@ -393,3 +392,104 @@ assertTrue((new Sealed()).prototypeExists);
obj = new Int32Array(10)
Object.seal(obj);
assertTrue(Object.isSealed(obj));
+
+// Test packed element array built-in functions with seal.
+function testPackedSealedArray1(obj) {
+ assertTrue(Object.isSealed(obj));
+ assertFalse(Object.isFrozen(obj));
+ assertTrue(Array.isArray(obj));
+
+ // Verify that the length can't be written by builtins.
+ assertThrows(function() { obj.pop(); }, TypeError);
+ assertThrows(function() { obj.push(1); }, TypeError);
+ assertThrows(function() { obj.unshift(1); }, TypeError);
+ assertThrows(function() { obj.splice(0); }, TypeError);
+
+ // Verify search, filter, iterator
+ obj = new Array(undefined, null, 1, -1, 'a', Symbol("test"));
+ assertTrue(%HasPackedElements(obj));
+ Object.seal(obj);
+ assertTrue(Object.isSealed(obj));
+ assertFalse(Object.isFrozen(obj));
+ assertTrue(Array.isArray(obj));
+ assertEquals(obj.lastIndexOf(1), 2);
+ assertEquals(obj.indexOf('a'), 4);
+ assertFalse(obj.includes(Symbol("test")));
+ assertEquals(obj.find(x => x==0), undefined);
+ assertEquals(obj.findIndex(x => x=='a'), 4);
+ assertTrue(obj.some(x => typeof x == 'symbol'));
+ assertFalse(obj.every(x => x == -1));
+ var filteredArray = obj.filter(e => typeof e == "symbol");
+ assertEquals(filteredArray.length, 1);
+ assertEquals(obj.map(x => x), obj);
+ var countPositiveNumber = 0;
+ obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+ });
+ assertEquals(countPositiveNumber, 1);
+ assertEquals(obj.length, obj.concat([]).length);
+ var iterator = obj.values();
+ assertEquals(iterator.next().value, undefined);
+ assertEquals(iterator.next().value, null);
+ var iterator = obj.keys();
+ assertEquals(iterator.next().value, 0);
+ assertEquals(iterator.next().value, 1);
+ var iterator = obj.entries();
+ assertEquals(iterator.next().value, [0, undefined]);
+ assertEquals(iterator.next().value, [1, null]);
+
+ // Verify that the value can be written
+ var length = obj.length;
+ for (var i = 0; i < length-1; i++) {
+ obj[i] = 'new';
+ assertEquals(obj[i], 'new');
+ }
+};
+obj = new Array(undefined, null, 1, -1, 'a', Symbol("test"));
+assertTrue(%HasPackedElements(obj));
+Object.seal(obj);
+testPackedSealedArray1(obj);
+
+// Verify after transition from preventExtensions
+obj = new Array(undefined, null, 1, -1, 'a', Symbol("test"));
+assertTrue(%HasPackedElements(obj));
+Object.preventExtensions(obj);
+Object.seal(obj);
+testPackedSealedArray1(obj);
+
+// Verify flat, map, slice, flatMap, join, reduce, reduceRight for sealed packed array
+function testPackedSealedArray2(arr) {
+ assertTrue(Object.isSealed(arr));
+ assertFalse(Object.isFrozen(arr));
+ assertEquals(arr.map(x => [x]), [['a'], ['b'], ['c']]);
+ assertEquals(arr.flatMap(x => [x]), arr);
+ assertEquals(arr.flat(), arr);
+ assertEquals(arr.join('-'), "a-b-c");
+ const reducer = (accumulator, currentValue) => accumulator + currentValue;
+ assertEquals(arr.reduce(reducer), "abc");
+ assertEquals(arr.reduceRight(reducer), "cba");
+ assertEquals(arr.slice(0, 1), ['a']);
+  // Verify changing the content of a sealed packed array
+ arr.sort();
+ assertEquals(arr.join(''), "abc");
+ arr.reverse();
+ assertEquals(arr.join(''), "cba");
+ arr.copyWithin(0, 1, 2);
+ assertEquals(arr.join(''),"bba");
+ arr.fill('d');
+ assertEquals(arr.join(''), "ddd");
+}
+
+var arr1 = new Array('a', 'b', 'c');
+assertTrue(%HasPackedElements(arr1));
+Object.seal(arr1);
+testPackedSealedArray2(arr1);
+
+var arr2 = new Array('a', 'b', 'c');
+assertTrue(%HasPackedElements(arr2));
+Object.preventExtensions(arr2);
+Object.seal(arr2);
+testPackedSealedArray2(arr2);
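
Taken together, the new object-prevent-extensions.js, object-seal.js and object-freeze.js coverage establishes a ladder for packed arrays, where each level includes the previous one (summary sketch matching the assertions above):

    var a = ["a", "b", "c"];

    Object.preventExtensions(a);
    // No growth: push, unshift and a growing splice throw TypeError.
    // Element writes, pop and in-place mutators (sort, fill) still work.

    Object.seal(a);
    // Additionally non-configurable: pop, which deletes the last element,
    // now throws. Element writes and in-place mutators still work.

    Object.freeze(a);
    // Additionally non-writable: sort, reverse, fill and copyWithin now
    // throw. Only reads and non-mutating builtins (map, slice) remain.
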
diff --git a/deps/v8/test/mjsunit/optimized-includes-polymorph.js b/deps/v8/test/mjsunit/optimized-includes-polymorph.js
new file mode 100644
index 0000000000..55dc22978b
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-includes-polymorph.js
@@ -0,0 +1,117 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o1 = {};
+var o2 = {};
+var a = [0, 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,0,0];
+var b = [,,,,,2,3,4];
+var c = [o1, o2];
+var d = [,,, o2, o1];
+var e = [0.5,3,4];
+var f = [,,,,0.5,3,4];
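+// a/b are packed/holey smi arrays, c/d packed/holey object arrays, and e/f
+// packed/holey double arrays, so includes and indexOf see polymorphic maps
+// across all the elements kinds their fast paths handle.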
+
+function checkIncludes(ary, value) {
+ return ary.includes(value)
+}
+
+function checkIndexOf(ary, value, expected) {
+ return ary.indexOf(value) == expected;
+}
+
+function expectIncludes(ary, value) {
+ assertTrue(checkIncludes(ary, value));
+}
+
+function expectNotIncludes(ary, value) {
+ assertFalse(checkIncludes(ary, value));
+}
+
+function expectIndexOf(ary, value, expected) {
+ assertTrue(checkIndexOf(ary, value, expected));
+}
+
+var testIncludes = {
+ polymorphic: function() {
+ expectIncludes(a, 21);
+ expectIncludes(b, 4);
+ expectIncludes(c, o2);
+ expectIncludes(d, o1);
+ expectNotIncludes(a, o1);
+ expectNotIncludes(b, o2);
+ expectNotIncludes(c, 3);
+ expectNotIncludes(d, 4);
+ },
+
+ polymorphicDouble: function() {
+ expectIncludes(e, 3);
+ expectIncludes(f, 0.5);
+ expectNotIncludes(e, 10);
+ expectNotIncludes(f, 0.25);
+ },
+
+ polymorphicMixed: function() {
+ expectIncludes(a, 21);
+ expectIncludes(b, 4);
+ expectIncludes(c, o2);
+ expectIncludes(d, o1);
+ expectIncludes(e, 3);
+ expectIncludes(f, 0.5);
+ expectNotIncludes(a, o1);
+ expectNotIncludes(b, o2);
+ expectNotIncludes(c, 3);
+ expectNotIncludes(d, 4);
+ expectNotIncludes(e, 10);
+ expectNotIncludes(f, 0.25);
+ },
+};
+
+var testIndexOf = {
+ polymorphic: function() {
+ expectIndexOf(a, 21, 21);
+ expectIndexOf(b, 4, 7);
+ expectIndexOf(c, o2, 1);
+ expectIndexOf(d, o1, 4);
+ expectIndexOf(a, o1, -1);
+ expectIndexOf(b, o2, -1);
+ expectIndexOf(c, 3, -1);
+ expectIndexOf(d, 4, -1);
+ },
+
+ polymorphicDouble: function() {
+ expectIndexOf(e, 3, 1);
+ expectIndexOf(f, 0.5, 4);
+ expectIndexOf(e, 10, -1);
+ expectIndexOf(f, 0.25, -1);
+ },
+
+ polymorphicMixed: function() {
+ expectIndexOf(a, 21, 21);
+ expectIndexOf(b, 4, 7);
+ expectIndexOf(c, o2, 1);
+ expectIndexOf(d, o1, 4);
+ expectIndexOf(e, 3, 1);
+ expectIndexOf(f, 0.5, 4);
+ expectIndexOf(a, o1, -1);
+ expectIndexOf(b, o2, -1);
+ expectIndexOf(c, 3, -1);
+ expectIndexOf(d, 4, -1);
+ expectIndexOf(e, 10, -1);
+ expectIndexOf(f, 0.25, -1);
+ },
+};
+
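+// For each test group: deoptimize the helper and clear its feedback, run once
+// unoptimized to seed fresh type feedback, then optimize and run again to
+// exercise the compiled fast path.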
+function runTests(tests, func) {
+ for (test in tests) {
+ %DeoptimizeFunction(func);
+ %ClearFunctionFeedback(func);
+ tests[test]();
+ %OptimizeFunctionOnNextCall(func);
+ tests[test]();
+ }
+}
+
+runTests(testIncludes, checkIncludes)
+runTests(testIndexOf, checkIndexOf)
diff --git a/deps/v8/test/mjsunit/optimized-reduce.js b/deps/v8/test/mjsunit/optimized-reduce.js
index efcb7ccd1c..345f731947 100644
--- a/deps/v8/test/mjsunit/optimized-reduce.js
+++ b/deps/v8/test/mjsunit/optimized-reduce.js
@@ -5,6 +5,30 @@
// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
// Flags: --opt --no-always-opt
+// A store to an unseen field causes an eager deopt unrelated to reduce; the
+// result must still be correct.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ // For this particular eager deopt point to work, we need to dodge
+ // TurboFan's soft-deopts through a non-inlined and non-optimized function
+ // call to foo().
+ function foo(o, deopt) { if (deopt) { o.abc = 3; }}
+ %NeverOptimizeFunction(foo);
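+  // Storing to the unseen property o.abc transitions a's map, invalidating
+  // the optimized code's assumptions and forcing an eager deopt mid-iteration.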
+ function eagerDeoptInCalled(deopt) {
+ return a.reduce((r, v, i, o) => {
+ if (i === 7) {
+ foo(a, deopt);
+ }
+ return r + "S";
+ }, "H");
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ assertEquals("HSSSSSSSSSS", eagerDeoptInCalled(true));
+})();
+
// Make sure we gracefully handle the case of an empty array in
// optimized code.
(function() {
diff --git a/deps/v8/test/mjsunit/optimized-reduceright.js b/deps/v8/test/mjsunit/optimized-reduceright.js
index 2689a39de5..f0b20e09a6 100644
--- a/deps/v8/test/mjsunit/optimized-reduceright.js
+++ b/deps/v8/test/mjsunit/optimized-reduceright.js
@@ -5,6 +5,30 @@
// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
// Flags: --opt --no-always-opt
+// A store to an unseen field causes an eager deopt unrelated to reduceRight;
+// the result must still be correct.
+(() => {
+ const a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ // For this particular eager deopt point to work, we need to dodge
+ // TurboFan's soft-deopts through a non-inlined and non-optimized function
+ // call to foo().
+ function foo(o, deopt) { if (deopt) { o.abc = 3; }}
+ %NeverOptimizeFunction(foo);
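+  // Storing to the unseen property o.abc transitions a's map, invalidating
+  // the optimized code's assumptions and forcing an eager deopt mid-iteration.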
+ function eagerDeoptInCalled(deopt) {
+ return a.reduceRight((r, v, i, o) => {
+ if (i === 7) {
+ foo(a, deopt);
+ }
+ return r + "S";
+ }, "H");
+ }
+ eagerDeoptInCalled();
+ eagerDeoptInCalled();
+ %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+ eagerDeoptInCalled();
+ assertEquals("HSSSSSSSSSS", eagerDeoptInCalled(true));
+})();
+
// Make sure we gracefully handle the case of an empty array in
// optimized code.
(function() {
diff --git a/deps/v8/test/mjsunit/parallel-optimize-disabled.js b/deps/v8/test/mjsunit/parallel-optimize-disabled.js
index bcd7110263..83970ae096 100644
--- a/deps/v8/test/mjsunit/parallel-optimize-disabled.js
+++ b/deps/v8/test/mjsunit/parallel-optimize-disabled.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --concurrent-recompilation
+// Flags: --concurrent-recompilation --turbo-inlining
// Flags: --allow-natives-syntax --no-always-opt
if (!%IsConcurrentRecompilationSupported()) {
diff --git a/deps/v8/test/mjsunit/regress-930045.js b/deps/v8/test/mjsunit/regress-930045.js
new file mode 100644
index 0000000000..8983c2014a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-930045.js
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-fields
+
+(function CaptureStackTracePrivateSymbol() {
+ var o = {};
+ Object.preventExtensions(o);
+
+ try { Error.captureStackTrace(o); } catch (e) {}
+ try { Error.captureStackTrace(o); } catch (e) {}
+})();
+
+(function PrivateFieldAfterPreventExtensions() {
+ class C {
+ constructor() {
+ this.x = 1;
+ Object.preventExtensions(this);
+ }
+ }
+
+ class D extends C {
+ #i = 42;
+
+ set(i) { this.#i = i; }
+ get(i) { return this.#i; }
+ }
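+  // Adding the private field #i uses a private symbol, which must succeed
+  // even though C's constructor made the instance non-extensible.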
+
+ let d = new D();
+ d.x = 0.1;
+ assertEquals(42, d.get());
+ d.set(43);
+ assertEquals(43, d.get());
+})();
diff --git a/deps/v8/test/mjsunit/regress-932101.js b/deps/v8/test/mjsunit/regress-932101.js
new file mode 100644
index 0000000000..720ee11fef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-932101.js
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+o = Object("A");
+o.x = 1;
+Object.seal(o);
+o.x = 0.1
+
+o[1] = "b";
+assertEquals(undefined, o[1]);
diff --git a/deps/v8/test/mjsunit/regress/regress-3218530.js b/deps/v8/test/mjsunit/regress/regress-3218530.js
index 247f3dfe67..54c66dfded 100644
--- a/deps/v8/test/mjsunit/regress/regress-3218530.js
+++ b/deps/v8/test/mjsunit/regress/regress-3218530.js
@@ -33,7 +33,7 @@ var p = "floor";
function test() {
var bignumber = 31363200000;
- assertDoesNotThrow(assertEquals(m[p](Math.round(bignumber/864E5)/7)+1, 52));
+ assertEquals(m[p](Math.round(bignumber/864E5)/7)+1, 52);
}
test();
diff --git a/deps/v8/test/mjsunit/regress/regress-3255.js b/deps/v8/test/mjsunit/regress/regress-3255.js
index 0c5ee4ff00..3526d600c9 100644
--- a/deps/v8/test/mjsunit/regress/regress-3255.js
+++ b/deps/v8/test/mjsunit/regress/regress-3255.js
@@ -16,4 +16,4 @@ f(str, 0);
f(str, 0);
// This is just to trigger elements validation, object already broken.
-%SetKeyedProperty(str, 1, 'y', 0);
+%SetKeyedProperty(str, 1, 'y');
diff --git a/deps/v8/test/mjsunit/regress/regress-5888.js b/deps/v8/test/mjsunit/regress/regress-5888.js
index 0725ac4285..6481c79338 100644
--- a/deps/v8/test/mjsunit/regress/regress-5888.js
+++ b/deps/v8/test/mjsunit/regress/regress-5888.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-5911.js b/deps/v8/test/mjsunit/regress/regress-5911.js
index 0175fd3e2c..9d6d4ae5b8 100644
--- a/deps/v8/test/mjsunit/regress/regress-5911.js
+++ b/deps/v8/test/mjsunit/regress/regress-5911.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-687.js b/deps/v8/test/mjsunit/regress/regress-687.js
index a917a447f2..9dffcc1a9e 100644
--- a/deps/v8/test/mjsunit/regress/regress-687.js
+++ b/deps/v8/test/mjsunit/regress/regress-687.js
@@ -29,9 +29,8 @@
// update an accessor property to a data property using Object.defineProperty.
var obj = { get value() {}, set value (v) { throw "Error";} };
-assertDoesNotThrow(
- Object.defineProperty(obj, "value",
- { value: 5, writable:true, configurable: true }));
+Object.defineProperty(obj, "value",
+ { value: 5, writable:true, configurable: true });
var desc = Object.getOwnPropertyDescriptor(obj, "value");
assertEquals(obj.value, 5);
assertTrue(desc.configurable);
@@ -49,7 +48,7 @@ var proto = {
var create = Object.create(proto);
assertEquals(create.value, undefined);
-assertDoesNotThrow(create.value = 4);
+create.value = 4;
assertEquals(create.value, 4);
// These tests were provided in bug 959, but are all related to this issue.
diff --git a/deps/v8/test/mjsunit/regress/regress-7254.js b/deps/v8/test/mjsunit/regress/regress-7254.js
index 8231a8fd74..3c514c4a0b 100644
--- a/deps/v8/test/mjsunit/regress/regress-7254.js
+++ b/deps/v8/test/mjsunit/regress/regress-7254.js
@@ -9,6 +9,7 @@ function foo(a) {
a[1] = "";
}
+%PrepareFunctionForOptimization(foo);
foo([0,0].map(x => x));
foo([0,0].map(x => x));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-813440.js b/deps/v8/test/mjsunit/regress/regress-813440.js
index 8fcb695f5a..f4df95daae 100644
--- a/deps/v8/test/mjsunit/regress/regress-813440.js
+++ b/deps/v8/test/mjsunit/regress/regress-813440.js
@@ -4,7 +4,6 @@
// Flags: --invoke-weak-callbacks --omit-quit --expose-wasm --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/regress-863810.js b/deps/v8/test/mjsunit/regress/regress-863810.js
index 0ee1330310..3bec6f5ae9 100644
--- a/deps/v8/test/mjsunit/regress/regress-863810.js
+++ b/deps/v8/test/mjsunit/regress/regress-863810.js
@@ -4,7 +4,6 @@
// Flags: --no-liftoff --no-wasm-tier-up --no-future --debug-code
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/regress-8913.js b/deps/v8/test/mjsunit/regress/regress-8913.js
new file mode 100644
index 0000000000..9403334d72
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8913.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function foo(t) { return 'a'.concat(t); }
+
+foo(1);
+foo(1);
+%OptimizeFunctionOnNextCall(foo);
+foo(1);
+%OptimizeFunctionOnNextCall(foo);
+foo(1);
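+// Regression test for a deopt loop: after reoptimization, concat with a smi
+// argument must leave foo optimized.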
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-8947.js b/deps/v8/test/mjsunit/regress/regress-8947.js
new file mode 100644
index 0000000000..17507b1002
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8947.js
@@ -0,0 +1,49 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function testCallReexportedJSFunc() {
+ print(arguments.callee.name);
+
+ function dothrow() {
+ throw "exception";
+ }
+
+ var builder = new WasmModuleBuilder();
+ const imp_index = builder.addImport("w", "m", kSig_i_v);
+ builder.addExport("exp", imp_index);
+ var exp = builder.instantiate({w: {m: dothrow}}).exports.exp;
+
+ builder.addImport("w", "m", kSig_i_v);
+ builder.addFunction("main", kSig_i_v)
+ .addBody([
+ kExprCallFunction, 0, // --
+ ]) // --
+ .exportFunc();
+
+ var main = builder.instantiate({w: {m: exp}}).exports.main;
+ assertThrowsEquals(main, "exception");
+})();
+
+(function testCallReexportedAPIFunc() {
+ print(arguments.callee.name);
+
+ var builder = new WasmModuleBuilder();
+ const imp_index = builder.addImport("w", "m", kSig_i_v);
+ builder.addExport("exp", imp_index);
+ var exp = builder.instantiate({w: {m: WebAssembly.Module}}).exports.exp;
+
+ builder.addImport("w", "m", kSig_i_v);
+ builder.addFunction("main", kSig_i_v)
+ .addBody([
+ kExprCallFunction, 0, // --
+ ]) // --
+ .exportFunc();
+
+ var main = builder.instantiate({w: {m: exp}}).exports.main;
+ assertThrows(main, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-912162.js b/deps/v8/test/mjsunit/regress/regress-912162.js
new file mode 100644
index 0000000000..b661725bb7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-912162.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = new Array();
+a.prototype = a;
+
+function f() {
+ a.length = 0x2000001;
+ a.push();
+}
+
+({}).__proto__ = a;
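+// Using a as a prototype normalizes its elements to dictionary mode;
+// %HeapObjectVerify below checks the resulting layout stays consistent.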
+
+f()
+f()
+
+a.length = 1;
+a.fill(-255);
+
+%HeapObjectVerify(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-917755.js b/deps/v8/test/mjsunit/regress/regress-917755.js
index 49803ae2d3..6cd4a24d37 100644
--- a/deps/v8/test/mjsunit/regress/regress-917755.js
+++ b/deps/v8/test/mjsunit/regress/regress-917755.js
@@ -2,11 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+assertThrows(`
{
function a() {}
}
{
+ // Duplicate lexical declarations are only allowed if they are both sloppy
+ // block functions (see bug 4693). In this case the sloppy block function
+ // conflicts with the lexical variable declaration, causing a syntax error.
let a;
function a() {};
}
+`, SyntaxError)
diff --git a/deps/v8/test/mjsunit/regress/regress-926036.js b/deps/v8/test/mjsunit/regress/regress-926036.js
new file mode 100644
index 0000000000..3c8f49c956
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-926036.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("async() => { for await (var a ;;) {} }", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-930486.js b/deps/v8/test/mjsunit/regress/regress-930486.js
new file mode 100644
index 0000000000..8b4aafae81
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-930486.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var __v_49026 = function () {};
+
+__v_49026.prototype = undefined;
+__v_49026.x = 23;
+__v_49026.prototype = new ArrayBuffer();
+__v_49026.x = 2147483649;
diff --git a/deps/v8/test/mjsunit/regress/regress-932953.js b/deps/v8/test/mjsunit/regress/regress-932953.js
new file mode 100644
index 0000000000..5e211c79d1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-932953.js
@@ -0,0 +1,59 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function NonExtensibleBetweenSetterAndGetter() {
+ o = {};
+ o.x = 42;
+ o.__defineGetter__("y", function() { });
+ Object.preventExtensions(o);
+ o.__defineSetter__("y", function() { });
+ o.x = 0.1;
+})();
+
+(function InterleavedIntegrityLevel() {
+ o = {};
+ o.x = 42;
+ o.__defineSetter__("y", function() { });
+ Object.preventExtensions(o);
+ o.__defineGetter__("y", function() { return 44; });
+ Object.seal(o);
+ o.x = 0.1;
+ assertEquals(44, o.y);
+})();
+
+(function TryUpdateRepeatedIntegrityLevel() {
+ function C() {
+ this.x = 0;
+ this.x = 1;
+ Object.preventExtensions(this);
+ Object.seal(this);
+ }
+
+ const o1 = new C();
+ const o2 = new C();
+ const o3 = new C();
+
+ function f(o) {
+ return o.x;
+ }
+
+ // Warm up the IC.
+ f(o1);
+ f(o1);
+ f(o1);
+
+ // Reconfigure to double field.
+ o3.x = 0.1;
+
+ // Migrate o2 to the new shape.
+ f(o2);
+
+ %OptimizeFunctionOnNextCall(f);
+ f(o1);
+
+ assertTrue(%HaveSameMap(o1, o2));
+ assertTrue(%HaveSameMap(o1, o3));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-933179.js b/deps/v8/test/mjsunit/regress/regress-933179.js
new file mode 100644
index 0000000000..a9e7f07bee
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-933179.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = { ...{ length : 1 } };
+
+o.x = 1;
+delete o.x;
+
+o.length = 2;
diff --git a/deps/v8/test/mjsunit/regress/regress-933776.js b/deps/v8/test/mjsunit/regress/regress-933776.js
new file mode 100644
index 0000000000..6736348106
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-933776.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+__v_51351 = /[^]$/gm;
+"a\nb\rc\n\rd\r\ne".replace(__v_51351, "*$1");
diff --git a/deps/v8/test/mjsunit/regress/regress-936077.js b/deps/v8/test/mjsunit/regress/regress-936077.js
new file mode 100644
index 0000000000..fcd5254bd2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-936077.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --allow-natives-syntax
+// Flags: --concurrent-inlining --function-context-specialization
+
+function main() {
+ var obj = {};
+ function foo() { return obj[0]; };
+ gc();
+ obj.x = 10;
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+}
+main();
+main();
diff --git a/deps/v8/test/mjsunit/regress/regress-940361.js b/deps/v8/test/mjsunit/regress/regress-940361.js
new file mode 100644
index 0000000000..28b21ad145
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-940361.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const re = /abc/;
+
+// Move the test method one prototype up.
+re.__proto__.__proto__.test = re.__proto__.test;
+delete re.__proto__.test;
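+// re.test is now inherited from Object.prototype instead of RegExp.prototype.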
+
+function foo(s) {
+ return re.test(s);
+}
+
+assertTrue(foo('abc'));
+assertTrue(foo('abc'));
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo('abc'));
+assertFalse(foo('ab'));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-715455.js b/deps/v8/test/mjsunit/regress/regress-crbug-715455.js
index 21ec165683..87b240227d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-715455.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-715455.js
@@ -20,6 +20,6 @@ for (var i = 0; i < test_set.length; ++i) {
src = src.replace(/MODULE/g, "Module" + i);
src = src.replace(/LIMIT/g, test_set[i]);
var module = eval("(" + src + ")");
- assertDoesNotThrow(module(this).f());
+ module(this).f();
assertFalse(%IsAsmWasmCode(module));
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772056.js b/deps/v8/test/mjsunit/regress/regress-crbug-772056.js
index 380f968560..d9fb4d51d2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-772056.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772056.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-913222.js b/deps/v8/test/mjsunit/regress/regress-crbug-913222.js
new file mode 100644
index 0000000000..6410c54645
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-913222.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+__v_0 = '(function() {\n';
+for (var __v_1 = 0; __v_1 < 10000; __v_1++) {
+ __v_0 += ' return function() {\n';
+}
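+// Parsing 10000 nested function literals overflows the 100 KB stack, which
+// surfaces as a RangeError from eval.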
+assertThrows(()=>eval(__v_0), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-926819.js b/deps/v8/test/mjsunit/regress/regress-crbug-926819.js
new file mode 100644
index 0000000000..060c72f60e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-926819.js
@@ -0,0 +1,5 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("a(function(){{let f;function f}})", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-926856.js b/deps/v8/test/mjsunit/regress/regress-crbug-926856.js
new file mode 100644
index 0000000000..a3fa934483
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-926856.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Need a fast array with enough elements to surpass
+// kMaxRegularHeapObjectSize.
+var size = 63392;
+var a = [];
+function build() {
+ for (let i = 0; i < size; i++) {
+ a.push(i);
+ }
+}
+
+build();
+
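+// c maps each smi to a double, forcing a transition to double elements whose
+// backing store is big enough to be allocated in large-object space.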
+function c(v) { return v + 0.5; }
+a.map(c);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-930948-base.js b/deps/v8/test/mjsunit/regress/regress-crbug-930948-base.js
new file mode 100644
index 0000000000..740f42a288
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-930948-base.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap --allow-natives-syntax
+
+function foo() {
+ return [undefined].map(Math.asin);
+}
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-930948.js b/deps/v8/test/mjsunit/regress/regress-crbug-930948.js
new file mode 100644
index 0000000000..06dcf40646
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-930948.js
@@ -0,0 +1,25 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap --allow-natives-syntax
+
+// This checks that TransitionAndStoreNumberElement silences NaNs.
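+// Math.asin(undefined) is NaN; stores into a double array must quiet the NaN
+// bit pattern so it cannot be confused with the hole sentinel.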
+function foo() {
+ return [undefined].map(Math.asin);
+}
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
+
+// This checks that TransitionAndStoreElement silences NaNs.
+function bar(b) {
+ return [undefined].map(x => b ? Math.asin(x) : "string");
+}
+bar(true);
+bar(false);
+bar(true);
+bar(false);
+%OptimizeFunctionOnNextCall(bar);
+bar(true);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-931664.js b/deps/v8/test/mjsunit/regress/regress-crbug-931664.js
new file mode 100644
index 0000000000..b4fc85367e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-931664.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function opt(){
+ for(l in('a')){
+ try{
+ for(a in('')) {
+ for(let arg2 in(+(arg2)));
+ }
+ }
+ finally{}
+ }
+}
+opt();
+%OptimizeFunctionOnNextCall(opt);
+opt();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-932034.js b/deps/v8/test/mjsunit/regress/regress-crbug-932034.js
new file mode 100644
index 0000000000..56758b4f74
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-932034.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --mock-arraybuffer-allocator
+
+// Verify that on 32-bit architectures a byte length overflow is handled gracefully.
+try {
+ new BigInt64Array(%MaxSmi());
+} catch(e) {
+ assertInstanceof(e, RangeError);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-933214.js b/deps/v8/test/mjsunit/regress/regress-crbug-933214.js
new file mode 100644
index 0000000000..56b96ac86f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-933214.js
@@ -0,0 +1,13 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(`
+ function __v_0() {
+ function __v_2() {
+ try {
+ function* __v_0() {}
+ function __v_0() {}
+ }
+ }
+ }`, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-934138.js b/deps/v8/test/mjsunit/regress/regress-crbug-934138.js
new file mode 100644
index 0000000000..2d23486717
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-934138.js
@@ -0,0 +1,38 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestTrailingJunkAfterExport() {
+ function Module() {
+ "use asm";
+ function f() {}
+ return {f: f}
+ %kaboom;
+ }
+ assertThrows(() => Module(), ReferenceError);
+ assertFalse(%IsAsmWasmCode(Module));
+})();
+
+(function TestExportWithSemicolon() {
+ function Module() {
+ "use asm";
+ function f() {}
+ return {f: f};
+ // appreciate the semicolon
+ }
+ assertDoesNotThrow(() => Module());
+ assertTrue(%IsAsmWasmCode(Module));
+})();
+
+(function TestExportWithoutSemicolon() {
+ function Module() {
+ "use asm";
+ function f() {}
+ return {f: f}
+ // appreciate the nothingness
+ }
+ assertDoesNotThrow(() => Module());
+ assertTrue(%IsAsmWasmCode(Module));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-934166.js b/deps/v8/test/mjsunit/regress/regress-crbug-934166.js
new file mode 100644
index 0000000000..d6fae7136b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-934166.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+{
+ for(let i = 0; i < 10; ++i){
+ try{
+ // Carefully constructed by a fuzzer to use a new register for s(), whose
+ // write is dead due to the unconditional throw after s()=N, but which is
+ // read in the ({...g}) call, which therefore must also be marked dead and
+ // elided.
+ with(f&&g&&(s()=N)({...g})){}
+ } catch {}
+ %OptimizeOsr();
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-935932.js b/deps/v8/test/mjsunit/regress/regress-crbug-935932.js
new file mode 100644
index 0000000000..a34b7743e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-935932.js
@@ -0,0 +1,90 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function test(func, expect) {
+ assertTrue(func() == expect);
+ %OptimizeFunctionOnNextCall(func);
+ assertTrue(func() == expect);
+}
+
+// Check loading a constant off the global.
+var v0 = 10;
+function check_v0() { return "v0" in this; }
+test(check_v0, true);
+
+// make it non-constant.
+v0 = 0;
+test(check_v0, true);
+
+// test a missing value.
+function check_v1() { return "v1" in this; }
+test(check_v1, false);
+this.v1 = 3;
+test(check_v1, true);
+delete this.v1;
+test(check_v1, false);
+
+// test undefined.
+var v2;
+function check_v2() { return "v2" in this; }
+test(check_v2, true);
+
+// test a constant object.
+var v3 = {};
+function check_v3() { return "v3" in this; }
+test(check_v3, true);
+// make the object non-constant.
+v3 = [];
+test(check_v3, true);
+
+// test non-configurable
+Object.defineProperty(this, "v4", { value: {}, configurable: false});
+function check_v4() { return "v4" in this; }
+test(check_v4, true);
+
+// Test loading from arrays with different prototypes.
+(function() {
+ function testIn(index, array) {
+ return index in array;
+ }
+
+ let a = [];
+ a.__proto__ = [0,1,2];
+ a[1] = 3;
+
+  // The first load installs a LoadIC handler with hole-to-undefined conversion disabled.
+ assertTrue(testIn(0, a));
+  // The second load misses in the IC when the hole is loaded; having seen the same map twice, the IC goes megamorphic.
+ assertTrue(testIn(0, a));
+ %OptimizeFunctionOnNextCall(testIn);
+ // Test JIT to ensure proper handling.
+ assertTrue(testIn(0, a));
+
+ %ClearFunctionFeedback(testIn);
+ %DeoptimizeFunction(testIn);
+
+  // The first load installs a LoadIC handler with hole-to-undefined conversion disabled.
+ assertTrue(testIn(0, a));
+ %OptimizeFunctionOnNextCall(testIn);
+ // Test JIT to ensure proper handling if hole is loaded.
+ assertTrue(testIn(0, a));
+
+  // Repeat the same tests for an access out of bounds of the array but in bounds of its prototype.
+ %ClearFunctionFeedback(testIn);
+ %DeoptimizeFunction(testIn);
+
+ assertTrue(testIn(2, a));
+ assertTrue(testIn(2, a));
+ %OptimizeFunctionOnNextCall(testIn);
+ assertTrue(testIn(2, a));
+
+ %ClearFunctionFeedback(testIn);
+ %DeoptimizeFunction(testIn);
+
+ assertTrue(testIn(2, a));
+ %OptimizeFunctionOnNextCall(testIn);
+ assertTrue(testIn(2, a));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-936302.js b/deps/v8/test/mjsunit/regress/regress-crbug-936302.js
new file mode 100644
index 0000000000..c8d3c136a1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-936302.js
@@ -0,0 +1,25 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+(function() {
+ 'use strict';
+
+ function baz() {
+ 'use asm';
+ function f() {}
+ return {f: f};
+ }
+
+ function foo(x) {
+ baz(x);
+ %DeoptimizeFunction(foo);
+ }
+
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-937618.js b/deps/v8/test/mjsunit/regress/regress-crbug-937618.js
new file mode 100644
index 0000000000..71ea8a8507
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-937618.js
@@ -0,0 +1,34 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let target = {0:42, a:42};
+
+let proxy = new Proxy(target, {
+ has: function() { return false; },
+});
+
+Object.preventExtensions(target);
+
+function testLookupElementInProxy() {
+ 0 in proxy;
+}
+
+// Step 9 of the proxy [[HasProperty]] internal method (spec section 9.5.7)
+// states that if the trap returns false while the target has the property and
+// is non-extensible, a TypeError is thrown.
+
+assertThrows(testLookupElementInProxy, TypeError);
+assertThrows(testLookupElementInProxy, TypeError);
+%OptimizeFunctionOnNextCall(testLookupElementInProxy);
+assertThrows(testLookupElementInProxy, TypeError);
+
+function testLookupPropertyInProxy(){
+ "a" in proxy;
+}
+
+assertThrows(testLookupPropertyInProxy, TypeError);
+assertThrows(testLookupPropertyInProxy, TypeError);
+%OptimizeFunctionOnNextCall(testLookupPropertyInProxy);
+assertThrows(testLookupPropertyInProxy, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-937649.js b/deps/v8/test/mjsunit/regress/regress-crbug-937649.js
new file mode 100644
index 0000000000..5cc5a3db9a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-937649.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
+(function() {
+ function foo(x) {
+ const i = x > 0;
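+    // i is a boolean; getUint16 applies ToIndex to it, so false reads offset 0.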
+ const dv = new DataView(ab);
+ return dv.getUint16(i);
+ }
+ const ab = new ArrayBuffer(2);
+ foo(0);
+ foo(0);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(0);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-941743.js b/deps/v8/test/mjsunit/regress/regress-crbug-941743.js
new file mode 100644
index 0000000000..8fc4ad4322
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-941743.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --noenable-slow-asserts
+
+// This call ensures that TurboFan won't inline array constructors.
+Array(2**30);
+
+// Set up a fast holey smi array, and generate optimized code.
+let a = [1, 2, ,,, 3];
+function mapping(a) {
+ return a.map(v => v);
+}
+mapping(a);
+mapping(a);
+%OptimizeFunctionOnNextCall(mapping);
+mapping(a);
+
+// Now lengthen the array, but ensure that it points to a non-dictionary
+// backing store.
+a.length = (32 * 1024 * 1024)-1;
+a.fill(1,0);
+a.push(2);
+a.length += 500;
+// Now the non-inlined array constructor produces an array with dictionary
+// elements, which used to crash here.
+mapping(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5848.js b/deps/v8/test/mjsunit/regress/regress-v8-5848.js
new file mode 100644
index 0000000000..9db3666cc5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5848.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const inlineFromParser = 50 ** 50;
+
+const i = 50;
+const fromRuntimePowOp = i ** i;
+const fromRuntimeMath = Math.pow(i, i);
+
+// inlineFromParser === fromRuntimePowOp === fromRuntimeMath
+
+assertEquals(inlineFromParser, fromRuntimePowOp);
+assertEquals(inlineFromParser - fromRuntimePowOp, 0);
+
+assertEquals(inlineFromParser, fromRuntimeMath);
+assertEquals(inlineFromParser - fromRuntimeMath, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-8799.js b/deps/v8/test/mjsunit/regress/regress-v8-8799.js
new file mode 100644
index 0000000000..a6710904ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-8799.js
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --stress-flush-bytecode
+
+// Ensure tagged template objects are cached even after bytecode flushing.
+var f = (x) => eval`a${x}b`;
+var a = f();
+gc();
+assertSame(a, f());
diff --git a/deps/v8/test/mjsunit/regress/wasm/loop-stack-check.js b/deps/v8/test/mjsunit/regress/wasm/loop-stack-check.js
index a76ad017d9..b1e92fa4ec 100644
--- a/deps/v8/test/mjsunit/regress/wasm/loop-stack-check.js
+++ b/deps/v8/test/mjsunit/regress/wasm/loop-stack-check.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-02256.js b/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
index d1dae276d3..791d2a1d2d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
@@ -264,7 +264,6 @@ var __v_11 = this;
var __v_12 = {};
var __v_13 = {};
try {
- load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-__v_1.js");
__v_2 = 0x10000;
} catch (e) {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-5531.js b/deps/v8/test/mjsunit/regress/wasm/regress-5531.js
index 1363f96264..cea547d09c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-5531.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5531.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-5800.js b/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
index 2e56da853d..77c436119c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function AddTest() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-5860.js b/deps/v8/test/mjsunit/regress/wasm/regress-5860.js
index b193323dd1..961e52d2dc 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-5860.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5860.js
@@ -4,7 +4,6 @@
//
// Flags: --expose-wasm
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let module1 = (() => {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-5884.js b/deps/v8/test/mjsunit/regress/wasm/regress-5884.js
index 8677f105ee..c6013d3f86 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-5884.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5884.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-6054.js b/deps/v8/test/mjsunit/regress/wasm/regress-6054.js
index 7b309b6f82..3afb371131 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-6054.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-6054.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-6164.js b/deps/v8/test/mjsunit/regress/wasm/regress-6164.js
index 3035ea5249..ed728f5acc 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-6164.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-6164.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-644682.js b/deps/v8/test/mjsunit/regress/wasm/regress-644682.js
index b58c0d9b10..a48e5aeef0 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-644682.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-644682.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-648079.js b/deps/v8/test/mjsunit/regress/wasm/regress-648079.js
index acc6146ef5..fbb5414480 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-648079.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-648079.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Non-standard opcodes.
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-651961.js b/deps/v8/test/mjsunit/regress/wasm/regress-651961.js
index bf08200d30..f42f431703 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-651961.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-651961.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-654377.js b/deps/v8/test/mjsunit/regress/wasm/regress-654377.js
index 871da72114..455139f0bb 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-654377.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-654377.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-663994.js b/deps/v8/test/mjsunit/regress/wasm/regress-663994.js
index da3d7c7771..9643a86acb 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-663994.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-663994.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-667745.js b/deps/v8/test/mjsunit/regress/wasm/regress-667745.js
index 68c880303b..cae5122ca5 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-667745.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-667745.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-684858.js b/deps/v8/test/mjsunit/regress/wasm/regress-684858.js
index bfef7fcc8e..1ac3cc6f2a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-684858.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-684858.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var name = 'regression_684858';
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-688876.js b/deps/v8/test/mjsunit/regress/wasm/regress-688876.js
index 02932b4812..5e142b3417 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-688876.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-688876.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-689450.js b/deps/v8/test/mjsunit/regress/wasm/regress-689450.js
index 9a4989c633..bcd25387b4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-689450.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-689450.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-6931.js b/deps/v8/test/mjsunit/regress/wasm/regress-6931.js
index 364e95a680..5edf25761f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-6931.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-6931.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-699485.js b/deps/v8/test/mjsunit/regress/wasm/regress-699485.js
index a44b14b031..8a70afa591 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-699485.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-699485.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-702460.js b/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
index 44e60330b4..21a84bcf28 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Non-standard opcodes.
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7033.js b/deps/v8/test/mjsunit/regress/wasm/regress-7033.js
index 17d79c896f..58dff5e2f9 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7033.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7033.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7035.js b/deps/v8/test/mjsunit/regress/wasm/regress-7035.js
index cd69c7d1b4..73485494b3 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7035.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7035.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7049.js b/deps/v8/test/mjsunit/regress/wasm/regress-7049.js
index b9ad1a0be4..6d2cd351fb 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7049.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7049.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax --expose-gc
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// Build two instances, instance 2 is interpreted, and calls instance 1 (via
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-708714.js b/deps/v8/test/mjsunit/regress/wasm/regress-708714.js
index 10cd67ad8d..dc90a0aba3 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-708714.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-708714.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-709684.js b/deps/v8/test/mjsunit/regress/wasm/regress-709684.js
index 1ca0cb63fd..a6e03e0a0b 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-709684.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-709684.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --allow-natives-syntax
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let importingModuleBinary1 = (() => {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-710844.js b/deps/v8/test/mjsunit/regress/wasm/regress-710844.js
index 20c8154e4a..3bafe41c0f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-710844.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-710844.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-711203.js b/deps/v8/test/mjsunit/regress/wasm/regress-711203.js
index 46f274a8b0..beca86d378 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-711203.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-711203.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-715216b.js b/deps/v8/test/mjsunit/regress/wasm/regress-715216b.js
index 0954f807dd..85e93e07c9 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-715216b.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-715216b.js
@@ -4,7 +4,6 @@
// Flags: --wasm-interpret-all --wasm-lazy-compilation
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-722445.js b/deps/v8/test/mjsunit/regress/wasm/regress-722445.js
index f6a96dc60d..5868d76190 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-722445.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-722445.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-724846.js b/deps/v8/test/mjsunit/regress/wasm/regress-724846.js
index 628d58f294..b215b6021a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-724846.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-724846.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// Flags: --wasm-max-mem-pages=49152
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-724851.js b/deps/v8/test/mjsunit/regress/wasm/regress-724851.js
index 18834795d2..5c4c421a1c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-724851.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-724851.js
@@ -4,7 +4,6 @@
// Flags: --wasm-lazy-compilation
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-724972.js b/deps/v8/test/mjsunit/regress/wasm/regress-724972.js
index 2af403ce20..cbe5d35d54 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-724972.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-724972.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-727222.js b/deps/v8/test/mjsunit/regress/wasm/regress-727222.js
index 6b3f2faf5f..3096334096 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-727222.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-727222.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-727560.js b/deps/v8/test/mjsunit/regress/wasm/regress-727560.js
index f92d879a2e..e9ed441860 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-727560.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-727560.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
{
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-729991.js b/deps/v8/test/mjsunit/regress/wasm/regress-729991.js
index 85a9ae7231..8cabd515b3 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-729991.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-729991.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-734246.js b/deps/v8/test/mjsunit/regress/wasm/regress-734246.js
index 57f98949f8..b861141db7 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-734246.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-734246.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-734345.js b/deps/v8/test/mjsunit/regress/wasm/regress-734345.js
index f55a06288e..d7486d2d1e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-734345.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-734345.js
@@ -4,7 +4,6 @@
// Flags: --expose-gc
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
builder1 = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
index 748c74139f..81f45fe6a5 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
@@ -4,7 +4,6 @@
// Flags: --wasm-lazy-compilation
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7364.js b/deps/v8/test/mjsunit/regress/wasm/regress-7364.js
index 8e66295b70..f508585ebb 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7364.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7364.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const exportingModuleBinary = (() => {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-736584.js b/deps/v8/test/mjsunit/regress/wasm/regress-736584.js
index 033732f368..0e027f3a57 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-736584.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-736584.js
@@ -4,7 +4,6 @@
// Flags: --wasm-lazy-compilation
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let mem = new WebAssembly.Memory({initial: 0});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7366.js b/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
index 41f758efb1..b5cae8daa4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-737069.js b/deps/v8/test/mjsunit/regress/wasm/regress-737069.js
index c68d10f06d..e4c4fae895 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-737069.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-737069.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
let binary = new Binary;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-739768.js b/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
index 52985c3297..a191c828d6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// Flags: --wasm-lazy-compilation
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7422.js b/deps/v8/test/mjsunit/regress/wasm/regress-7422.js
index 87896b4c35..71e1eb89bd 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7422.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7422.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7499.js b/deps/v8/test/mjsunit/regress/wasm/regress-7499.js
index 71f246decf..74e4d53a17 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7499.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7499.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7508.js b/deps/v8/test/mjsunit/regress/wasm/regress-7508.js
index 7c07d2d7e3..10ce500a44 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7508.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7508.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-752423.js b/deps/v8/test/mjsunit/regress/wasm/regress-752423.js
index 15ee9a6c34..938ecbf252 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-752423.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-752423.js
@@ -6,7 +6,6 @@
'use strict';
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7565.js b/deps/v8/test/mjsunit/regress/wasm/regress-7565.js
index 055bfc0c59..c9d4e2ca88 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7565.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7565.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-757217.js b/deps/v8/test/mjsunit/regress/wasm/regress-757217.js
index 218b090c45..28e554b87a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-757217.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-757217.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7579.js b/deps/v8/test/mjsunit/regress/wasm/regress-7579.js
index 40cf12317f..876a76cad9 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7579.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7579.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7582.js b/deps/v8/test/mjsunit/regress/wasm/regress-7582.js
index 476a0e18e8..d8b5e9d7fc 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7582.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7582.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-763439.js b/deps/v8/test/mjsunit/regress/wasm/regress-763439.js
index 1f90e0a017..ef84b97859 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-763439.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-763439.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-763697.js b/deps/v8/test/mjsunit/regress/wasm/regress-763697.js
index faf74e1cff..c831a55fba 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-763697.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-763697.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --no-experimental-wasm-simd
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-766003.js b/deps/v8/test/mjsunit/regress/wasm/regress-766003.js
index d8a1ea1ebf..3aaff40636 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-766003.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-766003.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --wasm-interpret-all
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
__v_6 = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-769637.js b/deps/v8/test/mjsunit/regress/wasm/regress-769637.js
index c2e783014a..71aaa45bfd 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-769637.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-769637.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-771243.js b/deps/v8/test/mjsunit/regress/wasm/regress-771243.js
index e1581fcdd8..81b9e8f2a9 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-771243.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-771243.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --wasm-interpret-all
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
assertThrows(() => {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-772332.js b/deps/v8/test/mjsunit/regress/wasm/regress-772332.js
index 56e6f2ceb8..e8547c8175 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-772332.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-772332.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --wasm-interpret-all
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
assertThrows(() => {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-775366.js b/deps/v8/test/mjsunit/regress/wasm/regress-775366.js
index e8db923896..69a1f68dc0 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-775366.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-775366.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
+load("test/mjsunit/wasm/wasm-module-builder.js");
(function BadTypeSection() {
var data = bytes(
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7785.js b/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
index 12d7e6b5da..72638b1685 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7785.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax --experimental-wasm-anyref
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function testAnyRefNull() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-778917.js b/deps/v8/test/mjsunit/regress/wasm/regress-778917.js
index 083f1d12e3..c7eb033d95 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-778917.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-778917.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --wasm-interpret-all
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-782280.js b/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
index a94f061c2b..008ab16159 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-784050.js b/deps/v8/test/mjsunit/regress/wasm/regress-784050.js
index 8f1a79002c..acf4539aee 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-784050.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-784050.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7914.js b/deps/v8/test/mjsunit/regress/wasm/regress-7914.js
index ede4668d08..48f8b902f0 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7914.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7914.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-791810.js b/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
index cd6c4e2728..73b47bdd78 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-793551.js b/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
index 8aa0241923..657b2c0013 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-797846.js b/deps/v8/test/mjsunit/regress/wasm/regress-797846.js
index 6a4fd5c5f7..1470de4fc6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-797846.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-797846.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// We need a module with one valid function.
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-800756.js b/deps/v8/test/mjsunit/regress/wasm/regress-800756.js
index 2d29997cef..76afc88d8f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-800756.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-800756.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-801785.js b/deps/v8/test/mjsunit/regress/wasm/regress-801785.js
index 1870d7e8f1..105fd4bc38 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-801785.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-801785.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// Flags: --print-wasm-code
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-801850.js b/deps/v8/test/mjsunit/regress/wasm/regress-801850.js
index b56af694a9..0e0f5c249c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-801850.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-801850.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-802244.js b/deps/v8/test/mjsunit/regress/wasm/regress-802244.js
index 0b8decb637..aeaf850365 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-802244.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-802244.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-803427.js b/deps/v8/test/mjsunit/regress/wasm/regress-803427.js
index d3ab31b4c9..26b1413c3b 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-803427.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-803427.js
@@ -4,7 +4,6 @@
// Flags: --wasm-lazy-compilation
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-803788.js b/deps/v8/test/mjsunit/regress/wasm/regress-803788.js
index e7fa3aaa8f..17325538f1 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-803788.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-803788.js
@@ -4,7 +4,6 @@
// Flags: --wasm-lazy-compilation
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8059.js b/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
index c30ed152f8..78ee6bd1d2 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
@@ -4,7 +4,6 @@
// Flags: --no-wasm-disable-structured-cloning
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestPostModule() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808012.js b/deps/v8/test/mjsunit/regress/wasm/regress-808012.js
index ae613ceb54..a54b88a5e1 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-808012.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808012.js
@@ -4,7 +4,6 @@
// Flags: --wasm-lazy-compilation
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808848.js b/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
index bcf8469a14..57920de09d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808848.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// The number of locals must be greater than the constant defined here:
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808980.js b/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
index ecf6476c37..d78c07f36c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax --throws
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let kTableSize = 3;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8094.js b/deps/v8/test/mjsunit/regress/wasm/regress-8094.js
index a35d583a4a..dc78366ed8 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8094.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8094.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --experimental-wasm-eh
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Instantiate a throwing module.
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8095.js b/deps/v8/test/mjsunit/regress/wasm/regress-8095.js
index 66ffc0d4b7..7d21932ec4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8095.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8095.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --experimental-wasm-eh
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Prepare a special error object to throw.
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-812005.js b/deps/v8/test/mjsunit/regress/wasm/regress-812005.js
index 979b769bbc..ba49987de5 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-812005.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-812005.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-817380.js b/deps/v8/test/mjsunit/regress/wasm/regress-817380.js
index 2cf50892fc..5b3281a150 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-817380.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-817380.js
@@ -4,7 +4,6 @@
// Flags: --wasm-lazy-compilation
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder1 = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-819869.js b/deps/v8/test/mjsunit/regress/wasm/regress-819869.js
index f2606fb610..a32928ab0c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-819869.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-819869.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-820802.js b/deps/v8/test/mjsunit/regress/wasm/regress-820802.js
index 224a2260f5..214e71819f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-820802.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-820802.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-824681.js b/deps/v8/test/mjsunit/regress/wasm/regress-824681.js
index 18ca3d0b5d..9d712e6ddb 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-824681.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-824681.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let chain = Promise.resolve();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-827806.js b/deps/v8/test/mjsunit/regress/wasm/regress-827806.js
index c06e0fae96..8576de2e79 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-827806.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-827806.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
try {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-831463.js b/deps/v8/test/mjsunit/regress/wasm/regress-831463.js
index 65d1213dd0..2818ad350b 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-831463.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-831463.js
@@ -4,7 +4,6 @@
// Flags: --wasm-interpret-all
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-834619.js b/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
index 378e38e03c..145f415221 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
@@ -4,7 +4,6 @@
// Flags: --wasm-lazy-compilation
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function ExportedFunctionsImportedOrder() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-834624.js b/deps/v8/test/mjsunit/regress/wasm/regress-834624.js
index 9161f098e0..45af23cde2 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-834624.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-834624.js
@@ -4,7 +4,6 @@
// Flags: --wasm-interpret-all
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
let instance;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-834693.js b/deps/v8/test/mjsunit/regress/wasm/regress-834693.js
index dac0e8578d..ad51b2a400 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-834693.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-834693.js
@@ -4,7 +4,6 @@
// flags: --wasm-lazy-compilation
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var module = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-836141.js b/deps/v8/test/mjsunit/regress/wasm/regress-836141.js
index b37dbea628..5ac58042e4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-836141.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-836141.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-837417.js b/deps/v8/test/mjsunit/regress/wasm/regress-837417.js
index 9dcc299ecf..ef1d3cfaf6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-837417.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-837417.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-840757.js b/deps/v8/test/mjsunit/regress/wasm/regress-840757.js
index 0887401336..ad1cf9f64f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-840757.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-840757.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// Also enable predictable mode. Otherwise, concurrent recompilation will be
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-842501.js b/deps/v8/test/mjsunit/regress/wasm/regress-842501.js
index 83f5c9d4b8..d54507cc59 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-842501.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-842501.js
@@ -4,7 +4,6 @@
// Flags: --no-wasm-trap-handler
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-843563.js b/deps/v8/test/mjsunit/regress/wasm/regress-843563.js
index 8c18cfa7a9..ca22299254 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-843563.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-843563.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8505.js b/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
index ebc97a95b4..0488723e4f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --wasm-math-intrinsics --validate-asm --allow-natives-syntax
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
function verbose(args) {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8533.js b/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
index 5d782b747c..da5f44925a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
@@ -4,7 +4,6 @@
// Flags: --wasm-shared-engine --no-wasm-disable-structured-cloning --allow-natives-syntax --experimental-wasm-threads
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-854011.js b/deps/v8/test/mjsunit/regress/wasm/regress-854011.js
index 11863368f3..b0356a873f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-854011.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-854011.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-854050.js b/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
index e2146ca365..d6c4829acd 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-864509.js b/deps/v8/test/mjsunit/regress/wasm/regress-864509.js
index 822c06750a..19e3bfcfb8 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-864509.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-864509.js
@@ -4,7 +4,6 @@
// Flags: --liftoff --no-wasm-tier-up --no-future --wasm-tier-mask-for-testing=2
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-875556.js b/deps/v8/test/mjsunit/regress/wasm/regress-875556.js
index e1ea426f87..cc8bc5cc3e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-875556.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-875556.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --expose-wasm --experimental-wasm-mv
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8846.js b/deps/v8/test/mjsunit/regress/wasm/regress-8846.js
new file mode 100644
index 0000000000..e508572ecd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8846.js
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh --wasm-test-streaming
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestAsyncCompileExceptionSection() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction("thrw", kSig_v_v)
+ .addBody([
+ kExprThrow, except,
+ ]).exportFunc();
+ function step1(buffer) {
+ assertPromiseResult(WebAssembly.compile(buffer), module => step2(module));
+ }
+ function step2(module) {
+ assertPromiseResult(WebAssembly.instantiate(module), inst => step3(inst));
+ }
+ function step3(instance) {
+ assertThrows(() => instance.exports.thrw(), WebAssembly.RuntimeError);
+ }
+ step1(builder.toBuffer());
+})();
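
Note: the three promise-chained steps in the test above are equivalent to this async/await form (a sketch, not part of the patch; it assumes the same wasm-module-builder.js helpers and flags):

(async function TestAsyncCompileExceptionSectionAwait() {
  let builder = new WasmModuleBuilder();
  let except = builder.addException(kSig_v_v);
  builder.addFunction("thrw", kSig_v_v)
      .addBody([kExprThrow, except])
      .exportFunc();
  let module = await WebAssembly.compile(builder.toBuffer());
  let instance = await WebAssembly.instantiate(module);
  assertThrows(() => instance.exports.thrw(), WebAssembly.RuntimeError);
})();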
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8896.js b/deps/v8/test/mjsunit/regress/wasm/regress-8896.js
new file mode 100644
index 0000000000..51f1b27188
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8896.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestSerializeDeserializeRuntimeCall() {
+ var builder = new WasmModuleBuilder();
+ var except = builder.addException(kSig_v_v);
+ builder.addFunction("f", kSig_v_v)
+ .addBody([
+ kExprThrow, except,
+ ]).exportFunc();
+ var wire_bytes = builder.toBuffer();
+ var module = new WebAssembly.Module(wire_bytes);
+ var instance1 = new WebAssembly.Instance(module);
+ var serialized = %SerializeWasmModule(module);
+ module = %DeserializeWasmModule(serialized, wire_bytes);
+ var instance2 = new WebAssembly.Instance(module);
+ assertThrows(() => instance2.exports.f(), WebAssembly.RuntimeError);
+})();
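
Note: the test name suggests the regression was in code surviving a serialize/deserialize round trip. A stripped-down round trip without the exception machinery looks like this (a sketch, names `b` and `g` are illustrative; %SerializeWasmModule and %DeserializeWasmModule require --allow-natives-syntax, as in the flags above):

var b = new WasmModuleBuilder();
b.addFunction("g", kSig_i_v).addBody([kExprI32Const, 7]).exportFunc();
var bytes = b.toBuffer();
var mod = new WebAssembly.Module(bytes);
mod = %DeserializeWasmModule(%SerializeWasmModule(mod), bytes);
assertEquals(7, new WebAssembly.Instance(mod).exports.g());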
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-894307.js b/deps/v8/test/mjsunit/regress/wasm/regress-894307.js
index 5aef9eba86..f40388fcb4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-894307.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-894307.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-894374.js b/deps/v8/test/mjsunit/regress/wasm/regress-894374.js
index fb9cb3b4fe..02be0088cf 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-894374.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-894374.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-905815.js b/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
index 7967d99756..b49881f472 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-910824.js b/deps/v8/test/mjsunit/regress/wasm/regress-910824.js
index 7c8f154496..b795425b1f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-910824.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-910824.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-913804.js b/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
index c12013c9f8..e9d4026308 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-916869.js b/deps/v8/test/mjsunit/regress/wasm/regress-916869.js
index 6acd5d68d7..30bb011364 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-916869.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-916869.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917412.js b/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
index fd7ab99020..b74572ac8a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917588.js b/deps/v8/test/mjsunit/regress/wasm/regress-917588.js
index cb07bb5280..b07814415e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-917588.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917588.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js b/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
index 9d461cfd84..1e5c1a4488 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-918149.js b/deps/v8/test/mjsunit/regress/wasm/regress-918149.js
index f19a26d2a3..cc790301d8 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-918149.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-918149.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-918284.js b/deps/v8/test/mjsunit/regress/wasm/regress-918284.js
index 05614edf3c..dadbf3f7ea 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-918284.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-918284.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-918917.js b/deps/v8/test/mjsunit/regress/wasm/regress-918917.js
index 725287ae74..f007957c6f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-918917.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-918917.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-919308.js b/deps/v8/test/mjsunit/regress/wasm/regress-919308.js
index cb10662290..8c454413e8 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-919308.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-919308.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-919533.js b/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
index 58273f666b..706d3cc7f4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922432.js b/deps/v8/test/mjsunit/regress/wasm/regress-922432.js
index 8f1ad11ebc..f6175b3a63 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-922432.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922432.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-eh
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestTruncatedBrOnExnInLoop() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922670.js b/deps/v8/test/mjsunit/regress/wasm/regress-922670.js
index d5617df238..2988eddf30 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-922670.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922670.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922933.js b/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
index 3af0e86e1e..4d44509598 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-924843.js b/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
new file mode 100644
index 0000000000..0549a769fb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const sig = builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+builder.addFunction(undefined, sig)
+ .addBody([
+ kExprGetLocal, 2,
+ kExprIf, kWasmStmt,
+ kExprBlock, kWasmStmt
+ ]);
+builder.addExport('main', 0);
+assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
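
Note: the body above opens an `if` and a `block` but never emits the matching kExprEnd opcodes, so validation must reject the function; the assertion checks this surfaces as a CompileError rather than a crash. The same expectation on the async path could be written as (a sketch reusing the builder above; assertThrowsAsync is the mjsunit helper used elsewhere in this patch):

assertThrowsAsync(WebAssembly.compile(builder.toBuffer()), WebAssembly.CompileError);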
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-924905.js b/deps/v8/test/mjsunit/regress/wasm/regress-924905.js
index 5db3583e4c..9dbdf7e299 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-924905.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-924905.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-925671.js b/deps/v8/test/mjsunit/regress/wasm/regress-925671.js
new file mode 100644
index 0000000000..c6113c272e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-925671.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-tier-mask-for-testing=1
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder.addFunction('f0', kSig_v_v).addBody([]);
+builder.addFunction('f1', kSig_v_v).addBody([]);
+builder.instantiate();
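
Note: --wasm-tier-mask-for-testing appears to select the compilation tier per function index bit (compare regress-864509.js above, which combines it with --liftoff), so the two empty functions deliberately take different compiler paths in one module. A variant that also runs the generated code (a sketch, `b` is illustrative):

var b = new WasmModuleBuilder();
b.addFunction('f0', kSig_v_v).addBody([]).exportFunc();
b.addFunction('f1', kSig_v_v).addBody([]).exportFunc();
var instance = b.instantiate();
instance.exports.f0();
instance.exports.f1();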
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-935138.js b/deps/v8/test/mjsunit/regress/wasm/regress-935138.js
new file mode 100644
index 0000000000..e0fe535e27
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-935138.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-test-streaming
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestAsyncCompileMultipleCodeSections() {
+ let binary = new Binary();
+ binary.emit_header();
+ binary.push(kTypeSectionCode, 4, 1, kWasmFunctionTypeForm, 0, 0);
+ binary.push(kFunctionSectionCode, 2, 1, 0);
+ binary.push(kCodeSectionCode, 6, 1, 4, 0, kExprGetLocal, 0, kExprEnd);
+ binary.push(kCodeSectionCode, 6, 1, 4, 0, kExprGetLocal, 0, kExprEnd);
+ let buffer = Uint8Array.from(binary).buffer;
+ assertPromiseResult(WebAssembly.compile(buffer), assertUnreachable,
+ e => assertInstanceof(e, WebAssembly.CompileError));
+})();
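
Note: a wasm module may contain at most one section of each non-custom kind, so the duplicate code section must fail compilation; --wasm-test-streaming routes this through the streaming decoder that regressed. The same check written with assertThrowsAsync (a sketch, test name illustrative):

(function TestDuplicateCodeSectionAsync() {
  let binary = new Binary();
  binary.emit_header();
  binary.push(kTypeSectionCode, 4, 1, kWasmFunctionTypeForm, 0, 0);
  binary.push(kFunctionSectionCode, 2, 1, 0);
  binary.push(kCodeSectionCode, 6, 1, 4, 0, kExprGetLocal, 0, kExprEnd);
  binary.push(kCodeSectionCode, 6, 1, 4, 0, kExprGetLocal, 0, kExprEnd);
  assertThrowsAsync(WebAssembly.compile(Uint8Array.from(binary).buffer),
                    WebAssembly.CompileError);
})();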
diff --git a/deps/v8/test/mjsunit/string-external-cached.js b/deps/v8/test/mjsunit/string-external-cached.js
index cd368f660a..dd8a99382b 100644
--- a/deps/v8/test/mjsunit/string-external-cached.js
+++ b/deps/v8/test/mjsunit/string-external-cached.js
@@ -53,7 +53,7 @@ function test() {
assertEquals('B', charat_str[i].charAt(3*16 + 11));
}
- charat_short = "012";
+ charat_short = "01234";
try { // String can only be externalized once
externalizeString(charat_short, true);
} catch (ex) { }
diff --git a/deps/v8/test/mjsunit/string-externalize.js b/deps/v8/test/mjsunit/string-externalize.js
index dd861e8816..22840c264a 100644
--- a/deps/v8/test/mjsunit/string-externalize.js
+++ b/deps/v8/test/mjsunit/string-externalize.js
@@ -45,7 +45,7 @@ function test() {
assertTrue(isOneByteString(str));
var twoByteExternalWithOneByteData =
- "AA" + dont_inline();
+ "AAAA" + dont_inline();
externalizeString(twoByteExternalWithOneByteData, true /* force two-byte */);
assertFalse(isOneByteString(twoByteExternalWithOneByteData));
@@ -54,12 +54,10 @@ function test() {
externalizeString(realTwoByteExternalString);
assertFalse(isOneByteString(realTwoByteExternalString));
- assertTrue(isOneByteString(["a", twoByteExternalWithOneByteData].join("")));
+ assertFalse(isOneByteString(["a", twoByteExternalWithOneByteData].join("")));
- // Appending a two-byte string that contains only ascii chars should
- // still produce an ascii cons.
var str1 = str + twoByteExternalWithOneByteData;
- assertTrue(isOneByteString(str1));
+ assertFalse(isOneByteString(str1));
// Force flattening of the string.
var old_length = str1.length - twoByteExternalWithOneByteData.length;
@@ -70,11 +68,11 @@ function test() {
assertEquals("A", str1[i]);
}
- // Flattened string should still be ascii.
- assertTrue(isOneByteString(str1));
+ // Flattened string should still be two-byte.
+ assertFalse(isOneByteString(str1));
- // Lower-casing an ascii string should produce ascii.
- assertTrue(isOneByteString(str1.toLowerCase()));
+ // Lower-casing an ascii string should produce two-byte.
+ assertFalse(isOneByteString(str1.toLowerCase()));
assertFalse(isOneByteString(["a", realTwoByteExternalString].join("")));
diff --git a/deps/v8/test/mjsunit/switch.js b/deps/v8/test/mjsunit/switch.js
index 4722e9e5d8..4b27789ad9 100644
--- a/deps/v8/test/mjsunit/switch.js
+++ b/deps/v8/test/mjsunit/switch.js
@@ -515,3 +515,13 @@ test_switches(true);
}
assertEquals(1, i);
})();
+
+assertThrows(function() {
+ function f(){}
+ // The f()-- unconditionally throws a ReferenceError.
+ switch(f()--) {
+ // This label is dead.
+ default:
+ break;
+ }
+}, ReferenceError);
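
Note: the discriminant f()-- is a postfix update applied to a call expression; V8 parses it but throws a ReferenceError when the expression is evaluated, so the switch body, including the default label, is dead. The behavior does not depend on the switch (a sketch, `g` is illustrative):

assertThrows(function() {
  function g() {}
  g()--;  // Throws before any value is produced.
}, ReferenceError);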
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index 901d8e90a4..f95c33e7da 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -56,24 +56,19 @@ COMBINE_TESTS_FLAGS_BLACKLIST = [
'--wasm-lazy-compilation',
]
+
+class TestLoader(testsuite.JSTestLoader):
+ @property
+ def excluded_files(self):
+ return {
+ "mjsunit.js",
+ "mjsunit_suppressions.js",
+ }
+
+
class TestSuite(testsuite.TestSuite):
- def ListTests(self):
- tests = []
- for dirname, dirs, files in os.walk(self.root, followlinks=True):
- for dotted in [x for x in dirs if x.startswith('.')]:
- dirs.remove(dotted)
- dirs.sort()
- files.sort()
- for filename in files:
- if (filename.endswith(".js") and
- filename != "mjsunit.js" and
- filename != "mjsunit_suppressions.js"):
- fullpath = os.path.join(dirname, filename)
- relpath = fullpath[len(self.root) + 1 : -3]
- testname = relpath.replace(os.path.sep, "/")
- test = self._create_test(testname)
- tests.append(test)
- return tests
+ def _test_loader_class(self):
+ return TestLoader
def _test_combiner_class(self):
return TestCombiner
diff --git a/deps/v8/test/mjsunit/tools/compiler-trace-flags.js b/deps/v8/test/mjsunit/tools/compiler-trace-flags.js
index 491aad8c1e..ea6f816f5d 100644
--- a/deps/v8/test/mjsunit/tools/compiler-trace-flags.js
+++ b/deps/v8/test/mjsunit/tools/compiler-trace-flags.js
@@ -6,7 +6,6 @@
// Flags: --trace-turbo-cfg-file=test/mjsunit/tools/turbo.cfg
// Flags: --trace-turbo-path=test/mjsunit/tools
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// The idea behind this test is to make sure we do not crash when using the
diff --git a/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js b/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js
new file mode 100644
index 0000000000..d2a21288ab
--- /dev/null
+++ b/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// crbug.com/923466
+__v_5 = [ -1073741825, -2147483648];
+__v_5.sort();
+
+// crbug.com/923642
+new RegExp("(abcd){2148473648,}", "");
+
+// crbug.com/923626
+new Date(2146399200000).toString();
+new Date(2146940400000).toString();
+new Date(2147481600000).toString();
+new Date(2148022800000).toString();
+
+// crbug.com/927212
+assertThrows(() => (2n).toString(-2147483657), RangeError);
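
Note: the last case documents the valid radix range for BigInt.prototype.toString: the radix must be an integer from 2 to 36, and the huge negative value here previously overflowed a signed 32-bit conversion under UBSan. For contrast (a sketch):

assertEquals("ff", (255n).toString(16));
assertThrows(() => (2n).toString(1), RangeError);
assertThrows(() => (2n).toString(37), RangeError);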
diff --git a/deps/v8/test/mjsunit/wasm/adapter-frame.js b/deps/v8/test/mjsunit/wasm/adapter-frame.js
index 284adb993f..55634163c6 100644
--- a/deps/v8/test/mjsunit/wasm/adapter-frame.js
+++ b/deps/v8/test/mjsunit/wasm/adapter-frame.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const JS = false; // for testing the tests.
diff --git a/deps/v8/test/mjsunit/wasm/add-getters.js b/deps/v8/test/mjsunit/wasm/add-getters.js
index ca5783a7f9..fc5c5d2ff2 100644
--- a/deps/v8/test/mjsunit/wasm/add-getters.js
+++ b/deps/v8/test/mjsunit/wasm/add-getters.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function testAddGetter(object, name, val) {
diff --git a/deps/v8/test/mjsunit/wasm/anyfunc.js b/deps/v8/test/mjsunit/wasm/anyfunc.js
index 30faef12a7..19415fe2b8 100644
--- a/deps/v8/test/mjsunit/wasm/anyfunc.js
+++ b/deps/v8/test/mjsunit/wasm/anyfunc.js
@@ -4,10 +4,9 @@
// Flags: --expose-wasm --experimental-wasm-anyref --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-(function testAnyRefIdentityFunction() {
+(function testAnyFuncIdentityFunction() {
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_a_a)
@@ -23,7 +22,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
instance.exports.main, instance.exports.main(instance.exports.main));
})();
-(function testPassAnyRefToImportedFunction() {
+(function testPassAnyFuncToImportedFunction() {
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
const sig_index = builder.addType(kSig_v_a);
@@ -41,3 +40,160 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
main(main);
})();
+
+(function testPassAnyFuncWithGCWithLocals() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const ref_sig = builder.addType(kSig_v_a);
+ const void_sig = builder.addType(kSig_v_v);
+ const imp_index = builder.addImport("q", "func", ref_sig);
+ const gc_index = builder.addImport("q", "gc", void_sig);
+ // First call the gc, then check if the object still exists.
+ builder.addFunction('main', ref_sig)
+ .addLocals({anyfunc_count: 10})
+ .addBody([
+ kExprGetLocal, 0, kExprSetLocal, 1, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 2, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 3, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 4, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 5, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 6, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 7, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 8, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 9, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 10, // Set local
+ kExprCallFunction, gc_index, // call gc
+ kExprGetLocal, 9, kExprCallFunction, imp_index // call import
+ ])
+ .exportFunc();
+
+ const main =
+ builder.instantiate({q: {func: checkFunction, gc: gc}}).exports.main;
+
+ function checkFunction(value) {
+ assertSame(main, value);
+ }
+
+ main(main);
+})();
+
+(function testPassAnyFuncWithGC() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const ref_sig = builder.addType(kSig_v_a);
+ const void_sig = builder.addType(kSig_v_v);
+ const imp_index = builder.addImport("q", "func", ref_sig);
+ const gc_index = builder.addImport("q", "gc", void_sig);
+ // First call the gc, then check if the object still exists.
+ builder.addFunction('main', ref_sig)
+ .addBody([
+ kExprCallFunction, gc_index, // call gc
+ kExprGetLocal, 0, kExprCallFunction, imp_index // call import
+ ])
+ .exportFunc();
+
+ function checkFunction(value) {
+ assertSame(main, value);
+ }
+
+ const main = builder.instantiate({q: {func: checkFunction, gc: gc}}).exports.main;
+
+ main(main);
+})();
+
+(function testPassAnyFuncWithGCInWrapper() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const kSig_a_iai = makeSig([kWasmI32, kWasmAnyFunc, kWasmI32], [kWasmAnyFunc]);
+ const sig_index = builder.addType(kSig_a_iai);
+ builder.addFunction('main', sig_index)
+ .addBody([kExprGetLocal, 1])
+ .exportFunc();
+
+ const main = builder.instantiate().exports.main;
+
+ const triggerGCParam = {
+ valueOf: () => {
+ gc();
+ return 17;
+ }
+ };
+
+ const result = main(triggerGCParam, main, triggerGCParam);
+ assertSame(main, result);
+})();
+
+(function testAnyFuncDefaultValue() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const sig_index = builder.addType(kSig_a_v);
+ builder.addFunction('main', sig_index)
+ .addLocals({anyfunc_count: 1})
+ .addBody([kExprGetLocal, 0])
+ .exportFunc();
+
+ const main = builder.instantiate().exports.main;
+ assertEquals(null, main());
+})();
+
+(function testAssignNullRefToAnyFuncLocal() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const sig_index = builder.addType(kSig_a_a);
+ builder.addFunction('main', sig_index)
+ .addBody([kExprRefNull, kExprSetLocal, 0, kExprGetLocal, 0])
+ .exportFunc();
+
+ const main = builder.instantiate().exports.main;
+ assertEquals(null, main(main));
+})();
+
+(function testImplicitReturnNullRefAsAnyFunc() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const sig_index = builder.addType(kSig_a_v);
+ builder.addFunction('main', sig_index)
+ .addBody([kExprRefNull])
+ .exportFunc();
+
+ const main = builder.instantiate().exports.main;
+ assertEquals(null, main());
+})();
+
+(function testExplicitReturnNullRefAsAnyFunc() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const sig_index = builder.addType(kSig_a_v);
+ builder.addFunction('main', sig_index)
+ .addBody([kExprRefNull, kExprReturn])
+ .exportFunc();
+
+ const main = builder.instantiate().exports.main;
+ assertEquals(null, main());
+})();
+
+(function testImplicitReturnAnyFuncAsAnyRef() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const sig_index = builder.addType(kSig_r_v);
+ builder.addFunction('main', sig_index)
+ .addLocals({anyfunc_count: 1})
+ .addBody([kExprGetLocal, 0])
+ .exportFunc();
+
+ const main = builder.instantiate().exports.main;
+ assertEquals(null, main());
+})();
+
+(function testExplicitReturnAnyFuncAsAnyRef() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const sig_index = builder.addType(kSig_r_v);
+ builder.addFunction('main', sig_index)
+ .addLocals({anyfunc_count: 1})
+ .addBody([kExprGetLocal, 0, kExprReturn])
+ .exportFunc();
+
+ const main = builder.instantiate().exports.main;
+ assertEquals(null, main());
+})();
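
Note: the new cases above rely on two properties of anyfunc values at the JS boundary: exported wasm functions pass through unchanged, and null is a valid anyfunc. A compact identity check covering both (a sketch with the same helpers; `id` is illustrative):

(function testAnyFuncIdentityAcceptsNull() {
  print(arguments.callee.name);
  const builder = new WasmModuleBuilder();
  builder.addFunction('id', kSig_a_a)
      .addBody([kExprGetLocal, 0])
      .exportFunc();
  const id = builder.instantiate().exports.id;
  assertSame(id, id(id));
  assertEquals(null, id(null));
})();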
diff --git a/deps/v8/test/mjsunit/wasm/anyref-globals.js b/deps/v8/test/mjsunit/wasm/anyref-globals.js
index 64f56366e0..f1facefb28 100644
--- a/deps/v8/test/mjsunit/wasm/anyref-globals.js
+++ b/deps/v8/test/mjsunit/wasm/anyref-globals.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-anyref --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestDefaultValue() {
diff --git a/deps/v8/test/mjsunit/wasm/anyref.js b/deps/v8/test/mjsunit/wasm/anyref.js
index 2c045712f1..141d25d1e3 100644
--- a/deps/v8/test/mjsunit/wasm/anyref.js
+++ b/deps/v8/test/mjsunit/wasm/anyref.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --experimental-wasm-anyref --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function testAnyRefIdentityFunction() {
@@ -104,6 +103,42 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
instance.exports.main({hello: 'world'});
})();
+(function testPassAnyRefWithGCWithStackParameters() {
+ print(arguments.callee.name);
+ const num_params = 15;
+ for (let index = 0; index < num_params; index++) {
+ const builder = new WasmModuleBuilder();
+ // Make a signature with {num_params} many anyref parameters.
+ const mysig = makeSig(Array(num_params).fill(kWasmAnyRef), []);
+ const main_sig = builder.addType(mysig);
+ const ref_sig = builder.addType(kSig_v_r);
+ const void_sig = builder.addType(kSig_v_v);
+ const imp_index = builder.addImport('q', 'func', ref_sig);
+ const gc_index = builder.addImport('q', 'gc', void_sig);
+ // First call the gc, then check if the object still exists.
+ builder.addFunction('main', main_sig)
+ .addBody([
+ kExprCallFunction, gc_index, // call gc
+ kExprGetLocal, index, kExprCallFunction, imp_index // call import
+ ])
+ .exportFunc();
+
+ function checkFunction(value) {
+ assertEquals(index, value.hello);
+ }
+
+ const instance = builder.instantiate({q: {func: checkFunction, gc: gc}});
+
+ // Pass {num_params} many parameters to main. Note that it is important
+ // that no other references to these objects exist. They are kept alive
+ // only through references stored in the parameters slots of a stack frame.
+ instance.exports.main(
+ {hello: 0}, {hello: 1}, {hello: 2}, {hello: 3}, {hello: 4}, {hello: 5},
+ {hello: 6}, {hello: 7}, {hello: 8}, {hello: 9}, {hello: 10},
+ {hello: 11}, {hello: 12}, {hello: 13}, {hello: 14});
+ }
+})();
+
(function testPassAnyRefWithGCInWrapper() {
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
@@ -181,3 +216,27 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(null, instance.exports.main());
})();
+
+(function testImplicitReturnNullRefAsAnyRef() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const sig_index = builder.addType(kSig_r_v);
+ builder.addFunction('main', sig_index)
+ .addBody([kExprRefNull])
+ .exportFunc();
+
+ const main = builder.instantiate().exports.main;
+ assertEquals(null, main());
+})();
+
+(function testExplicitReturnNullRefAsAnyRef() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const sig_index = builder.addType(kSig_r_v);
+ builder.addFunction('main', sig_index)
+ .addBody([kExprRefNull, kExprReturn])
+ .exportFunc();
+
+ const main = builder.instantiate().exports.main;
+ assertEquals(null, main());
+})();
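
The stack-parameter test added above relies on the wasm parameter slots being the only references keeping the objects alive across the gc() call. A reduced sketch of the same pattern with 3 instead of 15 anyref parameters (hypothetical reduction; helpers as defined by wasm-module-builder.js, gc() from --expose-gc, anyref behind --experimental-wasm-anyref):

    const builder = new WasmModuleBuilder();
    const main_sig = builder.addType(makeSig(Array(3).fill(kWasmAnyRef), []));
    const check = builder.addImport('q', 'check', builder.addType(kSig_v_r));
    const do_gc = builder.addImport('q', 'gc', builder.addType(kSig_v_v));
    builder.addFunction('main', main_sig)
        .addBody([
          kExprCallFunction, do_gc,                    // collect garbage first
          kExprGetLocal, 1, kExprCallFunction, check,  // then read a parameter slot
        ])
        .exportFunc();
    const instance = builder.instantiate(
        {q: {check: v => assertEquals(1, v.hello), gc: gc}});
    instance.exports.main({hello: 0}, {hello: 1}, {hello: 2});
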
diff --git a/deps/v8/test/mjsunit/wasm/async-compile.js b/deps/v8/test/mjsunit/wasm/async-compile.js
index 122eccbe96..a5e7c499bd 100644
--- a/deps/v8/test/mjsunit/wasm/async-compile.js
+++ b/deps/v8/test/mjsunit/wasm/async-compile.js
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --allow-natives-syntax
-
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
async function assertCompiles(buffer) {
@@ -12,13 +9,11 @@ async function assertCompiles(buffer) {
assertInstanceof(module, WebAssembly.Module);
}
-async function assertCompileError(buffer) {
- try {
- await WebAssembly.compile(buffer);
- assertUnreachable();
- } catch (e) {
- if (!(e instanceof WebAssembly.CompileError)) throw e;
- }
+function assertCompileError(buffer, msg) {
+ assertEquals('string', typeof msg);
+ return assertThrowsAsync(
+ WebAssembly.compile(buffer), WebAssembly.CompileError,
+ 'WebAssembly.compile(): ' + msg);
}
assertPromiseResult(async function basicCompile() {
@@ -50,7 +45,7 @@ assertPromiseResult(async function basicCompile() {
// Three compilations of the bad module should fail.
for (var i = 0; i < kNumCompiles; i++) {
- await assertCompileError(bad_buffer);
+ await assertCompileError(bad_buffer, 'BufferSource argument is empty');
}
}());
@@ -69,7 +64,10 @@ assertPromiseResult(async function badFunctionInTheMiddle() {
builder.addFunction('b' + i, sig).addBody([kExprI32Const, 42]);
}
let buffer = builder.toBuffer();
- await assertCompileError(buffer);
+ await assertCompileError(
+ buffer,
+ 'Compiling wasm function \"bad\" failed: ' +
+ 'expected 1 elements on the stack for fallthru to @1, found 0 @+94');
}());
assertPromiseResult(async function importWithoutCode() {
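
The rewrite above swaps a hand-rolled try/catch around await for assertThrowsAsync, which additionally pins the exact rejection message. Roughly what such a helper does (a sketch of the semantics, not the actual mjsunit implementation):

    async function expectRejection(promise, type, msg) {
      try {
        await promise;
        assertUnreachable('promise should have been rejected');
      } catch (e) {
        assertInstanceof(e, type);
        if (msg !== undefined) assertEquals(msg, e.message);
      }
    }
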
diff --git a/deps/v8/test/mjsunit/wasm/atomics-stress.js b/deps/v8/test/mjsunit/wasm/atomics-stress.js
index b832fad8fe..3d360373f0 100644
--- a/deps/v8/test/mjsunit/wasm/atomics-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics-stress.js
@@ -9,7 +9,6 @@
// Note that results of this test are flaky by design. While the test is
// deterministic with a fixed seed, bugs may introduce non-determinism.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const kDebug = false;
diff --git a/deps/v8/test/mjsunit/wasm/atomics.js b/deps/v8/test/mjsunit/wasm/atomics.js
index 371839ae24..08714bbc01 100644
--- a/deps/v8/test/mjsunit/wasm/atomics.js
+++ b/deps/v8/test/mjsunit/wasm/atomics.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-threads
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const kMemtypeSize32 = 4;
diff --git a/deps/v8/test/mjsunit/wasm/atomics64-stress.js b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
index e0ce2324ea..f85c19a970 100644
--- a/deps/v8/test/mjsunit/wasm/atomics64-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
@@ -9,7 +9,6 @@
// Note that results of this test are flaky by design. While the test is
// deterministic with a fixed seed, bugs may introduce non-determinism.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const kDebug = false;
diff --git a/deps/v8/test/mjsunit/wasm/bigint.js b/deps/v8/test/mjsunit/wasm/bigint.js
index cb761acab1..49a1ead6b8 100644
--- a/deps/v8/test/mjsunit/wasm/bigint.js
+++ b/deps/v8/test/mjsunit/wasm/bigint.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-bigint
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestWasmI64ToJSBigInt() {
diff --git a/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js b/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
index d909bcc542..43ff8570c6 100644
--- a/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
+++ b/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js b/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
index 7e260eab08..d972e7830d 100644
--- a/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
+++ b/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory.js b/deps/v8/test/mjsunit/wasm/bulk-memory.js
index c9aefb774c..0e59965085 100644
--- a/deps/v8/test/mjsunit/wasm/bulk-memory.js
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-bulk-memory
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestPassiveDataSegment() {
@@ -47,8 +46,8 @@ function getMemoryInit(mem, segment_data) {
kExprGetLocal, 1, // Source.
kExprGetLocal, 2, // Size in bytes.
kNumericPrefix, kExprMemoryInit,
- 0, // Memory index.
0, // Data segment index.
+ 0, // Memory index.
])
.exportAs('init');
return builder.instantiate({'': {mem}}).exports.init;
@@ -116,8 +115,8 @@ function getMemoryInit(mem, segment_data) {
kExprI32Const, 0, // Source.
kExprI32Const, 0, // Size in bytes.
kNumericPrefix, kExprMemoryInit,
- 0, // Memory index.
1, // Data segment index.
+ 0, // Memory index.
])
.exportAs('init');
@@ -138,13 +137,13 @@ function getMemoryInit(mem, segment_data) {
kExprI32Const, 0, // Source.
kExprI32Const, 0, // Size in bytes.
kNumericPrefix, kExprMemoryInit,
- 0, // Memory index.
0, // Data segment index.
+ 0, // Memory index.
])
.exportAs('init');
builder.addFunction('drop', kSig_v_v)
.addBody([
- kNumericPrefix, kExprMemoryDrop,
+ kNumericPrefix, kExprDataDrop,
0, // Data segment index.
])
.exportAs('drop');
@@ -163,14 +162,14 @@ function getMemoryInit(mem, segment_data) {
assertTraps(kTrapDataSegmentDropped, () => instance.exports.drop());
})();
-(function TestMemoryDropOnActiveSegment() {
+(function TestDataDropOnActiveSegment() {
const builder = new WasmModuleBuilder();
builder.addMemory(1);
builder.addPassiveDataSegment([1, 2, 3]);
builder.addDataSegment(0, [4, 5, 6]);
builder.addFunction('drop', kSig_v_v)
.addBody([
- kNumericPrefix, kExprMemoryDrop,
+ kNumericPrefix, kExprDataDrop,
1, // Data segment index.
])
.exportAs('drop');
@@ -186,7 +185,7 @@ function getMemoryCopy(mem) {
kExprGetLocal, 0, // Dest.
kExprGetLocal, 1, // Source.
kExprGetLocal, 2, // Size in bytes.
- kNumericPrefix, kExprMemoryCopy, 0,
+ kNumericPrefix, kExprMemoryCopy, 0, 0,
]).exportAs("copy");
return builder.instantiate({'': {mem}}).exports.copy;
}
@@ -321,35 +320,13 @@ function getMemoryFill(mem) {
kTrapMemOutOfBounds, () => memoryFill(kPageSize + 1, v, kPageSize));
})();
-(function TestTableInit0() {
- let builder = new WasmModuleBuilder();
- let sig_v_iii = builder.addType(kSig_v_iii);
-
- builder.setTableBounds(5, 5);
- builder.addElementSegment(0, false, []);
- builder.addElementSegment(0, false, []);
-
- builder.addFunction("init0", sig_v_iii)
- .addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kNumericPrefix, kExprTableInit, kTableZero, kSegmentZero])
- .exportAs("init0");
-
- let instance = builder.instantiate();
- let init = instance.exports.init0;
- // TODO(titzer): we only check that a function containing TableInit can be compiled.
- // init(1, 2, 3);
-})();
-
-(function TestTableDropActive() {
+(function TestElemDropActive() {
const builder = new WasmModuleBuilder();
builder.setTableBounds(5, 5);
builder.addElementSegment(0, false, [0, 0, 0]);
builder.addFunction('drop', kSig_v_v)
.addBody([
- kNumericPrefix, kExprTableDrop,
+ kNumericPrefix, kExprElemDrop,
0, // Element segment index.
])
.exportAs('drop');
@@ -358,13 +335,13 @@ function getMemoryFill(mem) {
assertTraps(kTrapElemSegmentDropped, () => instance.exports.drop());
})();
-(function TestTableDropTwice() {
+(function TestElemDropTwice() {
const builder = new WasmModuleBuilder();
builder.setTableBounds(5, 5);
builder.addPassiveElementSegment([0, 0, 0]);
builder.addFunction('drop', kSig_v_v)
.addBody([
- kNumericPrefix, kExprTableDrop,
+ kNumericPrefix, kExprElemDrop,
0, // Element segment index.
])
.exportAs('drop');
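
The bulk-memory hunks above track several binary-format updates: memory.init now encodes the data segment index before the memory index, memory.copy carries two memory-index immediates (destination and source), and the drop opcodes are renamed to data.drop and elem.drop. The new memory.init encoding in builder form (a sketch using the same constants as the test; requires --experimental-wasm-bulk-memory):

    const builder = new WasmModuleBuilder();
    builder.addMemory(1);
    builder.addPassiveDataSegment([1, 2, 3]);
    builder.addFunction('init', kSig_v_iii)
        .addBody([
          kExprGetLocal, 0,                 // destination offset in memory
          kExprGetLocal, 1,                 // source offset in the segment
          kExprGetLocal, 2,                 // size in bytes
          kNumericPrefix, kExprMemoryInit,
          0,                                // data segment index (now first)
          0,                                // memory index
        ])
        .exportAs('init');
    builder.instantiate().exports.init(0, 0, 3);  // copy the whole segment to offset 0
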
diff --git a/deps/v8/test/mjsunit/wasm/calls.js b/deps/v8/test/mjsunit/wasm/calls.js
index b0feda8c80..97188964d1 100644
--- a/deps/v8/test/mjsunit/wasm/calls.js
+++ b/deps/v8/test/mjsunit/wasm/calls.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function assertModule(module, memsize) {
diff --git a/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js b/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js
index 7e8bcde6af..6f4698c0d0 100644
--- a/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js
+++ b/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js
@@ -4,7 +4,6 @@
// Flags: --wasm-max-code-space=1
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// We only have 1 MB code space. This is enough for the code below, but for all
diff --git a/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js b/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
index d308919088..5102216933 100644
--- a/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
+++ b/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-threads
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const kSequenceLength = 8192;
diff --git a/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js b/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
index 89cc7ecb34..bd1c5c95a5 100644
--- a/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
+++ b/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-threads
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const kSequenceLength = 8192;
diff --git a/deps/v8/test/mjsunit/wasm/compilation-limits.js b/deps/v8/test/mjsunit/wasm/compilation-limits.js
index 2b79de008f..2fe2d32b71 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-limits.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-limits.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
%SetWasmCompileControls(100000, true);
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-management.js b/deps/v8/test/mjsunit/wasm/compiled-module-management.js
index 19446403ab..a2b102f083 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-management.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-management.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --expose-gc --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Use global variables for all values where the test wants to maintain strict
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
index 8a9a0cbd10..9c28a7746d 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --allow-natives-syntax --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function SerializeAndDeserializeModule() {
diff --git a/deps/v8/test/mjsunit/wasm/data-segments.js b/deps/v8/test/mjsunit/wasm/data-segments.js
index e73e3fb3a7..91b6525537 100644
--- a/deps/v8/test/mjsunit/wasm/data-segments.js
+++ b/deps/v8/test/mjsunit/wasm/data-segments.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var debug = false;
diff --git a/deps/v8/test/mjsunit/wasm/disallow-codegen.js b/deps/v8/test/mjsunit/wasm/disallow-codegen.js
index f1303e845f..c1dfb8a6e3 100644
--- a/deps/v8/test/mjsunit/wasm/disallow-codegen.js
+++ b/deps/v8/test/mjsunit/wasm/disallow-codegen.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
let kReturnValue = 19;
diff --git a/deps/v8/test/mjsunit/wasm/divrem-trap.js b/deps/v8/test/mjsunit/wasm/divrem-trap.js
index d78e170901..d9a23693f0 100644
--- a/deps/v8/test/mjsunit/wasm/divrem-trap.js
+++ b/deps/v8/test/mjsunit/wasm/divrem-trap.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --expose-gc --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var assertTraps = function(messageId, code) {
diff --git a/deps/v8/test/mjsunit/wasm/empirical_max_memory.js b/deps/v8/test/mjsunit/wasm/empirical_max_memory.js
index 262dfe10ef..e2ff7ca517 100644
--- a/deps/v8/test/mjsunit/wasm/empirical_max_memory.js
+++ b/deps/v8/test/mjsunit/wasm/empirical_max_memory.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let k1MiB = 1 * 1024 * 1024;
diff --git a/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js b/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
index 26bdf8ece8..9f7a7f71d2 100644
--- a/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
+++ b/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
@@ -10,7 +10,6 @@
(function print_incrementer() {
if (true) return; // remove to regenerate the module
- load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var module = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/errors.js b/deps/v8/test/mjsunit/wasm/errors.js
index 744ba9bbdb..9e3f683490 100644
--- a/deps/v8/test/mjsunit/wasm/errors.js
+++ b/deps/v8/test/mjsunit/wasm/errors.js
@@ -4,162 +4,159 @@
// Flags: --expose-wasm --allow-natives-syntax
-'use strict';
-
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-function module(bytes) {
- let buffer = bytes;
- if (typeof buffer === 'string') {
- buffer = new ArrayBuffer(bytes.length);
- let view = new Uint8Array(buffer);
- for (let i = 0; i < bytes.length; ++i) {
- view[i] = bytes.charCodeAt(i);
- }
- }
- return new WebAssembly.Module(buffer);
-}
-
-function instance(bytes, imports = {}) {
- return new WebAssembly.Instance(module(bytes), imports);
-}
-
-// instantiate should succeed but run should fail.
-function instantiateAndFailAtRuntime(bytes, imports = {}) {
- var instance =
- assertDoesNotThrow(new WebAssembly.Instance(module(bytes), imports));
- instance.exports.run();
-}
-
function builder() {
return new WasmModuleBuilder;
}
function assertCompileError(bytes, msg) {
- assertThrows(() => module(bytes), WebAssembly.CompileError, msg);
+ assertThrows(
+ () => new WebAssembly.Module(bytes), WebAssembly.CompileError,
+ 'WebAssembly.Module(): ' + msg);
+ assertThrowsAsync(
+ WebAssembly.compile(bytes), WebAssembly.CompileError,
+ 'WebAssembly.compile(): ' + msg);
+}
+
+function assertInstantiateError(error, bytes, imports = {}, msg) {
+ assertThrows(
+ () => new WebAssembly.Instance(new WebAssembly.Module(bytes), imports),
+ error, 'WebAssembly.Instance(): ' + msg);
+ assertThrowsAsync(
+ WebAssembly.instantiate(bytes, imports), error,
+ 'WebAssembly.instantiate(): ' + msg);
}
// default imports to {} so we get LinkError by default, thus allowing us to
// distinguish the TypeError we want to catch
function assertTypeError(bytes, imports = {}, msg) {
- assertThrows(() => instance(bytes, imports), TypeError, msg);
+ assertInstantiateError(TypeError, bytes, imports, msg);
}
function assertLinkError(bytes, imports, msg) {
- assertThrows(() => instance(bytes, imports), WebAssembly.LinkError, msg);
-}
-
-function assertRuntimeError(bytes, imports, msg) {
- assertThrows(
- () => instantiateAndFailAtRuntime(bytes, imports),
- WebAssembly.RuntimeError, msg);
+ assertInstantiateError(WebAssembly.LinkError, bytes, imports, msg);
}
function assertConversionError(bytes, imports, msg) {
- assertThrows(
- () => instantiateAndFailAtRuntime(bytes, imports), TypeError, msg);
+ let instance =
+ new WebAssembly.Instance(new WebAssembly.Module(bytes), imports);
+ assertThrows(() => instance.exports.run(), TypeError, msg);
}
(function TestDecodingError() {
- assertCompileError("", /is empty/);
- assertCompileError("X", /expected 4 bytes, fell off end @\+0/);
+ print(arguments.callee.name);
+ assertCompileError(bytes(), 'BufferSource argument is empty');
+ assertCompileError(bytes('X'), 'expected 4 bytes, fell off end @+0');
assertCompileError(
- "\0x00asm", /expected magic word 00 61 73 6d, found 00 78 30 30 @\+0/);
+ bytes('\0x00asm'),
+ 'expected magic word 00 61 73 6d, found 00 78 30 30 @+0');
})();
(function TestValidationError() {
+ print(arguments.callee.name);
+ let f_error = msg => 'Compiling wasm function "f" failed: ' + msg;
assertCompileError(
- builder().addFunction("f", kSig_i_v).end().toBuffer(),
- /function body must end with "end" opcode @/);
+ builder().addFunction('f', kSig_i_v).end().toBuffer(),
+ f_error('function body must end with "end" opcode @+24'));
assertCompileError(
builder().addFunction('f', kSig_i_v).addBody([kExprReturn])
.end().toBuffer(),
- /expected 1 elements on the stack for return, found 0 @/);
- assertCompileError(builder().addFunction("f", kSig_v_v).addBody([
+ f_error('expected 1 elements on the stack for return, found 0 @+24'));
+ assertCompileError(builder().addFunction('f', kSig_v_v).addBody([
kExprGetLocal, 0
- ]).end().toBuffer(), /invalid local index: 0 @/);
+ ]).end().toBuffer(), f_error('invalid local index: 0 @+24'));
assertCompileError(
- builder().addStart(0).toBuffer(), /function index 0 out of bounds/);
+ builder().addStart(0).toBuffer(),
+ 'start function index 0 out of bounds (0 entries) @+10');
})();
+function import_error(index, module, func, msg) {
+ let full_msg = 'Import #' + index + ' module=\"' + module + '\"';
+ if (func !== undefined) full_msg += ' function=\"' + func + '\"';
+ return full_msg + ' error: ' + msg;
+}
+
(function TestTypeError() {
- let b;
- b = builder();
- b.addImport("foo", "bar", kSig_v_v);
- assertTypeError(b.toBuffer(), {}, /module is not an object or function/);
+ print(arguments.callee.name);
+ let b = builder();
+ b.addImport('foo', 'bar', kSig_v_v);
+ let msg =
+ import_error(0, 'foo', undefined, 'module is not an object or function');
+ assertTypeError(b.toBuffer(), {}, msg);
b = builder();
- b.addImportedGlobal("foo", "bar", kWasmI32);
- assertTypeError(b.toBuffer(), {}, /module is not an object or function/);
+ b.addImportedGlobal('foo', 'bar', kWasmI32);
+ assertTypeError(b.toBuffer(), {}, msg);
b = builder();
- b.addImportedMemory("foo", "bar");
- assertTypeError(b.toBuffer(), {}, /module is not an object or function/);
+ b.addImportedMemory('foo', 'bar');
+ assertTypeError(b.toBuffer(), {}, msg);
})();
(function TestLinkingError() {
+ print(arguments.callee.name);
let b;
+ let msg;
b = builder();
- b.addImport("foo", "bar", kSig_v_v);
- assertLinkError(
- b.toBuffer(), {foo: {}}, /function import requires a callable/);
+ msg = import_error(0, 'foo', 'bar', 'function import requires a callable');
+ b.addImport('foo', 'bar', kSig_v_v);
+ assertLinkError(b.toBuffer(), {foo: {}}, msg);
b = builder();
- b.addImport("foo", "bar", kSig_v_v);
- assertLinkError(
- b.toBuffer(), {foo: {bar: 9}}, /function import requires a callable/);
+ b.addImport('foo', 'bar', kSig_v_v);
+ assertLinkError(b.toBuffer(), {foo: {bar: 9}}, msg);
b = builder();
- b.addImportedGlobal("foo", "bar", kWasmI32);
- assertLinkError(b.toBuffer(), {foo: {}}, /global import must be a number/);
+ msg = import_error(
+ 0, 'foo', 'bar',
+ 'global import must be a number or WebAssembly.Global object');
+ b.addImportedGlobal('foo', 'bar', kWasmI32);
+ assertLinkError(b.toBuffer(), {foo: {}}, msg);
b = builder();
- b.addImportedGlobal("foo", "bar", kWasmI32);
- assertLinkError(
- b.toBuffer(), {foo: {bar: ""}}, /global import must be a number/);
+ b.addImportedGlobal('foo', 'bar', kWasmI32);
+ assertLinkError(b.toBuffer(), {foo: {bar: ''}}, msg);
b = builder();
- b.addImportedGlobal("foo", "bar", kWasmI32);
- assertLinkError(
- b.toBuffer(), {foo: {bar: () => 9}}, /global import must be a number/);
+ b.addImportedGlobal('foo', 'bar', kWasmI32);
+ assertLinkError(b.toBuffer(), {foo: {bar: () => 9}}, msg);
b = builder();
- b.addImportedMemory("foo", "bar");
- assertLinkError(
- b.toBuffer(), {foo: {}},
- /memory import must be a WebAssembly\.Memory object/);
+ msg = import_error(
+ 0, 'foo', 'bar', 'memory import must be a WebAssembly.Memory object');
+ b.addImportedMemory('foo', 'bar');
+ assertLinkError(b.toBuffer(), {foo: {}}, msg);
b = builder();
- b.addImportedMemory("foo", "bar", 1);
+ b.addImportedMemory('foo', 'bar', 1);
assertLinkError(
b.toBuffer(), {foo: {bar: () => new WebAssembly.Memory({initial: 0})}},
- /memory import must be a WebAssembly\.Memory object/);
+ msg);
+})();
- b = builder();
- b.addFunction("startup", kSig_v_v).addBody([
- kExprUnreachable,
- ]).end().addStart(0);
- assertRuntimeError(b.toBuffer(), {}, "unreachable");
+(function TestTrapUnreachable() {
+ print(arguments.callee.name);
+ let instance = builder().addFunction('run', kSig_v_v)
+ .addBody([kExprUnreachable]).exportFunc().end().instantiate();
+ assertTraps(kTrapUnreachable, instance.exports.run);
})();
-(function TestTrapError() {
- assertRuntimeError(builder().addFunction("run", kSig_v_v).addBody([
- kExprUnreachable
- ]).exportFunc().end().toBuffer(), {}, "unreachable");
-
- assertRuntimeError(builder().addFunction("run", kSig_v_v).addBody([
- kExprI32Const, 1,
- kExprI32Const, 0,
- kExprI32DivS,
- kExprDrop
- ]).exportFunc().end().toBuffer(), {}, "divide by zero");
-
- assertRuntimeError(builder().
- addFunction("run", kSig_v_v).addBody([]).exportFunc().end().
- addFunction("start", kSig_v_v).addBody([kExprUnreachable]).end().
- addStart(1).toBuffer(),
- {}, "unreachable");
+(function TestTrapDivByZero() {
+ print(arguments.callee.name);
+ let instance = builder().addFunction('run', kSig_v_v).addBody(
+ [kExprI32Const, 1, kExprI32Const, 0, kExprI32DivS, kExprDrop])
+ .exportFunc().end().instantiate();
+ assertTraps(kTrapDivByZero, instance.exports.run);
+})();
+
+(function TestUnreachableInStart() {
+ print(arguments.callee.name);
+
+ let b = builder().addFunction("start", kSig_v_v).addBody(
+ [kExprUnreachable]).end().addStart(0);
+ assertTraps(kTrapUnreachable, () => b.instantiate());
})();
(function TestConversionError() {
+ print(arguments.callee.name);
let b = builder();
b.addImport('foo', 'bar', kSig_v_l);
let buffer = b.addFunction('run', kSig_v_v)
@@ -181,6 +178,7 @@ function assertConversionError(bytes, imports, msg) {
(function InternalDebugTrace() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
var sig = builder.addType(kSig_i_dd);
builder.addImport("mod", "func", sig);
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
index 8d43610ff8..48e3c85127 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-eh --experimental-wasm-anyref --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
load("test/mjsunit/wasm/exceptions-utils.js");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-export.js b/deps/v8/test/mjsunit/wasm/exceptions-export.js
index 72ec02dec3..b698cce756 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-export.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-export.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --experimental-wasm-eh
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestExportSimple() {
@@ -44,7 +43,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addExportOfKind("ex_oob", kExternalException, except + 1);
assertThrows(
() => builder.instantiate(), WebAssembly.CompileError,
- /Wasm decoding failed: exception index 1 out of bounds/);
+ 'WebAssembly.Module(): exception index 1 out of bounds (1 entry) @+30');
})();
(function TestExportSameNameTwice() {
@@ -55,7 +54,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addExportOfKind("ex", kExternalException, except);
assertThrows(
() => builder.instantiate(), WebAssembly.CompileError,
- /Duplicate export name 'ex' for exception 0 and exception 0/);
+ 'WebAssembly.Module(): Duplicate export name \'ex\' ' +
+ 'for exception 0 and exception 0 @+28');
})();
(function TestExportModuleGetExports() {
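
These expectations now pin the entire message, including the byte offset of the offending section ('@+30', '@+28'), so the expected strings have to be updated whenever the test module's layout shifts. The general shape (sketch, reusing the first assertion above):

    assertThrows(
        () => builder.instantiate(), WebAssembly.CompileError,
        // the trailing @+30 is a module byte offset and tracks layout changes
        'WebAssembly.Module(): exception index 1 out of bounds (1 entry) @+30');
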
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-import.js b/deps/v8/test/mjsunit/wasm/exceptions-import.js
index b5276727ba..bf6d5ee6ab 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-import.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-import.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --experimental-wasm-eh
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Helper function to return a new exported exception with the {kSig_v_v} type
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
index 43041ca9e2..3b3fa365d5 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --experimental-wasm-eh --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
load("test/mjsunit/wasm/exceptions-utils.js");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-shared.js b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
index adfbded0c7..48835901f9 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-shared.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --experimental-wasm-eh
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Helper function to return a new exported exception with the {kSig_v_v} type
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-simd.js b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
index fc21156b18..ed16a7f2cc 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-simd.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --experimental-wasm-eh --experimental-wasm-simd --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
load("test/mjsunit/wasm/exceptions-utils.js");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index 6aafd0c087..d8a25c3503 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --experimental-wasm-eh --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
load("test/mjsunit/wasm/exceptions-utils.js");
diff --git a/deps/v8/test/mjsunit/wasm/export-global.js b/deps/v8/test/mjsunit/wasm/export-global.js
index 1bce050c2f..89ca5b5f83 100644
--- a/deps/v8/test/mjsunit/wasm/export-global.js
+++ b/deps/v8/test/mjsunit/wasm/export-global.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function duplicateGlobalExportName() {
diff --git a/deps/v8/test/mjsunit/wasm/export-mutable-global.js b/deps/v8/test/mjsunit/wasm/export-mutable-global.js
index 8e7d34a8b5..1ce918c6cc 100644
--- a/deps/v8/test/mjsunit/wasm/export-mutable-global.js
+++ b/deps/v8/test/mjsunit/wasm/export-mutable-global.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function exportImmutableGlobal() {
diff --git a/deps/v8/test/mjsunit/wasm/export-table.js b/deps/v8/test/mjsunit/wasm/export-table.js
index cb9d565a7e..3d523f251a 100644
--- a/deps/v8/test/mjsunit/wasm/export-table.js
+++ b/deps/v8/test/mjsunit/wasm/export-table.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function testExportedMain() {
diff --git a/deps/v8/test/mjsunit/wasm/ffi-error.js b/deps/v8/test/mjsunit/wasm/ffi-error.js
index 2b4cb9a4eb..5f777ef1cf 100644
--- a/deps/v8/test/mjsunit/wasm/ffi-error.js
+++ b/deps/v8/test/mjsunit/wasm/ffi-error.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
function CreateDefaultBuilder() {
@@ -31,15 +30,17 @@ function checkSuccessfulInstantiation(builder, ffi, handler) {
assertPromiseResult(builder.asyncInstantiate(ffi), handler);
}
-function checkFailingInstantiation(builder, ffi, error, message) {
+function checkFailingInstantiation(
+ builder, ffi, error, message, prepend_context = true) {
// Test synchronous instantiation.
- assertThrows(_ => builder.instantiate(ffi), error, message);
+ assertThrows(
+ _ => builder.instantiate(ffi), error,
+ (prepend_context ? 'WebAssembly.Instance(): ' : '') + message);
// Test asynchronous instantiation.
- assertPromiseResult(builder.asyncInstantiate(ffi), assertUnreachable, e => {
- assertInstanceof(e, error);
- assertEquals(message, e.message);
- });
+ assertThrowsAsync(
+ builder.asyncInstantiate(ffi), error,
+ (prepend_context ? 'WebAssembly.instantiate(): ' : '') + message);
}
(function testValidFFI() {
@@ -52,19 +53,19 @@ function checkFailingInstantiation(builder, ffi, error, message) {
print(arguments.callee.name);
checkFailingInstantiation(
CreateDefaultBuilder(), 17, TypeError,
- 'WebAssembly Instantiation: Argument 1 must be an object');
+ 'Argument 1 must be an object');
checkFailingInstantiation(
CreateDefaultBuilder(), {}, TypeError,
- 'WebAssembly Instantiation: Import #0 module="mod" error: module is not an object or function');
+ 'Import #0 module="mod" error: module is not an object or function');
checkFailingInstantiation(
CreateDefaultBuilder(), {mod: {}}, WebAssembly.LinkError,
- 'WebAssembly Instantiation: Import #0 module="mod" function="fun" error: function import requires a callable');
+ 'Import #0 module="mod" function="fun" error: function import requires a callable');
checkFailingInstantiation(
CreateDefaultBuilder(), {mod: {fun: {}}}, WebAssembly.LinkError,
- 'WebAssembly Instantiation: Import #0 module="mod" function="fun" error: function import requires a callable');
+ 'Import #0 module="mod" function="fun" error: function import requires a callable');
checkFailingInstantiation(
CreateDefaultBuilder(), {mod: {fun: 0}}, WebAssembly.LinkError,
- 'WebAssembly Instantiation: Import #0 module="mod" function="fun" error: function import requires a callable');
+ 'Import #0 module="mod" function="fun" error: function import requires a callable');
})();
(function testImportWithInvalidSignature() {
@@ -83,7 +84,7 @@ function checkFailingInstantiation(builder, ffi, error, message) {
let exported = builder.instantiate().exports.exp;
checkFailingInstantiation(
CreateDefaultBuilder(), {mod: {fun: exported}}, WebAssembly.LinkError,
- 'WebAssembly Instantiation: Import #0 module="mod" function="fun" error: imported function does not match the expected type');
+ 'Import #0 module="mod" function="fun" error: imported function does not match the expected type');
})();
(function regression870646() {
@@ -95,7 +96,8 @@ function checkFailingInstantiation(builder, ffi, error, message) {
}
});
- checkFailingInstantiation(CreateDefaultBuilder(), ffi, Error, 'my_exception');
+ checkFailingInstantiation(
+ CreateDefaultBuilder(), ffi, Error, 'my_exception', false);
})();
// "fun" matches signature "i_dd"
diff --git a/deps/v8/test/mjsunit/wasm/ffi.js b/deps/v8/test/mjsunit/wasm/ffi.js
index ce96184766..72cc57f598 100644
--- a/deps/v8/test/mjsunit/wasm/ffi.js
+++ b/deps/v8/test/mjsunit/wasm/ffi.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function testCallFFI(func, check) {
diff --git a/deps/v8/test/mjsunit/wasm/float-constant-folding.js b/deps/v8/test/mjsunit/wasm/float-constant-folding.js
index b08aee3d0e..6205da7cfc 100644
--- a/deps/v8/test/mjsunit/wasm/float-constant-folding.js
+++ b/deps/v8/test/mjsunit/wasm/float-constant-folding.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
diff --git a/deps/v8/test/mjsunit/wasm/function-names.js b/deps/v8/test/mjsunit/wasm/function-names.js
index fe7c401177..4bb8e31998 100644
--- a/deps/v8/test/mjsunit/wasm/function-names.js
+++ b/deps/v8/test/mjsunit/wasm/function-names.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/function-prototype.js b/deps/v8/test/mjsunit/wasm/function-prototype.js
index c2f1edd5c4..f3a99716a0 100644
--- a/deps/v8/test/mjsunit/wasm/function-prototype.js
+++ b/deps/v8/test/mjsunit/wasm/function-prototype.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestFunctionPrototype() {
diff --git a/deps/v8/test/mjsunit/wasm/futex.js b/deps/v8/test/mjsunit/wasm/futex.js
index 1ebb3f65ce..640334bbc6 100644
--- a/deps/v8/test/mjsunit/wasm/futex.js
+++ b/deps/v8/test/mjsunit/wasm/futex.js
@@ -7,7 +7,6 @@
'use strict';
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function WasmAtomicWake(memory, offset, index, num) {
@@ -114,6 +113,42 @@ function WasmI64AtomicWait(memory, offset, index, val_low,
});
})();
+(function TestInvalidAlignment() {
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
+
+ // Wait and wake must be 4 byte aligned.
+ [1, 2, 3].forEach(function(invalid) {
+ assertThrows(function() {
+ WasmAtomicWake(memory, invalid, 0, -1)
+ }, Error);
+ assertThrows(function() {
+ WasmAtomicWake(memory, 0, invalid, -1)
+ }, Error);
+ assertThrows(function() {
+ WasmI32AtomicWait(memory, invalid, 0, 0, -1)
+ }, Error);
+ assertThrows(function() {
+ WasmI32AtomicWait(memory, 0, invalid, 0, -1)
+ }, Error);
+ assertThrows(function() {
+ WasmI64AtomicWait(memory, invalid, 0, 0, 0, -1)
+ }, Error);
+ assertThrows(function() {
+ WasmI64AtomicWait(memory, 0, invalid, 0, 0, -1)
+ }, Error);
+ });
+
+ // WasmI64AtomicWait must be 8 byte aligned.
+ [4, 5, 6, 7].forEach(function(invalid) {
+ assertThrows(function() {
+ WasmI64AtomicWait(memory, 0, invalid, 0, 0, -1)
+ }, Error);
+ assertThrows(function() {
+ WasmI64AtomicWait(memory, invalid, 0, 0, 0, -1)
+ }, Error);
+ });
+})();
+
(function TestI32WaitTimeout() {
let memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
var waitMs = 100;
@@ -181,7 +216,6 @@ if (this.Worker) {
const numWorkers = 4;
let workerScript = `onmessage = function(msg) {
- load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
${WasmI32AtomicWait.toString()}
${WasmI64AtomicWait.toString()}
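
The new TestInvalidAlignment block encodes the atomics alignment rules: 32-bit wait/wake need 4-byte aligned addresses and 64-bit wait needs 8-byte alignment, whether the misalignment comes from the static offset or the dynamic index. A minimal sketch using the helpers defined in this file:

    const memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
    assertThrows(() => WasmI32AtomicWait(memory, 2, 0, 0, -1), Error);     // violates the 4-byte rule
    assertThrows(() => WasmI64AtomicWait(memory, 4, 0, 0, 0, -1), Error);  // violates the 8-byte rule
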
diff --git a/deps/v8/test/mjsunit/wasm/gc-buffer.js b/deps/v8/test/mjsunit/wasm/gc-buffer.js
index 42cf6ae5ff..d8aa9a86d8 100644
--- a/deps/v8/test/mjsunit/wasm/gc-buffer.js
+++ b/deps/v8/test/mjsunit/wasm/gc-buffer.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --gc-interval=500 --stress-compaction --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function run(f) {
diff --git a/deps/v8/test/mjsunit/wasm/gc-frame.js b/deps/v8/test/mjsunit/wasm/gc-frame.js
index c5ff0ff53e..de8bdab51e 100644
--- a/deps/v8/test/mjsunit/wasm/gc-frame.js
+++ b/deps/v8/test/mjsunit/wasm/gc-frame.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function makeFFI(func, t) {
diff --git a/deps/v8/test/mjsunit/wasm/gc-stress.js b/deps/v8/test/mjsunit/wasm/gc-stress.js
index db425ea81d..8daff420da 100644
--- a/deps/v8/test/mjsunit/wasm/gc-stress.js
+++ b/deps/v8/test/mjsunit/wasm/gc-stress.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --gc-interval=500 --stress-compaction
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function run(f) {
diff --git a/deps/v8/test/mjsunit/wasm/globals.js b/deps/v8/test/mjsunit/wasm/globals.js
index 7b2ffee7ef..b29993a8fc 100644
--- a/deps/v8/test/mjsunit/wasm/globals.js
+++ b/deps/v8/test/mjsunit/wasm/globals.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestMultipleInstances() {
diff --git a/deps/v8/test/mjsunit/wasm/graceful_shutdown.js b/deps/v8/test/mjsunit/wasm/graceful_shutdown.js
index 1bc90fd417..aa50e6cf77 100644
--- a/deps/v8/test/mjsunit/wasm/graceful_shutdown.js
+++ b/deps/v8/test/mjsunit/wasm/graceful_shutdown.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --no-wait-for-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function CompileFunctionsTest() {
diff --git a/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js b/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js
index c40bf2f11d..17c6803784 100644
--- a/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js
+++ b/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js
@@ -4,7 +4,6 @@
// Flags: --no-wait-for-wasm --wasm-tier-up
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function ShutdownDuringTierUp() {
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js b/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
index 3581f47202..9ab2334a63 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
let module = (() => {
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
index c73f0762ec..93bb56d83d 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --stress-compaction
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var initialMemoryPages = 1;
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
index a87e123501..1790f9760f 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --stress-compaction
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var initialMemoryPages = 1;
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
index 908f966017..ed04e23c63 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --stress-compaction
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let initialPages = 1;
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory.js b/deps/v8/test/mjsunit/wasm/grow-memory.js
index 3ecdb9aa1e..0e5618a2b1 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --stress-compaction
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/huge-memory.js b/deps/v8/test/mjsunit/wasm/huge-memory.js
index 188805bb5f..bf037b0c92 100644
--- a/deps/v8/test/mjsunit/wasm/huge-memory.js
+++ b/deps/v8/test/mjsunit/wasm/huge-memory.js
@@ -5,7 +5,6 @@
// Flags: --wasm-max-mem-pages=49152
// This test makes sure things don't break once we support >2GB wasm memories.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function testHugeMemory() {
diff --git a/deps/v8/test/mjsunit/wasm/import-function.js b/deps/v8/test/mjsunit/wasm/import-function.js
index 9ed234b768..ec187aff4a 100644
--- a/deps/v8/test/mjsunit/wasm/import-function.js
+++ b/deps/v8/test/mjsunit/wasm/import-function.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function testCallImport(func, check) {
diff --git a/deps/v8/test/mjsunit/wasm/import-memory.js b/deps/v8/test/mjsunit/wasm/import-memory.js
index f099af56b4..fc688dc7ce 100644
--- a/deps/v8/test/mjsunit/wasm/import-memory.js
+++ b/deps/v8/test/mjsunit/wasm/import-memory.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// V8 internal memory size limit.
diff --git a/deps/v8/test/mjsunit/wasm/import-mutable-global.js b/deps/v8/test/mjsunit/wasm/import-mutable-global.js
index a1133ee3f7..715549a41f 100644
--- a/deps/v8/test/mjsunit/wasm/import-mutable-global.js
+++ b/deps/v8/test/mjsunit/wasm/import-mutable-global.js
@@ -4,7 +4,6 @@
// Flags: --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestBasic() {
diff --git a/deps/v8/test/mjsunit/wasm/import-table.js b/deps/v8/test/mjsunit/wasm/import-table.js
index 881f0b3b2c..b5a756aa4b 100644
--- a/deps/v8/test/mjsunit/wasm/import-table.js
+++ b/deps/v8/test/mjsunit/wasm/import-table.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function addConstFunc(builder, val) {
diff --git a/deps/v8/test/mjsunit/wasm/indirect-calls.js b/deps/v8/test/mjsunit/wasm/indirect-calls.js
index 5abb346879..067d6884d7 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-calls.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-calls.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function Test1() {
diff --git a/deps/v8/test/mjsunit/wasm/indirect-sig-mismatch.js b/deps/v8/test/mjsunit/wasm/indirect-sig-mismatch.js
index 5e6e7169a1..9e8ddac1c5 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-sig-mismatch.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-sig-mismatch.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const kTableSize = 3;
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
index 642bd953bf..c4edd71f14 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-tables.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function AddFunctions(builder) {
diff --git a/deps/v8/test/mjsunit/wasm/instance-gc.js b/deps/v8/test/mjsunit/wasm/instance-gc.js
index e5dd4edaf9..aed9589e51 100644
--- a/deps/v8/test/mjsunit/wasm/instance-gc.js
+++ b/deps/v8/test/mjsunit/wasm/instance-gc.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
let nogc = () => {};
diff --git a/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js b/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js
index dc1ca24a85..29b65bc9b8 100644
--- a/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js
+++ b/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// This test verifies that when instances are exported, Gc'ed, the other
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
index d2489f3e89..1c5f10a832 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --allow-natives-syntax
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let kReturnValue = 17;
@@ -97,11 +96,8 @@ assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
print('InvalidBinaryAsyncCompilation...');
let builder = new WasmModuleBuilder();
builder.addFunction('f', kSig_i_i).addBody([kExprCallFunction, 0]);
- let promise = WebAssembly.compile(builder.toBuffer());
- assertPromiseResult(
- promise, compiled => assertUnreachable(
- 'should not be able to compile invalid blob.'),
- e => assertInstanceof(e, WebAssembly.CompileError));
+ assertThrowsAsync(
+ WebAssembly.compile(builder.toBuffer()), WebAssembly.CompileError);
})();
// Multiple instances tests.
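
As the hunk above shows, assertThrowsAsync can also be called without a message argument, checking only the type of the rejection. A self-contained sketch of that usage:

    const bad = new WasmModuleBuilder();
    // f calls itself without pushing its i32 argument, so validation fails.
    bad.addFunction('f', kSig_i_i).addBody([kExprCallFunction, 0]);
    assertThrowsAsync(
        WebAssembly.compile(bad.toBuffer()), WebAssembly.CompileError);
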
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
index b0016ec9aa..ead01df352 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const kReturnValue = 15;
diff --git a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
index 4a84e13414..b5665d1654 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// =============================================================================
diff --git a/deps/v8/test/mjsunit/wasm/interpreter.js b/deps/v8/test/mjsunit/wasm/interpreter.js
index d9a1751408..c11e593115 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter.js
@@ -4,7 +4,6 @@
// Flags: --wasm-interpret-all --allow-natives-syntax --expose-gc
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// The stack trace contains file path, only keep "interpreter.js".
diff --git a/deps/v8/test/mjsunit/wasm/js-api.js b/deps/v8/test/mjsunit/wasm/js-api.js
index 5054fd73be..efc0150992 100644
--- a/deps/v8/test/mjsunit/wasm/js-api.js
+++ b/deps/v8/test/mjsunit/wasm/js-api.js
@@ -2,19 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --allow-natives-syntax
+// Flags: --expose-wasm
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
-function unexpectedSuccess() {
- %AbortJS('unexpected success');
-}
-
-function unexpectedFail(error) {
- %AbortJS('unexpected fail: ' + error);
-}
-
function assertEq(val, expected) {
assertSame(expected, val);
}
@@ -26,13 +17,6 @@ function assertArrayBuffer(val, expected) {
assertEq(expected[i], input[i]);
}
}
-function wasmIsSupported() {
- return (typeof WebAssembly.Module) == 'function';
-}
-function assertErrorMessage(func, type, msg) {
- // TODO assertThrows(func, type, msg);
- assertThrows(func, type);
-}
let emptyModuleBinary = (() => {
var builder = new WasmModuleBuilder();
@@ -147,25 +131,25 @@ let Module = WebAssembly.Module;
assertEq(Module, moduleDesc.value);
assertEq(Module.length, 1);
assertEq(Module.name, 'Module');
-assertErrorMessage(
- () => Module(), TypeError, /constructor without new is forbidden/);
-assertErrorMessage(
- () => new Module(), TypeError, /requires more than 0 arguments/);
-assertErrorMessage(
+assertThrows(
+ () => Module(), TypeError, /must be invoked with 'new'/);
+assertThrows(
+ () => new Module(), TypeError, /Argument 0 must be a buffer source/);
+assertThrows(
() => new Module(undefined), TypeError,
- 'first argument must be an ArrayBuffer or typed array object');
-assertErrorMessage(
+ 'WebAssembly.Module(): Argument 0 must be a buffer source');
+assertThrows(
() => new Module(1), TypeError,
- 'first argument must be an ArrayBuffer or typed array object');
-assertErrorMessage(
+ 'WebAssembly.Module(): Argument 0 must be a buffer source');
+assertThrows(
() => new Module({}), TypeError,
- 'first argument must be an ArrayBuffer or typed array object');
-assertErrorMessage(
+ 'WebAssembly.Module(): Argument 0 must be a buffer source');
+assertThrows(
() => new Module(new Uint8Array()), CompileError,
- /failed to match magic number/);
-assertErrorMessage(
+ /BufferSource argument is empty/);
+assertThrows(
() => new Module(new ArrayBuffer()), CompileError,
- /failed to match magic number/);
+ /BufferSource argument is empty/);
assertTrue(new Module(emptyModuleBinary) instanceof Module);
assertTrue(new Module(emptyModuleBinary.buffer) instanceof Module);
@@ -200,14 +184,14 @@ assertTrue(moduleImportsDesc.configurable);
// 'WebAssembly.Module.imports' method
let moduleImports = moduleImportsDesc.value;
assertEq(moduleImports.length, 1);
-assertErrorMessage(
- () => moduleImports(), TypeError, /requires more than 0 arguments/);
-assertErrorMessage(
+assertThrows(
+ () => moduleImports(), TypeError, /Argument 0 must be a WebAssembly.Module/);
+assertThrows(
() => moduleImports(undefined), TypeError,
- /first argument must be a WebAssembly.Module/);
-assertErrorMessage(
+ /Argument 0 must be a WebAssembly.Module/);
+assertThrows(
() => moduleImports({}), TypeError,
- /first argument must be a WebAssembly.Module/);
+ /Argument 0 must be a WebAssembly.Module/);
var arr = moduleImports(new Module(emptyModuleBinary));
assertTrue(arr instanceof Array);
assertEq(arr.length, 0);
@@ -247,14 +231,14 @@ assertTrue(moduleExportsDesc.configurable);
// 'WebAssembly.Module.exports' method
let moduleExports = moduleExportsDesc.value;
assertEq(moduleExports.length, 1);
-assertErrorMessage(
- () => moduleExports(), TypeError, /requires more than 0 arguments/);
-assertErrorMessage(
+assertThrows(
+ () => moduleExports(), TypeError, /Argument 0 must be a WebAssembly.Module/);
+assertThrows(
() => moduleExports(undefined), TypeError,
- /first argument must be a WebAssembly.Module/);
-assertErrorMessage(
+ /Argument 0 must be a WebAssembly.Module/);
+assertThrows(
() => moduleExports({}), TypeError,
- /first argument must be a WebAssembly.Module/);
+ /Argument 0 must be a WebAssembly.Module/);
var arr = moduleExports(emptyModule);
assertTrue(arr instanceof Array);
assertEq(arr.length, 0);
@@ -292,21 +276,21 @@ assertTrue(moduleCustomSectionsDesc.configurable);
let moduleCustomSections = moduleCustomSectionsDesc.value;
assertEq(moduleCustomSections.length, 2);
-assertErrorMessage(
- () => moduleCustomSections(), TypeError, /requires more than 0 arguments/);
-assertErrorMessage(
+assertThrows(
+ () => moduleCustomSections(), TypeError, /Argument 0 must be a WebAssembly.Module/);
+assertThrows(
() => moduleCustomSections(undefined), TypeError,
- /first argument must be a WebAssembly.Module/);
-assertErrorMessage(
+ /Argument 0 must be a WebAssembly.Module/);
+assertThrows(
() => moduleCustomSections({}), TypeError,
- /first argument must be a WebAssembly.Module/);
+ /Argument 0 must be a WebAssembly.Module/);
var arr = moduleCustomSections(emptyModule, 'x');
assertEq(arr instanceof Array, true);
assertEq(arr.length, 0);
-assertErrorMessage(
+assertThrows(
() => moduleCustomSections(1), TypeError,
- 'first argument must be a WebAssembly.Module');
+ 'WebAssembly.Module.customSections(): Argument 0 must be a WebAssembly.Module');
let customSectionModuleBinary2 = (() => {
let builder = new WasmModuleBuilder();
@@ -351,24 +335,24 @@ assertEq(Instance, instanceDesc.value);
assertEq(Instance.length, 1);
assertEq(Instance.name, 'Instance');
-assertErrorMessage(
- () => Instance(), TypeError, /constructor without new is forbidden/);
-assertErrorMessage(
+assertThrows(
+ () => Instance(), TypeError, /must be invoked with 'new'/);
+assertThrows(
() => new Instance(1), TypeError,
- 'first argument must be a WebAssembly.Module');
-assertErrorMessage(
+ 'WebAssembly.Instance(): Argument 0 must be a WebAssembly.Module');
+assertThrows(
() => new Instance({}), TypeError,
- 'first argument must be a WebAssembly.Module');
-assertErrorMessage(
+ 'WebAssembly.Instance(): Argument 0 must be a WebAssembly.Module');
+assertThrows(
() => new Instance(emptyModule, null), TypeError,
- 'second argument must be an object');
-assertErrorMessage(() => new Instance(importingModule, null), TypeError, '');
-assertErrorMessage(
- () => new Instance(importingModule, undefined), TypeError, '');
-assertErrorMessage(
- () => new Instance(importingModule, {'': {g: () => {}}}), LinkError, '');
-assertErrorMessage(
- () => new Instance(importingModule, {t: {f: () => {}}}), TypeError, '');
+ 'WebAssembly.Instance(): Argument 1 must be an object');
+assertThrows(() => new Instance(importingModule, null), TypeError);
+assertThrows(
+ () => new Instance(importingModule, undefined), TypeError);
+assertThrows(
+ () => new Instance(importingModule, {'': {g: () => {}}}), LinkError);
+assertThrows(
+ () => new Instance(importingModule, {t: {f: () => {}}}), TypeError);
assertTrue(new Instance(emptyModule) instanceof Instance);
assertTrue(new Instance(emptyModule, {}) instanceof Instance);
@@ -412,7 +396,7 @@ assertTrue(f instanceof Function);
assertEq(f.length, 0);
assertTrue('name' in f);
assertEq(Function.prototype.call.call(f), 42);
-assertErrorMessage(() => new f(), TypeError, /is not a constructor/);
+assertThrows(() => new f(), TypeError, /is not a constructor/);
// 'WebAssembly.Memory' data property
let memoryDesc = Object.getOwnPropertyDescriptor(WebAssembly, 'Memory');
@@ -426,27 +410,27 @@ let Memory = WebAssembly.Memory;
assertEq(Memory, memoryDesc.value);
assertEq(Memory.length, 1);
assertEq(Memory.name, 'Memory');
-assertErrorMessage(
- () => Memory(), TypeError, /constructor without new is forbidden/);
-assertErrorMessage(
+assertThrows(
+ () => Memory(), TypeError, /must be invoked with 'new'/);
+assertThrows(
() => new Memory(1), TypeError,
- 'first argument must be a memory descriptor');
-assertErrorMessage(
+ 'WebAssembly.Memory(): Argument 0 must be a memory descriptor');
+assertThrows(
() => new Memory({initial: {valueOf() { throw new Error('here') }}}), Error,
'here');
-assertErrorMessage(
- () => new Memory({initial: -1}), TypeError, /bad Memory initial size/);
-assertErrorMessage(
+assertThrows(
+ () => new Memory({initial: -1}), TypeError, /must be non-negative/);
+assertThrows(
() => new Memory({initial: Math.pow(2, 32)}), TypeError,
- /bad Memory initial size/);
-assertErrorMessage(
+ /must be in the unsigned long range/);
+assertThrows(
() => new Memory({initial: 1, maximum: Math.pow(2, 32) / Math.pow(2, 14)}),
- RangeError, /bad Memory maximum size/);
-assertErrorMessage(
+ RangeError, /is above the upper bound/);
+assertThrows(
() => new Memory({initial: 2, maximum: 1}), RangeError,
- /bad Memory maximum size/);
-assertErrorMessage(
- () => new Memory({maximum: -1}), TypeError, /bad Memory maximum size/);
+ /is below the lower bound/);
+assertThrows(
+ () => new Memory({maximum: -1}), TypeError, /'initial' is required/);
assertTrue(new Memory({initial: 1}) instanceof Memory);
assertEq(new Memory({initial: 1.5}).buffer.byteLength, kPageSize);
@@ -478,10 +462,10 @@ assertTrue(bufferDesc.configurable);
// 'WebAssembly.Memory.prototype.buffer' getter
let bufferGetter = bufferDesc.get;
-assertErrorMessage(
- () => bufferGetter.call(), TypeError, /called on incompatible undefined/);
-assertErrorMessage(
- () => bufferGetter.call({}), TypeError, /called on incompatible Object/);
+assertThrows(
+ () => bufferGetter.call(), TypeError, /Receiver is not a WebAssembly.Memory/);
+assertThrows(
+ () => bufferGetter.call({}), TypeError, /Receiver is not a WebAssembly.Memory/);
assertTrue(bufferGetter.call(mem1) instanceof ArrayBuffer);
assertEq(bufferGetter.call(mem1).byteLength, kPageSize);
@@ -495,13 +479,13 @@ assertTrue(memGrowDesc.configurable);
let memGrow = memGrowDesc.value;
assertEq(memGrow.length, 1);
-assertErrorMessage(
- () => memGrow.call(), TypeError, /called on incompatible undefined/);
-assertErrorMessage(
- () => memGrow.call({}), TypeError, /called on incompatible Object/);
-assertErrorMessage(
+assertThrows(
+ () => memGrow.call(), TypeError, /Receiver is not a WebAssembly.Memory/);
+assertThrows(
+ () => memGrow.call({}), TypeError, /Receiver is not a WebAssembly.Memory/);
+assertThrows(
() => memGrow.call(mem1, -1), TypeError, /must be non-negative/);
-assertErrorMessage(
+assertThrows(
() => memGrow.call(mem1, Math.pow(2, 32)), TypeError,
/must be in the unsigned long range/);
var mem = new Memory({initial: 1, maximum: 2});
@@ -522,16 +506,16 @@ assertTrue(buf !== mem.buffer);
assertEq(buf.byteLength, 0);
buf = mem.buffer;
assertEq(buf.byteLength, 2 * kPageSize);
-assertErrorMessage(() => mem.grow(1), Error, /failed to grow memory/);
-assertErrorMessage(() => mem.grow(Infinity), Error, /failed to grow memory/);
-assertErrorMessage(() => mem.grow(-Infinity), Error, /failed to grow memory/);
+assertThrows(() => mem.grow(1), Error, /Maximum memory size exceeded/);
+assertThrows(() => mem.grow(Infinity), Error, /must be convertible to a valid number/);
+assertThrows(() => mem.grow(-Infinity), Error, /must be convertible to a valid number/);
assertEq(buf, mem.buffer);
let throwOnValueOf = {
valueOf: function() {
throw Error('throwOnValueOf')
}
};
-assertErrorMessage(() => mem.grow(throwOnValueOf), Error, /throwOnValueOf/);
+assertThrows(() => mem.grow(throwOnValueOf), Error, /throwOnValueOf/);
assertEq(buf, mem.buffer);
let zero_wrapper = {
valueOf: function() {
@@ -566,41 +550,41 @@ let Table = WebAssembly.Table;
assertEq(Table, tableDesc.value);
assertEq(Table.length, 1);
assertEq(Table.name, 'Table');
-assertErrorMessage(
- () => Table(), TypeError, /constructor without new is forbidden/);
-assertErrorMessage(
- () => new Table(1), TypeError, 'first argument must be a table descriptor');
-assertErrorMessage(
- () => new Table({initial: 1, element: 1}), TypeError, /must be "anyfunc"/);
-assertErrorMessage(
+assertThrows(
+ () => Table(), TypeError, /must be invoked with 'new'/);
+assertThrows(
+ () => new Table(1), TypeError, 'WebAssembly.Module(): Argument 0 must be a table descriptor');
+assertThrows(
+ () => new Table({initial: 1, element: 1}), TypeError, /must be 'anyfunc'/);
+assertThrows(
() => new Table({initial: 1, element: 'any'}), TypeError,
- /must be "anyfunc"/);
-assertErrorMessage(
+ /must be 'anyfunc'/);
+assertThrows(
() => new Table({initial: 1, element: {valueOf() { return 'anyfunc' }}}),
- TypeError, /must be "anyfunc"/);
-assertErrorMessage(
+ TypeError, /must be 'anyfunc'/);
+assertThrows(
() => new Table(
{initial: {valueOf() { throw new Error('here') }}, element: 'anyfunc'}),
Error, 'here');
-assertErrorMessage(
+assertThrows(
() => new Table({initial: -1, element: 'anyfunc'}), TypeError,
- /bad Table initial size/);
-assertErrorMessage(
+ /must be non-negative/);
+assertThrows(
() => new Table({initial: Math.pow(2, 32), element: 'anyfunc'}), TypeError,
- /bad Table initial size/);
-assertErrorMessage(
+ /must be in the unsigned long range/);
+assertThrows(
() => new Table({initial: 2, maximum: 1, element: 'anyfunc'}), RangeError,
- /bad Table maximum size/);
-assertErrorMessage(
+ /is below the lower bound/);
+assertThrows(
() => new Table({initial: 2, maximum: Math.pow(2, 32), element: 'anyfunc'}),
- TypeError, /bad Table maximum size/);
+ TypeError, /must be in the unsigned long range/);
assertTrue(new Table({initial: 1, element: 'anyfunc'}) instanceof Table);
assertTrue(new Table({initial: 1.5, element: 'anyfunc'}) instanceof Table);
assertTrue(
new Table({initial: 1, maximum: 1.5, element: 'anyfunc'}) instanceof Table);
-assertTrue(
- new Table({initial: 1, maximum: Math.pow(2, 32) - 1, element: 'anyfunc'})
- instanceof Table);
+assertThrows(
+ () => new Table({initial: 1, maximum: Math.pow(2, 32) - 1, element: 'anyfunc'}),
+ RangeError, /above the upper bound/);
// 'WebAssembly.Table.prototype' data property
let tableProtoDesc = Object.getOwnPropertyDescriptor(Table, 'prototype');
@@ -631,10 +615,10 @@ assertTrue(lengthDesc.configurable);
// 'WebAssembly.Table.prototype.length' getter
let lengthGetter = lengthDesc.get;
assertEq(lengthGetter.length, 0);
-assertErrorMessage(
- () => lengthGetter.call(), TypeError, /called on incompatible undefined/);
-assertErrorMessage(
- () => lengthGetter.call({}), TypeError, /called on incompatible Object/);
+assertThrows(
+ () => lengthGetter.call(), TypeError, /Receiver is not a WebAssembly.Table/);
+assertThrows(
+ () => lengthGetter.call({}), TypeError, /Receiver is not a WebAssembly.Table/);
assertEq(typeof lengthGetter.call(tbl1), 'number');
assertEq(lengthGetter.call(tbl1), 2);
@@ -647,23 +631,24 @@ assertTrue(getDesc.configurable);
// 'WebAssembly.Table.prototype.get' method
let get = getDesc.value;
assertEq(get.length, 1);
-assertErrorMessage(
- () => get.call(), TypeError, /called on incompatible undefined/);
-assertErrorMessage(
- () => get.call({}), TypeError, /called on incompatible Object/);
-assertErrorMessage(
+assertThrows(
+ () => get.call(), TypeError, /Receiver is not a WebAssembly.Table/);
+assertThrows(
+ () => get.call({}), TypeError, /Receiver is not a WebAssembly.Table/);
+assertThrows(
() => get.call(tbl1), TypeError, /must be convertible to a valid number/);
assertEq(get.call(tbl1, 0), null);
assertEq(get.call(tbl1, 0, Infinity), null);
assertEq(get.call(tbl1, 1), null);
assertEq(get.call(tbl1, 1.5), null);
-assertErrorMessage(() => get.call(tbl1, 2), RangeError, /bad Table get index/);
-assertErrorMessage(
- () => get.call(tbl1, 2.5), RangeError, /bad Table get index/);
-assertErrorMessage(() => get.call(tbl1, -1), TypeError, /bad Table get index/);
-assertErrorMessage(
- () => get.call(tbl1, Math.pow(2, 33)), TypeError, /bad Table get index/);
-assertErrorMessage(
+assertThrows(() => get.call(tbl1, 2), RangeError, /Index out of bounds/);
+assertThrows(
+ () => get.call(tbl1, 2.5), RangeError, /Index out of bounds/);
+assertThrows(() => get.call(tbl1, -1), TypeError, /must be non-negative/);
+assertThrows(
+ () => get.call(tbl1, Math.pow(2, 33)), TypeError,
+ /must be in the unsigned long range/);
+assertThrows(
() => get.call(tbl1, {valueOf() { throw new Error('hi') }}), Error, 'hi');
// 'WebAssembly.Table.prototype.set' data property
@@ -675,46 +660,48 @@ assertTrue(setDesc.configurable);
// 'WebAssembly.Table.prototype.set' method
let set = setDesc.value;
assertEq(set.length, 2);
-assertErrorMessage(
- () => set.call(), TypeError, /called on incompatible undefined/);
-assertErrorMessage(
- () => set.call({}), TypeError, /called on incompatible Object/);
-assertErrorMessage(
- () => set.call(tbl1, 0), TypeError, /requires more than 1 argument/);
-assertErrorMessage(
+assertThrows(
+ () => set.call(), TypeError, /Receiver is not a WebAssembly.Table/);
+assertThrows(
+ () => set.call({}), TypeError, /Receiver is not a WebAssembly.Table/);
+assertThrows(
+ () => set.call(tbl1, 0), TypeError, /must be null or a WebAssembly function/);
+assertThrows(
() => set.call(tbl1, undefined), TypeError,
- /requires more than 1 argument/);
-assertErrorMessage(
- () => set.call(tbl1, 2, null), RangeError, /bad Table set index/);
-assertErrorMessage(
- () => set.call(tbl1, -1, null), TypeError, /bad Table set index/);
-assertErrorMessage(
+ /must be convertible to a valid number/);
+assertThrows(
+ () => set.call(tbl1, 2, null), RangeError, /index out of bounds/);
+assertThrows(
+ () => set.call(tbl1, -1, null), TypeError, /must be non-negative/);
+assertThrows(
() => set.call(tbl1, Math.pow(2, 33), null), TypeError,
- /bad Table set index/);
-assertErrorMessage(
- () => set.call(tbl1, Infinity, null), TypeError, /bad Table set index/);
-assertErrorMessage(
- () => set.call(tbl1, -Infinity, null), TypeError, /bad Table set index/);
-assertErrorMessage(
+ /must be in the unsigned long range/);
+assertThrows(
+ () => set.call(tbl1, Infinity, null), TypeError,
+ /must be convertible to a valid number/);
+assertThrows(
+ () => set.call(tbl1, -Infinity, null), TypeError,
+ /must be convertible to a valid number/);
+assertThrows(
() => set.call(tbl1, 0, undefined), TypeError,
- /can only assign WebAssembly exported functions to Table/);
-assertErrorMessage(
+ /must be null or a WebAssembly function/);
+assertThrows(
() => set.call(tbl1, undefined, undefined), TypeError,
- /can only assign WebAssembly exported functions to Table/);
-assertErrorMessage(
+ /must be convertible to a valid number/);
+assertThrows(
() => set.call(tbl1, 0, {}), TypeError,
- /can only assign WebAssembly exported functions to Table/);
-assertErrorMessage(() => set.call(tbl1, 0, function() {
-}), TypeError, /can only assign WebAssembly exported functions to Table/);
-assertErrorMessage(
+ /must be null or a WebAssembly function/);
+assertThrows(() => set.call(tbl1, 0, function() {
+}), TypeError, /must be null or a WebAssembly function/);
+assertThrows(
() => set.call(tbl1, 0, Math.sin), TypeError,
- /can only assign WebAssembly exported functions to Table/);
-assertErrorMessage(
+ /must be null or a WebAssembly function/);
+assertThrows(
() => set.call(tbl1, {valueOf() { throw Error('hai') }}, null), Error,
'hai');
assertEq(set.call(tbl1, 0, null), undefined);
assertEq(set.call(tbl1, 1, null), undefined);
-assertErrorMessage(
+assertThrows(
() => set.call(tbl1, undefined, null), TypeError,
/must be convertible to a valid number/);
@@ -727,35 +714,35 @@ assertTrue(tblGrowDesc.configurable);
// 'WebAssembly.Table.prototype.grow' method
let tblGrow = tblGrowDesc.value;
assertEq(tblGrow.length, 1);
-assertErrorMessage(
- () => tblGrow.call(), TypeError, /called on incompatible undefined/);
-assertErrorMessage(
- () => tblGrow.call({}), TypeError, /called on incompatible Object/);
-assertErrorMessage(
- () => tblGrow.call(tbl1, -1), TypeError, /bad Table grow delta/);
-assertErrorMessage(
+assertThrows(
+ () => tblGrow.call(), TypeError, /Receiver is not a WebAssembly.Table/);
+assertThrows(
+ () => tblGrow.call({}), TypeError, /Receiver is not a WebAssembly.Table/);
+assertThrows(
+ () => tblGrow.call(tbl1, -1), TypeError, /must be non-negative/);
+assertThrows(
() => tblGrow.call(tbl1, Math.pow(2, 32)), TypeError,
- /bad Table grow delta/);
+ /must be in the unsigned long range/);
var tbl = new Table({element: 'anyfunc', initial: 1, maximum: 2});
assertEq(tbl.length, 1);
-assertErrorMessage(
- () => tbl.grow(Infinity), TypeError, /failed to grow table/);
-assertErrorMessage(
- () => tbl.grow(-Infinity), TypeError, /failed to grow table/);
+assertThrows(
+ () => tbl.grow(Infinity), TypeError, /must be convertible to a valid number/);
+assertThrows(
+ () => tbl.grow(-Infinity), TypeError, /must be convertible to a valid number/);
assertEq(tbl.grow(0), 1);
assertEq(tbl.length, 1);
assertEq(tbl.grow(1, 4), 1);
assertEq(tbl.length, 2);
assertEq(tbl.length, 2);
-assertErrorMessage(() => tbl.grow(1), Error, /failed to grow table/);
-assertErrorMessage(
- () => tbl.grow(Infinity), TypeError, /failed to grow table/);
-assertErrorMessage(
- () => tbl.grow(-Infinity), TypeError, /failed to grow table/);
+assertThrows(() => tbl.grow(1), Error, /maximum table size exceeded/);
+assertThrows(
+ () => tbl.grow(Infinity), TypeError, /must be convertible to a valid number/);
+assertThrows(
+ () => tbl.grow(-Infinity), TypeError, /must be convertible to a valid number/);
// 'WebAssembly.validate' function
-assertErrorMessage(() => WebAssembly.validate(), TypeError);
-assertErrorMessage(() => WebAssembly.validate('hi'), TypeError);
+assertThrows(() => WebAssembly.validate(), TypeError);
+assertThrows(() => WebAssembly.validate('hi'), TypeError);
assertTrue(WebAssembly.validate(emptyModuleBinary));
// TODO: other ways for validate to return false.
assertFalse(WebAssembly.validate(moduleBinaryImporting2Memories));
@@ -774,22 +761,18 @@ assertEq(compile, compileDesc.value);
assertEq(compile.length, 1);
assertEq(compile.name, 'compile');
function assertCompileError(args, err, msg) {
- var error = null;
- assertPromiseResult(compile(...args), unexpectedSuccess, error => {
- assertTrue(error instanceof err);
- // TODO assertTrue(Boolean(error.message.match(msg)));
- });
+ assertThrowsAsync(compile(...args), err /* TODO , msg */);
}
assertCompileError([], TypeError, /requires more than 0 arguments/);
assertCompileError(
[undefined], TypeError,
- /first argument must be an ArrayBuffer or typed array object/);
+ /Argument 0 must be a buffer source/);
assertCompileError(
[1], TypeError,
- /first argument must be an ArrayBuffer or typed array object/);
+ /Argument 0 must be a buffer source/);
assertCompileError(
[{}], TypeError,
- /first argument must be an ArrayBuffer or typed array object/);
+ /Argument 0 must be a buffer source/);
assertCompileError(
[new Uint8Array()], CompileError, /BufferSource argument is empty/);
assertCompileError(
@@ -820,11 +803,7 @@ assertEq(instantiate, instantiateDesc.value);
assertEq(instantiate.length, 1);
assertEq(instantiate.name, 'instantiate');
function assertInstantiateError(args, err, msg) {
- var error = null;
- assertPromiseResult(instantiate(...args), unexpectedSuccess, error => {
- assertTrue(error instanceof err);
- // TODO assertTrue(Boolean(error.message.match(msg)));
- });
+ assertThrowsAsync(instantiate(...args), err /* TODO , msg */);
}
var scratch_memory = new WebAssembly.Memory({ initial: 0 });
assertInstantiateError([], TypeError, /requires more than 0 arguments/);
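
The hunks above migrate js-api.js from assertErrorMessage to mjsunit's assertThrows and assertThrowsAsync, updating the expected text to V8's current message wording as they go; assertCompileError and assertInstantiateError still drop their msg argument (note the /* TODO , msg */) until message matching is wired up for async errors. A minimal sketch of what the two helpers check, assuming the usual mjsunit primitives (the real implementations in test/mjsunit/mjsunit.js are more thorough):

// Sketch only: throws if fun() completes, or if the error's type/message
// does not match the expectation.
function assertThrows(fun, type, message) {
  try {
    fun();
  } catch (e) {
    if (type !== undefined) assertTrue(e instanceof type);
    if (message !== undefined) {
      if (message instanceof RegExp) assertTrue(message.test(e.message));
      else assertEquals(message, e.message);
    }
    return;
  }
  assertUnreachable('expected an exception');
}

// Sketch only: succeeds only if the promise rejects with a matching error.
function assertThrowsAsync(promise, type, message) {
  return promise.then(
      () => assertUnreachable('expected a rejection'),
      (e) => {
        if (type !== undefined) assertTrue(e instanceof type);
        if (message !== undefined && message instanceof RegExp) {
          assertTrue(message.test(e.message));
        }
      });
}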
diff --git a/deps/v8/test/mjsunit/wasm/large-offset.js b/deps/v8/test/mjsunit/wasm/large-offset.js
index 653194c159..919891ab99 100644
--- a/deps/v8/test/mjsunit/wasm/large-offset.js
+++ b/deps/v8/test/mjsunit/wasm/large-offset.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function testMemoryGrowOutOfBoundsOffset() {
diff --git a/deps/v8/test/mjsunit/wasm/lazy-compilation.js b/deps/v8/test/mjsunit/wasm/lazy-compilation.js
index fc41fbd622..c7cd40d05d 100644
--- a/deps/v8/test/mjsunit/wasm/lazy-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/lazy-compilation.js
@@ -4,7 +4,6 @@
// Flags: --wasm-lazy-compilation --allow-natives-syntax
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function importFromOtherInstance() {
diff --git a/deps/v8/test/mjsunit/wasm/liftoff-trap-handler.js b/deps/v8/test/mjsunit/wasm/liftoff-trap-handler.js
index d24b5a3b22..3ce74816ea 100644
--- a/deps/v8/test/mjsunit/wasm/liftoff-trap-handler.js
+++ b/deps/v8/test/mjsunit/wasm/liftoff-trap-handler.js
@@ -7,7 +7,6 @@
// A simple test to make sure Liftoff can compile memory operations with trap
// handlers enabled.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function testCompileLoadStore() {
diff --git a/deps/v8/test/mjsunit/wasm/liftoff.js b/deps/v8/test/mjsunit/wasm/liftoff.js
index 66fa5b70ef..51b30878d3 100644
--- a/deps/v8/test/mjsunit/wasm/liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/liftoff.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax --liftoff --no-future --no-wasm-tier-up
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function testLiftoffFlag() {
diff --git a/deps/v8/test/mjsunit/wasm/loop-rotation.js b/deps/v8/test/mjsunit/wasm/loop-rotation.js
index da7a45c4d0..92ad1f31c7 100644
--- a/deps/v8/test/mjsunit/wasm/loop-rotation.js
+++ b/deps/v8/test/mjsunit/wasm/loop-rotation.js
@@ -4,7 +4,6 @@
// Flags: --turbo-loop-rotation --noliftoff --nowasm-tier-up
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestTrivialLoop1() {
diff --git a/deps/v8/test/mjsunit/wasm/many-parameters.js b/deps/v8/test/mjsunit/wasm/many-parameters.js
index a56619a6ad..46b231943d 100644
--- a/deps/v8/test/mjsunit/wasm/many-parameters.js
+++ b/deps/v8/test/mjsunit/wasm/many-parameters.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let types = [kWasmI32, kWasmF32, kWasmF64];
diff --git a/deps/v8/test/mjsunit/wasm/memory-external-call.js b/deps/v8/test/mjsunit/wasm/memory-external-call.js
index 2af5888daa..853cdf616a 100644
--- a/deps/v8/test/mjsunit/wasm/memory-external-call.js
+++ b/deps/v8/test/mjsunit/wasm/memory-external-call.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let initialMemoryPages = 1;
diff --git a/deps/v8/test/mjsunit/wasm/memory-instance-validation.js b/deps/v8/test/mjsunit/wasm/memory-instance-validation.js
index c7aa32e4d5..ef65840532 100644
--- a/deps/v8/test/mjsunit/wasm/memory-instance-validation.js
+++ b/deps/v8/test/mjsunit/wasm/memory-instance-validation.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --expose-gc
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// This test verifies that when instances are exported, Gc'ed, the other
diff --git a/deps/v8/test/mjsunit/wasm/memory-size.js b/deps/v8/test/mjsunit/wasm/memory-size.js
index f803df2e3d..b83a424285 100644
--- a/deps/v8/test/mjsunit/wasm/memory-size.js
+++ b/deps/v8/test/mjsunit/wasm/memory-size.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var kV8MaxWasmMemoryPages = 32767; // ~ 2 GiB
diff --git a/deps/v8/test/mjsunit/wasm/memory.js b/deps/v8/test/mjsunit/wasm/memory.js
index 3bfb052808..f266dd4a85 100644
--- a/deps/v8/test/mjsunit/wasm/memory.js
+++ b/deps/v8/test/mjsunit/wasm/memory.js
@@ -4,7 +4,7 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
// Basic tests.
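
The load-line removals repeated across the files below work because this patch folds the contents of wasm-constants.js (the kWasm* type codes, kExpr* opcodes, kSig_* signatures, kPageSize, and friends) into wasm-module-builder.js; the wholesale deletion of wasm-constants.js appears further down. memory.js is the one file that loaded only the constants, which is why its load line is replaced rather than dropped. After the merge a single load is enough:

// One load now provides both the constants and WasmModuleBuilder.
load('test/mjsunit/wasm/wasm-module-builder.js');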
diff --git a/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js b/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js
index f9593e84f7..f2b22d97ab 100644
--- a/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js
+++ b/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js
@@ -4,7 +4,6 @@
// Flags: --wasm-max-mem-pages=16384
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const k1MiB = 1 * 1024 * 1024;
diff --git a/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js b/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js
index db344f30f3..6baf0f3c7e 100644
--- a/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js
+++ b/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js
@@ -4,7 +4,6 @@
// Flags: --wasm-max-mem-pages=32768
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const k1MiB = 1 * 1024 * 1024;
diff --git a/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js b/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js
index d5cb006a79..39b9f95d9c 100644
--- a/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js
+++ b/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const k1MiB = 1 * 1024 * 1024;
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index 2ca5d9a725..8bc96f7ea0 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --expose-gc --stress-compaction --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var kMemSize = 65536;
diff --git a/deps/v8/test/mjsunit/wasm/multi-value.js b/deps/v8/test/mjsunit/wasm/multi-value.js
index d6eff16293..1948801958 100644
--- a/deps/v8/test/mjsunit/wasm/multi-value.js
+++ b/deps/v8/test/mjsunit/wasm/multi-value.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-mv
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function MultiBlockResultTest() {
diff --git a/deps/v8/test/mjsunit/wasm/names.js b/deps/v8/test/mjsunit/wasm/names.js
index 8b635e6771..4904d67fb5 100644
--- a/deps/v8/test/mjsunit/wasm/names.js
+++ b/deps/v8/test/mjsunit/wasm/names.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
function toBytes(string) {
diff --git a/deps/v8/test/mjsunit/wasm/origin-trial-flags.js b/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
index d41f581e13..b9ce6f7f94 100644
--- a/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
+++ b/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
@@ -4,7 +4,6 @@
// Flags: --noexperimental-wasm-threads --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function instantiateModuleWithThreads() {
diff --git a/deps/v8/test/mjsunit/wasm/parallel_compilation.js b/deps/v8/test/mjsunit/wasm/parallel_compilation.js
index 208232cfd4..6eca124bc4 100644
--- a/deps/v8/test/mjsunit/wasm/parallel_compilation.js
+++ b/deps/v8/test/mjsunit/wasm/parallel_compilation.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --wasm-num-compilation-tasks=10
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function assertModule(module, memsize) {
diff --git a/deps/v8/test/mjsunit/wasm/params.js b/deps/v8/test/mjsunit/wasm/params.js
index e964cee686..33858429c4 100644
--- a/deps/v8/test/mjsunit/wasm/params.js
+++ b/deps/v8/test/mjsunit/wasm/params.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function runSelect2(select, which, a, b) {
diff --git a/deps/v8/test/mjsunit/wasm/print-code.js b/deps/v8/test/mjsunit/wasm/print-code.js
index 2d35a27559..c604ca75f0 100644
--- a/deps/v8/test/mjsunit/wasm/print-code.js
+++ b/deps/v8/test/mjsunit/wasm/print-code.js
@@ -7,7 +7,6 @@
// Just test that printing the code of the following wasm modules does not
// crash.
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function print_deserialized_code() {
diff --git a/deps/v8/test/mjsunit/wasm/receiver.js b/deps/v8/test/mjsunit/wasm/receiver.js
index b3373c11ec..10e8855927 100644
--- a/deps/v8/test/mjsunit/wasm/receiver.js
+++ b/deps/v8/test/mjsunit/wasm/receiver.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function testCallImport(func, expected, a, b) {
diff --git a/deps/v8/test/mjsunit/wasm/serialize-lazy-module.js b/deps/v8/test/mjsunit/wasm/serialize-lazy-module.js
index 98998c6f43..86b6855fc1 100644
--- a/deps/v8/test/mjsunit/wasm/serialize-lazy-module.js
+++ b/deps/v8/test/mjsunit/wasm/serialize-lazy-module.js
@@ -4,7 +4,6 @@
// Flags: --wasm-lazy-compilation --allow-natives-syntax --expose-gc
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
(function SerializeUncompiledModule() {
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory.js b/deps/v8/test/mjsunit/wasm/shared-memory.js
index bbe89a3fe5..80e894b28f 100644
--- a/deps/v8/test/mjsunit/wasm/shared-memory.js
+++ b/deps/v8/test/mjsunit/wasm/shared-memory.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-threads
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function assertMemoryIsValid(memory, shared) {
@@ -130,3 +129,17 @@ function assertMemoryIsValid(memory, shared) {
assertEquals(0, instance.exports.main(0, 0x11111111));
assertEquals(0x11111111, instance.exports.main(0, 0x11111111));
})();
+
+(function TestMemoryConstructorShouldNotCallHasProperty() {
+ print(arguments.callee.name);
+ // from test/wasm-js/data/test/js-api/memory/constructor.any.js
+ const proxy = new Proxy({}, {
+ has(o, x) {
+ throw new Error(`Should not call [[HasProperty]] with ${x}`);
+ },
+ get(o, x) {
+ return 0;
+ },
+ });
+ new WebAssembly.Memory(proxy);
+})();
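
TestMemoryConstructorShouldNotCallHasProperty pins down a JS-API detail: the Memory constructor reads its descriptor with plain [[Get]] accesses and must never probe it with [[HasProperty]], so a proxy whose has trap throws only fails if the implementation does an `'initial' in descriptor`-style check. The same idea as a standalone sketch, assuming the mjsunit helpers and kPageSize from wasm-module-builder.js (values illustrative):

// get() answers every property read; has() would fire only on an `in` probe.
const descriptor = new Proxy({}, {
  has(target, key) { throw new Error('unexpected [[HasProperty]] for ' + key); },
  get(target, key) { return key === 'initial' ? 1 : undefined; },
});
const memory = new WebAssembly.Memory(descriptor);  // must construct cleanly
assertEquals(kPageSize, memory.buffer.byteLength);  // one 64 KiB page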
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index f49dca3585..484cee0acd 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// The stack trace contains file path, only keep "stack.js".
@@ -70,10 +69,10 @@ var module = builder.instantiate({mod: {func: STACK}});
(function testSimpleStack() {
var expected_string = 'Error\n' +
// The line numbers below will change as this test gains or loses lines.
- ' at STACK (stack.js:39:11)\n' + // --
+ ' at STACK (stack.js:38:11)\n' + // --
' at main (wasm-function[1]:1)\n' + // --
- ' at testSimpleStack (stack.js:78:18)\n' + // --
- ' at stack.js:80:3'; // --
+ ' at testSimpleStack (stack.js:77:18)\n' + // --
+ ' at stack.js:79:3'; // --
module.exports.main();
assertEquals(expected_string, stripPath(stack));
@@ -90,10 +89,10 @@ Error.prepareStackTrace = function(error, frames) {
verifyStack(stack, [
// isWasm function line pos file
- [ false, "STACK", 39, 0, "stack.js"],
+ [ false, "STACK", 38, 0, "stack.js"],
[ true, "main", 1, 1, null],
- [ false, "testStackFrames", 89, 0, "stack.js"],
- [ false, null, 98, 0, "stack.js"]
+ [ false, "testStackFrames", 88, 0, "stack.js"],
+ [ false, null, 97, 0, "stack.js"]
]);
})();
@@ -106,8 +105,8 @@ Error.prepareStackTrace = function(error, frames) {
verifyStack(e.stack, [
// isWasm function line pos file
[ true, "exec_unreachable", 2, 1, null],
- [ false, "testWasmUnreachable", 102, 0, "stack.js"],
- [ false, null, 113, 0, "stack.js"]
+ [ false, "testWasmUnreachable", 101, 0, "stack.js"],
+ [ false, null, 112, 0, "stack.js"]
]);
}
})();
@@ -122,8 +121,8 @@ Error.prepareStackTrace = function(error, frames) {
// isWasm function line pos file
[ true, null, 3, 3, null],
[ true, "call_mem_out_of_bounds", 4, 1, null],
- [ false, "testWasmMemOutOfBounds", 117, 0, "stack.js"],
- [ false, null, 129, 0, "stack.js"]
+ [ false, "testWasmMemOutOfBounds", 116, 0, "stack.js"],
+ [ false, null, 128, 0, "stack.js"]
]);
}
})();
@@ -177,8 +176,8 @@ Error.prepareStackTrace = function(error, frames) {
verifyStack(e.stack, [
// isWasm, function, line, pos, file
[true, 'main', 0, unreachable_pos + 1, null], // -
- [false, 'testBigOffset', 173, 0, 'stack.js'], //-
- [false, null, 184, 0, 'stack.js']
+ [false, 'testBigOffset', 172, 0, 'stack.js'], //-
+ [false, null, 183, 0, 'stack.js']
]);
}
})();
diff --git a/deps/v8/test/mjsunit/wasm/stackwalk.js b/deps/v8/test/mjsunit/wasm/stackwalk.js
index d2810fce45..91951ff4c3 100644
--- a/deps/v8/test/mjsunit/wasm/stackwalk.js
+++ b/deps/v8/test/mjsunit/wasm/stackwalk.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --expose-gc --allow-natives-syntax
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function makeFFI(func) {
diff --git a/deps/v8/test/mjsunit/wasm/start-function.js b/deps/v8/test/mjsunit/wasm/start-function.js
index b47dac5767..e17c8f1785 100644
--- a/deps/v8/test/mjsunit/wasm/start-function.js
+++ b/deps/v8/test/mjsunit/wasm/start-function.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function instantiate(sig, body) {
@@ -46,8 +45,8 @@ assertThrows(() => {instantiate(kSig_i_v, [kExprI32Const, 0]);});
assertThrows(
() => builder.instantiate(), WebAssembly.CompileError,
- 'WebAssembly.Module(): Wasm decoding failed: ' +
- 'function index 1 out of bounds (1 entry) @+20');
+ 'WebAssembly.Module(): ' +
+ 'start function index 1 out of bounds (1 entry) @+20');
})();
@@ -63,8 +62,7 @@ assertThrows(() => {instantiate(kSig_i_v, [kExprI32Const, 0]);});
assertThrows(
() => builder.instantiate(), WebAssembly.CompileError,
- 'WebAssembly.Module(): Wasm decoding failed: ' +
- 'unexpected section: Start @+27');
+ 'WebAssembly.Module(): unexpected section <Start> @+27');
})();
@@ -153,9 +151,7 @@ assertThrows(() => {instantiate(kSig_i_v, [kExprI32Const, 0]);});
assertThrows(
() => builder.instantiate(), WebAssembly.RuntimeError, /unreachable/);
- assertPromiseResult(builder.asyncInstantiate(), assertUnreachable,
- e => assertInstanceof(e, WebAssembly.RuntimeError));
- assertPromiseResult(WebAssembly.instantiate(builder.toModule()),
- assertUnreachable,
- e => assertInstanceof(e, WebAssembly.RuntimeError));
+ assertThrowsAsync(builder.asyncInstantiate(), WebAssembly.RuntimeError);
+ assertThrowsAsync(
+ WebAssembly.instantiate(builder.toModule()), WebAssembly.RuntimeError);
})();
diff --git a/deps/v8/test/mjsunit/wasm/streaming-error-position.js b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
index fe9f9a1410..266c134966 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-error-position.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --wasm-test-streaming --expose-wasm --allow-natives-syntax
+// Flags: --wasm-test-streaming --expose-wasm
'use strict';
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
function module(bytes) {
@@ -33,14 +32,9 @@ function toBuffer(binary) {
}
function testErrorPosition(bytes, pos, test_name) {
- assertPromiseResult(
- WebAssembly.compile(toBuffer(bytes)), assertUnreachable, e => {
- print(test_name);
- assertInstanceof(e, WebAssembly.CompileError);
- let regex = new RegExp('@\\+' + pos);
- print(e.message);
- assertMatches(regex, e.message, 'Error Position');
- });
+ assertThrowsAsync(
+ WebAssembly.compile(toBuffer(bytes)), WebAssembly.CompileError,
+ new RegExp('@\\+' + pos));
}
(function testInvalidMagic() {
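
The rewritten testErrorPosition delegates to assertThrowsAsync with a dynamically built RegExp: streaming compile errors carry the byte offset of the failure as a trailing '@+<offset>' marker, so new RegExp('@\\+' + pos) checks the reported position and nothing else (the helper's test_name parameter is now unused, since the print call went away with the old callback). Roughly, with illustrative message text:

// '+' is a regex metacharacter, hence the double escape when building
// the pattern from a string.
function positionMatcher(pos) {
  return new RegExp('@\\+' + pos);
}
assertTrue(positionMatcher(4).test('expected magic word @+4'));
assertFalse(positionMatcher(4).test('expected magic word @+14'));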
diff --git a/deps/v8/test/mjsunit/wasm/table-copy.js b/deps/v8/test/mjsunit/wasm/table-copy.js
index 89572fa41e..7c5c49669f 100644
--- a/deps/v8/test/mjsunit/wasm/table-copy.js
+++ b/deps/v8/test/mjsunit/wasm/table-copy.js
@@ -4,7 +4,6 @@
// Flags: --experimental-wasm-bulk-memory
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestTableCopyInbounds() {
@@ -20,7 +19,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
kExprGetLocal, 0,
kExprGetLocal, 1,
kExprGetLocal, 2,
- kNumericPrefix, kExprTableCopy, kTableZero])
+ kNumericPrefix, kExprTableCopy, kTableZero, kTableZero])
.exportAs("copy");
let instance = builder.instantiate();
@@ -30,9 +29,6 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
copy(0, i, kTableSize - i);
copy(i, 0, kTableSize - i);
}
- let big = 1000000;
- copy(big, 0, 0); // nop
- copy(0, big, 0); // nop
})();
function addFunction(builder, k) {
@@ -75,7 +71,7 @@ function assertTable(obj, ...elems) {
kExprGetLocal, 0,
kExprGetLocal, 1,
kExprGetLocal, 2,
- kNumericPrefix, kExprTableCopy, kTableZero])
+ kNumericPrefix, kExprTableCopy, kTableZero, kTableZero])
.exportAs("copy");
builder.addExportOfKind("table", kExternalTable, 0);
@@ -129,7 +125,7 @@ function assertCall(call, ...elems) {
kExprGetLocal, 0,
kExprGetLocal, 1,
kExprGetLocal, 2,
- kNumericPrefix, kExprTableCopy, kTableZero])
+ kNumericPrefix, kExprTableCopy, kTableZero, kTableZero])
.exportAs("copy");
builder.addFunction("call", sig_i_i)
@@ -165,7 +161,7 @@ function assertCall(call, ...elems) {
kExprGetLocal, 0,
kExprGetLocal, 1,
kExprGetLocal, 2,
- kNumericPrefix, kExprTableCopy, kTableZero])
+ kNumericPrefix, kExprTableCopy, kTableZero, kTableZero])
.exportAs("copy");
let instance = builder.instantiate();
@@ -176,6 +172,13 @@ function assertCall(call, ...elems) {
assertThrows(() => copy(1, 0, kTableSize));
assertThrows(() => copy(0, 1, kTableSize));
+ {
+ let big = 1000000;
+ assertThrows(() => copy(big, 0, 0));
+ assertThrows(() => copy(0, big, 0));
+ }
+
+
for (let big = 4294967295; big > 1000; big >>>= 1) {
assertThrows(() => copy(big, 0, 1));
assertThrows(() => copy(0, big, 1));
@@ -187,6 +190,7 @@ function assertCall(call, ...elems) {
assertThrows(() => copy(0, big, 1));
assertThrows(() => copy(0, 0, big));
}
+
})();
(function TestTableCopyShared() {
@@ -221,7 +225,7 @@ function assertCall(call, ...elems) {
kExprGetLocal, 0,
kExprGetLocal, 1,
kExprGetLocal, 2,
- kNumericPrefix, kExprTableCopy, kTableZero])
+ kNumericPrefix, kExprTableCopy, kTableZero, kTableZero])
.exportAs("copy");
builder.addFunction("call", sig_i_i)
diff --git a/deps/v8/test/mjsunit/wasm/table-grow.js b/deps/v8/test/mjsunit/wasm/table-grow.js
index 8d3c717522..fd895dedc3 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
let kMaxTableSize = 10000000;
diff --git a/deps/v8/test/mjsunit/wasm/table-init.js b/deps/v8/test/mjsunit/wasm/table-init.js
new file mode 100644
index 0000000000..c95e072c64
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table-init.js
@@ -0,0 +1,128 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-bulk-memory
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function addFunction(builder, k) {
+ let m = builder.addFunction("", kSig_i_v)
+ .addBody([...wasmI32Const(k)]);
+ return m;
+}
+
+function addFunctions(builder, count, exportf = false) {
+ let o = {};
+ for (var i = 0; i < count; i++) {
+ let name = `f${i}`;
+ o[name] = addFunction(builder, i);
+ if (exportf) o[name].exportAs(name);
+ }
+ return o;
+}
+
+function assertTable(obj, ...elems) {
+ for (var i = 0; i < elems.length; i++) {
+ assertEquals(elems[i], obj.get(i));
+ }
+}
+
+(function TestTableInitInBounds() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sig_v_iii = builder.addType(kSig_v_iii);
+ let kTableSize = 5;
+
+ builder.setTableBounds(kTableSize, kTableSize);
+ {
+ let o = addFunctions(builder, kTableSize, true);
+ builder.addPassiveElementSegment(
+ [o.f0.index, o.f1.index, o.f2.index, o.f3.index, o.f4.index, null]);
+ }
+
+ builder.addFunction("init0", sig_v_iii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kNumericPrefix, kExprTableInit, kSegmentZero, kTableZero])
+ .exportAs("init0");
+
+ builder.addExportOfKind("table", kExternalTable, 0);
+
+ let instance = builder.instantiate();
+ let x = instance.exports;
+
+ assertTable(x.table, null, null, null, null, null);
+
+ // test actual writes.
+ x.init0(0, 0, 1);
+ assertTable(x.table, x.f0, null, null, null, null);
+ x.init0(0, 0, 2);
+ assertTable(x.table, x.f0, x.f1, null, null, null);
+ x.init0(0, 0, 3);
+ assertTable(x.table, x.f0, x.f1, x.f2, null, null);
+ x.init0(3, 0, 2);
+ assertTable(x.table, x.f0, x.f1, x.f2, x.f0, x.f1);
+ x.init0(3, 1, 2);
+ assertTable(x.table, x.f0, x.f1, x.f2, x.f1, x.f2);
+ x.init0(3, 2, 2);
+ assertTable(x.table, x.f0, x.f1, x.f2, x.f2, x.f3);
+ x.init0(3, 3, 2);
+ assertTable(x.table, x.f0, x.f1, x.f2, x.f3, x.f4);
+
+ // test writing null
+ x.init0(0, 5, 1);
+ assertTable(x.table, null, x.f1, x.f2, x.f3, x.f4);
+})();
+
+(function TestTableInitOob() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sig_v_iii = builder.addType(kSig_v_iii);
+ let kTableSize = 5;
+
+ builder.setTableBounds(kTableSize, kTableSize);
+ {
+ let o = addFunctions(builder, kTableSize);
+ builder.addPassiveElementSegment(
+ [o.f0.index, o.f1.index, o.f2.index, o.f3.index, o.f4.index]);
+ }
+
+ builder.addFunction("init0", sig_v_iii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kNumericPrefix, kExprTableInit, kSegmentZero, kTableZero])
+ .exportAs("init0");
+
+ builder.addExportOfKind("table", kExternalTable, 0);
+
+ let instance = builder.instantiate();
+ let x = instance.exports;
+
+ assertTable(x.table, null, null, null, null, null);
+
+ // Even a zero count traps when an index is out of bounds.
+ assertThrows(() => x.init0(kTableSize+1, 0, 0));
+ assertThrows(() => x.init0(0, kTableSize+1, 0));
+
+ assertThrows(() => x.init0(0, 0, 6));
+ assertThrows(() => x.init0(0, 1, 5));
+ assertThrows(() => x.init0(0, 2, 4));
+ assertThrows(() => x.init0(0, 3, 3));
+ assertThrows(() => x.init0(0, 4, 2));
+ assertThrows(() => x.init0(0, 5, 1));
+
+ assertThrows(() => x.init0(0, 0, 6));
+ assertThrows(() => x.init0(1, 0, 5));
+ assertThrows(() => x.init0(2, 0, 4));
+ assertThrows(() => x.init0(3, 0, 3));
+ assertThrows(() => x.init0(4, 0, 2));
+ assertThrows(() => x.init0(5, 0, 1));
+
+ assertThrows(() => x.init0(10, 0, 1));
+ assertThrows(() => x.init0(0, 10, 1));
+})();
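
Both halves of TestTableInitOob walk the same invariant: table.init with destination d, source s, and count n traps whenever either range fails to fit, and a zero count still validates both indices. As a predicate mirroring the checks the asserts above exercise (names illustrative):

function tableInitTraps(dst, src, count, tableSize, segmentLength) {
  return dst + count > tableSize || src + count > segmentLength;
}
assertTrue(tableInitTraps(0, 5, 1, 5, 5));   // source range out of bounds
assertTrue(tableInitTraps(5, 0, 1, 5, 5));   // destination range out of bounds
assertTrue(tableInitTraps(6, 0, 0, 5, 5));   // zero count, index still checked
assertFalse(tableInitTraps(3, 3, 2, 5, 6));  // fits, as in TestTableInitInBounds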
diff --git a/deps/v8/test/mjsunit/wasm/table-limits.js b/deps/v8/test/mjsunit/wasm/table-limits.js
new file mode 100644
index 0000000000..7e31bf1f5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table-limits.js
@@ -0,0 +1,42 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-max-table-size=10
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// The flag above caps the maximum table size at 10, so 11 is out of bounds.
+const oob = 11;
+
+(function TestJSTableInitialAboveTheLimit() {
+ print(arguments.callee.name);
+ assertThrows(
+ () => new WebAssembly.Table({ initial: oob, element: "anyfunc" }),
+ RangeError, /above the upper bound/);
+})();
+
+(function TestJSTableMaximumAboveTheLimit() {
+ print(arguments.callee.name);
+ assertThrows(
+ () => new WebAssembly.Table({ initial: 1, maximum: oob, element: "anyfunc" }),
+ RangeError, /above the upper bound/);
+})();
+
+(function TestDecodeTableInitialAboveTheLimit() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ builder.setTableBounds(oob);
+ assertThrows(
+ () => builder.instantiate(),
+ WebAssembly.CompileError, /is larger than implementation limit/);
+})();
+
+(function TestDecodeTableMaximumAboveTheLimit() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ builder.setTableBounds(1, oob);
+ assertThrows(
+ () => builder.instantiate(),
+ WebAssembly.CompileError, /is larger than implementation limit/);
+})();
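
The limit is enforced on two surfaces: the JS API rejects an oversized descriptor with a RangeError before any module exists, while an oversized table declared inside a module is only caught at decode time as a CompileError. A quick check of the first surface, assuming d8 was started with --wasm-max-table-size=10:

let threw = false;
try {
  new WebAssembly.Table({initial: 11, element: 'anyfunc'});
} catch (e) {
  threw = e instanceof RangeError;  // '... above the upper bound'
}
assertTrue(threw);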
diff --git a/deps/v8/test/mjsunit/wasm/table.js b/deps/v8/test/mjsunit/wasm/table.js
index 0f4a63396e..3ee33be688 100644
--- a/deps/v8/test/mjsunit/wasm/table.js
+++ b/deps/v8/test/mjsunit/wasm/table.js
@@ -6,16 +6,12 @@
'use strict';
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Basic tests.
-var outOfUint32RangeValue = 1e12;
-var int32ButOob = 1073741824;
-var kMaxUint32 = (4 * 1024 * 1024 * 1024) - 1;
-var kMaxUint31 = (2 * 1024 * 1024 * 1024) - 1;
-var kV8MaxWasmTableSize = 10000000;
+const outOfUint32RangeValue = 1e12;
+const kV8MaxWasmTableSize = 10000000;
function assertTableIsValid(table, length) {
assertSame(WebAssembly.Table.prototype, table.__proto__);
@@ -87,14 +83,16 @@ function assertTableIsValid(table, length) {
table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: undefined});
assertTableIsValid(table, 0);
- table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: kMaxUint31});
+ table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: 1000000});
assertTableIsValid(table, 0);
- table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: kMaxUint32});
+ table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: kV8MaxWasmTableSize});
assertTableIsValid(table, 0);
- table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: kV8MaxWasmTableSize + 1});
- assertTableIsValid(table, 0);
+ assertThrows(
+ () => new WebAssembly.Table(
+ {element: "anyfunc", initial: 0, maximum: kV8MaxWasmTableSize + 1}),
+ RangeError, /above the upper bound/);
})();
(function TestMaximumIsReadOnce() {
diff --git a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
index 02f28ff515..eb0a95384c 100644
--- a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
var debug = true;
diff --git a/deps/v8/test/mjsunit/wasm/tier-up-testing-flag.js b/deps/v8/test/mjsunit/wasm/tier-up-testing-flag.js
index 1b2a11e0ca..20aa30930c 100644
--- a/deps/v8/test/mjsunit/wasm/tier-up-testing-flag.js
+++ b/deps/v8/test/mjsunit/wasm/tier-up-testing-flag.js
@@ -6,7 +6,6 @@
// Compile functions 0 and 2 with Turbofan, the rest with Liftoff:
// Flags: --wasm-tier-mask-for-testing=5
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const num_functions = 5;
diff --git a/deps/v8/test/mjsunit/wasm/trap-handler-fallback.js b/deps/v8/test/mjsunit/wasm/trap-handler-fallback.js
index 465ca449cc..6b1cdf96e2 100644
--- a/deps/v8/test/mjsunit/wasm/trap-handler-fallback.js
+++ b/deps/v8/test/mjsunit/wasm/trap-handler-fallback.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax --wasm-trap-handler-fallback
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Make sure we can get at least one guard region if the trap handler is enabled.
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
index c4a0f4d787..d893f97d62 100644
--- a/deps/v8/test/mjsunit/wasm/trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Collect the Callsite objects instead of just a string:
diff --git a/deps/v8/test/mjsunit/wasm/unicode-validation.js b/deps/v8/test/mjsunit/wasm/unicode-validation.js
index 8932c32653..e331c00a6a 100644
--- a/deps/v8/test/mjsunit/wasm/unicode-validation.js
+++ b/deps/v8/test/mjsunit/wasm/unicode-validation.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function toByteArray(s) {
diff --git a/deps/v8/test/mjsunit/wasm/unicode.js b/deps/v8/test/mjsunit/wasm/unicode.js
index 7e29c00f33..379a513d42 100644
--- a/deps/v8/test/mjsunit/wasm/unicode.js
+++ b/deps/v8/test/mjsunit/wasm/unicode.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
function checkImport(
@@ -60,7 +59,7 @@ checkExports('ā˜ŗā˜ŗmulā˜ŗā˜ŗ', 'ā˜ŗā˜ŗmulā˜ŗā˜ŗ', 'ā˜ŗā˜ŗaddā˜ŗā˜ŗ', 'ā˜ŗā˜ŗadd
builder.addImport('three snowmen: ā˜ƒā˜ƒā˜ƒ', 'foo', kSig_i_v);
assertThrows(
() => builder.instantiate({}), TypeError,
- /WebAssembly Instantiation: Import #0 module="three snowmen: ā˜ƒā˜ƒā˜ƒ" error: /);
+ /WebAssembly.Instance\(\): Import #0 module="three snowmen: ā˜ƒā˜ƒā˜ƒ" error: /);
})();
(function errorMessageUnicodeInImportElemName() {
@@ -68,7 +67,7 @@ checkExports('ā˜ŗā˜ŗmulā˜ŗā˜ŗ', 'ā˜ŗā˜ŗmulā˜ŗā˜ŗ', 'ā˜ŗā˜ŗaddā˜ŗā˜ŗ', 'ā˜ŗā˜ŗadd
builder.addImport('mod', 'three snowmen: ā˜ƒā˜ƒā˜ƒ', kSig_i_v);
assertThrows(
() => builder.instantiate({mod: {}}), WebAssembly.LinkError,
- 'WebAssembly Instantiation: Import #0 module="mod" function="three ' +
+ 'WebAssembly.Instance(): Import #0 module="mod" function="three ' +
'snowmen: ā˜ƒā˜ƒā˜ƒ" error: function import requires a callable');
})();
@@ -79,7 +78,7 @@ checkExports('ā˜ŗā˜ŗmulā˜ŗā˜ŗ', 'ā˜ŗā˜ŗmulā˜ŗā˜ŗ', 'ā˜ŗā˜ŗaddā˜ŗā˜ŗ', 'ā˜ŗā˜ŗadd
builder.addImport(mod_name, func_name, kSig_i_v);
assertThrows(
() => builder.instantiate({[mod_name]: {}}), WebAssembly.LinkError,
- 'WebAssembly Instantiation: Import #0 module="' + mod_name +
+ 'WebAssembly.Instance(): Import #0 module="' + mod_name +
'" function="' + func_name +
'" error: function import requires a callable');
})();
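
The expected message now appears in two forms in this file: as a regex, where the parentheses of Instance() must be escaped, and as a plain string, where they are written literally (a stray backslash inside a string literal is simply dropped by the parser). Side by side:

const re = /WebAssembly.Instance\(\): Import #0/;  // regex: escape the parens
const s = 'WebAssembly.Instance(): Import #0';     // string: write them as-is
assertTrue(re.test(s));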
diff --git a/deps/v8/test/mjsunit/wasm/unreachable-validation.js b/deps/v8/test/mjsunit/wasm/unreachable-validation.js
index 5b98b1713b..70768ff7d4 100644
--- a/deps/v8/test/mjsunit/wasm/unreachable-validation.js
+++ b/deps/v8/test/mjsunit/wasm/unreachable-validation.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
// Set unittest to false to run this test and just print results, without failing.
diff --git a/deps/v8/test/mjsunit/wasm/unreachable.js b/deps/v8/test/mjsunit/wasm/unreachable.js
index d77b53ea53..38be72952f 100644
--- a/deps/v8/test/mjsunit/wasm/unreachable.js
+++ b/deps/v8/test/mjsunit/wasm/unreachable.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var main = (function () {
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-common.js b/deps/v8/test/mjsunit/wasm/user-properties-common.js
index ab6b2bc979..f736593169 100644
--- a/deps/v8/test/mjsunit/wasm/user-properties-common.js
+++ b/deps/v8/test/mjsunit/wasm/user-properties-common.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --expose-gc --verify-heap
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
const verifyHeap = gc;
diff --git a/deps/v8/test/mjsunit/wasm/wasm-api-overloading.js b/deps/v8/test/mjsunit/wasm/wasm-api-overloading.js
index 6a39510208..0ae9db5d3a 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-api-overloading.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-api-overloading.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
let buffer = (() => {
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
deleted file mode 100644
index 7583d39a9b..0000000000
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ /dev/null
@@ -1,511 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm
-
-function bytes() {
- var buffer = new ArrayBuffer(arguments.length);
- var view = new Uint8Array(buffer);
- for (var i = 0; i < arguments.length; i++) {
- var val = arguments[i];
- if ((typeof val) == "string") val = val.charCodeAt(0);
- view[i] = val | 0;
- }
- return buffer;
-}
-
-// Header declaration constants
-var kWasmH0 = 0;
-var kWasmH1 = 0x61;
-var kWasmH2 = 0x73;
-var kWasmH3 = 0x6d;
-
-var kWasmV0 = 0x1;
-var kWasmV1 = 0;
-var kWasmV2 = 0;
-var kWasmV3 = 0;
-
-var kHeaderSize = 8;
-var kPageSize = 65536;
-var kSpecMaxPages = 65535;
-
-function bytesWithHeader() {
- var buffer = new ArrayBuffer(kHeaderSize + arguments.length);
- var view = new Uint8Array(buffer);
- view[0] = kWasmH0;
- view[1] = kWasmH1;
- view[2] = kWasmH2;
- view[3] = kWasmH3;
- view[4] = kWasmV0;
- view[5] = kWasmV1;
- view[6] = kWasmV2;
- view[7] = kWasmV3;
- for (var i = 0; i < arguments.length; i++) {
- var val = arguments[i];
- if ((typeof val) == "string") val = val.charCodeAt(0);
- view[kHeaderSize + i] = val | 0;
- }
- return buffer;
-}
-
-let kDeclNoLocals = 0;
-
-// Section declaration constants
-let kUnknownSectionCode = 0;
-let kTypeSectionCode = 1; // Function signature declarations
-let kImportSectionCode = 2; // Import declarations
-let kFunctionSectionCode = 3; // Function declarations
-let kTableSectionCode = 4; // Indirect function table and other tables
-let kMemorySectionCode = 5; // Memory attributes
-let kGlobalSectionCode = 6; // Global declarations
-let kExportSectionCode = 7; // Exports
-let kStartSectionCode = 8; // Start function declaration
-let kElementSectionCode = 9; // Elements section
-let kCodeSectionCode = 10; // Function code
-let kDataSectionCode = 11; // Data segments
-let kExceptionSectionCode = 12; // Exception section (between Global & Export)
-let kDataCountSectionCode = 13; // Data segments
-
-// Name section types
-let kModuleNameCode = 0;
-let kFunctionNamesCode = 1;
-let kLocalNamesCode = 2;
-
-let kWasmFunctionTypeForm = 0x60;
-let kWasmAnyFunctionTypeForm = 0x70;
-
-let kHasMaximumFlag = 1;
-
-// Segment flags
-let kActiveNoIndex = 0;
-let kPassive = 1;
-let kActiveWithIndex = 2;
-
-// Function declaration flags
-let kDeclFunctionName = 0x01;
-let kDeclFunctionImport = 0x02;
-let kDeclFunctionLocals = 0x04;
-let kDeclFunctionExport = 0x08;
-
-// Local types
-let kWasmStmt = 0x40;
-let kWasmI32 = 0x7f;
-let kWasmI64 = 0x7e;
-let kWasmF32 = 0x7d;
-let kWasmF64 = 0x7c;
-let kWasmS128 = 0x7b;
-let kWasmAnyRef = 0x6f;
-let kWasmAnyFunc = 0x70;
-let kWasmExceptRef = 0x68;
-
-let kExternalFunction = 0;
-let kExternalTable = 1;
-let kExternalMemory = 2;
-let kExternalGlobal = 3;
-let kExternalException = 4;
-
-let kTableZero = 0;
-let kMemoryZero = 0;
-let kSegmentZero = 0;
-
-let kExceptionAttribute = 0;
-
-// Useful signatures
-let kSig_i_i = makeSig([kWasmI32], [kWasmI32]);
-let kSig_l_l = makeSig([kWasmI64], [kWasmI64]);
-let kSig_i_l = makeSig([kWasmI64], [kWasmI32]);
-let kSig_i_ii = makeSig([kWasmI32, kWasmI32], [kWasmI32]);
-let kSig_i_iii = makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]);
-let kSig_v_iiii = makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmI32], []);
-let kSig_f_ff = makeSig([kWasmF32, kWasmF32], [kWasmF32]);
-let kSig_d_dd = makeSig([kWasmF64, kWasmF64], [kWasmF64]);
-let kSig_l_ll = makeSig([kWasmI64, kWasmI64], [kWasmI64]);
-let kSig_i_dd = makeSig([kWasmF64, kWasmF64], [kWasmI32]);
-let kSig_v_v = makeSig([], []);
-let kSig_i_v = makeSig([], [kWasmI32]);
-let kSig_l_v = makeSig([], [kWasmI64]);
-let kSig_f_v = makeSig([], [kWasmF32]);
-let kSig_d_v = makeSig([], [kWasmF64]);
-let kSig_v_i = makeSig([kWasmI32], []);
-let kSig_v_ii = makeSig([kWasmI32, kWasmI32], []);
-let kSig_v_iii = makeSig([kWasmI32, kWasmI32, kWasmI32], []);
-let kSig_v_l = makeSig([kWasmI64], []);
-let kSig_v_d = makeSig([kWasmF64], []);
-let kSig_v_dd = makeSig([kWasmF64, kWasmF64], []);
-let kSig_v_ddi = makeSig([kWasmF64, kWasmF64, kWasmI32], []);
-let kSig_ii_v = makeSig([], [kWasmI32, kWasmI32]);
-let kSig_iii_v = makeSig([], [kWasmI32, kWasmI32, kWasmI32]);
-let kSig_ii_i = makeSig([kWasmI32], [kWasmI32, kWasmI32]);
-let kSig_iii_i = makeSig([kWasmI32], [kWasmI32, kWasmI32, kWasmI32]);
-let kSig_ii_ii = makeSig([kWasmI32, kWasmI32], [kWasmI32, kWasmI32]);
-let kSig_iii_ii = makeSig([kWasmI32, kWasmI32], [kWasmI32, kWasmI32, kWasmI32]);
-
-let kSig_v_f = makeSig([kWasmF32], []);
-let kSig_f_f = makeSig([kWasmF32], [kWasmF32]);
-let kSig_f_d = makeSig([kWasmF64], [kWasmF32]);
-let kSig_d_d = makeSig([kWasmF64], [kWasmF64]);
-let kSig_r_r = makeSig([kWasmAnyRef], [kWasmAnyRef]);
-let kSig_a_a = makeSig([kWasmAnyFunc], [kWasmAnyFunc]);
-let kSig_i_r = makeSig([kWasmAnyRef], [kWasmI32]);
-let kSig_v_r = makeSig([kWasmAnyRef], []);
-let kSig_v_a = makeSig([kWasmAnyFunc], []);
-let kSig_v_rr = makeSig([kWasmAnyRef, kWasmAnyRef], []);
-let kSig_r_v = makeSig([], [kWasmAnyRef]);
-
-function makeSig(params, results) {
- return {params: params, results: results};
-}
-
-function makeSig_v_x(x) {
- return makeSig([x], []);
-}
-
-function makeSig_v_xx(x) {
- return makeSig([x, x], []);
-}
-
-function makeSig_r_v(r) {
- return makeSig([], [r]);
-}
-
-function makeSig_r_x(r, x) {
- return makeSig([x], [r]);
-}
-
-function makeSig_r_xx(r, x) {
- return makeSig([x, x], [r]);
-}
-
-// Opcodes
-let kExprUnreachable = 0x00;
-let kExprNop = 0x01;
-let kExprBlock = 0x02;
-let kExprLoop = 0x03;
-let kExprIf = 0x04;
-let kExprElse = 0x05;
-let kExprTry = 0x06;
-let kExprCatch = 0x07;
-let kExprThrow = 0x08;
-let kExprRethrow = 0x09;
-let kExprBrOnExn = 0x0a;
-let kExprEnd = 0x0b;
-let kExprBr = 0x0c;
-let kExprBrIf = 0x0d;
-let kExprBrTable = 0x0e;
-let kExprReturn = 0x0f;
-let kExprCallFunction = 0x10;
-let kExprCallIndirect = 0x11;
-let kExprDrop = 0x1a;
-let kExprSelect = 0x1b;
-let kExprGetLocal = 0x20;
-let kExprSetLocal = 0x21;
-let kExprTeeLocal = 0x22;
-let kExprGetGlobal = 0x23;
-let kExprSetGlobal = 0x24;
-let kExprI32Const = 0x41;
-let kExprI64Const = 0x42;
-let kExprF32Const = 0x43;
-let kExprF64Const = 0x44;
-let kExprRefNull = 0xd0;
-let kExprI32LoadMem = 0x28;
-let kExprI64LoadMem = 0x29;
-let kExprF32LoadMem = 0x2a;
-let kExprF64LoadMem = 0x2b;
-let kExprI32LoadMem8S = 0x2c;
-let kExprI32LoadMem8U = 0x2d;
-let kExprI32LoadMem16S = 0x2e;
-let kExprI32LoadMem16U = 0x2f;
-let kExprI64LoadMem8S = 0x30;
-let kExprI64LoadMem8U = 0x31;
-let kExprI64LoadMem16S = 0x32;
-let kExprI64LoadMem16U = 0x33;
-let kExprI64LoadMem32S = 0x34;
-let kExprI64LoadMem32U = 0x35;
-let kExprI32StoreMem = 0x36;
-let kExprI64StoreMem = 0x37;
-let kExprF32StoreMem = 0x38;
-let kExprF64StoreMem = 0x39;
-let kExprI32StoreMem8 = 0x3a;
-let kExprI32StoreMem16 = 0x3b;
-let kExprI64StoreMem8 = 0x3c;
-let kExprI64StoreMem16 = 0x3d;
-let kExprI64StoreMem32 = 0x3e;
-let kExprMemorySize = 0x3f;
-let kExprMemoryGrow = 0x40;
-let kExprI32Eqz = 0x45;
-let kExprI32Eq = 0x46;
-let kExprI32Ne = 0x47;
-let kExprI32LtS = 0x48;
-let kExprI32LtU = 0x49;
-let kExprI32GtS = 0x4a;
-let kExprI32GtU = 0x4b;
-let kExprI32LeS = 0x4c;
-let kExprI32LeU = 0x4d;
-let kExprI32GeS = 0x4e;
-let kExprI32GeU = 0x4f;
-let kExprI64Eqz = 0x50;
-let kExprI64Eq = 0x51;
-let kExprI64Ne = 0x52;
-let kExprI64LtS = 0x53;
-let kExprI64LtU = 0x54;
-let kExprI64GtS = 0x55;
-let kExprI64GtU = 0x56;
-let kExprI64LeS = 0x57;
-let kExprI64LeU = 0x58;
-let kExprI64GeS = 0x59;
-let kExprI64GeU = 0x5a;
-let kExprF32Eq = 0x5b;
-let kExprF32Ne = 0x5c;
-let kExprF32Lt = 0x5d;
-let kExprF32Gt = 0x5e;
-let kExprF32Le = 0x5f;
-let kExprF32Ge = 0x60;
-let kExprF64Eq = 0x61;
-let kExprF64Ne = 0x62;
-let kExprF64Lt = 0x63;
-let kExprF64Gt = 0x64;
-let kExprF64Le = 0x65;
-let kExprF64Ge = 0x66;
-let kExprRefIsNull = 0xd1;
-let kExprI32Clz = 0x67;
-let kExprI32Ctz = 0x68;
-let kExprI32Popcnt = 0x69;
-let kExprI32Add = 0x6a;
-let kExprI32Sub = 0x6b;
-let kExprI32Mul = 0x6c;
-let kExprI32DivS = 0x6d;
-let kExprI32DivU = 0x6e;
-let kExprI32RemS = 0x6f;
-let kExprI32RemU = 0x70;
-let kExprI32And = 0x71;
-let kExprI32Ior = 0x72;
-let kExprI32Xor = 0x73;
-let kExprI32Shl = 0x74;
-let kExprI32ShrS = 0x75;
-let kExprI32ShrU = 0x76;
-let kExprI32Rol = 0x77;
-let kExprI32Ror = 0x78;
-let kExprI64Clz = 0x79;
-let kExprI64Ctz = 0x7a;
-let kExprI64Popcnt = 0x7b;
-let kExprI64Add = 0x7c;
-let kExprI64Sub = 0x7d;
-let kExprI64Mul = 0x7e;
-let kExprI64DivS = 0x7f;
-let kExprI64DivU = 0x80;
-let kExprI64RemS = 0x81;
-let kExprI64RemU = 0x82;
-let kExprI64And = 0x83;
-let kExprI64Ior = 0x84;
-let kExprI64Xor = 0x85;
-let kExprI64Shl = 0x86;
-let kExprI64ShrS = 0x87;
-let kExprI64ShrU = 0x88;
-let kExprI64Rol = 0x89;
-let kExprI64Ror = 0x8a;
-let kExprF32Abs = 0x8b;
-let kExprF32Neg = 0x8c;
-let kExprF32Ceil = 0x8d;
-let kExprF32Floor = 0x8e;
-let kExprF32Trunc = 0x8f;
-let kExprF32NearestInt = 0x90;
-let kExprF32Sqrt = 0x91;
-let kExprF32Add = 0x92;
-let kExprF32Sub = 0x93;
-let kExprF32Mul = 0x94;
-let kExprF32Div = 0x95;
-let kExprF32Min = 0x96;
-let kExprF32Max = 0x97;
-let kExprF32CopySign = 0x98;
-let kExprF64Abs = 0x99;
-let kExprF64Neg = 0x9a;
-let kExprF64Ceil = 0x9b;
-let kExprF64Floor = 0x9c;
-let kExprF64Trunc = 0x9d;
-let kExprF64NearestInt = 0x9e;
-let kExprF64Sqrt = 0x9f;
-let kExprF64Add = 0xa0;
-let kExprF64Sub = 0xa1;
-let kExprF64Mul = 0xa2;
-let kExprF64Div = 0xa3;
-let kExprF64Min = 0xa4;
-let kExprF64Max = 0xa5;
-let kExprF64CopySign = 0xa6;
-let kExprI32ConvertI64 = 0xa7;
-let kExprI32SConvertF32 = 0xa8;
-let kExprI32UConvertF32 = 0xa9;
-let kExprI32SConvertF64 = 0xaa;
-let kExprI32UConvertF64 = 0xab;
-let kExprI64SConvertI32 = 0xac;
-let kExprI64UConvertI32 = 0xad;
-let kExprI64SConvertF32 = 0xae;
-let kExprI64UConvertF32 = 0xaf;
-let kExprI64SConvertF64 = 0xb0;
-let kExprI64UConvertF64 = 0xb1;
-let kExprF32SConvertI32 = 0xb2;
-let kExprF32UConvertI32 = 0xb3;
-let kExprF32SConvertI64 = 0xb4;
-let kExprF32UConvertI64 = 0xb5;
-let kExprF32ConvertF64 = 0xb6;
-let kExprF64SConvertI32 = 0xb7;
-let kExprF64UConvertI32 = 0xb8;
-let kExprF64SConvertI64 = 0xb9;
-let kExprF64UConvertI64 = 0xba;
-let kExprF64ConvertF32 = 0xbb;
-let kExprI32ReinterpretF32 = 0xbc;
-let kExprI64ReinterpretF64 = 0xbd;
-let kExprF32ReinterpretI32 = 0xbe;
-let kExprF64ReinterpretI64 = 0xbf;
-let kExprI32SExtendI8 = 0xc0;
-let kExprI32SExtendI16 = 0xc1;
-let kExprI64SExtendI8 = 0xc2;
-let kExprI64SExtendI16 = 0xc3;
-let kExprI64SExtendI32 = 0xc4;
-
-// Prefix opcodes
-let kNumericPrefix = 0xfc;
-let kSimdPrefix = 0xfd;
-let kAtomicPrefix = 0xfe;
-
-// Numeric opcodes.
-let kExprMemoryInit = 0x08;
-let kExprMemoryDrop = 0x09;
-let kExprMemoryCopy = 0x0a;
-let kExprMemoryFill = 0x0b;
-let kExprTableInit = 0x0c;
-let kExprTableDrop = 0x0d;
-let kExprTableCopy = 0x0e;
-
-// Atomic opcodes.
-let kExprAtomicWake = 0x00;
-let kExprI32AtomicWait = 0x01;
-let kExprI64AtomicWait = 0x02;
-let kExprI32AtomicLoad = 0x10;
-let kExprI32AtomicLoad8U = 0x12;
-let kExprI32AtomicLoad16U = 0x13;
-let kExprI32AtomicStore = 0x17;
-let kExprI32AtomicStore8U = 0x19;
-let kExprI32AtomicStore16U = 0x1a;
-let kExprI32AtomicAdd = 0x1e;
-let kExprI32AtomicAdd8U = 0x20;
-let kExprI32AtomicAdd16U = 0x21;
-let kExprI32AtomicSub = 0x25;
-let kExprI32AtomicSub8U = 0x27;
-let kExprI32AtomicSub16U = 0x28;
-let kExprI32AtomicAnd = 0x2c;
-let kExprI32AtomicAnd8U = 0x2e;
-let kExprI32AtomicAnd16U = 0x2f;
-let kExprI32AtomicOr = 0x33;
-let kExprI32AtomicOr8U = 0x35;
-let kExprI32AtomicOr16U = 0x36;
-let kExprI32AtomicXor = 0x3a;
-let kExprI32AtomicXor8U = 0x3c;
-let kExprI32AtomicXor16U = 0x3d;
-let kExprI32AtomicExchange = 0x41;
-let kExprI32AtomicExchange8U = 0x43;
-let kExprI32AtomicExchange16U = 0x44;
-let kExprI32AtomicCompareExchange = 0x48;
-let kExprI32AtomicCompareExchange8U = 0x4a;
-let kExprI32AtomicCompareExchange16U = 0x4b;
-
-let kExprI64AtomicLoad = 0x11;
-let kExprI64AtomicLoad8U = 0x14;
-let kExprI64AtomicLoad16U = 0x15;
-let kExprI64AtomicLoad32U = 0x16;
-let kExprI64AtomicStore = 0x18;
-let kExprI64AtomicStore8U = 0x1b;
-let kExprI64AtomicStore16U = 0x1c;
-let kExprI64AtomicStore32U = 0x1d;
-let kExprI64AtomicAdd = 0x1f;
-let kExprI64AtomicAdd8U = 0x22;
-let kExprI64AtomicAdd16U = 0x23;
-let kExprI64AtomicAdd32U = 0x24;
-let kExprI64AtomicSub = 0x26;
-let kExprI64AtomicSub8U = 0x29;
-let kExprI64AtomicSub16U = 0x2a;
-let kExprI64AtomicSub32U = 0x2b;
-let kExprI64AtomicAnd = 0x2d;
-let kExprI64AtomicAnd8U = 0x30;
-let kExprI64AtomicAnd16U = 0x31;
-let kExprI64AtomicAnd32U = 0x32;
-let kExprI64AtomicOr = 0x34;
-let kExprI64AtomicOr8U = 0x37;
-let kExprI64AtomicOr16U = 0x38;
-let kExprI64AtomicOr32U = 0x39;
-let kExprI64AtomicXor = 0x3b;
-let kExprI64AtomicXor8U = 0x3e;
-let kExprI64AtomicXor16U = 0x3f;
-let kExprI64AtomicXor32U = 0x40;
-let kExprI64AtomicExchange = 0x42;
-let kExprI64AtomicExchange8U = 0x45;
-let kExprI64AtomicExchange16U = 0x46;
-let kExprI64AtomicExchange32U = 0x47;
-let kExprI64AtomicCompareExchange = 0x49
-let kExprI64AtomicCompareExchange8U = 0x4c;
-let kExprI64AtomicCompareExchange16U = 0x4d;
-let kExprI64AtomicCompareExchange32U = 0x4e;
-
-// Simd opcodes.
-let kExprF32x4Min = 0x9e;
-
-let kTrapUnreachable = 0;
-let kTrapMemOutOfBounds = 1;
-let kTrapDivByZero = 2;
-let kTrapDivUnrepresentable = 3;
-let kTrapRemByZero = 4;
-let kTrapFloatUnrepresentable = 5;
-let kTrapFuncInvalid = 6;
-let kTrapFuncSigMismatch = 7;
-let kTrapTypeError = 8;
-let kTrapUnalignedAccess = 9;
-let kTrapDataSegmentDropped = 10;
-let kTrapElemSegmentDropped = 11;
-
-let kTrapMsgs = [
- "unreachable",
- "memory access out of bounds",
- "divide by zero",
- "divide result unrepresentable",
- "remainder by zero",
- "float unrepresentable in integer range",
- "invalid index into function table",
- "function signature mismatch",
- "wasm function signature contains illegal type",
- "operation does not support unaligned accesses",
- "data segment has been dropped",
- "element segment has been dropped"
-];
-
-function assertTraps(trap, code) {
- try {
- if (typeof code === 'function') {
- code();
- } else {
- eval(code);
- }
- } catch (e) {
- assertEquals('object', typeof e);
- assertEquals(kTrapMsgs[trap], e.message);
- // Success.
- return;
- }
- throw new MjsUnitAssertionError('Did not trap, expected: ' + kTrapMsgs[trap]);
-}
-
-function wasmI32Const(val) {
- let bytes = [kExprI32Const];
- for (let i = 0; i < 4; ++i) {
- bytes.push(0x80 | ((val >> (7 * i)) & 0x7f));
- }
- bytes.push((val >> (7 * 4)) & 0x7f);
- return bytes;
-}
-
-function wasmF32Const(f) {
- return [kExprF32Const].concat(Array.from(new Uint8Array((new Float32Array([f])).buffer)));
-}
-
-function wasmF64Const(f) {
- return [kExprF64Const].concat(Array.from(new Uint8Array((new Float64Array([f])).buffer)));
-}
diff --git a/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js b/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js
index 44bfedbfdb..3b1a333c7f 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js
@@ -4,7 +4,6 @@
// Flags: --expose-wasm --wasm-math-intrinsics
-load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
function verbose(args) {
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index b6959be23e..077ac51baa 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -4,10 +4,498 @@
// Used for encoding f32 and double constants to bits.
let __buffer = new ArrayBuffer(8);
-let byte_view = new Int8Array(__buffer);
+let byte_view = new Uint8Array(__buffer);
let f32_view = new Float32Array(__buffer);
let f64_view = new Float64Array(__buffer);
+// The bytes function receives one of
+// - several arguments, each of which is either a number or a string of length
+//   1; if it's a string, the char code of the contained character is used.
+// - a single array argument containing the actual arguments
+// - a single string; the returned buffer will contain the char codes of all
+//   contained characters.
+function bytes(...input) {
+  if (input.length == 1 && Array.isArray(input[0])) input = input[0];
+ if (input.length == 1 && typeof input[0] == 'string') {
+ let len = input[0].length;
+ let view = new Uint8Array(len);
+ for (let i = 0; i < len; i++) view[i] = input[0].charCodeAt(i);
+ return view.buffer;
+ }
+ let view = new Uint8Array(input.length);
+ for (let i = 0; i < input.length; i++) {
+ let val = input[i];
+ if (typeof val == 'string') {
+ assertEquals(1, val.length, 'string inputs must have length 1');
+ val = val.charCodeAt(0);
+ }
+ view[i] = val | 0;
+ }
+ return view.buffer;
+}
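
A quick sketch of the three accepted input forms (variable names are
illustrative; the array form relies on the Array.isArray check above):

    let fromArgs   = new Uint8Array(bytes(1, 2, 'A'));   // [1, 2, 65] -- 'A' becomes its char code
    let fromArray  = new Uint8Array(bytes([1, 2, 65]));  // [1, 2, 65] -- single array argument
    let fromString = new Uint8Array(bytes('AB'));        // [65, 66]   -- char codes of the string
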
+
+// Header declaration constants
+var kWasmH0 = 0;
+var kWasmH1 = 0x61;
+var kWasmH2 = 0x73;
+var kWasmH3 = 0x6d;
+
+var kWasmV0 = 0x1;
+var kWasmV1 = 0;
+var kWasmV2 = 0;
+var kWasmV3 = 0;
+
+var kHeaderSize = 8;
+var kPageSize = 65536;
+var kSpecMaxPages = 65535;
+
+function bytesWithHeader(...input) {
+ const header =
+ [kWasmH0, kWasmH1, kWasmH2, kWasmH3, kWasmV0, kWasmV1, kWasmV2, kWasmV3];
+  return bytes(...header, ...input);
+}
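
A minimal usage sketch, assuming the spread fix above: with no extra input,
bytesWithHeader() yields just the 8-byte header ("\0asm" plus the little-endian
version 1), which is already a valid, empty module:

    let empty = new WebAssembly.Module(bytesWithHeader());
    // header bytes: [0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00]
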
+
+let kDeclNoLocals = 0;
+
+// Section declaration constants
+let kUnknownSectionCode = 0;
+let kTypeSectionCode = 1; // Function signature declarations
+let kImportSectionCode = 2; // Import declarations
+let kFunctionSectionCode = 3; // Function declarations
+let kTableSectionCode = 4; // Indirect function table and other tables
+let kMemorySectionCode = 5; // Memory attributes
+let kGlobalSectionCode = 6; // Global declarations
+let kExportSectionCode = 7; // Exports
+let kStartSectionCode = 8; // Start function declaration
+let kElementSectionCode = 9; // Elements section
+let kCodeSectionCode = 10; // Function code
+let kDataSectionCode = 11; // Data segments
+let kDataCountSectionCode = 12; // Data segment count (between Element & Code)
+let kExceptionSectionCode = 13; // Exception section (between Global & Export)
+
+// Name section types
+let kModuleNameCode = 0;
+let kFunctionNamesCode = 1;
+let kLocalNamesCode = 2;
+
+let kWasmFunctionTypeForm = 0x60;
+let kWasmAnyFunctionTypeForm = 0x70;
+
+let kHasMaximumFlag = 1;
+let kSharedHasMaximumFlag = 3;
+
+// Segment flags
+let kActiveNoIndex = 0;
+let kPassive = 1;
+let kActiveWithIndex = 2;
+
+// Function declaration flags
+let kDeclFunctionName = 0x01;
+let kDeclFunctionImport = 0x02;
+let kDeclFunctionLocals = 0x04;
+let kDeclFunctionExport = 0x08;
+
+// Local types
+let kWasmStmt = 0x40;
+let kWasmI32 = 0x7f;
+let kWasmI64 = 0x7e;
+let kWasmF32 = 0x7d;
+let kWasmF64 = 0x7c;
+let kWasmS128 = 0x7b;
+let kWasmAnyRef = 0x6f;
+let kWasmAnyFunc = 0x70;
+let kWasmExceptRef = 0x68;
+
+let kExternalFunction = 0;
+let kExternalTable = 1;
+let kExternalMemory = 2;
+let kExternalGlobal = 3;
+let kExternalException = 4;
+
+let kTableZero = 0;
+let kMemoryZero = 0;
+let kSegmentZero = 0;
+
+let kExceptionAttribute = 0;
+
+// Useful signatures
+let kSig_i_i = makeSig([kWasmI32], [kWasmI32]);
+let kSig_l_l = makeSig([kWasmI64], [kWasmI64]);
+let kSig_i_l = makeSig([kWasmI64], [kWasmI32]);
+let kSig_i_ii = makeSig([kWasmI32, kWasmI32], [kWasmI32]);
+let kSig_i_iii = makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]);
+let kSig_v_iiii = makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmI32], []);
+let kSig_f_ff = makeSig([kWasmF32, kWasmF32], [kWasmF32]);
+let kSig_d_dd = makeSig([kWasmF64, kWasmF64], [kWasmF64]);
+let kSig_l_ll = makeSig([kWasmI64, kWasmI64], [kWasmI64]);
+let kSig_i_dd = makeSig([kWasmF64, kWasmF64], [kWasmI32]);
+let kSig_v_v = makeSig([], []);
+let kSig_i_v = makeSig([], [kWasmI32]);
+let kSig_l_v = makeSig([], [kWasmI64]);
+let kSig_f_v = makeSig([], [kWasmF32]);
+let kSig_d_v = makeSig([], [kWasmF64]);
+let kSig_v_i = makeSig([kWasmI32], []);
+let kSig_v_ii = makeSig([kWasmI32, kWasmI32], []);
+let kSig_v_iii = makeSig([kWasmI32, kWasmI32, kWasmI32], []);
+let kSig_v_l = makeSig([kWasmI64], []);
+let kSig_v_d = makeSig([kWasmF64], []);
+let kSig_v_dd = makeSig([kWasmF64, kWasmF64], []);
+let kSig_v_ddi = makeSig([kWasmF64, kWasmF64, kWasmI32], []);
+let kSig_ii_v = makeSig([], [kWasmI32, kWasmI32]);
+let kSig_iii_v = makeSig([], [kWasmI32, kWasmI32, kWasmI32]);
+let kSig_ii_i = makeSig([kWasmI32], [kWasmI32, kWasmI32]);
+let kSig_iii_i = makeSig([kWasmI32], [kWasmI32, kWasmI32, kWasmI32]);
+let kSig_ii_ii = makeSig([kWasmI32, kWasmI32], [kWasmI32, kWasmI32]);
+let kSig_iii_ii = makeSig([kWasmI32, kWasmI32], [kWasmI32, kWasmI32, kWasmI32]);
+
+let kSig_v_f = makeSig([kWasmF32], []);
+let kSig_f_f = makeSig([kWasmF32], [kWasmF32]);
+let kSig_f_d = makeSig([kWasmF64], [kWasmF32]);
+let kSig_d_d = makeSig([kWasmF64], [kWasmF64]);
+let kSig_r_r = makeSig([kWasmAnyRef], [kWasmAnyRef]);
+let kSig_a_a = makeSig([kWasmAnyFunc], [kWasmAnyFunc]);
+let kSig_i_r = makeSig([kWasmAnyRef], [kWasmI32]);
+let kSig_v_r = makeSig([kWasmAnyRef], []);
+let kSig_v_a = makeSig([kWasmAnyFunc], []);
+let kSig_v_rr = makeSig([kWasmAnyRef, kWasmAnyRef], []);
+let kSig_r_v = makeSig([], [kWasmAnyRef]);
+let kSig_a_v = makeSig([], [kWasmAnyFunc]);
+
+function makeSig(params, results) {
+ return {params: params, results: results};
+}
+
+function makeSig_v_x(x) {
+ return makeSig([x], []);
+}
+
+function makeSig_v_xx(x) {
+ return makeSig([x, x], []);
+}
+
+function makeSig_r_v(r) {
+ return makeSig([], [r]);
+}
+
+function makeSig_r_x(r, x) {
+ return makeSig([x], [r]);
+}
+
+function makeSig_r_xx(r, x) {
+ return makeSig([x, x], [r]);
+}
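
For illustration, kSig_i_ii above is simply the object literal these helpers
produce, with parameter types first and result types second:

    let sig = makeSig([kWasmI32, kWasmI32], [kWasmI32]);
    // -> {params: [0x7f, 0x7f], results: [0x7f]}, i.e. (i32, i32) -> i32
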
+
+// Opcodes
+let kExprUnreachable = 0x00;
+let kExprNop = 0x01;
+let kExprBlock = 0x02;
+let kExprLoop = 0x03;
+let kExprIf = 0x04;
+let kExprElse = 0x05;
+let kExprTry = 0x06;
+let kExprCatch = 0x07;
+let kExprThrow = 0x08;
+let kExprRethrow = 0x09;
+let kExprBrOnExn = 0x0a;
+let kExprEnd = 0x0b;
+let kExprBr = 0x0c;
+let kExprBrIf = 0x0d;
+let kExprBrTable = 0x0e;
+let kExprReturn = 0x0f;
+let kExprCallFunction = 0x10;
+let kExprCallIndirect = 0x11;
+let kExprReturnCall = 0x12;
+let kExprReturnCallIndirect = 0x13;
+let kExprDrop = 0x1a;
+let kExprSelect = 0x1b;
+let kExprGetLocal = 0x20;
+let kExprSetLocal = 0x21;
+let kExprTeeLocal = 0x22;
+let kExprGetGlobal = 0x23;
+let kExprSetGlobal = 0x24;
+let kExprGetTable = 0x25;
+let kExprSetTable = 0x26;
+let kExprI32LoadMem = 0x28;
+let kExprI64LoadMem = 0x29;
+let kExprF32LoadMem = 0x2a;
+let kExprF64LoadMem = 0x2b;
+let kExprI32LoadMem8S = 0x2c;
+let kExprI32LoadMem8U = 0x2d;
+let kExprI32LoadMem16S = 0x2e;
+let kExprI32LoadMem16U = 0x2f;
+let kExprI64LoadMem8S = 0x30;
+let kExprI64LoadMem8U = 0x31;
+let kExprI64LoadMem16S = 0x32;
+let kExprI64LoadMem16U = 0x33;
+let kExprI64LoadMem32S = 0x34;
+let kExprI64LoadMem32U = 0x35;
+let kExprI32StoreMem = 0x36;
+let kExprI64StoreMem = 0x37;
+let kExprF32StoreMem = 0x38;
+let kExprF64StoreMem = 0x39;
+let kExprI32StoreMem8 = 0x3a;
+let kExprI32StoreMem16 = 0x3b;
+let kExprI64StoreMem8 = 0x3c;
+let kExprI64StoreMem16 = 0x3d;
+let kExprI64StoreMem32 = 0x3e;
+let kExprMemorySize = 0x3f;
+let kExprMemoryGrow = 0x40;
+let kExprI32Const = 0x41;
+let kExprI64Const = 0x42;
+let kExprF32Const = 0x43;
+let kExprF64Const = 0x44;
+let kExprI32Eqz = 0x45;
+let kExprI32Eq = 0x46;
+let kExprI32Ne = 0x47;
+let kExprI32LtS = 0x48;
+let kExprI32LtU = 0x49;
+let kExprI32GtS = 0x4a;
+let kExprI32GtU = 0x4b;
+let kExprI32LeS = 0x4c;
+let kExprI32LeU = 0x4d;
+let kExprI32GeS = 0x4e;
+let kExprI32GeU = 0x4f;
+let kExprI64Eqz = 0x50;
+let kExprI64Eq = 0x51;
+let kExprI64Ne = 0x52;
+let kExprI64LtS = 0x53;
+let kExprI64LtU = 0x54;
+let kExprI64GtS = 0x55;
+let kExprI64GtU = 0x56;
+let kExprI64LeS = 0x57;
+let kExprI64LeU = 0x58;
+let kExprI64GeS = 0x59;
+let kExprI64GeU = 0x5a;
+let kExprF32Eq = 0x5b;
+let kExprF32Ne = 0x5c;
+let kExprF32Lt = 0x5d;
+let kExprF32Gt = 0x5e;
+let kExprF32Le = 0x5f;
+let kExprF32Ge = 0x60;
+let kExprF64Eq = 0x61;
+let kExprF64Ne = 0x62;
+let kExprF64Lt = 0x63;
+let kExprF64Gt = 0x64;
+let kExprF64Le = 0x65;
+let kExprF64Ge = 0x66;
+let kExprI32Clz = 0x67;
+let kExprI32Ctz = 0x68;
+let kExprI32Popcnt = 0x69;
+let kExprI32Add = 0x6a;
+let kExprI32Sub = 0x6b;
+let kExprI32Mul = 0x6c;
+let kExprI32DivS = 0x6d;
+let kExprI32DivU = 0x6e;
+let kExprI32RemS = 0x6f;
+let kExprI32RemU = 0x70;
+let kExprI32And = 0x71;
+let kExprI32Ior = 0x72;
+let kExprI32Xor = 0x73;
+let kExprI32Shl = 0x74;
+let kExprI32ShrS = 0x75;
+let kExprI32ShrU = 0x76;
+let kExprI32Rol = 0x77;
+let kExprI32Ror = 0x78;
+let kExprI64Clz = 0x79;
+let kExprI64Ctz = 0x7a;
+let kExprI64Popcnt = 0x7b;
+let kExprI64Add = 0x7c;
+let kExprI64Sub = 0x7d;
+let kExprI64Mul = 0x7e;
+let kExprI64DivS = 0x7f;
+let kExprI64DivU = 0x80;
+let kExprI64RemS = 0x81;
+let kExprI64RemU = 0x82;
+let kExprI64And = 0x83;
+let kExprI64Ior = 0x84;
+let kExprI64Xor = 0x85;
+let kExprI64Shl = 0x86;
+let kExprI64ShrS = 0x87;
+let kExprI64ShrU = 0x88;
+let kExprI64Rol = 0x89;
+let kExprI64Ror = 0x8a;
+let kExprF32Abs = 0x8b;
+let kExprF32Neg = 0x8c;
+let kExprF32Ceil = 0x8d;
+let kExprF32Floor = 0x8e;
+let kExprF32Trunc = 0x8f;
+let kExprF32NearestInt = 0x90;
+let kExprF32Sqrt = 0x91;
+let kExprF32Add = 0x92;
+let kExprF32Sub = 0x93;
+let kExprF32Mul = 0x94;
+let kExprF32Div = 0x95;
+let kExprF32Min = 0x96;
+let kExprF32Max = 0x97;
+let kExprF32CopySign = 0x98;
+let kExprF64Abs = 0x99;
+let kExprF64Neg = 0x9a;
+let kExprF64Ceil = 0x9b;
+let kExprF64Floor = 0x9c;
+let kExprF64Trunc = 0x9d;
+let kExprF64NearestInt = 0x9e;
+let kExprF64Sqrt = 0x9f;
+let kExprF64Add = 0xa0;
+let kExprF64Sub = 0xa1;
+let kExprF64Mul = 0xa2;
+let kExprF64Div = 0xa3;
+let kExprF64Min = 0xa4;
+let kExprF64Max = 0xa5;
+let kExprF64CopySign = 0xa6;
+let kExprI32ConvertI64 = 0xa7;
+let kExprI32SConvertF32 = 0xa8;
+let kExprI32UConvertF32 = 0xa9;
+let kExprI32SConvertF64 = 0xaa;
+let kExprI32UConvertF64 = 0xab;
+let kExprI64SConvertI32 = 0xac;
+let kExprI64UConvertI32 = 0xad;
+let kExprI64SConvertF32 = 0xae;
+let kExprI64UConvertF32 = 0xaf;
+let kExprI64SConvertF64 = 0xb0;
+let kExprI64UConvertF64 = 0xb1;
+let kExprF32SConvertI32 = 0xb2;
+let kExprF32UConvertI32 = 0xb3;
+let kExprF32SConvertI64 = 0xb4;
+let kExprF32UConvertI64 = 0xb5;
+let kExprF32ConvertF64 = 0xb6;
+let kExprF64SConvertI32 = 0xb7;
+let kExprF64UConvertI32 = 0xb8;
+let kExprF64SConvertI64 = 0xb9;
+let kExprF64UConvertI64 = 0xba;
+let kExprF64ConvertF32 = 0xbb;
+let kExprI32ReinterpretF32 = 0xbc;
+let kExprI64ReinterpretF64 = 0xbd;
+let kExprF32ReinterpretI32 = 0xbe;
+let kExprF64ReinterpretI64 = 0xbf;
+let kExprI32SExtendI8 = 0xc0;
+let kExprI32SExtendI16 = 0xc1;
+let kExprI64SExtendI8 = 0xc2;
+let kExprI64SExtendI16 = 0xc3;
+let kExprI64SExtendI32 = 0xc4;
+let kExprRefNull = 0xd0;
+let kExprRefIsNull = 0xd1;
+let kExprRefFunc = 0xd2;
+
+// Prefix opcodes
+let kNumericPrefix = 0xfc;
+let kSimdPrefix = 0xfd;
+let kAtomicPrefix = 0xfe;
+
+// Numeric opcodes.
+let kExprMemoryInit = 0x08;
+let kExprDataDrop = 0x09;
+let kExprMemoryCopy = 0x0a;
+let kExprMemoryFill = 0x0b;
+let kExprTableInit = 0x0c;
+let kExprElemDrop = 0x0d;
+let kExprTableCopy = 0x0e;
+
+// Atomic opcodes.
+let kExprAtomicWake = 0x00;
+let kExprI32AtomicWait = 0x01;
+let kExprI64AtomicWait = 0x02;
+let kExprI32AtomicLoad = 0x10;
+let kExprI32AtomicLoad8U = 0x12;
+let kExprI32AtomicLoad16U = 0x13;
+let kExprI32AtomicStore = 0x17;
+let kExprI32AtomicStore8U = 0x19;
+let kExprI32AtomicStore16U = 0x1a;
+let kExprI32AtomicAdd = 0x1e;
+let kExprI32AtomicAdd8U = 0x20;
+let kExprI32AtomicAdd16U = 0x21;
+let kExprI32AtomicSub = 0x25;
+let kExprI32AtomicSub8U = 0x27;
+let kExprI32AtomicSub16U = 0x28;
+let kExprI32AtomicAnd = 0x2c;
+let kExprI32AtomicAnd8U = 0x2e;
+let kExprI32AtomicAnd16U = 0x2f;
+let kExprI32AtomicOr = 0x33;
+let kExprI32AtomicOr8U = 0x35;
+let kExprI32AtomicOr16U = 0x36;
+let kExprI32AtomicXor = 0x3a;
+let kExprI32AtomicXor8U = 0x3c;
+let kExprI32AtomicXor16U = 0x3d;
+let kExprI32AtomicExchange = 0x41;
+let kExprI32AtomicExchange8U = 0x43;
+let kExprI32AtomicExchange16U = 0x44;
+let kExprI32AtomicCompareExchange = 0x48;
+let kExprI32AtomicCompareExchange8U = 0x4a;
+let kExprI32AtomicCompareExchange16U = 0x4b;
+
+let kExprI64AtomicLoad = 0x11;
+let kExprI64AtomicLoad8U = 0x14;
+let kExprI64AtomicLoad16U = 0x15;
+let kExprI64AtomicLoad32U = 0x16;
+let kExprI64AtomicStore = 0x18;
+let kExprI64AtomicStore8U = 0x1b;
+let kExprI64AtomicStore16U = 0x1c;
+let kExprI64AtomicStore32U = 0x1d;
+let kExprI64AtomicAdd = 0x1f;
+let kExprI64AtomicAdd8U = 0x22;
+let kExprI64AtomicAdd16U = 0x23;
+let kExprI64AtomicAdd32U = 0x24;
+let kExprI64AtomicSub = 0x26;
+let kExprI64AtomicSub8U = 0x29;
+let kExprI64AtomicSub16U = 0x2a;
+let kExprI64AtomicSub32U = 0x2b;
+let kExprI64AtomicAnd = 0x2d;
+let kExprI64AtomicAnd8U = 0x30;
+let kExprI64AtomicAnd16U = 0x31;
+let kExprI64AtomicAnd32U = 0x32;
+let kExprI64AtomicOr = 0x34;
+let kExprI64AtomicOr8U = 0x37;
+let kExprI64AtomicOr16U = 0x38;
+let kExprI64AtomicOr32U = 0x39;
+let kExprI64AtomicXor = 0x3b;
+let kExprI64AtomicXor8U = 0x3e;
+let kExprI64AtomicXor16U = 0x3f;
+let kExprI64AtomicXor32U = 0x40;
+let kExprI64AtomicExchange = 0x42;
+let kExprI64AtomicExchange8U = 0x45;
+let kExprI64AtomicExchange16U = 0x46;
+let kExprI64AtomicExchange32U = 0x47;
+let kExprI64AtomicCompareExchange = 0x49;
+let kExprI64AtomicCompareExchange8U = 0x4c;
+let kExprI64AtomicCompareExchange16U = 0x4d;
+let kExprI64AtomicCompareExchange32U = 0x4e;
+
+// Simd opcodes.
+let kExprF32x4Min = 0x9e;
+
+let kTrapUnreachable = 0;
+let kTrapMemOutOfBounds = 1;
+let kTrapDivByZero = 2;
+let kTrapDivUnrepresentable = 3;
+let kTrapRemByZero = 4;
+let kTrapFloatUnrepresentable = 5;
+let kTrapFuncInvalid = 6;
+let kTrapFuncSigMismatch = 7;
+let kTrapTypeError = 8;
+let kTrapUnalignedAccess = 9;
+let kTrapDataSegmentDropped = 10;
+let kTrapElemSegmentDropped = 11;
+let kTrapTableOutOfBounds = 12;
+
+let kTrapMsgs = [
+ "unreachable",
+ "memory access out of bounds",
+ "divide by zero",
+ "divide result unrepresentable",
+ "remainder by zero",
+ "float unrepresentable in integer range",
+ "invalid index into function table",
+ "function signature mismatch",
+ "wasm function signature contains illegal type",
+ "operation does not support unaligned accesses",
+ "data segment has been dropped",
+ "element segment has been dropped",
+ "table access out of bounds"
+];
+
+function assertTraps(trap, code) {
+ assertThrows(code, WebAssembly.RuntimeError, kTrapMsgs[trap]);
+}
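
A usage sketch, assuming an instance that exports an i32 division function
named "div" (both names are illustrative):

    assertTraps(kTrapDivByZero, () => instance.exports.div(1, 0));
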
+
class Binary extends Array {
emit_u8(val) {
this.push(val);
@@ -37,6 +525,18 @@ class Binary extends Array {
}
}
+  emit_u64v(val) {
+    // Unsigned LEB128: emit the low 7 bits of each byte and set the
+    // continuation bit (0x80) on every byte except the last. The unsigned
+    // shift coerces to 32 bits, so this handles values below 2**32.
+    while (true) {
+      let v = val & 0x7f;
+      val = val >>> 7;
+      if (val == 0) {
+        this.push(v);
+        break;
+      }
+      this.push(v | 0x80);
+    }
+  }
+
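A worked example of the LEB128 loop above: 300 (binary 1_0010_1100) encodes as
two bytes, the low 7 bits with the continuation bit set, then the remainder:

    let b = new Binary();
    b.emit_u64v(300);  // pushes 0xac (0x2c | 0x80), then 0x02
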
emit_bytes(data) {
for (let i = 0; i < data.length; i++) {
this.push(data[i] & 0xff);
@@ -163,21 +663,37 @@ class WasmGlobalBuilder {
}
}
+class WasmTableBuilder {
+ constructor(module, type, initial_size, max_size) {
+ this.module = module;
+ this.type = type;
+ this.initial_size = initial_size;
+ this.has_max = max_size != undefined;
+ this.max_size = max_size;
+ }
+
+ exportAs(name) {
+ this.module.exports.push({name: name, kind: kExternalTable,
+ index: this.index});
+ return this;
+ }
+}
+
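A sketch of how this table builder is used (addTable is defined on
WasmModuleBuilder below; the export name is illustrative):

    let builder = new WasmModuleBuilder();
    builder.addTable(kWasmAnyFunc, 1, 10).exportAs('t');
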
class WasmModuleBuilder {
constructor() {
this.types = [];
this.imports = [];
this.exports = [];
this.globals = [];
+ this.tables = [];
this.exceptions = [];
this.functions = [];
- this.table_length_min = 0;
- this.table_length_max = undefined;
this.element_segments = [];
this.data_segments = [];
this.explicit = [];
this.num_imported_funcs = 0;
this.num_imported_globals = 0;
+ this.num_imported_tables = 0;
this.num_imported_exceptions = 0;
return this;
}
@@ -230,6 +746,16 @@ class WasmModuleBuilder {
return glob;
}
+ addTable(type, initial_size, max_size = undefined) {
+ if (type != kWasmAnyRef && type != kWasmAnyFunc) {
+ throw new Error('Tables must be of type kWasmAnyRef or kWasmAnyFunc');
+ }
+ let table = new WasmTableBuilder(this, type, initial_size, max_size);
+ table.index = this.tables.length + this.num_imported_tables;
+ this.tables.push(table);
+ return table;
+ }
+
addException(type) {
let type_index = (typeof type) == "number" ? type : this.addType(type);
let except_index = this.exceptions.length + this.num_imported_exceptions;
@@ -273,9 +799,13 @@ class WasmModuleBuilder {
}
addImportedTable(module, name, initial, maximum) {
+ if (this.tables.length != 0) {
+ throw new Error('Imported tables must be declared before local ones');
+ }
let o = {module: module, name: name, kind: kExternalTable, initial: initial,
maximum: maximum};
this.imports.push(o);
+ return this.num_imported_tables++;
}
addImportedException(module, name, type) {
@@ -314,15 +844,19 @@ class WasmModuleBuilder {
}
addElementSegment(base, is_global, array, is_import = false) {
+ if (this.tables.length + this.num_imported_tables == 0) {
+ this.addTable(kWasmAnyFunc, 0);
+ }
this.element_segments.push({base: base, is_global: is_global,
array: array, is_active: true});
if (!is_global) {
var length = base + array.length;
- if (length > this.table_length_min && !is_import) {
- this.table_length_min = length;
+ if (!is_import && length > this.tables[0].initial_size) {
+ this.tables[0].initial_size = length;
}
- if (length > this.table_length_max && !is_import) {
- this.table_length_max = length;
+ if (!is_import && this.tables[0].has_max &&
+ length > this.tables[0].max_size) {
+ this.tables[0].max_size = length;
}
}
return this;
@@ -338,12 +872,17 @@ class WasmModuleBuilder {
if (typeof n != 'number')
throw new Error('invalid table (entries have to be numbers): ' + array);
}
- return this.addElementSegment(this.table_length_min, false, array);
+ if (this.tables.length == 0) {
+ this.addTable(kWasmAnyFunc, 0);
+ }
+ return this.addElementSegment(this.tables[0].initial_size, false, array);
}
setTableBounds(min, max = undefined) {
- this.table_length_min = min;
- this.table_length_max = max;
+ if (this.tables.length != 0) {
+ throw new Error("The table bounds of table '0' have already been set.");
+ }
+ this.addTable(kWasmAnyFunc, min, max);
return this;
}
@@ -430,16 +969,16 @@ class WasmModuleBuilder {
}
// Add table section
- if (wasm.table_length_min > 0) {
- if (debug) print("emitting table @ " + binary.length);
+ if (wasm.tables.length > 0) {
+      if (debug) print("emitting tables @ " + binary.length);
binary.emit_section(kTableSectionCode, section => {
- section.emit_u8(1); // one table entry
- section.emit_u8(kWasmAnyFunctionTypeForm);
- const max = wasm.table_length_max;
- const has_max = max !== undefined;
- section.emit_u8(has_max ? kHasMaximumFlag : 0);
- section.emit_u32v(wasm.table_length_min);
- if (has_max) section.emit_u32v(max);
+ section.emit_u32v(wasm.tables.length);
+ for (let table of wasm.tables) {
+ section.emit_u8(table.type);
+ section.emit_u8(table.has_max);
+ section.emit_u32v(table.initial_size);
+ if (table.has_max) section.emit_u32v(table.max_size);
+ }
});
}
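
A worked example of what the loop above emits for a single anyfunc table with
bounds 1..10:

    let b = new WasmModuleBuilder();
    b.addTable(kWasmAnyFunc, 1, 10);
    // table section body: [0x01, 0x70, 0x01, 0x01, 0x0a]
    // i.e. count, type (kWasmAnyFunc), has_max flag, initial, maximum
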
@@ -452,9 +991,9 @@ class WasmModuleBuilder {
const is_shared = wasm.memory.shared !== undefined;
       // Emit flags (bit 0: resizable max, bit 1: shared memory)
if (is_shared) {
- section.emit_u8(has_max ? 3 : 2);
+ section.emit_u8(has_max ? kSharedHasMaximumFlag : 2);
} else {
- section.emit_u8(has_max ? 1 : 0);
+ section.emit_u8(has_max ? kHasMaximumFlag : 0);
}
section.emit_u32v(wasm.memory.min);
if (has_max) section.emit_u32v(wasm.memory.max);
@@ -478,7 +1017,7 @@ class WasmModuleBuilder {
break;
case kWasmI64:
section.emit_u8(kExprI64Const);
- section.emit_u32v(global.init);
+ section.emit_u64v(global.init);
break;
case kWasmF32:
section.emit_u8(kExprF32Const);
@@ -563,6 +1102,7 @@ class WasmModuleBuilder {
for (let init of inits) {
if (init.is_active) {
+ // Active segment.
section.emit_u8(0); // table index / flags
if (init.is_global) {
section.emit_u8(kExprGetGlobal);
@@ -571,12 +1111,25 @@ class WasmModuleBuilder {
}
section.emit_u32v(init.base);
section.emit_u8(kExprEnd);
+ section.emit_u32v(init.array.length);
+ for (let index of init.array) {
+ section.emit_u32v(index);
+ }
} else {
+ // Passive segment.
section.emit_u8(kPassive); // flags
- }
- section.emit_u32v(init.array.length);
- for (let index of init.array) {
- section.emit_u32v(index);
+ section.emit_u8(kWasmAnyFunc);
+ section.emit_u32v(init.array.length);
+ for (let index of init.array) {
+ if (index === null) {
+ section.emit_u8(kExprRefNull);
+ section.emit_u8(kExprEnd);
+ } else {
+ section.emit_u8(kExprRefFunc);
+ section.emit_u32v(index);
+ section.emit_u8(kExprEnd);
+ }
+ }
}
}
});
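
Note on the passive branch above: each table entry becomes its own tiny init
expression terminated by kExprEnd. For example, entries [null, 3] encode as:

    // kPassive (0x01), kWasmAnyFunc (0x70), count (0x02),
    // kExprRefNull, kExprEnd, kExprRefFunc, 3, kExprEnd
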
@@ -617,6 +1170,9 @@ class WasmModuleBuilder {
if (l.anyref_count > 0) {
local_decls.push({count: l.anyref_count, type: kWasmAnyRef});
}
+ if (l.anyfunc_count > 0) {
+ local_decls.push({count: l.anyfunc_count, type: kWasmAnyFunc});
+ }
if (l.except_count > 0) {
local_decls.push({count: l.except_count, type: kWasmExceptRef});
}
@@ -732,6 +1288,10 @@ class WasmModuleBuilder {
return buffer;
}
+ toUint8Array(debug = false) {
+ return new Uint8Array(this.toBuffer(debug));
+ }
+
instantiate(ffi) {
let module = new WebAssembly.Module(this.toBuffer());
let instance = new WebAssembly.Instance(module, ffi);
@@ -747,3 +1307,23 @@ class WasmModuleBuilder {
return new WebAssembly.Module(this.toBuffer(debug));
}
}
+
+function wasmI32Const(val) {
+ let bytes = [kExprI32Const];
+ for (let i = 0; i < 4; ++i) {
+ bytes.push(0x80 | ((val >> (7 * i)) & 0x7f));
+ }
+ bytes.push((val >> (7 * 4)) & 0x7f);
+ return bytes;
+}
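
For reference, this always produces the padded 5-byte LEB form of the i32
immediate, regardless of the value:

    wasmI32Const(7);  // [kExprI32Const, 0x87, 0x80, 0x80, 0x80, 0x00]
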
+
+function wasmF32Const(f) {
+ f32_view[0] = f;
+ return [kExprF32Const, byte_view[0], byte_view[1], byte_view[2], byte_view[3]];
+}
+
+function wasmF64Const(f) {
+ f64_view[0] = f;
+ return [kExprF64Const, byte_view[0], byte_view[1], byte_view[2], byte_view[3],
+ byte_view[4], byte_view[5], byte_view[6], byte_view[7]];
+}
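
Both float helpers splat the IEEE-754 bit pattern little-endian through the
shared views declared at the top of the file, e.g.:

    wasmF32Const(1);  // [kExprF32Const, 0x00, 0x00, 0x80, 0x3f]
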
diff --git a/deps/v8/test/mjsunit/wasm/worker-interpreter.js b/deps/v8/test/mjsunit/wasm/worker-interpreter.js
index 9bc1e1e11c..ccf6d279a0 100644
--- a/deps/v8/test/mjsunit/wasm/worker-interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/worker-interpreter.js
@@ -4,7 +4,6 @@
// Flags: --allow-natives-syntax --no-wasm-disable-structured-cloning
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestPostInterpretedModule() {
diff --git a/deps/v8/test/mjsunit/wasm/worker-module.js b/deps/v8/test/mjsunit/wasm/worker-module.js
index b60b19571b..f626263b25 100644
--- a/deps/v8/test/mjsunit/wasm/worker-module.js
+++ b/deps/v8/test/mjsunit/wasm/worker-module.js
@@ -4,7 +4,6 @@
// Flags: --wasm-shared-engine --no-wasm-disable-structured-cloning
-load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestPostModule() {
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index 5582fd2b75..8c30c653cd 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -8,7 +8,7 @@
#include "include/v8.h"
#include "src/frames.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -16,7 +16,7 @@
namespace v8 {
static const char* kHeader =
- "# Copyright 2018 the V8 project authors. All rights reserved.\n"
+ "# Copyright 2019 the V8 project authors. All rights reserved.\n"
"# Use of this source code is governed by a BSD-style license that can\n"
"# be found in the LICENSE file.\n"
"\n"
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.status b/deps/v8/test/mkgrokdump/mkgrokdump.status
index 110cf6d15e..62453ad979 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.status
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.status
@@ -9,4 +9,9 @@
'*': [SKIP],
}], # variant != default or arch != x64 or lite_mode
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
+
]
diff --git a/deps/v8/test/mkgrokdump/testcfg.py b/deps/v8/test/mkgrokdump/testcfg.py
index d8f0380e75..0a20dc9839 100644
--- a/deps/v8/test/mkgrokdump/testcfg.py
+++ b/deps/v8/test/mkgrokdump/testcfg.py
@@ -11,6 +11,12 @@ from testrunner.outproc import mkgrokdump
SHELL = 'mkgrokdump'
+
+class TestLoader(testsuite.TestLoader):
+ def _list_test_filenames(self):
+ yield SHELL
+
+# TODO(tmrts): refactor the test creation logic to migrate to TestLoader
class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
@@ -18,9 +24,8 @@ class TestSuite(testsuite.TestSuite):
v8_path = os.path.dirname(os.path.dirname(os.path.abspath(self.root)))
self.expected_path = os.path.join(v8_path, 'tools', 'v8heapconst.py')
- def ListTests(self):
- test = self._create_test(SHELL)
- return [test]
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index c7dd68db3b..4fef899227 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -1037,4 +1037,9 @@
'*': [SKIP],
}], # variant == no_wasm_traps
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
+
]
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index 3727fccef3..6d1a44782d 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -32,54 +32,55 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
from testrunner.outproc import mozilla
-EXCLUDED = ["CVS", ".svn"]
-
+EXCLUDED = [
+ "CVS",
+ ".svn",
+]
+
+FRAMEWORK = [
+ "browser.js",
+ "shell.js",
+ "jsref.js",
+ "template.js",
+]
+
+TEST_DIRS = [
+ "ecma",
+ "ecma_2",
+ "ecma_3",
+ "js1_1",
+ "js1_2",
+ "js1_3",
+ "js1_4",
+ "js1_5",
+]
+
+class TestLoader(testsuite.JSTestLoader):
+ @property
+ def excluded_files(self):
+ return set(FRAMEWORK)
-FRAMEWORK = """
- browser.js
- shell.js
- jsref.js
- template.js
-""".split()
+ @property
+ def excluded_dirs(self):
+ return set(EXCLUDED)
+ @property
+ def test_dirs(self):
+ return TEST_DIRS
-TEST_DIRS = """
- ecma
- ecma_2
- ecma_3
- js1_1
- js1_2
- js1_3
- js1_4
- js1_5
-""".split()
+ def _to_relpath(self, abspath, _):
+ # TODO: refactor this by setting the test path during the TestCase creation
+ return os.path.relpath(abspath, self.test_root)
class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
- self.testroot = os.path.join(self.root, "data")
-
- def ListTests(self):
- tests = []
- for testdir in TEST_DIRS:
- current_root = os.path.join(self.testroot, testdir)
- for dirname, dirs, files in os.walk(current_root):
- for dotted in [x for x in dirs if x.startswith(".")]:
- dirs.remove(dotted)
- for excluded in EXCLUDED:
- if excluded in dirs:
- dirs.remove(excluded)
- dirs.sort()
- files.sort()
- for filename in files:
- if filename.endswith(".js") and not filename in FRAMEWORK:
- fullpath = os.path.join(dirname, filename)
- relpath = fullpath[len(self.testroot) + 1 : -3]
- testname = relpath.replace(os.path.sep, "/")
- case = self._create_test(testname)
- tests.append(case)
- return tests
+ self.test_root = os.path.join(self.root, "data")
+ self._test_loader.test_root = self.test_root
+
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
@@ -89,22 +90,22 @@ class TestCase(testcase.D8TestCase):
def _get_files_params(self):
files = [os.path.join(self.suite.root, "mozilla-shell-emulation.js")]
testfilename = self.path + ".js"
- testfilepath = testfilename.split("/")
+ testfilepath = testfilename.split(os.path.sep)
for i in xrange(len(testfilepath)):
- script = os.path.join(self.suite.testroot,
+ script = os.path.join(self.suite.test_root,
reduce(os.path.join, testfilepath[:i], ""),
"shell.js")
if os.path.exists(script):
files.append(script)
- files.append(os.path.join(self.suite.testroot, testfilename))
+ files.append(os.path.join(self.suite.test_root, testfilename))
return files
def _get_suite_flags(self):
return ['--expose-gc']
def _get_source_path(self):
- return os.path.join(self.suite.testroot, self.path + self._get_suffix())
+ return os.path.join(self.suite.test_root, self.path + self._get_suffix())
@property
def output_proc(self):
@@ -117,6 +118,5 @@ class TestCase(testcase.D8TestCase):
return mozilla.OutProc(self.expected_outcomes)
-
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/preparser/preparser.status b/deps/v8/test/preparser/preparser.status
index 43049d46e0..792c416433 100644
--- a/deps/v8/test/preparser/preparser.status
+++ b/deps/v8/test/preparser/preparser.status
@@ -28,4 +28,9 @@
[
[ALWAYS, {
}], # ALWAYS
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
]
diff --git a/deps/v8/test/preparser/testcfg.py b/deps/v8/test/preparser/testcfg.py
index 11e6135444..0567564278 100644
--- a/deps/v8/test/preparser/testcfg.py
+++ b/deps/v8/test/preparser/testcfg.py
@@ -37,6 +37,14 @@ class VariantsGenerator(testsuite.VariantsGenerator):
return self._standard_variant
+class TestLoader(testsuite.TestLoader):
+ def _list_test_filenames(self):
+ for file in os.listdir(self.suite.root):
+ if file.endswith(".pyt"):
+ yield file[:-4]
+
+
+# TODO(tmrts): refactor the Python template parsing, then use the TestLoader.
class TestSuite(testsuite.TestSuite):
def _ParsePythonTestTemplates(self, result, filename):
pathname = os.path.join(self.root, filename + ".pyt")
@@ -63,16 +71,20 @@ class TestSuite(testsuite.TestSuite):
def ListTests(self):
result = []
- # Find all .pyt files in this directory.
- filenames = [f[:-4] for f in os.listdir(self.root) if f.endswith(".pyt")]
- filenames.sort()
- for f in filenames:
+ filenames = self._test_loader._list_test_filenames()
+ for f in sorted(filenames):
self._ParsePythonTestTemplates(result, f)
+
+ # TODO: remove after converting to use a full TestLoader
+ self._test_loader.test_count_estimation = len(result)
return result
def _create_test(self, path, source, template_flags):
- return super(TestSuite, self)._create_test(
- path, source=source, template_flags=template_flags)
+ return self._test_loader._create_test(
+ path, self, source=source, template_flags=template_flags)
+
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/test262/local-tests/test/built-ins/TypedArray/prototype/set/typedarray-arg-src-byteoffset-internal.js b/deps/v8/test/test262/local-tests/test/built-ins/TypedArray/prototype/set/typedarray-arg-src-byteoffset-internal.js
deleted file mode 100644
index 430dbdf369..0000000000
--- a/deps/v8/test/test262/local-tests/test/built-ins/TypedArray/prototype/set/typedarray-arg-src-byteoffset-internal.js
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-/*---
-esid: sec-%typedarray%.prototype.set-typedarray-offset
-description: >
- Uses typedArray's internal [[ByteOffset]]
-info: >
- 22.2.3.23.2 %TypedArray%.prototype.set(typedArray [ , offset ] )
- 1. Assert: typedArray has a [[TypedArrayName]] internal slot. If it does not,
- the definition in 22.2.3.23.1 applies.
- ...
- 21. Let srcByteOffset be typedArray.[[ByteOffset]].
- ...
-includes: [testTypedArray.js]
----*/
-
-var getCalls = 0;
-var desc = {
- get: function getLen() {
- getCalls++;
- return 0;
- }
-};
-
-Object.defineProperty(TypedArray.prototype, "byteOffset", desc);
-
-testWithTypedArrayConstructors(function(TA) {
- var sample = new TA(2);
- var src = new TA([42, 43]);
- var differentTA = TA === Uint8Array ? Int8Array : Uint8Array;
- var src2 = new differentTA([42, 43]);
- var src3 = new differentTA(sample.buffer, 0, 2);
-
- Object.defineProperty(TA.prototype, "byteOffset", desc);
- Object.defineProperty(src, "byteOffset", desc);
- Object.defineProperty(src2, "byteOffset", desc);
- Object.defineProperty(src3, "byteOffset", desc);
-
- sample.set(src);
- sample.set(src2);
- sample.set(src3);
-
- assert.sameValue(getCalls, 0, "ignores byteOffset properties");
-});
diff --git a/deps/v8/test/test262/local-tests/test/built-ins/TypedArray/prototype/set/typedarray-arg-target-byteoffset-internal.js b/deps/v8/test/test262/local-tests/test/built-ins/TypedArray/prototype/set/typedarray-arg-target-byteoffset-internal.js
deleted file mode 100644
index ab49f66a98..0000000000
--- a/deps/v8/test/test262/local-tests/test/built-ins/TypedArray/prototype/set/typedarray-arg-target-byteoffset-internal.js
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-/*---
-esid: sec-%typedarray%.prototype.set-typedarray-offset
-description: >
- Uses target's internal [[ArrayLength]]
-info: >
- 22.2.3.23.2 %TypedArray%.prototype.set(typedArray [ , offset ] )
- 1. Assert: typedArray has a [[TypedArrayName]] internal slot. If it does not,
- the definition in 22.2.3.23.1 applies.
- 2. Let target be the this value.
- ...
- 16. Let targetByteOffset be target.[[ByteOffset]].
- ...
-includes: [testTypedArray.js]
----*/
-
-var getCalls = 0;
-var desc = {
- get: function() {
- getCalls++;
- return 0;
- }
-};
-
-Object.defineProperty(TypedArray.prototype, "byteOffset", desc);
-
-testWithTypedArrayConstructors(function(TA) {
- var sample = new TA(2);
- var src = new TA([42, 43]);
- var differentTA = TA === Uint8Array ? Int8Array : Uint8Array;
- var src2 = new differentTA([42, 43]);
- var src3 = new differentTA(sample.buffer, 0, 2);
-
- Object.defineProperty(TA.prototype, "byteOffset", desc);
- Object.defineProperty(sample, "byteOffset", desc);
-
- sample.set(src);
- sample.set(src2);
- sample.set(src3);
-
- assert.sameValue(getCalls, 0, "ignores byteoffset properties");
-});
diff --git a/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/fraction-digit-options-read-once.js b/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/fraction-digit-options-read-once.js
deleted file mode 100644
index e7e37b8735..0000000000
--- a/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/fraction-digit-options-read-once.js
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// This code is governed by the license found in the LICENSE file.
-
-/*---
-esid: ECMA-402 #sec-setnfdigitoptions
-description: >
- The maximum and minimum fraction digits properties should be read from
- the options bag exactly once from the NumberFormat constructor.
- Regression test for https://bugs.chromium.org/p/v8/issues/detail?id=6015
-include: [assert.js]
----*/
-
-var minCounter = 0;
-var maxCounter = 0;
-new Intl.NumberFormat("en", { get minimumFractionDigits() { minCounter++ },
- get maximumFractionDigits() { maxCounter++ } });
-assert.sameValue(1, minCounter);
-assert.sameValue(1, maxCounter);
diff --git a/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/prototype/formatToParts/default-parameter.js b/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/prototype/formatToParts/default-parameter.js
deleted file mode 100644
index 408694c48c..0000000000
--- a/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/prototype/formatToParts/default-parameter.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2017 Josh Wolfe. All rights reserved.
-// This code is governed by the BSD license found in the LICENSE file.
-/*---
-esid: #sec-intl.numberformat.prototype.formattoparts
-description: Intl.NumberFormat.prototype.formatToParts called with no parameters
-info: >
- Intl.NumberFormat.prototype.formatToParts ([ value ])
-
- 3. If value is not provided, let value be undefined.
----*/
-
-var nf = new Intl.NumberFormat();
-
-// Example value: [{"type":"nan","value":"NaN"}]
-var implicit = nf.formatToParts();
-var explicit = nf.formatToParts(undefined);
-
-assert(partsEquals(implicit, explicit),
- "formatToParts() should be equivalent to formatToParts(undefined)");
-
-function partsEquals(parts1, parts2) {
- if (parts1.length !== parts2.length) return false;
- for (var i = 0; i < parts1.length; i++) {
- var part1 = parts1[i];
- var part2 = parts2[i];
- if (part1.type !== part2.type) return false;
- if (part1.value !== part2.value) return false;
- }
- return true;
-}
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index d547824b12..a1237a0848 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -84,6 +84,11 @@
'language/expressions/prefix-increment/S11.4.4_A5_*': [FAIL],
'language/statements/variable/binding-resolution': [FAIL],
+ # https://code.google.com/p/v8/issues/detail?id=8771
+ 'language/computed-property-names/class/static/method-number': [FAIL],
+ 'language/computed-property-names/class/static/method-string': [FAIL],
+ 'language/computed-property-names/class/static/method-symbol': [FAIL],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=4895
'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/detached-buffer': [FAIL],
'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/BigInt/detached-buffer': [FAIL],
@@ -138,9 +143,6 @@
'built-ins/DataView/prototype/byteLength/detached-buffer': [FAIL],
'built-ins/DataView/prototype/byteOffset/detached-buffer': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4231
- 'language/eval-code/direct/var-env-lower-lex-catch-non-strict': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=4951
'language/expressions/assignment/destructuring/iterator-destructuring-property-reference-target-evaluation-order': [FAIL],
'language/expressions/assignment/destructuring/keyed-destructuring-property-reference-target-evaluation-order': [FAIL],
@@ -374,9 +376,6 @@
'language/literals/regexp/u-unicode-esc-non-hex': [FAIL_PHASE_ONLY],
'language/literals/regexp/unicode-escape-nls-err': [FAIL_PHASE_ONLY],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7829
- 'language/block-scope/syntax/redeclaration/function-declaration-attempt-to-redeclare-with-var-declaration-nested-in-function': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=4628
'language/eval-code/direct/non-definable-function-with-function': [FAIL],
'language/eval-code/direct/non-definable-function-with-variable': [FAIL],
@@ -489,80 +488,28 @@
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-caller': [FAIL_SLOPPY],
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-arguments': [FAIL_SLOPPY],
- # https://bugs.chromium.org/p/v8/issues/detail?id=6776
- 'built-ins/Proxy/ownKeys/return-duplicate-entries-throws': [FAIL],
- 'built-ins/Proxy/ownKeys/return-duplicate-symbol-entries-throws': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=7184
'annexB/language/expressions/yield/star-iterable-return-emulates-undefined-throws-when-called': [FAIL],
'annexB/language/statements/for-await-of/iterator-close-return-emulates-undefined-throws-when-called': [FAIL],
'annexB/language/statements/for-of/iterator-close-return-emulates-undefined-throws-when-called': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7468
- 'language/statements/class/elements/privatename-not-valid-earlyerr-script-8': [SKIP],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=5690
'language/expressions/call/eval-spread': [FAIL],
'language/expressions/call/eval-spread-empty-leading': [FAIL],
'language/expressions/call/eval-spread-empty-trailing': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=8717
- 'intl402/Segmenter/constructor/constructor/options-granularity-valid': [FAIL],
- 'intl402/Segmenter/constructor/constructor/options-lineBreakStyle-invalid': [FAIL],
- 'intl402/Segmenter/constructor/constructor/options-lineBreakStyle-valid': [FAIL],
- 'intl402/Segmenter/constructor/constructor/options-order': [FAIL],
- 'intl402/Segmenter/constructor/constructor/options-throwing-getters': [FAIL],
- 'intl402/Segmenter/constructor/constructor/options-toobject-prototype': [FAIL],
- 'intl402/Segmenter/constructor/constructor/options-valid-combinations': [FAIL],
- 'intl402/Segmenter/iterator/granularity': [FAIL],
- 'intl402/Segmenter/prototype/resolvedOptions/order': [FAIL],
- 'intl402/Segmenter/prototype/resolvedOptions/type-with-lbs': [FAIL],
- 'intl402/Segmenter/prototype/segment/segment-line': [FAIL],
- 'intl402/Segmenter/prototype/segment/segment-line-following': [FAIL],
- 'intl402/Segmenter/prototype/segment/segment-line-following-modes': [FAIL],
- 'intl402/Segmenter/prototype/segment/segment-line-iterable': [FAIL],
- 'intl402/Segmenter/prototype/segment/segment-line-next': [FAIL],
- 'intl402/Segmenter/prototype/segment/segment-line-preceding': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8664
+ 'intl402/Collator/missing-unicode-ext-value-defaults-to-true': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=7472
'intl402/NumberFormat/currency-digits': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7482
- 'intl402/DateTimeFormat/prototype/resolvedOptions/resolved-locale-with-hc-unicode': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8849
+ 'intl402/Intl/getCanonicalLocales/non-iana-canon': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=7483
'annexB/built-ins/Function/createdynfn-html-close-comment-params': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=8260
- 'intl402/Locale/constructor-non-iana-canon': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=8261
- 'intl402/Locale/constructor-options-language-valid': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=8262
- 'intl402/Locale/constructor-parse-twice': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=8246
- 'intl402/Locale/constructor-tag': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=8243
- 'intl402/Locale/extensions-private': [FAIL],
- 'intl402/Locale/getters-privateuse': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=8236
- 'intl402/Locale/likely-subtags': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=8242
- 'intl402/Locale/extensions-grandfathered': [FAIL],
- 'intl402/Locale/getters-grandfathered': [FAIL],
- 'intl402/Locale/likely-subtags-grandfathered': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=8613
- 'intl402/RelativeTimeFormat/prototype/resolvedOptions/order': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=6705
- 'built-ins/Object/assign/strings-and-symbol-order': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=7831
'language/statements/generators/generator-created-after-decl-inst': [FAIL],
'language/expressions/generators/generator-created-after-decl-inst': [FAIL],
@@ -575,15 +522,6 @@
'language/expressions/await/for-await-of-interleaved': ['--harmony-await-optimization'],
'language/expressions/await/async-await-interleaved': ['--harmony-await-optimization'],
- # https://bugs.chromium.org/p/v8/issues/detail?id=8706
- 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-async-function': [FAIL],
- 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-async-generator': [FAIL],
- 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-class': [FAIL],
- 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-const': [FAIL],
- 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-function': [FAIL],
- 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-generator': [FAIL],
- 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-let' : [FAIL],
-
# https://github.com/tc39/test262/issues/2033
'language/expressions/class/elements/private-derived-cls-direct-eval-err-contains-supercall': [SKIP],
'language/expressions/class/elements/private-derived-cls-direct-eval-err-contains-supercall-1': [SKIP],
@@ -616,44 +554,37 @@
'language/expressions/prefix-increment/eval': [SKIP],
'language/expressions/prefix-increment/eval-nostrict': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=8707
- 'language/line-terminators/invalid-string-ls': [SKIP],
- 'language/line-terminators/invalid-string-ps': [SKIP],
-
- # https://github.com/tc39/proposal-class-fields/issues/215
- 'language/expressions/class/elements/syntax/early-errors/invalid-names/method-inner-call-expression-bad-reference': [FAIL],
- 'language/expressions/class/elements/syntax/early-errors/invalid-names/method-inner-call-expression-this': [FAIL],
- 'language/expressions/class/elements/syntax/early-errors/invalid-names/method-inner-member-expression-bad-reference': [FAIL],
- 'language/expressions/class/elements/syntax/early-errors/invalid-names/method-inner-member-expression-this': [FAIL],
- 'language/statements/class/elements/syntax/early-errors/invalid-names/method-inner-call-expression-bad-reference': [FAIL],
- 'language/statements/class/elements/syntax/early-errors/invalid-names/method-inner-call-expression-this': [FAIL],
- 'language/statements/class/elements/syntax/early-errors/invalid-names/method-inner-member-expression-bad-reference': [FAIL],
- 'language/statements/class/elements/syntax/early-errors/invalid-names/method-inner-member-expression-this': [FAIL],
- 'language/expressions/function/early-errors/invalid-names-call-expression-bad-reference': [FAIL],
- 'language/expressions/function/early-errors/invalid-names-call-expression-this': [FAIL],
- 'language/expressions/function/early-errors/invalid-names-member-expression-bad-reference': [FAIL],
- 'language/expressions/function/early-errors/invalid-names-member-expression-this': [FAIL],
- 'language/statements/function/early-errors/invalid-names-call-expression-bad-reference': [FAIL],
- 'language/statements/function/early-errors/invalid-names-call-expression-this': [FAIL],
- 'language/statements/function/early-errors/invalid-names-member-expression-bad-reference': [FAIL],
- 'language/statements/function/early-errors/invalid-names-member-expression-this': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8850
+ 'language/comments/hashbang/escaped-bang-041': [SKIP],
+ 'language/comments/hashbang/escaped-bang-u0021': [SKIP],
+ 'language/comments/hashbang/escaped-bang-u21': [SKIP],
+ 'language/comments/hashbang/escaped-bang-x21': [SKIP],
+ 'language/comments/hashbang/escaped-hash-043': [SKIP],
+ 'language/comments/hashbang/escaped-hash-u0023': [SKIP],
+ 'language/comments/hashbang/escaped-hash-u23': [SKIP],
+ 'language/comments/hashbang/escaped-hash-x23': [SKIP],
+ 'language/comments/hashbang/escaped-hashbang': [SKIP],
+ 'language/comments/hashbang/eval': [SKIP],
+ 'language/comments/hashbang/eval-indirect': [SKIP],
+ 'language/comments/hashbang/module': [SKIP],
+ 'language/comments/hashbang/multi-line-comment': [SKIP],
+ 'language/comments/hashbang/no-line-separator': [SKIP],
+ 'language/comments/hashbang/not-empty': [SKIP],
+ 'language/comments/hashbang/preceding-directive-prologue': [SKIP],
+ 'language/comments/hashbang/preceding-directive-prologue-sc': [SKIP],
+ 'language/comments/hashbang/preceding-empty-statement': [SKIP],
+ 'language/comments/hashbang/preceding-hashbang': [SKIP],
+ 'language/comments/hashbang/preceding-line-comment': [SKIP],
+ 'language/comments/hashbang/preceding-multi-line-comment': [SKIP],
+ 'language/comments/hashbang/preceding-whitespace': [SKIP],
+ 'language/comments/hashbang/use-strict': [SKIP],
######################## NEEDS INVESTIGATION ###########################
- # These test failures are specific to the intl402 suite and need investigation
- # to be either marked as bugs with issues filed for them or as deliberate
- # incompatibilities if the test cases turn out to be broken or ambiguous.
- # Some of these are related to v8:4361 in being visible side effects from Intl.
-
# https://bugs.chromium.org/p/v8/issues/detail?id=7833
'built-ins/Atomics/wait/cannot-suspend-throws': [SKIP],
'built-ins/Atomics/wait/undefined-index-defaults-to-zero': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=8258
- 'intl402/Locale/constructor-options-language-valid-undefined': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-fraction-digits-precision': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-significant-digits-precision': [FAIL],
-
##################### DELIBERATE INCOMPATIBILITIES #####################
# https://github.com/tc39/ecma262/pull/889
@@ -661,24 +592,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=6538
- # https://bugs.chromium.org/p/v8/issues/detail?id=6541
- 'language/export/escaped-as-export-specifier': [FAIL],
- 'language/export/escaped-from': [FAIL],
- 'language/expressions/object/method-definition/escaped-get': [FAIL],
- 'language/expressions/object/method-definition/escaped-set': [FAIL],
- 'language/import/escaped-as-import-specifier': [FAIL],
- 'language/import/escaped-as-namespace-import': [FAIL],
- 'language/import/escaped-from': [FAIL],
- 'language/statements/for-await-of/escaped-of': [FAIL],
- 'language/statements/for-of/escaped-of': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=6543
- 'language/statements/labeled/value-await-non-module-escaped': [FAIL],
- 'language/statements/labeled/value-yield-non-strict-escaped': [FAIL],
- 'language/expressions/async-arrow-function/escaped-async-line-terminator': [FAIL],
- 'language/expressions/class/class-name-ident-await-escaped': [FAIL],
- 'language/statements/class/class-name-ident-await-escaped': [FAIL],
-
############################ INVALID TESTS #############################
# Test makes unjustified assumptions about the number of calls to SortCompare.
@@ -1336,6 +1249,21 @@
'built-ins/SharedArrayBuffer/length-is-too-large-throws': [SKIP],
}], # asan == True or msan == True or tsan == True
+['variant == interpreted_regexp', {
+ # Call stack exceeded: https://crbug.com/v8/8678
+ 'built-ins/RegExp/CharacterClassEscapes/character-class-non-digit-class-escape-plus-quantifier-flags-u': [SKIP],
+ 'built-ins/RegExp/CharacterClassEscapes/character-class-non-whitespace-class-escape-plus-quantifier-flags-u': [SKIP],
+ 'built-ins/RegExp/CharacterClassEscapes/character-class-non-word-class-escape-plus-quantifier-flags-u': [SKIP],
+}], # variant == interpreted_regexp
+
+##############################################################################
+['variant == jitless', {
+ # https://crbug.com/v8/7777
+ 'built-ins/RegExp/CharacterClassEscapes/character-class-non-digit-class-escape-plus-quantifier-flags-u': [SKIP],
+ 'built-ins/RegExp/CharacterClassEscapes/character-class-non-whitespace-class-escape-plus-quantifier-flags-u': [SKIP],
+ 'built-ins/RegExp/CharacterClassEscapes/character-class-non-word-class-escape-plus-quantifier-flags-u': [SKIP],
+}], # variant == jitless
+
['variant == no_wasm_traps', {
'*': [SKIP],
}], # variant == no_wasm_traps
@@ -1350,4 +1278,9 @@
'intl402/DateTimeFormat/prototype/resolvedOptions/basic': [SKIP],
}], # system == windows
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
+
]
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 6674abbfce..54311e952c 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -46,20 +46,17 @@ FEATURE_FLAGS = {
'class-static-fields-public': '--harmony-class-fields',
'class-fields-private': '--harmony-private-fields',
'class-static-fields-private': '--harmony-private-fields',
- 'Array.prototype.flat': '--harmony-array-flat',
- 'Array.prototype.flatMap': '--harmony-array-flat',
'String.prototype.matchAll': '--harmony-string-matchall',
'Symbol.matchAll': '--harmony-string-matchall',
'numeric-separator-literal': '--harmony-numeric-separator',
- 'Intl.ListFormat': '--harmony-intl-list-format',
'Intl.Locale': '--harmony-locale',
- 'Intl.RelativeTimeFormat': '--harmony-intl-relative-time-format',
'Intl.Segmenter': '--harmony-intl-segmenter',
'Symbol.prototype.description': '--harmony-symbol-description',
'globalThis': '--harmony-global',
'well-formed-json-stringify': '--harmony-json-stringify',
'export-star-as-namespace-from-module': '--harmony-namespace-exports',
'Object.fromEntries': '--harmony-object-from-entries',
+ 'hashbang': '--harmony-hashbang',
}
SKIPPED_FEATURES = set(['class-methods-private',
@@ -76,9 +73,6 @@ TEST_262_HARNESS_PATH = ["data", "harness"]
TEST_262_TOOLS_PATH = ["harness", "src"]
TEST_262_LOCAL_TESTS_PATH = ["local-tests", "test"]
-TEST_262_RELPATH_REGEXP = re.compile(
- r'.*[\\/]test[\\/]test262[\\/][^\\/]+[\\/]test[\\/](.*)\.js')
-
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),
*TEST_262_TOOLS_PATH))
@@ -106,18 +100,38 @@ class VariantsGenerator(testsuite.VariantsGenerator):
yield (variant, flags + ['--use-strict'], 'strict-%d' % n + phase_var)
-class TestSuite(testsuite.TestSuite):
- # Match the (...) in '/path/to/v8/test/test262/subdir/test/(...).js'
- # In practice, subdir is data or local-tests
+class TestLoader(testsuite.JSTestLoader):
+ @property
+ def test_dirs(self):
+ return [
+ self.test_root,
+ os.path.join(self.suite.root, *TEST_262_LOCAL_TESTS_PATH),
+ ]
+ @property
+ def excluded_suffixes(self):
+ return {"_FIXTURE.js"}
+
+ @property
+ def excluded_dirs(self):
+ return {"intl402"} if self.test_config.noi18n else set()
+
+ def _should_filter_by_test(self, test):
+ features = test.test_record.get("features", [])
+ return SKIPPED_FEATURES.intersection(features)
+
+
+class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
- self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
+ self.test_root = os.path.join(self.root, *TEST_262_SUITE_PATH)
+ # TODO: this makes the TestLoader mutable, refactor it.
+ self._test_loader.test_root = self.test_root
self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
self.harness = [os.path.join(self.harnesspath, f)
for f in TEST_262_HARNESS_FILES]
self.harness += [os.path.join(self.root, "harness-adapt.js")]
- self.localtestroot = os.path.join(self.root, *TEST_262_LOCAL_TESTS_PATH)
+ self.local_test_root = os.path.join(self.root, *TEST_262_LOCAL_TESTS_PATH)
self.parse_test_record = self._load_parse_test_record()
def _load_parse_test_record(self):
@@ -135,28 +149,8 @@ class TestSuite(testsuite.TestSuite):
if f:
f.close()
- def ListTests(self):
- testnames = set()
- for dirname, dirs, files in itertools.chain(os.walk(self.testroot),
- os.walk(self.localtestroot)):
- for dotted in [x for x in dirs if x.startswith(".")]:
- dirs.remove(dotted)
- if self.test_config.noi18n and "intl402" in dirs:
- dirs.remove("intl402")
- dirs.sort()
- files.sort()
- for filename in files:
- if not filename.endswith(".js"):
- continue
- if filename.endswith("_FIXTURE.js"):
- continue
- fullpath = os.path.join(dirname, filename)
- relpath = re.match(TEST_262_RELPATH_REGEXP, fullpath).group(1)
- testnames.add(relpath.replace(os.path.sep, "/"))
- cases = map(self._create_test, testnames)
- return [case for case in cases if len(
- SKIPPED_FEATURES.intersection(
- case.test_record.get("features", []))) == 0]
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
@@ -198,11 +192,15 @@ class TestCase(testcase.D8TestCase):
def _fail_phase_reverse(self):
return 'fail-phase-reverse' in self.procid
+ def __needs_harness_agent(self):
+ tokens = self.path.split(os.path.sep)
+ return tokens[:2] == ["built-ins", "Atomics"]
+
def _get_files_params(self):
return (
list(self.suite.harness) +
([os.path.join(self.suite.root, "harness-agent.js")]
- if self.path.startswith('built-ins/Atomics') else []) +
+ if self.__needs_harness_agent() else []) +
([os.path.join(self.suite.root, "harness-adapt-donotevaluate.js")]
if self.fail_phase_only and not self._fail_phase_reverse else []) +
self._get_includes() +
@@ -233,10 +231,10 @@ class TestCase(testcase.D8TestCase):
def _get_source_path(self):
filename = self.path + self._get_suffix()
- path = os.path.join(self.suite.localtestroot, filename)
+ path = os.path.join(self.suite.local_test_root, filename)
if os.path.exists(path):
return path
- return os.path.join(self.suite.testroot, filename)
+ return os.path.join(self.suite.test_root, filename)
@property
def output_proc(self):
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index 59b1c3895f..f81552292a 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -30,8 +30,8 @@ namespace test {
}
macro LabelTestHelper3(): never
- labels Label3(String, Smi) {
- goto Label3('foo', 7);
+ labels Label3(Oddball, Smi) {
+ goto Label3(Null, 7);
}
macro TestConstexpr1() {
@@ -74,8 +74,8 @@ namespace test {
try {
LabelTestHelper3() otherwise Label3;
}
- label Label3(str: String, smi: Smi) {
- check(str == 'foo');
+ label Label3(o: Oddball, smi: Smi) {
+ check(o == Null);
check(smi == 7);
return True;
}
@@ -156,7 +156,6 @@ namespace test {
check(GenericMacroTest<Object>(True) == True);
check((GenericMacroTestWithLabels<Smi>(0) otherwise Fail) == Undefined);
check((GenericMacroTestWithLabels<Smi>(0) otherwise Fail) == Undefined);
- check((GenericMacroTestWithLabels<Object>(smi0) otherwise Fail) == smi0);
try {
GenericMacroTestWithLabels<Object>(False) otherwise Expected;
}
@@ -413,6 +412,11 @@ namespace test {
label Exit {
check(j == 10);
}
+
+ // Test if we can handle uninitialized values on the stack.
+ let i: Smi;
+ for (let j: Smi = 0; j < 10; ++j) {
+ }
}
macro TestSubtyping(x: Smi) {
@@ -494,6 +498,15 @@ namespace test {
check(UnsafeCast<Smi>(ExampleGenericOverload<Object>(xObject)) == 5);
}
+ macro TestEquality(implicit context: Context)() {
+ const notEqual: bool =
+ AllocateHeapNumberWithValue(0.5) != AllocateHeapNumberWithValue(0.5);
+ check(!notEqual);
+ const equal: bool =
+ AllocateHeapNumberWithValue(0.5) == AllocateHeapNumberWithValue(0.5);
+ check(equal);
+ }
+
macro BoolToBranch(x: bool): never
labels Taken, NotTaken {
if (x) {
@@ -633,7 +646,7 @@ namespace test {
macro TestCatch1(implicit context: Context)(): Smi {
let r: Smi = 0;
try {
- ThrowTypeError(context, kInvalidArrayLength);
+ ThrowTypeError(kInvalidArrayLength);
} catch (e) {
r = 1;
return r;
@@ -641,7 +654,7 @@ namespace test {
}
macro TestCatch2Wrapper(implicit context: Context)(): never {
- ThrowTypeError(context, kInvalidArrayLength);
+ ThrowTypeError(kInvalidArrayLength);
}
macro TestCatch2(implicit context: Context)(): Smi {
@@ -656,7 +669,7 @@ namespace test {
macro TestCatch3WrapperWithLabel(implicit context: Context)(): never
labels Abort {
- ThrowTypeError(context, kInvalidArrayLength);
+ ThrowTypeError(kInvalidArrayLength);
}
macro TestCatch3(implicit context: Context)(): Smi {
@@ -714,7 +727,7 @@ namespace test {
}
macro TestNew(implicit context: Context)() {
- const f: JSArray = new JSArray{};
+ const f: JSArray = NewJSArray();
assert(f.IsEmpty());
f.length = 0;
}
@@ -736,19 +749,6 @@ namespace test {
c: int32;
}
- struct TestCustomStructConstructor {
- constructor(x: int32, y: Smi) {
- this.a = x;
- this.c = x;
- this.b = y;
- this.d = y;
- }
- a: int32;
- b: Smi;
- c: int32;
- d: Smi;
- }
-
macro TestStructConstructor(implicit context: Context)() {
// Test default constructor
let a: TestOuter = TestOuter{5, TestInner{6, 7}, 8};
@@ -761,11 +761,63 @@ namespace test {
a.b.SetX(2);
assert(a.b.x == 2);
assert(a.b.GetX() == 2);
- // Test custom constructor
- let w: TestCustomStructConstructor = TestCustomStructConstructor{1, 2};
- assert(w.a == 1);
- assert(w.b == 2);
- assert(w.c == 1);
- assert(w.d == 2);
+ }
+
+ extern class TestClassWithAllTypes extends JSObject {
+ a: int8;
+ b: uint8;
+ b2: uint8;
+ b3: uint8;
+ c: int16;
+ d: uint16;
+ e: int32;
+ f: uint32;
+ g: RawPtr;
+ h: intptr;
+ i: uintptr;
+ }
+
+ macro TestClassWithAllTypesLoadsAndStores(
+ t: TestClassWithAllTypes, r: RawPtr, v1: int8, v2: uint8, v3: int16,
+ v4: uint16) {
+ t.a = v1;
+ t.b = v2;
+ t.c = v3;
+ t.d = v4;
+ t.e = 0;
+ t.f = 0;
+ t.g = r;
+ t.h = 0;
+ t.i = 0;
+ t.a = t.a;
+ t.b = t.b;
+ t.c = t.c;
+ t.d = t.d;
+ t.e = t.e;
+ t.f = t.f;
+ t.g = t.g;
+ t.h = t.h;
+ t.i = t.i;
+ }
+
+ class InternalClass {
+ Flip() labels NotASmi {
+ const tmp = Cast<Smi>(this.b) otherwise NotASmi;
+ this.b = this.a;
+ this.a = tmp;
+ }
+ a: Smi;
+ b: Number;
+ }
+
+ macro NewInternalClass(x: Smi): InternalClass {
+ return new InternalClass{x, x + 1};
+ }
+
+ macro TestInternalClass(implicit context: Context)() {
+ const o = NewInternalClass(5);
+ o.Flip() otherwise unreachable;
+ check(o.a == 6);
+ check(o.b == 5);
}
}
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 77d503c7d4..2da7849073 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -150,6 +150,7 @@ v8_source_set("unittests_sources") {
"eh-frame-iterator-unittest.cc",
"eh-frame-writer-unittest.cc",
"heap/barrier-unittest.cc",
+ "heap/bitmap-test-utils.h",
"heap/bitmap-unittest.cc",
"heap/embedder-tracing-unittest.cc",
"heap/gc-idle-time-handler-unittest.cc",
@@ -198,7 +199,10 @@ v8_source_set("unittests_sources") {
"test-utils.cc",
"test-utils.h",
"torque/earley-parser-unittest.cc",
+ "torque/ls-json-unittest.cc",
+ "torque/ls-message-unittest.cc",
"torque/torque-unittest.cc",
+ "torque/torque-utils-unittest.cc",
"unicode-unittest.cc",
"utils-unittest.cc",
"value-serializer-unittest.cc",
@@ -214,7 +218,6 @@ v8_source_set("unittests_sources") {
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
"wasm/wasm-opcodes-unittest.cc",
- "zone/segmentpool-unittest.cc",
"zone/zone-allocator-unittest.cc",
"zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",
@@ -286,6 +289,7 @@ v8_source_set("unittests_sources") {
]
deps = [
+ "..:common_test_headers",
"../..:v8_for_testing",
"../..:v8_libbase",
"../..:v8_libplatform",
diff --git a/deps/v8/test/unittests/background-compile-task-unittest.cc b/deps/v8/test/unittests/background-compile-task-unittest.cc
index 5bb6b68285..2577a974fe 100644
--- a/deps/v8/test/unittests/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/background-compile-task-unittest.cc
@@ -18,6 +18,7 @@
#include "src/parsing/parser.h"
#include "src/parsing/preparse-data.h"
#include "src/v8.h"
+#include "src/zone/zone-list-inl.h"
#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/base/functional-unittest.cc b/deps/v8/test/unittests/base/functional-unittest.cc
index 207d5cbdd7..857a9de5de 100644
--- a/deps/v8/test/unittests/base/functional-unittest.cc
+++ b/deps/v8/test/unittests/base/functional-unittest.cc
@@ -64,8 +64,7 @@ typedef ::testing::Types<signed char, unsigned char,
int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
int64_t, uint64_t, float, double> FunctionalTypes;
-TYPED_TEST_CASE(FunctionalTest, FunctionalTypes);
-
+TYPED_TEST_SUITE(FunctionalTest, FunctionalTypes);
TYPED_TEST(FunctionalTest, EqualToImpliesSameHashCode) {
hash<TypeParam> h;
diff --git a/deps/v8/test/unittests/base/template-utils-unittest.cc b/deps/v8/test/unittests/base/template-utils-unittest.cc
index 42917e0ffc..0819b3de8c 100644
--- a/deps/v8/test/unittests/base/template-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/template-utils-unittest.cc
@@ -32,7 +32,7 @@ TEST(TemplateUtilsTest, MakeArraySimple) {
namespace {
constexpr int doubleIntValue(int i) { return i * 2; }
-}; // namespace
+} // namespace
TEST(TemplateUtilsTest, MakeArrayConstexpr) {
constexpr auto computed_array = base::make_array<3>(doubleIntValue);
diff --git a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
index 38c14cd96c..6099cd5a59 100644
--- a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
+++ b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
@@ -244,9 +244,9 @@ TEST_P(RandomNumberGeneratorTest, NextSampleSlowExcludedMax2) {
}
}
-INSTANTIATE_TEST_CASE_P(RandomSeeds, RandomNumberGeneratorTest,
- ::testing::Values(INT_MIN, -1, 0, 1, 42, 100,
- 1234567890, 987654321, INT_MAX));
+INSTANTIATE_TEST_SUITE_P(RandomSeeds, RandomNumberGeneratorTest,
+ ::testing::Values(INT_MIN, -1, 0, 1, 42, 100,
+ 1234567890, 987654321, INT_MAX));
} // namespace base
} // namespace v8
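
The *_TEST_CASE_P -> *_TEST_SUITE_P and TYPED_TEST_CASE -> TYPED_TEST_SUITE renames that recur throughout these unittest files track googletest's deprecation of the "test case" terminology in favour of "test suite". A minimal sketch of the new spelling, using a hypothetical fixture and parameter set rather than anything from this patch:

#include "gtest/gtest.h"

// Hypothetical value-parameterized fixture.
class ShiftWidthTest : public ::testing::TestWithParam<int> {};

TEST_P(ShiftWidthTest, FitsInWordShift) { EXPECT_LT(GetParam(), 32); }

// Formerly INSTANTIATE_TEST_CASE_P; the old macro still compiles but is
// deprecated in newer googletest releases.
INSTANTIATE_TEST_SUITE_P(Widths, ShiftWidthTest,
                         ::testing::Values(0, 1, 15, 31));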
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 0f918e3a07..1bad7fed10 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -20,6 +20,7 @@
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/v8.h"
+#include "src/zone/zone-list-inl.h"
#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -292,7 +293,7 @@ class MockPlatform : public v8::Platform {
void PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) override {
UNREACHABLE();
- };
+ }
void PostIdleTask(std::unique_ptr<IdleTask> task) override {
DCHECK(IdleTasksEnabled());
@@ -301,7 +302,7 @@ class MockPlatform : public v8::Platform {
platform_->idle_task_ = task.release();
}
- bool IdleTasksEnabled() override { return true; };
+ bool IdleTasksEnabled() override { return true; }
private:
MockPlatform* platform_;
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 77a1587f0b..95be442f0e 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -530,10 +530,8 @@ TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithImmediate) {
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorDPITest,
- ::testing::ValuesIn(kDPIs));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorDPITest,
+ ::testing::ValuesIn(kDPIs));
// -----------------------------------------------------------------------------
// Data processing instructions with overflow.
@@ -1031,10 +1029,8 @@ TEST_P(InstructionSelectorODPITest, BranchIfNotZeroWithParameters) {
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorODPITest,
- ::testing::ValuesIn(kODPIs));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorODPITest,
+ ::testing::ValuesIn(kODPIs));
// -----------------------------------------------------------------------------
// Shifts.
@@ -1248,10 +1244,8 @@ TEST_P(InstructionSelectorShiftTest,
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
- ::testing::ValuesIn(kShifts));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+ ::testing::ValuesIn(kShifts));
// -----------------------------------------------------------------------------
// Memory access instructions.
@@ -1400,10 +1394,9 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessTest,
- ::testing::ValuesIn(kMemoryAccesses));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
TEST_F(InstructionSelectorMemoryAccessTest, LoadWithShiftedIndex) {
TRACED_FORRANGE(int, immediate_shift, 1, 31) {
@@ -1572,11 +1565,9 @@ TEST_P(InstructionSelectorComparisonTest, Word32EqualWithZero) {
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorComparisonTest,
- ::testing::ValuesIn(kComparisons));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorComparisonTest,
+ ::testing::ValuesIn(kComparisons));
// -----------------------------------------------------------------------------
// Floating point comparisons.
@@ -1659,11 +1650,9 @@ TEST_P(InstructionSelectorF32ComparisonTest, WithImmediateZeroOnLeft) {
EXPECT_EQ(cmp.commuted_flags_condition, s[0]->flags_condition());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorF32ComparisonTest,
- ::testing::ValuesIn(kF32Comparisons));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorF32ComparisonTest,
+ ::testing::ValuesIn(kF32Comparisons));
namespace {
@@ -1742,11 +1731,9 @@ TEST_P(InstructionSelectorF64ComparisonTest, WithImmediateZeroOnLeft) {
EXPECT_EQ(cmp.commuted_flags_condition, s[0]->flags_condition());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorF64ComparisonTest,
- ::testing::ValuesIn(kF64Comparisons));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorF64ComparisonTest,
+ ::testing::ValuesIn(kF64Comparisons));
// -----------------------------------------------------------------------------
// Floating point arithmetic.
@@ -1774,10 +1761,8 @@ TEST_P(InstructionSelectorFAITest, Parameters) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFAITest,
- ::testing::ValuesIn(kFAIs));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFAITest,
+ ::testing::ValuesIn(kFAIs));
TEST_F(InstructionSelectorTest, Float32Abs) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
@@ -2208,9 +2193,9 @@ TEST_P(InstructionSelectorFlagSettingTest, CommuteShift) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorFlagSettingTest,
- ::testing::ValuesIn(kFlagSettingInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorFlagSettingTest,
+ ::testing::ValuesIn(kFlagSettingInstructions));
// -----------------------------------------------------------------------------
// Miscellaneous.
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index bca04a5cf3..c5f9645766 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -496,10 +496,9 @@ TEST_P(InstructionSelectorLogicalTest, ShiftByImmediate) {
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
- ::testing::ValuesIn(kLogicalInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorLogicalTest,
+ ::testing::ValuesIn(kLogicalInstructions));
// -----------------------------------------------------------------------------
// Add and Sub instructions.
@@ -651,10 +650,8 @@ TEST_P(InstructionSelectorAddSubTest, SignedExtendHalfword) {
ASSERT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
- ::testing::ValuesIn(kAddSubInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
+ ::testing::ValuesIn(kAddSubInstructions));
TEST_F(InstructionSelectorTest, AddImmediateOnLeft) {
{
@@ -1010,11 +1007,9 @@ TEST_P(InstructionSelectorDPFlagSetTest, BranchWithParameters) {
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorDPFlagSetTest,
- ::testing::ValuesIn(kDPFlagSetInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorDPFlagSetTest,
+ ::testing::ValuesIn(kDPFlagSetInstructions));
TEST_F(InstructionSelectorTest, Word32AndBranchWithImmediateOnRight) {
TRACED_FOREACH(int32_t, imm, kLogical32Immediates) {
@@ -1282,9 +1277,9 @@ TEST_P(InstructionSelectorTestAndBranchTest, TestAndBranch32) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorTestAndBranchTest,
- ::testing::ValuesIn(kTestAndBranchMatchers32));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorTestAndBranchTest,
+ ::testing::ValuesIn(kTestAndBranchMatchers32));
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 63) {
@@ -1761,10 +1756,9 @@ TEST_P(InstructionSelectorOvfAddSubTest, RORShift) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorOvfAddSubTest,
- ::testing::ValuesIn(kOvfAddSubInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorOvfAddSubTest,
+ ::testing::ValuesIn(kOvfAddSubInstructions));
TEST_F(InstructionSelectorTest, OvfFlagAddImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
@@ -1879,10 +1873,8 @@ TEST_P(InstructionSelectorShiftTest, Immediate) {
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
- ::testing::ValuesIn(kShiftInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+ ::testing::ValuesIn(kShiftInstructions));
TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
@@ -1972,10 +1964,8 @@ TEST_P(InstructionSelectorMulDivTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
- ::testing::ValuesIn(kMulDivInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+ ::testing::ValuesIn(kMulDivInstructions));
namespace {
@@ -2080,11 +2070,9 @@ TEST_P(InstructionSelectorIntDPWithIntMulTest, NegativeMul) {
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorIntDPWithIntMulTest,
- ::testing::ValuesIn(kMulDPInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorIntDPWithIntMulTest,
+ ::testing::ValuesIn(kMulDPInstructions));
TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
// x * (2^k + 1) -> x + (x << k)
@@ -2368,10 +2356,9 @@ TEST_P(InstructionSelectorFPArithTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
- ::testing::ValuesIn(kFPArithInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorFPArithTest,
+ ::testing::ValuesIn(kFPArithInstructions));
typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
@@ -2428,10 +2415,8 @@ TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnLeft) {
EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
- ::testing::ValuesIn(kFPCmpInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+ ::testing::ValuesIn(kFPCmpInstructions));
// -----------------------------------------------------------------------------
// Conversions.
@@ -2455,10 +2440,9 @@ TEST_P(InstructionSelectorConversionTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorConversionTest,
- ::testing::ValuesIn(kConversionInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorConversionTest,
+ ::testing::ValuesIn(kConversionInstructions));
typedef InstructionSelectorTestWithParam<MachInst2>
InstructionSelectorElidedChangeUint32ToUint64Test;
@@ -2477,9 +2461,9 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorElidedChangeUint32ToUint64Test,
- ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorElidedChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
// For each case, make sure the `ChangeUint32ToUint64` node turned into a
@@ -2873,10 +2857,9 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithShiftedIndex) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessTest,
- ::testing::ValuesIn(kMemoryAccesses));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
// -----------------------------------------------------------------------------
// Comparison instructions.
@@ -2943,10 +2926,9 @@ TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorComparisonTest,
- ::testing::ValuesIn(kComparisonInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorComparisonTest,
+ ::testing::ValuesIn(kComparisonInstructions));
TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
{
@@ -3705,9 +3687,9 @@ TEST_P(InstructionSelectorFlagSettingTest, CommuteShift) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorFlagSettingTest,
- ::testing::ValuesIn(kFlagSettingInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorFlagSettingTest,
+ ::testing::ValuesIn(kFlagSettingInstructions));
TEST_F(InstructionSelectorTest, TstInvalidImmediate) {
// Make sure we do not generate an invalid immediate for TST.
@@ -3841,10 +3823,9 @@ TEST_P(InstructionSelectorLogicalWithNotRHSTest, Parameter) {
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorLogicalWithNotRHSTest,
- ::testing::ValuesIn(kLogicalWithNotRHSs));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorLogicalWithNotRHSTest,
+ ::testing::ValuesIn(kLogicalWithNotRHSs));
TEST_F(InstructionSelectorTest, Word32BitwiseNotWithParameter) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index 59d5dccd06..01400041a8 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -284,7 +284,7 @@ TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
EXPECT_EQ(s.IsReference(phi), s.IsReference(param1));
}
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
InstructionSelectorTest, InstructionSelectorPhiTest,
::testing::Values(MachineType::Float64(), MachineType::Int8(),
MachineType::Uint8(), MachineType::Int16(),
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 4d66ded5f1..19e7c6c55f 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -106,10 +106,8 @@ TEST_P(CommonSharedOperatorTest, Properties) {
EXPECT_EQ(sop.properties, op->properties());
}
-
-INSTANTIATE_TEST_CASE_P(CommonOperatorTest, CommonSharedOperatorTest,
- ::testing::ValuesIn(kSharedOperators));
-
+INSTANTIATE_TEST_SUITE_P(CommonOperatorTest, CommonSharedOperatorTest,
+ ::testing::ValuesIn(kSharedOperators));
// -----------------------------------------------------------------------------
// Other operators.
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index 640526bc90..6ee11be686 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -65,7 +65,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
: TypedGraphTest(3),
broker_(isolate(), zone()),
simplified_(zone()),
- deps_(isolate(), zone()) {}
+ deps_(&broker_, zone()) {}
~ConstantFoldingReducerTest() override = default;
protected:
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 65903506ad..72c82da09e 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -313,11 +313,9 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessTest,
- ::testing::ValuesIn(kMemoryAccesses));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
// -----------------------------------------------------------------------------
// AddressingMode for loads and stores.
@@ -622,10 +620,8 @@ TEST_P(InstructionSelectorMultTest, MultAdd32) {
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMultTest,
- ::testing::ValuesIn(kMultParams));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorMultTest,
+ ::testing::ValuesIn(kMultParams));
TEST_F(InstructionSelectorTest, Int32MulHigh) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 171658d830..7147b6c1a7 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -21,7 +21,7 @@ namespace compiler {
class JSCallReducerTest : public TypedGraphTest {
public:
JSCallReducerTest()
- : TypedGraphTest(3), javascript_(zone()), deps_(isolate(), zone()) {
+ : TypedGraphTest(3), javascript_(zone()), deps_(broker(), zone()) {
broker()->SerializeStandardObjects();
}
~JSCallReducerTest() override = default;
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 41f0c180e6..dcb621ced0 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -32,9 +32,8 @@ class JSCreateLoweringTest : public TypedGraphTest {
JSCreateLoweringTest()
: TypedGraphTest(3),
javascript_(zone()),
- deps_(isolate(), zone()),
- handle_scope_(isolate()) {
- }
+ deps_(broker(), zone()),
+ handle_scope_(isolate()) {}
~JSCreateLoweringTest() override = default;
protected:
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 338232b6e0..082e81f27c 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -108,9 +108,8 @@ TEST_P(JSSharedOperatorTest, Properties) {
EXPECT_EQ(sop.properties, op->properties());
}
-
-INSTANTIATE_TEST_CASE_P(JSOperatorTest, JSSharedOperatorTest,
- ::testing::ValuesIn(kSharedOperators));
+INSTANTIATE_TEST_SUITE_P(JSOperatorTest, JSSharedOperatorTest,
+ ::testing::ValuesIn(kSharedOperators));
} // namespace js_operator_unittest
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
index 042e7e6bbc..292415fed0 100644
--- a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
@@ -42,7 +42,7 @@ TEST_F(LoadEliminationTest, LoadElementAndLoadElement) {
Node* effect = graph()->start();
Node* control = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -68,7 +68,7 @@ TEST_F(LoadEliminationTest, StoreElementAndLoadElement) {
Node* control = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
Node* value = Parameter(Type::Any(), 2);
- ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -95,7 +95,7 @@ TEST_F(LoadEliminationTest, StoreElementAndStoreFieldAndLoadElement) {
Node* control = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
Node* value = Parameter(Type::Any(), 2);
- ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -125,7 +125,7 @@ TEST_F(LoadEliminationTest, LoadFieldAndLoadField) {
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess const access = {kTaggedBase, kPointerSize,
+ FieldAccess const access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
@@ -152,7 +152,7 @@ TEST_F(LoadEliminationTest, StoreFieldAndLoadField) {
Node* value = Parameter(Type::Any(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess access = {kTaggedBase, kPointerSize,
+ FieldAccess access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
@@ -180,16 +180,16 @@ TEST_F(LoadEliminationTest, StoreFieldAndKillFields) {
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess access1 = {kTaggedBase, kPointerSize,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier};
+ FieldAccess access1 = {kTaggedBase, kTaggedSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier};
// Offset that is outside the field cache size.
- FieldAccess access2 = {kTaggedBase, 2048 * kPointerSize,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier};
+ FieldAccess access2 = {kTaggedBase, 2048 * kTaggedSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
LoadElimination load_elimination(&editor, jsgraph(), zone());
@@ -220,7 +220,7 @@ TEST_F(LoadEliminationTest, StoreFieldAndStoreElementAndLoadField) {
Node* index = Parameter(Type::UnsignedSmall(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess access = {kTaggedBase, kPointerSize,
+ FieldAccess access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
@@ -253,7 +253,7 @@ TEST_F(LoadEliminationTest, LoadElementOnTrueBranchOfDiamond) {
Node* check = Parameter(Type::Boolean(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
- ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -288,7 +288,7 @@ TEST_F(LoadEliminationTest, LoadElementOnFalseBranchOfDiamond) {
Node* check = Parameter(Type::Boolean(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
- ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -322,7 +322,7 @@ TEST_F(LoadEliminationTest, LoadFieldOnFalseBranchOfDiamond) {
Node* check = Parameter(Type::Boolean(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess const access = {kTaggedBase, kPointerSize,
+ FieldAccess const access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
@@ -358,7 +358,7 @@ TEST_F(LoadEliminationTest, LoadFieldOnTrueBranchOfDiamond) {
Node* check = Parameter(Type::Boolean(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess const access = {kTaggedBase, kPointerSize,
+ FieldAccess const access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
@@ -394,7 +394,7 @@ TEST_F(LoadEliminationTest, LoadFieldWithTypeMismatch) {
Node* value = Parameter(Type::Signed32(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess const access = {kTaggedBase, kPointerSize,
+ FieldAccess const access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Unsigned31(), MachineType::AnyTagged(),
kNoWriteBarrier};
@@ -422,7 +422,7 @@ TEST_F(LoadEliminationTest, LoadElementWithTypeMismatch) {
Node* value = Parameter(Type::Signed32(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
- ElementAccess const access = {kTaggedBase, kPointerSize, Type::Unsigned31(),
+ ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Unsigned31(),
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -446,7 +446,7 @@ TEST_F(LoadEliminationTest, AliasAnalysisForFinishRegion) {
Node* value1 = Parameter(Type::Signed32(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess const access = {kTaggedBase, kPointerSize,
+ FieldAccess const access = {kTaggedBase, kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Signed32(), MachineType::AnyTagged(),
kNoWriteBarrier};
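
The kPointerSize -> kTaggedSize substitutions in the FieldAccess and ElementAccess literals above reflect V8 separating the size of a tagged slot from the size of a native pointer, groundwork for pointer compression, where the two differ. A rough sketch of the distinction; the concrete values are assumptions for illustration, not taken from this patch:

#include <cstddef>

// On a conventional 64-bit build the two sizes coincide; with pointer
// compression enabled a tagged slot shrinks to half a native pointer.
constexpr int kSystemPointerSize = sizeof(void*);
#ifdef V8_COMPRESS_POINTERS
constexpr int kTaggedSize = kSystemPointerSize / 2;  // 4 bytes on a 64-bit target
#else
constexpr int kTaggedSize = kSystemPointerSize;
#endif

// Offsets of tagged fields inside heap objects are then expressed in
// multiples of kTaggedSize rather than kSystemPointerSize.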
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index cd70b3bc41..e7ff126702 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -315,6 +315,9 @@ const ComparisonBinaryOperator kComparisonBinaryOperators[] = {
#undef OPCODE
};
+// Avoid undefined behavior on signed integer overflow.
+int32_t Shl(int32_t x, int32_t y) { return static_cast<uint32_t>(x) << y; }
+
} // namespace
@@ -591,13 +594,13 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithWord32ShlWithConstant) {
Reduction const r1 = Reduce(graph()->NewNode(
machine()->Word32And(),
graph()->NewNode(machine()->Word32Shl(), p0, Int32Constant(l)),
- Int32Constant(-1 << k)));
+ Int32Constant(Shl(-1, k))));
ASSERT_TRUE(r1.Changed());
EXPECT_THAT(r1.replacement(), IsWord32Shl(p0, IsInt32Constant(l)));
// (-1 << K) & (x << L) => x << L
Reduction const r2 = Reduce(graph()->NewNode(
- machine()->Word32And(), Int32Constant(-1 << k),
+ machine()->Word32And(), Int32Constant(Shl(-1, k)),
graph()->NewNode(machine()->Word32Shl(), p0, Int32Constant(l))));
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(), IsWord32Shl(p0, IsInt32Constant(l)));
@@ -643,16 +646,16 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithInt32AddAndConstant) {
TRACED_FORRANGE(int32_t, l, 1, 31) {
TRACED_FOREACH(int32_t, k, kInt32Values) {
- if ((k << l) == 0) continue;
+ if (Shl(k, l) == 0) continue;
// (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
Reduction const r = Reduce(graph()->NewNode(
machine()->Word32And(),
- graph()->NewNode(machine()->Int32Add(), p0, Int32Constant(k << l)),
- Int32Constant(-1 << l)));
+ graph()->NewNode(machine()->Int32Add(), p0, Int32Constant(Shl(k, l))),
+ Int32Constant(Shl(-1, l))));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)),
- IsInt32Constant(k << l)));
+ IsInt32Add(IsWord32And(p0, IsInt32Constant(Shl(-1, l))),
+ IsInt32Constant(Shl(k, l))));
}
Node* s1 = graph()->NewNode(machine()->Word32Shl(), p1, Int32Constant(l));
@@ -660,18 +663,18 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithInt32AddAndConstant) {
// (y << L + x) & (-1 << L) => (x & (-1 << L)) + y << L
Reduction const r1 = Reduce(graph()->NewNode(
machine()->Word32And(), graph()->NewNode(machine()->Int32Add(), s1, p0),
- Int32Constant(-1 << l)));
+ Int32Constant(Shl(-1, l))));
ASSERT_TRUE(r1.Changed());
EXPECT_THAT(r1.replacement(),
- IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)), s1));
+ IsInt32Add(IsWord32And(p0, IsInt32Constant(Shl(-1, l))), s1));
// (x + y << L) & (-1 << L) => (x & (-1 << L)) + y << L
Reduction const r2 = Reduce(graph()->NewNode(
machine()->Word32And(), graph()->NewNode(machine()->Int32Add(), p0, s1),
- Int32Constant(-1 << l)));
+ Int32Constant(Shl(-1, l))));
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
- IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)), s1));
+ IsInt32Add(IsWord32And(p0, IsInt32Constant(Shl(-1, l))), s1));
}
}
@@ -681,23 +684,23 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithInt32MulAndConstant) {
TRACED_FORRANGE(int32_t, l, 1, 31) {
TRACED_FOREACH(int32_t, k, kInt32Values) {
- if ((k << l) == 0) continue;
+ if (Shl(k, l) == 0) continue;
// (x * (K << L)) & (-1 << L) => x * (K << L)
Reduction const r1 = Reduce(graph()->NewNode(
machine()->Word32And(),
- graph()->NewNode(machine()->Int32Mul(), p0, Int32Constant(k << l)),
- Int32Constant(-1 << l)));
+ graph()->NewNode(machine()->Int32Mul(), p0, Int32Constant(Shl(k, l))),
+ Int32Constant(Shl(-1, l))));
ASSERT_TRUE(r1.Changed());
- EXPECT_THAT(r1.replacement(), IsInt32Mul(p0, IsInt32Constant(k << l)));
+ EXPECT_THAT(r1.replacement(), IsInt32Mul(p0, IsInt32Constant(Shl(k, l))));
// ((K << L) * x) & (-1 << L) => x * (K << L)
Reduction const r2 = Reduce(graph()->NewNode(
machine()->Word32And(),
- graph()->NewNode(machine()->Int32Mul(), Int32Constant(k << l), p0),
- Int32Constant(-1 << l)));
+ graph()->NewNode(machine()->Int32Mul(), Int32Constant(Shl(k, l)), p0),
+ Int32Constant(Shl(-1, l))));
ASSERT_TRUE(r2.Changed());
- EXPECT_THAT(r2.replacement(), IsInt32Mul(p0, IsInt32Constant(k << l)));
+ EXPECT_THAT(r2.replacement(), IsInt32Mul(p0, IsInt32Constant(Shl(k, l))));
}
}
}
@@ -710,31 +713,31 @@ TEST_F(MachineOperatorReducerTest,
TRACED_FORRANGE(int32_t, l, 1, 31) {
TRACED_FOREACH(int32_t, k, kInt32Values) {
- if ((k << l) == 0) continue;
+ if (Shl(k, l) == 0) continue;
// (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
Reduction const r1 = Reduce(graph()->NewNode(
machine()->Word32And(),
graph()->NewNode(machine()->Int32Add(),
graph()->NewNode(machine()->Int32Mul(), p1,
- Int32Constant(k << l)),
+ Int32Constant(Shl(k, l))),
p0),
- Int32Constant(-1 << l)));
+ Int32Constant(Shl(-1, l))));
ASSERT_TRUE(r1.Changed());
EXPECT_THAT(r1.replacement(),
- IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)),
- IsInt32Mul(p1, IsInt32Constant(k << l))));
+ IsInt32Add(IsWord32And(p0, IsInt32Constant(Shl(-1, l))),
+ IsInt32Mul(p1, IsInt32Constant(Shl(k, l)))));
// (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
Reduction const r2 = Reduce(graph()->NewNode(
machine()->Word32And(),
graph()->NewNode(machine()->Int32Add(), p0,
graph()->NewNode(machine()->Int32Mul(), p1,
- Int32Constant(k << l))),
- Int32Constant(-1 << l)));
+ Int32Constant(Shl(k, l)))),
+ Int32Constant(Shl(-1, l))));
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
- IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)),
- IsInt32Mul(p1, IsInt32Constant(k << l))));
+ IsInt32Add(IsWord32And(p0, IsInt32Constant(Shl(-1, l))),
+ IsInt32Mul(p1, IsInt32Constant(Shl(k, l)))));
}
}
}
@@ -1012,7 +1015,7 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Sar) {
Int32Constant(x));
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- int32_t m = bit_cast<int32_t>(~((1U << x) - 1U));
+ int32_t m = static_cast<int32_t>(~((1U << x) - 1U));
EXPECT_THAT(r.replacement(), IsWord32And(p0, IsInt32Constant(m)));
}
}
@@ -1023,19 +1026,19 @@ TEST_F(MachineOperatorReducerTest,
Node* const p0 = Parameter(0);
TRACED_FOREACH(int32_t, k, kInt32Values) {
TRACED_FORRANGE(int32_t, l, 1, 31) {
- if ((k << l) == 0) continue;
+ if (Shl(k, l) == 0) continue;
// (x + (K << L)) >> L << L => (x & (-1 << L)) + (K << L)
Reduction const r = Reduce(graph()->NewNode(
machine()->Word32Shl(),
graph()->NewNode(machine()->Word32Sar(),
graph()->NewNode(machine()->Int32Add(), p0,
- Int32Constant(k << l)),
+ Int32Constant(Shl(k, l))),
Int32Constant(l)),
Int32Constant(l)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)),
- IsInt32Constant(k << l)));
+ IsInt32Add(IsWord32And(p0, IsInt32Constant(Shl(-1, l))),
+ IsInt32Constant(Shl(k, l))));
}
}
}
@@ -1050,7 +1053,7 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
Int32Constant(x));
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- int32_t m = bit_cast<int32_t>(~((1U << x) - 1U));
+ int32_t m = static_cast<int32_t>(~((1U << x) - 1U));
EXPECT_THAT(r.replacement(), IsWord32And(p0, IsInt32Constant(m)));
}
}
@@ -1134,10 +1137,9 @@ TEST_F(MachineOperatorReducerTest, Int32DivWithConstant) {
IsInt32Constant(shift)));
}
TRACED_FORRANGE(int32_t, shift, 2, 31) {
- Reduction const r = Reduce(graph()->NewNode(
- machine()->Int32Div(), p0,
- Uint32Constant(bit_cast<uint32_t, int32_t>(-1) << shift),
- graph()->start()));
+ Reduction const r = Reduce(graph()->NewNode(machine()->Int32Div(), p0,
+ Int32Constant(Shl(-1, shift)),
+ graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
@@ -1220,7 +1222,7 @@ TEST_F(MachineOperatorReducerTest, Uint32DivWithConstant) {
Uint32Constant(1u << shift), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsWord32Shr(p0, IsInt32Constant(bit_cast<int32_t>(shift))));
+ IsWord32Shr(p0, IsInt32Constant(static_cast<int32_t>(shift))));
}
}
@@ -1296,11 +1298,10 @@ TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
graph()->start())))));
}
TRACED_FORRANGE(int32_t, shift, 1, 31) {
- Reduction const r = Reduce(graph()->NewNode(
- machine()->Int32Mod(), p0,
- Uint32Constant(bit_cast<uint32_t, int32_t>(-1) << shift),
- graph()->start()));
- int32_t const mask = bit_cast<int32_t, uint32_t>((1U << shift) - 1);
+ Reduction const r = Reduce(graph()->NewNode(machine()->Int32Mod(), p0,
+ Int32Constant(Shl(-1, shift)),
+ graph()->start()));
+ int32_t const mask = static_cast<int32_t>((1U << shift) - 1U);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
@@ -1378,7 +1379,7 @@ TEST_F(MachineOperatorReducerTest, Uint32ModWithConstant) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsWord32And(p0, IsInt32Constant(
- bit_cast<int32_t>((1u << shift) - 1u))));
+ static_cast<int32_t>((1u << shift) - 1u))));
}
}
@@ -1676,7 +1677,7 @@ TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32Sar) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsUint32LessThan(
- p0, IsInt32Constant(bit_cast<int32_t>(limit << shift))));
+ p0, IsInt32Constant(static_cast<int32_t>(limit << shift))));
}
}
@@ -1958,8 +1959,9 @@ TEST_F(MachineOperatorReducerTest, Float64PowWithConstant) {
Reduction const r = Reduce(graph()->NewNode(
machine()->Float64Pow(), Float64Constant(x), Float64Constant(y)));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFloat64Constant(NanSensitiveDoubleEq(Pow(x, y))));
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::pow(x, y))));
}
}
}
@@ -2225,7 +2227,7 @@ TEST_F(MachineOperatorReducerTest, Float64RoundDownWithConstant) {
Reduction r = Reduce(graph()->NewNode(
machine()->Float64RoundDown().placeholder(), Float64Constant(x)));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Constant(Floor(x)));
+ EXPECT_THAT(r.replacement(), IsFloat64Constant(std::floor(x)));
}
}
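
The Shl() helper introduced above exists because left-shifting a negative signed integer (as in the old -1 << k constants) is undefined behaviour in C++ before C++20; the neighbouring bit_cast<int32_t> conversions are likewise replaced with plain static_casts. A minimal standalone sketch of the pattern:

#include <cstdint>

// The shift happens in the unsigned domain, where it is fully defined; the
// resulting bit pattern is then converted back to int32_t on return.
int32_t Shl(int32_t x, int32_t y) { return static_cast<uint32_t>(x) << y; }

// Example: -1 << 4 written directly is UB pre-C++20, while Shl(-1, 4)
// deterministically produces 0xFFFFFFF0, i.e. -16 on two's-complement targets.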
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index 9eddb1d311..c4a86afffd 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -89,13 +89,11 @@ TEST_P(MachineLoadOperatorTest, ParameterIsCorrect) {
EXPECT_EQ(GetParam(), LoadRepresentationOf(machine.Load(GetParam())));
}
-
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
MachineOperatorTest, MachineLoadOperatorTest,
::testing::Combine(::testing::ValuesIn(kMachineReps),
::testing::ValuesIn(kMachineTypesForAccess)));
-
// -----------------------------------------------------------------------------
// Store operator.
@@ -149,8 +147,7 @@ TEST_P(MachineStoreOperatorTest, ParameterIsCorrect) {
EXPECT_EQ(GetParam(), StoreRepresentationOf(machine.Store(GetParam())));
}
-
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
MachineOperatorTest, MachineStoreOperatorTest,
::testing::Combine(
::testing::ValuesIn(kMachineReps),
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 83edb6a21e..09a897a54e 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -292,9 +292,8 @@ TEST_P(InstructionSelectorFPCmpTest, Parameter) {
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
- ::testing::ValuesIn(kFPCmpInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+ ::testing::ValuesIn(kFPCmpInstructions));
// ----------------------------------------------------------------------------
// Arithmetic compare instructions integers.
@@ -316,10 +315,8 @@ TEST_P(InstructionSelectorCmpTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
- ::testing::ValuesIn(kCmpInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
+ ::testing::ValuesIn(kCmpInstructions));
// ----------------------------------------------------------------------------
// Shift instructions.
@@ -347,10 +344,8 @@ TEST_P(InstructionSelectorShiftTest, Immediate) {
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
- ::testing::ValuesIn(kShiftInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+ ::testing::ValuesIn(kShiftInstructions));
TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
// The available shift operand range is `0 <= imm < 32`, but we also test
@@ -464,10 +459,9 @@ TEST_P(InstructionSelectorLogicalTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
- ::testing::ValuesIn(kLogicalInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorLogicalTest,
+ ::testing::ValuesIn(kLogicalInstructions));
TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
{
@@ -601,10 +595,8 @@ TEST_P(InstructionSelectorMulDivTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
- ::testing::ValuesIn(kMulDivInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+ ::testing::ValuesIn(kMulDivInstructions));
// ----------------------------------------------------------------------------
// MOD instructions.
@@ -626,10 +618,8 @@ TEST_P(InstructionSelectorModTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorModTest,
- ::testing::ValuesIn(kModInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorModTest,
+ ::testing::ValuesIn(kModInstructions));
// ----------------------------------------------------------------------------
// Floating point instructions.
@@ -651,10 +641,9 @@ TEST_P(InstructionSelectorFPArithTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
- ::testing::ValuesIn(kFPArithInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorFPArithTest,
+ ::testing::ValuesIn(kFPArithInstructions));
// ----------------------------------------------------------------------------
// Integer arithmetic.
@@ -677,11 +666,9 @@ TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorIntArithTwoTest,
- ::testing::ValuesIn(kAddSubInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithTwoTest,
+ ::testing::ValuesIn(kAddSubInstructions));
// ----------------------------------------------------------------------------
// One node.
@@ -704,11 +691,9 @@ TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorIntArithOneTest,
- ::testing::ValuesIn(kAddSubOneInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithOneTest,
+ ::testing::ValuesIn(kAddSubOneInstructions));
// ----------------------------------------------------------------------------
// Conversions.
@@ -730,11 +715,9 @@ TEST_P(InstructionSelectorConversionTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorConversionTest,
- ::testing::ValuesIn(kConversionInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorConversionTest,
+ ::testing::ValuesIn(kConversionInstructions));
typedef InstructionSelectorTestWithParam<Conversion>
CombineChangeFloat64ToInt32WithRoundFloat64;
@@ -753,10 +736,9 @@ TEST_P(CombineChangeFloat64ToInt32WithRoundFloat64, Parameter) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- CombineChangeFloat64ToInt32WithRoundFloat64,
- ::testing::ValuesIn(kFloat64RoundInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ CombineChangeFloat64ToInt32WithRoundFloat64,
+ ::testing::ValuesIn(kFloat64RoundInstructions));
typedef InstructionSelectorTestWithParam<Conversion>
CombineChangeFloat32ToInt32WithRoundFloat32;
@@ -776,10 +758,9 @@ TEST_P(CombineChangeFloat32ToInt32WithRoundFloat32, Parameter) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- CombineChangeFloat32ToInt32WithRoundFloat32,
- ::testing::ValuesIn(kFloat32RoundInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ CombineChangeFloat32ToInt32WithRoundFloat32,
+ ::testing::ValuesIn(kFloat32RoundInstructions));
TEST_F(InstructionSelectorTest, ChangeFloat64ToInt32OfChangeFloat32ToFloat64) {
{
@@ -1035,11 +1016,9 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessTest,
- ::testing::ValuesIn(kMemoryAccesses));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
// ----------------------------------------------------------------------------
// Load immediate.
@@ -1112,9 +1091,9 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessImmTest,
- ::testing::ValuesIn(kMemoryAccessesImm));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImm));
typedef InstructionSelectorTestWithParam<MemoryAccessImm2>
InstructionSelectorMemoryAccessUnalignedImmTest;
@@ -1143,9 +1122,9 @@ TEST_P(InstructionSelectorMemoryAccessUnalignedImmTest, StoreZero) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessUnalignedImmTest,
- ::testing::ValuesIn(kMemoryAccessesImmUnaligned));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessUnalignedImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImmUnaligned));
// ----------------------------------------------------------------------------
// Load/store offsets more than 16 bits.
@@ -1190,11 +1169,9 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
}
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessImmMoreThan16bitTest,
- ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
// ----------------------------------------------------------------------------
// kMipsTst testing.
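
The bulk of the hunks above are a mechanical rename: googletest deprecated INSTANTIATE_TEST_CASE_P in favor of INSTANTIATE_TEST_SUITE_P as part of its "test case" to "test suite" terminology change; argument order and semantics are unchanged. A minimal value-parameterized suite using the new spelling, independent of V8 (all names here are illustrative):

    #include "gtest/gtest.h"

    // The fixture derives from TestWithParam<T>; GetParam() yields the value.
    class ShiftAmountTest : public ::testing::TestWithParam<int> {};

    TEST_P(ShiftAmountTest, MaskIsIdentityBelow32) {
      // All parameters below are < 32, so masking to five bits is a no-op.
      EXPECT_EQ(GetParam(), GetParam() & 0x1F);
    }

    // Modern spelling; older code used INSTANTIATE_TEST_CASE_P here.
    INSTANTIATE_TEST_SUITE_P(SmallAmounts, ShiftAmountTest,
                             ::testing::Values(0, 1, 5, 31));
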
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index c6c1ff3ee8..cd73fe3c9b 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -316,8 +316,8 @@ TEST_P(InstructionSelectorFPCmpTest, Parameter) {
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
- ::testing::ValuesIn(kFPCmpInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+ ::testing::ValuesIn(kFPCmpInstructions));
// ----------------------------------------------------------------------------
// Arithmetic compare instructions integers
@@ -367,8 +367,8 @@ TEST_P(InstructionSelectorCmpTest, Parameter) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
- ::testing::ValuesIn(kCmpInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
+ ::testing::ValuesIn(kCmpInstructions));
// ----------------------------------------------------------------------------
// Shift instructions.
@@ -393,8 +393,8 @@ TEST_P(InstructionSelectorShiftTest, Immediate) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
- ::testing::ValuesIn(kShiftInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+ ::testing::ValuesIn(kShiftInstructions));
TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
// The available shift operand range is `0 <= imm < 32`, but we also test
@@ -549,9 +549,9 @@ TEST_P(InstructionSelectorLogicalTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
- ::testing::ValuesIn(kLogicalInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorLogicalTest,
+ ::testing::ValuesIn(kLogicalInstructions));
TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
{
@@ -824,8 +824,8 @@ TEST_P(InstructionSelectorMulDivTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
- ::testing::ValuesIn(kMulDivInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+ ::testing::ValuesIn(kMulDivInstructions));
// ----------------------------------------------------------------------------
// MOD instructions.
@@ -844,8 +844,8 @@ TEST_P(InstructionSelectorModTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorModTest,
- ::testing::ValuesIn(kModInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorModTest,
+ ::testing::ValuesIn(kModInstructions));
// ----------------------------------------------------------------------------
// Floating point instructions.
@@ -864,8 +864,9 @@ TEST_P(InstructionSelectorFPArithTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
- ::testing::ValuesIn(kFPArithInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorFPArithTest,
+ ::testing::ValuesIn(kFPArithInstructions));
// ----------------------------------------------------------------------------
// Integer arithmetic
// ----------------------------------------------------------------------------
@@ -884,10 +885,9 @@ TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorIntArithTwoTest,
- ::testing::ValuesIn(kAddSubInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithTwoTest,
+ ::testing::ValuesIn(kAddSubInstructions));
// ----------------------------------------------------------------------------
// One node.
@@ -909,9 +909,9 @@ TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorIntArithOneTest,
- ::testing::ValuesIn(kAddSubOneInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithOneTest,
+ ::testing::ValuesIn(kAddSubOneInstructions));
// ----------------------------------------------------------------------------
// Conversions.
// ----------------------------------------------------------------------------
@@ -929,9 +929,9 @@ TEST_P(InstructionSelectorConversionTest, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorConversionTest,
- ::testing::ValuesIn(kConversionInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorConversionTest,
+ ::testing::ValuesIn(kConversionInstructions));
TEST_F(InstructionSelectorTest, ChangesFromToSmi) {
{
@@ -975,9 +975,9 @@ TEST_P(CombineChangeFloat64ToInt32WithRoundFloat64, Parameter) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- CombineChangeFloat64ToInt32WithRoundFloat64,
- ::testing::ValuesIn(kFloat64RoundInstructions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ CombineChangeFloat64ToInt32WithRoundFloat64,
+ ::testing::ValuesIn(kFloat64RoundInstructions));
typedef InstructionSelectorTestWithParam<Conversion>
CombineChangeFloat32ToInt32WithRoundFloat32;
@@ -997,10 +997,9 @@ TEST_P(CombineChangeFloat32ToInt32WithRoundFloat32, Parameter) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- CombineChangeFloat32ToInt32WithRoundFloat32,
- ::testing::ValuesIn(kFloat32RoundInstructions));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ CombineChangeFloat32ToInt32WithRoundFloat32,
+ ::testing::ValuesIn(kFloat32RoundInstructions));
TEST_F(InstructionSelectorTest, ChangeFloat64ToInt32OfChangeFloat32ToFloat64) {
{
@@ -1172,9 +1171,9 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorElidedChangeUint32ToUint64Test,
- ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorElidedChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
// For each case, make sure the `ChangeUint32ToUint64` node turned into a
@@ -1477,10 +1476,9 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessTest,
- ::testing::ValuesIn(kMemoryAccesses));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
// ----------------------------------------------------------------------------
// Load immediate.
@@ -1552,9 +1550,9 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessImmTest,
- ::testing::ValuesIn(kMemoryAccessesImm));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImm));
typedef InstructionSelectorTestWithParam<MemoryAccessImm2>
InstructionSelectorMemoryAccessUnalignedImmTest;
@@ -1583,9 +1581,9 @@ TEST_P(InstructionSelectorMemoryAccessUnalignedImmTest, StoreZero) {
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessUnalignedImmTest,
- ::testing::ValuesIn(kMemoryAccessesImmUnaligned));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessUnalignedImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImmUnaligned));
// ----------------------------------------------------------------------------
// Load/store offsets more than 16 bits.
@@ -1628,10 +1626,9 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
}
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessImmMoreThan16bitTest,
- ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
// ----------------------------------------------------------------------------
// kMips64Cmp with zero testing.
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index f23265e8e4..cedbfb9daf 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -1946,7 +1946,7 @@ Matcher<Node*> IsTailCall(
IrOpcode::k##opcode, hint_matcher, lhs_matcher, rhs_matcher, \
effect_matcher, control_matcher)); \
}
-SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DEFINE_SPECULATIVE_BINOP_MATCHER);
+SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DEFINE_SPECULATIVE_BINOP_MATCHER)
DEFINE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberEqual)
DEFINE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThan)
DEFINE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThanOrEqual)
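
The semicolon dropped here (and after the matching DECLARE list in the header below) matters because the list macro already expands each entry into a complete definition ending in `}`; the trailing `;` therefore left an empty declaration at namespace scope, which newer compilers can flag (e.g. under -Wextra-semi). A reduced model of the X-macro pattern, with invented names:

    enum Opcode { kAdd, kSub, kMul };

    // Each V(Name) expands into a full function definition.
    #define BINOP_LIST(V) V(Add) V(Sub) V(Mul)
    #define DEFINE_CHECKER(Name) \
      bool Is##Name(int opcode) { return opcode == k##Name; }

    // Correct: no trailing semicolon, since the expansion already ends in '}'.
    BINOP_LIST(DEFINE_CHECKER)
    #undef DEFINE_CHECKER
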
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 96bdbdf3be..2fdfb99e48 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -212,7 +212,7 @@ Matcher<Node*> IsNumberAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher, \
const Matcher<Node*>& effect_matcher, \
const Matcher<Node*>& control_matcher);
-SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_SPECULATIVE_BINOP_MATCHER);
+SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_SPECULATIVE_BINOP_MATCHER)
DECLARE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberEqual)
DECLARE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThan)
DECLARE_SPECULATIVE_BINOP_MATCHER(SpeculativeNumberLessThanOrEqual)
@@ -499,83 +499,83 @@ Matcher<Node*> IsSpeculativeToNumber(const Matcher<Node*>& value_matcher);
// Helpers
static inline Matcher<Node*> IsIntPtrConstant(const intptr_t value) {
- return kPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
- : IsInt32Constant(static_cast<int32_t>(value));
+ return kSystemPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
+ : IsInt32Constant(static_cast<int32_t>(value));
}
static inline Matcher<Node*> IsIntPtrAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
- : IsInt32Add(lhs_matcher, rhs_matcher);
+ return kSystemPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
+ : IsInt32Add(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsIntPtrSub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
- : IsInt32Sub(lhs_matcher, rhs_matcher);
+ return kSystemPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
+ : IsInt32Sub(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsIntPtrMul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsInt64Mul(lhs_matcher, rhs_matcher)
- : IsInt32Mul(lhs_matcher, rhs_matcher);
+ return kSystemPointerSize == 8 ? IsInt64Mul(lhs_matcher, rhs_matcher)
+ : IsInt32Mul(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsIntPtrDiv(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsInt64Div(lhs_matcher, rhs_matcher)
- : IsInt32Div(lhs_matcher, rhs_matcher);
+ return kSystemPointerSize == 8 ? IsInt64Div(lhs_matcher, rhs_matcher)
+ : IsInt32Div(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordShl(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
- : IsWord32Shl(lhs_matcher, rhs_matcher);
+ return kSystemPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
+ : IsWord32Shl(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordShr(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsWord64Shr(lhs_matcher, rhs_matcher)
- : IsWord32Shr(lhs_matcher, rhs_matcher);
+ return kSystemPointerSize == 8 ? IsWord64Shr(lhs_matcher, rhs_matcher)
+ : IsWord32Shr(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordSar(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsWord64Sar(lhs_matcher, rhs_matcher)
- : IsWord32Sar(lhs_matcher, rhs_matcher);
+ return kSystemPointerSize == 8 ? IsWord64Sar(lhs_matcher, rhs_matcher)
+ : IsWord32Sar(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordAnd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsWord64And(lhs_matcher, rhs_matcher)
- : IsWord32And(lhs_matcher, rhs_matcher);
+ return kSystemPointerSize == 8 ? IsWord64And(lhs_matcher, rhs_matcher)
+ : IsWord32And(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordOr(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsWord64Or(lhs_matcher, rhs_matcher)
- : IsWord32Or(lhs_matcher, rhs_matcher);
+ return kSystemPointerSize == 8 ? IsWord64Or(lhs_matcher, rhs_matcher)
+ : IsWord32Or(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsWordXor(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
- return kPointerSize == 8 ? IsWord64Xor(lhs_matcher, rhs_matcher)
- : IsWord32Xor(lhs_matcher, rhs_matcher);
+ return kSystemPointerSize == 8 ? IsWord64Xor(lhs_matcher, rhs_matcher)
+ : IsWord32Xor(lhs_matcher, rhs_matcher);
}
static inline Matcher<Node*> IsChangeInt32ToIntPtr(
const Matcher<Node*>& matcher) {
- return kPointerSize == 8 ? IsChangeInt32ToInt64(matcher) : matcher;
+ return kSystemPointerSize == 8 ? IsChangeInt32ToInt64(matcher) : matcher;
}
static inline Matcher<Node*> IsChangeUint32ToWord(
const Matcher<Node*>& matcher) {
- return kPointerSize == 8 ? IsChangeUint32ToUint64(matcher) : matcher;
+ return kSystemPointerSize == 8 ? IsChangeUint32ToUint64(matcher) : matcher;
}
static inline Matcher<Node*> IsTruncateIntPtrToInt32(
const Matcher<Node*>& matcher) {
- return kPointerSize == 8 ? IsTruncateInt64ToInt32(matcher) : matcher;
+ return kSystemPointerSize == 8 ? IsTruncateInt64ToInt32(matcher) : matcher;
}
} // namespace compiler
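
These helper changes are part of the kPointerSize to kSystemPointerSize rename, which keeps the hardware pointer width distinct from V8's tagged slot size — a distinction that matters once pointer compression makes the two differ. Each matcher still dispatches to its 64-bit or 32-bit variant based on that constant; a self-contained sketch of the same dispatch idiom (function names invented):

    #include <cstdint>

    constexpr int kSystemPointerSize = sizeof(void*);  // 8 on 64-bit targets

    int64_t AddWord64(int64_t a, int64_t b) { return a + b; }
    int32_t AddWord32(int32_t a, int32_t b) { return a + b; }

    // Mirrors IsIntPtrAdd(): pick the operation matching the pointer width.
    int64_t AddIntPtr(int64_t a, int64_t b) {
      return kSystemPointerSize == 8
                 ? AddWord64(a, b)
                 : AddWord32(static_cast<int32_t>(a), static_cast<int32_t>(b));
    }
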
diff --git a/deps/v8/test/unittests/compiler/opcodes-unittest.cc b/deps/v8/test/unittests/compiler/opcodes-unittest.cc
index a0e67ecb27..5036ade61e 100644
--- a/deps/v8/test/unittests/compiler/opcodes-unittest.cc
+++ b/deps/v8/test/unittests/compiler/opcodes-unittest.cc
@@ -78,9 +78,6 @@ bool IsComparisonOpcode(IrOpcode::Value opcode) {
}
}
-
-const IrOpcode::Value kInvalidOpcode = static_cast<IrOpcode::Value>(123456789);
-
char const* const kMnemonics[] = {
#define OPCODE(Opcode) #Opcode,
ALL_OP_LIST(OPCODE)
@@ -96,42 +93,36 @@ const IrOpcode::Value kOpcodes[] = {
} // namespace
TEST(IrOpcodeTest, IsCommonOpcode) {
- EXPECT_FALSE(IrOpcode::IsCommonOpcode(kInvalidOpcode));
TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
EXPECT_EQ(IsCommonOpcode(opcode), IrOpcode::IsCommonOpcode(opcode));
}
}
TEST(IrOpcodeTest, IsControlOpcode) {
- EXPECT_FALSE(IrOpcode::IsControlOpcode(kInvalidOpcode));
TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
EXPECT_EQ(IsControlOpcode(opcode), IrOpcode::IsControlOpcode(opcode));
}
}
TEST(IrOpcodeTest, IsJsOpcode) {
- EXPECT_FALSE(IrOpcode::IsJsOpcode(kInvalidOpcode));
TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
EXPECT_EQ(IsJsOpcode(opcode), IrOpcode::IsJsOpcode(opcode));
}
}
TEST(IrOpcodeTest, IsConstantOpcode) {
- EXPECT_FALSE(IrOpcode::IsConstantOpcode(kInvalidOpcode));
TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
EXPECT_EQ(IsConstantOpcode(opcode), IrOpcode::IsConstantOpcode(opcode));
}
}
TEST(IrOpcodeTest, IsComparisonOpcode) {
- EXPECT_FALSE(IrOpcode::IsComparisonOpcode(kInvalidOpcode));
TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
EXPECT_EQ(IsComparisonOpcode(opcode), IrOpcode::IsComparisonOpcode(opcode));
}
}
TEST(IrOpcodeTest, Mnemonic) {
- EXPECT_STREQ("UnknownOpcode", IrOpcode::Mnemonic(kInvalidOpcode));
TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
EXPECT_STREQ(kMnemonics[opcode], IrOpcode::Mnemonic(opcode));
}
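
The deleted kInvalidOpcode sentinel was manufactured by casting 123456789 into IrOpcode::Value. For an enumeration without a fixed underlying type, casting a value outside the enum's range of representable values is undefined behavior (sanitizers can report it), which is presumably why the sentinel checks were removed outright rather than reworked. A small illustration of the rule:

    // No fixed underlying type: the range of valid values is only as wide as
    // the bits needed for the enumerators, so the cast below is UB.
    enum Color { kRed, kGreen, kBlue };
    // Color bad = static_cast<Color>(123456789);  // undefined behavior

    // With a fixed underlying type, every value of that type is valid,
    // even if it names no enumerator.
    enum class Wide : int { kA, kB };
    Wide ok = static_cast<Wide>(123456789);  // well-defined
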
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
index 079cc4b99a..a9cf3260a2 100644
--- a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -668,16 +668,18 @@ TEST_F(RedundancyEliminationTest, CheckedUint32Bounds) {
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* check1 = effect =
- graph()->NewNode(simplified()->CheckedUint32Bounds(feedback1), index,
- length, effect, control);
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedUint32Bounds(
+ feedback1, CheckBoundsParameters::kDeoptOnOutOfBounds),
+ index, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
- Node* check2 = effect =
- graph()->NewNode(simplified()->CheckedUint32Bounds(feedback2), index,
- length, effect, control);
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedUint32Bounds(
+ feedback2, CheckBoundsParameters::kDeoptOnOutOfBounds),
+ index, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
diff --git a/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
index d77f424ef7..02ce2bcdac 100644
--- a/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
@@ -755,7 +755,7 @@ TEST_P(SlotConstraintTest, SlotConstraint) {
Allocate();
}
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
RegisterAllocatorTest, SlotConstraintTest,
::testing::Combine(::testing::ValuesIn(kParameterTypes),
::testing::Range(0, SlotConstraintTest::kMaxVariant)));
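
This instantiation also shows googletest's ::testing::Combine, which feeds a TestWithParam<std::tuple<...>> fixture with the cross product of its generators. A generic sketch, with an invented fixture and values:

    #include <tuple>
    #include "gtest/gtest.h"

    class GridTest : public ::testing::TestWithParam<std::tuple<char, int>> {};

    TEST_P(GridTest, CellIsInRange) {
      char kind = std::get<0>(GetParam());
      int variant = std::get<1>(GetParam());
      EXPECT_TRUE(kind == 'a' || kind == 'b');
      EXPECT_LT(variant, 4);
    }

    // Cross product {'a','b'} x {0,1,2,3}: eight test instantiations.
    INSTANTIATE_TEST_SUITE_P(AllCells, GridTest,
                             ::testing::Combine(::testing::Values('a', 'b'),
                                                ::testing::Range(0, 4)));
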
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index 239f19ff93..280afef4c9 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -115,9 +115,8 @@ TEST_P(SimplifiedPureOperatorTest, Properties) {
EXPECT_EQ(pop.properties, op->properties() & pop.properties);
}
-INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest, SimplifiedPureOperatorTest,
- ::testing::ValuesIn(kPureOperators));
-
+INSTANTIATE_TEST_SUITE_P(SimplifiedOperatorTest, SimplifiedPureOperatorTest,
+ ::testing::ValuesIn(kPureOperators));
// -----------------------------------------------------------------------------
@@ -213,10 +212,9 @@ TEST_P(SimplifiedElementAccessOperatorTest, StoreElement) {
EXPECT_EQ(0, op->ControlOutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
- SimplifiedElementAccessOperatorTest,
- ::testing::ValuesIn(kElementAccesses));
+INSTANTIATE_TEST_SUITE_P(SimplifiedOperatorTest,
+ SimplifiedElementAccessOperatorTest,
+ ::testing::ValuesIn(kElementAccesses));
} // namespace simplified_operator_unittest
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
index da1f3941f0..c8aaafb6dc 100644
--- a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -27,7 +27,7 @@ namespace typed_optimization_unittest {
class TypedOptimizationTest : public TypedGraphTest {
public:
TypedOptimizationTest()
- : TypedGraphTest(3), simplified_(zone()), deps_(isolate(), zone()) {}
+ : TypedGraphTest(3), simplified_(zone()), deps_(broker(), zone()) {}
~TypedOptimizationTest() override = default;
protected:
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 5954dbc638..5d712bd220 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -10,7 +10,7 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
-#include "test/cctest/types-fuzz.h"
+#include "test/common/types-fuzz.h"
#include "test/unittests/compiler/graph-unittest.h"
namespace v8 {
@@ -303,7 +303,9 @@ class TyperTest : public TypedGraphTest {
namespace {
-int32_t shift_left(int32_t x, int32_t y) { return x << (y & 0x1F); }
+int32_t shift_left(int32_t x, int32_t y) {
+ return static_cast<uint32_t>(x) << (y & 0x1F);
+}
int32_t shift_right(int32_t x, int32_t y) { return x >> (y & 0x1F); }
int32_t bit_or(int32_t x, int32_t y) { return x | y; }
int32_t bit_and(int32_t x, int32_t y) { return x & y; }
@@ -506,7 +508,7 @@ TEST_MONOTONICITY(ToBoolean)
TestBinaryMonotonicity(simplified_.name(), Type::Number(), \
Type::Number()); \
}
-SIMPLIFIED_NUMBER_BINOP_LIST(TEST_MONOTONICITY);
+SIMPLIFIED_NUMBER_BINOP_LIST(TEST_MONOTONICITY)
#undef TEST_MONOTONICITY
// SIMPLIFIED BINOPs without hint, without input restriction
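
The shift_left change above is an undefined-behavior fix: before C++20, left-shifting a negative signed value is UB, so the operand is routed through uint32_t, where shifts are fully defined modulo 2^32 (the `& 0x1F` mask likewise keeps the shift count below the 32-bit width, since shifting by the width or more is UB as well). The same fix with the final conversion written out explicitly:

    #include <cstdint>

    // UB before C++20 when x < 0:  return x << (y & 0x1F);
    int32_t shift_left(int32_t x, int32_t y) {
      // Unsigned shift is defined for all inputs; converting the wrapped
      // result back to int32_t preserves the intended bits on V8's
      // two's-complement targets.
      return static_cast<int32_t>(static_cast<uint32_t>(x) << (y & 0x1F));
    }
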
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index f174b92731..7ba4c5ae4b 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -103,9 +103,9 @@ TEST_P(InstructionSelectorChangeInt32ToInt64Test, ChangeInt32ToInt64WithLoad) {
EXPECT_EQ(extension.expected_opcode, s[0]->arch_opcode());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorChangeInt32ToInt64Test,
- ::testing::ValuesIn(kLoadWithToInt64Extensions));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorChangeInt32ToInt64Test,
+ ::testing::ValuesIn(kLoadWithToInt64Extensions));
// -----------------------------------------------------------------------------
// Loads and stores
@@ -171,11 +171,9 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
EXPECT_EQ(0U, s[0]->OutputCount());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorMemoryAccessTest,
- ::testing::ValuesIn(kMemoryAccesses));
-
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
// -----------------------------------------------------------------------------
// ChangeUint32ToUint64.
@@ -237,10 +235,9 @@ TEST_P(InstructionSelectorChangeUint32ToUint64Test, ChangeUint32ToUint64) {
ASSERT_EQ(1U, s.size());
}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorChangeUint32ToUint64Test,
- ::testing::ValuesIn(kWord32BinaryOperations));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kWord32BinaryOperations));
// -----------------------------------------------------------------------------
// CanElideChangeUint32ToUint64
@@ -320,9 +317,9 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorElidedChangeUint32ToUint64Test,
- ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorElidedChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
// ChangeUint32ToUint64AfterLoad
TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
diff --git a/deps/v8/test/unittests/eh-frame-writer-unittest.cc b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
index 0846fda2f4..52501b462e 100644
--- a/deps/v8/test/unittests/eh-frame-writer-unittest.cc
+++ b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
@@ -43,7 +43,7 @@ TEST_F(EhFrameWriterTest, Alignment) {
ASSERT_EQ(0, EhFrameConstants::kEhFrameTerminatorSize % 4);
EXPECT_EQ(0, (iterator.GetBufferSize() - EhFrameConstants::kEhFrameHdrSize -
EhFrameConstants::kEhFrameTerminatorSize) %
- kPointerSize);
+ kSystemPointerSize);
}
TEST_F(EhFrameWriterTest, FDEHeader) {
diff --git a/deps/v8/test/unittests/heap/bitmap-test-utils.h b/deps/v8/test/unittests/heap/bitmap-test-utils.h
new file mode 100644
index 0000000000..b709263968
--- /dev/null
+++ b/deps/v8/test/unittests/heap/bitmap-test-utils.h
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_HEAP_BITMAP_TEST_UTILS_H_
+#define V8_UNITTESTS_HEAP_BITMAP_TEST_UTILS_H_
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class TestWithBitmap : public ::testing::Test {
+ public:
+ TestWithBitmap() : memory_(new uint8_t[Bitmap::kSize]) {
+ memset(memory_, 0, Bitmap::kSize);
+ }
+
+ ~TestWithBitmap() override { delete[] memory_; }
+
+ T* bitmap() { return reinterpret_cast<T*>(memory_); }
+ uint8_t* raw_bitmap() { return memory_; }
+
+ private:
+ uint8_t* memory_;
+};
+
+using BitmapTypes = ::testing::Types<ConcurrentBitmap<AccessMode::NON_ATOMIC>,
+ ConcurrentBitmap<AccessMode::ATOMIC>>;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_HEAP_BITMAP_TEST_UTILS_H_
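
The new header turns the bitmap fixture into a template so the same tests can run against both the atomic and non-atomic ConcurrentBitmap specializations via googletest typed tests. A standalone sketch of the mechanism (element types here are placeholders):

    #include <cstdint>
    #include <vector>
    #include "gtest/gtest.h"

    template <typename T>
    class BufferTest : public ::testing::Test {
     protected:
      std::vector<T> buffer_ = std::vector<T>(16, T{0});
    };

    using ElementTypes = ::testing::Types<uint8_t, uint32_t>;
    TYPED_TEST_SUITE(BufferTest, ElementTypes);

    // Fixture members live in a dependent base class, so TYPED_TEST bodies
    // must qualify them with this-> -- hence this->bitmap() in the tests below.
    TYPED_TEST(BufferTest, StartsZeroed) {
      for (TypeParam v : this->buffer_) EXPECT_EQ(v, TypeParam{0});
    }
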
diff --git a/deps/v8/test/unittests/heap/bitmap-unittest.cc b/deps/v8/test/unittests/heap/bitmap-unittest.cc
index 1ecab4dd72..393f5ea303 100644
--- a/deps/v8/test/unittests/heap/bitmap-unittest.cc
+++ b/deps/v8/test/unittests/heap/bitmap-unittest.cc
@@ -3,40 +3,26 @@
// found in the LICENSE file.
#include "src/heap/spaces.h"
+#include "test/unittests/heap/bitmap-test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
-namespace {
+namespace v8 {
+namespace internal {
-using v8::internal::Bitmap;
+const uint32_t kBlackCell = 0xAAAAAAAA;
+const uint32_t kWhiteCell = 0x00000000;
+const uint32_t kBlackByte = 0xAA;
+const uint32_t kWhiteByte = 0x00;
-class BitmapTest : public ::testing::Test {
- public:
- static const uint32_t kBlackCell;
- static const uint32_t kWhiteCell;
- static const uint32_t kBlackByte;
- static const uint32_t kWhiteByte;
+template <typename T>
+using BitmapTest = TestWithBitmap<T>;
- BitmapTest() : memory_(new uint8_t[Bitmap::kSize]) {
- memset(memory_, 0, Bitmap::kSize);
- }
-
- ~BitmapTest() override { delete[] memory_; }
-
- Bitmap* bitmap() { return reinterpret_cast<Bitmap*>(memory_); }
- uint8_t* raw_bitmap() { return memory_; }
-
- private:
- uint8_t* memory_;
-};
+TYPED_TEST_SUITE(BitmapTest, BitmapTypes);
+using NonAtomicBitmapTest =
+ TestWithBitmap<ConcurrentBitmap<AccessMode::NON_ATOMIC>>;
-const uint32_t BitmapTest::kBlackCell = 0xAAAAAAAA;
-const uint32_t BitmapTest::kWhiteCell = 0x00000000;
-const uint32_t BitmapTest::kBlackByte = 0xAA;
-const uint32_t BitmapTest::kWhiteByte = 0x00;
-
-
-TEST_F(BitmapTest, IsZeroInitialized) {
+TEST_F(NonAtomicBitmapTest, IsZeroInitialized) {
// We require all tests to start from a zero-initialized bitmap. Manually
// verify this invariant here.
for (size_t i = 0; i < Bitmap::kSize; i++) {
@@ -44,9 +30,8 @@ TEST_F(BitmapTest, IsZeroInitialized) {
}
}
-
-TEST_F(BitmapTest, Cells) {
- Bitmap* bm = bitmap();
+TEST_F(NonAtomicBitmapTest, Cells) {
+ auto bm = bitmap();
bm->cells()[1] = kBlackCell;
uint8_t* raw = raw_bitmap();
int second_cell_base = Bitmap::kBytesPerCell;
@@ -55,8 +40,7 @@ TEST_F(BitmapTest, Cells) {
}
}
-
-TEST_F(BitmapTest, CellsCount) {
+TEST_F(NonAtomicBitmapTest, CellsCount) {
int last_cell_index = bitmap()->CellsCount() - 1;
bitmap()->cells()[last_cell_index] = kBlackCell;
// Manually verify on raw memory.
@@ -71,17 +55,34 @@ TEST_F(BitmapTest, CellsCount) {
}
}
-
-TEST_F(BitmapTest, IsClean) {
- Bitmap* bm = bitmap();
+TEST_F(NonAtomicBitmapTest, IsClean) {
+ auto bm = bitmap();
EXPECT_TRUE(bm->IsClean());
bm->cells()[0] = kBlackCell;
EXPECT_FALSE(bm->IsClean());
}
+TYPED_TEST(BitmapTest, Clear) {
+ auto bm = this->bitmap();
+ for (size_t i = 0; i < Bitmap::kSize; i++) {
+ this->raw_bitmap()[i] = 0xFFu;
+ }
+ bm->Clear();
+ for (size_t i = 0; i < Bitmap::kSize; i++) {
+ EXPECT_EQ(this->raw_bitmap()[i], 0);
+ }
+}
+
+TYPED_TEST(BitmapTest, MarkAllBits) {
+ auto bm = this->bitmap();
+ bm->MarkAllBits();
+ for (size_t i = 0; i < Bitmap::kSize; i++) {
+ EXPECT_EQ(this->raw_bitmap()[i], 0xFF);
+ }
+}
-TEST_F(BitmapTest, ClearRange1) {
- Bitmap* bm = bitmap();
+TYPED_TEST(BitmapTest, ClearRange1) {
+ auto bm = this->bitmap();
bm->cells()[0] = kBlackCell;
bm->cells()[1] = kBlackCell;
bm->cells()[2] = kBlackCell;
@@ -91,9 +92,8 @@ TEST_F(BitmapTest, ClearRange1) {
EXPECT_EQ(bm->cells()[2], kBlackCell);
}
-
-TEST_F(BitmapTest, ClearRange2) {
- Bitmap* bm = bitmap();
+TYPED_TEST(BitmapTest, ClearRange2) {
+ auto bm = this->bitmap();
bm->cells()[0] = kBlackCell;
bm->cells()[1] = kBlackCell;
bm->cells()[2] = kBlackCell;
@@ -104,4 +104,59 @@ TEST_F(BitmapTest, ClearRange2) {
EXPECT_EQ(bm->cells()[2], kBlackCell);
}
-} // namespace
+TYPED_TEST(BitmapTest, SetAndClearRange) {
+ auto bm = this->bitmap();
+ for (int i = 0; i < 3; i++) {
+ bm->SetRange(i, Bitmap::kBitsPerCell + i);
+ CHECK_EQ(bm->cells()[0], 0xFFFFFFFFu << i);
+ CHECK_EQ(bm->cells()[1], (1u << i) - 1);
+ bm->ClearRange(i, Bitmap::kBitsPerCell + i);
+ CHECK_EQ(bm->cells()[0], 0x0u);
+ CHECK_EQ(bm->cells()[1], 0x0u);
+ }
+}
+
+// AllBitsSetInRange() and AllBitsClearInRange() are only used when verifying
+// the heap on the main thread so they don't have atomic implementations.
+TEST_F(NonAtomicBitmapTest, ClearMultipleRanges) {
+ auto bm = this->bitmap();
+
+ bm->SetRange(0, Bitmap::kBitsPerCell * 3);
+ CHECK(bm->AllBitsSetInRange(0, Bitmap::kBitsPerCell));
+
+ bm->ClearRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell);
+ bm->ClearRange(Bitmap::kBitsPerCell,
+ Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
+ bm->ClearRange(Bitmap::kBitsPerCell * 2 + 8, Bitmap::kBitsPerCell * 2 + 16);
+ bm->ClearRange(Bitmap::kBitsPerCell * 2 + 24, Bitmap::kBitsPerCell * 3);
+
+ CHECK_EQ(bm->cells()[0], 0xFFFFu);
+ CHECK(bm->AllBitsSetInRange(0, Bitmap::kBitsPerCell / 2));
+ CHECK(
+ bm->AllBitsClearInRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell));
+
+ CHECK_EQ(bm->cells()[1], 0xFFFF0000u);
+ CHECK(bm->AllBitsClearInRange(
+ Bitmap::kBitsPerCell, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2));
+ CHECK(bm->AllBitsSetInRange(Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2,
+ Bitmap::kBitsPerCell * 2));
+
+ CHECK_EQ(bm->cells()[2], 0xFF00FFu);
+ CHECK(bm->AllBitsSetInRange(
+ Bitmap::kBitsPerCell * 2,
+ Bitmap::kBitsPerCell * 2 + Bitmap::kBitsPerCell / 4));
+ CHECK(bm->AllBitsClearInRange(
+ Bitmap::kBitsPerCell * 2 + Bitmap::kBitsPerCell / 4,
+ Bitmap::kBitsPerCell * 2 + Bitmap::kBitsPerCell / 2));
+ CHECK(bm->AllBitsSetInRange(
+ Bitmap::kBitsPerCell * 2 + Bitmap::kBitsPerCell / 2,
+ Bitmap::kBitsPerCell * 2 + Bitmap::kBitsPerCell / 2 +
+ Bitmap::kBitsPerCell / 4));
+ CHECK(bm->AllBitsClearInRange(Bitmap::kBitsPerCell * 2 +
+ Bitmap::kBitsPerCell / 2 +
+ Bitmap::kBitsPerCell / 4,
+ Bitmap::kBitsPerCell * 3));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
index 7063b2a280..b3901d74b0 100644
--- a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -31,7 +31,6 @@ class GCIdleTimeHandlerTest : public ::testing::Test {
static const size_t kSizeOfObjects = 100 * MB;
static const size_t kMarkCompactSpeed = 200 * KB;
static const size_t kMarkingSpeed = 200 * KB;
- static const int kMaxNotifications = 100;
private:
GCIdleTimeHandler handler_;
@@ -95,8 +94,8 @@ TEST_F(GCIdleTimeHandlerTest, ContextDisposeLowRate) {
heap_state.contexts_disposed = 1;
heap_state.incremental_marking_stopped = true;
double idle_time_ms = 0;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_NOTHING, action.type);
+ EXPECT_EQ(GCIdleTimeAction::kDone,
+ handler()->Compute(idle_time_ms, heap_state));
}
@@ -108,8 +107,8 @@ TEST_F(GCIdleTimeHandlerTest, ContextDisposeHighRate) {
GCIdleTimeHandler::kHighContextDisposalRate - 1;
heap_state.incremental_marking_stopped = true;
double idle_time_ms = 0;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_FULL_GC, action.type);
+ EXPECT_EQ(GCIdleTimeAction::kFullGC,
+ handler()->Compute(idle_time_ms, heap_state));
}
@@ -120,8 +119,8 @@ TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeZeroIdleTime) {
heap_state.contexts_disposal_rate = 1.0;
heap_state.incremental_marking_stopped = true;
double idle_time_ms = 0;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_FULL_GC, action.type);
+ EXPECT_EQ(GCIdleTimeAction::kFullGC,
+ handler()->Compute(idle_time_ms, heap_state));
}
@@ -133,8 +132,8 @@ TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
GCIdleTimeHandler::kHighContextDisposalRate;
size_t speed = kMarkCompactSpeed;
double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
+ EXPECT_EQ(GCIdleTimeAction::kIncrementalStep,
+ handler()->Compute(idle_time_ms, heap_state));
}
@@ -146,8 +145,8 @@ TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
GCIdleTimeHandler::kHighContextDisposalRate;
size_t speed = kMarkCompactSpeed;
double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
+ EXPECT_EQ(GCIdleTimeAction::kIncrementalStep,
+ handler()->Compute(idle_time_ms, heap_state));
}
TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeHeap) {
@@ -158,16 +157,16 @@ TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeHeap) {
heap_state.incremental_marking_stopped = true;
heap_state.size_of_objects = 101 * MB;
double idle_time_ms = 0;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_NOTHING, action.type);
+ EXPECT_EQ(GCIdleTimeAction::kDone,
+ handler()->Compute(idle_time_ms, heap_state));
}
TEST_F(GCIdleTimeHandlerTest, IncrementalMarking1) {
if (!handler()->Enabled()) return;
GCIdleTimeHeapState heap_state = DefaultHeapState();
double idle_time_ms = 10;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
+ EXPECT_EQ(GCIdleTimeAction::kIncrementalStep,
+ handler()->Compute(idle_time_ms, heap_state));
}
@@ -177,8 +176,8 @@ TEST_F(GCIdleTimeHandlerTest, NotEnoughTime) {
heap_state.incremental_marking_stopped = true;
size_t speed = kMarkCompactSpeed;
double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DONE, action.type);
+ EXPECT_EQ(GCIdleTimeAction::kDone,
+ handler()->Compute(idle_time_ms, heap_state));
}
@@ -187,8 +186,8 @@ TEST_F(GCIdleTimeHandlerTest, DoNotStartIncrementalMarking) {
GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.incremental_marking_stopped = true;
double idle_time_ms = 10.0;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DONE, action.type);
+ EXPECT_EQ(GCIdleTimeAction::kDone,
+ handler()->Compute(idle_time_ms, heap_state));
}
@@ -197,32 +196,11 @@ TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop) {
GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.incremental_marking_stopped = true;
double idle_time_ms = 10.0;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DONE, action.type);
+ EXPECT_EQ(GCIdleTimeAction::kDone,
+ handler()->Compute(idle_time_ms, heap_state));
heap_state.incremental_marking_stopped = false;
- action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeNothingToDo) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- for (int i = 0; i < kMaxNotifications; i++) {
- GCIdleTimeAction action = handler()->Compute(0, heap_state);
- EXPECT_EQ(DO_NOTHING, action.type);
- }
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, SmallIdleTimeNothingToDo) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.incremental_marking_stopped = true;
- for (int i = 0; i < kMaxNotifications; i++) {
- GCIdleTimeAction action = handler()->Compute(10, heap_state);
- EXPECT_TRUE(DO_NOTHING == action.type || DONE == action.type);
- }
+ EXPECT_EQ(GCIdleTimeAction::kIncrementalStep,
+ handler()->Compute(idle_time_ms, heap_state));
}
@@ -235,9 +213,9 @@ TEST_F(GCIdleTimeHandlerTest, DoneIfNotMakingProgressOnIncrementalMarking) {
// Simulate incremental marking stopped and not eligible to start.
heap_state.incremental_marking_stopped = true;
double idle_time_ms = 10.0;
- // We should return DONE if we cannot start incremental marking.
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DONE, action.type);
+ // We should return kDone if we cannot start incremental marking.
+ EXPECT_EQ(GCIdleTimeAction::kDone,
+ handler()->Compute(idle_time_ms, heap_state));
}
} // namespace internal
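
Throughout this file the tests adapt to GCIdleTimeHandler::Compute() returning a bare enum rather than a struct whose .type field they previously inspected, so each expectation collapses to a single EXPECT_EQ; the old DO_NOTHING/DONE pair also folds into the one kDone state, and the two NothingToDo tests that exercised the distinction are dropped. A toy model of the simplified shape (the decision logic below is invented, not V8's):

    #include "gtest/gtest.h"

    enum class GCIdleTimeAction { kDone, kIncrementalStep, kFullGC };

    // Stand-in for the handler: enum returns compare directly with EXPECT_EQ.
    GCIdleTimeAction Compute(double idle_time_ms, bool marking_stopped) {
      if (idle_time_ms <= 0) return GCIdleTimeAction::kDone;
      return marking_stopped ? GCIdleTimeAction::kFullGC
                             : GCIdleTimeAction::kIncrementalStep;
    }

    TEST(GCIdleToy, ZeroIdleTimeIsDone) {
      EXPECT_EQ(GCIdleTimeAction::kDone, Compute(0, true));
    }
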
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 53954d8178..12bd46886d 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -6,13 +6,10 @@
#include <iostream>
#include <limits>
-#include "src/objects.h"
-#include "src/objects-inl.h"
-
-#include "src/handles.h"
#include "src/handles-inl.h"
-
#include "src/heap/heap.h"
+#include "src/heap/spaces-inl.h"
+#include "src/objects-inl.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -23,14 +20,13 @@ typedef TestWithIsolate HeapTest;
typedef TestWithIsolateAndPointerCompression HeapWithPointerCompressionTest;
TEST(Heap, SemiSpaceSize) {
- const size_t KB = static_cast<size_t>(i::KB);
const size_t MB = static_cast<size_t>(i::MB);
const size_t pm = i::Heap::kPointerMultiplier;
- ASSERT_EQ(1u * pm * MB / 2, i::Heap::ComputeMaxSemiSpaceSize(0u) * KB);
- ASSERT_EQ(1u * pm * MB / 2, i::Heap::ComputeMaxSemiSpaceSize(512u * MB) * KB);
- ASSERT_EQ(2u * pm * MB, i::Heap::ComputeMaxSemiSpaceSize(1024u * MB) * KB);
- ASSERT_EQ(5u * pm * MB, i::Heap::ComputeMaxSemiSpaceSize(2024u * MB) * KB);
- ASSERT_EQ(8u * pm * MB, i::Heap::ComputeMaxSemiSpaceSize(4095u * MB) * KB);
+ ASSERT_EQ(512u * pm, i::Heap::ComputeMaxSemiSpaceSize(0u));
+ ASSERT_EQ(512u * pm, i::Heap::ComputeMaxSemiSpaceSize(512u * MB));
+ ASSERT_EQ(2048u * pm, i::Heap::ComputeMaxSemiSpaceSize(1024u * MB));
+ ASSERT_EQ(5120u * pm, i::Heap::ComputeMaxSemiSpaceSize(2024u * MB));
+ ASSERT_EQ(8192u * pm, i::Heap::ComputeMaxSemiSpaceSize(4095u * MB));
}
TEST_F(HeapTest, ASLR) {
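
The rewritten assertions state the expected semispace sizes directly in the units ComputeMaxSemiSpaceSize() returns (a KB count times the pointer multiplier) instead of converting through bytes, so the local KB constant goes away. The unit conversion, checked at compile time (constants assumed):

    #include <cstddef>

    constexpr size_t KB = 1024;
    constexpr size_t MB = KB * KB;

    // Old form: expected_bytes == ComputeMaxSemiSpaceSize(...) * KB.
    // New form compares KB counts directly, shrinking each constant by KB.
    static_assert(1 * MB / 2 / KB == 512, "1/2 MB == 512 KB");
    static_assert(2 * MB / KB == 2048, "2 MB == 2048 KB");
    static_assert(5 * MB / KB == 5120, "5 MB == 5120 KB");
    static_assert(8 * MB / KB == 8192, "8 MB == 8192 KB");
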
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
index 36d99a31ba..e42f22c4e1 100644
--- a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
@@ -202,7 +202,7 @@ TEST_F(ItemParallelJobTest, SimpleTaskWithNoItemsRuns) {
parallel_job_semaphore());
job.AddTask(new SimpleTask(i_isolate(), &did_run));
- job.Run(i_isolate()->async_counters());
+ job.Run();
EXPECT_TRUE(did_run);
}
@@ -214,7 +214,7 @@ TEST_F(ItemParallelJobTest, SimpleTaskWithSimpleItemRuns) {
job.AddItem(new ItemParallelJob::Item);
- job.Run(i_isolate()->async_counters());
+ job.Run();
EXPECT_TRUE(did_run);
}
@@ -244,7 +244,7 @@ TEST_F(ItemParallelJobTest, MoreTasksThanItems) {
job.AddItem(new SimpleItem);
}
- job.Run(i_isolate()->async_counters());
+ job.Run();
for (int i = 0; i < kNumTasks; i++) {
// Only the first kNumItems tasks should have been assigned a work item.
@@ -261,7 +261,7 @@ TEST_F(ItemParallelJobTest, SingleThreadProcessing) {
for (int i = 0; i < kItems; i++) {
job.AddItem(new SimpleItem(&was_processed[i]));
}
- job.Run(i_isolate()->async_counters());
+ job.Run();
for (int i = 0; i < kItems; i++) {
EXPECT_TRUE(was_processed[i]);
}
@@ -282,7 +282,7 @@ TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) {
job.AddTask(
new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done));
}
- job.Run(i_isolate()->async_counters());
+ job.Run();
for (int i = 0; i < kItemsAndTasks; i++) {
EXPECT_TRUE(was_processed[i]);
}
@@ -296,7 +296,7 @@ TEST_F(ItemParallelJobTest, DifferentItems) {
job.AddItem(new ItemA());
job.AddItem(new ItemB());
job.AddTask(new TaskForDifferentItems(i_isolate(), &item_a, &item_b));
- job.Run(i_isolate()->async_counters());
+ job.Run();
EXPECT_TRUE(item_a);
EXPECT_TRUE(item_b);
}
diff --git a/deps/v8/test/unittests/heap/marking-unittest.cc b/deps/v8/test/unittests/heap/marking-unittest.cc
index be26d4eebd..60aa28c4a9 100644
--- a/deps/v8/test/unittests/heap/marking-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-unittest.cc
@@ -6,15 +6,19 @@
#include "src/globals.h"
#include "src/heap/marking.h"
+#include "test/unittests/heap/bitmap-test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
+template <typename T>
+using MarkingTest = TestWithBitmap<T>;
-TEST(Marking, TransitionWhiteBlackWhite) {
- Bitmap* bitmap = reinterpret_cast<Bitmap*>(
- calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
+TYPED_TEST_SUITE(MarkingTest, BitmapTypes);
+
+TYPED_TEST(MarkingTest, TransitionWhiteBlackWhite) {
+ auto bitmap = this->bitmap();
const int kLocationsSize = 3;
int position[kLocationsSize] = {
Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
@@ -29,12 +33,10 @@ TEST(Marking, TransitionWhiteBlackWhite) {
CHECK(Marking::IsWhite(mark_bit));
CHECK(!Marking::IsImpossible(mark_bit));
}
- free(bitmap);
}
-TEST(Marking, TransitionWhiteGreyBlack) {
- Bitmap* bitmap = reinterpret_cast<Bitmap*>(
- calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
+TYPED_TEST(MarkingTest, TransitionWhiteGreyBlack) {
+ auto bitmap = this->bitmap();
const int kLocationsSize = 3;
int position[kLocationsSize] = {
Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
@@ -55,54 +57,7 @@ TEST(Marking, TransitionWhiteGreyBlack) {
CHECK(Marking::IsWhite(mark_bit));
CHECK(!Marking::IsImpossible(mark_bit));
}
- free(bitmap);
}
-TEST(Marking, SetAndClearRange) {
- Bitmap* bitmap = reinterpret_cast<Bitmap*>(
- calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
- for (int i = 0; i < 3; i++) {
- bitmap->SetRange(i, Bitmap::kBitsPerCell + i);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFFFFFu << i);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1u << i) - 1);
- bitmap->ClearRange(i, Bitmap::kBitsPerCell + i);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0x0u);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0x0u);
- }
- free(bitmap);
-}
-
-TEST(Marking, ClearMultipleRanges) {
- Bitmap* bitmap = reinterpret_cast<Bitmap*>(
- calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
- CHECK(bitmap->AllBitsClearInRange(0, Bitmap::kBitsPerCell * 3));
- bitmap->SetRange(0, Bitmap::kBitsPerCell * 3);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFFFFFu);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xFFFFFFFFu);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xFFFFFFFFu);
- CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell * 3));
- bitmap->ClearRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell);
- bitmap->ClearRange(Bitmap::kBitsPerCell,
- Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
- bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 8,
- Bitmap::kBitsPerCell * 2 + 16);
- bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 24, Bitmap::kBitsPerCell * 3);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFu);
- CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell / 2));
- CHECK(bitmap->AllBitsClearInRange(Bitmap::kBitsPerCell / 2,
- Bitmap::kBitsPerCell));
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xFFFF0000u);
- CHECK(
- bitmap->AllBitsSetInRange(Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2,
- 2 * Bitmap::kBitsPerCell));
- CHECK(bitmap->AllBitsClearInRange(
- Bitmap::kBitsPerCell, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2));
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xFF00FFu);
- CHECK(bitmap->AllBitsSetInRange(2 * Bitmap::kBitsPerCell,
- 2 * Bitmap::kBitsPerCell + 8));
- CHECK(bitmap->AllBitsClearInRange(2 * Bitmap::kBitsPerCell + 24,
- Bitmap::kBitsPerCell * 3));
- free(bitmap);
-}
} // namespace internal
} // namespace v8
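
Besides moving SetAndClearRange and ClearMultipleRanges into the shared bitmap suite, this change retires the calloc/free pair that every test body had to manage; TestWithBitmap owns the backing store, removing the per-test allocation boilerplate and the risk of a missed free(). A sketch of the same ownership pattern with standard RAII (the Bitmap type is a stand-in):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <memory>

    struct Bitmap { static constexpr std::size_t kSize = 32; };

    class BitmapHolder {
     public:
      BitmapHolder() : memory_(new uint8_t[Bitmap::kSize]) {
        std::memset(memory_.get(), 0, Bitmap::kSize);
      }
      Bitmap* bitmap() { return reinterpret_cast<Bitmap*>(memory_.get()); }

     private:
      // Released automatically when the holder goes out of scope.
      std::unique_ptr<uint8_t[]> memory_;
    };
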
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index de4bd39e1e..ecfa4b964f 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -84,38 +84,38 @@ TEST_F(SpacesTest, WriteBarrierIsMarking) {
EXPECT_FALSE(slim_chunk->IsMarking());
}
-TEST_F(SpacesTest, WriteBarrierInNewSpaceToSpace) {
+TEST_F(SpacesTest, WriteBarrierInYoungGenerationToSpace) {
const size_t kSizeOfMemoryChunk = sizeof(MemoryChunk);
char memory[kSizeOfMemoryChunk];
memset(&memory, 0, kSizeOfMemoryChunk);
MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
heap_internals::MemoryChunk* slim_chunk =
reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
- EXPECT_FALSE(chunk->InNewSpace());
- EXPECT_FALSE(slim_chunk->InNewSpace());
- chunk->SetFlag(MemoryChunk::IN_TO_SPACE);
- EXPECT_TRUE(chunk->InNewSpace());
- EXPECT_TRUE(slim_chunk->InNewSpace());
- chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
- EXPECT_FALSE(chunk->InNewSpace());
- EXPECT_FALSE(slim_chunk->InNewSpace());
+ EXPECT_FALSE(chunk->InYoungGeneration());
+ EXPECT_FALSE(slim_chunk->InYoungGeneration());
+ chunk->SetFlag(MemoryChunk::TO_PAGE);
+ EXPECT_TRUE(chunk->InYoungGeneration());
+ EXPECT_TRUE(slim_chunk->InYoungGeneration());
+ chunk->ClearFlag(MemoryChunk::TO_PAGE);
+ EXPECT_FALSE(chunk->InYoungGeneration());
+ EXPECT_FALSE(slim_chunk->InYoungGeneration());
}
-TEST_F(SpacesTest, WriteBarrierInNewSpaceFromSpace) {
+TEST_F(SpacesTest, WriteBarrierInYoungGenerationFromSpace) {
const size_t kSizeOfMemoryChunk = sizeof(MemoryChunk);
char memory[kSizeOfMemoryChunk];
memset(&memory, 0, kSizeOfMemoryChunk);
MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
heap_internals::MemoryChunk* slim_chunk =
reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
- EXPECT_FALSE(chunk->InNewSpace());
- EXPECT_FALSE(slim_chunk->InNewSpace());
- chunk->SetFlag(MemoryChunk::IN_FROM_SPACE);
- EXPECT_TRUE(chunk->InNewSpace());
- EXPECT_TRUE(slim_chunk->InNewSpace());
- chunk->ClearFlag(MemoryChunk::IN_FROM_SPACE);
- EXPECT_FALSE(chunk->InNewSpace());
- EXPECT_FALSE(slim_chunk->InNewSpace());
+ EXPECT_FALSE(chunk->InYoungGeneration());
+ EXPECT_FALSE(slim_chunk->InYoungGeneration());
+ chunk->SetFlag(MemoryChunk::FROM_PAGE);
+ EXPECT_TRUE(chunk->InYoungGeneration());
+ EXPECT_TRUE(slim_chunk->InYoungGeneration());
+ chunk->ClearFlag(MemoryChunk::FROM_PAGE);
+ EXPECT_FALSE(chunk->InYoungGeneration());
+ EXPECT_FALSE(slim_chunk->InYoungGeneration());
}
TEST_F(SpacesTest, CodeRangeAddressReuse) {
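
The renames above track the heap's shift from "new space" terminology to "young generation", with per-page TO_PAGE/FROM_PAGE flags replacing IN_TO_SPACE/IN_FROM_SPACE; as the two tests check bit by bit, setting either semispace flag makes InYoungGeneration() true. A reduced model of the predicate being exercised (implementation assumed, not V8's actual chunk layout):

    #include <cstdint>

    enum ChunkFlag : uint32_t {
      TO_PAGE = 1u << 0,
      FROM_PAGE = 1u << 1,
    };

    struct Chunk {
      uint32_t flags = 0;
      void SetFlag(ChunkFlag f) { flags |= f; }
      void ClearFlag(ChunkFlag f) { flags &= ~static_cast<uint32_t>(f); }
      // Either semispace bit marks the page as part of the young generation.
      bool InYoungGeneration() const {
        return (flags & (TO_PAGE | FROM_PAGE)) != 0;
      }
    };
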
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index ed53b8b0d2..21051c6da5 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -7,6 +7,7 @@
#include "src/v8.h"
#include "src/ast/scopes.h"
+#include "src/hash-seed-inl.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-jump-table.h"
@@ -34,7 +35,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
BytecodeArrayBuilder builder(zone(), 1, 131, &feedback_spec);
Factory* factory = isolate()->factory();
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
DeclarationScope scope(zone(), &ast_factory);
CHECK_EQ(builder.locals_count(), 131);
@@ -260,7 +261,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CompareOperation(Token::Value::GTE, reg, 6)
.CompareTypeOf(TestTypeOfFlags::LiteralFlag::kNumber)
.CompareOperation(Token::Value::INSTANCEOF, reg, 7)
- .CompareOperation(Token::Value::IN, reg)
+ .CompareOperation(Token::Value::IN, reg, 8)
.CompareReference(reg)
.CompareUndetectable()
.CompareUndefined()
@@ -279,10 +280,12 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Short jumps with Imm8 operands
{
- BytecodeLabel start, after_jump1, after_jump2, after_jump3, after_jump4,
+ BytecodeLoopHeader loop_header;
+ BytecodeLabel after_jump1, after_jump2, after_jump3, after_jump4,
after_jump5, after_jump6, after_jump7, after_jump8, after_jump9,
- after_jump10;
- builder.Bind(&start)
+ after_jump10, after_loop;
+ builder.JumpIfNull(&after_loop)
+ .Bind(&loop_header)
.Jump(&after_jump1)
.Bind(&after_jump1)
.JumpIfNull(&after_jump2)
@@ -303,14 +306,16 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.Bind(&after_jump9)
.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &after_jump10)
.Bind(&after_jump10)
- .JumpLoop(&start, 0);
+ .JumpLoop(&loop_header, 0)
+ .Bind(&after_loop);
}
- // Longer jumps with constant operands
BytecodeLabel end[10];
{
+ // Longer jumps with constant operands
BytecodeLabel after_jump;
- builder.Jump(&end[0])
+ builder.JumpIfNull(&after_jump)
+ .Jump(&end[0])
.Bind(&after_jump)
.JumpIfTrue(ToBooleanMode::kConvertToBoolean, &end[1])
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &end[2])
@@ -336,10 +341,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit throw and re-throw in its own basic block so that the rest of the
// code isn't omitted due to being dead.
- BytecodeLabel after_throw;
- builder.Throw().Bind(&after_throw);
- BytecodeLabel after_rethrow;
- builder.ReThrow().Bind(&after_rethrow);
+ BytecodeLabel after_throw, after_rethrow;
+ builder.JumpIfNull(&after_throw).Throw().Bind(&after_throw);
+ builder.JumpIfNull(&after_rethrow).ReThrow().Bind(&after_rethrow);
builder.ForInEnumerate(reg)
.ForInPrepare(triple, 1)
@@ -413,10 +417,10 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.Debugger();
// Emit abort bytecode.
- {
- BytecodeLabel after;
- builder.Abort(AbortReason::kOperandIsASmi).Bind(&after);
- }
+ BytecodeLabel after_abort;
+ builder.JumpIfNull(&after_abort)
+ .Abort(AbortReason::kOperandIsASmi)
+ .Bind(&after_abort);
// Insert dummy ops to force longer jumps.
for (int i = 0; i < 256; i++) {
@@ -439,7 +443,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
ast_factory.Internalize(isolate());
Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
CHECK_EQ(the_array->frame_size(),
- builder.total_register_count() * kPointerSize);
+ builder.total_register_count() * kSystemPointerSize);
// Build scorecard of bytecodes encountered in the BytecodeArray.
std::vector<int> scorecard(Bytecodes::ToByte(Bytecode::kLast) + 1);
@@ -504,7 +508,7 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
int total_registers = locals + temps;
- CHECK_EQ(the_array->frame_size(), total_registers * kPointerSize);
+ CHECK_EQ(the_array->frame_size(), total_registers * kSystemPointerSize);
}
}
}
@@ -534,7 +538,7 @@ TEST_F(BytecodeArrayBuilderTest, Parameters) {
TEST_F(BytecodeArrayBuilderTest, Constants) {
BytecodeArrayBuilder builder(zone(), 1, 0);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
double heap_num_1 = 3.14;
double heap_num_2 = 5.2;
@@ -567,10 +571,11 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
Register reg(0);
BytecodeLabel far0, far1, far2, far3, far4;
BytecodeLabel near0, near1, near2, near3, near4;
- BytecodeLabel after_jump0, after_jump1;
+ BytecodeLabel after_jump_near0, after_jump_far0;
- builder.Jump(&near0)
- .Bind(&after_jump0)
+ builder.JumpIfNull(&after_jump_near0)
+ .Jump(&near0)
+ .Bind(&after_jump_near0)
.CompareOperation(Token::Value::EQ, reg, 1)
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &near1)
.CompareOperation(Token::Value::EQ, reg, 2)
@@ -584,8 +589,9 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
.Bind(&near2)
.Bind(&near3)
.Bind(&near4)
+ .JumpIfNull(&after_jump_far0)
.Jump(&far0)
- .Bind(&after_jump1)
+ .Bind(&after_jump_far0)
.CompareOperation(Token::Value::EQ, reg, 3)
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &far1)
.CompareOperation(Token::Value::EQ, reg, 4)
@@ -601,9 +607,13 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
builder.Return();
Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
- DCHECK_EQ(array->length(), 44 + kFarJumpDistance - 22 + 1);
+ DCHECK_EQ(array->length(), 48 + kFarJumpDistance - 22 + 1);
BytecodeArrayIterator iterator(array);
+
+ // Ignore JumpIfNull operation.
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 22);
iterator.Advance();
@@ -636,6 +646,9 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 2);
iterator.Advance();
+ // Ignore JumpIfNull operation.
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpConstant);
CHECK_EQ(iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance));
@@ -681,11 +694,22 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
Register reg(0);
- BytecodeLabel label0;
- builder.Bind(&label0).JumpLoop(&label0, 0);
+ BytecodeLabel end;
+ builder.JumpIfNull(&end);
+
+ BytecodeLabel after_loop;
+ // Conditional jump to force the code after the JumpLoop to be live.
+ // Technically this jump is illegal because it's jumping into the middle of
+ // the subsequent loops, but that's ok for this unit test.
+ BytecodeLoopHeader loop_header;
+ builder.JumpIfNull(&after_loop)
+ .Bind(&loop_header)
+ .JumpLoop(&loop_header, 0)
+ .Bind(&after_loop);
for (int i = 0; i < 42; i++) {
- BytecodeLabel after_jump;
- builder.JumpLoop(&label0, 0).Bind(&after_jump);
+ BytecodeLabel after_loop;
+ // Conditional jump to force the code after the JumpLoop to be live.
+ builder.JumpIfNull(&after_loop).JumpLoop(&loop_header, 0).Bind(&after_loop);
}
// Add padding to force wide backwards jumps.
@@ -693,21 +717,28 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
builder.Debugger();
}
- builder.JumpLoop(&label0, 0);
- BytecodeLabel end;
+ builder.JumpLoop(&loop_header, 0);
builder.Bind(&end);
builder.Return();
Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
BytecodeArrayIterator iterator(array);
+ // Ignore the JumpIfNull to the end
+ iterator.Advance();
+ // Ignore the JumpIfNull to after the first JumpLoop
+ iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 0);
iterator.Advance();
for (unsigned i = 0; i < 42; i++) {
+ // Ignore the JumpIfNull to after the JumpLoop
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- // offset of 3 (because kJumpLoop takes two immediate operands)
- CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), i * 3 + 3);
+ // offset of 5 (because kJumpLoop takes two immediate operands and
+ // JumpIfNull takes 1)
+ CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), i * 5 + 5);
iterator.Advance();
}
// Check padding to force wide backwards jumps.
@@ -717,7 +748,7 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
}
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 386);
+ CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 42 * 5 + 256 + 4);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
iterator.Advance();
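
The new offsets are straightforward to reproduce: each loop iteration now emits a JumpIfNull (opcode plus one 8-bit immediate, 2 bytes) followed by a JumpLoop (opcode plus two 8-bit immediates, 3 bytes), so the i-th backward jump spans i * 5 + 5 bytes where it used to span i * 3 + 3. After the 42 iterations and the 256 one-byte Debugger padding bytecodes, the final wide JumpLoop spans 42 * 5 + 256 + 4 = 470 bytes, replacing the old 42 * 3 + 256 + 4 = 386; by our reading the trailing 4 covers the initial 3-byte JumpLoop plus the wide-operand prefix byte.
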
@@ -820,71 +851,6 @@ TEST_F(BytecodeArrayBuilderTest, WideSwitch) {
CHECK(iterator.done());
}
-TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
- BytecodeArrayBuilder builder(zone(), 1, 0);
-
- // Labels can only have 1 forward reference, but
- // can be referred to multiple times once bound.
- BytecodeLabel label, after_jump0, after_jump1;
-
- builder.Jump(&label)
- .Bind(&label)
- .JumpLoop(&label, 0)
- .Bind(&after_jump0)
- .JumpLoop(&label, 0)
- .Bind(&after_jump1)
- .Return();
-
- Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
- BytecodeArrayIterator iterator(array);
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
- CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 2);
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
- CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 0);
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
- CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 3);
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- iterator.Advance();
- CHECK(iterator.done());
-}
-
-
-TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
- static const int kRepeats = 3;
-
- BytecodeArrayBuilder builder(zone(), 1, 0);
- for (int i = 0; i < kRepeats; i++) {
- BytecodeLabel label, after_jump0, after_jump1;
- builder.Jump(&label)
- .Bind(&label)
- .JumpLoop(&label, 0)
- .Bind(&after_jump0)
- .JumpLoop(&label, 0)
- .Bind(&after_jump1);
- }
- builder.Return();
-
- Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
- BytecodeArrayIterator iterator(array);
- for (int i = 0; i < kRepeats; i++) {
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
- CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 2);
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
- CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 0);
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
- CHECK_EQ(iterator.GetUnsignedImmediateOperand(0), 3);
- iterator.Advance();
- }
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- iterator.Advance();
- CHECK(iterator.done());
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index ec70605dde..0e72e2ec8d 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/hash-seed-inl.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects-inl.h"
@@ -27,7 +28,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 2e2d92628f..6ec19fb726 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/hash-seed-inl.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-random-iterator.h"
#include "src/objects-inl.h"
@@ -27,7 +28,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
@@ -81,7 +82,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
@@ -135,7 +136,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
@@ -193,7 +194,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
@@ -252,7 +253,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
@@ -437,7 +438,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
@@ -716,7 +717,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 7c01228936..35cc3b3c28 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -49,8 +49,8 @@ class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
void WriteJump(Bytecode bytecode, BytecodeLabel* label,
BytecodeSourceInfo info = BytecodeSourceInfo());
- void WriteJumpLoop(Bytecode bytecode, BytecodeLabel* label, int depth,
- BytecodeSourceInfo info = BytecodeSourceInfo());
+ void WriteJumpLoop(Bytecode bytecode, BytecodeLoopHeader* loop_header,
+ int depth, BytecodeSourceInfo info = BytecodeSourceInfo());
BytecodeArrayWriter* writer() { return &bytecode_array_writer_; }
ZoneVector<unsigned char>* bytecodes() { return writer()->bytecodes(); }
@@ -105,10 +105,11 @@ void BytecodeArrayWriterUnittest::WriteJump(Bytecode bytecode,
}
void BytecodeArrayWriterUnittest::WriteJumpLoop(Bytecode bytecode,
- BytecodeLabel* label, int depth,
+ BytecodeLoopHeader* loop_header,
+ int depth,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, 0, depth, info);
- writer()->WriteJump(&node, label);
+ writer()->WriteJumpLoop(&node, loop_header);
}
TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
@@ -195,7 +196,8 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
{0, 30, false}, {1, 42, true}, {3, 42, false}, {6, 68, true},
{18, 63, true}, {32, 54, false}, {37, 85, true}, {46, 85, true}};
- BytecodeLabel back_jump, jump_for_in, jump_end_1, jump_end_2, jump_end_3;
+ BytecodeLoopHeader loop_header;
+ BytecodeLabel jump_for_in, jump_end_1, jump_end_2, jump_end_3;
Write(Bytecode::kStackCheck, {30, false});
Write(Bytecode::kLdaConstant, U8(0), {42, true});
@@ -206,7 +208,7 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
Write(Bytecode::kForInPrepare, R(3), U8(4));
Write(Bytecode::kLdaZero);
Write(Bytecode::kStar, R(7));
- writer()->BindLabel(&back_jump);
+ writer()->BindLoopHeader(&loop_header);
Write(Bytecode::kForInContinue, R(7), R(6), {63, true});
WriteJump(Bytecode::kJumpIfFalse, &jump_end_3);
Write(Bytecode::kForInNext, R(3), R(7), R(4), U8(1));
@@ -219,7 +221,7 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
writer()->BindLabel(&jump_for_in);
Write(Bytecode::kForInStep, R(7));
Write(Bytecode::kStar, R(7));
- WriteJumpLoop(Bytecode::kJumpLoop, &back_jump, 0);
+ WriteJumpLoop(Bytecode::kJumpLoop, &loop_header, 0);
writer()->BindLabel(&jump_end_1);
writer()->BindLabel(&jump_end_2);
writer()->BindLabel(&jump_end_3);
@@ -328,7 +330,9 @@ TEST_F(BytecodeArrayWriterUnittest, DeadcodeElimination) {
Write(Bytecode::kLdaSmi, 127); // Dead code.
WriteJump(Bytecode::kJumpIfFalse, &after_conditional_jump); // Dead code.
writer()->BindLabel(&after_jump);
- writer()->BindLabel(&after_conditional_jump);
+ // We would bind the after_conditional_jump label here, but the jump to it is
+ // dead.
+ CHECK(!after_conditional_jump.has_referrer_jump());
Write(Bytecode::kLdaSmi, 127, {65, true});
WriteJump(Bytecode::kJumpIfFalse, &after_return);
Write(Bytecode::kReturn, {75, true});
diff --git a/deps/v8/test/unittests/interpreter/bytecode-utils.h b/deps/v8/test/unittests/interpreter/bytecode-utils.h
index 401884559e..912e9dcb7b 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-utils.h
+++ b/deps/v8/test/unittests/interpreter/bytecode-utils.h
@@ -33,7 +33,7 @@ namespace interpreter {
#define U8(i) static_cast<uint8_t>(i)
#define REG_OPERAND(i) \
- (InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize - (i))
+ (InterpreterFrameConstants::kRegisterFileFromFp / kSystemPointerSize - (i))
#define R8(i) static_cast<uint8_t>(REG_OPERAND(i))
#define R16(i) U16(REG_OPERAND(i))
#define R32(i) U32(REG_OPERAND(i))
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index 16b4e80489..6f5a11c0c7 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -89,7 +89,7 @@ TEST(OperandScaling, ScalableAndNonScalable) {
1 + 2 + 2 * scale);
CHECK_EQ(Bytecodes::Size(Bytecode::kCreateObjectLiteral, operand_scale),
1 + 2 * scale + 1);
- CHECK_EQ(Bytecodes::Size(Bytecode::kTestIn, operand_scale), 1 + scale);
+ CHECK_EQ(Bytecodes::Size(Bytecode::kTestIn, operand_scale), 1 + 2 * scale);
}
}
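
The updated expectation reflects kTestIn taking a second scalable operand, presumably a feedback-slot index alongside the register, so its encoded size is the opcode byte plus two scaled operands: 3 bytes at single scale, 5 at double, and 9 at quadruple, where the old single-operand form was 2, 3, and 5.
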
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index 55f1cacf56..894aee16a4 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -6,6 +6,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/handles-inl.h"
+#include "src/hash-seed-inl.h"
#include "src/heap/factory.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/isolate.h"
@@ -34,7 +35,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
CanonicalHandleScope canonical(isolate());
ConstantArrayBuilder builder(zone());
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
for (size_t i = 0; i < k16BitCapacity; i++) {
builder.Insert(i + 0.5);
}
@@ -84,7 +85,7 @@ TEST_F(ConstantArrayBuilderTest, ToLargeFixedArrayWithReservations) {
CanonicalHandleScope canonical(isolate());
ConstantArrayBuilder builder(zone());
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
static const int kNumberOfElements = 37373;
for (int i = 0; i < kNumberOfElements; i++) {
builder.CommitReservedEntry(builder.CreateReservedEntry(), Smi::FromInt(i));
@@ -104,7 +105,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
for (size_t reserved = 1; reserved < k8BitCapacity; reserved *= 3) {
ConstantArrayBuilder builder(zone());
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK_EQ(operand_size, OperandSize::kByte);
@@ -172,7 +173,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
for (size_t reserved = 1; reserved < k8BitCapacity; reserved *= 3) {
ConstantArrayBuilder builder(zone());
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
for (size_t i = 0; i < k8BitCapacity; i++) {
builder.CommitReservedEntry(builder.CreateReservedEntry(),
Smi::FromInt(static_cast<int>(i)));
@@ -217,7 +218,7 @@ TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
CanonicalHandleScope canonical(isolate());
ConstantArrayBuilder builder(zone());
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK_EQ(OperandSize::kByte, operand_size);
@@ -283,7 +284,7 @@ TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
static int k8BitCapacity = ConstantArrayBuilder::k8BitCapacity;
ConstantArrayBuilder builder(zone());
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
for (int i = 0; i < kNumberOfHoles; ++i) {
CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kByte);
}
@@ -316,7 +317,7 @@ TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
CanonicalHandleScope canonical(isolate());
ConstantArrayBuilder builder(zone());
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
- isolate()->heap()->HashSeed());
+ HashSeed(isolate()));
for (int i = 0; i < 256; i++) {
CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kByte);
}
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index d5f84952c3..bf240e41a4 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -72,8 +72,9 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsWordNot(
const Matcher<Node*>& value_matcher) {
- return kPointerSize == 8 ? IsWord64Xor(value_matcher, c::IsInt64Constant(-1))
- : IsWord32Xor(value_matcher, c::IsInt32Constant(-1));
+ return kSystemPointerSize == 8
+ ? IsWord64Xor(value_matcher, c::IsInt64Constant(-1))
+ : IsWord32Xor(value_matcher, c::IsInt32Constant(-1));
}
Matcher<Node*>
@@ -291,10 +292,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadRegisterOperand(
int offset, OperandSize operand_size) {
Matcher<compiler::Node*> reg_operand = IsChangeInt32ToIntPtr(
IsSignedOperand(offset, operand_size, LoadSensitivity::kSafe));
- return IsLoad(
- MachineType::AnyTagged(), c::IsLoadParentFramePointer(),
- c::IsWordShl(reg_operand, c::IsIntPtrConstant(kPointerSizeLog2)),
- LoadSensitivity::kCritical);
+ return IsBitcastWordToTagged(IsLoad(
+ MachineType::Pointer(), c::IsLoadParentFramePointer(),
+ c::IsWordShl(reg_operand, c::IsIntPtrConstant(kSystemPointerSizeLog2)),
+ LoadSensitivity::kCritical));
}
TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
@@ -321,7 +322,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
MachineType::Pointer(),
c::IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
c::IsWordShl(target_bytecode_matcher,
- c::IsIntPtrConstant(kPointerSizeLog2)));
+ c::IsIntPtrConstant(kSystemPointerSizeLog2)));
EXPECT_THAT(
tail_call_node,
@@ -418,9 +419,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetContext) {
InterpreterAssemblerForTest m(&state, bytecode);
EXPECT_THAT(
m.GetContext(),
- m.IsLoad(MachineType::AnyTagged(), c::IsLoadParentFramePointer(),
- c::IsIntPtrConstant(Register::current_context().ToOperand()
- << kPointerSizeLog2)));
+ IsBitcastWordToTagged(m.IsLoad(
+ MachineType::Pointer(), c::IsLoadParentFramePointer(),
+ c::IsIntPtrConstant(Register::current_context().ToOperand() *
+ kSystemPointerSize))));
}
}
@@ -457,7 +459,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
MachineType::AnyTagged(), constant_pool_matcher,
c::IsIntPtrAdd(
c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- c::IsWordShl(index, c::IsIntPtrConstant(kPointerSizeLog2))),
+ c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
LoadSensitivity::kCritical));
}
}
@@ -533,10 +535,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
InterpreterAssemblerForTest m(&state, bytecode);
Node* feedback_vector = m.LoadFeedbackVector();
- Matcher<Node*> load_function_matcher =
- m.IsLoad(MachineType::AnyTagged(), c::IsLoadParentFramePointer(),
- c::IsIntPtrConstant(Register::function_closure().ToOperand()
- << kPointerSizeLog2));
+ Matcher<Node*> load_function_matcher = IsBitcastWordToTagged(
+ m.IsLoad(MachineType::Pointer(), c::IsLoadParentFramePointer(),
+ c::IsIntPtrConstant(Register::function_closure().ToOperand() *
+ kSystemPointerSize)));
Matcher<Node*> load_vector_cell_matcher = m.IsLoad(
MachineType::AnyTagged(), load_function_matcher,
c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
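
Two threads run through these matcher updates. First, interpreter frame slots are now loaded as MachineType::Pointer and rewrapped with BitcastWordToTagged, since a frame slot holds a full system pointer even where heap fields may be compressed; that is also why constant-pool indexing scales by kTaggedSizeLog2 while frame offsets scale by kSystemPointerSize. Second, the offset arithmetic switches from a shift to a multiply, sketched below with the names from the hunk above; ToOperand() can be negative for frame-pointer-relative registers, and left-shifting a negative signed value is undefined behavior before C++20, so the multiply is the safer spelling:

    // Byte offset of a register relative to the frame pointer (sketch):
    intptr_t offset = Register::current_context().ToOperand() * kSystemPointerSize;
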
diff --git a/deps/v8/test/unittests/microtask-queue-unittest.cc b/deps/v8/test/unittests/microtask-queue-unittest.cc
index cc2c7f0de7..f39a1558c6 100644
--- a/deps/v8/test/unittests/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/microtask-queue-unittest.cc
@@ -10,7 +10,11 @@
#include <vector>
#include "src/heap/factory.h"
+#include "src/objects-inl.h"
#include "src/objects/foreign.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-objects-inl.h"
+#include "src/objects/promise-inl.h"
#include "src/visitors.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -25,7 +29,29 @@ void RunStdFunction(void* data) {
(*f)();
}
-class MicrotaskQueueTest : public TestWithNativeContext {
+template <typename TMixin>
+class WithFinalizationGroupMixin : public TMixin {
+ public:
+ WithFinalizationGroupMixin() {
+ FLAG_harmony_weak_refs = true;
+ FLAG_expose_gc = true;
+ }
+
+ private:
+ SaveFlags save_flags_;
+
+ DISALLOW_COPY_AND_ASSIGN(WithFinalizationGroupMixin);
+};
+
+using TestWithNativeContextAndFinalizationGroup = //
+ WithInternalIsolateMixin< //
+ WithContextMixin< //
+ WithFinalizationGroupMixin< //
+ WithIsolateScopeMixin< //
+ WithSharedIsolateMixin< //
+ ::testing::Test>>>>>;
+
+class MicrotaskQueueTest : public TestWithNativeContextAndFinalizationGroup {
public:
template <typename F>
Handle<Microtask> NewMicrotask(F&& f) {
@@ -55,6 +81,11 @@ class MicrotaskQueueTest : public TestWithNativeContext {
microtask_queue_ = nullptr;
}
+ template <size_t N>
+ Handle<Name> NameFromChars(const char (&chars)[N]) {
+ return isolate()->factory()->NewStringFromStaticChars(chars);
+ }
+
private:
std::unique_ptr<MicrotaskQueue> microtask_queue_;
};
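
The fixture above is assembled by stacking class-template mixins, each parameterized over the next base, so that WithFinalizationGroupMixin can flip its flags in the constructor at the right point in the construction chain. A generic, self-contained sketch of the pattern (plain C++, not the actual V8 test-utils types):

    class TestBase {
     public:
      int value() const { return value_; }
     protected:
      int value_ = 0;
    };

    template <typename TMixin>
    class WithIncrement : public TMixin {
     public:
      WithIncrement() { this->value_ += 1; }
    };

    template <typename TMixin>
    class WithDouble : public TMixin {
     public:
      WithDouble() { this->value_ *= 2; }
    };

    // Bases construct innermost-first: TestBase, then WithIncrement, then
    // WithDouble, so value() == (0 + 1) * 2 == 2.
    using Fixture = WithDouble<WithIncrement<TestBase>>;

    int main() { return Fixture().value() == 2 ? 0 : 1; }
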
@@ -88,7 +119,7 @@ TEST_F(MicrotaskQueueTest, EnqueueAndRun) {
}));
EXPECT_EQ(MicrotaskQueue::kMinimumCapacity, microtask_queue()->capacity());
EXPECT_EQ(1, microtask_queue()->size());
- microtask_queue()->RunMicrotasks(isolate());
+ EXPECT_EQ(1, microtask_queue()->RunMicrotasks(isolate()));
EXPECT_TRUE(ran);
EXPECT_EQ(0, microtask_queue()->size());
}
@@ -100,7 +131,7 @@ TEST_F(MicrotaskQueueTest, BufferGrowth) {
// Enqueue and flush the queue first to have non-zero |start_|.
microtask_queue()->EnqueueMicrotask(
*NewMicrotask([&count] { EXPECT_EQ(0, count++); }));
- microtask_queue()->RunMicrotasks(isolate());
+ EXPECT_EQ(1, microtask_queue()->RunMicrotasks(isolate()));
EXPECT_LT(0, microtask_queue()->capacity());
EXPECT_EQ(0, microtask_queue()->size());
@@ -122,7 +153,8 @@ TEST_F(MicrotaskQueueTest, BufferGrowth) {
EXPECT_EQ(MicrotaskQueue::kMinimumCapacity + 1, microtask_queue()->size());
// Run all pending Microtasks to ensure they run in the proper order.
- microtask_queue()->RunMicrotasks(isolate());
+ EXPECT_EQ(MicrotaskQueue::kMinimumCapacity + 1,
+ microtask_queue()->RunMicrotasks(isolate()));
EXPECT_EQ(MicrotaskQueue::kMinimumCapacity + 2, count);
}
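
These assertions lean on RunMicrotasks now returning the number of microtasks it actually executed, so a test can check the count inline:

    EXPECT_EQ(1, microtask_queue()->RunMicrotasks(isolate()));

The DetachGlobal_PromiseResolveThenableJobTask test further down uses the same return value to tell "ran" (which would chain a second task, yielding 2) apart from "cancelled".
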
@@ -163,7 +195,8 @@ TEST_F(MicrotaskQueueTest, VisitRoot) {
for (int i = 0; i < MicrotaskQueue::kMinimumCapacity / 2 + 1; ++i) {
microtask_queue()->EnqueueMicrotask(*NewMicrotask([] {}));
}
- microtask_queue()->RunMicrotasks(isolate());
+ EXPECT_EQ(MicrotaskQueue::kMinimumCapacity / 2 + 1,
+ microtask_queue()->RunMicrotasks(isolate()));
std::vector<Object> expected;
for (int i = 0; i < MicrotaskQueue::kMinimumCapacity / 2 + 1; ++i) {
@@ -183,5 +216,287 @@ TEST_F(MicrotaskQueueTest, VisitRoot) {
EXPECT_EQ(expected, actual);
}
+TEST_F(MicrotaskQueueTest, PromiseHandlerContext) {
+ Local<v8::Context> v8_context2 = v8::Context::New(v8_isolate());
+ Local<v8::Context> v8_context3 = v8::Context::New(v8_isolate());
+ Local<v8::Context> v8_context4 = v8::Context::New(v8_isolate());
+ Handle<Context> context2 = Utils::OpenHandle(*v8_context2, isolate());
+ Handle<Context> context3 = Utils::OpenHandle(*v8_context3, isolate());
+ Handle<Context> context4 = Utils::OpenHandle(*v8_context4, isolate());
+ context2->native_context()->set_microtask_queue(microtask_queue());
+ context3->native_context()->set_microtask_queue(microtask_queue());
+ context4->native_context()->set_microtask_queue(microtask_queue());
+
+ Handle<JSFunction> handler;
+ Handle<JSProxy> proxy;
+ Handle<JSProxy> revoked_proxy;
+ Handle<JSBoundFunction> bound;
+
+ // Create a JSFunction on |context2|.
+ {
+ v8::Context::Scope scope(v8_context2);
+ handler = RunJS<JSFunction>("()=>{}");
+ EXPECT_EQ(*context2,
+ *JSReceiver::GetContextForMicrotask(handler).ToHandleChecked());
+ }
+
+ // Create a JSProxy on |context3|.
+ {
+ v8::Context::Scope scope(v8_context3);
+ ASSERT_TRUE(
+ v8_context3->Global()
+ ->Set(v8_context3, NewString("handler"), Utils::ToLocal(handler))
+ .FromJust());
+ proxy = RunJS<JSProxy>("new Proxy(handler, {})");
+ revoked_proxy = RunJS<JSProxy>(
+ "let {proxy, revoke} = Proxy.revocable(handler, {});"
+ "revoke();"
+ "proxy");
+ EXPECT_EQ(*context2,
+ *JSReceiver::GetContextForMicrotask(proxy).ToHandleChecked());
+ EXPECT_TRUE(JSReceiver::GetContextForMicrotask(revoked_proxy).is_null());
+ }
+
+ // Create a JSBoundFunction on |context4|.
+ // Note that its CreationContext and ContextForTaskCancellation are |context2|.
+ {
+ v8::Context::Scope scope(v8_context4);
+ ASSERT_TRUE(
+ v8_context4->Global()
+ ->Set(v8_context4, NewString("handler"), Utils::ToLocal(handler))
+ .FromJust());
+ bound = RunJS<JSBoundFunction>("handler.bind()");
+ EXPECT_EQ(*context2,
+ *JSReceiver::GetContextForMicrotask(bound).ToHandleChecked());
+ }
+
+ // Give the objects to the main context.
+ SetGlobalProperty("handler", Utils::ToLocal(handler));
+ SetGlobalProperty("proxy", Utils::ToLocal(proxy));
+ SetGlobalProperty("revoked_proxy", Utils::ToLocal(revoked_proxy));
+ SetGlobalProperty("bound", Utils::ToLocal(Handle<JSReceiver>::cast(bound)));
+ RunJS(
+ "Promise.resolve().then(handler);"
+ "Promise.reject().catch(proxy);"
+ "Promise.resolve().then(revoked_proxy);"
+ "Promise.resolve().then(bound);");
+
+ ASSERT_EQ(4, microtask_queue()->size());
+ Handle<Microtask> microtask1(microtask_queue()->get(0), isolate());
+ ASSERT_TRUE(microtask1->IsPromiseFulfillReactionJobTask());
+ EXPECT_EQ(*context2,
+ Handle<PromiseFulfillReactionJobTask>::cast(microtask1)->context());
+
+ Handle<Microtask> microtask2(microtask_queue()->get(1), isolate());
+ ASSERT_TRUE(microtask2->IsPromiseRejectReactionJobTask());
+ EXPECT_EQ(*context2,
+ Handle<PromiseRejectReactionJobTask>::cast(microtask2)->context());
+
+ Handle<Microtask> microtask3(microtask_queue()->get(2), isolate());
+ ASSERT_TRUE(microtask3->IsPromiseFulfillReactionJobTask());
+ // |microtask3| corresponds to a PromiseReaction for |revoked_proxy|.
+ // As |revoked_proxy| doesn't have a context, the current context should be
+ // used as the fallback context.
+ EXPECT_EQ(*native_context(),
+ Handle<PromiseFulfillReactionJobTask>::cast(microtask3)->context());
+
+ Handle<Microtask> microtask4(microtask_queue()->get(3), isolate());
+ ASSERT_TRUE(microtask4->IsPromiseFulfillReactionJobTask());
+ EXPECT_EQ(*context2,
+ Handle<PromiseFulfillReactionJobTask>::cast(microtask4)->context());
+
+ v8_context4->DetachGlobal();
+ v8_context3->DetachGlobal();
+ v8_context2->DetachGlobal();
+}
+
+TEST_F(MicrotaskQueueTest, DetachGlobal_Enqueue) {
+ EXPECT_EQ(0, microtask_queue()->size());
+
+ // Detach MicrotaskQueue from the current context.
+ context()->DetachGlobal();
+
+ // No microtask should be enqueued after the DetachGlobal call.
+ EXPECT_EQ(0, microtask_queue()->size());
+ RunJS("Promise.resolve().then(()=>{})");
+ EXPECT_EQ(0, microtask_queue()->size());
+}
+
+TEST_F(MicrotaskQueueTest, DetachGlobal_Run) {
+ EXPECT_EQ(0, microtask_queue()->size());
+
+ // Enqueue microtasks to the current context.
+ Handle<JSArray> ran = RunJS<JSArray>(
+ "var ran = [false, false, false, false];"
+ "Promise.resolve().then(() => { ran[0] = true; });"
+ "Promise.reject().catch(() => { ran[1] = true; });"
+ "ran");
+
+ Handle<JSFunction> function =
+ RunJS<JSFunction>("(function() { ran[2] = true; })");
+ Handle<CallableTask> callable =
+ factory()->NewCallableTask(function, Utils::OpenHandle(*context()));
+ microtask_queue()->EnqueueMicrotask(*callable);
+
+ // The handler should not run at this point.
+ const int kNumExpectedTasks = 3;
+ for (int i = 0; i < kNumExpectedTasks; ++i) {
+ EXPECT_TRUE(
+ Object::GetElement(isolate(), ran, i).ToHandleChecked()->IsFalse());
+ }
+ EXPECT_EQ(kNumExpectedTasks, microtask_queue()->size());
+
+ // Detach MicrotaskQueue from the current context.
+ context()->DetachGlobal();
+
+ // RunMicrotasks processes pending Microtasks, but Microtasks that are
+ // associated to a detached context should be cancelled and should not take
+ // effect.
+ microtask_queue()->RunMicrotasks(isolate());
+ EXPECT_EQ(0, microtask_queue()->size());
+ for (int i = 0; i < kNumExpectedTasks; ++i) {
+ EXPECT_TRUE(
+ Object::GetElement(isolate(), ran, i).ToHandleChecked()->IsFalse());
+ }
+}
+
+TEST_F(MicrotaskQueueTest, DetachGlobal_FinalizationGroup) {
+ // Enqueue a FinalizationGroupCleanupTask.
+ Handle<JSArray> ran = RunJS<JSArray>(
+ "var ran = [false];"
+ "var wf = new FinalizationGroup(() => { ran[0] = true; });"
+ "(function() { wf.register({}, {}); })();"
+ "gc();"
+ "ran");
+
+ EXPECT_TRUE(
+ Object::GetElement(isolate(), ran, 0).ToHandleChecked()->IsFalse());
+ EXPECT_EQ(1, microtask_queue()->size());
+
+ // Detach MicrotaskQueue from the current context.
+ context()->DetachGlobal();
+
+ microtask_queue()->RunMicrotasks(isolate());
+
+ // RunMicrotasks processes the pending Microtask, but Microtasks that are
+ // associated to a detached context should be cancelled and should not take
+ // effect.
+ EXPECT_EQ(0, microtask_queue()->size());
+ EXPECT_TRUE(
+ Object::GetElement(isolate(), ran, 0).ToHandleChecked()->IsFalse());
+}
+
+namespace {
+
+void DummyPromiseHook(PromiseHookType type, Local<Promise> promise,
+ Local<Value> parent) {}
+
+} // namespace
+
+TEST_F(MicrotaskQueueTest, DetachGlobal_PromiseResolveThenableJobTask) {
+ // Use a PromiseHook to switch the implementation to ResolvePromise runtime,
+ // instead of ResolvePromise builtin.
+ v8_isolate()->SetPromiseHook(&DummyPromiseHook);
+
+ RunJS(
+ "var resolve;"
+ "var promise = new Promise(r => { resolve = r; });"
+ "promise.then(() => {});"
+ "resolve({});");
+
+ // A PromiseResolveThenableJobTask is pending in the MicrotaskQueue.
+ EXPECT_EQ(1, microtask_queue()->size());
+
+ // Detach MicrotaskQueue from the current context.
+ context()->DetachGlobal();
+
+ // RunMicrotasks processes the pending Microtask, but Microtasks that are
+ // associated to a detached context should be cancelled and should not take
+ // effect.
+ // As PromiseResolveThenableJobTask queues another task for resolution,
+ // the return value is 2 if it ran.
+ EXPECT_EQ(1, microtask_queue()->RunMicrotasks(isolate()));
+ EXPECT_EQ(0, microtask_queue()->size());
+}
+
+TEST_F(MicrotaskQueueTest, DetachGlobal_HandlerContext) {
+ // EnqueueMicrotask should use the context associated to the handler instead
+ // of the current context. E.g.
+ // // At Context A.
+ // let resolved = Promise.resolve();
+ // // Call DetachGlobal on A, so that microtasks associated to A are
+ // // cancelled.
+ //
+ // // At Context B.
+ // let handler = () => {
+ // console.log("here");
+ // };
+ // // The microtask to run |handler| should be associated to B instead of A,
+ // // so that the handler runs even if |resolved| is on the detached context A.
+ // resolved.then(handler);
+
+ Handle<JSReceiver> results = isolate()->factory()->NewJSObjectWithNullProto();
+
+ // These belong to a stale Context.
+ Handle<JSPromise> stale_resolved_promise;
+ Handle<JSPromise> stale_rejected_promise;
+ Handle<JSReceiver> stale_handler;
+
+ Local<v8::Context> sub_context = v8::Context::New(v8_isolate());
+ {
+ v8::Context::Scope scope(sub_context);
+ stale_resolved_promise = RunJS<JSPromise>("Promise.resolve()");
+ stale_rejected_promise = RunJS<JSPromise>("Promise.reject()");
+ stale_handler = RunJS<JSReceiver>(
+ "(results, label) => {"
+ " results[label] = true;"
+ "}");
+ }
+ // DetachGlobal() cancels all microtasks associated with the context.
+ sub_context->DetachGlobal();
+ sub_context.Clear();
+
+ SetGlobalProperty("results", Utils::ToLocal(results));
+ SetGlobalProperty(
+ "stale_resolved_promise",
+ Utils::ToLocal(Handle<JSReceiver>::cast(stale_resolved_promise)));
+ SetGlobalProperty(
+ "stale_rejected_promise",
+ Utils::ToLocal(Handle<JSReceiver>::cast(stale_rejected_promise)));
+ SetGlobalProperty("stale_handler", Utils::ToLocal(stale_handler));
+
+ // Set valid handlers to stale promises.
+ RunJS(
+ "stale_resolved_promise.then(() => {"
+ " results['stale_resolved_promise'] = true;"
+ "})");
+ RunJS(
+ "stale_rejected_promise.catch(() => {"
+ " results['stale_rejected_promise'] = true;"
+ "})");
+ microtask_queue()->RunMicrotasks(isolate());
+ EXPECT_TRUE(
+ JSReceiver::HasProperty(results, NameFromChars("stale_resolved_promise"))
+ .FromJust());
+ EXPECT_TRUE(
+ JSReceiver::HasProperty(results, NameFromChars("stale_rejected_promise"))
+ .FromJust());
+
+ // Set stale handlers to valid promises.
+ RunJS(
+ "Promise.resolve("
+ " stale_handler.bind(null, results, 'stale_handler_resolve'))");
+ RunJS(
+ "Promise.reject("
+ " stale_handler.bind(null, results, 'stale_handler_reject'))");
+ microtask_queue()->RunMicrotasks(isolate());
+ EXPECT_FALSE(
+ JSReceiver::HasProperty(results, NameFromChars("stale_handler_resolve"))
+ .FromJust());
+ EXPECT_FALSE(
+ JSReceiver::HasProperty(results, NameFromChars("stale_handler_reject"))
+ .FromJust());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/parser/ast-value-unittest.cc b/deps/v8/test/unittests/parser/ast-value-unittest.cc
index 72e35a43a0..c30823b4b1 100644
--- a/deps/v8/test/unittests/parser/ast-value-unittest.cc
+++ b/deps/v8/test/unittests/parser/ast-value-unittest.cc
@@ -4,6 +4,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
+#include "src/hash-seed-inl.h"
#include "src/heap/heap-inl.h"
#include "src/isolate-inl.h"
#include "src/zone/zone.h"
@@ -17,7 +18,7 @@ class AstValueTest : public TestWithIsolateAndZone {
protected:
AstValueTest()
: ast_value_factory_(zone(), i_isolate()->ast_string_constants(),
- i_isolate()->heap()->HashSeed()),
+ HashSeed(i_isolate())),
ast_node_factory_(&ast_value_factory_, zone()) {}
Literal* NewBigInt(const char* str) {
diff --git a/deps/v8/test/unittests/testcfg.py b/deps/v8/test/unittests/testcfg.py
index 05fdd85809..a7fd6dc463 100644
--- a/deps/v8/test/unittests/testcfg.py
+++ b/deps/v8/test/unittests/testcfg.py
@@ -15,9 +15,10 @@ class VariantsGenerator(testsuite.VariantsGenerator):
return self._standard_variant
-class TestSuite(testsuite.TestSuite):
- def ListTests(self):
- shell = os.path.abspath(os.path.join(self.test_config.shell_dir, self.name))
+class TestLoader(testsuite.TestLoader):
+ def _list_test_filenames(self):
+ shell = os.path.abspath(
+ os.path.join(self.test_config.shell_dir, "unittests"))
if utils.IsWindows():
shell += ".exe"
@@ -30,6 +31,7 @@ class TestSuite(testsuite.TestSuite):
output = cmd.execute()
if output.exit_code == 0:
break
+
print "Test executable failed to list the tests (try %d).\n\nCmd:" % i
print cmd
print "\nStdout:"
@@ -40,17 +42,22 @@ class TestSuite(testsuite.TestSuite):
else:
raise Exception("Test executable failed to list the tests.")
- tests = []
- test_case = ''
+ # TODO: create an ExecutableTestLoader to refactor this, similar to
+ # JSTestLoader.
+ test_names = []
for line in output.stdout.splitlines():
test_desc = line.strip().split()[0]
if test_desc.endswith('.'):
test_case = test_desc
elif test_case and test_desc:
- test_path = test_case + test_desc
- tests.append(self._create_test(test_path))
- tests.sort(key=lambda t: t.path)
- return tests
+ test_names.append(test_case + test_desc)
+
+ return sorted(test_names)
+
+
+class TestSuite(testsuite.TestSuite):
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/unittests/torque/ls-json-unittest.cc b/deps/v8/test/unittests/torque/ls-json-unittest.cc
new file mode 100644
index 0000000000..5f0ec9252e
--- /dev/null
+++ b/deps/v8/test/unittests/torque/ls-json-unittest.cc
@@ -0,0 +1,103 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/ls/json-parser.h"
+#include "src/torque/ls/json.h"
+#include "src/torque/source-positions.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+namespace ls {
+
+TEST(LanguageServerJson, TestJsonPrimitives) {
+ const JsonValue true_result = ParseJson("true");
+ ASSERT_EQ(true_result.tag, JsonValue::BOOL);
+ EXPECT_EQ(true_result.ToBool(), true);
+
+ const JsonValue false_result = ParseJson("false");
+ ASSERT_EQ(false_result.tag, JsonValue::BOOL);
+ EXPECT_EQ(false_result.ToBool(), false);
+
+ const JsonValue null_result = ParseJson("null");
+ ASSERT_EQ(null_result.tag, JsonValue::IS_NULL);
+
+ const JsonValue number = ParseJson("42");
+ ASSERT_EQ(number.tag, JsonValue::NUMBER);
+ EXPECT_EQ(number.ToNumber(), 42);
+}
+
+TEST(LanguageServerJson, TestJsonStrings) {
+ const JsonValue basic = ParseJson("\"basic\"");
+ ASSERT_EQ(basic.tag, JsonValue::STRING);
+ EXPECT_EQ(basic.ToString(), "basic");
+
+ const JsonValue singleQuote = ParseJson("\"'\"");
+ ASSERT_EQ(singleQuote.tag, JsonValue::STRING);
+ EXPECT_EQ(singleQuote.ToString(), "'");
+}
+
+TEST(LanguageServerJson, TestJsonArrays) {
+ const JsonValue empty_array = ParseJson("[]");
+ ASSERT_EQ(empty_array.tag, JsonValue::ARRAY);
+ EXPECT_EQ(empty_array.ToArray().size(), (size_t)0);
+
+ const JsonValue number_array = ParseJson("[1, 2, 3, 4]");
+ ASSERT_EQ(number_array.tag, JsonValue::ARRAY);
+
+ const JsonArray& array = number_array.ToArray();
+ ASSERT_EQ(array.size(), (size_t)4);
+ ASSERT_EQ(array[1].tag, JsonValue::NUMBER);
+ EXPECT_EQ(array[1].ToNumber(), 2);
+
+ const JsonValue string_array_object = ParseJson("[\"a\", \"b\"]");
+ ASSERT_EQ(string_array_object.tag, JsonValue::ARRAY);
+
+ const JsonArray& string_array = string_array_object.ToArray();
+ ASSERT_EQ(string_array.size(), (size_t)2);
+ ASSERT_EQ(string_array[1].tag, JsonValue::STRING);
+ EXPECT_EQ(string_array[1].ToString(), "b");
+}
+
+TEST(LanguageServerJson, TestJsonObjects) {
+ const JsonValue empty_object = ParseJson("{}");
+ ASSERT_EQ(empty_object.tag, JsonValue::OBJECT);
+ EXPECT_EQ(empty_object.ToObject().size(), (size_t)0);
+
+ const JsonValue primitive_fields = ParseJson("{ \"flag\": true, \"id\": 5}");
+ EXPECT_EQ(primitive_fields.tag, JsonValue::OBJECT);
+
+ const JsonValue& flag = primitive_fields.ToObject().at("flag");
+ ASSERT_EQ(flag.tag, JsonValue::BOOL);
+ EXPECT_TRUE(flag.ToBool());
+
+ const JsonValue& id = primitive_fields.ToObject().at("id");
+ ASSERT_EQ(id.tag, JsonValue::NUMBER);
+ EXPECT_EQ(id.ToNumber(), 5);
+
+ const JsonValue& complex_fields =
+ ParseJson("{ \"array\": [], \"object\": { \"name\": \"torque\" } }");
+ ASSERT_EQ(complex_fields.tag, JsonValue::OBJECT);
+
+ const JsonValue& array = complex_fields.ToObject().at("array");
+ ASSERT_EQ(array.tag, JsonValue::ARRAY);
+ EXPECT_EQ(array.ToArray().size(), (size_t)0);
+
+ const JsonValue& object = complex_fields.ToObject().at("object");
+ ASSERT_EQ(object.tag, JsonValue::OBJECT);
+ ASSERT_EQ(object.ToObject().at("name").tag, JsonValue::STRING);
+ EXPECT_EQ(object.ToObject().at("name").ToString(), "torque");
+}
+
+TEST(LanguageServerJsonDeathTest, SyntaxError) {
+ ASSERT_DEATH(ParseJson("{]"), "Parser Error: unexpected token");
+ ASSERT_DEATH(ParseJson("{ noquoteskey: null }"),
+ "Lexer Error: unknown token");
+}
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/torque/ls-message-unittest.cc b/deps/v8/test/unittests/torque/ls-message-unittest.cc
new file mode 100644
index 0000000000..b8f7cf5864
--- /dev/null
+++ b/deps/v8/test/unittests/torque/ls-message-unittest.cc
@@ -0,0 +1,117 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/ls/json.h"
+#include "src/torque/ls/message-handler.h"
+#include "src/torque/ls/message.h"
+#include "src/torque/server-data.h"
+#include "src/torque/source-positions.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+namespace ls {
+
+TEST(LanguageServerMessage, InitializeRequest) {
+ InitializeRequest request;
+ request.set_id(5);
+ request.set_method("initialize");
+ request.params();
+
+ HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
+ InitializeResponse response(raw_response);
+
+ // Check that the response id matches up with the request id, and that
+ // the language server signals its support for definitions.
+ EXPECT_EQ(response.id(), 5);
+ EXPECT_EQ(response.result().capabilities().definitionProvider(), true);
+ });
+}
+
+TEST(LanguageServerMessage,
+ RegisterDynamicCapabilitiesAfterInitializedNotification) {
+ Request<bool> notification;
+ notification.set_method("initialized");
+
+ HandleMessage(notification.GetJsonValue(), [](JsonValue& raw_request) {
+ RegistrationRequest request(raw_request);
+
+ ASSERT_EQ(request.method(), "client/registerCapability");
+ ASSERT_EQ(request.params().registrations_size(), (size_t)1);
+
+ Registration registration = request.params().registrations(0);
+ ASSERT_EQ(registration.method(), "workspace/didChangeWatchedFiles");
+
+ auto options =
+ registration
+ .registerOptions<DidChangeWatchedFilesRegistrationOptions>();
+ ASSERT_EQ(options.watchers_size(), (size_t)1);
+ });
+}
+
+TEST(LanguageServerMessage, GotoDefinitionUnkownFile) {
+ SourceFileMap::Scope source_file_map_scope;
+
+ GotoDefinitionRequest request;
+ request.set_id(42);
+ request.set_method("textDocument/definition");
+ request.params().textDocument().set_uri("file:///unknown.tq");
+
+ HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
+ GotoDefinitionResponse response(raw_response);
+ EXPECT_EQ(response.id(), 42);
+ EXPECT_TRUE(response.IsNull("result"));
+ });
+}
+
+TEST(LanguageServerMessage, GotoDefinition) {
+ SourceFileMap::Scope source_file_map_scope;
+ SourceId test_id = SourceFileMap::AddSource("file://test.tq");
+ SourceId definition_id = SourceFileMap::AddSource("file://base.tq");
+
+ LanguageServerData::Scope server_data_scope;
+ LanguageServerData::AddDefinition({test_id, {1, 0}, {1, 10}},
+ {definition_id, {4, 1}, {4, 5}});
+
+ // First, check an unknown definition. The result must be null.
+ GotoDefinitionRequest request;
+ request.set_id(42);
+ request.set_method("textDocument/definition");
+ request.params().textDocument().set_uri("file://test.tq");
+ request.params().position().set_line(2);
+ request.params().position().set_character(0);
+
+ HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
+ GotoDefinitionResponse response(raw_response);
+ EXPECT_EQ(response.id(), 42);
+ EXPECT_TRUE(response.IsNull("result"));
+ });
+
+ // Second, check a known definition.
+ request = GotoDefinitionRequest();
+ request.set_id(43);
+ request.set_method("textDocument/definition");
+ request.params().textDocument().set_uri("file://test.tq");
+ request.params().position().set_line(1);
+ request.params().position().set_character(5);
+
+ HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
+ GotoDefinitionResponse response(raw_response);
+ EXPECT_EQ(response.id(), 43);
+ ASSERT_FALSE(response.IsNull("result"));
+
+ Location location = response.result();
+ EXPECT_EQ(location.uri(), "file://base.tq");
+ EXPECT_EQ(location.range().start().line(), 4);
+ EXPECT_EQ(location.range().start().character(), 1);
+ EXPECT_EQ(location.range().end().line(), 4);
+ EXPECT_EQ(location.range().end().character(), 5);
+ });
+}
+
+} // namespace ls
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/torque/torque-utils-unittest.cc b/deps/v8/test/unittests/torque/torque-utils-unittest.cc
new file mode 100644
index 0000000000..ff877a3c42
--- /dev/null
+++ b/deps/v8/test/unittests/torque/torque-utils-unittest.cc
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/utils.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+TEST(TorqueUtils, FileUriDecodeIllegal) {
+ EXPECT_EQ(FileUriDecode("http://wrong.scheme"), base::nullopt);
+ EXPECT_EQ(FileUriDecode("file://wrong-escape%"), base::nullopt);
+ EXPECT_EQ(FileUriDecode("file://another-wrong-escape%a"), base::nullopt);
+ EXPECT_EQ(FileUriDecode("file://no-hex-escape%0g"), base::nullopt);
+}
+
+TEST(TorqueUtils, FileUriDecode) {
+ EXPECT_EQ(FileUriDecode("file:///some/src/file.tq").value(),
+ "/some/src/file.tq");
+ EXPECT_EQ(FileUriDecode("file:///c%3A/torque/base.tq").value(),
+ "/c:/torque/base.tq");
+ EXPECT_EQ(FileUriDecode("file:///d%3a/lower/hex.txt").value(),
+ "/d:/lower/hex.txt");
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
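
The expectations above pin down the interesting cases: only the file:// scheme is accepted, %XX escapes decode with either hex case, and malformed escapes are rejected. A self-contained sketch of the percent-decoding step, assuming ASCII input (plain C++, not the Torque FileUriDecode implementation):

    #include <cctype>
    #include <optional>
    #include <string>

    std::optional<std::string> PercentDecode(const std::string& in) {
      std::string out;
      for (size_t i = 0; i < in.size(); ++i) {
        if (in[i] != '%') {
          out += in[i];
          continue;
        }
        // Reject "%", "%a" and "%0g" style escapes, as the tests above expect.
        if (i + 2 >= in.size() ||
            !std::isxdigit(static_cast<unsigned char>(in[i + 1])) ||
            !std::isxdigit(static_cast<unsigned char>(in[i + 2]))) {
          return std::nullopt;
        }
        out += static_cast<char>(std::stoi(in.substr(i + 1, 2), nullptr, 16));
        i += 2;
      }
      return out;
    }
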
diff --git a/deps/v8/test/unittests/unicode-unittest.cc b/deps/v8/test/unittests/unicode-unittest.cc
index 1bede08343..da1383c22c 100644
--- a/deps/v8/test/unittests/unicode-unittest.cc
+++ b/deps/v8/test/unittests/unicode-unittest.cc
@@ -50,9 +50,11 @@ void DecodeIncrementally(const std::vector<byte>& bytes,
std::vector<unibrow::uchar>* output) {
unibrow::Utf8::Utf8IncrementalBuffer buffer = 0;
unibrow::Utf8::State state = unibrow::Utf8::State::kAccept;
- for (size_t i = 0; i < bytes.size();) {
+ const byte* cursor = &bytes[0];
+ const byte* end = &bytes[bytes.size()];
+ while (cursor < end) {
unibrow::uchar result =
- unibrow::Utf8::ValueOfIncremental(bytes[i], &i, &state, &buffer);
+ unibrow::Utf8::ValueOfIncremental(&cursor, &state, &buffer);
if (result != unibrow::Utf8::kIncomplete) {
output->push_back(result);
}
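
The incremental decoder now advances a byte cursor itself instead of reporting consumption back through an index parameter, which removes that bookkeeping from every caller. One small nit in the updated test: `&bytes[bytes.size()]` subscripts one element past the end of the vector, which is formally undefined; the conventional spelling for the same end pointer is:

    const byte* cursor = bytes.data();
    const byte* end = bytes.data() + bytes.size();  // well-defined one-past-the-end
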
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index 7582deaedd..a105afe987 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -22,9 +22,16 @@
}], # '(arch == arm or arch == mips) and not simulator_run'
##############################################################################
-['lite_mode', {
+['lite_mode or variant == jitless', {
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'ValueSerializerTestWithSharedArrayBufferClone.RoundTripWebAssemblyMemory': [SKIP],
'ValueSerializerTestWithWasm.*': [SKIP],
-}], # lite_mode
+ 'Parameterized/WasmCodeManagerTest.*': [SKIP],
+}], # lite_mode or variant == jitless
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
]
diff --git a/deps/v8/test/unittests/utils-unittest.cc b/deps/v8/test/unittests/utils-unittest.cc
index c8032d187d..614880f2e7 100644
--- a/deps/v8/test/unittests/utils-unittest.cc
+++ b/deps/v8/test/unittests/utils-unittest.cc
@@ -24,7 +24,7 @@ typedef ::testing::Types<signed char, unsigned char,
int64_t, uint64_t>
IntegerTypes;
-TYPED_TEST_CASE(UtilsTest, IntegerTypes);
+TYPED_TEST_SUITE(UtilsTest, IntegerTypes);
TYPED_TEST(UtilsTest, SaturateSub) {
TypeParam min = std::numeric_limits<TypeParam>::min();
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index e13816744f..13161947e8 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -6,6 +6,7 @@
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/ostreams.h"
#include "src/v8.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
@@ -54,40 +55,6 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
#define WASM_BRV_IF_ZERO(depth, val) \
val, WASM_ZERO, kExprBrIf, static_cast<byte>(depth)
-#define EXPECT_VERIFIES_C(sig, x) \
- Verify(true, sigs.sig(), ArrayVector(x), kAppendEnd)
-
-#define EXPECT_FAILURE_C(sig, x, ...) \
- Verify(false, sigs.sig(), ArrayVector(x), kAppendEnd, ##__VA_ARGS__)
-
-#define EXPECT_VERIFIES_SC(sig, x) Verify(true, sig, ArrayVector(x), kAppendEnd)
-
-#define EXPECT_FAILURE_SC(sig, x) Verify(false, sig, ArrayVector(x), kAppendEnd)
-
-#define EXPECT_VERIFIES_S(env, ...) \
- do { \
- static byte code[] = {__VA_ARGS__}; \
- Verify(true, env, ArrayVector(code), kAppendEnd); \
- } while (false)
-
-#define EXPECT_FAILURE_S(env, ...) \
- do { \
- static byte code[] = {__VA_ARGS__}; \
- Verify(false, env, ArrayVector(code), kAppendEnd); \
- } while (false)
-
-#define EXPECT_VERIFIES(sig, ...) \
- do { \
- static const byte code[] = {__VA_ARGS__}; \
- EXPECT_VERIFIES_C(sig, code); \
- } while (false)
-
-#define EXPECT_FAILURE(sig, ...) \
- do { \
- static const byte code[] = {__VA_ARGS__}; \
- EXPECT_FAILURE_C(sig, code); \
- } while (false)
-
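
This macro family is replaced by the templated ExpectValidates/ExpectFailure helpers defined further down in this file's diff; the CodeToVector overloads let a test pass a C array, a brace-enclosed initializer list, or an existing Vector without a macro in sight. Roughly:

    // Old:  EXPECT_VERIFIES(i_i, WASM_I32V_1(0));
    // New:  ExpectValidates(sigs.i_i(), {WASM_I32V_1(0)});
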
class FunctionBodyDecoderTest : public TestWithZone {
public:
typedef std::pair<uint32_t, ValueType> LocalsDecl;
@@ -116,7 +83,9 @@ class FunctionBodyDecoderTest : public TestWithZone {
// Prepend the local decls to the code.
local_decls.Emit(buffer);
// Emit the code.
- memcpy(buffer + locals_size, code.start(), code.size());
+ if (code.size() > 0) {
+ memcpy(buffer + locals_size, code.start(), code.size());
+ }
if (append_end == kAppendEnd) {
// Append an extra end opcode.
buffer[total_size - 1] = kExprEnd;
@@ -125,13 +94,28 @@ class FunctionBodyDecoderTest : public TestWithZone {
return {buffer, total_size};
}
+ template <size_t N>
+ Vector<const byte> CodeToVector(const byte (&code)[N]) {
+ return ArrayVector(code);
+ }
+
+ Vector<const byte> CodeToVector(
+ const std::initializer_list<const byte>& code) {
+ return VectorOf(&*code.begin(), code.size());
+ }
+
+ Vector<const byte> CodeToVector(Vector<const byte> vec) { return vec; }
+
// Prepends local variable declarations and renders nice error messages for
// verification failures.
- void Verify(bool expected_success, FunctionSig* sig, Vector<const byte> code,
- AppendEnd append_end, const char* message = nullptr) {
- code = PrepareBytecode(code, append_end);
-
- // Verify the code.
+ template <typename Code = std::initializer_list<const byte>>
+ void Validate(bool expected_success, FunctionSig* sig, Code&& raw_code,
+ AppendEnd append_end = kAppendEnd,
+ const char* message = nullptr) {
+ Vector<const byte> code =
+ PrepareBytecode(CodeToVector(std::forward<Code>(raw_code)), append_end);
+
+ // Validate the code.
FunctionBody body(sig, 0, code.start(), code.end());
WasmFeatures unused_detected_features;
DecodeResult result =
@@ -151,10 +135,24 @@ class FunctionBodyDecoderTest : public TestWithZone {
}
}
+ template <typename Code = std::initializer_list<const byte>>
+ void ExpectValidates(FunctionSig* sig, Code&& raw_code,
+ AppendEnd append_end = kAppendEnd,
+ const char* message = nullptr) {
+ Validate(true, sig, std::forward<Code>(raw_code), append_end, message);
+ }
+
+ template <typename Code = std::initializer_list<const byte>>
+ void ExpectFailure(FunctionSig* sig, Code&& raw_code,
+ AppendEnd append_end = kAppendEnd,
+ const char* message = nullptr) {
+ Validate(false, sig, std::forward<Code>(raw_code), append_end, message);
+ }
+
void TestBinop(WasmOpcode opcode, FunctionSig* success) {
// op(local[0], local[1])
byte code[] = {WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
- EXPECT_VERIFIES_SC(success, code);
+ ExpectValidates(success, code);
// Try all combinations of return and parameter types.
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
@@ -166,7 +164,7 @@ class FunctionBodyDecoderTest : public TestWithZone {
types[2] != success->GetParam(1)) {
// Test signature mismatch.
FunctionSig sig(1, 2, types);
- EXPECT_FAILURE_SC(&sig, code);
+ ExpectFailure(&sig, code);
}
}
}
@@ -183,7 +181,7 @@ class FunctionBodyDecoderTest : public TestWithZone {
{
ValueType types[] = {ret_type, param_type};
FunctionSig sig(1, 1, types);
- EXPECT_VERIFIES_SC(&sig, code);
+ ExpectValidates(&sig, code);
}
// Try all combinations of return and parameter types.
@@ -193,7 +191,7 @@ class FunctionBodyDecoderTest : public TestWithZone {
if (types[0] != ret_type || types[1] != param_type) {
// Test signature mismatch.
FunctionSig sig(1, 1, types);
- EXPECT_FAILURE_SC(&sig, code);
+ ExpectFailure(&sig, code);
}
}
}
@@ -254,6 +252,18 @@ class TestModuleBuilder {
return static_cast<byte>(mod.exceptions.size() - 1);
}
+ byte AddTable(ValueType type, uint32_t initial_size, bool has_maximum_size,
+ uint32_t maximum_size) {
+ CHECK(type == kWasmAnyRef || type == kWasmAnyFunc);
+ mod.tables.emplace_back();
+ WasmTable& table = mod.tables.back();
+ table.type = type;
+ table.initial_size = initial_size;
+ table.has_maximum_size = has_maximum_size;
+ table.maximum_size = maximum_size;
+ return static_cast<byte>(mod.tables.size() - 1);
+ }
+
void InitializeMemory() {
mod.has_memory = true;
mod.initial_pages = 1;
@@ -285,49 +295,47 @@ TEST_F(FunctionBodyDecoderTest, Int32Const1) {
byte code[] = {kExprI32Const, 0};
for (int i = -64; i <= 63; i++) {
code[1] = static_cast<byte>(i & 0x7F);
- EXPECT_VERIFIES_C(i_i, code);
+ ExpectValidates(sigs.i_i(), code);
}
}
TEST_F(FunctionBodyDecoderTest, RefNull) {
WASM_FEATURE_SCOPE(anyref);
- byte code[] = {kExprRefNull};
- EXPECT_VERIFIES_C(r_v, code);
+ ExpectValidates(sigs.r_v(), {kExprRefNull});
}
TEST_F(FunctionBodyDecoderTest, EmptyFunction) {
- Verify(true, sigs.v_v(), {}, kAppendEnd);
- Verify(false, sigs.i_i(), {}, kAppendEnd);
+ ExpectValidates(sigs.v_v(), {});
+ ExpectFailure(sigs.i_i(), {});
}
TEST_F(FunctionBodyDecoderTest, IncompleteIf1) {
byte code[] = {kExprIf};
- EXPECT_FAILURE_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
}
TEST_F(FunctionBodyDecoderTest, Int32Const_fallthru) {
- EXPECT_VERIFIES(i_i, WASM_I32V_1(0));
+ ExpectValidates(sigs.i_i(), {WASM_I32V_1(0)});
}
TEST_F(FunctionBodyDecoderTest, Int32Const_fallthru2) {
- EXPECT_FAILURE(i_i, WASM_I32V_1(0), WASM_I32V_1(1));
+ ExpectFailure(sigs.i_i(), {WASM_I32V_1(0), WASM_I32V_1(1)});
}
TEST_F(FunctionBodyDecoderTest, Int32Const) {
const int kInc = 4498211;
for (int32_t i = kMinInt; i < kMaxInt - kInc; i = i + kInc) {
// TODO(binji): expand test for other sized int32s; 1 through 5 bytes.
- byte code[] = {WASM_I32V(i)};
- EXPECT_VERIFIES_C(i_i, code);
+ ExpectValidates(sigs.i_i(), {WASM_I32V(i)});
}
}
TEST_F(FunctionBodyDecoderTest, Int64Const) {
const int kInc = 4498211;
for (int32_t i = kMinInt; i < kMaxInt - kInc; i = i + kInc) {
- byte code[] = {WASM_I64V((static_cast<int64_t>(i) << 32) | i)};
- EXPECT_VERIFIES_C(l_l, code);
+ ExpectValidates(sigs.l_l(),
+ {WASM_I64V((static_cast<uint64_t>(i) << 32) | i)});
}
}
@@ -336,7 +344,7 @@ TEST_F(FunctionBodyDecoderTest, Float32Const) {
Address ptr = reinterpret_cast<Address>(code + 1);
for (int i = 0; i < 30; i++) {
WriteLittleEndianValue<float>(ptr, i * -7.75f);
- EXPECT_VERIFIES_C(f_ff, code);
+ ExpectValidates(sigs.f_ff(), code);
}
}
@@ -345,7 +353,7 @@ TEST_F(FunctionBodyDecoderTest, Float64Const) {
Address ptr = reinterpret_cast<Address>(code + 1);
for (int i = 0; i < 30; i++) {
WriteLittleEndianValue<double>(ptr, i * 33.45);
- EXPECT_VERIFIES_C(d_dd, code);
+ ExpectValidates(sigs.d_dd(), code);
}
}
@@ -353,31 +361,31 @@ TEST_F(FunctionBodyDecoderTest, Int32Const_off_end) {
byte code[] = {kExprI32Const, 0xAA, 0xBB, 0xCC, 0x44};
for (size_t size = 1; size <= 4; ++size) {
- Verify(false, sigs.i_i(), {code, size}, kAppendEnd);
+ ExpectFailure(sigs.i_i(), VectorOf(code, size), kAppendEnd);
// Should also fail without the trailing 'end' opcode.
- Verify(false, sigs.i_i(), {code, size}, kOmitEnd);
+ ExpectFailure(sigs.i_i(), VectorOf(code, size), kOmitEnd);
}
}
TEST_F(FunctionBodyDecoderTest, GetLocal0_param) {
- EXPECT_VERIFIES_C(i_i, kCodeGetLocal0);
+ ExpectValidates(sigs.i_i(), kCodeGetLocal0);
}
TEST_F(FunctionBodyDecoderTest, GetLocal0_local) {
AddLocals(kWasmI32, 1);
- EXPECT_VERIFIES_C(i_v, kCodeGetLocal0);
+ ExpectValidates(sigs.i_v(), kCodeGetLocal0);
}
TEST_F(FunctionBodyDecoderTest, TooManyLocals) {
AddLocals(kWasmI32, 4034986500);
- EXPECT_FAILURE_C(i_v, kCodeGetLocal0);
+ ExpectFailure(sigs.i_v(), kCodeGetLocal0);
}
TEST_F(FunctionBodyDecoderTest, GetLocal0_param_n) {
FunctionSig* array[] = {sigs.i_i(), sigs.i_ii(), sigs.i_iii()};
for (size_t i = 0; i < arraysize(array); i++) {
- EXPECT_VERIFIES_SC(array[i], kCodeGetLocal0);
+ ExpectValidates(array[i], kCodeGetLocal0);
}
}
@@ -385,149 +393,143 @@ TEST_F(FunctionBodyDecoderTest, GetLocalN_local) {
for (byte i = 1; i < 8; i++) {
AddLocals(kWasmI32, 1);
for (byte j = 0; j < i; j++) {
- byte code[] = {kExprGetLocal, j};
- EXPECT_VERIFIES_C(i_v, code);
+ ExpectValidates(sigs.i_v(), {kExprGetLocal, j});
}
}
}
TEST_F(FunctionBodyDecoderTest, GetLocal0_fail_no_params) {
- EXPECT_FAILURE_C(i_v, kCodeGetLocal0);
+ ExpectFailure(sigs.i_v(), kCodeGetLocal0);
}
TEST_F(FunctionBodyDecoderTest, GetLocal1_fail_no_locals) {
- EXPECT_FAILURE_C(i_i, kCodeGetLocal1);
+ ExpectFailure(sigs.i_i(), kCodeGetLocal1);
}
TEST_F(FunctionBodyDecoderTest, GetLocal_off_end) {
- static const byte code[] = {kExprGetLocal};
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.i_i(), {kExprGetLocal});
}
TEST_F(FunctionBodyDecoderTest, NumLocalBelowLimit) {
AddLocals(kWasmI32, kV8MaxWasmFunctionLocals - 1);
- EXPECT_VERIFIES(v_v, WASM_NOP);
+ ExpectValidates(sigs.v_v(), {WASM_NOP});
}
TEST_F(FunctionBodyDecoderTest, NumLocalAtLimit) {
AddLocals(kWasmI32, kV8MaxWasmFunctionLocals);
- EXPECT_VERIFIES(v_v, WASM_NOP);
+ ExpectValidates(sigs.v_v(), {WASM_NOP});
}
TEST_F(FunctionBodyDecoderTest, NumLocalAboveLimit) {
AddLocals(kWasmI32, kV8MaxWasmFunctionLocals + 1);
- EXPECT_FAILURE(v_v, WASM_NOP);
+ ExpectFailure(sigs.v_v(), {WASM_NOP});
}
TEST_F(FunctionBodyDecoderTest, GetLocal_varint) {
const int kMaxLocals = kV8MaxWasmFunctionLocals - 1;
AddLocals(kWasmI32, kMaxLocals);
- EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_1(66));
- EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_2(7777));
- EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_3(8888));
- EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_4(9999));
+ ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_1(66)});
+ ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_2(7777)});
+ ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_3(8888)});
+ ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_4(9999)});
- EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_5(kMaxLocals - 1));
+ ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_5(kMaxLocals - 1)});
- EXPECT_FAILURE(i_i, kExprGetLocal, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
+ ExpectFailure(sigs.i_i(), {kExprGetLocal, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF});
- EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_4(kMaxLocals - 1));
- EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_4(kMaxLocals));
- EXPECT_FAILURE(i_i, kExprGetLocal, U32V_4(kMaxLocals + 1));
+ ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_4(kMaxLocals - 1)});
+ ExpectValidates(sigs.i_i(), {kExprGetLocal, U32V_4(kMaxLocals)});
+ ExpectFailure(sigs.i_i(), {kExprGetLocal, U32V_4(kMaxLocals + 1)});
- EXPECT_FAILURE(i_v, kExprGetLocal, U32V_4(kMaxLocals));
- EXPECT_FAILURE(i_v, kExprGetLocal, U32V_4(kMaxLocals + 1));
+ ExpectFailure(sigs.i_v(), {kExprGetLocal, U32V_4(kMaxLocals)});
+ ExpectFailure(sigs.i_v(), {kExprGetLocal, U32V_4(kMaxLocals + 1)});
}
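
The U32V_1 through U32V_5 macros emit a local index as unsigned LEB128 padded to a fixed byte count, so a 32-bit index needs at most five bytes, and the five 0xFF bytes above must be rejected: the fifth byte still has its continuation bit set and its payload overflows 32 bits. A sketch of the underlying encoding:

#include <cstdint>
#include <vector>

// Unsigned LEB128: 7 payload bits per byte; the high bit means "more follows".
std::vector<uint8_t> EncodeU32Leb(uint32_t value) {
  std::vector<uint8_t> out;
  do {
    uint8_t b = value & 0x7F;
    value >>= 7;
    if (value != 0) b |= 0x80;  // continuation bit
    out.push_back(b);
  } while (value != 0);
  return out;  // at most ceil(32 / 7) = 5 bytes
}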
TEST_F(FunctionBodyDecoderTest, GetLocal_toomany) {
AddLocals(kWasmI32, kV8MaxWasmFunctionLocals - 100);
AddLocals(kWasmI32, 100);
- EXPECT_VERIFIES(i_v, kExprGetLocal, U32V_1(66));
- EXPECT_FAILURE(i_i, kExprGetLocal, U32V_1(66));
+ ExpectValidates(sigs.i_v(), {kExprGetLocal, U32V_1(66)});
+ ExpectFailure(sigs.i_i(), {kExprGetLocal, U32V_1(66)});
}
TEST_F(FunctionBodyDecoderTest, Binops_off_end) {
byte code1[] = {0}; // [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code1[0] = kInt32BinopOpcodes[i];
- EXPECT_FAILURE_C(i_i, code1);
+ ExpectFailure(sigs.i_i(), code1);
}
byte code3[] = {kExprGetLocal, 0, 0}; // [expr] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code3[2] = kInt32BinopOpcodes[i];
- EXPECT_FAILURE_C(i_i, code3);
+ ExpectFailure(sigs.i_i(), code3);
}
byte code4[] = {kExprGetLocal, 0, 0, 0}; // [expr] [opcode] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code4[2] = kInt32BinopOpcodes[i];
code4[3] = kInt32BinopOpcodes[i];
- EXPECT_FAILURE_C(i_i, code4);
+ ExpectFailure(sigs.i_i(), code4);
}
}
TEST_F(FunctionBodyDecoderTest, BinopsAcrossBlock1) {
- static const byte code[] = {WASM_ZERO, kExprBlock, kLocalI32,
- WASM_ZERO, kExprI32Add, kExprEnd};
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.i_i(), {WASM_ZERO, kExprBlock, kLocalI32, WASM_ZERO,
+ kExprI32Add, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, BinopsAcrossBlock2) {
- static const byte code[] = {WASM_ZERO, WASM_ZERO, kExprBlock,
- kLocalI32, kExprI32Add, kExprEnd};
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.i_i(), {WASM_ZERO, WASM_ZERO, kExprBlock, kLocalI32,
+ kExprI32Add, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, BinopsAcrossBlock3) {
- static const byte code[] = {WASM_ZERO, WASM_ZERO, kExprIf, kLocalI32,
- kExprI32Add, kExprElse, kExprI32Add, kExprEnd};
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.i_i(), {WASM_ZERO, WASM_ZERO, kExprIf, kLocalI32,
+ kExprI32Add, kExprElse, kExprI32Add, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, Nop) {
- static const byte code[] = {kExprNop};
- EXPECT_VERIFIES_C(v_v, code);
+ ExpectValidates(sigs.v_v(), {kExprNop});
}
TEST_F(FunctionBodyDecoderTest, SetLocal0_void) {
- EXPECT_FAILURE(i_i, WASM_SET_LOCAL(0, WASM_ZERO));
+ ExpectFailure(sigs.i_i(), {WASM_SET_LOCAL(0, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, SetLocal0_param) {
- EXPECT_FAILURE_C(i_i, kCodeSetLocal0);
- EXPECT_FAILURE_C(f_ff, kCodeSetLocal0);
- EXPECT_FAILURE_C(d_dd, kCodeSetLocal0);
+ ExpectFailure(sigs.i_i(), kCodeSetLocal0);
+ ExpectFailure(sigs.f_ff(), kCodeSetLocal0);
+ ExpectFailure(sigs.d_dd(), kCodeSetLocal0);
}
TEST_F(FunctionBodyDecoderTest, TeeLocal0_param) {
- EXPECT_VERIFIES_C(i_i, kCodeTeeLocal0);
- EXPECT_FAILURE_C(f_ff, kCodeTeeLocal0);
- EXPECT_FAILURE_C(d_dd, kCodeTeeLocal0);
+ ExpectValidates(sigs.i_i(), kCodeTeeLocal0);
+ ExpectFailure(sigs.f_ff(), kCodeTeeLocal0);
+ ExpectFailure(sigs.d_dd(), kCodeTeeLocal0);
}
TEST_F(FunctionBodyDecoderTest, SetLocal0_local) {
- EXPECT_FAILURE_C(i_v, kCodeSetLocal0);
- EXPECT_FAILURE_C(v_v, kCodeSetLocal0);
+ ExpectFailure(sigs.i_v(), kCodeSetLocal0);
+ ExpectFailure(sigs.v_v(), kCodeSetLocal0);
AddLocals(kWasmI32, 1);
- EXPECT_FAILURE_C(i_v, kCodeSetLocal0);
- EXPECT_VERIFIES_C(v_v, kCodeSetLocal0);
+ ExpectFailure(sigs.i_v(), kCodeSetLocal0);
+ ExpectValidates(sigs.v_v(), kCodeSetLocal0);
}
TEST_F(FunctionBodyDecoderTest, TeeLocal0_local) {
- EXPECT_FAILURE_C(i_v, kCodeTeeLocal0);
+ ExpectFailure(sigs.i_v(), kCodeTeeLocal0);
AddLocals(kWasmI32, 1);
- EXPECT_VERIFIES_C(i_v, kCodeTeeLocal0);
+ ExpectValidates(sigs.i_v(), kCodeTeeLocal0);
}
TEST_F(FunctionBodyDecoderTest, TeeLocalN_local) {
for (byte i = 1; i < 8; i++) {
AddLocals(kWasmI32, 1);
for (byte j = 0; j < i; j++) {
- EXPECT_FAILURE(v_v, WASM_TEE_LOCAL(j, WASM_I32V_1(i)));
- EXPECT_VERIFIES(i_i, WASM_TEE_LOCAL(j, WASM_I32V_1(i)));
+ ExpectFailure(sigs.v_v(), {WASM_TEE_LOCAL(j, WASM_I32V_1(i))});
+ ExpectValidates(sigs.i_i(), {WASM_TEE_LOCAL(j, WASM_I32V_1(i))});
}
}
}
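
The set/tee distinction these tests exercise: set_local pops its operand, while tee_local stores it and leaves the value on the stack, so a tee validates only when the leftover i32 matches the function result. Illustrative byte sequences, with opcode values taken from the wasm spec (i32.const = 0x41, set_local = 0x21, tee_local = 0x22):

#include <cstdint>
using byte = uint8_t;

byte kSetLocal0[] = {0x41, 0x00, 0x21, 0x00};  // i32.const 0 ; set_local 0 -> stack []
byte kTeeLocal0[] = {0x41, 0x00, 0x22, 0x00};  // i32.const 0 ; tee_local 0 -> stack [i32]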
@@ -541,243 +543,251 @@ TEST_F(FunctionBodyDecoderTest, BlockN) {
buffer[0] = kExprBlock;
buffer[1] = kLocalVoid;
buffer[i + 2] = kExprEnd;
- Verify(true, sigs.v_i(), {buffer, i + 3}, kAppendEnd);
+ ExpectValidates(sigs.v_i(), VectorOf(buffer, i + 3), kAppendEnd);
}
}
#define WASM_EMPTY_BLOCK kExprBlock, kLocalVoid, kExprEnd
TEST_F(FunctionBodyDecoderTest, Block0) {
- static const byte code[] = {WASM_EMPTY_BLOCK};
- EXPECT_VERIFIES_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectValidates(sigs.v_v(), {WASM_EMPTY_BLOCK});
+ ExpectFailure(sigs.i_i(), {WASM_EMPTY_BLOCK});
}
TEST_F(FunctionBodyDecoderTest, Block0_fallthru1) {
- static const byte code[] = {WASM_BLOCK(WASM_EMPTY_BLOCK)};
- EXPECT_VERIFIES_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectValidates(sigs.v_v(), {WASM_BLOCK(WASM_EMPTY_BLOCK)});
+ ExpectFailure(sigs.i_i(), {WASM_BLOCK(WASM_EMPTY_BLOCK)});
}
TEST_F(FunctionBodyDecoderTest, Block0Block0) {
- static const byte code[] = {WASM_EMPTY_BLOCK, WASM_EMPTY_BLOCK};
- EXPECT_VERIFIES_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectValidates(sigs.v_v(), {WASM_EMPTY_BLOCK, WASM_EMPTY_BLOCK});
+ ExpectFailure(sigs.i_i(), {WASM_EMPTY_BLOCK, WASM_EMPTY_BLOCK});
}
TEST_F(FunctionBodyDecoderTest, Block0_end) {
- EXPECT_FAILURE(v_v, WASM_EMPTY_BLOCK, kExprEnd);
+ ExpectFailure(sigs.v_v(), {WASM_EMPTY_BLOCK, kExprEnd});
}
#undef WASM_EMPTY_BLOCK
TEST_F(FunctionBodyDecoderTest, Block1) {
byte code[] = {WASM_BLOCK_I(WASM_GET_LOCAL(0))};
- EXPECT_VERIFIES_C(i_i, code);
- EXPECT_FAILURE_C(v_i, code);
- EXPECT_FAILURE_C(d_dd, code);
- EXPECT_FAILURE_C(i_f, code);
- EXPECT_FAILURE_C(i_d, code);
+ ExpectValidates(sigs.i_i(), code);
+ ExpectFailure(sigs.v_i(), code);
+ ExpectFailure(sigs.d_dd(), code);
+ ExpectFailure(sigs.i_f(), code);
+ ExpectFailure(sigs.i_d(), code);
}
TEST_F(FunctionBodyDecoderTest, Block1_i) {
byte code[] = {WASM_BLOCK_I(WASM_ZERO)};
- EXPECT_VERIFIES_C(i_i, code);
- EXPECT_FAILURE_C(f_ff, code);
- EXPECT_FAILURE_C(d_dd, code);
- EXPECT_FAILURE_C(l_ll, code);
+ ExpectValidates(sigs.i_i(), code);
+ ExpectFailure(sigs.f_ff(), code);
+ ExpectFailure(sigs.d_dd(), code);
+ ExpectFailure(sigs.l_ll(), code);
}
TEST_F(FunctionBodyDecoderTest, Block1_f) {
byte code[] = {WASM_BLOCK_F(WASM_F32(0))};
- EXPECT_FAILURE_C(i_i, code);
- EXPECT_VERIFIES_C(f_ff, code);
- EXPECT_FAILURE_C(d_dd, code);
- EXPECT_FAILURE_C(l_ll, code);
+ ExpectFailure(sigs.i_i(), code);
+ ExpectValidates(sigs.f_ff(), code);
+ ExpectFailure(sigs.d_dd(), code);
+ ExpectFailure(sigs.l_ll(), code);
}
TEST_F(FunctionBodyDecoderTest, Block1_continue) {
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_BR(0)));
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_BR(0))});
}
TEST_F(FunctionBodyDecoderTest, Block1_br) {
- EXPECT_VERIFIES(v_v, B1(WASM_BR(0)));
- EXPECT_VERIFIES(v_v, B1(WASM_BR(1)));
- EXPECT_FAILURE(v_v, B1(WASM_BR(2)));
+ ExpectValidates(sigs.v_v(), {B1(WASM_BR(0))});
+ ExpectValidates(sigs.v_v(), {B1(WASM_BR(1))});
+ ExpectFailure(sigs.v_v(), {B1(WASM_BR(2))});
}
TEST_F(FunctionBodyDecoderTest, Block2_br) {
- EXPECT_VERIFIES(v_v, B2(WASM_NOP, WASM_BR(0)));
- EXPECT_VERIFIES(v_v, B2(WASM_BR(0), WASM_NOP));
- EXPECT_VERIFIES(v_v, B2(WASM_BR(0), WASM_BR(0)));
+ ExpectValidates(sigs.v_v(), {B2(WASM_NOP, WASM_BR(0))});
+ ExpectValidates(sigs.v_v(), {B2(WASM_BR(0), WASM_NOP)});
+ ExpectValidates(sigs.v_v(), {B2(WASM_BR(0), WASM_BR(0))});
}
TEST_F(FunctionBodyDecoderTest, Block2) {
- EXPECT_FAILURE(i_i, WASM_BLOCK(WASM_NOP, WASM_NOP));
- EXPECT_FAILURE(i_i, WASM_BLOCK_I(WASM_NOP, WASM_NOP));
- EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_NOP, WASM_ZERO));
- EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_ZERO, WASM_NOP));
- EXPECT_FAILURE(i_i, WASM_BLOCK_I(WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.i_i(), {WASM_BLOCK(WASM_NOP, WASM_NOP)});
+ ExpectFailure(sigs.i_i(), {WASM_BLOCK_I(WASM_NOP, WASM_NOP)});
+ ExpectValidates(sigs.i_i(), {WASM_BLOCK_I(WASM_NOP, WASM_ZERO)});
+ ExpectValidates(sigs.i_i(), {WASM_BLOCK_I(WASM_ZERO, WASM_NOP)});
+ ExpectFailure(sigs.i_i(), {WASM_BLOCK_I(WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, Block2b) {
byte code[] = {WASM_BLOCK_I(WASM_SET_LOCAL(0, WASM_ZERO), WASM_ZERO)};
- EXPECT_VERIFIES_C(i_i, code);
- EXPECT_FAILURE_C(v_v, code);
- EXPECT_FAILURE_C(f_ff, code);
+ ExpectValidates(sigs.i_i(), code);
+ ExpectFailure(sigs.v_v(), code);
+ ExpectFailure(sigs.f_ff(), code);
}
TEST_F(FunctionBodyDecoderTest, Block2_fallthru) {
- EXPECT_VERIFIES(
- i_i, B2(WASM_SET_LOCAL(0, WASM_ZERO), WASM_SET_LOCAL(0, WASM_ZERO)),
- WASM_I32V_1(23));
+ ExpectValidates(sigs.i_i(), {B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)),
+ WASM_I32V_1(23)});
}
TEST_F(FunctionBodyDecoderTest, Block3) {
- EXPECT_VERIFIES(i_i,
- WASM_BLOCK_I(WASM_SET_LOCAL(0, WASM_ZERO),
- WASM_SET_LOCAL(0, WASM_ZERO), WASM_I32V_1(11)));
+ ExpectValidates(sigs.i_i(), {WASM_BLOCK_I(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_I32V_1(11))});
}
TEST_F(FunctionBodyDecoderTest, Block5) {
- EXPECT_FAILURE(v_i, WASM_BLOCK(WASM_ZERO));
+ ExpectFailure(sigs.v_i(), {WASM_BLOCK(WASM_ZERO)});
- EXPECT_FAILURE(v_i, WASM_BLOCK(WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_i(), {WASM_BLOCK(WASM_ZERO, WASM_ZERO)});
- EXPECT_FAILURE(v_i, WASM_BLOCK(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_i(), {WASM_BLOCK(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- EXPECT_FAILURE(v_i, WASM_BLOCK(WASM_ZERO, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_i(),
+ {WASM_BLOCK(WASM_ZERO, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- EXPECT_FAILURE(
- v_i, WASM_BLOCK(WASM_ZERO, WASM_ZERO, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_i(), {WASM_BLOCK(WASM_ZERO, WASM_ZERO, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, BlockType) {
- EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES(l_l, WASM_BLOCK_L(WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES(f_f, WASM_BLOCK_F(WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES(d_d, WASM_BLOCK_D(WASM_GET_LOCAL(0)));
+ ExpectValidates(sigs.i_i(), {WASM_BLOCK_I(WASM_GET_LOCAL(0))});
+ ExpectValidates(sigs.l_l(), {WASM_BLOCK_L(WASM_GET_LOCAL(0))});
+ ExpectValidates(sigs.f_f(), {WASM_BLOCK_F(WASM_GET_LOCAL(0))});
+ ExpectValidates(sigs.d_d(), {WASM_BLOCK_D(WASM_GET_LOCAL(0))});
}
TEST_F(FunctionBodyDecoderTest, BlockType_fail) {
- EXPECT_FAILURE(i_i, WASM_BLOCK_L(WASM_I64V_1(0)));
- EXPECT_FAILURE(i_i, WASM_BLOCK_F(WASM_F32(0.0)));
- EXPECT_FAILURE(i_i, WASM_BLOCK_D(WASM_F64(1.1)));
+ ExpectFailure(sigs.i_i(), {WASM_BLOCK_L(WASM_I64V_1(0))});
+ ExpectFailure(sigs.i_i(), {WASM_BLOCK_F(WASM_F32(0.0))});
+ ExpectFailure(sigs.i_i(), {WASM_BLOCK_D(WASM_F64(1.1))});
- EXPECT_FAILURE(l_l, WASM_BLOCK_I(WASM_ZERO));
- EXPECT_FAILURE(l_l, WASM_BLOCK_F(WASM_F32(0.0)));
- EXPECT_FAILURE(l_l, WASM_BLOCK_D(WASM_F64(1.1)));
+ ExpectFailure(sigs.l_l(), {WASM_BLOCK_I(WASM_ZERO)});
+ ExpectFailure(sigs.l_l(), {WASM_BLOCK_F(WASM_F32(0.0))});
+ ExpectFailure(sigs.l_l(), {WASM_BLOCK_D(WASM_F64(1.1))});
- EXPECT_FAILURE(f_ff, WASM_BLOCK_I(WASM_ZERO));
- EXPECT_FAILURE(f_ff, WASM_BLOCK_L(WASM_I64V_1(0)));
- EXPECT_FAILURE(f_ff, WASM_BLOCK_D(WASM_F64(1.1)));
+ ExpectFailure(sigs.f_ff(), {WASM_BLOCK_I(WASM_ZERO)});
+ ExpectFailure(sigs.f_ff(), {WASM_BLOCK_L(WASM_I64V_1(0))});
+ ExpectFailure(sigs.f_ff(), {WASM_BLOCK_D(WASM_F64(1.1))});
- EXPECT_FAILURE(d_dd, WASM_BLOCK_I(WASM_ZERO));
- EXPECT_FAILURE(d_dd, WASM_BLOCK_L(WASM_I64V_1(0)));
- EXPECT_FAILURE(d_dd, WASM_BLOCK_F(WASM_F32(0.0)));
+ ExpectFailure(sigs.d_dd(), {WASM_BLOCK_I(WASM_ZERO)});
+ ExpectFailure(sigs.d_dd(), {WASM_BLOCK_L(WASM_I64V_1(0))});
+ ExpectFailure(sigs.d_dd(), {WASM_BLOCK_F(WASM_F32(0.0))});
}
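
Every pairing in the block-type tests comes down to one byte of encoding: a block is the block opcode, a block-type byte, the body, then end, exactly as the WASM_EMPTY_BLOCK macro earlier in this hunk spells out for the void case. The byte values below are from the wasm spec (block = 0x02, end = 0x0B, void = 0x40, i32 = 0x7F, f32 = 0x7D); the arrays are illustrative, not the macro expansions:

#include <cstdint>
using byte = uint8_t;

constexpr byte kBlock = 0x02, kEnd = 0x0B;
constexpr byte kVoid = 0x40, kI32 = 0x7F, kF32 = 0x7D;

byte empty_block[] = {kBlock, kVoid, kEnd};             // ~ WASM_BLOCK()
byte i32_block[]   = {kBlock, kI32, 0x41, 0x00, kEnd};  // ~ WASM_BLOCK_I(i32.const 0)
byte bad_block[]   = {kBlock, kF32, 0x41, 0x00, kEnd};  // type error: body yields i32, not f32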
TEST_F(FunctionBodyDecoderTest, BlockF32) {
static const byte code[] = {WASM_BLOCK_F(kExprF32Const, 0, 0, 0, 0)};
- EXPECT_VERIFIES_C(f_ff, code);
- EXPECT_FAILURE_C(i_i, code);
- EXPECT_FAILURE_C(d_dd, code);
+ ExpectValidates(sigs.f_ff(), code);
+ ExpectFailure(sigs.i_i(), code);
+ ExpectFailure(sigs.d_dd(), code);
}
TEST_F(FunctionBodyDecoderTest, BlockN_off_end) {
byte code[] = {WASM_BLOCK(kExprNop, kExprNop, kExprNop, kExprNop)};
- EXPECT_VERIFIES_C(v_v, code);
+ ExpectValidates(sigs.v_v(), code);
for (size_t i = 1; i < arraysize(code); i++) {
- Verify(false, sigs.v_v(), {code, i}, kAppendEnd);
- Verify(false, sigs.v_v(), {code, i}, kOmitEnd);
+ ExpectFailure(sigs.v_v(), VectorOf(code, i), kAppendEnd);
+ ExpectFailure(sigs.v_v(), VectorOf(code, i), kOmitEnd);
}
}
TEST_F(FunctionBodyDecoderTest, Block2_continue) {
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_NOP, WASM_BR(0)));
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_NOP, WASM_BR(1)));
- EXPECT_FAILURE(v_v, WASM_LOOP(WASM_NOP, WASM_BR(2)));
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_NOP, WASM_BR(0))});
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_NOP, WASM_BR(1))});
+ ExpectFailure(sigs.v_v(), {WASM_LOOP(WASM_NOP, WASM_BR(2))});
}
TEST_F(FunctionBodyDecoderTest, Block3_continue) {
- EXPECT_VERIFIES(v_v, B1(WASM_LOOP(WASM_NOP, WASM_BR(0))));
- EXPECT_VERIFIES(v_v, B1(WASM_LOOP(WASM_NOP, WASM_BR(1))));
- EXPECT_VERIFIES(v_v, B1(WASM_LOOP(WASM_NOP, WASM_BR(2))));
- EXPECT_FAILURE(v_v, B1(WASM_LOOP(WASM_NOP, WASM_BR(3))));
+ ExpectValidates(sigs.v_v(), {B1(WASM_LOOP(WASM_NOP, WASM_BR(0)))});
+ ExpectValidates(sigs.v_v(), {B1(WASM_LOOP(WASM_NOP, WASM_BR(1)))});
+ ExpectValidates(sigs.v_v(), {B1(WASM_LOOP(WASM_NOP, WASM_BR(2)))});
+ ExpectFailure(sigs.v_v(), {B1(WASM_LOOP(WASM_NOP, WASM_BR(3)))});
}
TEST_F(FunctionBodyDecoderTest, NestedBlock_return) {
- EXPECT_VERIFIES(i_i, B1(B1(WASM_RETURN1(WASM_ZERO))), WASM_ZERO);
+ ExpectValidates(sigs.i_i(), {B1(B1(WASM_RETURN1(WASM_ZERO))), WASM_ZERO});
}
TEST_F(FunctionBodyDecoderTest, BlockBrBinop) {
- EXPECT_VERIFIES(i_i, WASM_I32_AND(WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(1))),
- WASM_I32V_1(2)));
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_AND(WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(1))),
+ WASM_I32V_1(2))});
}
TEST_F(FunctionBodyDecoderTest, If_empty1) {
- EXPECT_VERIFIES(v_v, WASM_ZERO, WASM_IF_OP, kExprEnd);
+ ExpectValidates(sigs.v_v(), {WASM_ZERO, WASM_IF_OP, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, If_empty2) {
- EXPECT_VERIFIES(v_v, WASM_ZERO, WASM_IF_OP, kExprElse, kExprEnd);
+ ExpectValidates(sigs.v_v(), {WASM_ZERO, WASM_IF_OP, kExprElse, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, If_empty3) {
- EXPECT_VERIFIES(v_v, WASM_ZERO, WASM_IF_OP, WASM_NOP, kExprElse, kExprEnd);
- EXPECT_FAILURE(v_v, WASM_ZERO, WASM_IF_OP, WASM_ZERO, kExprElse, kExprEnd);
+ ExpectValidates(sigs.v_v(),
+ {WASM_ZERO, WASM_IF_OP, WASM_NOP, kExprElse, kExprEnd});
+ ExpectFailure(sigs.v_v(),
+ {WASM_ZERO, WASM_IF_OP, WASM_ZERO, kExprElse, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, If_empty4) {
- EXPECT_VERIFIES(v_v, WASM_ZERO, WASM_IF_OP, kExprElse, WASM_NOP, kExprEnd);
- EXPECT_FAILURE(v_v, WASM_ZERO, WASM_IF_OP, kExprElse, WASM_ZERO, kExprEnd);
+ ExpectValidates(sigs.v_v(),
+ {WASM_ZERO, WASM_IF_OP, kExprElse, WASM_NOP, kExprEnd});
+ ExpectFailure(sigs.v_v(),
+ {WASM_ZERO, WASM_IF_OP, kExprElse, WASM_ZERO, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, If_empty_stack) {
byte code[] = {kExprIf};
- EXPECT_FAILURE_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
}
TEST_F(FunctionBodyDecoderTest, If_incomplete1) {
byte code[] = {kExprI32Const, 0, kExprIf};
- EXPECT_FAILURE_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
}
TEST_F(FunctionBodyDecoderTest, If_incomplete2) {
byte code[] = {kExprI32Const, 0, kExprIf, kExprNop};
- EXPECT_FAILURE_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
}
TEST_F(FunctionBodyDecoderTest, If_else_else) {
byte code[] = {kExprI32Const, 0, WASM_IF_OP, kExprElse, kExprElse, kExprEnd};
- EXPECT_FAILURE_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
}
TEST_F(FunctionBodyDecoderTest, IfEmpty) {
- EXPECT_VERIFIES(v_i, kExprGetLocal, 0, WASM_IF_OP, kExprEnd);
+ ExpectValidates(sigs.v_i(), {kExprGetLocal, 0, WASM_IF_OP, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, IfSet) {
- EXPECT_VERIFIES(v_i,
- WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO)));
- EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0),
- WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
+ ExpectValidates(sigs.v_i(),
+ {WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO))});
+ ExpectValidates(sigs.v_i(),
+ {WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, IfElseEmpty) {
- EXPECT_VERIFIES(v_i, WASM_GET_LOCAL(0), WASM_IF_OP, kExprElse, kExprEnd);
- EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
+ ExpectValidates(sigs.v_i(),
+ {WASM_GET_LOCAL(0), WASM_IF_OP, kExprElse, kExprEnd});
+ ExpectValidates(sigs.v_i(),
+ {WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, IfElseUnreachable1) {
- EXPECT_VERIFIES(i_i, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_UNREACHABLE,
- WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES(i_i, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
- WASM_UNREACHABLE));
+ ExpectValidates(
+ sigs.i_i(),
+ {WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_UNREACHABLE, WASM_GET_LOCAL(0))});
+ ExpectValidates(
+ sigs.i_i(),
+ {WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_UNREACHABLE)});
}
TEST_F(FunctionBodyDecoderTest, IfElseUnreachable2) {
@@ -788,247 +798,258 @@ TEST_F(FunctionBodyDecoderTest, IfElseUnreachable2) {
ValueType types[] = {kWasmI32, kValueTypes[i]};
FunctionSig sig(1, 1, types);
- if (kValueTypes[i] == kWasmI32) {
- EXPECT_VERIFIES_SC(&sig, code);
- } else {
- EXPECT_FAILURE_SC(&sig, code);
- }
+ Validate(kValueTypes[i] == kWasmI32, &sig, code);
}
}
TEST_F(FunctionBodyDecoderTest, OneArmedIfWithArity) {
static const byte code[] = {WASM_ZERO, kExprIf, kLocalI32, WASM_ONE,
kExprEnd};
- EXPECT_FAILURE_C(i_v, code,
- "start-arity and end-arity of one-armed if must match");
+ ExpectFailure(sigs.i_v(), code, kAppendEnd,
+ "start-arity and end-arity of one-armed if must match");
}
TEST_F(FunctionBodyDecoderTest, IfBreak) {
- EXPECT_VERIFIES(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_BR(0)));
- EXPECT_VERIFIES(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_BR(1)));
- EXPECT_FAILURE(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_BR(2)));
+ ExpectValidates(sigs.v_i(), {WASM_IF(WASM_GET_LOCAL(0), WASM_BR(0))});
+ ExpectValidates(sigs.v_i(), {WASM_IF(WASM_GET_LOCAL(0), WASM_BR(1))});
+ ExpectFailure(sigs.v_i(), {WASM_IF(WASM_GET_LOCAL(0), WASM_BR(2))});
}
TEST_F(FunctionBodyDecoderTest, IfElseBreak) {
- EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(0)));
- EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(1)));
- EXPECT_FAILURE(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(2)));
+ ExpectValidates(sigs.v_i(),
+ {WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(0))});
+ ExpectValidates(sigs.v_i(),
+ {WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(1))});
+ ExpectFailure(sigs.v_i(),
+ {WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(2))});
}
TEST_F(FunctionBodyDecoderTest, Block_else) {
byte code[] = {kExprI32Const, 0, kExprBlock, kExprElse, kExprEnd};
- EXPECT_FAILURE_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
}
TEST_F(FunctionBodyDecoderTest, IfNop) {
- EXPECT_VERIFIES(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
- EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
+ ExpectValidates(sigs.v_i(), {WASM_IF(WASM_GET_LOCAL(0), WASM_NOP)});
+ ExpectValidates(sigs.v_i(),
+ {WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, If_end) {
- EXPECT_VERIFIES(v_i, kExprGetLocal, 0, WASM_IF_OP, kExprEnd);
- EXPECT_FAILURE(v_i, kExprGetLocal, 0, WASM_IF_OP, kExprEnd, kExprEnd);
+ ExpectValidates(sigs.v_i(), {kExprGetLocal, 0, WASM_IF_OP, kExprEnd});
+ ExpectFailure(sigs.v_i(), {kExprGetLocal, 0, WASM_IF_OP, kExprEnd, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, If_falloff1) {
- EXPECT_FAILURE(v_i, kExprGetLocal, 0, kExprIf);
- EXPECT_FAILURE(v_i, kExprGetLocal, 0, WASM_IF_OP);
- EXPECT_FAILURE(v_i, kExprGetLocal, 0, WASM_IF_OP, kExprNop, kExprElse);
+ ExpectFailure(sigs.v_i(), {kExprGetLocal, 0, kExprIf});
+ ExpectFailure(sigs.v_i(), {kExprGetLocal, 0, WASM_IF_OP});
+ ExpectFailure(sigs.v_i(),
+ {kExprGetLocal, 0, WASM_IF_OP, kExprNop, kExprElse});
}
TEST_F(FunctionBodyDecoderTest, IfElseNop) {
- EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0),
- WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
+ ExpectValidates(sigs.v_i(),
+ {WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, IfBlock1) {
- EXPECT_VERIFIES(
- v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), B1(WASM_SET_LOCAL(0, WASM_ZERO)),
- WASM_NOP));
+ ExpectValidates(sigs.v_i(),
+ {WASM_IF_ELSE(WASM_GET_LOCAL(0),
+ B1(WASM_SET_LOCAL(0, WASM_ZERO)), WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, IfBlock1b) {
- EXPECT_VERIFIES(v_i,
- WASM_IF(WASM_GET_LOCAL(0), B1(WASM_SET_LOCAL(0, WASM_ZERO))));
+ ExpectValidates(sigs.v_i(), {WASM_IF(WASM_GET_LOCAL(0),
+ B1(WASM_SET_LOCAL(0, WASM_ZERO)))});
}
TEST_F(FunctionBodyDecoderTest, IfBlock2a) {
- EXPECT_VERIFIES(v_i,
- WASM_IF(WASM_GET_LOCAL(0), B2(WASM_SET_LOCAL(0, WASM_ZERO),
- WASM_SET_LOCAL(0, WASM_ZERO))));
+ ExpectValidates(sigs.v_i(), {WASM_IF(WASM_GET_LOCAL(0),
+ B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)))});
}
TEST_F(FunctionBodyDecoderTest, IfBlock2b) {
- EXPECT_VERIFIES(
- v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), B2(WASM_SET_LOCAL(0, WASM_ZERO),
- WASM_SET_LOCAL(0, WASM_ZERO)),
- WASM_NOP));
+ ExpectValidates(sigs.v_i(), {WASM_IF_ELSE(WASM_GET_LOCAL(0),
+ B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)),
+ WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, IfElseSet) {
- EXPECT_VERIFIES(v_i,
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO),
- WASM_SET_LOCAL(0, WASM_I32V_1(1))));
+ ExpectValidates(sigs.v_i(),
+ {WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_I32V_1(1)))});
}
TEST_F(FunctionBodyDecoderTest, Loop0) {
- EXPECT_VERIFIES(v_v, WASM_LOOP_OP, kExprEnd);
+ ExpectValidates(sigs.v_v(), {WASM_LOOP_OP, kExprEnd});
}
TEST_F(FunctionBodyDecoderTest, Loop1) {
static const byte code[] = {WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO))};
- EXPECT_VERIFIES_C(v_i, code);
- EXPECT_FAILURE_C(v_v, code);
- EXPECT_FAILURE_C(f_ff, code);
+ ExpectValidates(sigs.v_i(), code);
+ ExpectFailure(sigs.v_v(), code);
+ ExpectFailure(sigs.f_ff(), code);
}
TEST_F(FunctionBodyDecoderTest, Loop2) {
- EXPECT_VERIFIES(v_i, WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO),
- WASM_SET_LOCAL(0, WASM_ZERO)));
+ ExpectValidates(sigs.v_i(), {WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO))});
}
TEST_F(FunctionBodyDecoderTest, Loop1_continue) {
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_BR(0)));
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_BR(0))});
}
TEST_F(FunctionBodyDecoderTest, Loop1_break) {
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_BR(1)));
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_BR(1))});
}
TEST_F(FunctionBodyDecoderTest, Loop2_continue) {
- EXPECT_VERIFIES(v_i, WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(0)));
+ ExpectValidates(sigs.v_i(),
+ {WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(0))});
}
TEST_F(FunctionBodyDecoderTest, Loop2_break) {
- EXPECT_VERIFIES(v_i, WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(1)));
+ ExpectValidates(sigs.v_i(),
+ {WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(1))});
}
TEST_F(FunctionBodyDecoderTest, InfiniteLoop1) {
- EXPECT_VERIFIES(i_i, WASM_LOOP(WASM_BR(0)), WASM_ZERO);
- EXPECT_VERIFIES(i_i, WASM_LOOP(WASM_BR(0)), WASM_ZERO);
- EXPECT_VERIFIES(i_i, WASM_LOOP_I(WASM_BRV(1, WASM_ZERO)));
+ ExpectValidates(sigs.i_i(), {WASM_LOOP(WASM_BR(0)), WASM_ZERO});
+ ExpectValidates(sigs.i_i(), {WASM_LOOP(WASM_BR(0)), WASM_ZERO});
+ ExpectValidates(sigs.i_i(), {WASM_LOOP_I(WASM_BRV(1, WASM_ZERO))});
}
TEST_F(FunctionBodyDecoderTest, InfiniteLoop2) {
- EXPECT_FAILURE(i_i, WASM_LOOP(WASM_BR(0), WASM_ZERO), WASM_ZERO);
+ ExpectFailure(sigs.i_i(), {WASM_LOOP(WASM_BR(0), WASM_ZERO), WASM_ZERO});
}
TEST_F(FunctionBodyDecoderTest, Loop2_unreachable) {
- EXPECT_VERIFIES(i_i, WASM_LOOP_I(WASM_BR(0), WASM_NOP));
+ ExpectValidates(sigs.i_i(), {WASM_LOOP_I(WASM_BR(0), WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, LoopType) {
- EXPECT_VERIFIES(i_i, WASM_LOOP_I(WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES(l_l, WASM_LOOP_L(WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES(f_f, WASM_LOOP_F(WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES(d_d, WASM_LOOP_D(WASM_GET_LOCAL(0)));
+ ExpectValidates(sigs.i_i(), {WASM_LOOP_I(WASM_GET_LOCAL(0))});
+ ExpectValidates(sigs.l_l(), {WASM_LOOP_L(WASM_GET_LOCAL(0))});
+ ExpectValidates(sigs.f_f(), {WASM_LOOP_F(WASM_GET_LOCAL(0))});
+ ExpectValidates(sigs.d_d(), {WASM_LOOP_D(WASM_GET_LOCAL(0))});
}
TEST_F(FunctionBodyDecoderTest, LoopType_void) {
- EXPECT_FAILURE(v_v, WASM_LOOP_I(WASM_ZERO));
- EXPECT_FAILURE(v_v, WASM_LOOP_L(WASM_I64V_1(0)));
- EXPECT_FAILURE(v_v, WASM_LOOP_F(WASM_F32(0.0)));
- EXPECT_FAILURE(v_v, WASM_LOOP_D(WASM_F64(1.1)));
+ ExpectFailure(sigs.v_v(), {WASM_LOOP_I(WASM_ZERO)});
+ ExpectFailure(sigs.v_v(), {WASM_LOOP_L(WASM_I64V_1(0))});
+ ExpectFailure(sigs.v_v(), {WASM_LOOP_F(WASM_F32(0.0))});
+ ExpectFailure(sigs.v_v(), {WASM_LOOP_D(WASM_F64(1.1))});
}
TEST_F(FunctionBodyDecoderTest, LoopType_fail) {
- EXPECT_FAILURE(i_i, WASM_LOOP_L(WASM_I64V_1(0)));
- EXPECT_FAILURE(i_i, WASM_LOOP_F(WASM_F32(0.0)));
- EXPECT_FAILURE(i_i, WASM_LOOP_D(WASM_F64(1.1)));
+ ExpectFailure(sigs.i_i(), {WASM_LOOP_L(WASM_I64V_1(0))});
+ ExpectFailure(sigs.i_i(), {WASM_LOOP_F(WASM_F32(0.0))});
+ ExpectFailure(sigs.i_i(), {WASM_LOOP_D(WASM_F64(1.1))});
- EXPECT_FAILURE(l_l, WASM_LOOP_I(WASM_ZERO));
- EXPECT_FAILURE(l_l, WASM_LOOP_F(WASM_F32(0.0)));
- EXPECT_FAILURE(l_l, WASM_LOOP_D(WASM_F64(1.1)));
+ ExpectFailure(sigs.l_l(), {WASM_LOOP_I(WASM_ZERO)});
+ ExpectFailure(sigs.l_l(), {WASM_LOOP_F(WASM_F32(0.0))});
+ ExpectFailure(sigs.l_l(), {WASM_LOOP_D(WASM_F64(1.1))});
- EXPECT_FAILURE(f_ff, WASM_LOOP_I(WASM_ZERO));
- EXPECT_FAILURE(f_ff, WASM_LOOP_L(WASM_I64V_1(0)));
- EXPECT_FAILURE(f_ff, WASM_LOOP_D(WASM_F64(1.1)));
+ ExpectFailure(sigs.f_ff(), {WASM_LOOP_I(WASM_ZERO)});
+ ExpectFailure(sigs.f_ff(), {WASM_LOOP_L(WASM_I64V_1(0))});
+ ExpectFailure(sigs.f_ff(), {WASM_LOOP_D(WASM_F64(1.1))});
- EXPECT_FAILURE(d_dd, WASM_LOOP_I(WASM_ZERO));
- EXPECT_FAILURE(d_dd, WASM_LOOP_L(WASM_I64V_1(0)));
- EXPECT_FAILURE(d_dd, WASM_LOOP_F(WASM_F32(0.0)));
+ ExpectFailure(sigs.d_dd(), {WASM_LOOP_I(WASM_ZERO)});
+ ExpectFailure(sigs.d_dd(), {WASM_LOOP_L(WASM_I64V_1(0))});
+ ExpectFailure(sigs.d_dd(), {WASM_LOOP_F(WASM_F32(0.0))});
}
TEST_F(FunctionBodyDecoderTest, ReturnVoid1) {
static const byte code[] = {kExprNop};
- EXPECT_VERIFIES_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
- EXPECT_FAILURE_C(i_f, code);
+ ExpectValidates(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
+ ExpectFailure(sigs.i_f(), code);
}
TEST_F(FunctionBodyDecoderTest, ReturnVoid2) {
static const byte code[] = {WASM_BLOCK(WASM_BR(0))};
- EXPECT_VERIFIES_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
- EXPECT_FAILURE_C(i_f, code);
+ ExpectValidates(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
+ ExpectFailure(sigs.i_f(), code);
}
TEST_F(FunctionBodyDecoderTest, ReturnVoid3) {
- EXPECT_FAILURE(v_v, kExprI32Const, 0);
- EXPECT_FAILURE(v_v, kExprI64Const, 0);
- EXPECT_FAILURE(v_v, kExprF32Const, 0, 0, 0, 0);
- EXPECT_FAILURE(v_v, kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0);
- EXPECT_FAILURE(v_v, kExprRefNull);
+ ExpectFailure(sigs.v_v(), {kExprI32Const, 0});
+ ExpectFailure(sigs.v_v(), {kExprI64Const, 0});
+ ExpectFailure(sigs.v_v(), {kExprF32Const, 0, 0, 0, 0});
+ ExpectFailure(sigs.v_v(), {kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0});
+ ExpectFailure(sigs.v_v(), {kExprRefNull});
- EXPECT_FAILURE(v_i, kExprGetLocal, 0);
+ ExpectFailure(sigs.v_i(), {kExprGetLocal, 0});
}
TEST_F(FunctionBodyDecoderTest, Unreachable1) {
- EXPECT_VERIFIES(v_v, WASM_UNREACHABLE);
- EXPECT_VERIFIES(v_v, WASM_UNREACHABLE, WASM_UNREACHABLE);
- EXPECT_VERIFIES(i_i, WASM_UNREACHABLE, WASM_ZERO);
+ ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE});
+ ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, WASM_UNREACHABLE});
+ ExpectValidates(sigs.i_i(), {WASM_UNREACHABLE, WASM_ZERO});
}
TEST_F(FunctionBodyDecoderTest, Unreachable2) {
- EXPECT_FAILURE(v_v, B2(WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_FAILURE(v_v, B2(WASM_BR(0), WASM_ZERO));
+ ExpectFailure(sigs.v_v(), {B2(WASM_UNREACHABLE, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(), {B2(WASM_BR(0), WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, UnreachableLoop1) {
- EXPECT_FAILURE(v_v, WASM_LOOP(WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_FAILURE(v_v, WASM_LOOP(WASM_BR(0), WASM_ZERO));
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_UNREACHABLE, WASM_NOP));
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_BR(0), WASM_NOP));
+ ExpectFailure(sigs.v_v(), {WASM_LOOP(WASM_UNREACHABLE, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(), {WASM_LOOP(WASM_BR(0), WASM_ZERO)});
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_UNREACHABLE, WASM_NOP)});
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_BR(0), WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, Unreachable_binop1) {
- EXPECT_VERIFIES(i_i, WASM_I32_AND(WASM_ZERO, WASM_UNREACHABLE));
- EXPECT_VERIFIES(i_i, WASM_I32_AND(WASM_UNREACHABLE, WASM_ZERO));
+ ExpectValidates(sigs.i_i(), {WASM_I32_AND(WASM_ZERO, WASM_UNREACHABLE)});
+ ExpectValidates(sigs.i_i(), {WASM_I32_AND(WASM_UNREACHABLE, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, Unreachable_binop2) {
- EXPECT_VERIFIES(i_i, WASM_I32_AND(WASM_F32(0.0), WASM_UNREACHABLE));
- EXPECT_FAILURE(i_i, WASM_I32_AND(WASM_UNREACHABLE, WASM_F32(0.0)));
+ ExpectValidates(sigs.i_i(), {WASM_I32_AND(WASM_F32(0.0), WASM_UNREACHABLE)});
+ ExpectFailure(sigs.i_i(), {WASM_I32_AND(WASM_UNREACHABLE, WASM_F32(0.0))});
}
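
The asymmetry in Unreachable_binop2 is the decoder's polymorphic-stack rule: after unreachable, missing operands can be conjured with whatever type the next instruction needs, but values pushed after it are concrete again. Traced for the two cases (a worked example, not decoder output):

// f32.const 0.0 ; unreachable ; i32.and  -> validates:
//   i32.and pops both operands from the polymorphic stack.
// unreachable ; f32.const 0.0 ; i32.and  -> fails:
//   a concrete f32 sits on top, and i32.and requires i32.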
TEST_F(FunctionBodyDecoderTest, Unreachable_select1) {
- EXPECT_VERIFIES(i_i, WASM_SELECT(WASM_UNREACHABLE, WASM_ZERO, WASM_ZERO));
- EXPECT_VERIFIES(i_i, WASM_SELECT(WASM_ZERO, WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_VERIFIES(i_i, WASM_SELECT(WASM_ZERO, WASM_ZERO, WASM_UNREACHABLE));
+ ExpectValidates(sigs.i_i(),
+ {WASM_SELECT(WASM_UNREACHABLE, WASM_ZERO, WASM_ZERO)});
+ ExpectValidates(sigs.i_i(),
+ {WASM_SELECT(WASM_ZERO, WASM_UNREACHABLE, WASM_ZERO)});
+ ExpectValidates(sigs.i_i(),
+ {WASM_SELECT(WASM_ZERO, WASM_ZERO, WASM_UNREACHABLE)});
}
TEST_F(FunctionBodyDecoderTest, Unreachable_select2) {
- EXPECT_VERIFIES(i_i, WASM_SELECT(WASM_F32(0.0), WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_FAILURE(i_i, WASM_SELECT(WASM_UNREACHABLE, WASM_F32(0.0), WASM_ZERO));
- EXPECT_FAILURE(i_i, WASM_SELECT(WASM_UNREACHABLE, WASM_ZERO, WASM_F32(0.0)));
+ ExpectValidates(sigs.i_i(),
+ {WASM_SELECT(WASM_F32(0.0), WASM_UNREACHABLE, WASM_ZERO)});
+ ExpectFailure(sigs.i_i(),
+ {WASM_SELECT(WASM_UNREACHABLE, WASM_F32(0.0), WASM_ZERO)});
+ ExpectFailure(sigs.i_i(),
+ {WASM_SELECT(WASM_UNREACHABLE, WASM_ZERO, WASM_F32(0.0))});
}
TEST_F(FunctionBodyDecoderTest, If1) {
- EXPECT_VERIFIES(
- i_i, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I32V_1(9), WASM_I32V_1(8)));
- EXPECT_VERIFIES(i_i, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I32V_1(9),
- WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES(i_i, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
- WASM_I32V_1(8)));
+ ExpectValidates(sigs.i_i(), {WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I32V_1(9),
+ WASM_I32V_1(8))});
+ ExpectValidates(sigs.i_i(), {WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I32V_1(9),
+ WASM_GET_LOCAL(0))});
+ ExpectValidates(
+ sigs.i_i(),
+ {WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_I32V_1(8))});
}
TEST_F(FunctionBodyDecoderTest, If_off_end) {
static const byte kCode[] = {
WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0))};
for (size_t len = 3; len < arraysize(kCode); len++) {
- Verify(false, sigs.i_i(), {kCode, len}, kAppendEnd);
- Verify(false, sigs.i_i(), {kCode, len}, kOmitEnd);
+ ExpectFailure(sigs.i_i(), VectorOf(kCode, len), kAppendEnd);
+ ExpectFailure(sigs.i_i(), VectorOf(kCode, len), kOmitEnd);
}
}
@@ -1036,56 +1057,56 @@ TEST_F(FunctionBodyDecoderTest, If_type1) {
  // float|double ? 0 : 2
static const byte kCode[] = {
WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I32V_1(0), WASM_I32V_1(2))};
- EXPECT_VERIFIES_C(i_i, kCode);
- EXPECT_FAILURE_C(i_f, kCode);
- EXPECT_FAILURE_C(i_d, kCode);
+ ExpectValidates(sigs.i_i(), kCode);
+ ExpectFailure(sigs.i_f(), kCode);
+ ExpectFailure(sigs.i_d(), kCode);
}
TEST_F(FunctionBodyDecoderTest, If_type2) {
  // 1 ? float|double : 1
static const byte kCode[] = {
WASM_IF_ELSE_I(WASM_I32V_1(1), WASM_GET_LOCAL(0), WASM_I32V_1(1))};
- EXPECT_VERIFIES_C(i_i, kCode);
- EXPECT_FAILURE_C(i_f, kCode);
- EXPECT_FAILURE_C(i_d, kCode);
+ ExpectValidates(sigs.i_i(), kCode);
+ ExpectFailure(sigs.i_f(), kCode);
+ ExpectFailure(sigs.i_d(), kCode);
}
TEST_F(FunctionBodyDecoderTest, If_type3) {
// stmt ? 0 : 1
static const byte kCode[] = {
WASM_IF_ELSE_I(WASM_NOP, WASM_I32V_1(0), WASM_I32V_1(1))};
- EXPECT_FAILURE_C(i_i, kCode);
- EXPECT_FAILURE_C(i_f, kCode);
- EXPECT_FAILURE_C(i_d, kCode);
+ ExpectFailure(sigs.i_i(), kCode);
+ ExpectFailure(sigs.i_f(), kCode);
+ ExpectFailure(sigs.i_d(), kCode);
}
TEST_F(FunctionBodyDecoderTest, If_type4) {
// 0 ? stmt : 1
static const byte kCode[] = {
WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_NOP, WASM_I32V_1(1))};
- EXPECT_FAILURE_C(i_i, kCode);
- EXPECT_FAILURE_C(i_f, kCode);
- EXPECT_FAILURE_C(i_d, kCode);
+ ExpectFailure(sigs.i_i(), kCode);
+ ExpectFailure(sigs.i_f(), kCode);
+ ExpectFailure(sigs.i_d(), kCode);
}
TEST_F(FunctionBodyDecoderTest, If_type5) {
// 0 ? 1 : stmt
static const byte kCode[] = {
WASM_IF_ELSE_I(WASM_ZERO, WASM_I32V_1(1), WASM_NOP)};
- EXPECT_FAILURE_C(i_i, kCode);
- EXPECT_FAILURE_C(i_f, kCode);
- EXPECT_FAILURE_C(i_d, kCode);
+ ExpectFailure(sigs.i_i(), kCode);
+ ExpectFailure(sigs.i_f(), kCode);
+ ExpectFailure(sigs.i_d(), kCode);
}
TEST_F(FunctionBodyDecoderTest, Int64Local_param) {
- EXPECT_VERIFIES_C(l_l, kCodeGetLocal0);
+ ExpectValidates(sigs.l_l(), kCodeGetLocal0);
}
TEST_F(FunctionBodyDecoderTest, Int64Locals) {
for (byte i = 1; i < 8; i++) {
AddLocals(kWasmI64, 1);
for (byte j = 0; j < i; j++) {
- EXPECT_VERIFIES(l_v, WASM_GET_LOCAL(j));
+ ExpectValidates(sigs.l_v(), {WASM_GET_LOCAL(j)});
}
}
}
@@ -1150,47 +1171,49 @@ TEST_F(FunctionBodyDecoderTest, MacrosStmt) {
TestModuleBuilder builder;
module = builder.module();
builder.InitializeMemory();
- EXPECT_VERIFIES(v_i, WASM_SET_LOCAL(0, WASM_I32V_3(87348)));
- EXPECT_VERIFIES(v_i, WASM_STORE_MEM(MachineType::Int32(), WASM_I32V_1(24),
- WASM_I32V_1(40)));
- EXPECT_VERIFIES(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
- EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
- EXPECT_VERIFIES(v_v, WASM_NOP);
- EXPECT_VERIFIES(v_v, B1(WASM_NOP));
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_NOP));
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_BR(0)));
+ ExpectValidates(sigs.v_i(), {WASM_SET_LOCAL(0, WASM_I32V_3(87348))});
+ ExpectValidates(
+ sigs.v_i(),
+ {WASM_STORE_MEM(MachineType::Int32(), WASM_I32V_1(24), WASM_I32V_1(40))});
+ ExpectValidates(sigs.v_i(), {WASM_IF(WASM_GET_LOCAL(0), WASM_NOP)});
+ ExpectValidates(sigs.v_i(),
+ {WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP)});
+ ExpectValidates(sigs.v_v(), {WASM_NOP});
+ ExpectValidates(sigs.v_v(), {B1(WASM_NOP)});
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_NOP)});
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_BR(0))});
}
TEST_F(FunctionBodyDecoderTest, MacrosContinue) {
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_CONTINUE(0)));
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_CONTINUE(0))});
}
TEST_F(FunctionBodyDecoderTest, MacrosVariadic) {
- EXPECT_VERIFIES(v_v, B2(WASM_NOP, WASM_NOP));
- EXPECT_VERIFIES(v_v, B3(WASM_NOP, WASM_NOP, WASM_NOP));
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_NOP, WASM_NOP));
- EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_NOP, WASM_NOP, WASM_NOP));
+ ExpectValidates(sigs.v_v(), {B2(WASM_NOP, WASM_NOP)});
+ ExpectValidates(sigs.v_v(), {B3(WASM_NOP, WASM_NOP, WASM_NOP)});
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_NOP, WASM_NOP)});
+ ExpectValidates(sigs.v_v(), {WASM_LOOP(WASM_NOP, WASM_NOP, WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, MacrosNestedBlocks) {
- EXPECT_VERIFIES(v_v, B2(WASM_NOP, B2(WASM_NOP, WASM_NOP)));
- EXPECT_VERIFIES(v_v, B3(WASM_NOP, // --
- B2(WASM_NOP, WASM_NOP), // --
- B2(WASM_NOP, WASM_NOP))); // --
- EXPECT_VERIFIES(v_v, B1(B1(B2(WASM_NOP, WASM_NOP))));
+ ExpectValidates(sigs.v_v(), {B2(WASM_NOP, B2(WASM_NOP, WASM_NOP))});
+ ExpectValidates(sigs.v_v(), {B3(WASM_NOP, // --
+ B2(WASM_NOP, WASM_NOP), // --
+ B2(WASM_NOP, WASM_NOP))}); // --
+ ExpectValidates(sigs.v_v(), {B1(B1(B2(WASM_NOP, WASM_NOP)))});
}
TEST_F(FunctionBodyDecoderTest, MultipleReturn) {
static ValueType kIntTypes5[] = {kWasmI32, kWasmI32, kWasmI32, kWasmI32,
kWasmI32};
FunctionSig sig_ii_v(2, 0, kIntTypes5);
- EXPECT_VERIFIES_S(&sig_ii_v, WASM_RETURNN(2, WASM_ZERO, WASM_ONE));
- EXPECT_FAILURE_S(&sig_ii_v, WASM_RETURNN(1, WASM_ZERO));
+ ExpectValidates(&sig_ii_v, {WASM_RETURNN(2, WASM_ZERO, WASM_ONE)});
+ ExpectFailure(&sig_ii_v, {WASM_RETURNN(1, WASM_ZERO)});
FunctionSig sig_iii_v(3, 0, kIntTypes5);
- EXPECT_VERIFIES_S(&sig_iii_v,
- WASM_RETURNN(3, WASM_ZERO, WASM_ONE, WASM_I32V_1(44)));
- EXPECT_FAILURE_S(&sig_iii_v, WASM_RETURNN(2, WASM_ZERO, WASM_ONE));
+ ExpectValidates(&sig_iii_v,
+ {WASM_RETURNN(3, WASM_ZERO, WASM_ONE, WASM_I32V_1(44))});
+ ExpectFailure(&sig_iii_v, {WASM_RETURNN(2, WASM_ZERO, WASM_ONE)});
}
TEST_F(FunctionBodyDecoderTest, MultipleReturn_fallthru) {
@@ -1198,74 +1221,124 @@ TEST_F(FunctionBodyDecoderTest, MultipleReturn_fallthru) {
kWasmI32};
FunctionSig sig_ii_v(2, 0, kIntTypes5);
- EXPECT_VERIFIES_S(&sig_ii_v, WASM_ZERO, WASM_ONE);
- EXPECT_FAILURE_S(&sig_ii_v, WASM_ZERO);
+ ExpectValidates(&sig_ii_v, {WASM_ZERO, WASM_ONE});
+ ExpectFailure(&sig_ii_v, {WASM_ZERO});
FunctionSig sig_iii_v(3, 0, kIntTypes5);
- EXPECT_VERIFIES_S(&sig_iii_v, WASM_ZERO, WASM_ONE, WASM_I32V_1(44));
- EXPECT_FAILURE_S(&sig_iii_v, WASM_ZERO, WASM_ONE);
+ ExpectValidates(&sig_iii_v, {WASM_ZERO, WASM_ONE, WASM_I32V_1(44)});
+ ExpectFailure(&sig_iii_v, {WASM_ZERO, WASM_ONE});
}
TEST_F(FunctionBodyDecoderTest, MacrosInt32) {
- EXPECT_VERIFIES(i_i, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_I32V_1(12)));
- EXPECT_VERIFIES(i_i, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I32V_1(13)));
- EXPECT_VERIFIES(i_i, WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_I32V_1(14)));
- EXPECT_VERIFIES(i_i, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I32V_1(15)));
- EXPECT_VERIFIES(i_i, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I32V_1(16)));
- EXPECT_VERIFIES(i_i, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_I32V_1(17)));
- EXPECT_VERIFIES(i_i, WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_I32V_1(18)));
- EXPECT_VERIFIES(i_i, WASM_I32_AND(WASM_GET_LOCAL(0), WASM_I32V_1(19)));
- EXPECT_VERIFIES(i_i, WASM_I32_IOR(WASM_GET_LOCAL(0), WASM_I32V_1(20)));
- EXPECT_VERIFIES(i_i, WASM_I32_XOR(WASM_GET_LOCAL(0), WASM_I32V_1(21)));
- EXPECT_VERIFIES(i_i, WASM_I32_SHL(WASM_GET_LOCAL(0), WASM_I32V_1(22)));
- EXPECT_VERIFIES(i_i, WASM_I32_SHR(WASM_GET_LOCAL(0), WASM_I32V_1(23)));
- EXPECT_VERIFIES(i_i, WASM_I32_SAR(WASM_GET_LOCAL(0), WASM_I32V_1(24)));
- EXPECT_VERIFIES(i_i, WASM_I32_ROR(WASM_GET_LOCAL(0), WASM_I32V_1(24)));
- EXPECT_VERIFIES(i_i, WASM_I32_ROL(WASM_GET_LOCAL(0), WASM_I32V_1(24)));
- EXPECT_VERIFIES(i_i, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I32V_1(25)));
- EXPECT_VERIFIES(i_i, WASM_I32_NE(WASM_GET_LOCAL(0), WASM_I32V_1(25)));
-
- EXPECT_VERIFIES(i_i, WASM_I32_LTS(WASM_GET_LOCAL(0), WASM_I32V_1(26)));
- EXPECT_VERIFIES(i_i, WASM_I32_LES(WASM_GET_LOCAL(0), WASM_I32V_1(27)));
- EXPECT_VERIFIES(i_i, WASM_I32_LTU(WASM_GET_LOCAL(0), WASM_I32V_1(28)));
- EXPECT_VERIFIES(i_i, WASM_I32_LEU(WASM_GET_LOCAL(0), WASM_I32V_1(29)));
-
- EXPECT_VERIFIES(i_i, WASM_I32_GTS(WASM_GET_LOCAL(0), WASM_I32V_1(26)));
- EXPECT_VERIFIES(i_i, WASM_I32_GES(WASM_GET_LOCAL(0), WASM_I32V_1(27)));
- EXPECT_VERIFIES(i_i, WASM_I32_GTU(WASM_GET_LOCAL(0), WASM_I32V_1(28)));
- EXPECT_VERIFIES(i_i, WASM_I32_GEU(WASM_GET_LOCAL(0), WASM_I32V_1(29)));
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_I32V_1(12))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I32V_1(13))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_I32V_1(14))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I32V_1(15))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I32V_1(16))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_I32V_1(17))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_I32V_1(18))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_AND(WASM_GET_LOCAL(0), WASM_I32V_1(19))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_IOR(WASM_GET_LOCAL(0), WASM_I32V_1(20))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_XOR(WASM_GET_LOCAL(0), WASM_I32V_1(21))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_SHL(WASM_GET_LOCAL(0), WASM_I32V_1(22))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_SHR(WASM_GET_LOCAL(0), WASM_I32V_1(23))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_SAR(WASM_GET_LOCAL(0), WASM_I32V_1(24))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_ROR(WASM_GET_LOCAL(0), WASM_I32V_1(24))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_ROL(WASM_GET_LOCAL(0), WASM_I32V_1(24))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I32V_1(25))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_NE(WASM_GET_LOCAL(0), WASM_I32V_1(25))});
+
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_LTS(WASM_GET_LOCAL(0), WASM_I32V_1(26))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_LES(WASM_GET_LOCAL(0), WASM_I32V_1(27))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_LTU(WASM_GET_LOCAL(0), WASM_I32V_1(28))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_LEU(WASM_GET_LOCAL(0), WASM_I32V_1(29))});
+
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_GTS(WASM_GET_LOCAL(0), WASM_I32V_1(26))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_GES(WASM_GET_LOCAL(0), WASM_I32V_1(27))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_GTU(WASM_GET_LOCAL(0), WASM_I32V_1(28))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_I32_GEU(WASM_GET_LOCAL(0), WASM_I32V_1(29))});
}
TEST_F(FunctionBodyDecoderTest, MacrosInt64) {
- EXPECT_VERIFIES(l_ll, WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_I64V_1(12)));
- EXPECT_VERIFIES(l_ll, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_I64V_1(13)));
- EXPECT_VERIFIES(l_ll, WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_I64V_1(14)));
- EXPECT_VERIFIES(l_ll, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64V_1(15)));
- EXPECT_VERIFIES(l_ll, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64V_1(16)));
- EXPECT_VERIFIES(l_ll, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_I64V_1(17)));
- EXPECT_VERIFIES(l_ll, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_I64V_1(18)));
- EXPECT_VERIFIES(l_ll, WASM_I64_AND(WASM_GET_LOCAL(0), WASM_I64V_1(19)));
- EXPECT_VERIFIES(l_ll, WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
- EXPECT_VERIFIES(l_ll, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_I64V_1(21)));
-
- EXPECT_VERIFIES(l_ll, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(22)));
- EXPECT_VERIFIES(l_ll, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(23)));
- EXPECT_VERIFIES(l_ll, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
- EXPECT_VERIFIES(l_ll, WASM_I64_ROR(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
- EXPECT_VERIFIES(l_ll, WASM_I64_ROL(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
-
- EXPECT_VERIFIES(i_ll, WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_I64V_1(26)));
- EXPECT_VERIFIES(i_ll, WASM_I64_LES(WASM_GET_LOCAL(0), WASM_I64V_1(27)));
- EXPECT_VERIFIES(i_ll, WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_I64V_1(28)));
- EXPECT_VERIFIES(i_ll, WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_I64V_1(29)));
-
- EXPECT_VERIFIES(i_ll, WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_I64V_1(26)));
- EXPECT_VERIFIES(i_ll, WASM_I64_GES(WASM_GET_LOCAL(0), WASM_I64V_1(27)));
- EXPECT_VERIFIES(i_ll, WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_I64V_1(28)));
- EXPECT_VERIFIES(i_ll, WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_I64V_1(29)));
-
- EXPECT_VERIFIES(i_ll, WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_I64V_1(25)));
- EXPECT_VERIFIES(i_ll, WASM_I64_NE(WASM_GET_LOCAL(0), WASM_I64V_1(25)));
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_I64V_1(12))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_I64V_1(13))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_I64V_1(14))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64V_1(15))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64V_1(16))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_I64V_1(17))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_I64V_1(18))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_AND(WASM_GET_LOCAL(0), WASM_I64V_1(19))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_I64V_1(20))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_I64V_1(21))});
+
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(22))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(23))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(24))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_ROR(WASM_GET_LOCAL(0), WASM_I64V_1(24))});
+ ExpectValidates(sigs.l_ll(),
+ {WASM_I64_ROL(WASM_GET_LOCAL(0), WASM_I64V_1(24))});
+
+ ExpectValidates(sigs.i_ll(),
+ {WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_I64V_1(26))});
+ ExpectValidates(sigs.i_ll(),
+ {WASM_I64_LES(WASM_GET_LOCAL(0), WASM_I64V_1(27))});
+ ExpectValidates(sigs.i_ll(),
+ {WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_I64V_1(28))});
+ ExpectValidates(sigs.i_ll(),
+ {WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_I64V_1(29))});
+
+ ExpectValidates(sigs.i_ll(),
+ {WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_I64V_1(26))});
+ ExpectValidates(sigs.i_ll(),
+ {WASM_I64_GES(WASM_GET_LOCAL(0), WASM_I64V_1(27))});
+ ExpectValidates(sigs.i_ll(),
+ {WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_I64V_1(28))});
+ ExpectValidates(sigs.i_ll(),
+ {WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_I64V_1(29))});
+
+ ExpectValidates(sigs.i_ll(),
+ {WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_I64V_1(25))});
+ ExpectValidates(sigs.i_ll(),
+ {WASM_I64_NE(WASM_GET_LOCAL(0), WASM_I64V_1(25))});
}
TEST_F(FunctionBodyDecoderTest, AllSimpleExpressions) {
@@ -1292,8 +1365,8 @@ TEST_F(FunctionBodyDecoderTest, MemorySize) {
module = builder.module();
builder.InitializeMemory();
byte code[] = {kExprMemorySize, 0};
- EXPECT_VERIFIES_C(i_i, code);
- EXPECT_FAILURE_C(f_ff, code);
+ ExpectValidates(sigs.i_i(), code);
+ ExpectFailure(sigs.f_ff(), code);
}
TEST_F(FunctionBodyDecoderTest, LoadMemOffset) {
@@ -1303,7 +1376,7 @@ TEST_F(FunctionBodyDecoderTest, LoadMemOffset) {
for (int offset = 0; offset < 128; offset += 7) {
byte code[] = {kExprI32Const, 0, kExprI32LoadMem, ZERO_ALIGNMENT,
static_cast<byte>(offset)};
- EXPECT_VERIFIES_C(i_i, code);
+ ExpectValidates(sigs.i_i(), code);
}
}
@@ -1335,11 +1408,7 @@ TEST_F(FunctionBodyDecoderTest, LoadMemAlignment) {
for (byte alignment = 0; alignment <= 4; alignment++) {
byte code[] = {WASM_ZERO, static_cast<byte>(values[i].instruction),
alignment, ZERO_OFFSET, WASM_DROP};
- if (static_cast<uint32_t>(alignment) <= values[i].maximum_aligment) {
- EXPECT_VERIFIES_C(v_i, code);
- } else {
- EXPECT_FAILURE_C(v_i, code);
- }
+ Validate(alignment <= values[i].maximum_aligment, sigs.v_i(), code);
}
}
}
@@ -1351,7 +1420,7 @@ TEST_F(FunctionBodyDecoderTest, StoreMemOffset) {
for (byte offset = 0; offset < 128; offset += 7) {
byte code[] = {WASM_STORE_MEM_OFFSET(MachineType::Int32(), offset,
WASM_ZERO, WASM_ZERO)};
- EXPECT_VERIFIES_C(v_i, code);
+ ExpectValidates(sigs.v_i(), code);
}
}
@@ -1359,8 +1428,8 @@ TEST_F(FunctionBodyDecoderTest, StoreMemOffset_void) {
TestModuleBuilder builder;
module = builder.module();
builder.InitializeMemory();
- EXPECT_FAILURE(i_i, WASM_STORE_MEM_OFFSET(MachineType::Int32(), 0, WASM_ZERO,
- WASM_ZERO));
+ ExpectFailure(sigs.i_i(), {WASM_STORE_MEM_OFFSET(MachineType::Int32(), 0,
+ WASM_ZERO, WASM_ZERO)});
}
#define BYTE0(x) ((x)&0x7F)
@@ -1377,28 +1446,28 @@ TEST_F(FunctionBodyDecoderTest, LoadMemOffset_varint) {
TestModuleBuilder builder;
module = builder.module();
builder.InitializeMemory();
- EXPECT_VERIFIES(i_i, WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT1(0x45));
- EXPECT_VERIFIES(i_i, WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT2(0x3999));
- EXPECT_VERIFIES(i_i, WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT3(0x344445));
- EXPECT_VERIFIES(i_i, WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT4(0x36666667));
+ ExpectValidates(sigs.i_i(),
+ {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT, VARINT1(0x45)});
+ ExpectValidates(sigs.i_i(), {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT2(0x3999)});
+ ExpectValidates(sigs.i_i(), {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT3(0x344445)});
+ ExpectValidates(sigs.i_i(), {WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT4(0x36666667)});
}
TEST_F(FunctionBodyDecoderTest, StoreMemOffset_varint) {
TestModuleBuilder builder;
module = builder.module();
builder.InitializeMemory();
- EXPECT_VERIFIES(v_i, WASM_ZERO, WASM_ZERO, kExprI32StoreMem, ZERO_ALIGNMENT,
- VARINT1(0x33));
- EXPECT_VERIFIES(v_i, WASM_ZERO, WASM_ZERO, kExprI32StoreMem, ZERO_ALIGNMENT,
- VARINT2(0x1111));
- EXPECT_VERIFIES(v_i, WASM_ZERO, WASM_ZERO, kExprI32StoreMem, ZERO_ALIGNMENT,
- VARINT3(0x222222));
- EXPECT_VERIFIES(v_i, WASM_ZERO, WASM_ZERO, kExprI32StoreMem, ZERO_ALIGNMENT,
- VARINT4(0x44444444));
+ ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT1(0x33)});
+ ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT2(0x1111)});
+ ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT3(0x222222)});
+ ExpectValidates(sigs.v_i(), {WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT4(0x44444444)});
}
#undef BYTE0
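
Both varint tests drive the two LEB128 immediates every memory instruction carries, an alignment hint (log2 of the access alignment) followed by a byte offset: ZERO_ALIGNMENT pins the first while VARINT1 through VARINT4 exercise offsets of each encoded width. Given the BYTE0 mask above, the VARINT helpers are plausibly built as follows; this is a reconstruction of definitions the hunk header cut off, not a quote:

#define BYTE0(x) ((x) & 0x7F)
#define BYTE1(x) (((x) >> 7) & 0x7F)
#define BYTE2(x) (((x) >> 14) & 0x7F)
#define BYTE3(x) (((x) >> 21) & 0x7F)

// Every non-final byte carries the LEB128 continuation bit (0x80); the
// final byte leaves it clear, terminating the varint.
#define VARINT1(x) BYTE0(x)
#define VARINT2(x) (BYTE0(x) | 0x80), BYTE1(x)
#define VARINT3(x) (BYTE0(x) | 0x80), (BYTE1(x) | 0x80), BYTE2(x)
#define VARINT4(x) (BYTE0(x) | 0x80), (BYTE1(x) | 0x80), (BYTE2(x) | 0x80), BYTE3(x)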
@@ -1421,11 +1490,7 @@ TEST_F(FunctionBodyDecoderTest, AllLoadMemCombinations) {
MachineType mem_type = machineTypes[j];
byte code[] = {WASM_LOAD_MEM(mem_type, WASM_ZERO)};
FunctionSig sig(1, 0, &local_type);
- if (local_type == ValueTypes::ValueTypeFor(mem_type)) {
- EXPECT_VERIFIES_SC(&sig, code);
- } else {
- EXPECT_FAILURE_SC(&sig, code);
- }
+ Validate(local_type == ValueTypes::ValueTypeFor(mem_type), &sig, code);
}
}
}
@@ -1440,11 +1505,7 @@ TEST_F(FunctionBodyDecoderTest, AllStoreMemCombinations) {
MachineType mem_type = machineTypes[j];
byte code[] = {WASM_STORE_MEM(mem_type, WASM_ZERO, WASM_GET_LOCAL(0))};
FunctionSig sig(0, 1, &local_type);
- if (local_type == ValueTypes::ValueTypeFor(mem_type)) {
- EXPECT_VERIFIES_SC(&sig, code);
- } else {
- EXPECT_FAILURE_SC(&sig, code);
- }
+ Validate(local_type == ValueTypes::ValueTypeFor(mem_type), &sig, code);
}
}
}
@@ -1458,10 +1519,10 @@ TEST_F(FunctionBodyDecoderTest, SimpleCalls) {
builder.AddFunction(sigs.i_i());
builder.AddFunction(sigs.i_ii());
- EXPECT_VERIFIES_S(sig, WASM_CALL_FUNCTION0(0));
- EXPECT_VERIFIES_S(sig, WASM_CALL_FUNCTION(1, WASM_I32V_1(27)));
- EXPECT_VERIFIES_S(sig,
- WASM_CALL_FUNCTION(2, WASM_I32V_1(37), WASM_I32V_2(77)));
+ ExpectValidates(sig, {WASM_CALL_FUNCTION0(0)});
+ ExpectValidates(sig, {WASM_CALL_FUNCTION(1, WASM_I32V_1(27))});
+ ExpectValidates(sig,
+ {WASM_CALL_FUNCTION(2, WASM_I32V_1(37), WASM_I32V_2(77))});
}
TEST_F(FunctionBodyDecoderTest, CallsWithTooFewArguments) {
@@ -1473,9 +1534,9 @@ TEST_F(FunctionBodyDecoderTest, CallsWithTooFewArguments) {
builder.AddFunction(sigs.i_ii());
builder.AddFunction(sigs.f_ff());
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION0(0));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(1, WASM_ZERO));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(2, WASM_GET_LOCAL(0)));
+ ExpectFailure(sig, {WASM_CALL_FUNCTION0(0)});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(1, WASM_ZERO)});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(2, WASM_GET_LOCAL(0))});
}
TEST_F(FunctionBodyDecoderTest, CallsWithMismatchedSigs2) {
@@ -1485,9 +1546,9 @@ TEST_F(FunctionBodyDecoderTest, CallsWithMismatchedSigs2) {
builder.AddFunction(sigs.i_i());
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_I64V_1(17)));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_F32(17.1)));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_F64(17.1)));
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(0, WASM_I64V_1(17))});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(0, WASM_F32(17.1))});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(0, WASM_F64(17.1))});
}
TEST_F(FunctionBodyDecoderTest, CallsWithMismatchedSigs3) {
@@ -1497,15 +1558,167 @@ TEST_F(FunctionBodyDecoderTest, CallsWithMismatchedSigs3) {
builder.AddFunction(sigs.i_f());
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_I32V_1(17)));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_I64V_1(27)));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_F64(37.2)));
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(0, WASM_I32V_1(17))});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(0, WASM_I64V_1(27))});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(0, WASM_F64(37.2))});
builder.AddFunction(sigs.i_d());
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(1, WASM_I32V_1(16)));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(1, WASM_I64V_1(16)));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(1, WASM_F32(17.6)));
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(1, WASM_I32V_1(16))});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(1, WASM_I64V_1(16))});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(1, WASM_F32(17.6))});
+}
+
+TEST_F(FunctionBodyDecoderTest, SimpleReturnCalls) {
+ WASM_FEATURE_SCOPE(return_call);
+
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ module = builder.module();
+
+ builder.AddFunction(sigs.i_v());
+ builder.AddFunction(sigs.i_i());
+ builder.AddFunction(sigs.i_ii());
+
+ ExpectValidates(sig, {WASM_RETURN_CALL_FUNCTION0(0)});
+ ExpectValidates(sig, {WASM_RETURN_CALL_FUNCTION(1, WASM_I32V_1(27))});
+ ExpectValidates(
+ sig, {WASM_RETURN_CALL_FUNCTION(2, WASM_I32V_1(37), WASM_I32V_2(77))});
+}
+
+TEST_F(FunctionBodyDecoderTest, ReturnCallsWithTooFewArguments) {
+ WASM_FEATURE_SCOPE(return_call);
+
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ module = builder.module();
+
+ builder.AddFunction(sigs.i_i());
+ builder.AddFunction(sigs.i_ii());
+ builder.AddFunction(sigs.f_ff());
+
+ ExpectFailure(sig, {WASM_RETURN_CALL_FUNCTION0(0)});
+ ExpectFailure(sig, {WASM_RETURN_CALL_FUNCTION(1, WASM_ZERO)});
+ ExpectFailure(sig, {WASM_RETURN_CALL_FUNCTION(2, WASM_GET_LOCAL(0))});
+}
+
+TEST_F(FunctionBodyDecoderTest, ReturnCallsWithMismatchedSigs) {
+ WASM_FEATURE_SCOPE(return_call);
+
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ module = builder.module();
+
+ builder.AddFunction(sigs.i_f());
+ builder.AddFunction(sigs.f_f());
+
+ ExpectFailure(sig, {WASM_RETURN_CALL_FUNCTION(0, WASM_I32V_1(17))});
+ ExpectFailure(sig, {WASM_RETURN_CALL_FUNCTION(0, WASM_I64V_1(27))});
+ ExpectFailure(sig, {WASM_RETURN_CALL_FUNCTION(0, WASM_F64(37.2))});
+
+ ExpectFailure(sig, {WASM_RETURN_CALL_FUNCTION(1, WASM_F64(37.2))});
+ ExpectFailure(sig, {WASM_RETURN_CALL_FUNCTION(1, WASM_F32(37.2))});
+ ExpectFailure(sig, {WASM_RETURN_CALL_FUNCTION(1, WASM_I32V_1(17))});
+}
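
A standalone sketch of the tail-call typing rule these ReturnCalls* tests exercise (assumed semantics, not V8 internals): a return_call needs the callee's parameters on the stack, and the callee's results must equal the caller's results, because the callee returns directly to the caller's caller.

    #include <cassert>
    #include <vector>

    enum class ValType { I32, I64, F32, F64 };

    struct Sig {
      std::vector<ValType> params;
      std::vector<ValType> results;
    };

    static bool ReturnCallValid(const Sig& caller, const Sig& callee,
                                const std::vector<ValType>& stack_top) {
      return stack_top == callee.params && callee.results == caller.results;
    }

    int main() {
      Sig i_i{{ValType::I32}, {ValType::I32}};
      Sig f_f{{ValType::F32}, {ValType::F32}};
      // Same signature: valid, as in SimpleReturnCalls.
      assert(ReturnCallValid(i_i, i_i, {ValType::I32}));
      // f_f callee from an i_i caller: result type mismatch, must fail.
      assert(!ReturnCallValid(i_i, f_f, {ValType::F32}));
    }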
+
+TEST_F(FunctionBodyDecoderTest, SimpleIndirectReturnCalls) {
+ WASM_FEATURE_SCOPE(return_call);
+
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ builder.InitializeTable();
+ module = builder.module();
+
+ byte f0 = builder.AddSignature(sigs.i_v());
+ byte f1 = builder.AddSignature(sigs.i_i());
+ byte f2 = builder.AddSignature(sigs.i_ii());
+
+ ExpectValidates(sig, {WASM_RETURN_CALL_INDIRECT0(f0, WASM_ZERO)});
+ ExpectValidates(sig,
+ {WASM_RETURN_CALL_INDIRECT(f1, WASM_ZERO, WASM_I32V_1(22))});
+ ExpectValidates(sig, {WASM_RETURN_CALL_INDIRECT(
+ f2, WASM_ZERO, WASM_I32V_1(32), WASM_I32V_2(72))});
+}
+
+TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsOutOfBounds) {
+ WASM_FEATURE_SCOPE(return_call);
+
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ builder.InitializeTable();
+ module = builder.module();
+
+ ExpectFailure(sig, {WASM_RETURN_CALL_INDIRECT0(0, WASM_ZERO)});
+ builder.AddSignature(sigs.i_v());
+ ExpectValidates(sig, {WASM_RETURN_CALL_INDIRECT0(0, WASM_ZERO)});
+
+ ExpectFailure(sig,
+ {WASM_RETURN_CALL_INDIRECT(1, WASM_ZERO, WASM_I32V_1(22))});
+ builder.AddSignature(sigs.i_i());
+ ExpectValidates(sig,
+ {WASM_RETURN_CALL_INDIRECT(1, WASM_ZERO, WASM_I32V_1(27))});
+
+ ExpectFailure(sig,
+ {WASM_RETURN_CALL_INDIRECT(2, WASM_ZERO, WASM_I32V_1(27))});
+}
+
+TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsWithMismatchedSigs3) {
+ WASM_FEATURE_SCOPE(return_call);
+
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ builder.InitializeTable();
+ module = builder.module();
+
+ byte f0 = builder.AddFunction(sigs.i_f());
+
+ ExpectFailure(sig,
+ {WASM_RETURN_CALL_INDIRECT(f0, WASM_ZERO, WASM_I32V_1(17))});
+ ExpectFailure(sig,
+ {WASM_RETURN_CALL_INDIRECT(f0, WASM_ZERO, WASM_I64V_1(27))});
+ ExpectFailure(sig,
+ {WASM_RETURN_CALL_INDIRECT(f0, WASM_ZERO, WASM_F64(37.2))});
+
+ ExpectFailure(sig, {WASM_RETURN_CALL_INDIRECT0(f0, WASM_I32V_1(17))});
+ ExpectFailure(sig, {WASM_RETURN_CALL_INDIRECT0(f0, WASM_I64V_1(27))});
+ ExpectFailure(sig, {WASM_RETURN_CALL_INDIRECT0(f0, WASM_F64(37.2))});
+
+ byte f1 = builder.AddFunction(sigs.i_d());
+
+ ExpectFailure(sig,
+ {WASM_RETURN_CALL_INDIRECT(f1, WASM_ZERO, WASM_I32V_1(16))});
+ ExpectFailure(sig,
+ {WASM_RETURN_CALL_INDIRECT(f1, WASM_ZERO, WASM_I64V_1(16))});
+ ExpectFailure(sig,
+ {WASM_RETURN_CALL_INDIRECT(f1, WASM_ZERO, WASM_F32(17.6))});
+}
+
+TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsWithoutTableCrash) {
+ WASM_FEATURE_SCOPE(return_call);
+
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ module = builder.module();
+
+ byte f0 = builder.AddSignature(sigs.i_v());
+ byte f1 = builder.AddSignature(sigs.i_i());
+ byte f2 = builder.AddSignature(sigs.i_ii());
+
+ ExpectFailure(sig, {WASM_RETURN_CALL_INDIRECT0(f0, WASM_ZERO)});
+ ExpectFailure(sig,
+ {WASM_RETURN_CALL_INDIRECT(f1, WASM_ZERO, WASM_I32V_1(22))});
+ ExpectFailure(sig, {WASM_RETURN_CALL_INDIRECT(f2, WASM_ZERO, WASM_I32V_1(32),
+ WASM_I32V_2(72))});
+}
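
A minimal sketch of the two module-level preconditions the indirect return-call tests above probe (assumed checks, illustrative names): a table must be declared at all, and the signature index must be in bounds.

    #include <cassert>
    #include <cstddef>

    static bool ValidateIndirectCall(bool has_table, size_t sig_index,
                                     size_t num_sigs) {
      return has_table && sig_index < num_sigs;
    }

    int main() {
      assert(!ValidateIndirectCall(true, 0, 0));   // out-of-bounds sig index
      assert(ValidateIndirectCall(true, 0, 1));    // valid after AddSignature
      assert(!ValidateIndirectCall(false, 0, 1));  // no table declared
    }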
+
+TEST_F(FunctionBodyDecoderTest, IncompleteIndirectReturnCall) {
+ FunctionSig* sig = sigs.i_i();
+ TestModuleBuilder builder;
+ builder.InitializeTable();
+ module = builder.module();
+
+ static byte code[] = {kExprReturnCallIndirect};
+ ExpectFailure(sig, ArrayVector(code), kOmitEnd);
}
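
The raw-byte tests wrap a C array with ArrayVector, which carries the length along with the pointer, and pass kOmitEnd, which presumably stops the harness from appending the implicit end opcode so the truncated instruction reaches the decoder as-is. A self-contained equivalent of the length-deducing helper (V8's real Vector type differs in detail):

    #include <cstddef>

    template <typename T>
    struct Span {
      T* data;
      size_t size;
    };

    // Deduces N from the array type, so the byte count cannot drift out of
    // sync with the array literal.
    template <typename T, size_t N>
    Span<T> ArrayVector(T (&arr)[N]) {
      return {arr, N};
    }

    int main() {
      unsigned char code[] = {0x11};  // a lone call_indirect opcode
      Span<unsigned char> v = ArrayVector(code);
      return v.size == 1 ? 0 : 1;
    }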
TEST_F(FunctionBodyDecoderTest, MultiReturn) {
@@ -1519,9 +1732,9 @@ TEST_F(FunctionBodyDecoderTest, MultiReturn) {
builder.AddFunction(&sig_v_ii);
builder.AddFunction(&sig_ii_v);
- EXPECT_VERIFIES_S(&sig_ii_v, WASM_CALL_FUNCTION0(1));
- EXPECT_VERIFIES(v_v, WASM_CALL_FUNCTION0(1), WASM_DROP, WASM_DROP);
- EXPECT_VERIFIES(v_v, WASM_CALL_FUNCTION0(1), kExprCallFunction, 0);
+ ExpectValidates(&sig_ii_v, {WASM_CALL_FUNCTION0(1)});
+ ExpectValidates(sigs.v_v(), {WASM_CALL_FUNCTION0(1), WASM_DROP, WASM_DROP});
+ ExpectValidates(sigs.v_v(), {WASM_CALL_FUNCTION0(1), kExprCallFunction, 0});
}
TEST_F(FunctionBodyDecoderTest, MultiReturnType) {
@@ -1539,12 +1752,12 @@ TEST_F(FunctionBodyDecoderTest, MultiReturnType) {
module = builder.module();
builder.AddFunction(&sig_cd_v);
- EXPECT_VERIFIES_S(&sig_cd_v, WASM_CALL_FUNCTION0(0));
+ ExpectValidates(&sig_cd_v, {WASM_CALL_FUNCTION0(0)});
if (a == c && b == d) {
- EXPECT_VERIFIES_S(&sig_ab_v, WASM_CALL_FUNCTION0(0));
+ ExpectValidates(&sig_ab_v, {WASM_CALL_FUNCTION0(0)});
} else {
- EXPECT_FAILURE_S(&sig_ab_v, WASM_CALL_FUNCTION0(0));
+ ExpectFailure(&sig_ab_v, {WASM_CALL_FUNCTION0(0)});
}
}
}
@@ -1562,10 +1775,10 @@ TEST_F(FunctionBodyDecoderTest, SimpleIndirectCalls) {
byte f1 = builder.AddSignature(sigs.i_i());
byte f2 = builder.AddSignature(sigs.i_ii());
- EXPECT_VERIFIES_S(sig, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
- EXPECT_VERIFIES_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I32V_1(22)));
- EXPECT_VERIFIES_S(sig, WASM_CALL_INDIRECT2(f2, WASM_ZERO, WASM_I32V_1(32),
- WASM_I32V_2(72)));
+ ExpectValidates(sig, {WASM_CALL_INDIRECT0(f0, WASM_ZERO)});
+ ExpectValidates(sig, {WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I32V_1(22))});
+ ExpectValidates(sig, {WASM_CALL_INDIRECT2(f2, WASM_ZERO, WASM_I32V_1(32),
+ WASM_I32V_2(72))});
}
TEST_F(FunctionBodyDecoderTest, IndirectCallsOutOfBounds) {
@@ -1574,15 +1787,15 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsOutOfBounds) {
builder.InitializeTable();
module = builder.module();
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ ExpectFailure(sig, {WASM_CALL_INDIRECT0(0, WASM_ZERO)});
builder.AddSignature(sigs.i_v());
- EXPECT_VERIFIES_S(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ ExpectValidates(sig, {WASM_CALL_INDIRECT0(0, WASM_ZERO)});
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I32V_1(22)));
+ ExpectFailure(sig, {WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I32V_1(22))});
builder.AddSignature(sigs.i_i());
- EXPECT_VERIFIES_S(sig, WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I32V_1(27)));
+ ExpectValidates(sig, {WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I32V_1(27))});
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(2, WASM_ZERO, WASM_I32V_1(27)));
+ ExpectFailure(sig, {WASM_CALL_INDIRECT1(2, WASM_ZERO, WASM_I32V_1(27))});
}
TEST_F(FunctionBodyDecoderTest, IndirectCallsWithMismatchedSigs3) {
@@ -1593,19 +1806,19 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsWithMismatchedSigs3) {
byte f0 = builder.AddFunction(sigs.i_f());
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I32V_1(17)));
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I64V_1(27)));
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_F64(37.2)));
+ ExpectFailure(sig, {WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I32V_1(17))});
+ ExpectFailure(sig, {WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I64V_1(27))});
+ ExpectFailure(sig, {WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_F64(37.2))});
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(f0, WASM_I32V_1(17)));
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(f0, WASM_I64V_1(27)));
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(f0, WASM_F64(37.2)));
+ ExpectFailure(sig, {WASM_CALL_INDIRECT0(f0, WASM_I32V_1(17))});
+ ExpectFailure(sig, {WASM_CALL_INDIRECT0(f0, WASM_I64V_1(27))});
+ ExpectFailure(sig, {WASM_CALL_INDIRECT0(f0, WASM_F64(37.2))});
byte f1 = builder.AddFunction(sigs.i_d());
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I32V_1(16)));
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I64V_1(16)));
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_F32(17.6)));
+ ExpectFailure(sig, {WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I32V_1(16))});
+ ExpectFailure(sig, {WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I64V_1(16))});
+ ExpectFailure(sig, {WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_F32(17.6))});
}
TEST_F(FunctionBodyDecoderTest, IndirectCallsWithoutTableCrash) {
@@ -1617,10 +1830,10 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsWithoutTableCrash) {
byte f1 = builder.AddSignature(sigs.i_i());
byte f2 = builder.AddSignature(sigs.i_ii());
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I32V_1(22)));
- EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT2(f2, WASM_ZERO, WASM_I32V_1(32),
- WASM_I32V_2(72)));
+ ExpectFailure(sig, {WASM_CALL_INDIRECT0(f0, WASM_ZERO)});
+ ExpectFailure(sig, {WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I32V_1(22))});
+ ExpectFailure(sig, {WASM_CALL_INDIRECT2(f2, WASM_ZERO, WASM_I32V_1(32),
+ WASM_I32V_2(72))});
}
TEST_F(FunctionBodyDecoderTest, IncompleteIndirectCall) {
@@ -1630,7 +1843,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteIndirectCall) {
module = builder.module();
static byte code[] = {kExprCallIndirect};
- Verify(false, sig, ArrayVector(code), kOmitEnd);
+ ExpectFailure(sig, ArrayVector(code), kOmitEnd);
}
TEST_F(FunctionBodyDecoderTest, IncompleteStore) {
@@ -1641,7 +1854,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteStore) {
module = builder.module();
static byte code[] = {kExprI32StoreMem};
- Verify(false, sig, ArrayVector(code), kOmitEnd);
+ ExpectFailure(sig, ArrayVector(code), kOmitEnd);
}
TEST_F(FunctionBodyDecoderTest, IncompleteS8x16Shuffle) {
@@ -1654,7 +1867,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteS8x16Shuffle) {
static byte code[] = {kSimdPrefix,
static_cast<byte>(kExprS8x16Shuffle & 0xff)};
- Verify(false, sig, ArrayVector(code), kOmitEnd);
+ ExpectFailure(sig, ArrayVector(code), kOmitEnd);
}
TEST_F(FunctionBodyDecoderTest, SimpleImportCalls) {
@@ -1666,10 +1879,10 @@ TEST_F(FunctionBodyDecoderTest, SimpleImportCalls) {
byte f1 = builder.AddImport(sigs.i_i());
byte f2 = builder.AddImport(sigs.i_ii());
- EXPECT_VERIFIES_S(sig, WASM_CALL_FUNCTION0(f0));
- EXPECT_VERIFIES_S(sig, WASM_CALL_FUNCTION(f1, WASM_I32V_1(22)));
- EXPECT_VERIFIES_S(sig,
- WASM_CALL_FUNCTION(f2, WASM_I32V_1(32), WASM_I32V_2(72)));
+ ExpectValidates(sig, {WASM_CALL_FUNCTION0(f0)});
+ ExpectValidates(sig, {WASM_CALL_FUNCTION(f1, WASM_I32V_1(22))});
+ ExpectValidates(sig,
+ {WASM_CALL_FUNCTION(f2, WASM_I32V_1(32), WASM_I32V_2(72))});
}
TEST_F(FunctionBodyDecoderTest, ImportCallsWithMismatchedSigs3) {
@@ -1679,17 +1892,17 @@ TEST_F(FunctionBodyDecoderTest, ImportCallsWithMismatchedSigs3) {
byte f0 = builder.AddImport(sigs.i_f());
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION0(f0));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f0, WASM_I32V_1(17)));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f0, WASM_I64V_1(27)));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f0, WASM_F64(37.2)));
+ ExpectFailure(sig, {WASM_CALL_FUNCTION0(f0)});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(f0, WASM_I32V_1(17))});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(f0, WASM_I64V_1(27))});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(f0, WASM_F64(37.2))});
byte f1 = builder.AddImport(sigs.i_d());
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION0(f1));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f1, WASM_I32V_1(16)));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f1, WASM_I64V_1(16)));
- EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f1, WASM_F32(17.6)));
+ ExpectFailure(sig, {WASM_CALL_FUNCTION0(f1)});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(f1, WASM_I32V_1(16))});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(f1, WASM_I64V_1(16))});
+ ExpectFailure(sig, {WASM_CALL_FUNCTION(f1, WASM_F32(17.6))});
}
TEST_F(FunctionBodyDecoderTest, Int32Globals) {
@@ -1699,9 +1912,9 @@ TEST_F(FunctionBodyDecoderTest, Int32Globals) {
builder.AddGlobal(kWasmI32);
- EXPECT_VERIFIES_S(sig, WASM_GET_GLOBAL(0));
- EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)), WASM_ZERO);
+ ExpectValidates(sig, {WASM_GET_GLOBAL(0)});
+ ExpectFailure(sig, {WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0))});
+ ExpectValidates(sig, {WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)), WASM_ZERO});
}
TEST_F(FunctionBodyDecoderTest, ImmutableGlobal) {
@@ -1712,8 +1925,8 @@ TEST_F(FunctionBodyDecoderTest, ImmutableGlobal) {
uint32_t g0 = builder.AddGlobal(kWasmI32, true);
uint32_t g1 = builder.AddGlobal(kWasmI32, false);
- EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(g0, WASM_ZERO));
- EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(g1, WASM_ZERO));
+ ExpectValidates(sig, {WASM_SET_GLOBAL(g0, WASM_ZERO)});
+ ExpectFailure(sig, {WASM_SET_GLOBAL(g1, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, Int32Globals_fail) {
@@ -1726,15 +1939,15 @@ TEST_F(FunctionBodyDecoderTest, Int32Globals_fail) {
builder.AddGlobal(kWasmF32);
builder.AddGlobal(kWasmF64);
- EXPECT_FAILURE_S(sig, WASM_GET_GLOBAL(0));
- EXPECT_FAILURE_S(sig, WASM_GET_GLOBAL(1));
- EXPECT_FAILURE_S(sig, WASM_GET_GLOBAL(2));
- EXPECT_FAILURE_S(sig, WASM_GET_GLOBAL(3));
+ ExpectFailure(sig, {WASM_GET_GLOBAL(0)});
+ ExpectFailure(sig, {WASM_GET_GLOBAL(1)});
+ ExpectFailure(sig, {WASM_GET_GLOBAL(2)});
+ ExpectFailure(sig, {WASM_GET_GLOBAL(3)});
- EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)), WASM_ZERO);
- EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)), WASM_ZERO);
- EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(2, WASM_GET_LOCAL(0)), WASM_ZERO);
- EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(3, WASM_GET_LOCAL(0)), WASM_ZERO);
+ ExpectFailure(sig, {WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)), WASM_ZERO});
+ ExpectFailure(sig, {WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)), WASM_ZERO});
+ ExpectFailure(sig, {WASM_SET_GLOBAL(2, WASM_GET_LOCAL(0)), WASM_ZERO});
+ ExpectFailure(sig, {WASM_SET_GLOBAL(3, WASM_GET_LOCAL(0)), WASM_ZERO});
}
TEST_F(FunctionBodyDecoderTest, Int64Globals) {
@@ -1745,13 +1958,13 @@ TEST_F(FunctionBodyDecoderTest, Int64Globals) {
builder.AddGlobal(kWasmI64);
builder.AddGlobal(kWasmI64);
- EXPECT_VERIFIES_S(sig, WASM_GET_GLOBAL(0));
- EXPECT_VERIFIES_S(sig, WASM_GET_GLOBAL(1));
+ ExpectValidates(sig, {WASM_GET_GLOBAL(0)});
+ ExpectValidates(sig, {WASM_GET_GLOBAL(1)});
- EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)),
- WASM_GET_LOCAL(0));
- EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)),
- WASM_GET_LOCAL(0));
+ ExpectValidates(sig,
+ {WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)), WASM_GET_LOCAL(0)});
+ ExpectValidates(sig,
+ {WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)), WASM_GET_LOCAL(0)});
}
TEST_F(FunctionBodyDecoderTest, Float32Globals) {
@@ -1761,9 +1974,9 @@ TEST_F(FunctionBodyDecoderTest, Float32Globals) {
builder.AddGlobal(kWasmF32);
- EXPECT_VERIFIES_S(sig, WASM_GET_GLOBAL(0));
- EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)),
- WASM_GET_LOCAL(0));
+ ExpectValidates(sig, {WASM_GET_GLOBAL(0)});
+ ExpectValidates(sig,
+ {WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)), WASM_GET_LOCAL(0)});
}
TEST_F(FunctionBodyDecoderTest, Float64Globals) {
@@ -1773,9 +1986,9 @@ TEST_F(FunctionBodyDecoderTest, Float64Globals) {
builder.AddGlobal(kWasmF64);
- EXPECT_VERIFIES_S(sig, WASM_GET_GLOBAL(0));
- EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)),
- WASM_GET_LOCAL(0));
+ ExpectValidates(sig, {WASM_GET_GLOBAL(0)});
+ ExpectValidates(sig,
+ {WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)), WASM_GET_LOCAL(0)});
}
TEST_F(FunctionBodyDecoderTest, AllGetGlobalCombinations) {
@@ -1787,11 +2000,7 @@ TEST_F(FunctionBodyDecoderTest, AllGetGlobalCombinations) {
TestModuleBuilder builder;
module = builder.module();
builder.AddGlobal(global_type);
- if (local_type == global_type) {
- EXPECT_VERIFIES_S(&sig, WASM_GET_GLOBAL(0));
- } else {
- EXPECT_FAILURE_S(&sig, WASM_GET_GLOBAL(0));
- }
+ Validate(local_type == global_type, &sig, {WASM_GET_GLOBAL(0)});
}
}
}
@@ -1805,23 +2014,113 @@ TEST_F(FunctionBodyDecoderTest, AllSetGlobalCombinations) {
TestModuleBuilder builder;
module = builder.module();
builder.AddGlobal(global_type);
- if (local_type == global_type) {
- EXPECT_VERIFIES_S(&sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
- } else {
- EXPECT_FAILURE_S(&sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
- }
+ Validate(local_type == global_type, &sig,
+ {WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0))});
}
}
}
+TEST_F(FunctionBodyDecoderTest, SetTable) {
+ WASM_FEATURE_SCOPE(anyref);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte tab_ref1 = builder.AddTable(kWasmAnyRef, 10, true, 20);
+ byte tab_func1 = builder.AddTable(kWasmAnyFunc, 20, true, 30);
+ byte tab_func2 = builder.AddTable(kWasmAnyFunc, 10, false, 20);
+ byte tab_ref2 = builder.AddTable(kWasmAnyRef, 10, false, 20);
+ ValueType sig_types[]{kWasmAnyRef, kWasmAnyFunc, kWasmI32};
+ FunctionSig sig(0, 3, sig_types);
+ byte local_ref = 0;
+ byte local_func = 1;
+ byte local_int = 2;
+ ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref1, WASM_I32V(6),
+ WASM_GET_LOCAL(local_ref))});
+ ExpectValidates(&sig, {WASM_SET_TABLE(tab_func1, WASM_I32V(5),
+ WASM_GET_LOCAL(local_func))});
+ ExpectValidates(&sig, {WASM_SET_TABLE(tab_func2, WASM_I32V(7),
+ WASM_GET_LOCAL(local_func))});
+ ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref2, WASM_I32V(8),
+ WASM_GET_LOCAL(local_ref))});
+
+ // We can store anyfunc values as anyref, but not the other way around.
+ ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref1, WASM_I32V(4),
+ WASM_GET_LOCAL(local_func))});
+ ExpectFailure(&sig, {WASM_SET_TABLE(tab_func1, WASM_I32V(9),
+ WASM_GET_LOCAL(local_ref))});
+ ExpectFailure(&sig, {WASM_SET_TABLE(tab_func2, WASM_I32V(3),
+ WASM_GET_LOCAL(local_ref))});
+ ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref2, WASM_I32V(2),
+ WASM_GET_LOCAL(local_func))});
+ ExpectFailure(&sig, {WASM_SET_TABLE(tab_ref1, WASM_I32V(9),
+ WASM_GET_LOCAL(local_int))});
+ ExpectFailure(&sig, {WASM_SET_TABLE(tab_func1, WASM_I32V(3),
+ WASM_GET_LOCAL(local_int))});
+ // Out-of-bounds table index should fail.
+ byte oob_tab = 37;
+ ExpectFailure(
+ &sig, {WASM_SET_TABLE(oob_tab, WASM_I32V(9), WASM_GET_LOCAL(local_ref))});
+ ExpectFailure(&sig, {WASM_SET_TABLE(oob_tab, WASM_I32V(3),
+ WASM_GET_LOCAL(local_func))});
+}
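
A sketch of the subtyping rule behind the table.set / table.get expectations above and below (illustrative names, not V8's type lattice): anyfunc is assumed to be a subtype of anyref, so function references may go where anyref is expected but not the reverse, and i32 is unrelated to both.

    #include <cassert>

    enum class RefType { AnyRef, AnyFunc, I32 };

    static bool IsSubtypeOf(RefType value, RefType expected) {
      if (value == expected) return true;
      return value == RefType::AnyFunc && expected == RefType::AnyRef;
    }

    int main() {
      assert(IsSubtypeOf(RefType::AnyFunc, RefType::AnyRef));   // validates
      assert(!IsSubtypeOf(RefType::AnyRef, RefType::AnyFunc));  // fails
      assert(!IsSubtypeOf(RefType::I32, RefType::AnyRef));      // fails
    }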
+
+TEST_F(FunctionBodyDecoderTest, GetTable) {
+ WASM_FEATURE_SCOPE(anyref);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte tab_ref1 = builder.AddTable(kWasmAnyRef, 10, true, 20);
+ byte tab_func1 = builder.AddTable(kWasmAnyFunc, 20, true, 30);
+ byte tab_func2 = builder.AddTable(kWasmAnyFunc, 10, false, 20);
+ byte tab_ref2 = builder.AddTable(kWasmAnyRef, 10, false, 20);
+ ValueType sig_types[]{kWasmAnyRef, kWasmAnyFunc, kWasmI32};
+ FunctionSig sig(0, 3, sig_types);
+ byte local_ref = 0;
+ byte local_func = 1;
+ byte local_int = 2;
+ ExpectValidates(
+ &sig,
+ {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_ref1, WASM_I32V(6)))});
+ ExpectValidates(
+ &sig,
+ {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_ref2, WASM_I32V(8)))});
+ ExpectValidates(
+ &sig,
+ {WASM_SET_LOCAL(local_func, WASM_GET_TABLE(tab_func1, WASM_I32V(5)))});
+ ExpectValidates(
+ &sig,
+ {WASM_SET_LOCAL(local_func, WASM_GET_TABLE(tab_func2, WASM_I32V(7)))});
+
+ // We can load anyfunc values as anyref, but not the other way around.
+ ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
+ WASM_GET_TABLE(tab_ref1, WASM_I32V(4)))});
+ ExpectValidates(
+ &sig,
+ {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_func1, WASM_I32V(9)))});
+ ExpectValidates(
+ &sig,
+ {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_func2, WASM_I32V(3)))});
+ ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
+ WASM_GET_TABLE(tab_ref2, WASM_I32V(2)))});
+
+ ExpectFailure(&sig, {WASM_SET_LOCAL(local_int,
+ WASM_GET_TABLE(tab_ref1, WASM_I32V(9)))});
+ ExpectFailure(&sig, {WASM_SET_LOCAL(
+ local_int, WASM_GET_TABLE(tab_func1, WASM_I32V(3)))});
+ // Out-of-bounds table index should fail.
+ byte oob_tab = 37;
+ ExpectFailure(
+ &sig, {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(oob_tab, WASM_I32V(9)))});
+ ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
+ WASM_GET_TABLE(oob_tab, WASM_I32V(3)))});
+}
+
TEST_F(FunctionBodyDecoderTest, WasmMemoryGrow) {
TestModuleBuilder builder;
module = builder.module();
builder.InitializeMemory();
byte code[] = {WASM_GET_LOCAL(0), kExprMemoryGrow, 0};
- EXPECT_VERIFIES_C(i_i, code);
- EXPECT_FAILURE_C(i_d, code);
+ ExpectValidates(sigs.i_i(), code);
+ ExpectFailure(sigs.i_d(), code);
}
TEST_F(FunctionBodyDecoderTest, AsmJsMemoryGrow) {
@@ -1830,7 +2129,7 @@ TEST_F(FunctionBodyDecoderTest, AsmJsMemoryGrow) {
builder.InitializeMemory();
byte code[] = {WASM_GET_LOCAL(0), kExprMemoryGrow, 0};
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.i_i(), code);
}
TEST_F(FunctionBodyDecoderTest, AsmJsBinOpsCheckOrigin) {
@@ -1870,9 +2169,9 @@ TEST_F(FunctionBodyDecoderTest, AsmJsBinOpsCheckOrigin) {
module = builder.module();
builder.InitializeMemory();
for (size_t i = 0; i < arraysize(AsmJsBinOps); i++) {
- byte code[] = {
- WASM_BINOP(AsmJsBinOps[i].op, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
- EXPECT_FAILURE_SC(AsmJsBinOps[i].sig, code);
+ ExpectFailure(AsmJsBinOps[i].sig,
+ {WASM_BINOP(AsmJsBinOps[i].op, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1))});
}
}
}
@@ -1918,44 +2217,48 @@ TEST_F(FunctionBodyDecoderTest, AsmJsUnOpsCheckOrigin) {
module = builder.module();
builder.InitializeMemory();
for (size_t i = 0; i < arraysize(AsmJsUnOps); i++) {
- byte code[] = {WASM_UNOP(AsmJsUnOps[i].op, WASM_GET_LOCAL(0))};
- EXPECT_FAILURE_SC(AsmJsUnOps[i].sig, code);
+ ExpectFailure(AsmJsUnOps[i].sig,
+ {WASM_UNOP(AsmJsUnOps[i].op, WASM_GET_LOCAL(0))});
}
}
}
TEST_F(FunctionBodyDecoderTest, BreakEnd) {
- EXPECT_VERIFIES(
- i_i, WASM_BLOCK_I(WASM_I32_ADD(WASM_BRV(0, WASM_ZERO), WASM_ZERO)));
- EXPECT_VERIFIES(
- i_i, WASM_BLOCK_I(WASM_I32_ADD(WASM_ZERO, WASM_BRV(0, WASM_ZERO))));
+ ExpectValidates(
+ sigs.i_i(),
+ {WASM_BLOCK_I(WASM_I32_ADD(WASM_BRV(0, WASM_ZERO), WASM_ZERO))});
+ ExpectValidates(
+ sigs.i_i(),
+ {WASM_BLOCK_I(WASM_I32_ADD(WASM_ZERO, WASM_BRV(0, WASM_ZERO)))});
}
TEST_F(FunctionBodyDecoderTest, BreakIfBinop) {
- EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_I32_ADD(
- WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO), WASM_ZERO)));
- EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_I32_ADD(
- WASM_ZERO, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO))));
- EXPECT_VERIFIES_S(
+ ExpectValidates(sigs.i_i(),
+ {WASM_BLOCK_I(WASM_I32_ADD(
+ WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO), WASM_ZERO))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_BLOCK_I(WASM_I32_ADD(
+ WASM_ZERO, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)))});
+ ExpectValidates(
sigs.f_ff(),
- WASM_BLOCK_F(WASM_F32_ABS(WASM_BRV_IF(0, WASM_F32(0.0f), WASM_ZERO))));
+ {WASM_BLOCK_F(WASM_F32_ABS(WASM_BRV_IF(0, WASM_F32(0.0f), WASM_ZERO)))});
}
TEST_F(FunctionBodyDecoderTest, BreakIfBinop_fail) {
- EXPECT_FAILURE_S(
+ ExpectFailure(
sigs.f_ff(),
- WASM_BLOCK_F(WASM_F32_ABS(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO))));
- EXPECT_FAILURE_S(
+ {WASM_BLOCK_F(WASM_F32_ABS(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)))});
+ ExpectFailure(
sigs.i_i(),
- WASM_BLOCK_I(WASM_F32_ABS(WASM_BRV_IF(0, WASM_F32(0.0f), WASM_ZERO))));
+ {WASM_BLOCK_I(WASM_F32_ABS(WASM_BRV_IF(0, WASM_F32(0.0f), WASM_ZERO)))});
}
TEST_F(FunctionBodyDecoderTest, BreakIfUnrNarrow) {
- EXPECT_FAILURE_S(
+ ExpectFailure(
sigs.f_ff(),
- WASM_BLOCK_I(WASM_BRV_IF(0, WASM_UNREACHABLE, WASM_UNREACHABLE),
- WASM_RETURN0),
- WASM_F32(0.0));
+ {WASM_BLOCK_I(WASM_BRV_IF(0, WASM_UNREACHABLE, WASM_UNREACHABLE),
+ WASM_RETURN0),
+ WASM_F32(0.0)});
}
TEST_F(FunctionBodyDecoderTest, BreakNesting1) {
@@ -1965,22 +2268,14 @@ TEST_F(FunctionBodyDecoderTest, BreakNesting1) {
WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(i + 1, WASM_ZERO)),
WASM_SET_LOCAL(0, WASM_I32V_1(1))),
WASM_ZERO)};
- if (i < 3) {
- EXPECT_VERIFIES_C(i_i, code);
- } else {
- EXPECT_FAILURE_C(i_i, code);
- }
+ Validate(i < 3, sigs.i_i(), code);
}
}
TEST_F(FunctionBodyDecoderTest, BreakNesting2) {
for (int i = 0; i < 7; i++) {
byte code[] = {B1(WASM_LOOP(WASM_IF(WASM_ZERO, WASM_BR(i)), WASM_NOP))};
- if (i <= 3) {
- EXPECT_VERIFIES_C(v_v, code);
- } else {
- EXPECT_FAILURE_C(v_v, code);
- }
+ Validate(i <= 3, sigs.v_v(), code);
}
}
@@ -1989,25 +2284,22 @@ TEST_F(FunctionBodyDecoderTest, BreakNesting3) {
// (block[1] (loop[1] (block[1] (if 0 break[N]))))
byte code[] = {
WASM_BLOCK(WASM_LOOP(B1(WASM_IF(WASM_ZERO, WASM_BR(i + 1)))))};
- if (i < 4) {
- EXPECT_VERIFIES_C(v_v, code);
- } else {
- EXPECT_FAILURE_C(v_v, code);
- }
+ Validate(i < 4, sigs.v_v(), code);
}
}
TEST_F(FunctionBodyDecoderTest, BreaksWithMultipleTypes) {
- EXPECT_FAILURE(i_i, B2(WASM_BRV_IF_ZERO(0, WASM_I32V_1(7)), WASM_F32(7.7)));
+ ExpectFailure(sigs.i_i(),
+ {B2(WASM_BRV_IF_ZERO(0, WASM_I32V_1(7)), WASM_F32(7.7))});
- EXPECT_FAILURE(i_i, B2(WASM_BRV_IF_ZERO(0, WASM_I32V_1(7)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
- EXPECT_FAILURE(i_i, B3(WASM_BRV_IF_ZERO(0, WASM_I32V_1(8)),
- WASM_BRV_IF_ZERO(0, WASM_I32V_1(0)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
- EXPECT_FAILURE(i_i, B3(WASM_BRV_IF_ZERO(0, WASM_I32V_1(9)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
- WASM_BRV_IF_ZERO(0, WASM_I32V_1(11))));
+ ExpectFailure(sigs.i_i(), {B2(WASM_BRV_IF_ZERO(0, WASM_I32V_1(7)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7)))});
+ ExpectFailure(sigs.i_i(), {B3(WASM_BRV_IF_ZERO(0, WASM_I32V_1(8)),
+ WASM_BRV_IF_ZERO(0, WASM_I32V_1(0)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7)))});
+ ExpectFailure(sigs.i_i(), {B3(WASM_BRV_IF_ZERO(0, WASM_I32V_1(9)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
+ WASM_BRV_IF_ZERO(0, WASM_I32V_1(11)))});
}
TEST_F(FunctionBodyDecoderTest, BreakNesting_6_levels) {
@@ -2027,11 +2319,7 @@ TEST_F(FunctionBodyDecoderTest, BreakNesting_6_levels) {
m >>= 1;
}
- if (i <= depth) {
- EXPECT_VERIFIES_C(v_v, code);
- } else {
- EXPECT_FAILURE_C(v_v, code);
- }
+ Validate(i <= depth, sigs.v_v(), code);
}
}
}
@@ -2044,17 +2332,19 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheck) {
byte code[] = {WASM_BLOCK_T(
sig->GetReturn(), WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
WASM_GET_LOCAL(0))};
- EXPECT_VERIFIES_SC(sig, code);
+ ExpectValidates(sig, code);
}
// unify i32 and f32 => fail
- EXPECT_FAILURE(i_i, WASM_BLOCK_I(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)),
- WASM_F32(1.2)));
+ ExpectFailure(sigs.i_i(),
+ {WASM_BLOCK_I(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)),
+ WASM_F32(1.2))});
// unify f64 and f64 => OK
- EXPECT_VERIFIES(
- d_dd, WASM_BLOCK_D(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
- WASM_F64(1.2)));
+ ExpectValidates(
+ sigs.d_dd(),
+ {WASM_BLOCK_D(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ WASM_F64(1.2))});
}
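
The BreaksWithMultipleTypes and Break_TypeCheck tests rest on the same rule, spelled out in the "unify" comments above: every branch to a label must produce the label's result type, so validation effectively unifies the types of all br values targeting one block. A standalone sketch of that unification (assumed behaviour):

    #include <cassert>
    #include <optional>
    #include <vector>

    enum class ValType { I32, I64, F32, F64 };

    static std::optional<ValType> UnifyBranchTypes(
        const std::vector<ValType>& branch_types) {
      if (branch_types.empty()) return std::nullopt;
      for (ValType t : branch_types) {
        if (t != branch_types.front()) return std::nullopt;  // mismatch
      }
      return branch_types.front();
    }

    int main() {
      // i32 vs f32 => fail; f64 vs f64 => OK, as in the comments above.
      assert(!UnifyBranchTypes({ValType::I32, ValType::F32}).has_value());
      assert(UnifyBranchTypes({ValType::F64, ValType::F64}) == ValType::F64);
    }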
TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll1) {
@@ -2066,11 +2356,7 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll1) {
sig.GetReturn(), WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
WASM_GET_LOCAL(1))};
- if (i == j) {
- EXPECT_VERIFIES_SC(&sig, code);
- } else {
- EXPECT_FAILURE_SC(&sig, code);
- }
+ Validate(i == j, &sig, code);
}
}
}
@@ -2084,11 +2370,7 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll2) {
WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)),
WASM_GET_LOCAL(1))};
- if (i == j) {
- EXPECT_VERIFIES_SC(&sig, code);
- } else {
- EXPECT_FAILURE_SC(&sig, code);
- }
+ Validate(i == j, &sig, code);
}
}
}
@@ -2102,11 +2384,7 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll3) {
WASM_GET_LOCAL(1),
WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)))};
- if (i == j) {
- EXPECT_VERIFIES_SC(&sig, code);
- } else {
- EXPECT_FAILURE_SC(&sig, code);
- }
+ Validate(i == j, &sig, code);
}
}
}
@@ -2122,11 +2400,7 @@ TEST_F(FunctionBodyDecoderTest, Break_Unify) {
type, WASM_IF(WASM_ZERO, WASM_BRV(1, WASM_GET_LOCAL(which))),
WASM_GET_LOCAL(which ^ 1))};
- if (type == kWasmI32) {
- EXPECT_VERIFIES_SC(&sig, code1);
- } else {
- EXPECT_FAILURE_SC(&sig, code1);
- }
+ Validate(type == kWasmI32, &sig, code1);
}
}
}
@@ -2139,11 +2413,7 @@ TEST_F(FunctionBodyDecoderTest, BreakIf_cond_type) {
byte code[] = {WASM_BLOCK_T(
types[0], WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))};
- if (types[2] == kWasmI32) {
- EXPECT_VERIFIES_SC(&sig, code);
- } else {
- EXPECT_FAILURE_SC(&sig, code);
- }
+ Validate(types[2] == kWasmI32, &sig, code);
}
}
}
@@ -2158,11 +2428,7 @@ TEST_F(FunctionBodyDecoderTest, BreakIf_val_type) {
types[1], WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
WASM_DROP, WASM_GET_LOCAL(0))};
- if (i == j) {
- EXPECT_VERIFIES_SC(&sig, code);
- } else {
- EXPECT_FAILURE_SC(&sig, code);
- }
+ Validate(i == j, &sig, code);
}
}
}
@@ -2176,73 +2442,64 @@ TEST_F(FunctionBodyDecoderTest, BreakIf_Unify) {
byte code[] = {WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(which)),
WASM_DROP, WASM_GET_LOCAL(which ^ 1))};
- if (type == kWasmI32) {
- EXPECT_VERIFIES_SC(&sig, code);
- } else {
- EXPECT_FAILURE_SC(&sig, code);
- }
+ Validate(type == kWasmI32, &sig, code);
}
}
}
TEST_F(FunctionBodyDecoderTest, BrTable0) {
- static byte code[] = {kExprBrTable, 0, BR_TARGET(0)};
- EXPECT_FAILURE_C(v_v, code);
+ ExpectFailure(sigs.v_v(), {kExprBrTable, 0, BR_TARGET(0)});
}
TEST_F(FunctionBodyDecoderTest, BrTable0b) {
static byte code[] = {kExprI32Const, 11, kExprBrTable, 0, BR_TARGET(0)};
- EXPECT_VERIFIES_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectValidates(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
}
TEST_F(FunctionBodyDecoderTest, BrTable0c) {
static byte code[] = {kExprI32Const, 11, kExprBrTable, 0, BR_TARGET(1)};
- EXPECT_FAILURE_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
+ ExpectFailure(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
}
TEST_F(FunctionBodyDecoderTest, BrTable1a) {
- static byte code[] = {B1(WASM_BR_TABLE(WASM_I32V_2(67), 0, BR_TARGET(0)))};
- EXPECT_VERIFIES_C(v_v, code);
+ ExpectValidates(sigs.v_v(),
+ {B1(WASM_BR_TABLE(WASM_I32V_2(67), 0, BR_TARGET(0)))});
}
TEST_F(FunctionBodyDecoderTest, BrTable1b) {
static byte code[] = {B1(WASM_BR_TABLE(WASM_ZERO, 0, BR_TARGET(0)))};
- EXPECT_VERIFIES_C(v_v, code);
- EXPECT_FAILURE_C(i_i, code);
- EXPECT_FAILURE_C(f_ff, code);
- EXPECT_FAILURE_C(d_dd, code);
+ ExpectValidates(sigs.v_v(), code);
+ ExpectFailure(sigs.i_i(), code);
+ ExpectFailure(sigs.f_ff(), code);
+ ExpectFailure(sigs.d_dd(), code);
}
TEST_F(FunctionBodyDecoderTest, BrTable2a) {
- static byte code[] = {
- B1(WASM_BR_TABLE(WASM_I32V_2(67), 1, BR_TARGET(0), BR_TARGET(0)))};
- EXPECT_VERIFIES_C(v_v, code);
+ ExpectValidates(
+ sigs.v_v(),
+ {B1(WASM_BR_TABLE(WASM_I32V_2(67), 1, BR_TARGET(0), BR_TARGET(0)))});
}
TEST_F(FunctionBodyDecoderTest, BrTable2b) {
- static byte code[] = {WASM_BLOCK(WASM_BLOCK(
- WASM_BR_TABLE(WASM_I32V_2(67), 1, BR_TARGET(0), BR_TARGET(1))))};
- EXPECT_VERIFIES_C(v_v, code);
+ ExpectValidates(sigs.v_v(),
+ {WASM_BLOCK(WASM_BLOCK(WASM_BR_TABLE(
+ WASM_I32V_2(67), 1, BR_TARGET(0), BR_TARGET(1))))});
}
TEST_F(FunctionBodyDecoderTest, BrTable_off_end) {
static byte code[] = {B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))};
for (size_t len = 1; len < sizeof(code); len++) {
- Verify(false, sigs.i_i(), {code, len}, kAppendEnd);
- Verify(false, sigs.i_i(), {code, len}, kOmitEnd);
+ ExpectFailure(sigs.i_i(), VectorOf(code, len), kAppendEnd);
+ ExpectFailure(sigs.i_i(), VectorOf(code, len), kOmitEnd);
}
}
TEST_F(FunctionBodyDecoderTest, BrTable_invalid_br1) {
for (int depth = 0; depth < 4; depth++) {
byte code[] = {B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
- if (depth <= 1) {
- EXPECT_VERIFIES_C(v_i, code);
- } else {
- EXPECT_FAILURE_C(v_i, code);
- }
+ Validate(depth <= 1, sigs.v_i(), code);
}
}
@@ -2250,136 +2507,143 @@ TEST_F(FunctionBodyDecoderTest, BrTable_invalid_br2) {
for (int depth = 0; depth < 7; depth++) {
byte code[] = {
WASM_LOOP(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
- if (depth < 2) {
- EXPECT_VERIFIES_C(v_i, code);
- } else {
- EXPECT_FAILURE_C(v_i, code);
- }
+ Validate(depth < 2, sigs.v_i(), code);
}
}
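
A sketch of the depth check the BrTable_invalid_br tests exercise (assumed form): each br_table target is a relative label depth and must stay within the current control-stack height, which is why BR_TARGET(depth) above flips from validating to failing once depth exceeds the enclosing block/loop nesting.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static bool BrTargetsInBounds(const std::vector<uint32_t>& targets,
                                  uint32_t control_depth) {
      for (uint32_t t : targets) {
        if (t >= control_depth) return false;
      }
      return true;
    }

    int main() {
      // One explicit block plus the function scope: depths 0 and 1 are fine.
      assert(BrTargetsInBounds({0, 1}, 2));
      assert(!BrTargetsInBounds({2}, 2));  // too deep -> validation failure
    }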
TEST_F(FunctionBodyDecoderTest, BrTable_arity_mismatch1) {
- EXPECT_FAILURE(
- v_v,
- WASM_BLOCK(WASM_BLOCK_I(
- WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+ ExpectFailure(
+ sigs.v_v(),
+ {WASM_BLOCK(WASM_BLOCK_I(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1))))});
}
TEST_F(FunctionBodyDecoderTest, BrTable_arity_mismatch2) {
- EXPECT_FAILURE(
- v_v,
- WASM_BLOCK_I(WASM_BLOCK(
- WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+ ExpectFailure(
+ sigs.v_v(),
+ {WASM_BLOCK_I(WASM_BLOCK(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1))))});
}
TEST_F(FunctionBodyDecoderTest, BrTable_arity_mismatch_loop1) {
- EXPECT_FAILURE(
- v_v,
- WASM_LOOP(WASM_BLOCK_I(
- WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+ ExpectFailure(
+ sigs.v_v(),
+ {WASM_LOOP(WASM_BLOCK_I(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1))))});
}
TEST_F(FunctionBodyDecoderTest, BrTable_arity_mismatch_loop2) {
- EXPECT_FAILURE(
- v_v,
- WASM_BLOCK_I(WASM_LOOP(
- WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+ ExpectFailure(
+ sigs.v_v(),
+ {WASM_BLOCK_I(WASM_LOOP(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1))))});
}
TEST_F(FunctionBodyDecoderTest, BrTable_loop_block) {
- EXPECT_VERIFIES(
- v_v,
- WASM_LOOP(WASM_BLOCK(
- WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+ ExpectValidates(
+ sigs.v_v(),
+ {WASM_LOOP(WASM_BLOCK(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1))))});
}
TEST_F(FunctionBodyDecoderTest, BrTable_block_loop) {
- EXPECT_VERIFIES(
- v_v,
- WASM_LOOP(WASM_BLOCK(
- WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+ ExpectValidates(
+ sigs.v_v(),
+ {WASM_LOOP(WASM_BLOCK(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1))))});
}
TEST_F(FunctionBodyDecoderTest, BrTable_type_mismatch1) {
- EXPECT_FAILURE(
- v_v,
- WASM_BLOCK_I(WASM_BLOCK_F(
- WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+ ExpectFailure(
+ sigs.v_v(),
+ {WASM_BLOCK_I(WASM_BLOCK_F(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1))))});
}
TEST_F(FunctionBodyDecoderTest, BrTable_type_mismatch2) {
- EXPECT_FAILURE(
- v_v,
- WASM_BLOCK_F(WASM_BLOCK_I(
- WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+ ExpectFailure(
+ sigs.v_v(),
+ {WASM_BLOCK_F(WASM_BLOCK_I(
+ WASM_ONE, WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1))))});
}
TEST_F(FunctionBodyDecoderTest, BrTable_type_mismatch_unreachable) {
- EXPECT_FAILURE(v_v,
- WASM_BLOCK_F(WASM_BLOCK_I(
- WASM_UNREACHABLE,
- WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1)))));
+ ExpectFailure(sigs.v_v(),
+ {WASM_BLOCK_F(WASM_BLOCK_I(
+ WASM_UNREACHABLE,
+ WASM_BR_TABLE(WASM_ONE, 1, BR_TARGET(0), BR_TARGET(1))))});
}
TEST_F(FunctionBodyDecoderTest, BrUnreachable1) {
- EXPECT_VERIFIES(v_i, WASM_GET_LOCAL(0), kExprBrTable, 0, BR_TARGET(0));
+ ExpectValidates(sigs.v_i(),
+ {WASM_GET_LOCAL(0), kExprBrTable, 0, BR_TARGET(0)});
}
TEST_F(FunctionBodyDecoderTest, BrUnreachable2) {
- EXPECT_VERIFIES(v_i, WASM_GET_LOCAL(0), kExprBrTable, 0, BR_TARGET(0),
- WASM_NOP);
- EXPECT_FAILURE(v_i, WASM_GET_LOCAL(0), kExprBrTable, 0, BR_TARGET(0),
- WASM_ZERO);
+ ExpectValidates(sigs.v_i(),
+ {WASM_GET_LOCAL(0), kExprBrTable, 0, BR_TARGET(0), WASM_NOP});
+ ExpectFailure(sigs.v_i(),
+ {WASM_GET_LOCAL(0), kExprBrTable, 0, BR_TARGET(0), WASM_ZERO});
}
TEST_F(FunctionBodyDecoderTest, Brv1) {
- EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_BRV(0, WASM_ZERO)));
- EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_LOOP_I(WASM_BRV(2, WASM_ZERO))));
+ ExpectValidates(sigs.i_i(), {WASM_BLOCK_I(WASM_BRV(0, WASM_ZERO))});
+ ExpectValidates(sigs.i_i(),
+ {WASM_BLOCK_I(WASM_LOOP_I(WASM_BRV(2, WASM_ZERO)))});
}
TEST_F(FunctionBodyDecoderTest, Brv1_type) {
- EXPECT_VERIFIES(i_ii, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0))));
- EXPECT_VERIFIES(l_ll, WASM_BLOCK_L(WASM_BRV(0, WASM_GET_LOCAL(0))));
- EXPECT_VERIFIES(f_ff, WASM_BLOCK_F(WASM_BRV(0, WASM_GET_LOCAL(0))));
- EXPECT_VERIFIES(d_dd, WASM_BLOCK_D(WASM_BRV(0, WASM_GET_LOCAL(0))));
+ ExpectValidates(sigs.i_ii(), {WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0)))});
+ ExpectValidates(sigs.l_ll(), {WASM_BLOCK_L(WASM_BRV(0, WASM_GET_LOCAL(0)))});
+ ExpectValidates(sigs.f_ff(), {WASM_BLOCK_F(WASM_BRV(0, WASM_GET_LOCAL(0)))});
+ ExpectValidates(sigs.d_dd(), {WASM_BLOCK_D(WASM_BRV(0, WASM_GET_LOCAL(0)))});
}
TEST_F(FunctionBodyDecoderTest, Brv1_type_n) {
- EXPECT_FAILURE(i_f, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0))));
- EXPECT_FAILURE(i_d, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0))));
+ ExpectFailure(sigs.i_f(), {WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0)))});
+ ExpectFailure(sigs.i_d(), {WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0)))});
}
TEST_F(FunctionBodyDecoderTest, BrvIf1) {
- EXPECT_VERIFIES(i_v, WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_ZERO)));
+ ExpectValidates(sigs.i_v(), {WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_ZERO))});
}
TEST_F(FunctionBodyDecoderTest, BrvIf1_type) {
- EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
- EXPECT_VERIFIES(l_l, WASM_BLOCK_L(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
- EXPECT_VERIFIES(f_ff, WASM_BLOCK_F(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
- EXPECT_VERIFIES(d_dd, WASM_BLOCK_D(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
+ ExpectValidates(sigs.i_i(),
+ {WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)))});
+ ExpectValidates(sigs.l_l(),
+ {WASM_BLOCK_L(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)))});
+ ExpectValidates(sigs.f_ff(),
+ {WASM_BLOCK_F(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)))});
+ ExpectValidates(sigs.d_dd(),
+ {WASM_BLOCK_D(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)))});
}
TEST_F(FunctionBodyDecoderTest, BrvIf1_type_n) {
- EXPECT_FAILURE(i_f, WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
- EXPECT_FAILURE(i_d, WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
+ ExpectFailure(sigs.i_f(),
+ {WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)))});
+ ExpectFailure(sigs.i_d(),
+ {WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)))});
}
TEST_F(FunctionBodyDecoderTest, Select) {
- EXPECT_VERIFIES(i_i,
- WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_ZERO));
- EXPECT_VERIFIES(f_ff, WASM_SELECT(WASM_F32(0.0), WASM_F32(0.0), WASM_ZERO));
- EXPECT_VERIFIES(d_dd, WASM_SELECT(WASM_F64(0.0), WASM_F64(0.0), WASM_ZERO));
- EXPECT_VERIFIES(l_l, WASM_SELECT(WASM_I64V_1(0), WASM_I64V_1(0), WASM_ZERO));
+ ExpectValidates(sigs.i_i(), {WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_ZERO)});
+ ExpectValidates(sigs.f_ff(),
+ {WASM_SELECT(WASM_F32(0.0), WASM_F32(0.0), WASM_ZERO)});
+ ExpectValidates(sigs.d_dd(),
+ {WASM_SELECT(WASM_F64(0.0), WASM_F64(0.0), WASM_ZERO)});
+ ExpectValidates(sigs.l_l(),
+ {WASM_SELECT(WASM_I64V_1(0), WASM_I64V_1(0), WASM_ZERO)});
}
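
The Select tests above and below follow the classic select typing, sketched standalone here (assumed rule): (T, T, i32) -> T, i.e. both value operands share one type and the condition is always i32; any other combination is rejected.

    #include <cassert>
    #include <optional>

    enum class ValType { I32, I64, F32, F64 };

    static std::optional<ValType> TypeSelect(ValType a, ValType b,
                                             ValType cond) {
      if (cond != ValType::I32) return std::nullopt;  // condition must be i32
      if (a != b) return std::nullopt;                // operands must agree
      return a;
    }

    int main() {
      assert(TypeSelect(ValType::F32, ValType::F32, ValType::I32) ==
             ValType::F32);
      assert(!TypeSelect(ValType::F32, ValType::I32, ValType::I32).has_value());
      assert(!TypeSelect(ValType::I32, ValType::I32, ValType::I64).has_value());
    }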
TEST_F(FunctionBodyDecoderTest, Select_fail1) {
- EXPECT_FAILURE(
- i_i, WASM_SELECT(WASM_F32(0.0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
- EXPECT_FAILURE(
- i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_F32(0.0), WASM_GET_LOCAL(0)));
- EXPECT_FAILURE(
- i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_F32(0.0)));
+ ExpectFailure(sigs.i_i(), {WASM_SELECT(WASM_F32(0.0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0))});
+ ExpectFailure(sigs.i_i(), {WASM_SELECT(WASM_GET_LOCAL(0), WASM_F32(0.0),
+ WASM_GET_LOCAL(0))});
+ ExpectFailure(sigs.i_i(), {WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_F32(0.0))});
}
TEST_F(FunctionBodyDecoderTest, Select_fail2) {
@@ -2390,29 +2654,29 @@ TEST_F(FunctionBodyDecoderTest, Select_fail2) {
ValueType types[] = {type, kWasmI32, type};
FunctionSig sig(1, 2, types);
- EXPECT_VERIFIES_S(&sig, WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(0)));
+ ExpectValidates(&sig, {WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(0))});
- EXPECT_FAILURE_S(&sig, WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0)));
+ ExpectFailure(&sig, {WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0))});
- EXPECT_FAILURE_S(&sig, WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(0)));
+ ExpectFailure(&sig, {WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(0))});
- EXPECT_FAILURE_S(&sig, WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(1)));
+ ExpectFailure(&sig, {WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1))});
}
}
TEST_F(FunctionBodyDecoderTest, Select_TypeCheck) {
- EXPECT_FAILURE(
- i_i, WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ ExpectFailure(sigs.i_i(), {WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0))});
- EXPECT_FAILURE(
- i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_F64(0.25), WASM_GET_LOCAL(0)));
+ ExpectFailure(sigs.i_i(), {WASM_SELECT(WASM_GET_LOCAL(0), WASM_F64(0.25),
+ WASM_GET_LOCAL(0))});
- EXPECT_FAILURE(i_i,
- WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_I64V_1(0)));
+ ExpectFailure(sigs.i_i(), {WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0),
+ WASM_I64V_1(0))});
}
TEST_F(FunctionBodyDecoderTest, Throw) {
@@ -2422,12 +2686,12 @@ TEST_F(FunctionBodyDecoderTest, Throw) {
byte ex1 = builder.AddException(sigs.v_v());
byte ex2 = builder.AddException(sigs.v_i());
byte ex3 = builder.AddException(sigs.v_ii());
- EXPECT_VERIFIES(v_v, kExprThrow, ex1);
- EXPECT_VERIFIES(v_v, WASM_I32V(0), kExprThrow, ex2);
- EXPECT_FAILURE(v_v, WASM_F32(0.0), kExprThrow, ex2);
- EXPECT_VERIFIES(v_v, WASM_I32V(0), WASM_I32V(0), kExprThrow, ex3);
- EXPECT_FAILURE(v_v, WASM_F32(0.0), WASM_I32V(0), kExprThrow, ex3);
- EXPECT_FAILURE(v_v, kExprThrow, 99);
+ ExpectValidates(sigs.v_v(), {kExprThrow, ex1});
+ ExpectValidates(sigs.v_v(), {WASM_I32V(0), kExprThrow, ex2});
+ ExpectFailure(sigs.v_v(), {WASM_F32(0.0), kExprThrow, ex2});
+ ExpectValidates(sigs.v_v(), {WASM_I32V(0), WASM_I32V(0), kExprThrow, ex3});
+ ExpectFailure(sigs.v_v(), {WASM_F32(0.0), WASM_I32V(0), kExprThrow, ex3});
+ ExpectFailure(sigs.v_v(), {kExprThrow, 99});
}
TEST_F(FunctionBodyDecoderTest, ThrowUnreachable) {
@@ -2436,12 +2700,14 @@ TEST_F(FunctionBodyDecoderTest, ThrowUnreachable) {
module = builder.module();
byte ex1 = builder.AddException(sigs.v_v());
byte ex2 = builder.AddException(sigs.v_i());
- EXPECT_VERIFIES(i_i, WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_NOP);
- EXPECT_VERIFIES(v_i, WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_NOP);
- EXPECT_VERIFIES(i_i, WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_ZERO);
- EXPECT_FAILURE(v_i, WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_ZERO);
- EXPECT_FAILURE(i_i, WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_F32(0.0));
- EXPECT_FAILURE(v_i, WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_F32(0.0));
+ ExpectValidates(sigs.i_i(), {WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_NOP});
+ ExpectValidates(sigs.v_i(), {WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_NOP});
+ ExpectValidates(sigs.i_i(), {WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_ZERO});
+ ExpectFailure(sigs.v_i(), {WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_ZERO});
+ ExpectFailure(sigs.i_i(),
+ {WASM_GET_LOCAL(0), kExprThrow, ex1, WASM_F32(0.0)});
+ ExpectFailure(sigs.v_i(),
+ {WASM_GET_LOCAL(0), kExprThrow, ex2, WASM_F32(0.0)});
}
#define WASM_TRY_OP kExprTry, kLocalVoid
@@ -2452,21 +2718,22 @@ TEST_F(FunctionBodyDecoderTest, TryCatch) {
WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
- EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatch, kExprDrop, kExprEnd);
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatch, kExprCatch, kExprEnd);
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprEnd); // Missing catch.
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatch); // Missing end.
- EXPECT_FAILURE(v_v, kExprCatch, kExprEnd); // Missing try.
+ ExpectValidates(sigs.v_v(), {WASM_TRY_OP, kExprCatch, kExprDrop, kExprEnd});
+ ExpectFailure(sigs.v_v(), {WASM_TRY_OP, kExprCatch, kExprCatch, kExprEnd});
+ ExpectFailure(sigs.v_v(), {WASM_TRY_OP, kExprEnd}); // Missing catch.
+ ExpectFailure(sigs.v_v(), {WASM_TRY_OP, kExprCatch}); // Missing end.
+ ExpectFailure(sigs.v_v(), {kExprCatch, kExprEnd}); // Missing try.
}
TEST_F(FunctionBodyDecoderTest, Rethrow) {
WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
- EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatch, kExprRethrow, kExprEnd);
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprRethrow, kExprCatch, kExprEnd);
- EXPECT_FAILURE(v_v, WASM_BLOCK(kExprRethrow));
- EXPECT_FAILURE(v_v, kExprRethrow);
+ ExpectValidates(sigs.v_v(),
+ {WASM_TRY_OP, kExprCatch, kExprRethrow, kExprEnd});
+ ExpectFailure(sigs.v_v(), {WASM_TRY_OP, kExprRethrow, kExprCatch, kExprEnd});
+ ExpectFailure(sigs.v_v(), {WASM_BLOCK(kExprRethrow)});
+ ExpectFailure(sigs.v_v(), {kExprRethrow});
}
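
A toy checker for the structural rules the TryCatch and Rethrow tests imply (an assumption about the eh proposal as tested here, not V8's implementation): catch is only legal directly inside a try, rethrow only inside a catch, and a try needs exactly one catch before its end.

    #include <cassert>
    #include <vector>

    enum class Ctrl { Try, Catch };

    struct EhChecker {
      std::vector<Ctrl> stack;
      bool Try() { stack.push_back(Ctrl::Try); return true; }
      bool Catch() {
        if (stack.empty() || stack.back() != Ctrl::Try) return false;
        stack.back() = Ctrl::Catch;  // a try takes exactly one catch
        return true;
      }
      bool Rethrow() {
        return !stack.empty() && stack.back() == Ctrl::Catch;
      }
      bool End() {
        // Ending a bare try means its catch is missing.
        if (stack.empty() || stack.back() != Ctrl::Catch) return false;
        stack.pop_back();
        return true;
      }
    };

    int main() {
      EhChecker ok;
      assert(ok.Try() && ok.Catch() && ok.Rethrow() && ok.End());
      EhChecker missing_catch;
      assert(missing_catch.Try() && !missing_catch.End());
    }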
TEST_F(FunctionBodyDecoderTest, BrOnExn) {
@@ -2475,23 +2742,24 @@ TEST_F(FunctionBodyDecoderTest, BrOnExn) {
module = builder.module();
byte ex1 = builder.AddException(sigs.v_v());
byte ex2 = builder.AddException(sigs.v_i());
- EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(0, ex1),
- kExprDrop, kExprEnd);
- EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(1, ex1),
- kExprDrop, kExprEnd);
- EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(0, ex1),
- WASM_BR_ON_EXN(0, ex1), kExprDrop, kExprEnd);
- EXPECT_VERIFIES(v_v, WASM_BLOCK(WASM_TRY_OP, kExprCatch,
- WASM_BR_ON_EXN(1, ex1), kExprDrop, kExprEnd));
- EXPECT_VERIFIES(i_v,
- WASM_BLOCK_I(WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(1, ex2),
- kExprDrop, kExprEnd, kExprI32Const, 0));
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(2, ex1),
- kExprDrop, kExprEnd);
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatch, kExprDrop,
- WASM_BR_ON_EXN(0, ex1), kExprEnd);
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(0, ex1),
- kExprEnd);
+ ExpectValidates(sigs.v_v(), {WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(0, ex1),
+ kExprDrop, kExprEnd});
+ ExpectValidates(sigs.v_v(), {WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(1, ex1),
+ kExprDrop, kExprEnd});
+ ExpectValidates(sigs.v_v(), {WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(0, ex1),
+ WASM_BR_ON_EXN(0, ex1), kExprDrop, kExprEnd});
+ ExpectValidates(sigs.v_v(),
+ {WASM_BLOCK(WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(1, ex1),
+ kExprDrop, kExprEnd)});
+ ExpectValidates(sigs.i_v(),
+ {WASM_BLOCK_I(WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(1, ex2),
+ kExprDrop, kExprEnd, kExprI32Const, 0)});
+ ExpectFailure(sigs.v_v(), {WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(2, ex1),
+ kExprDrop, kExprEnd});
+ ExpectFailure(sigs.v_v(), {WASM_TRY_OP, kExprCatch, kExprDrop,
+ WASM_BR_ON_EXN(0, ex1), kExprEnd});
+ ExpectFailure(sigs.v_v(),
+ {WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(0, ex1), kExprEnd});
}
#undef WASM_BR_ON_EXN
@@ -2502,15 +2770,19 @@ TEST_F(FunctionBodyDecoderTest, MultiValBlock1) {
TestModuleBuilder builder;
module = builder.module();
byte f0 = builder.AddSignature(sigs.ii_v());
- EXPECT_VERIFIES(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- kExprI32Add);
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_NOP), kExprI32Add);
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0)), kExprI32Add);
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(0)),
- kExprI32Add);
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- kExprF32Add);
+ ExpectValidates(
+ sigs.i_ii(),
+ {WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), kExprI32Add});
+ ExpectFailure(sigs.i_ii(), {WASM_BLOCK_X(f0, WASM_NOP), kExprI32Add});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_BLOCK_X(f0, WASM_GET_LOCAL(0)), kExprI32Add});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(0)),
+ kExprI32Add});
+ ExpectFailure(
+ sigs.i_ii(),
+ {WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), kExprF32Add});
}
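
A sketch of the arity check behind the MultiVal* tests (assumed rule): a block annotated with signature f0 = ii_v must leave exactly two i32 values, which the following i32.add consumes; leaving fewer, extra, or differently-typed values trips the merge check at the block end, matching the failures above.

    #include <cassert>
    #include <vector>

    enum class ValType { I32, I64, F32, F64 };

    static bool BlockResultsMatch(const std::vector<ValType>& left_on_stack,
                                  const std::vector<ValType>& declared) {
      return left_on_stack == declared;
    }

    int main() {
      std::vector<ValType> ii{ValType::I32, ValType::I32};
      assert(BlockResultsMatch({ValType::I32, ValType::I32}, ii));  // validates
      assert(!BlockResultsMatch({ValType::I32}, ii));               // too few
      assert(!BlockResultsMatch(
          {ValType::I32, ValType::I32, ValType::I32}, ii));         // too many
    }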
TEST_F(FunctionBodyDecoderTest, MultiValBlock2) {
@@ -2518,17 +2790,20 @@ TEST_F(FunctionBodyDecoderTest, MultiValBlock2) {
TestModuleBuilder builder;
module = builder.module();
byte f0 = builder.AddSignature(sigs.ii_v());
- EXPECT_VERIFIES(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_I32_ADD(WASM_NOP, WASM_NOP));
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_NOP),
- WASM_I32_ADD(WASM_NOP, WASM_NOP));
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0)),
- WASM_I32_ADD(WASM_NOP, WASM_NOP));
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(0)),
- WASM_I32_ADD(WASM_NOP, WASM_NOP));
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_F32_ADD(WASM_NOP, WASM_NOP));
+ ExpectValidates(sigs.i_ii(),
+ {WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP)});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_BLOCK_X(f0, WASM_NOP), WASM_I32_ADD(WASM_NOP, WASM_NOP)});
+ ExpectFailure(sigs.i_ii(), {WASM_BLOCK_X(f0, WASM_GET_LOCAL(0)),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP)});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(0)),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP)});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_F32_ADD(WASM_NOP, WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, MultiValBlockBr) {
@@ -2536,11 +2811,11 @@ TEST_F(FunctionBodyDecoderTest, MultiValBlockBr) {
TestModuleBuilder builder;
module = builder.module();
byte f0 = builder.AddSignature(sigs.ii_v());
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_BR(0)),
- kExprI32Add);
- EXPECT_VERIFIES(i_ii, WASM_BLOCK_X(f0, WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(1), WASM_BR(0)),
- kExprI32Add);
+ ExpectFailure(sigs.i_ii(),
+ {WASM_BLOCK_X(f0, WASM_GET_LOCAL(0), WASM_BR(0)), kExprI32Add});
+ ExpectValidates(sigs.i_ii(), {WASM_BLOCK_X(f0, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_BR(0)),
+ kExprI32Add});
}
TEST_F(FunctionBodyDecoderTest, MultiValLoop1) {
@@ -2548,15 +2823,17 @@ TEST_F(FunctionBodyDecoderTest, MultiValLoop1) {
TestModuleBuilder builder;
module = builder.module();
byte f0 = builder.AddSignature(sigs.ii_v());
- EXPECT_VERIFIES(i_ii, WASM_LOOP_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- kExprI32Add);
- EXPECT_FAILURE(i_ii, WASM_LOOP_X(f0, WASM_NOP), kExprI32Add);
- EXPECT_FAILURE(i_ii, WASM_LOOP_X(f0, WASM_GET_LOCAL(0)), kExprI32Add);
- EXPECT_FAILURE(i_ii, WASM_LOOP_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(0)),
- kExprI32Add);
- EXPECT_FAILURE(i_ii, WASM_LOOP_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- kExprF32Add);
+ ExpectValidates(
+ sigs.i_ii(),
+ {WASM_LOOP_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), kExprI32Add});
+ ExpectFailure(sigs.i_ii(), {WASM_LOOP_X(f0, WASM_NOP), kExprI32Add});
+ ExpectFailure(sigs.i_ii(), {WASM_LOOP_X(f0, WASM_GET_LOCAL(0)), kExprI32Add});
+ ExpectFailure(sigs.i_ii(), {WASM_LOOP_X(f0, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)),
+ kExprI32Add});
+ ExpectFailure(
+ sigs.i_ii(),
+ {WASM_LOOP_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), kExprF32Add});
}
TEST_F(FunctionBodyDecoderTest, MultiValIf) {
@@ -2564,62 +2841,61 @@ TEST_F(FunctionBodyDecoderTest, MultiValIf) {
TestModuleBuilder builder;
module = builder.module();
byte f0 = builder.AddSignature(sigs.ii_v());
- EXPECT_VERIFIES(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
- kExprI32Add);
- EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP),
- kExprI32Add);
- EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
- WASM_NOP,
- WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
- kExprI32Add);
- EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_NOP),
- kExprI32Add);
- EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- kExprI32Add);
- EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
- kExprI32Add);
- EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_GET_LOCAL(1)),
- kExprI32Add);
- EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0)),
- WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0))),
- kExprI32Add);
- EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0)),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))),
- kExprI32Add);
- EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(1))),
- kExprI32Add);
- EXPECT_FAILURE(
- i_ii, WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
- WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
- kExprF32Add);
+ ExpectValidates(
+ sigs.i_ii(),
+ {WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprI32Add});
+ ExpectFailure(
+ sigs.i_ii(),
+ {WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP), kExprI32Add});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0), WASM_NOP,
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprI32Add});
+ ExpectFailure(
+ sigs.i_ii(),
+ {WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), WASM_NOP),
+ kExprI32Add});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1)),
+ kExprI32Add});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprI32Add});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_GET_LOCAL(1)),
+ kExprI32Add});
+ ExpectFailure(
+ sigs.i_ii(),
+ {WASM_IF_ELSE_X(
+ f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0))),
+ kExprI32Add});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))),
+ kExprI32Add});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(1))),
+ kExprI32Add});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_IF_ELSE_X(f0, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprF32Add});
}
TEST_F(FunctionBodyDecoderTest, BlockParam) {
@@ -2628,24 +2904,27 @@ TEST_F(FunctionBodyDecoderTest, BlockParam) {
module = builder.module();
byte f1 = builder.AddSignature(sigs.i_i());
byte f2 = builder.AddSignature(sigs.i_ii());
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
- WASM_BLOCK_X(f1, WASM_GET_LOCAL(1),
- WASM_I32_ADD(WASM_NOP, WASM_NOP)));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_BLOCK_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP)));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_BLOCK_X(f1, WASM_NOP),
- WASM_I32_ADD(WASM_NOP, WASM_NOP));
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f1, WASM_NOP),
- WASM_RETURN1(WASM_GET_LOCAL(0)));
- EXPECT_FAILURE(i_ii, WASM_BLOCK_X(f1, WASM_GET_LOCAL(0)),
- WASM_RETURN1(WASM_GET_LOCAL(0)));
- EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0),
- WASM_BLOCK_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP)),
- WASM_RETURN1(WASM_GET_LOCAL(0)));
- EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0),
- WASM_BLOCK_X(f1, WASM_F32_NEG(WASM_NOP)),
- WASM_RETURN1(WASM_GET_LOCAL(0)));
+ ExpectValidates(
+ sigs.i_ii(),
+ {WASM_GET_LOCAL(0),
+ WASM_BLOCK_X(f1, WASM_GET_LOCAL(1), WASM_I32_ADD(WASM_NOP, WASM_NOP))});
+ ExpectValidates(sigs.i_ii(),
+ {WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_BLOCK_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP))});
+ ExpectValidates(sigs.i_ii(), {WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_BLOCK_X(f1, WASM_NOP),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP)});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_BLOCK_X(f1, WASM_NOP), WASM_RETURN1(WASM_GET_LOCAL(0))});
+ ExpectFailure(sigs.i_ii(), {WASM_BLOCK_X(f1, WASM_GET_LOCAL(0)),
+ WASM_RETURN1(WASM_GET_LOCAL(0))});
+ ExpectFailure(
+ sigs.i_ii(),
+ {WASM_GET_LOCAL(0), WASM_BLOCK_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP)),
+ WASM_RETURN1(WASM_GET_LOCAL(0))});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_GET_LOCAL(0), WASM_BLOCK_X(f1, WASM_F32_NEG(WASM_NOP)),
+ WASM_RETURN1(WASM_GET_LOCAL(0))});
}
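
The BlockParam, LoopParam, and IfParam tests rely on the multi-value extension letting a block signature declare parameters. A standalone sketch of the assumed entry rule: the declared parameters are popped from the enclosing stack when the block starts, so WASM_BLOCK_X(f1, ...) above needs one i32 already on the stack.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum class ValType { I32, I64, F32, F64 };

    // Pops the block's declared parameters off the outer stack; returns
    // false if the values on top do not match the declaration.
    static bool EnterBlock(std::vector<ValType>* stack,
                           const std::vector<ValType>& params) {
      if (stack->size() < params.size()) return false;
      size_t base = stack->size() - params.size();
      for (size_t i = 0; i < params.size(); ++i) {
        if ((*stack)[base + i] != params[i]) return false;
      }
      stack->resize(base);
      return true;
    }

    int main() {
      std::vector<ValType> stack{ValType::I32};
      assert(EnterBlock(&stack, {ValType::I32}));   // param available
      assert(!EnterBlock(&stack, {ValType::I32}));  // stack now empty -> fail
    }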
TEST_F(FunctionBodyDecoderTest, LoopParam) {
@@ -2654,24 +2933,25 @@ TEST_F(FunctionBodyDecoderTest, LoopParam) {
module = builder.module();
byte f1 = builder.AddSignature(sigs.i_i());
byte f2 = builder.AddSignature(sigs.i_ii());
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
- WASM_LOOP_X(f1, WASM_GET_LOCAL(1),
- WASM_I32_ADD(WASM_NOP, WASM_NOP)));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_LOOP_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP)));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_LOOP_X(f1, WASM_NOP),
- WASM_I32_ADD(WASM_NOP, WASM_NOP));
- EXPECT_FAILURE(i_ii, WASM_LOOP_X(f1, WASM_NOP),
- WASM_RETURN1(WASM_GET_LOCAL(0)));
- EXPECT_FAILURE(i_ii, WASM_LOOP_X(f1, WASM_GET_LOCAL(0)),
- WASM_RETURN1(WASM_GET_LOCAL(0)));
- EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0),
- WASM_LOOP_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP)),
- WASM_RETURN1(WASM_GET_LOCAL(0)));
- EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0),
- WASM_LOOP_X(f1, WASM_F32_NEG(WASM_NOP)),
- WASM_RETURN1(WASM_GET_LOCAL(0)));
+ ExpectValidates(sigs.i_ii(), {WASM_GET_LOCAL(0),
+ WASM_LOOP_X(f1, WASM_GET_LOCAL(1),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP))});
+ ExpectValidates(sigs.i_ii(),
+ {WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_LOOP_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP))});
+ ExpectValidates(sigs.i_ii(), {WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_LOOP_X(f1, WASM_NOP),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP)});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_LOOP_X(f1, WASM_NOP), WASM_RETURN1(WASM_GET_LOCAL(0))});
+ ExpectFailure(sigs.i_ii(), {WASM_LOOP_X(f1, WASM_GET_LOCAL(0)),
+ WASM_RETURN1(WASM_GET_LOCAL(0))});
+ ExpectFailure(sigs.i_ii(), {WASM_GET_LOCAL(0),
+ WASM_LOOP_X(f2, WASM_I32_ADD(WASM_NOP, WASM_NOP)),
+ WASM_RETURN1(WASM_GET_LOCAL(0))});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_GET_LOCAL(0), WASM_LOOP_X(f1, WASM_F32_NEG(WASM_NOP)),
+ WASM_RETURN1(WASM_GET_LOCAL(0))});
}
TEST_F(FunctionBodyDecoderTest, LoopParamBr) {
@@ -2680,20 +2960,21 @@ TEST_F(FunctionBodyDecoderTest, LoopParamBr) {
module = builder.module();
byte f1 = builder.AddSignature(sigs.i_i());
byte f2 = builder.AddSignature(sigs.i_ii());
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
- WASM_LOOP_X(f1, WASM_BR(0)));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
- WASM_LOOP_X(f1, WASM_BRV(0, WASM_GET_LOCAL(1))));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_LOOP_X(f2, WASM_BR(0)));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
- WASM_LOOP_X(f1, WASM_BLOCK_X(f1, WASM_BR(1))));
- EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0),
- WASM_LOOP_X(f1, WASM_BLOCK(WASM_BR(1))),
- WASM_RETURN1(WASM_GET_LOCAL(0)));
- EXPECT_FAILURE(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_LOOP_X(f2, WASM_BLOCK_X(f1, WASM_BR(1))),
- WASM_RETURN1(WASM_GET_LOCAL(0)));
+ ExpectValidates(sigs.i_ii(),
+ {WASM_GET_LOCAL(0), WASM_LOOP_X(f1, WASM_BR(0))});
+ ExpectValidates(
+ sigs.i_ii(),
+ {WASM_GET_LOCAL(0), WASM_LOOP_X(f1, WASM_BRV(0, WASM_GET_LOCAL(1)))});
+ ExpectValidates(sigs.i_ii(), {WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_LOOP_X(f2, WASM_BR(0))});
+ ExpectValidates(sigs.i_ii(), {WASM_GET_LOCAL(0),
+ WASM_LOOP_X(f1, WASM_BLOCK_X(f1, WASM_BR(1)))});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_GET_LOCAL(0), WASM_LOOP_X(f1, WASM_BLOCK(WASM_BR(1))),
+ WASM_RETURN1(WASM_GET_LOCAL(0))});
+ ExpectFailure(sigs.i_ii(), {WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_LOOP_X(f2, WASM_BLOCK_X(f1, WASM_BR(1))),
+ WASM_RETURN1(WASM_GET_LOCAL(0))});
}
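
Branching to a loop label re-enters the loop header, so the branch must supply the loop's parameter types rather than its result types; the failing cases above wrap the branch in a block whose own frame cannot supply all of the loop's parameters. A hand-assembled sketch of the first validating case (byte values per the wasm spec; type index 0 assumed to be i32 -> i32):

    #include <cstdint>

    const uint8_t loop_param_br[] = {
        0x20, 0x00,  // local.get 0   -- pushes the loop's i32 parameter
        0x03, 0x00,  // loop (type 0) -- takes one i32 block parameter
        0x0c, 0x00,  // br 0          -- targets the loop header, which again
                     //                  requires the i32 parameter
        0x0b,        // end (loop)
        0x0b,        // end (function body)
    };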
TEST_F(FunctionBodyDecoderTest, IfParam) {
@@ -2702,29 +2983,32 @@ TEST_F(FunctionBodyDecoderTest, IfParam) {
module = builder.module();
byte f1 = builder.AddSignature(sigs.i_i());
byte f2 = builder.AddSignature(sigs.i_ii());
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
- WASM_IF_X(f1, WASM_GET_LOCAL(0),
- WASM_I32_ADD(WASM_NOP, WASM_GET_LOCAL(1))));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0),
- WASM_IF_ELSE_X(f1, WASM_GET_LOCAL(0),
- WASM_I32_ADD(WASM_NOP, WASM_GET_LOCAL(1)),
- WASM_I32_EQZ(WASM_NOP)));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_IF_ELSE_X(f2, WASM_GET_LOCAL(0),
- WASM_I32_ADD(WASM_NOP, WASM_NOP),
- WASM_I32_MUL(WASM_NOP, WASM_NOP)));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_IF_X(f1, WASM_GET_LOCAL(0), WASM_NOP),
- WASM_I32_ADD(WASM_NOP, WASM_NOP));
- EXPECT_VERIFIES(i_ii, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_IF_ELSE_X(f1, WASM_GET_LOCAL(0),
- WASM_NOP, WASM_I32_EQZ(WASM_NOP)),
- WASM_I32_ADD(WASM_NOP, WASM_NOP));
+ ExpectValidates(sigs.i_ii(),
+ {WASM_GET_LOCAL(0),
+ WASM_IF_X(f1, WASM_GET_LOCAL(0),
+ WASM_I32_ADD(WASM_NOP, WASM_GET_LOCAL(1)))});
+ ExpectValidates(sigs.i_ii(),
+ {WASM_GET_LOCAL(0),
+ WASM_IF_ELSE_X(f1, WASM_GET_LOCAL(0),
+ WASM_I32_ADD(WASM_NOP, WASM_GET_LOCAL(1)),
+ WASM_I32_EQZ(WASM_NOP))});
+ ExpectValidates(
+ sigs.i_ii(),
+ {WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_IF_ELSE_X(f2, WASM_GET_LOCAL(0), WASM_I32_ADD(WASM_NOP, WASM_NOP),
+ WASM_I32_MUL(WASM_NOP, WASM_NOP))});
+ ExpectValidates(sigs.i_ii(), {WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_IF_X(f1, WASM_GET_LOCAL(0), WASM_NOP),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP)});
+ ExpectValidates(sigs.i_ii(), {WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_IF_ELSE_X(f1, WASM_GET_LOCAL(0), WASM_NOP,
+ WASM_I32_EQZ(WASM_NOP)),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP)});
}
TEST_F(FunctionBodyDecoderTest, Regression709741) {
AddLocals(kWasmI32, kV8MaxWasmFunctionLocals - 1);
- EXPECT_VERIFIES(v_v, WASM_NOP);
+ ExpectValidates(sigs.v_v(), {WASM_NOP});
byte code[] = {WASM_NOP, WASM_END};
for (size_t i = 0; i < arraysize(code); ++i) {
@@ -2746,10 +3030,13 @@ TEST_F(FunctionBodyDecoderTest, MemoryInit) {
builder.SetDataSegmentCount(1);
module = builder.module();
- EXPECT_FAILURE(v_v, WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_v(),
+ {WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
WASM_FEATURE_SCOPE(bulk_memory);
- EXPECT_VERIFIES(v_v, WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
- EXPECT_FAILURE(v_v, WASM_TABLE_INIT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectValidates(sigs.v_v(),
+ {WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(),
+ {WASM_TABLE_INIT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
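
WASM_FEATURE_SCOPE(bulk_memory) enables the feature flag for the rest of the test body, so the same bytes are rejected before the scope opens and accepted after it; the final table.init case still fails even with the feature on. A minimal sketch of the RAII idea behind such a scope (hypothetical names, not V8's implementation):

    // Flips a feature flag for the enclosing scope; restores it on exit.
    class FeatureScope {
     public:
      explicit FeatureScope(bool* flag) : flag_(flag), saved_(*flag) {
        *flag_ = true;
      }
      ~FeatureScope() { *flag_ = saved_; }

     private:
      bool* const flag_;
      const bool saved_;
    };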
TEST_F(FunctionBodyDecoderTest, MemoryInitInvalid) {
@@ -2762,20 +3049,20 @@ TEST_F(FunctionBodyDecoderTest, MemoryInitInvalid) {
byte code[] = {WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO),
WASM_END};
for (size_t i = 0; i <= arraysize(code); ++i) {
- Verify(i == arraysize(code), sigs.v_v(), {code, i}, kOmitEnd);
+ Validate(i == arraysize(code), sigs.v_v(), VectorOf(code, i), kOmitEnd);
}
}
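
The loop feeds every prefix of the opcode and its immediates to the validator, expecting success only for the complete sequence (kOmitEnd keeps the harness from appending its own end opcode). The same pattern in a generic, self-contained form, with a caller-supplied validator standing in for Validate:

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <vector>

    // Only the full sequence should validate; every proper prefix must fail.
    void CheckPrefixes(
        const std::vector<uint8_t>& code,
        const std::function<bool(const uint8_t*, size_t)>& validate) {
      for (size_t i = 0; i <= code.size(); ++i) {
        const bool expect_ok = (i == code.size());
        if (validate(code.data(), i) != expect_ok) {
          std::printf("prefix of length %zu: unexpected result\n", i);
        }
      }
    }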
-TEST_F(FunctionBodyDecoderTest, MemoryDrop) {
+TEST_F(FunctionBodyDecoderTest, DataDrop) {
TestModuleBuilder builder;
builder.InitializeMemory();
builder.SetDataSegmentCount(1);
module = builder.module();
- EXPECT_FAILURE(v_v, WASM_MEMORY_DROP(0));
+ ExpectFailure(sigs.v_v(), {WASM_DATA_DROP(0)});
WASM_FEATURE_SCOPE(bulk_memory);
- EXPECT_VERIFIES(v_v, WASM_MEMORY_DROP(0));
- EXPECT_FAILURE(v_v, WASM_MEMORY_DROP(1));
+ ExpectValidates(sigs.v_v(), {WASM_DATA_DROP(0)});
+ ExpectFailure(sigs.v_v(), {WASM_DATA_DROP(1)});
}
TEST_F(FunctionBodyDecoderTest, MemoryCopy) {
@@ -2783,9 +3070,11 @@ TEST_F(FunctionBodyDecoderTest, MemoryCopy) {
builder.InitializeMemory();
module = builder.module();
- EXPECT_FAILURE(v_v, WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_v(),
+ {WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
WASM_FEATURE_SCOPE(bulk_memory);
- EXPECT_VERIFIES(v_v, WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectValidates(sigs.v_v(),
+ {WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, MemoryFill) {
@@ -2793,16 +3082,21 @@ TEST_F(FunctionBodyDecoderTest, MemoryFill) {
builder.InitializeMemory();
module = builder.module();
- EXPECT_FAILURE(v_v, WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_v(),
+ {WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
WASM_FEATURE_SCOPE(bulk_memory);
- EXPECT_VERIFIES(v_v, WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectValidates(sigs.v_v(),
+ {WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, BulkMemoryOpsWithoutMemory) {
WASM_FEATURE_SCOPE(bulk_memory);
- EXPECT_FAILURE(v_v, WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
- EXPECT_FAILURE(v_v, WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
- EXPECT_FAILURE(v_v, WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_v(),
+ {WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(),
+ {WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(),
+ {WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, TableInit) {
@@ -2811,10 +3105,13 @@ TEST_F(FunctionBodyDecoderTest, TableInit) {
builder.AddPassiveElementSegment();
module = builder.module();
- EXPECT_FAILURE(v_v, WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_v(),
+ {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
WASM_FEATURE_SCOPE(bulk_memory);
- EXPECT_VERIFIES(v_v, WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
- EXPECT_FAILURE(v_v, WASM_TABLE_INIT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectValidates(sigs.v_v(),
+ {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(),
+ {WASM_TABLE_INIT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, TableInitInvalid) {
@@ -2826,20 +3123,20 @@ TEST_F(FunctionBodyDecoderTest, TableInitInvalid) {
WASM_FEATURE_SCOPE(bulk_memory);
byte code[] = {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO), WASM_END};
for (size_t i = 0; i <= arraysize(code); ++i) {
- Verify(i == arraysize(code), sigs.v_v(), {code, i}, kOmitEnd);
+ Validate(i == arraysize(code), sigs.v_v(), VectorOf(code, i), kOmitEnd);
}
}
-TEST_F(FunctionBodyDecoderTest, TableDrop) {
+TEST_F(FunctionBodyDecoderTest, ElemDrop) {
TestModuleBuilder builder;
builder.InitializeTable();
builder.AddPassiveElementSegment();
module = builder.module();
- EXPECT_FAILURE(v_v, WASM_TABLE_DROP(0));
+ ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(0)});
WASM_FEATURE_SCOPE(bulk_memory);
- EXPECT_VERIFIES(v_v, WASM_TABLE_DROP(0));
- EXPECT_FAILURE(v_v, WASM_TABLE_DROP(1));
+ ExpectValidates(sigs.v_v(), {WASM_ELEM_DROP(0)});
+ ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(1)});
}
TEST_F(FunctionBodyDecoderTest, TableCopy) {
@@ -2847,9 +3144,10 @@ TEST_F(FunctionBodyDecoderTest, TableCopy) {
builder.InitializeTable();
module = builder.module();
- EXPECT_FAILURE(v_v, WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
WASM_FEATURE_SCOPE(bulk_memory);
- EXPECT_VERIFIES(v_v, WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectValidates(sigs.v_v(),
+ {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, BulkTableOpsWithoutTable) {
@@ -2858,9 +3156,10 @@ TEST_F(FunctionBodyDecoderTest, BulkTableOpsWithoutTable) {
builder.AddPassiveElementSegment();
WASM_FEATURE_SCOPE(bulk_memory);
- EXPECT_FAILURE(v_v, WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
- EXPECT_FAILURE(v_v, WASM_TABLE_DROP(0));
- EXPECT_FAILURE(v_v, WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ ExpectFailure(sigs.v_v(),
+ {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(0)});
+ ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
class BranchTableIteratorTest : public TestWithZone {
@@ -2926,252 +3225,146 @@ TEST_F(BranchTableIteratorTest, error0) {
#undef CHECK_BR_TABLE_LENGTH
#undef CHECK_BR_TABLE_ERROR
+struct PrintOpcodes {
+ const byte* start;
+ const byte* end;
+};
+std::ostream& operator<<(std::ostream& out, const PrintOpcodes& range) {
+ out << "First opcode: \""
+ << WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(*range.start))
+ << "\"\nall bytes: [";
+ for (const byte* b = range.start; b < range.end; ++b) {
+ out << (b == range.start ? "" : ", ") << uint32_t{*b} << "/"
+ << AsHex(*b, 2, true);
+ }
+ return out << "]";
+}
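
The PrintOpcodes streamer is only formatted when an expectation actually fails, since gtest evaluates the streamed message lazily; a failure then names the first opcode and dumps every byte in both decimal and hex. A sketch of what a failing check would append (output wording per the operator above; the name "i32.const" for opcode 0x41 is assumed):

    const uint8_t code[] = {0x41, 0x01};  // i32.const 1
    EXPECT_EQ(2u, OpcodeLength(code, code + sizeof(code)))
        << PrintOpcodes{code, code + sizeof(code)};
    // On failure, the message ends with something like:
    //   First opcode: "i32.const"
    //   all bytes: [65/0x41, 1/0x01]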
+
class WasmOpcodeLengthTest : public TestWithZone {
public:
WasmOpcodeLengthTest() : TestWithZone() {}
-};
-#define EXPECT_LENGTH(expected, opcode) \
- { \
- static const byte code[] = {opcode, 0, 0, 0, 0, 0, 0, 0, 0}; \
- EXPECT_EQ(static_cast<unsigned>(expected), \
- OpcodeLength(code, code + sizeof(code))); \
- }
-
-#define EXPECT_LENGTH_N(expected, ...) \
- { \
- static const byte code[] = {__VA_ARGS__}; \
- EXPECT_EQ(static_cast<unsigned>(expected), \
- OpcodeLength(code, code + sizeof(code))); \
+ template <typename... Bytes>
+ void ExpectLength(unsigned expected, Bytes... bytes) {
+ const byte code[] = {bytes..., 0, 0, 0, 0, 0, 0, 0, 0};
+ EXPECT_EQ(expected, OpcodeLength(code, code + sizeof(code)))
+ << PrintOpcodes{code, code + sizeof...(bytes)};
}
+};
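
The variadic ExpectLength replaces both macros with one helper: the parameter pack expands straight into the array initializer, sizeof...(bytes) recovers how many real bytes were passed for the diagnostic, and the zero padding guarantees OpcodeLength never reads past the buffer on truncated input. The expansion trick in a self-contained form:

    #include <cstdint>
    #include <cstdio>

    // The pack expands into the initializer; padding follows the real bytes.
    template <typename... Bytes>
    void ShowPadded(Bytes... bytes) {
      const uint8_t code[] = {static_cast<uint8_t>(bytes)..., 0, 0, 0, 0};
      std::printf("%zu real byte(s), %zu with padding\n", sizeof...(bytes),
                  sizeof(code));
    }
    // ShowPadded(0x41, 0x01) prints "2 real byte(s), 6 with padding".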
TEST_F(WasmOpcodeLengthTest, Statements) {
- EXPECT_LENGTH(1, kExprNop);
- EXPECT_LENGTH(1, kExprElse);
- EXPECT_LENGTH(1, kExprEnd);
- EXPECT_LENGTH(1, kExprSelect);
- EXPECT_LENGTH(1, kExprCatch);
- EXPECT_LENGTH(1, kExprRethrow);
- EXPECT_LENGTH(2, kExprBr);
- EXPECT_LENGTH(2, kExprBrIf);
- EXPECT_LENGTH(2, kExprThrow);
- EXPECT_LENGTH(3, kExprBrOnExn);
- EXPECT_LENGTH_N(2, kExprBlock, kLocalI32);
- EXPECT_LENGTH_N(2, kExprLoop, kLocalI32);
- EXPECT_LENGTH_N(2, kExprIf, kLocalI32);
- EXPECT_LENGTH_N(2, kExprTry, kLocalI32);
+ ExpectLength(1, kExprNop);
+ ExpectLength(1, kExprElse);
+ ExpectLength(1, kExprEnd);
+ ExpectLength(1, kExprSelect);
+ ExpectLength(1, kExprCatch);
+ ExpectLength(1, kExprRethrow);
+ ExpectLength(2, kExprBr);
+ ExpectLength(2, kExprBrIf);
+ ExpectLength(2, kExprThrow);
+ ExpectLength(3, kExprBrOnExn);
+ ExpectLength(2, kExprBlock, kLocalI32);
+ ExpectLength(2, kExprLoop, kLocalI32);
+ ExpectLength(2, kExprIf, kLocalI32);
+ ExpectLength(2, kExprTry, kLocalI32);
}
TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
- EXPECT_LENGTH(5, kExprF32Const);
- EXPECT_LENGTH(9, kExprF64Const);
- EXPECT_LENGTH(1, kExprRefNull);
- EXPECT_LENGTH(2, kExprGetLocal);
- EXPECT_LENGTH(2, kExprSetLocal);
- EXPECT_LENGTH(2, kExprGetGlobal);
- EXPECT_LENGTH(2, kExprSetGlobal);
- EXPECT_LENGTH(2, kExprCallFunction);
- EXPECT_LENGTH(3, kExprCallIndirect);
+ ExpectLength(5, kExprF32Const);
+ ExpectLength(9, kExprF64Const);
+ ExpectLength(1, kExprRefNull);
+ ExpectLength(2, kExprGetLocal);
+ ExpectLength(2, kExprSetLocal);
+ ExpectLength(2, kExprGetGlobal);
+ ExpectLength(2, kExprSetGlobal);
+ ExpectLength(2, kExprCallFunction);
+ ExpectLength(3, kExprCallIndirect);
}
TEST_F(WasmOpcodeLengthTest, I32Const) {
- EXPECT_LENGTH_N(2, kExprI32Const, U32V_1(1));
- EXPECT_LENGTH_N(3, kExprI32Const, U32V_2(999));
- EXPECT_LENGTH_N(4, kExprI32Const, U32V_3(9999));
- EXPECT_LENGTH_N(5, kExprI32Const, U32V_4(999999));
- EXPECT_LENGTH_N(6, kExprI32Const, U32V_5(99999999));
+ ExpectLength(2, kExprI32Const, U32V_1(1));
+ ExpectLength(3, kExprI32Const, U32V_2(999));
+ ExpectLength(4, kExprI32Const, U32V_3(9999));
+ ExpectLength(5, kExprI32Const, U32V_4(999999));
+ ExpectLength(6, kExprI32Const, U32V_5(99999999));
}
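
Each expected length here is one opcode byte plus the LEB128-encoded immediate. Note that the U32V_N macros deliberately emit padded, non-minimal encodings (9999 fits in two LEB bytes, but U32V_3 forces three), so the decoder must count immediate bytes by continuation bits rather than by value. That counting rule as a stand-alone sketch:

    #include <cstddef>
    #include <cstdint>

    // A LEB128 byte with the high bit set says another byte follows;
    // minimal encoding is not required.
    size_t Leb128ByteCount(const uint8_t* p, const uint8_t* end) {
      size_t n = 0;
      while (p + n < end) {
        const bool more = (p[n] & 0x80) != 0;
        ++n;
        if (!more) break;
      }
      return n;
    }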
TEST_F(WasmOpcodeLengthTest, I64Const) {
- EXPECT_LENGTH_N(2, kExprI64Const, U32V_1(1));
- EXPECT_LENGTH_N(3, kExprI64Const, U32V_2(99));
- EXPECT_LENGTH_N(4, kExprI64Const, U32V_3(9999));
- EXPECT_LENGTH_N(5, kExprI64Const, U32V_4(99999));
- EXPECT_LENGTH_N(6, kExprI64Const, U32V_5(9999999));
- EXPECT_LENGTH_N(7, WASM_I64V_6(777777));
- EXPECT_LENGTH_N(8, WASM_I64V_7(7777777));
- EXPECT_LENGTH_N(9, WASM_I64V_8(77777777));
- EXPECT_LENGTH_N(10, WASM_I64V_9(777777777));
+ ExpectLength(2, kExprI64Const, U32V_1(1));
+ ExpectLength(3, kExprI64Const, U32V_2(99));
+ ExpectLength(4, kExprI64Const, U32V_3(9999));
+ ExpectLength(5, kExprI64Const, U32V_4(99999));
+ ExpectLength(6, kExprI64Const, U32V_5(9999999));
+ ExpectLength(7, WASM_I64V_6(777777));
+ ExpectLength(8, WASM_I64V_7(7777777));
+ ExpectLength(9, WASM_I64V_8(77777777));
+ ExpectLength(10, WASM_I64V_9(777777777));
}
TEST_F(WasmOpcodeLengthTest, VariableLength) {
- EXPECT_LENGTH_N(2, kExprGetGlobal, U32V_1(1));
- EXPECT_LENGTH_N(3, kExprGetGlobal, U32V_2(33));
- EXPECT_LENGTH_N(4, kExprGetGlobal, U32V_3(44));
- EXPECT_LENGTH_N(5, kExprGetGlobal, U32V_4(66));
- EXPECT_LENGTH_N(6, kExprGetGlobal, U32V_5(77));
+ ExpectLength(2, kExprGetGlobal, U32V_1(1));
+ ExpectLength(3, kExprGetGlobal, U32V_2(33));
+ ExpectLength(4, kExprGetGlobal, U32V_3(44));
+ ExpectLength(5, kExprGetGlobal, U32V_4(66));
+ ExpectLength(6, kExprGetGlobal, U32V_5(77));
}
TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
- EXPECT_LENGTH(3, kExprI32LoadMem8S);
- EXPECT_LENGTH(3, kExprI32LoadMem8U);
- EXPECT_LENGTH(3, kExprI32LoadMem16S);
- EXPECT_LENGTH(3, kExprI32LoadMem16U);
- EXPECT_LENGTH(3, kExprI32LoadMem);
- EXPECT_LENGTH(3, kExprI64LoadMem8S);
- EXPECT_LENGTH(3, kExprI64LoadMem8U);
- EXPECT_LENGTH(3, kExprI64LoadMem16S);
- EXPECT_LENGTH(3, kExprI64LoadMem16U);
- EXPECT_LENGTH(3, kExprI64LoadMem32S);
- EXPECT_LENGTH(3, kExprI64LoadMem32U);
- EXPECT_LENGTH(3, kExprI64LoadMem);
- EXPECT_LENGTH(3, kExprF32LoadMem);
- EXPECT_LENGTH(3, kExprF64LoadMem);
-
- EXPECT_LENGTH(3, kExprI32StoreMem8);
- EXPECT_LENGTH(3, kExprI32StoreMem16);
- EXPECT_LENGTH(3, kExprI32StoreMem);
- EXPECT_LENGTH(3, kExprI64StoreMem8);
- EXPECT_LENGTH(3, kExprI64StoreMem16);
- EXPECT_LENGTH(3, kExprI64StoreMem32);
- EXPECT_LENGTH(3, kExprI64StoreMem);
- EXPECT_LENGTH(3, kExprF32StoreMem);
- EXPECT_LENGTH(3, kExprF64StoreMem);
+ ExpectLength(3, kExprI32LoadMem8S);
+ ExpectLength(3, kExprI32LoadMem8U);
+ ExpectLength(3, kExprI32LoadMem16S);
+ ExpectLength(3, kExprI32LoadMem16U);
+ ExpectLength(3, kExprI32LoadMem);
+ ExpectLength(3, kExprI64LoadMem8S);
+ ExpectLength(3, kExprI64LoadMem8U);
+ ExpectLength(3, kExprI64LoadMem16S);
+ ExpectLength(3, kExprI64LoadMem16U);
+ ExpectLength(3, kExprI64LoadMem32S);
+ ExpectLength(3, kExprI64LoadMem32U);
+ ExpectLength(3, kExprI64LoadMem);
+ ExpectLength(3, kExprF32LoadMem);
+ ExpectLength(3, kExprF64LoadMem);
+
+ ExpectLength(3, kExprI32StoreMem8);
+ ExpectLength(3, kExprI32StoreMem16);
+ ExpectLength(3, kExprI32StoreMem);
+ ExpectLength(3, kExprI64StoreMem8);
+ ExpectLength(3, kExprI64StoreMem16);
+ ExpectLength(3, kExprI64StoreMem32);
+ ExpectLength(3, kExprI64StoreMem);
+ ExpectLength(3, kExprF32StoreMem);
+ ExpectLength(3, kExprF64StoreMem);
}
TEST_F(WasmOpcodeLengthTest, MiscMemExpressions) {
- EXPECT_LENGTH(2, kExprMemorySize);
- EXPECT_LENGTH(2, kExprMemoryGrow);
+ ExpectLength(2, kExprMemorySize);
+ ExpectLength(2, kExprMemoryGrow);
}
TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
- EXPECT_LENGTH(1, kExprI32Add);
- EXPECT_LENGTH(1, kExprI32Sub);
- EXPECT_LENGTH(1, kExprI32Mul);
- EXPECT_LENGTH(1, kExprI32DivS);
- EXPECT_LENGTH(1, kExprI32DivU);
- EXPECT_LENGTH(1, kExprI32RemS);
- EXPECT_LENGTH(1, kExprI32RemU);
- EXPECT_LENGTH(1, kExprI32And);
- EXPECT_LENGTH(1, kExprI32Ior);
- EXPECT_LENGTH(1, kExprI32Xor);
- EXPECT_LENGTH(1, kExprI32Shl);
- EXPECT_LENGTH(1, kExprI32ShrU);
- EXPECT_LENGTH(1, kExprI32ShrS);
- EXPECT_LENGTH(1, kExprI32Eq);
- EXPECT_LENGTH(1, kExprI32Ne);
- EXPECT_LENGTH(1, kExprI32LtS);
- EXPECT_LENGTH(1, kExprI32LeS);
- EXPECT_LENGTH(1, kExprI32LtU);
- EXPECT_LENGTH(1, kExprI32LeU);
- EXPECT_LENGTH(1, kExprI32GtS);
- EXPECT_LENGTH(1, kExprI32GeS);
- EXPECT_LENGTH(1, kExprI32GtU);
- EXPECT_LENGTH(1, kExprI32GeU);
- EXPECT_LENGTH(1, kExprI32Clz);
- EXPECT_LENGTH(1, kExprI32Ctz);
- EXPECT_LENGTH(1, kExprI32Popcnt);
- EXPECT_LENGTH(1, kExprI32Eqz);
- EXPECT_LENGTH(1, kExprI64Add);
- EXPECT_LENGTH(1, kExprI64Sub);
- EXPECT_LENGTH(1, kExprI64Mul);
- EXPECT_LENGTH(1, kExprI64DivS);
- EXPECT_LENGTH(1, kExprI64DivU);
- EXPECT_LENGTH(1, kExprI64RemS);
- EXPECT_LENGTH(1, kExprI64RemU);
- EXPECT_LENGTH(1, kExprI64And);
- EXPECT_LENGTH(1, kExprI64Ior);
- EXPECT_LENGTH(1, kExprI64Xor);
- EXPECT_LENGTH(1, kExprI64Shl);
- EXPECT_LENGTH(1, kExprI64ShrU);
- EXPECT_LENGTH(1, kExprI64ShrS);
- EXPECT_LENGTH(1, kExprI64Eq);
- EXPECT_LENGTH(1, kExprI64Ne);
- EXPECT_LENGTH(1, kExprI64LtS);
- EXPECT_LENGTH(1, kExprI64LeS);
- EXPECT_LENGTH(1, kExprI64LtU);
- EXPECT_LENGTH(1, kExprI64LeU);
- EXPECT_LENGTH(1, kExprI64GtS);
- EXPECT_LENGTH(1, kExprI64GeS);
- EXPECT_LENGTH(1, kExprI64GtU);
- EXPECT_LENGTH(1, kExprI64GeU);
- EXPECT_LENGTH(1, kExprI64Clz);
- EXPECT_LENGTH(1, kExprI64Ctz);
- EXPECT_LENGTH(1, kExprI64Popcnt);
- EXPECT_LENGTH(1, kExprF32Add);
- EXPECT_LENGTH(1, kExprF32Sub);
- EXPECT_LENGTH(1, kExprF32Mul);
- EXPECT_LENGTH(1, kExprF32Div);
- EXPECT_LENGTH(1, kExprF32Min);
- EXPECT_LENGTH(1, kExprF32Max);
- EXPECT_LENGTH(1, kExprF32Abs);
- EXPECT_LENGTH(1, kExprF32Neg);
- EXPECT_LENGTH(1, kExprF32CopySign);
- EXPECT_LENGTH(1, kExprF32Ceil);
- EXPECT_LENGTH(1, kExprF32Floor);
- EXPECT_LENGTH(1, kExprF32Trunc);
- EXPECT_LENGTH(1, kExprF32NearestInt);
- EXPECT_LENGTH(1, kExprF32Sqrt);
- EXPECT_LENGTH(1, kExprF32Eq);
- EXPECT_LENGTH(1, kExprF32Ne);
- EXPECT_LENGTH(1, kExprF32Lt);
- EXPECT_LENGTH(1, kExprF32Le);
- EXPECT_LENGTH(1, kExprF32Gt);
- EXPECT_LENGTH(1, kExprF32Ge);
- EXPECT_LENGTH(1, kExprF64Add);
- EXPECT_LENGTH(1, kExprF64Sub);
- EXPECT_LENGTH(1, kExprF64Mul);
- EXPECT_LENGTH(1, kExprF64Div);
- EXPECT_LENGTH(1, kExprF64Min);
- EXPECT_LENGTH(1, kExprF64Max);
- EXPECT_LENGTH(1, kExprF64Abs);
- EXPECT_LENGTH(1, kExprF64Neg);
- EXPECT_LENGTH(1, kExprF64CopySign);
- EXPECT_LENGTH(1, kExprF64Ceil);
- EXPECT_LENGTH(1, kExprF64Floor);
- EXPECT_LENGTH(1, kExprF64Trunc);
- EXPECT_LENGTH(1, kExprF64NearestInt);
- EXPECT_LENGTH(1, kExprF64Sqrt);
- EXPECT_LENGTH(1, kExprF64Eq);
- EXPECT_LENGTH(1, kExprF64Ne);
- EXPECT_LENGTH(1, kExprF64Lt);
- EXPECT_LENGTH(1, kExprF64Le);
- EXPECT_LENGTH(1, kExprF64Gt);
- EXPECT_LENGTH(1, kExprF64Ge);
- EXPECT_LENGTH(1, kExprI32SConvertF32);
- EXPECT_LENGTH(1, kExprI32SConvertF64);
- EXPECT_LENGTH(1, kExprI32UConvertF32);
- EXPECT_LENGTH(1, kExprI32UConvertF64);
- EXPECT_LENGTH(1, kExprI32ConvertI64);
- EXPECT_LENGTH(1, kExprI64SConvertF32);
- EXPECT_LENGTH(1, kExprI64SConvertF64);
- EXPECT_LENGTH(1, kExprI64UConvertF32);
- EXPECT_LENGTH(1, kExprI64UConvertF64);
- EXPECT_LENGTH(1, kExprI64SConvertI32);
- EXPECT_LENGTH(1, kExprI64UConvertI32);
- EXPECT_LENGTH(1, kExprF32SConvertI32);
- EXPECT_LENGTH(1, kExprF32UConvertI32);
- EXPECT_LENGTH(1, kExprF32SConvertI64);
- EXPECT_LENGTH(1, kExprF32UConvertI64);
- EXPECT_LENGTH(1, kExprF32ConvertF64);
- EXPECT_LENGTH(1, kExprF32ReinterpretI32);
- EXPECT_LENGTH(1, kExprF64SConvertI32);
- EXPECT_LENGTH(1, kExprF64UConvertI32);
- EXPECT_LENGTH(1, kExprF64SConvertI64);
- EXPECT_LENGTH(1, kExprF64UConvertI64);
- EXPECT_LENGTH(1, kExprF64ConvertF32);
- EXPECT_LENGTH(1, kExprF64ReinterpretI64);
- EXPECT_LENGTH(1, kExprI32ReinterpretF32);
- EXPECT_LENGTH(1, kExprI64ReinterpretF64);
+#define SIMPLE_OPCODE(name, byte, sig) byte,
+ static constexpr uint8_t kSimpleOpcodes[] = {
+ FOREACH_SIMPLE_OPCODE(SIMPLE_OPCODE)};
+#undef SIMPLE_OPCODE
+ for (uint8_t simple_opcode : kSimpleOpcodes) {
+ ExpectLength(1, simple_opcode);
+ }
}
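
This rewrite collapses more than a hundred hand-written expectations into a loop over FOREACH_SIMPLE_OPCODE, the X-macro listing every simple (one-byte, no-immediate) opcode in a single place, so newly added opcodes get covered automatically. The pattern in miniature (list and names hypothetical):

    // One list macro, expanded once per use site.
    #define FOREACH_COLOR(V) \
      V(Red, 0xF00)          \
      V(Green, 0x0F0)        \
      V(Blue, 0x00F)

    #define DECLARE_VALUE(name, value) k##name = value,
    enum Color { FOREACH_COLOR(DECLARE_VALUE) };
    #undef DECLARE_VALUE

    #define COUNT_ONE(name, value) +1
    constexpr int kNumColors = 0 FOREACH_COLOR(COUNT_ONE);  // == 3
    #undef COUNT_ONE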
TEST_F(WasmOpcodeLengthTest, SimdExpressions) {
#define TEST_SIMD(name, opcode, sig) \
- EXPECT_LENGTH_N(2, kSimdPrefix, static_cast<byte>(kExpr##name & 0xFF));
+ ExpectLength(2, kSimdPrefix, static_cast<byte>(kExpr##name & 0xFF));
FOREACH_SIMD_0_OPERAND_OPCODE(TEST_SIMD)
#undef TEST_SIMD
#define TEST_SIMD(name, opcode, sig) \
- EXPECT_LENGTH_N(3, kSimdPrefix, static_cast<byte>(kExpr##name & 0xFF));
+ ExpectLength(3, kSimdPrefix, static_cast<byte>(kExpr##name & 0xFF));
FOREACH_SIMD_1_OPERAND_OPCODE(TEST_SIMD)
#undef TEST_SIMD
- EXPECT_LENGTH_N(18, kSimdPrefix, static_cast<byte>(kExprS8x16Shuffle & 0xFF));
+ ExpectLength(18, kSimdPrefix, static_cast<byte>(kExprS8x16Shuffle & 0xFF));
  // An unknown SIMD opcode should still report length 2 (prefix + opcode).
- EXPECT_LENGTH_N(2, kSimdPrefix, 0xFF);
+ ExpectLength(2, kSimdPrefix, 0xFF);
}
-#undef EXPECT_LENGTH
-#undef EXPECT_LENGTH_N
-
typedef ZoneVector<ValueType> TypesOfLocals;
class LocalDeclDecoderTest : public TestWithZone {
@@ -3385,14 +3578,6 @@ TEST_F(BytecodeIteratorTest, WithLocalDecls) {
#undef WASM_IF_OP
#undef WASM_LOOP_OP
#undef WASM_BRV_IF_ZERO
-#undef EXPECT_VERIFIES_C
-#undef EXPECT_FAILURE_C
-#undef EXPECT_VERIFIES_SC
-#undef EXPECT_FAILURE_SC
-#undef EXPECT_VERIFIES_S
-#undef EXPECT_FAILURE_S
-#undef EXPECT_VERIFIES
-#undef EXPECT_FAILURE
} // namespace function_body_decoder_unittest
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 3d99dffa72..b3b069b7cc 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -32,6 +32,9 @@ namespace module_decoder_unittest {
#define WASM_INIT_EXPR_ANYREF WASM_REF_NULL, kExprEnd
#define WASM_INIT_EXPR_GLOBAL(index) WASM_GET_GLOBAL(index), kExprEnd
+#define REF_NULL_ELEMENT kExprRefNull, kExprEnd
+#define REF_FUNC_ELEMENT(v) kExprRefFunc, U32V_1(v), kExprEnd
+
#define EMPTY_BODY 0
#define NOP_BODY 2, 0, kExprNop
@@ -173,7 +176,9 @@ class WasmModuleVerifyTest : public TestWithIsolateAndZone {
size_t total = sizeof(header) + size;
auto temp = new byte[total];
memcpy(temp, header, sizeof(header));
- memcpy(temp + sizeof(header), module_start, size);
+ if (size > 0) {
+ memcpy(temp + sizeof(header), module_start, size);
+ }
ModuleResult result = DecodeWasmModule(
enabled_features_, temp, temp + total, false, kWasmOrigin,
isolate()->counters(), isolate()->allocator());
@@ -582,7 +587,7 @@ TEST_F(WasmModuleVerifyTest, ExceptionSectionBeforeGlobal) {
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "unexpected section: Global");
+ EXPECT_NOT_OK(result, "unexpected section <Global>");
}
TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterMemoryBeforeGlobal) {
@@ -594,7 +599,7 @@ TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterMemoryBeforeGlobal) {
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "unexpected section: Global");
+ EXPECT_NOT_OK(result, "unexpected section <Global>");
}
TEST_F(WasmModuleVerifyTest, ExceptionImport) {
@@ -2256,8 +2261,8 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegment) {
// table declaration -----------------------------------------------------
SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
// element segments -----------------------------------------------------
- SECTION(Element, ENTRY_COUNT(1), PASSIVE,
- ADD_COUNT(FUNC_INDEX(0), FUNC_INDEX(0))),
+ SECTION(Element, ENTRY_COUNT(1), PASSIVE, kLocalAnyFunc, U32V_1(3),
+ REF_FUNC_ELEMENT(0), REF_FUNC_ELEMENT(0), REF_NULL_ELEMENT),
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
EXPECT_FAILURE(data);
@@ -2266,6 +2271,22 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegment) {
EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
}
+TEST_F(WasmModuleVerifyTest, PassiveElementSegmentAnyRef) {
+ static const byte data[] = {
+ // sig#0 -----------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs -----------------------------------------------------------------
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ // table declaration -----------------------------------------------------
+ SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ // element segments -----------------------------------------------------
+ SECTION(Element, ENTRY_COUNT(1), PASSIVE, kLocalAnyRef, U32V_1(0)),
+ // code ------------------------------------------------------------------
+ ONE_EMPTY_BODY};
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_FAILURE(data);
+}
+
TEST_F(WasmModuleVerifyTest, DataCountSectionCorrectPlacement) {
static const byte data[] = {SECTION(Element, ENTRY_COUNT(0)),
SECTION(DataCount, ENTRY_COUNT(0)),
@@ -2289,7 +2310,7 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionBeforeElement) {
SECTION(Element, ENTRY_COUNT(0))};
WASM_FEATURE_SCOPE(bulk_memory);
ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "unexpected section: Element");
+ EXPECT_NOT_OK(result, "unexpected section <Element>");
}
TEST_F(WasmModuleVerifyTest, DataCountSectionAfterStartBeforeElement) {
@@ -2307,7 +2328,7 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionAfterStartBeforeElement) {
};
WASM_FEATURE_SCOPE(bulk_memory);
ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "unexpected section: Element");
+ EXPECT_NOT_OK(result, "unexpected section <Element>");
}
TEST_F(WasmModuleVerifyTest, MultipleDataCountSections) {
@@ -2373,6 +2394,8 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_omitted) {
#undef WASM_INIT_EXPR_F64
#undef WASM_INIT_EXPR_ANYREF
#undef WASM_INIT_EXPR_GLOBAL
+#undef REF_NULL_ELEMENT
+#undef REF_FUNC_ELEMENT
#undef EMPTY_BODY
#undef NOP_BODY
#undef SIG_ENTRY_i_i
diff --git a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
index a5b89762ad..765e5a74ff 100644
--- a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
@@ -20,9 +20,11 @@ namespace wasm {
struct MockStreamingResult {
size_t num_sections = 0;
size_t num_functions = 0;
- bool ok = true;
+ WasmError error;
OwnedVector<uint8_t> received_bytes;
+ bool ok() const { return !error.has_error(); }
+
MockStreamingResult() = default;
};
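
Replacing the bool with a WasmError lets every failure test below assert on the exact message (and potentially the offset) instead of a bare flag, which catches tests that fail for the wrong reason. A minimal sketch of such an error value (not V8's actual class):

    #include <cstdint>
    #include <string>

    // Default-constructed means "no error"; otherwise carries location + text.
    class SimpleError {
     public:
      SimpleError() = default;
      SimpleError(uint32_t offset, std::string message)
          : has_error_(true), offset_(offset), message_(std::move(message)) {}
      bool has_error() const { return has_error_; }
      uint32_t offset() const { return offset_; }
      const std::string& message() const { return message_; }

     private:
      bool has_error_ = false;
      uint32_t offset_ = 0;
      std::string message_;
    };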
@@ -33,20 +35,20 @@ class MockStreamingProcessor : public StreamingProcessor {
bool ProcessModuleHeader(Vector<const uint8_t> bytes,
uint32_t offset) override {
- // TODO(ahaas): Share code with the module-decoder.
Decoder decoder(bytes.begin(), bytes.end());
uint32_t magic_word = decoder.consume_u32("wasm magic");
if (decoder.failed() || magic_word != kWasmMagic) {
- result_->ok = false;
+ result_->error = WasmError(0, "expected wasm magic");
return false;
}
uint32_t magic_version = decoder.consume_u32("wasm version");
if (decoder.failed() || magic_version != kWasmVersion) {
- result_->ok = false;
+ result_->error = WasmError(4, "expected wasm version");
return false;
}
return true;
}
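
The header check consumes two little-endian u32s: the magic "\0asm" (0x6d736100) and then the version (1); the offsets 0 and 4 reported above point at whichever word mismatched. A stand-alone sketch of the same check (assumes a little-endian host for the memcpy shortcut):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    bool CheckWasmHeader(const uint8_t* bytes, size_t length) {
      if (length < 8) return false;
      uint32_t magic, version;
      std::memcpy(&magic, bytes, 4);        // offset 0: expected wasm magic
      std::memcpy(&version, bytes + 4, 4);  // offset 4: expected wasm version
      return magic == 0x6d736100u && version == 1u;
    }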
+
// Process all sections but the code section.
bool ProcessSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset) override {
@@ -54,7 +56,7 @@ class MockStreamingProcessor : public StreamingProcessor {
return true;
}
- bool ProcessCodeSectionHeader(size_t num_functions, uint32_t offset,
+ bool ProcessCodeSectionHeader(int num_functions, uint32_t offset,
std::shared_ptr<WireBytesStorage>) override {
return true;
}
@@ -74,14 +76,17 @@ class MockStreamingProcessor : public StreamingProcessor {
}
// Report an error detected in the StreamingDecoder.
- void OnError(const WasmError&) override { result_->ok = false; }
+ void OnError(const WasmError& error) override {
+ result_->error = error;
+ CHECK(!result_->ok());
+ }
void OnAbort() override {}
bool Deserialize(Vector<const uint8_t> module_bytes,
Vector<const uint8_t> wire_bytes) override {
return false;
- };
+ }
private:
MockStreamingResult* const result_;
@@ -98,14 +103,14 @@ class WasmStreamingDecoderTest : public ::testing::Test {
stream.OnBytesReceived(data.SubVector(0, split));
stream.OnBytesReceived(data.SubVector(split, data.length()));
stream.Finish();
- EXPECT_TRUE(result.ok);
+ EXPECT_TRUE(result.ok());
EXPECT_EQ(expected_sections, result.num_sections);
EXPECT_EQ(expected_functions, result.num_functions);
EXPECT_EQ(data, result.received_bytes.as_vector());
}
}
- void ExpectFailure(Vector<const uint8_t> data) {
+ void ExpectFailure(Vector<const uint8_t> data, const char* message) {
for (int split = 0; split <= data.length(); ++split) {
MockStreamingResult result;
StreamingDecoder stream(
@@ -113,18 +118,17 @@ class WasmStreamingDecoderTest : public ::testing::Test {
stream.OnBytesReceived(data.SubVector(0, split));
stream.OnBytesReceived(data.SubVector(split, data.length()));
stream.Finish();
- EXPECT_FALSE(result.ok);
+ EXPECT_FALSE(result.ok());
+ EXPECT_EQ(message, result.error.message());
}
}
-
- MockStreamingResult result;
};
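
Both helpers deliver the stream in two chunks at every possible split point, so chunk boundaries eventually land inside the header, section lengths, and function bodies alike; ExpectFailure now also pins down which error fired. The two-chunk delivery pattern in isolation (hypothetical sink type, not V8's StreamingDecoder):

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    struct ChunkSink {
      std::vector<uint8_t> bytes;
      void OnBytesReceived(const uint8_t* data, size_t len) {
        bytes.insert(bytes.end(), data, data + len);
      }
    };

    // Feed the same bytes cut at every boundary; `check` asserts on the sink
    // (e.g. that a decoder drained it with the expected result).
    void FeedAtEverySplit(const std::vector<uint8_t>& data,
                          const std::function<void(ChunkSink&)>& check) {
      for (size_t split = 0; split <= data.size(); ++split) {
        ChunkSink sink;
        sink.OnBytesReceived(data.data(), split);
        sink.OnBytesReceived(data.data() + split, data.size() - split);
        check(sink);
      }
    }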
TEST_F(WasmStreamingDecoderTest, EmptyStream) {
MockStreamingResult result;
StreamingDecoder stream(base::make_unique<MockStreamingProcessor>(&result));
stream.Finish();
- EXPECT_FALSE(result.ok);
+ EXPECT_FALSE(result.ok());
}
TEST_F(WasmStreamingDecoderTest, IncompleteModuleHeader) {
@@ -134,10 +138,11 @@ TEST_F(WasmStreamingDecoderTest, IncompleteModuleHeader) {
StreamingDecoder stream(base::make_unique<MockStreamingProcessor>(&result));
stream.OnBytesReceived(Vector<const uint8_t>(data, 1));
stream.Finish();
- EXPECT_FALSE(result.ok);
+ EXPECT_FALSE(result.ok());
}
for (int length = 1; length < static_cast<int>(arraysize(data)); ++length) {
- ExpectFailure(Vector<const uint8_t>(data, length));
+ ExpectFailure(Vector<const uint8_t>(data, length),
+ "unexpected end of stream");
}
}
@@ -149,14 +154,14 @@ TEST_F(WasmStreamingDecoderTest, MagicAndVersion) {
TEST_F(WasmStreamingDecoderTest, BadMagic) {
for (uint32_t x = 1; x; x <<= 1) {
const uint8_t data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion)};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "expected wasm magic");
}
}
TEST_F(WasmStreamingDecoderTest, BadVersion) {
for (uint32_t x = 1; x; x <<= 1) {
const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x)};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "expected wasm version");
}
}
@@ -243,7 +248,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload1) {
0x0, // 4
0x0 // 5
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "unexpected end of stream");
}
TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload2) {
@@ -254,7 +259,7 @@ TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload2) {
0x6, // Section Length
0x0 // Payload
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "unexpected end of stream");
}
TEST_F(WasmStreamingDecoderTest, OneSectionInvalidLength) {
@@ -262,13 +267,13 @@ TEST_F(WasmStreamingDecoderTest, OneSectionInvalidLength) {
U32_LE(kWasmMagic), // --
U32_LE(kWasmVersion), // --
0x1, // Section ID
- 0x80, // Section Length (0 in LEB)
+ 0x80, // Section Length (invalid LEB)
0x80, // --
0x80, // --
0x80, // --
0x80, // --
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "expected section length");
}
TEST_F(WasmStreamingDecoderTest, TwoLongSections) {
@@ -383,7 +388,7 @@ TEST_F(WasmStreamingDecoderTest, EmptyFunction) {
0x1, // Number of Functions
0x0, // Function Length
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "invalid function length (0)");
}
TEST_F(WasmStreamingDecoderTest, TwoFunctions) {
@@ -440,7 +445,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthZero) {
kCodeSectionCode, // Section ID
0x0, // Section Length
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "code section cannot have size 0");
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
@@ -461,7 +466,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "not all code section bytes were used");
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHighZeroFunctions) {
@@ -472,7 +477,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHighZeroFunctions) {
0xD, // Section Length
0x0, // Number of Functions
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "not all code section bytes were used");
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLow) {
@@ -493,7 +498,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLow) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "read past code section end");
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInNumFunctions) {
@@ -516,7 +521,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInNumFunctions) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "invalid code section length");
}
TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInFunctionLength) {
@@ -541,7 +546,7 @@ TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInFunctionLength) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "read past code section end");
}
TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooHigh) {
@@ -562,7 +567,7 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooHigh) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "unexpected end of stream");
}
TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
@@ -586,7 +591,7 @@ TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
0x0, // 6
0x0, // 7
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "not all code section bytes were used");
}
TEST_F(WasmStreamingDecoderTest, TwoCodeSections) {
@@ -604,7 +609,7 @@ TEST_F(WasmStreamingDecoderTest, TwoCodeSections) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "code section can only appear once");
}
TEST_F(WasmStreamingDecoderTest, UnknownSection) {
@@ -645,7 +650,7 @@ TEST_F(WasmStreamingDecoderTest, UnknownSectionSandwich) {
0x1, // Function Length
0x0, // Function
};
- ExpectFailure(ArrayVector(data));
+ ExpectFailure(ArrayVector(data), "code section can only appear once");
}
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
index 8c42b1735c..6baf42e076 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
@@ -468,9 +468,9 @@ TEST_P(TrapHandlerTest, TestCrashInOtherThread) {
*trap_handler::GetThreadInWasmThreadLocalAddress() = 0;
}
-INSTANTIATE_TEST_CASE_P(/* no prefix */, TrapHandlerTest,
- ::testing::Values(kDefault, kCallback),
- PrintTrapHandlerTestParam);
+INSTANTIATE_TEST_SUITE_P(/* no prefix */, TrapHandlerTest,
+ ::testing::Values(kDefault, kCallback),
+ PrintTrapHandlerTestParam);
#undef __
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index e90c97f3a1..e43f0ab5f9 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -165,8 +165,8 @@ class WasmCodeManagerTest : public TestWithContext,
std::shared_ptr<WasmModule> module(new WasmModule);
module->num_declared_functions = kNumFunctions;
bool can_request_more = style == Growable;
- return manager()->NewNativeModule(i_isolate(), kAllWasmFeatures, size,
- can_request_more, std::move(module));
+ return engine()->NewNativeModule(i_isolate(), kAllWasmFeatures, size,
+ can_request_more, std::move(module));
}
WasmCode* AddCode(NativeModule* native_module, uint32_t index, size_t size) {
@@ -175,24 +175,24 @@ class WasmCodeManagerTest : public TestWithContext,
std::unique_ptr<byte[]> exec_buff(new byte[size]);
desc.buffer = exec_buff.get();
desc.instr_size = static_cast<int>(size);
- return native_module->AddCode(index, desc, 0, 0, 0, {}, OwnedVector<byte>(),
+ return native_module->AddCode(index, desc, 0, 0, {}, OwnedVector<byte>(),
WasmCode::kFunction, WasmCode::kOther);
}
size_t page() const { return AllocatePageSize(); }
- WasmCodeManager* manager() {
- return i_isolate()->wasm_engine()->code_manager();
- }
+ WasmEngine* engine() { return i_isolate()->wasm_engine(); }
+
+ WasmCodeManager* manager() { return engine()->code_manager(); }
void SetMaxCommittedMemory(size_t limit) {
manager()->SetMaxCommittedMemoryForTesting(limit);
}
};
-INSTANTIATE_TEST_CASE_P(Parameterized, WasmCodeManagerTest,
- ::testing::Values(Fixed, Growable),
- PrintWasmCodeManageTestParam);
+INSTANTIATE_TEST_SUITE_P(Parameterized, WasmCodeManagerTest,
+ ::testing::Values(Fixed, Growable),
+ PrintWasmCodeManageTestParam);
TEST_P(WasmCodeManagerTest, EmptyCase) {
SetMaxCommittedMemory(0 * page());
diff --git a/deps/v8/test/unittests/zone/segmentpool-unittest.cc b/deps/v8/test/unittests/zone/segmentpool-unittest.cc
deleted file mode 100644
index b3556a9519..0000000000
--- a/deps/v8/test/unittests/zone/segmentpool-unittest.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/zone/accounting-allocator.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-TEST(Zone, SegmentPoolConstraints) {
- size_t sizes[]{
- 0, // Corner case
- AccountingAllocator::kMaxPoolSize,
- GB // Something really large
- };
-
- AccountingAllocator allocator;
- for (size_t size : sizes) {
- allocator.ConfigureSegmentPool(size);
- size_t total_size = 0;
- for (size_t power = 0; power < AccountingAllocator::kNumberBuckets;
- ++power) {
- total_size +=
- allocator.unused_segments_max_sizes_[power] * (size_t(1) << power);
- }
- EXPECT_LE(total_size, size);
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index b0763e008a..8d67366e30 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -11,35 +11,44 @@ from testrunner.objects import testcase
ANY_JS = ".any.js"
WPT_ROOT = "/wasm/jsapi/"
META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
+META_TIMEOUT_REGEXP = re.compile(r"META:\s*timeout=(.*)")
+
+
+class TestLoader(testsuite.JSTestLoader):
+ @property
+ def extension(self):
+ return ANY_JS
+
class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
- self.testroot = os.path.join(self.root, "data", "test", "js-api")
self.mjsunit_js = os.path.join(os.path.dirname(self.root), "mjsunit",
"mjsunit.js")
+ self.test_root = os.path.join(self.root, "data", "test", "js-api")
+ self._test_loader.test_root = self.test_root
- def ListTests(self):
- tests = []
- for dirname, dirs, files in os.walk(self.testroot):
- for dotted in [x for x in dirs if x.startswith(".")]:
- dirs.remove(dotted)
- dirs.sort()
- files.sort()
- for filename in files:
- if (filename.endswith(ANY_JS)):
- fullpath = os.path.join(dirname, filename)
- relpath = fullpath[len(self.testroot) + 1 : -len(ANY_JS)]
- testname = relpath.replace(os.path.sep, "/")
- test = self._create_test(testname)
- tests.append(test)
- return tests
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
class TestCase(testcase.D8TestCase):
+ def _get_timeout_param(self):
+ source = self.get_source()
+ timeout_params = META_TIMEOUT_REGEXP.findall(source)
+ if not timeout_params:
+ return None
+
+ if timeout_params[0] in ["long"]:
+ return timeout_params[0]
+ else:
+ print("unknown timeout param %s in %s%s"
+ % (timeout_params[0], self.path, ANY_JS))
+ return None
+
def _get_files_params(self):
files = [os.path.join(self.suite.mjsunit_js),
os.path.join(self.suite.root, "testharness.js")]
@@ -49,7 +58,7 @@ class TestCase(testcase.D8TestCase):
if script.startswith(WPT_ROOT):
# Matched an absolute path, strip the root and replace it with our
# local root.
- script = os.path.join(self.suite.testroot, script[len(WPT_ROOT):])
+ script = os.path.join(self.suite.test_root, script[len(WPT_ROOT):])
elif not script.startswith("/"):
# Matched a relative path, prepend this test's directory.
thisdir = os.path.dirname(self._get_source_path())
@@ -67,7 +76,7 @@ class TestCase(testcase.D8TestCase):
def _get_source_path(self):
# All tests are named `path/name.any.js`
- return os.path.join(self.suite.testroot, self.path + ANY_JS)
+ return os.path.join(self.suite.test_root, self.path + ANY_JS)
def GetSuite(*args, **kwargs):
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index 22e5457ae1..51961fd46d 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -4,11 +4,6 @@
[
[ALWAYS, {
- # https://bugs.chromium.org/p/v8/issues/detail?id=8319
- 'module/customSections': [FAIL],
-}], # ALWAYS
-
-[ALWAYS, {
# https://bugs.chromium.org/p/v8/issues/detail?id=8633
'limits': [SKIP],
}], # ALWAYS
@@ -20,9 +15,14 @@
}], # 'arch == s390 or arch == s390x or system == aix'
##############################################################################
-['lite_mode', {
+['lite_mode or variant == jitless', {
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
'*': [SKIP],
-}], # lite_mode
+}], # lite_mode or variant == jitless
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
]
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index b8d8ed8bd8..b849b63dca 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -7,20 +7,13 @@ import os
from testrunner.local import testsuite
from testrunner.objects import testcase
+
+class TestLoader(testsuite.JSTestLoader):
+ pass
+
class TestSuite(testsuite.TestSuite):
- def ListTests(self):
- tests = []
- for dirname, dirs, files in os.walk(self.root):
- for dotted in [x for x in dirs if x.startswith('.')]:
- dirs.remove(dotted)
- for filename in files:
- if (filename.endswith(".js")):
- fullpath = os.path.join(dirname, filename)
- relpath = fullpath[len(self.root) + 1 : -3]
- testname = relpath.replace(os.path.sep, "/")
- test = self._create_test(testname)
- tests.append(test)
- return tests
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index b676370dad..d7b0ee9dc9 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-6a2c7fb6e2a4ead5d261c9fdac77d3129268848e
\ No newline at end of file
+ed10dcd09ecd4e7c4fd4c0de1a2cbac60632c21b
\ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index e61171b6b2..1953efae0a 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -44,10 +44,15 @@
}], # 'arch == s390 or arch == s390x'
##############################################################################
-['lite_mode', {
+['lite_mode or variant == jitless', {
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
'*': [SKIP],
-}], # lite_mode
+}], # lite_mode or variant == jitless
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
]
diff --git a/deps/v8/test/webkit/JSON-stringify-replacer-expected.txt b/deps/v8/test/webkit/JSON-stringify-replacer-expected.txt
index cea851edab..1955bd4633 100644
--- a/deps/v8/test/webkit/JSON-stringify-replacer-expected.txt
+++ b/deps/v8/test/webkit/JSON-stringify-replacer-expected.txt
@@ -40,10 +40,18 @@ PASS JSON.stringify(object, returnNullFor1) is '{"0":0,"1":null,"2":2}'
PASS JSON.stringify(array, returnNullFor1) is '[0,null,2,null]'
PASS JSON.stringify(object, returnStringForUndefined) is '{"0":0,"1":1,"2":2,"3":"undefined value"}'
PASS JSON.stringify(array, returnStringForUndefined) is '[0,1,2,"undefined value"]'
-PASS JSON.stringify(object, returnCycleObjectFor1) threw exception TypeError: Converting circular structure to JSON.
-PASS JSON.stringify(array, returnCycleObjectFor1) threw exception TypeError: Converting circular structure to JSON.
-PASS JSON.stringify(object, returnCycleArrayFor1) threw exception TypeError: Converting circular structure to JSON.
-PASS JSON.stringify(array, returnCycleArrayFor1) threw exception TypeError: Converting circular structure to JSON.
+PASS JSON.stringify(object, returnCycleObjectFor1) threw exception TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ --- property '1' closes the circle.
+PASS JSON.stringify(array, returnCycleObjectFor1) threw exception TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ --- property '1' closes the circle.
+PASS JSON.stringify(object, returnCycleArrayFor1) threw exception TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Array'
+ --- index 1 closes the circle.
+PASS JSON.stringify(array, returnCycleArrayFor1) threw exception TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Array'
+ --- index 1 closes the circle.
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/class-syntax-name-expected.txt b/deps/v8/test/webkit/class-syntax-name-expected.txt
index ed49be3309..27ae3088c2 100644
--- a/deps/v8/test/webkit/class-syntax-name-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-name-expected.txt
@@ -108,8 +108,8 @@ PASS 'use strict'; var VarA = class A { constructor() {} }; var VarB = class B e
Class statement binding in other circumstances
PASS var result = A; result threw exception ReferenceError: A is not defined.
PASS 'use strict'; var result = A; result threw exception ReferenceError: A is not defined.
-PASS var result = A; class A {}; result threw exception ReferenceError: A is not defined.
-PASS 'use strict'; var result = A; class A {}; result threw exception ReferenceError: A is not defined.
+PASS var result = A; class A {}; result threw exception ReferenceError: Cannot access 'A' before initialization.
+PASS 'use strict'; var result = A; class A {}; result threw exception ReferenceError: Cannot access 'A' before initialization.
PASS class A { constructor() { A = 1; } }; new A threw exception TypeError: Assignment to constant variable..
PASS 'use strict'; class A { constructor() { A = 1; } }; new A threw exception TypeError: Assignment to constant variable..
PASS class A { constructor() { } }; A = 1; A is 1
diff --git a/deps/v8/test/webkit/run-json-stringify-expected.txt b/deps/v8/test/webkit/run-json-stringify-expected.txt
index 437897efa2..256b552939 100644
--- a/deps/v8/test/webkit/run-json-stringify-expected.txt
+++ b/deps/v8/test/webkit/run-json-stringify-expected.txt
@@ -434,7 +434,9 @@ function (jsonObject){
cycleTracker = "";
return jsonObject.stringify(cyclicObject);
}
-PASS tests[i](nativeJSON) threw exception TypeError: Converting circular structure to JSON.
+PASS tests[i](nativeJSON) threw exception TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Object'
+ --- property 'self' closes the circle.
function (jsonObject){
cycleTracker = "";
try { jsonObject.stringify(cyclicObject); } catch(e) { cycleTracker += " -> exception" }
@@ -445,7 +447,9 @@ function (jsonObject){
cycleTracker = "";
return jsonObject.stringify(cyclicArray);
}
-PASS tests[i](nativeJSON) threw exception TypeError: Converting circular structure to JSON.
+PASS tests[i](nativeJSON) threw exception TypeError: Converting circular structure to JSON
+ --> starting at object with constructor 'Array'
+ --- index 1 closes the circle.
function (jsonObject){
cycleTracker = "";
try { jsonObject.stringify(cyclicArray); } catch { cycleTracker += " -> exception" }
diff --git a/deps/v8/test/webkit/testcfg.py b/deps/v8/test/webkit/testcfg.py
index 5d564b69bc..500f44656b 100644
--- a/deps/v8/test/webkit/testcfg.py
+++ b/deps/v8/test/webkit/testcfg.py
@@ -36,26 +36,15 @@ FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
-# TODO (machenbach): Share commonalities with mjstest.
+class TestLoader(testsuite.JSTestLoader):
+ @property
+ def excluded_dirs(self):
+ return {"resources"}
+
+
class TestSuite(testsuite.TestSuite):
- def ListTests(self):
- tests = []
- for dirname, dirs, files in os.walk(self.root):
- for dotted in [x for x in dirs if x.startswith('.')]:
- dirs.remove(dotted)
- if 'resources' in dirs:
- dirs.remove('resources')
-
- dirs.sort()
- files.sort()
- for filename in files:
- if filename.endswith(".js"):
- fullpath = os.path.join(dirname, filename)
- relpath = fullpath[len(self.root) + 1 : -3]
- testname = relpath.replace(os.path.sep, "/")
- test = self._create_test(testname)
- tests.append(test)
- return tests
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index d5dac2ba9d..53d0c3b167 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -132,4 +132,9 @@
}], # variant == no_wasm_traps
##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
+
+##############################################################################
]
diff --git a/deps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py b/deps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py
deleted file mode 100755
index adbc74addb..0000000000
--- a/deps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2018 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import sys
-
-import check_protocol_compatibility
-
-
-sys.exit(check_protocol_compatibility.main())
diff --git a/deps/v8/third_party/inspector_protocol/README.md b/deps/v8/third_party/inspector_protocol/README.md
index 4eff4338ff..da3f93f3f3 100644
--- a/deps/v8/third_party/inspector_protocol/README.md
+++ b/deps/v8/third_party/inspector_protocol/README.md
@@ -26,3 +26,8 @@ to fetch the package (and dependencies) and build and run the tests:
gn gen out/Release
ninja -C out/Release json_parser_test
out/Release/json_parser_test
+
+You'll probably also need to install g++, since Clang uses this to find the
+standard C++ headers. E.g.,
+
+ sudo apt-get install g++-8
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index cdca51a9bf..9b6a62e829 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: fdbdb154336fc1f15a0a6775349dd90243b8d3fc
+Revision: b13e24ccee66d7e0590ce1266db9c906e3648561
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/check_protocol_compatibility.py b/deps/v8/third_party/inspector_protocol/check_protocol_compatibility.py
index e23bd70213..d2df244fa9 100755
--- a/deps/v8/third_party/inspector_protocol/check_protocol_compatibility.py
+++ b/deps/v8/third_party/inspector_protocol/check_protocol_compatibility.py
@@ -45,6 +45,7 @@
#
# Adding --show_changes to the command line prints out a list of valid public API changes.
+from __future__ import print_function
import copy
import os.path
import optparse
@@ -475,9 +476,9 @@ def main():
if arg_options.show_changes:
changes = compare_schemas(domains, baseline_domains, True)
if len(changes) > 0:
- print " Public changes since %s:" % version
+ print(" Public changes since %s:" % version)
for change in changes:
- print " %s" % change
+ print(" %s" % change)
if arg_options.stamp:
with open(arg_options.stamp, 'a') as _:
diff --git a/deps/v8/third_party/inspector_protocol/code_generator.py b/deps/v8/third_party/inspector_protocol/code_generator.py
index edf8c4de21..fb9959d608 100755
--- a/deps/v8/third_party/inspector_protocol/code_generator.py
+++ b/deps/v8/third_party/inspector_protocol/code_generator.py
@@ -33,14 +33,14 @@ def read_config():
def json_object_hook(object_dict):
items = [(k, os.path.join(config_base, v) if k == "path" else v) for (k, v) in object_dict.items()]
items = [(k, os.path.join(output_base, v) if k == "output" else v) for (k, v) in items]
- keys, values = zip(*items)
+ keys, values = list(zip(*items))
return collections.namedtuple('X', keys)(*values)
return json.loads(data, object_hook=json_object_hook)
def init_defaults(config_tuple, path, defaults):
keys = list(config_tuple._fields) # pylint: disable=E1101
values = [getattr(config_tuple, k) for k in keys]
- for i in xrange(len(keys)):
+ for i in range(len(keys)):
if hasattr(values[i], "_fields"):
values[i] = init_defaults(values[i], path + "." + keys[i], defaults)
for optional in defaults:
@@ -134,7 +134,7 @@ def dash_to_camelcase(word):
def to_snake_case(name):
- return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", name, sys.maxint).lower()
+ return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", name, sys.maxsize).lower()
def to_method_case(config, name):
@@ -623,7 +623,7 @@ def main():
lib_templates_dir = os.path.join(module_path, "lib")
# Note these should be sorted in the right order.
# TODO(dgozman): sort them programmatically based on commented includes.
- lib_h_templates = [
+ protocol_h_templates = [
"ErrorSupport_h.template",
"Values_h.template",
"Object_h.template",
@@ -632,15 +632,17 @@ def main():
"Array_h.template",
"DispatcherBase_h.template",
"Parser_h.template",
+ "CBOR_h.template",
]
- lib_cpp_templates = [
+ protocol_cpp_templates = [
"Protocol_cpp.template",
"ErrorSupport_cpp.template",
"Values_cpp.template",
"Object_cpp.template",
"DispatcherBase_cpp.template",
"Parser_cpp.template",
+ "CBOR_cpp.template",
]
forward_h_templates = [
@@ -649,6 +651,14 @@ def main():
"FrontendChannel_h.template",
]
+ base_string_adapter_h_templates = [
+ "base_string_adapter_h.template",
+ ]
+
+ base_string_adapter_cc_templates = [
+ "base_string_adapter_cc.template",
+ ]
+
def generate_lib_file(file_name, template_files):
parts = []
for template_file in template_files:
@@ -658,20 +668,22 @@ def main():
outputs[file_name] = "\n\n".join(parts)
generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "Forward.h")), forward_h_templates)
- generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "Protocol.h")), lib_h_templates)
- generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "Protocol.cpp")), lib_cpp_templates)
+ generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "Protocol.h")), protocol_h_templates)
+ generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "Protocol.cpp")), protocol_cpp_templates)
+ generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "base_string_adapter.h")), base_string_adapter_h_templates)
+ generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "base_string_adapter.cc")), base_string_adapter_cc_templates)
# Make gyp / make generators happy, otherwise make rebuilds the world.
inputs_ts = max(map(os.path.getmtime, inputs))
up_to_date = True
- for output_file in outputs.iterkeys():
+ for output_file in outputs.keys():
if not os.path.exists(output_file) or os.path.getmtime(output_file) < inputs_ts:
up_to_date = False
break
if up_to_date:
sys.exit()
- for file_name, content in outputs.iteritems():
+ for file_name, content in outputs.items():
out_file = open(file_name, "w")
out_file.write(content)
out_file.close()
diff --git a/deps/v8/third_party/inspector_protocol/inspector_protocol.gni b/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
index af2f21691c..ecee9428d4 100644
--- a/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
+++ b/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
@@ -31,8 +31,12 @@ template("inspector_protocol_generate") {
inputs = [
invoker.config_file,
+ "$inspector_protocol_dir/lib/base_string_adapter_cc.template",
+ "$inspector_protocol_dir/lib/base_string_adapter_h.template",
"$inspector_protocol_dir/lib/Allocator_h.template",
"$inspector_protocol_dir/lib/Array_h.template",
+ "$inspector_protocol_dir/lib/CBOR_h.template",
+ "$inspector_protocol_dir/lib/CBOR_cpp.template",
"$inspector_protocol_dir/lib/DispatcherBase_cpp.template",
"$inspector_protocol_dir/lib/DispatcherBase_h.template",
"$inspector_protocol_dir/lib/ErrorSupport_cpp.template",
diff --git a/deps/v8/third_party/inspector_protocol/inspector_protocol.gypi b/deps/v8/third_party/inspector_protocol/inspector_protocol.gypi
index 1359e16751..3d0a60e139 100644
--- a/deps/v8/third_party/inspector_protocol/inspector_protocol.gypi
+++ b/deps/v8/third_party/inspector_protocol/inspector_protocol.gypi
@@ -7,6 +7,8 @@
'inspector_protocol_files': [
'lib/Allocator_h.template',
'lib/Array_h.template',
+ 'lib/CBOR_h.template',
+ 'lib/CBOR_cpp.template',
'lib/DispatcherBase_cpp.template',
'lib/DispatcherBase_h.template',
'lib/ErrorSupport_cpp.template',
diff --git a/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template b/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template
index d05ddaea7e..15eaaaff02 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template
@@ -1,3 +1,5 @@
+// This file is generated by Allocator_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/deps/v8/third_party/inspector_protocol/lib/Array_h.template b/deps/v8/third_party/inspector_protocol/lib/Array_h.template
index 3854f6e5cd..c420a0f7e9 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Array_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Array_h.template
@@ -1,3 +1,5 @@
+// This file is generated by Array_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/deps/v8/third_party/inspector_protocol/lib/CBOR_cpp.template b/deps/v8/third_party/inspector_protocol/lib/CBOR_cpp.template
new file mode 100644
index 0000000000..6b0209b7b1
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/CBOR_cpp.template
@@ -0,0 +1,827 @@
+{# This template is generated by gen_cbor_templates.py. #}
+// Generated by lib/CBOR_cpp.template.
+
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include <cassert>
+#include <limits>
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+// ===== encoding/cbor.cc =====
+
+using namespace cbor;
+
+namespace {
+
+// See RFC 7049 Section 2.3, Table 2.
+static constexpr uint8_t kEncodedTrue =
+ EncodeInitialByte(MajorType::SIMPLE_VALUE, 21);
+static constexpr uint8_t kEncodedFalse =
+ EncodeInitialByte(MajorType::SIMPLE_VALUE, 20);
+static constexpr uint8_t kEncodedNull =
+ EncodeInitialByte(MajorType::SIMPLE_VALUE, 22);
+static constexpr uint8_t kInitialByteForDouble =
+ EncodeInitialByte(MajorType::SIMPLE_VALUE, 27);
+
+} // namespace
+
+uint8_t EncodeTrue() { return kEncodedTrue; }
+uint8_t EncodeFalse() { return kEncodedFalse; }
+uint8_t EncodeNull() { return kEncodedNull; }
+
+uint8_t EncodeIndefiniteLengthArrayStart() {
+ return kInitialByteIndefiniteLengthArray;
+}
+
+uint8_t EncodeIndefiniteLengthMapStart() {
+ return kInitialByteIndefiniteLengthMap;
+}
+
+uint8_t EncodeStop() { return kStopByte; }
+
+namespace {
+// See RFC 7049 Table 3 and Section 2.4.4.2. This is used as a prefix for
+// arbitrary binary data encoded as BYTE_STRING.
+static constexpr uint8_t kExpectedConversionToBase64Tag =
+ EncodeInitialByte(MajorType::TAG, 22);
+
+// When parsing CBOR, we limit recursion depth for objects and arrays
+// to this constant.
+static constexpr int kStackLimit = 1000;
+
+// Writes the bytes for |v| to |out|, starting with the most significant byte.
+// See also: https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
+template <typename T>
+void WriteBytesMostSignificantByteFirst(T v, std::vector<uint8_t>* out) {
+ for (int shift_bytes = sizeof(T) - 1; shift_bytes >= 0; --shift_bytes)
+ out->push_back(0xff & (v >> (shift_bytes * 8)));
+}
+} // namespace
+
+namespace cbor_internals {
+// Writes the start of a token with |type|. The |value| may indicate the size,
+// or it may be the payload if the value is an unsigned integer.
+void WriteTokenStart(MajorType type, uint64_t value,
+ std::vector<uint8_t>* encoded) {
+ if (value < 24) {
+ // Values 0-23 are encoded directly into the additional info of the
+ // initial byte.
+ encoded->push_back(EncodeInitialByte(type, /*additional_info=*/value));
+ return;
+ }
+ if (value <= std::numeric_limits<uint8_t>::max()) {
+ // Values 24-255 are encoded with one initial byte, followed by the value.
+ encoded->push_back(EncodeInitialByte(type, kAdditionalInformation1Byte));
+ encoded->push_back(value);
+ return;
+ }
+ if (value <= std::numeric_limits<uint16_t>::max()) {
+ // Values 256-65535: 1 initial byte + 2 bytes payload.
+ encoded->push_back(EncodeInitialByte(type, kAdditionalInformation2Bytes));
+ WriteBytesMostSignificantByteFirst<uint16_t>(value, encoded);
+ return;
+ }
+ if (value <= std::numeric_limits<uint32_t>::max()) {
+ // 32 bit uint: 1 initial byte + 4 bytes payload.
+ encoded->push_back(EncodeInitialByte(type, kAdditionalInformation4Bytes));
+ WriteBytesMostSignificantByteFirst<uint32_t>(static_cast<uint32_t>(value),
+ encoded);
+ return;
+ }
+ // 64 bit uint: 1 initial byte + 8 bytes payload.
+ encoded->push_back(EncodeInitialByte(type, kAdditionalInformation8Bytes));
+ WriteBytesMostSignificantByteFirst<uint64_t>(value, encoded);
+}
+} // namespace cbor_internals
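
To illustrate the size buckets above, a minimal sketch (using the names
defined in this template) of the bytes WriteTokenStart produces:

  std::vector<uint8_t> encoded;
  cbor_internals::WriteTokenStart(MajorType::UNSIGNED, 10, &encoded);
  // -> 0x0a                     (value fits in the additional info bits)
  cbor_internals::WriteTokenStart(MajorType::UNSIGNED, 500, &encoded);
  // -> 0x19 0x01 0xf4           (initial byte, then 500 = 0x01f4 big endian)
  cbor_internals::WriteTokenStart(MajorType::UNSIGNED, 70000, &encoded);
  // -> 0x1a 0x00 0x01 0x11 0x70 (initial byte, then 70000 = 0x00011170)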
+
+namespace {
+// Reads sizeof(T) bytes from |in| to extract a value of type T
+// (e.g. uint64_t, uint32_t, ...), most significant byte first.
+// See also: https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
+template <typename T>
+T ReadBytesMostSignificantByteFirst(span<uint8_t> in) {
+ assert(static_cast<std::size_t>(in.size()) >= sizeof(T));
+ T result = 0;
+ for (std::size_t shift_bytes = 0; shift_bytes < sizeof(T); ++shift_bytes)
+ result |= T(in[sizeof(T) - 1 - shift_bytes]) << (shift_bytes * 8);
+ return result;
+}
+} // namespace
+
+namespace cbor_internals {
+int8_t ReadTokenStart(span<uint8_t> bytes, MajorType* type, uint64_t* value) {
+ if (bytes.empty()) return -1;
+ uint8_t initial_byte = bytes[0];
+ *type = MajorType((initial_byte & kMajorTypeMask) >> kMajorTypeBitShift);
+
+ uint8_t additional_information = initial_byte & kAdditionalInformationMask;
+ if (additional_information < 24) {
+ // Values 0-23 are encoded directly into the additional info of the
+ // initial byte.
+ *value = additional_information;
+ return 1;
+ }
+ if (additional_information == kAdditionalInformation1Byte) {
+ // Values 24-255 are encoded with one initial byte, followed by the value.
+ if (bytes.size() < 2) return -1;
+ *value = ReadBytesMostSignificantByteFirst<uint8_t>(bytes.subspan(1));
+ return 2;
+ }
+ if (additional_information == kAdditionalInformation2Bytes) {
+ // Values 256-65535: 1 initial byte + 2 bytes payload.
+ if (static_cast<std::size_t>(bytes.size()) < 1 + sizeof(uint16_t))
+ return -1;
+ *value = ReadBytesMostSignificantByteFirst<uint16_t>(bytes.subspan(1));
+ return 3;
+ }
+ if (additional_information == kAdditionalInformation4Bytes) {
+ // 32 bit uint: 1 initial byte + 4 bytes payload.
+ if (static_cast<std::size_t>(bytes.size()) < 1 + sizeof(uint32_t))
+ return -1;
+ *value = ReadBytesMostSignificantByteFirst<uint32_t>(bytes.subspan(1));
+ return 5;
+ }
+ if (additional_information == kAdditionalInformation8Bytes) {
+ // 64 bit uint: 1 initial byte + 8 bytes payload.
+ if (static_cast<std::size_t>(bytes.size()) < 1 + sizeof(uint64_t))
+ return -1;
+ *value = ReadBytesMostSignificantByteFirst<uint64_t>(bytes.subspan(1));
+ return 9;
+ }
+ return -1;
+}
+} // namespace cbor_internals
+
+using cbor_internals::WriteTokenStart;
+using cbor_internals::ReadTokenStart;
+
+void EncodeInt32(int32_t value, std::vector<uint8_t>* out) {
+ if (value >= 0) {
+ WriteTokenStart(MajorType::UNSIGNED, value, out);
+ } else {
+ uint64_t representation = static_cast<uint64_t>(-(value + 1));
+ WriteTokenStart(MajorType::NEGATIVE, representation, out);
+ }
+}
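
For example (a sketch; CBOR represents a negative integer n as the NEGATIVE
token carrying -(n + 1)):

  std::vector<uint8_t> out;
  EncodeInt32(-500, &out);
  // NEGATIVE (major type 1) with payload 499:
  // -> 0x39 0x01 0xf3   ((1 << 5) | 25, then 499 = 0x01f3 big endian)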
+
+void EncodeString16(span<uint16_t> in, std::vector<uint8_t>* out) {
+ uint64_t byte_length = static_cast<uint64_t>(in.size_bytes());
+ WriteTokenStart(MajorType::BYTE_STRING, byte_length, out);
+ // When emitting UTF16 characters, we always write the least significant byte
+ // first; this is because it's the native representation for X86.
+ // TODO(johannes): Implement a more efficient thing here later, e.g.
+ // casting *iff* the machine has this byte order.
+ // The wire format for UTF16 chars will probably remain the same
+ // (least significant byte first) since this way we can have
+ // golden files, unittests, etc. that port easily and universally.
+ // See also:
+ // https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
+ for (const uint16_t two_bytes : in) {
+ out->push_back(two_bytes);
+ out->push_back(two_bytes >> 8);
+ }
+}
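
A worked example of the resulting wire bytes (a sketch using the names
defined in this template):

  std::vector<uint8_t> out;
  uint16_t abc[] = {'a', 'b', 'c'};
  EncodeString16(span<uint16_t>(abc, 3), &out);
  // BYTE_STRING (major type 2) with 6 bytes of payload, code units
  // least significant byte first:
  // -> 0x46 0x61 0x00 0x62 0x00 0x63 0x00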
+
+void EncodeString8(span<uint8_t> in, std::vector<uint8_t>* out) {
+ WriteTokenStart(MajorType::STRING, static_cast<uint64_t>(in.size_bytes()),
+ out);
+ out->insert(out->end(), in.begin(), in.end());
+}
+
+void EncodeFromLatin1(span<uint8_t> latin1, std::vector<uint8_t>* out) {
+ for (std::ptrdiff_t ii = 0; ii < latin1.size(); ++ii) {
+ if (latin1[ii] <= 127) continue;
+ // If there's at least one non-ASCII char, convert to UTF8.
+ std::vector<uint8_t> utf8(latin1.begin(), latin1.begin() + ii);
+ for (; ii < latin1.size(); ++ii) {
+ if (latin1[ii] <= 127) {
+ utf8.push_back(latin1[ii]);
+ } else {
+ // 0xC0 means it's a UTF8 sequence with 2 bytes.
+ utf8.push_back((latin1[ii] >> 6) | 0xc0);
+ utf8.push_back((latin1[ii] | 0x80) & 0xbf);
+ }
+ }
+ EncodeString8(span<uint8_t>(utf8.data(), utf8.size()), out);
+ return;
+ }
+ EncodeString8(latin1, out);
+}
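
For instance (a sketch), a Latin-1 input containing one non-ASCII byte is
re-encoded as UTF8 before being emitted as STRING:

  std::vector<uint8_t> out;
  uint8_t cafe[] = {'c', 'a', 'f', 0xe9};  // "café" in Latin-1
  EncodeFromLatin1(span<uint8_t>(cafe, 4), &out);
  // 0xe9 becomes the 2 byte UTF8 sequence 0xc3 0xa9, so the output is
  // STRING (major type 3) of 5 bytes:
  // -> 0x65 0x63 0x61 0x66 0xc3 0xa9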
+
+void EncodeFromUTF16(span<uint16_t> utf16, std::vector<uint8_t>* out) {
+ // If there's at least one non-ASCII char, encode as STRING16 (UTF16).
+ for (uint16_t ch : utf16) {
+ if (ch <= 127) continue;
+ EncodeString16(utf16, out);
+ return;
+ }
+ // It's all US-ASCII; strip out every second byte and encode as UTF8.
+ WriteTokenStart(MajorType::STRING, static_cast<uint64_t>(utf16.size()), out);
+ out->insert(out->end(), utf16.begin(), utf16.end());
+}
+
+void EncodeBinary(span<uint8_t> in, std::vector<uint8_t>* out) {
+ out->push_back(kExpectedConversionToBase64Tag);
+ uint64_t byte_length = static_cast<uint64_t>(in.size_bytes());
+ WriteTokenStart(MajorType::BYTE_STRING, byte_length, out);
+ out->insert(out->end(), in.begin(), in.end());
+}
+
+// A double is encoded with a specific initial byte
+// (kInitialByteForDouble) plus the 64 bits of payload for its value.
+constexpr std::ptrdiff_t kEncodedDoubleSize = 1 + sizeof(uint64_t);
+
+// An envelope is encoded with a specific initial byte
+// (kInitialByteForEnvelope), plus the start byte for a BYTE_STRING with a 32
+// bit wide length, plus a 32 bit length for that string.
+constexpr std::ptrdiff_t kEncodedEnvelopeHeaderSize = 1 + 1 + sizeof(uint32_t);
+
+void EncodeDouble(double value, std::vector<uint8_t>* out) {
+ // The additional_info=27 indicates 64 bits for the double follow.
+ // See RFC 7049 Section 2.3, Table 1.
+ out->push_back(kInitialByteForDouble);
+ union {
+ double from_double;
+ uint64_t to_uint64;
+ } reinterpret;
+ reinterpret.from_double = value;
+ WriteBytesMostSignificantByteFirst<uint64_t>(reinterpret.to_uint64, out);
+}
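
As a worked example (a sketch; the payload is just the IEEE 754 bit pattern):

  std::vector<uint8_t> out;
  EncodeDouble(1.0, &out);
  // kInitialByteForDouble is 0xfb ((7 << 5) | 27); 1.0 is
  // 0x3ff0000000000000, emitted most significant byte first:
  // -> 0xfb 0x3f 0xf0 0x00 0x00 0x00 0x00 0x00 0x00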
+
+void EnvelopeEncoder::EncodeStart(std::vector<uint8_t>* out) {
+ assert(byte_size_pos_ == 0);
+ out->push_back(kInitialByteForEnvelope);
+ out->push_back(kInitialByteFor32BitLengthByteString);
+ byte_size_pos_ = out->size();
+ out->resize(out->size() + sizeof(uint32_t));
+}
+
+bool EnvelopeEncoder::EncodeStop(std::vector<uint8_t>* out) {
+ assert(byte_size_pos_ != 0);
+ // The byte size is the size of the payload, that is, all the
+ // bytes that were written past the byte size position itself.
+ uint64_t byte_size = out->size() - (byte_size_pos_ + sizeof(uint32_t));
+ // We store exactly 4 bytes, so at most INT32MAX, with most significant
+ // byte first.
+ if (byte_size > std::numeric_limits<uint32_t>::max()) return false;
+ for (int shift_bytes = sizeof(uint32_t) - 1; shift_bytes >= 0;
+ --shift_bytes) {
+ (*out)[byte_size_pos_++] = 0xff & (byte_size >> (shift_bytes * 8));
+ }
+ return true;
+}
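
A minimal sketch of the intended call pattern (mirroring how the
JSONToCBOREncoder below uses this class):

  std::vector<uint8_t> out;
  EnvelopeEncoder envelope;
  envelope.EncodeStart(&out);           // tag 24 + 32 bit length placeholder
  out.push_back(EncodeIndefiniteLengthMapStart());
  // ... emit alternating keys and values here ...
  out.push_back(EncodeStop());
  bool ok = envelope.EncodeStop(&out);  // patches the placeholder with the
                                        // payload size; false if > 4 GB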
+
+namespace {
+class JSONToCBOREncoder : public JSONParserHandler {
+ public:
+ JSONToCBOREncoder(std::vector<uint8_t>* out, Status* status)
+ : out_(out), status_(status) {
+ *status_ = Status();
+ }
+
+ void HandleObjectBegin() override {
+ envelopes_.emplace_back();
+ envelopes_.back().EncodeStart(out_);
+ out_->push_back(kInitialByteIndefiniteLengthMap);
+ }
+
+ void HandleObjectEnd() override {
+ out_->push_back(kStopByte);
+ assert(!envelopes_.empty());
+ envelopes_.back().EncodeStop(out_);
+ envelopes_.pop_back();
+ }
+
+ void HandleArrayBegin() override {
+ envelopes_.emplace_back();
+ envelopes_.back().EncodeStart(out_);
+ out_->push_back(kInitialByteIndefiniteLengthArray);
+ }
+
+ void HandleArrayEnd() override {
+ out_->push_back(kStopByte);
+ assert(!envelopes_.empty());
+ envelopes_.back().EncodeStop(out_);
+ envelopes_.pop_back();
+ }
+
+ void HandleString8(span<uint8_t> chars) override {
+ EncodeString8(chars, out_);
+ }
+
+ void HandleString16(span<uint16_t> chars) override {
+ for (uint16_t ch : chars) {
+ if (ch >= 0x7f) {
+ // If there's at least one non-7bit character, we encode as UTF16.
+ EncodeString16(chars, out_);
+ return;
+ }
+ }
+ std::vector<uint8_t> sevenbit_chars(chars.begin(), chars.end());
+ EncodeString8(span<uint8_t>(sevenbit_chars.data(), sevenbit_chars.size()),
+ out_);
+ }
+
+ void HandleBinary(std::vector<uint8_t> bytes) override {
+ EncodeBinary(span<uint8_t>(bytes.data(), bytes.size()), out_);
+ }
+
+ void HandleDouble(double value) override { EncodeDouble(value, out_); }
+
+ void HandleInt32(int32_t value) override { EncodeInt32(value, out_); }
+
+ void HandleBool(bool value) override {
+ // See RFC 7049 Section 2.3, Table 2.
+ out_->push_back(value ? kEncodedTrue : kEncodedFalse);
+ }
+
+ void HandleNull() override {
+ // See RFC 7049 Section 2.3, Table 2.
+ out_->push_back(kEncodedNull);
+ }
+
+ void HandleError(Status error) override {
+ assert(!error.ok());
+ *status_ = error;
+ out_->clear();
+ }
+
+ private:
+ std::vector<uint8_t>* out_;
+ std::vector<EnvelopeEncoder> envelopes_;
+ Status* status_;
+};
+} // namespace
+
+std::unique_ptr<JSONParserHandler> NewJSONToCBOREncoder(
+ std::vector<uint8_t>* out, Status* status) {
+ return std::unique_ptr<JSONParserHandler>(new JSONToCBOREncoder(out, status));
+}
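
A minimal usage sketch: driving the returned handler by hand produces the
CBOR equivalent of the JSON message {"id": 1}:

  std::vector<uint8_t> cbor;
  Status status;
  std::unique_ptr<JSONParserHandler> encoder =
      NewJSONToCBOREncoder(&cbor, &status);
  encoder->HandleObjectBegin();
  uint8_t key[] = {'i', 'd'};
  encoder->HandleString8(span<uint8_t>(key, 2));
  encoder->HandleInt32(1);
  encoder->HandleObjectEnd();
  // On success |status| stays OK and |cbor| holds the envelope-wrapped map.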
+
+namespace {
+// Below are three parsing routines for CBOR, which cover enough
+// to roundtrip JSON messages.
+bool ParseMap(int32_t stack_depth, CBORTokenizer* tokenizer,
+ JSONParserHandler* out);
+bool ParseArray(int32_t stack_depth, CBORTokenizer* tokenizer,
+ JSONParserHandler* out);
+bool ParseValue(int32_t stack_depth, CBORTokenizer* tokenizer,
+ JSONParserHandler* out);
+
+void ParseUTF16String(CBORTokenizer* tokenizer, JSONParserHandler* out) {
+ std::vector<uint16_t> value;
+ span<uint8_t> rep = tokenizer->GetString16WireRep();
+ for (std::ptrdiff_t ii = 0; ii < rep.size(); ii += 2)
+ value.push_back((rep[ii + 1] << 8) | rep[ii]);
+ out->HandleString16(span<uint16_t>(value.data(), value.size()));
+ tokenizer->Next();
+}
+
+bool ParseUTF8String(CBORTokenizer* tokenizer, JSONParserHandler* out) {
+ assert(tokenizer->TokenTag() == CBORTokenTag::STRING8);
+ out->HandleString8(tokenizer->GetString8());
+ tokenizer->Next();
+ return true;
+}
+
+bool ParseValue(int32_t stack_depth, CBORTokenizer* tokenizer,
+ JSONParserHandler* out) {
+ if (stack_depth > kStackLimit) {
+ out->HandleError(
+ Status{Error::CBOR_STACK_LIMIT_EXCEEDED, tokenizer->Status().pos});
+ return false;
+ }
+ // Skip past the envelope to get to what's inside.
+ if (tokenizer->TokenTag() == CBORTokenTag::ENVELOPE)
+ tokenizer->EnterEnvelope();
+ switch (tokenizer->TokenTag()) {
+ case CBORTokenTag::ERROR_VALUE:
+ out->HandleError(tokenizer->Status());
+ return false;
+ case CBORTokenTag::DONE:
+ out->HandleError(Status{Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE,
+ tokenizer->Status().pos});
+ return false;
+ case CBORTokenTag::TRUE_VALUE:
+ out->HandleBool(true);
+ tokenizer->Next();
+ return true;
+ case CBORTokenTag::FALSE_VALUE:
+ out->HandleBool(false);
+ tokenizer->Next();
+ return true;
+ case CBORTokenTag::NULL_VALUE:
+ out->HandleNull();
+ tokenizer->Next();
+ return true;
+ case CBORTokenTag::INT32:
+ out->HandleInt32(tokenizer->GetInt32());
+ tokenizer->Next();
+ return true;
+ case CBORTokenTag::DOUBLE:
+ out->HandleDouble(tokenizer->GetDouble());
+ tokenizer->Next();
+ return true;
+ case CBORTokenTag::STRING8:
+ return ParseUTF8String(tokenizer, out);
+ case CBORTokenTag::STRING16:
+ ParseUTF16String(tokenizer, out);
+ return true;
+ case CBORTokenTag::BINARY: {
+ span<uint8_t> binary = tokenizer->GetBinary();
+ out->HandleBinary(std::vector<uint8_t>(binary.begin(), binary.end()));
+ tokenizer->Next();
+ return true;
+ }
+ case CBORTokenTag::MAP_START:
+ return ParseMap(stack_depth + 1, tokenizer, out);
+ case CBORTokenTag::ARRAY_START:
+ return ParseArray(stack_depth + 1, tokenizer, out);
+ default:
+ out->HandleError(
+ Status{Error::CBOR_UNSUPPORTED_VALUE, tokenizer->Status().pos});
+ return false;
+ }
+}
+
+// |bytes| must start with the indefinite length array byte, so basically,
+// ParseArray may only be called after an indefinite length array has been
+// detected.
+bool ParseArray(int32_t stack_depth, CBORTokenizer* tokenizer,
+ JSONParserHandler* out) {
+ assert(tokenizer->TokenTag() == CBORTokenTag::ARRAY_START);
+ tokenizer->Next();
+ out->HandleArrayBegin();
+ while (tokenizer->TokenTag() != CBORTokenTag::STOP) {
+ if (tokenizer->TokenTag() == CBORTokenTag::DONE) {
+ out->HandleError(
+ Status{Error::CBOR_UNEXPECTED_EOF_IN_ARRAY, tokenizer->Status().pos});
+ return false;
+ }
+ if (tokenizer->TokenTag() == CBORTokenTag::ERROR_VALUE) {
+ out->HandleError(tokenizer->Status());
+ return false;
+ }
+ // Parse value.
+ if (!ParseValue(stack_depth, tokenizer, out)) return false;
+ }
+ out->HandleArrayEnd();
+ tokenizer->Next();
+ return true;
+}
+
+// |bytes| must start with the indefinite length map byte, so basically,
+// ParseMap may only be called after an indefinite length map has been
+// detected.
+bool ParseMap(int32_t stack_depth, CBORTokenizer* tokenizer,
+ JSONParserHandler* out) {
+ assert(tokenizer->TokenTag() == CBORTokenTag::MAP_START);
+ out->HandleObjectBegin();
+ tokenizer->Next();
+ while (tokenizer->TokenTag() != CBORTokenTag::STOP) {
+ if (tokenizer->TokenTag() == CBORTokenTag::DONE) {
+ out->HandleError(
+ Status{Error::CBOR_UNEXPECTED_EOF_IN_MAP, tokenizer->Status().pos});
+ return false;
+ }
+ if (tokenizer->TokenTag() == CBORTokenTag::ERROR_VALUE) {
+ out->HandleError(tokenizer->Status());
+ return false;
+ }
+ // Parse key.
+ if (tokenizer->TokenTag() == CBORTokenTag::STRING8) {
+ if (!ParseUTF8String(tokenizer, out)) return false;
+ } else if (tokenizer->TokenTag() == CBORTokenTag::STRING16) {
+ ParseUTF16String(tokenizer, out);
+ } else {
+ out->HandleError(
+ Status{Error::CBOR_INVALID_MAP_KEY, tokenizer->Status().pos});
+ return false;
+ }
+ // Parse value.
+ if (!ParseValue(stack_depth, tokenizer, out)) return false;
+ }
+ out->HandleObjectEnd();
+ tokenizer->Next();
+ return true;
+}
+} // namespace
+
+void ParseCBOR(span<uint8_t> bytes, JSONParserHandler* json_out) {
+ if (bytes.empty()) {
+ json_out->HandleError(Status{Error::CBOR_NO_INPUT, 0});
+ return;
+ }
+ if (bytes[0] != kInitialByteForEnvelope) {
+ json_out->HandleError(Status{Error::CBOR_INVALID_START_BYTE, 0});
+ return;
+ }
+ CBORTokenizer tokenizer(bytes);
+ if (tokenizer.TokenTag() == CBORTokenTag::ERROR_VALUE) {
+ json_out->HandleError(tokenizer.Status());
+ return;
+ }
+ // We checked for the envelope start byte above, so the tokenizer
+ // must agree here, since it's not an error.
+ assert(tokenizer.TokenTag() == CBORTokenTag::ENVELOPE);
+ tokenizer.EnterEnvelope();
+ if (tokenizer.TokenTag() != CBORTokenTag::MAP_START) {
+ json_out->HandleError(
+ Status{Error::CBOR_MAP_START_EXPECTED, tokenizer.Status().pos});
+ return;
+ }
+ if (!ParseMap(/*stack_depth=*/1, &tokenizer, json_out)) return;
+ if (tokenizer.TokenTag() == CBORTokenTag::DONE) return;
+ if (tokenizer.TokenTag() == CBORTokenTag::ERROR_VALUE) {
+ json_out->HandleError(tokenizer.Status());
+ return;
+ }
+ json_out->HandleError(
+ Status{Error::CBOR_TRAILING_JUNK, tokenizer.Status().pos});
+}
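
Since ParseCBOR emits the same events that NewJSONToCBOREncoder consumes,
the two can be chained to validate and normalize a message. A sketch,
assuming |input| is some CBOR message held in a std::vector<uint8_t>:

  std::vector<uint8_t> reencoded;
  Status status;
  std::unique_ptr<JSONParserHandler> handler =
      NewJSONToCBOREncoder(&reencoded, &status);
  ParseCBOR(span<uint8_t>(input.data(), input.size()), handler.get());
  // On malformed input HandleError fires: |status| carries the error and
  // |reencoded| is cleared. Otherwise |reencoded| is a normalized copy.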
+
+CBORTokenizer::CBORTokenizer(span<uint8_t> bytes) : bytes_(bytes) {
+ ReadNextToken(/*enter_envelope=*/false);
+}
+CBORTokenizer::~CBORTokenizer() {}
+
+CBORTokenTag CBORTokenizer::TokenTag() const { return token_tag_; }
+
+void CBORTokenizer::Next() {
+ if (token_tag_ == CBORTokenTag::ERROR_VALUE || token_tag_ == CBORTokenTag::DONE)
+ return;
+ ReadNextToken(/*enter_envelope=*/false);
+}
+
+void CBORTokenizer::EnterEnvelope() {
+ assert(token_tag_ == CBORTokenTag::ENVELOPE);
+ ReadNextToken(/*enter_envelope=*/true);
+}
+
+Status CBORTokenizer::Status() const { return status_; }
+
+int32_t CBORTokenizer::GetInt32() const {
+ assert(token_tag_ == CBORTokenTag::INT32);
+ // The range checks happen in ::ReadNextToken().
+ return static_cast<uint32_t>(
+ token_start_type_ == MajorType::UNSIGNED
+ ? token_start_internal_value_
+ : -static_cast<int64_t>(token_start_internal_value_) - 1);
+}
+
+double CBORTokenizer::GetDouble() const {
+ assert(token_tag_ == CBORTokenTag::DOUBLE);
+ union {
+ uint64_t from_uint64;
+ double to_double;
+ } reinterpret;
+ reinterpret.from_uint64 = ReadBytesMostSignificantByteFirst<uint64_t>(
+ bytes_.subspan(status_.pos + 1));
+ return reinterpret.to_double;
+}
+
+span<uint8_t> CBORTokenizer::GetString8() const {
+ assert(token_tag_ == CBORTokenTag::STRING8);
+ auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
+ return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
+}
+
+span<uint8_t> CBORTokenizer::GetString16WireRep() const {
+ assert(token_tag_ == CBORTokenTag::STRING16);
+ auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
+ return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
+}
+
+span<uint8_t> CBORTokenizer::GetBinary() const {
+ assert(token_tag_ == CBORTokenTag::BINARY);
+ auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
+ return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
+}
+
+void CBORTokenizer::ReadNextToken(bool enter_envelope) {
+ if (enter_envelope) {
+ status_.pos += kEncodedEnvelopeHeaderSize;
+ } else {
+ status_.pos =
+ status_.pos == Status::npos() ? 0 : status_.pos + token_byte_length_;
+ }
+ status_.error = Error::OK;
+ if (status_.pos >= bytes_.size()) {
+ token_tag_ = CBORTokenTag::DONE;
+ return;
+ }
+ switch (bytes_[status_.pos]) {
+ case kStopByte:
+ SetToken(CBORTokenTag::STOP, 1);
+ return;
+ case kInitialByteIndefiniteLengthMap:
+ SetToken(CBORTokenTag::MAP_START, 1);
+ return;
+ case kInitialByteIndefiniteLengthArray:
+ SetToken(CBORTokenTag::ARRAY_START, 1);
+ return;
+ case kEncodedTrue:
+ SetToken(CBORTokenTag::TRUE_VALUE, 1);
+ return;
+ case kEncodedFalse:
+ SetToken(CBORTokenTag::FALSE_VALUE, 1);
+ return;
+ case kEncodedNull:
+ SetToken(CBORTokenTag::NULL_VALUE, 1);
+ return;
+ case kExpectedConversionToBase64Tag: { // BINARY
+ int8_t bytes_read =
+ ReadTokenStart(bytes_.subspan(status_.pos + 1), &token_start_type_,
+ &token_start_internal_value_);
+ int64_t token_byte_length = 1 + bytes_read + token_start_internal_value_;
+ if (-1 == bytes_read || token_start_type_ != MajorType::BYTE_STRING ||
+ status_.pos + token_byte_length > bytes_.size()) {
+ SetError(Error::CBOR_INVALID_BINARY);
+ return;
+ }
+ SetToken(CBORTokenTag::BINARY,
+ static_cast<std::ptrdiff_t>(token_byte_length));
+ return;
+ }
+ case kInitialByteForDouble: { // DOUBLE
+ if (status_.pos + kEncodedDoubleSize > bytes_.size()) {
+ SetError(Error::CBOR_INVALID_DOUBLE);
+ return;
+ }
+ SetToken(CBORTokenTag::DOUBLE, kEncodedDoubleSize);
+ return;
+ }
+ case kInitialByteForEnvelope: { // ENVELOPE
+ if (status_.pos + kEncodedEnvelopeHeaderSize > bytes_.size()) {
+ SetError(Error::CBOR_INVALID_ENVELOPE);
+ return;
+ }
+ // The envelope must be a byte string with 32 bit length.
+ if (bytes_[status_.pos + 1] != kInitialByteFor32BitLengthByteString) {
+ SetError(Error::CBOR_INVALID_ENVELOPE);
+ return;
+ }
+ // Read the length of the byte string.
+ token_start_internal_value_ = ReadBytesMostSignificantByteFirst<uint32_t>(
+ bytes_.subspan(status_.pos + 2));
+ // Make sure the payload is contained within the message.
+ if (token_start_internal_value_ + kEncodedEnvelopeHeaderSize +
+ status_.pos >
+ static_cast<std::size_t>(bytes_.size())) {
+ SetError(Error::CBOR_INVALID_ENVELOPE);
+ return;
+ }
+ auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
+ SetToken(CBORTokenTag::ENVELOPE,
+ kEncodedEnvelopeHeaderSize + length);
+ return;
+ }
+ default: {
+ span<uint8_t> remainder =
+ bytes_.subspan(status_.pos, bytes_.size() - status_.pos);
+ assert(!remainder.empty());
+ int8_t token_start_length = ReadTokenStart(remainder, &token_start_type_,
+ &token_start_internal_value_);
+ bool success = token_start_length != -1;
+ switch (token_start_type_) {
+ case MajorType::UNSIGNED: // INT32.
+ if (!success || std::numeric_limits<int32_t>::max() <
+ token_start_internal_value_) {
+ SetError(Error::CBOR_INVALID_INT32);
+ return;
+ }
+ SetToken(CBORTokenTag::INT32, token_start_length);
+ return;
+ case MajorType::NEGATIVE: // INT32.
+ if (!success ||
+ std::numeric_limits<int32_t>::min() >
+ -static_cast<int64_t>(token_start_internal_value_) - 1) {
+ SetError(Error::CBOR_INVALID_INT32);
+ return;
+ }
+ SetToken(CBORTokenTag::INT32, token_start_length);
+ return;
+ case MajorType::STRING: { // STRING8.
+ if (!success || remainder.size() < static_cast<int64_t>(
+ token_start_internal_value_)) {
+ SetError(Error::CBOR_INVALID_STRING8);
+ return;
+ }
+ auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
+ SetToken(CBORTokenTag::STRING8, token_start_length + length);
+ return;
+ }
+ case MajorType::BYTE_STRING: { // STRING16.
+ if (!success ||
+ remainder.size() <
+ static_cast<int64_t>(token_start_internal_value_) ||
+ // Must be divisible by 2 since UTF16 is 2 bytes per character.
+ token_start_internal_value_ & 1) {
+ SetError(Error::CBOR_INVALID_STRING16);
+ return;
+ }
+ auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
+ SetToken(CBORTokenTag::STRING16, token_start_length + length);
+ return;
+ }
+ case MajorType::ARRAY:
+ case MajorType::MAP:
+ case MajorType::TAG:
+ case MajorType::SIMPLE_VALUE:
+ SetError(Error::CBOR_UNSUPPORTED_VALUE);
+ return;
+ }
+ }
+ }
+}
+
+void CBORTokenizer::SetToken(CBORTokenTag token_tag,
+ std::ptrdiff_t token_byte_length) {
+ token_tag_ = token_tag;
+ token_byte_length_ = token_byte_length;
+}
+
+void CBORTokenizer::SetError(Error error) {
+ token_tag_ = CBORTokenTag::ERROR_VALUE;
+ status_.error = error;
+}
+
+#if 0
+void DumpCBOR(span<uint8_t> cbor) {
+ std::string indent;
+ CBORTokenizer tokenizer(cbor);
+ while (true) {
+ fprintf(stderr, "%s", indent.c_str());
+ switch (tokenizer.TokenTag()) {
+ case CBORTokenTag::ERROR_VALUE:
+ fprintf(stderr, "ERROR {status.error=%d, status.pos=%ld}\n",
+ tokenizer.Status().error, tokenizer.Status().pos);
+ return;
+ case CBORTokenTag::DONE:
+ fprintf(stderr, "DONE\n");
+ return;
+ case CBORTokenTag::TRUE_VALUE:
+ fprintf(stderr, "TRUE_VALUE\n");
+ break;
+ case CBORTokenTag::FALSE_VALUE:
+ fprintf(stderr, "FALSE_VALUE\n");
+ break;
+ case CBORTokenTag::NULL_VALUE:
+ fprintf(stderr, "NULL_VALUE\n");
+ break;
+ case CBORTokenTag::INT32:
+ fprintf(stderr, "INT32 [%d]\n", tokenizer.GetInt32());
+ break;
+ case CBORTokenTag::DOUBLE:
+ fprintf(stderr, "DOUBLE [%lf]\n", tokenizer.GetDouble());
+ break;
+ case CBORTokenTag::STRING8: {
+ span<uint8_t> v = tokenizer.GetString8();
+ std::string t(v.begin(), v.end());
+ fprintf(stderr, "STRING8 [%s]\n", t.c_str());
+ break;
+ }
+ case CBORTokenTag::STRING16: {
+ span<uint8_t> v = tokenizer.GetString16WireRep();
+ std::string t(v.begin(), v.end());
+ fprintf(stderr, "STRING16 [%s]\n", t.c_str());
+ break;
+ }
+ case CBORTokenTag::BINARY: {
+ span<uint8_t> v = tokenizer.GetBinary();
+ std::string t(v.begin(), v.end());
+ fprintf(stderr, "BINARY [%s]\n", t.c_str());
+ break;
+ }
+ case CBORTokenTag::MAP_START:
+ fprintf(stderr, "MAP_START\n");
+ indent += " ";
+ break;
+ case CBORTokenTag::ARRAY_START:
+ fprintf(stderr, "ARRAY_START\n");
+ indent += " ";
+ break;
+ case CBORTokenTag::STOP:
+ fprintf(stderr, "STOP\n");
+ indent.erase(0, 2);
+ break;
+ case CBORTokenTag::ENVELOPE:
+ fprintf(stderr, "ENVELOPE\n");
+ tokenizer.EnterEnvelope();
+ continue;
+ }
+ tokenizer.Next();
+ }
+}
+#endif
+
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/CBOR_h.template b/deps/v8/third_party/inspector_protocol/lib/CBOR_h.template
new file mode 100644
index 0000000000..9d28adbd8e
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/CBOR_h.template
@@ -0,0 +1,425 @@
+{# This template is generated by gen_cbor_templates.py. #}
+// Generated by lib/CBOR_h.template.
+
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_CBOR_h
+#define {{"_".join(config.protocol.namespace)}}_CBOR_h
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+// ===== encoding/status.h =====
+
+// Error codes.
+enum class Error {
+ OK = 0,
+ // JSON parsing errors - json_parser.{h,cc}.
+ JSON_PARSER_UNPROCESSED_INPUT_REMAINS = 0x01,
+ JSON_PARSER_STACK_LIMIT_EXCEEDED = 0x02,
+ JSON_PARSER_NO_INPUT = 0x03,
+ JSON_PARSER_INVALID_TOKEN = 0x04,
+ JSON_PARSER_INVALID_NUMBER = 0x05,
+ JSON_PARSER_INVALID_STRING = 0x06,
+ JSON_PARSER_UNEXPECTED_ARRAY_END = 0x07,
+ JSON_PARSER_COMMA_OR_ARRAY_END_EXPECTED = 0x08,
+ JSON_PARSER_STRING_LITERAL_EXPECTED = 0x09,
+ JSON_PARSER_COLON_EXPECTED = 0x0a,
+ JSON_PARSER_UNEXPECTED_OBJECT_END = 0x0b,
+ JSON_PARSER_COMMA_OR_OBJECT_END_EXPECTED = 0x0c,
+ JSON_PARSER_VALUE_EXPECTED = 0x0d,
+
+ CBOR_INVALID_INT32 = 0x0e,
+ CBOR_INVALID_DOUBLE = 0x0f,
+ CBOR_INVALID_ENVELOPE = 0x10,
+ CBOR_INVALID_STRING8 = 0x11,
+ CBOR_INVALID_STRING16 = 0x12,
+ CBOR_INVALID_BINARY = 0x13,
+ CBOR_UNSUPPORTED_VALUE = 0x14,
+ CBOR_NO_INPUT = 0x15,
+ CBOR_INVALID_START_BYTE = 0x16,
+ CBOR_UNEXPECTED_EOF_EXPECTED_VALUE = 0x17,
+ CBOR_UNEXPECTED_EOF_IN_ARRAY = 0x18,
+ CBOR_UNEXPECTED_EOF_IN_MAP = 0x19,
+ CBOR_INVALID_MAP_KEY = 0x1a,
+ CBOR_STACK_LIMIT_EXCEEDED = 0x1b,
+ CBOR_STRING8_MUST_BE_7BIT = 0x1c,
+ CBOR_TRAILING_JUNK = 0x1d,
+ CBOR_MAP_START_EXPECTED = 0x1e,
+};
+
+// A status value with position that can be copied. The default status
+// is OK. Usually, error status values should come with a valid position.
+struct Status {
+ static constexpr std::ptrdiff_t npos() { return -1; }
+
+ bool ok() const { return error == Error::OK; }
+
+ Error error = Error::OK;
+ std::ptrdiff_t pos = npos();
+ Status(Error error, std::ptrdiff_t pos) : error(error), pos(pos) {}
+ Status() = default;
+};
+
+// ===== encoding/span.h =====
+
+// This template is similar to std::span, which will be included in C++20. Like
+// std::span it uses ptrdiff_t, which is signed (and thus a bit annoying
+// sometimes when comparing with size_t), but other than this it's much simpler.
+template <typename T>
+class span {
+ public:
+ using index_type = std::ptrdiff_t;
+
+ span() : data_(nullptr), size_(0) {}
+ span(const T* data, index_type size) : data_(data), size_(size) {}
+
+ const T* data() const { return data_; }
+
+ const T* begin() const { return data_; }
+ const T* end() const { return data_ + size_; }
+
+ const T& operator[](index_type idx) const { return data_[idx]; }
+
+ span<T> subspan(index_type offset, index_type count) const {
+ return span(data_ + offset, count);
+ }
+
+ span<T> subspan(index_type offset) const {
+ return span(data_ + offset, size_ - offset);
+ }
+
+ bool empty() const { return size_ == 0; }
+
+ index_type size() const { return size_; }
+ index_type size_bytes() const { return size_ * sizeof(T); }
+
+ private:
+ const T* data_;
+ index_type size_;
+};
+
+// ===== encoding/json_parser_handler.h =====
+
+// Handler interface for JSON parser events. See also json_parser.h.
+class JSONParserHandler {
+ public:
+ virtual ~JSONParserHandler() = default;
+ virtual void HandleObjectBegin() = 0;
+ virtual void HandleObjectEnd() = 0;
+ virtual void HandleArrayBegin() = 0;
+ virtual void HandleArrayEnd() = 0;
+ virtual void HandleString8(span<uint8_t> chars) = 0;
+ virtual void HandleString16(span<uint16_t> chars) = 0;
+ virtual void HandleBinary(std::vector<uint8_t> bytes) = 0;
+ virtual void HandleDouble(double value) = 0;
+ virtual void HandleInt32(int32_t value) = 0;
+ virtual void HandleBool(bool value) = 0;
+ virtual void HandleNull() = 0;
+
+ // The parser may send one error even after other events have already
+ // been received. Client code is then responsible for discarding the
+ // already processed events.
+ // |error| must be an error, as in, |error.ok()| can't be true.
+ virtual void HandleError(Status error) = 0;
+};
+
+// ===== encoding/cbor_internals.h =====
+
+namespace cbor {
+enum class MajorType;
+}
+
+namespace cbor_internals {
+
+// Reads the start of a token with definitive size from |bytes|.
+// |type| is the major type as specified in RFC 7049 Section 2.1.
+// |value| is the payload (e.g. for MajorType::UNSIGNED) or is the size
+// (e.g. for BYTE_STRING).
+// If successful, returns the number of bytes read. Otherwise returns -1.
+int8_t ReadTokenStart(span<uint8_t> bytes, cbor::MajorType* type,
+ uint64_t* value);
+
+// Writes the start of a token with |type|. The |value| may indicate the size,
+// or it may be the payload if the value is an unsigned integer.
+void WriteTokenStart(cbor::MajorType type, uint64_t value,
+ std::vector<uint8_t>* encoded);
+} // namespace cbor_internals
+
+// ===== encoding/cbor.h =====
+
+
+namespace cbor {
+
+// The major types from RFC 7049 Section 2.1.
+enum class MajorType {
+ UNSIGNED = 0,
+ NEGATIVE = 1,
+ BYTE_STRING = 2,
+ STRING = 3,
+ ARRAY = 4,
+ MAP = 5,
+ TAG = 6,
+ SIMPLE_VALUE = 7
+};
+
+// Indicates the number of bits the "initial byte" needs to be shifted to the
+// right after applying |kMajorTypeMask| to produce the major type in the
+// lowermost bits.
+static constexpr uint8_t kMajorTypeBitShift = 5u;
+// Mask selecting the low-order 5 bits of the "initial byte", which is where
+// the additional information is encoded.
+static constexpr uint8_t kAdditionalInformationMask = 0x1f;
+// Mask selecting the high-order 3 bits of the "initial byte", which indicates
+// the major type of the encoded value.
+static constexpr uint8_t kMajorTypeMask = 0xe0;
+// Indicates the integer is in the following byte.
+static constexpr uint8_t kAdditionalInformation1Byte = 24u;
+// Indicates the integer is in the next 2 bytes.
+static constexpr uint8_t kAdditionalInformation2Bytes = 25u;
+// Indicates the integer is in the next 4 bytes.
+static constexpr uint8_t kAdditionalInformation4Bytes = 26u;
+// Indicates the integer is in the next 8 bytes.
+static constexpr uint8_t kAdditionalInformation8Bytes = 27u;
+
+// Encodes the initial byte, consisting of the |type| in the first 3 bits
+// followed by 5 bits of |additional_info|.
+constexpr uint8_t EncodeInitialByte(MajorType type, uint8_t additional_info) {
+ return (static_cast<uint8_t>(type) << kMajorTypeBitShift) |
+ (additional_info & kAdditionalInformationMask);
+}
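
A few spot checks of what this formula yields (a sketch; the values can be
cross-checked against RFC 7049):

  static_assert(EncodeInitialByte(MajorType::TAG, 24) == 0xd8,
                "envelope tag");
  static_assert(EncodeInitialByte(MajorType::MAP, 31) == 0xbf,
                "indefinite length map start");
  static_assert(EncodeInitialByte(MajorType::SIMPLE_VALUE, 31) == 0xff,
                "stop byte");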
+
+// TAG 24 indicates that what follows is a byte string which is
+// encoded in CBOR format. We use this as a wrapper for
+// maps and arrays, allowing us to skip them, because the
+// byte string carries its size (byte length).
+// https://tools.ietf.org/html/rfc7049#section-2.4.4.1
+static constexpr uint8_t kInitialByteForEnvelope =
+ EncodeInitialByte(MajorType::TAG, 24);
+// The initial byte for a byte string with at most 2^32 bytes
+// of payload. This is used for envelope encoding, even if
+// the byte string is shorter.
+static constexpr uint8_t kInitialByteFor32BitLengthByteString =
+ EncodeInitialByte(MajorType::BYTE_STRING, 26);
+
+// See RFC 7049 Section 2.2.1, indefinite length arrays / maps have additional
+// info = 31.
+static constexpr uint8_t kInitialByteIndefiniteLengthArray =
+ EncodeInitialByte(MajorType::ARRAY, 31);
+static constexpr uint8_t kInitialByteIndefiniteLengthMap =
+ EncodeInitialByte(MajorType::MAP, 31);
+// See RFC 7049 Section 2.3, Table 1; this is used for finishing indefinite
+// length maps / arrays.
+static constexpr uint8_t kStopByte =
+ EncodeInitialByte(MajorType::SIMPLE_VALUE, 31);
+
+} // namespace cbor
+
+// The binary encoding for the inspector protocol follows the CBOR specification
+// (RFC 7049). Additional constraints:
+// - Only indefinite length maps and arrays are supported.
+// - Maps and arrays are wrapped with an envelope, that is, a
+// CBOR tag with value 24 followed by a byte string specifying
+// the byte length of the enclosed map / array. The byte string
+// must use a 32 bit wide length.
+// - At the top level, a message must be an indefinite length map
+// wrapped by an envelope.
+// - The maximal size for messages is 2^32 bytes (4 GB).
+// - For scalars, we support only the int32_t range, encoded as
+// UNSIGNED/NEGATIVE (major types 0 / 1).
+// - UTF16 strings, including with unbalanced surrogate pairs, are encoded
+// as CBOR BYTE_STRING (major type 2). For such strings, the number of
+// bytes encoded must be even.
+// - UTF8 strings (major type 3) are supported.
+// - 7 bit US-ASCII strings must always be encoded as UTF8 strings, not
+// as UTF16 strings.
+// - Arbitrary byte arrays, in the inspector protocol called 'binary',
+// are encoded as BYTE_STRING (major type 2), prefixed with a byte
+// indicating base64 when rendered as JSON.
+
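
Putting these constraints together, the smallest interesting message, the
equivalent of the JSON {"id": 1}, is laid out as follows (a worked example,
not part of the specification text above):

  // d8 5a         envelope: tag 24, then byte string with 32 bit length
  // 00 00 00 06   payload length: 6 bytes
  // bf            indefinite length map start
  // 62 69 64      STRING8 of length 2: "id"
  // 01            UNSIGNED 1
  // ff            stop byte, ends the map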
+// Encodes |value| as |UNSIGNED| (major type 0) iff >= 0, or |NEGATIVE|
+// (major type 1) iff < 0.
+void EncodeInt32(int32_t value, std::vector<uint8_t>* out);
+
+// Encodes a UTF16 string as a BYTE_STRING (major type 2). Each UTF16
+// character in |in| is emitted with least significant byte first
+// (little endian), appending to |out|.
+void EncodeString16(span<uint16_t> in, std::vector<uint8_t>* out);
+
+// Encodes a UTF8 string |in| as STRING (major type 3).
+void EncodeString8(span<uint8_t> in, std::vector<uint8_t>* out);
+
+// Encodes the given |latin1| string as STRING8.
+// If any non-ASCII character is present, it will be represented
+// as a 2 byte UTF8 sequence.
+void EncodeFromLatin1(span<uint8_t> latin1, std::vector<uint8_t>* out);
+
+// Encodes the given |utf16| string as STRING8 if it's entirely US-ASCII.
+// Otherwise, encodes as STRING16.
+void EncodeFromUTF16(span<uint16_t> utf16, std::vector<uint8_t>* out);
+
+// Encodes arbitrary binary data in |in| as a BYTE_STRING (major type 2) with
+// definitive length, prefixed with tag 22 indicating expected conversion to
+// base64 (see RFC 7049, Table 3 and Section 2.4.4.2).
+void EncodeBinary(span<uint8_t> in, std::vector<uint8_t>* out);
+
+// Encodes a double as major type 7 (SIMPLE_VALUE),
+// with additional info = 27, followed by 8 bytes in big endian.
+void EncodeDouble(double value, std::vector<uint8_t>* out);
+
+// Some constants for CBOR tokens that only take a single byte on the wire.
+uint8_t EncodeTrue();
+uint8_t EncodeFalse();
+uint8_t EncodeNull();
+uint8_t EncodeIndefiniteLengthArrayStart();
+uint8_t EncodeIndefiniteLengthMapStart();
+uint8_t EncodeStop();
+
+// An envelope indicates the byte length of a wrapped item.
+// We use this for maps and arrays, which allows the decoder
+// to skip such (nested) values wholesale.
+// It's implemented as a CBOR tag (major type 6) with additional
+// info = 24, followed by a byte string with a 32 bit length value;
+// so the maximal structure that we can wrap is 2^32 bits long.
+// See also: https://tools.ietf.org/html/rfc7049#section-2.4.4.1
+class EnvelopeEncoder {
+ public:
+ // Emits the envelope start bytes and records the position for the
+ // byte size in |byte_size_pos_|. Also emits placeholder bytes for the
+ // byte size so that encoding can continue.
+ void EncodeStart(std::vector<uint8_t>* out);
+ // This records the current size in |out| at position byte_size_pos_.
+ // Returns true iff successful.
+ bool EncodeStop(std::vector<uint8_t>* out);
+
+ private:
+ std::size_t byte_size_pos_ = 0;
+};
+
+// This can be used to convert from JSON to CBOR, by passing the
+// return value to the routines in json_parser.h. The handler will encode into
+// |out|, and iff an error occurs it will set |status| to an error and clear
+// |out|. Otherwise, |status.ok()| will be |true|.
+std::unique_ptr<JSONParserHandler> NewJSONToCBOREncoder(
+ std::vector<uint8_t>* out, Status* status);
+
+// Parses a CBOR encoded message from |bytes|, sending JSON events to
+// |json_out|. If an error occurs, sends |out->HandleError|, and parsing stops.
+// The client is responsible for discarding the already received information in
+// that case.
+void ParseCBOR(span<uint8_t> bytes, JSONParserHandler* json_out);
+
+// Tags for the tokens within a CBOR message that CBORTokenizer understands.
+// Note that this is not the same terminology as the CBOR spec (RFC 7049),
+// but rather, our adaptation. For instance, we lump unsigned and signed
+// major type into INT32 here (and disallow values outside the int32_t range).
+enum class CBORTokenTag {
+ // Encountered an error in the structure of the message. Consult
+ // status() for details.
+ ERROR_VALUE,
+ // Booleans and NULL.
+ TRUE_VALUE,
+ FALSE_VALUE,
+ NULL_VALUE,
+ // An int32_t (signed 32 bit integer).
+ INT32,
+ // A double (64 bit floating point).
+ DOUBLE,
+ // A UTF8 string.
+ STRING8,
+ // A UTF16 string.
+ STRING16,
+ // A binary string.
+ BINARY,
+ // Starts an indefinite length map; after the map start we expect
+ // alternating keys and values, followed by STOP.
+ MAP_START,
+ // Starts an indefinite length array; after the array start we
+ // expect values, followed by STOP.
+ ARRAY_START,
+ // Ends a map or an array.
+ STOP,
+ // An envelope indicator, wrapping a map or array.
+ // Internally this carries the byte length of the wrapped
+ // map or array. While CBORTokenizer::Next() will read / skip the entire
+ // envelope, CBORTokenizer::EnterEnvelope() reads the tokens
+ // inside of it.
+ ENVELOPE,
+ // We've reached the end; there is nothing else to read.
+ DONE,
+};
+
+// CBORTokenizer segments a CBOR message, presenting the tokens therein as
+// numbers, strings, etc. This is not a complete CBOR parser, but makes it much
+// easier to implement one (e.g. ParseCBOR, above). It can also be used to parse
+// messages partially.
+class CBORTokenizer {
+ public:
+ explicit CBORTokenizer(span<uint8_t> bytes);
+ ~CBORTokenizer();
+
+ // Identifies the current token that we're looking at,
+ // or ERROR_VALUE (in which case ::Status() has details)
+ // or DONE (if we're past the last token).
+ CBORTokenTag TokenTag() const;
+
+ // Advances to the next token.
+ void Next();
+ // Can only be called if TokenTag() == CBORTokenTag::ENVELOPE.
+ // While Next() would skip past the entire envelope / what it's
+ // wrapping, EnterEnvelope positions the cursor inside of the envelope,
+ // letting the client explore the nested structure.
+ void EnterEnvelope();
+
+ // If TokenTag() is CBORTokenTag::ERROR_VALUE, then Status().error describes
+ // the error more precisely; otherwise it'll be set to Error::OK.
+ // In either case, Status().pos is the current position.
+ struct Status Status() const;
+
+ // The following methods retrieve the token values. They can only
+ // be called if TokenTag() matches.
+
+ // To be called only if ::TokenTag() == CBORTokenTag::INT32.
+ int32_t GetInt32() const;
+
+ // To be called only if ::TokenTag() == CBORTokenTag::DOUBLE.
+ double GetDouble() const;
+
+ // To be called only if ::TokenTag() == CBORTokenTag::STRING8.
+ span<uint8_t> GetString8() const;
+
+ // Wire representation for STRING16 is low byte first (little endian).
+ // To be called only if ::TokenTag() == CBORTokenTag::STRING16.
+ span<uint8_t> GetString16WireRep() const;
+
+ // To be called only if ::TokenTag() == CBORTokenTag::BINARY.
+ span<uint8_t> GetBinary() const;
+
+ private:
+ void ReadNextToken(bool enter_envelope);
+ void SetToken(CBORTokenTag token, std::ptrdiff_t token_byte_length);
+ void SetError(Error error);
+
+ span<uint8_t> bytes_;
+ CBORTokenTag token_tag_;
+ struct Status status_;
+ std::ptrdiff_t token_byte_length_;
+ cbor::MajorType token_start_type_;
+ uint64_t token_start_internal_value_;
+};
+
+void DumpCBOR(span<uint8_t> cbor);
+
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+#endif // !defined({{"_".join(config.protocol.namespace)}}_CBOR_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
index 4b1b2b8148..11843f4330 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
@@ -1,3 +1,5 @@
+// This file is generated by DispatcherBase_cpp.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -68,7 +70,7 @@ DispatcherBase::WeakPtr::~WeakPtr()
m_dispatcher->m_weakPtrs.erase(this);
}
-DispatcherBase::Callback::Callback(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId, const String& method, const String& message)
+DispatcherBase::Callback::Callback(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId, const String& method, const ProtocolMessage& message)
: m_backendImpl(std::move(backendImpl))
, m_callId(callId)
, m_method(method)
@@ -140,18 +142,14 @@ public:
return std::unique_ptr<ProtocolError>(new ProtocolError(code, errorMessage));
}
- String serialize() override
+ String serializeToJSON() override
{
- std::unique_ptr<protocol::DictionaryValue> error = DictionaryValue::create();
- error->setInteger("code", m_code);
- error->setString("message", m_errorMessage);
- if (m_data.length())
- error->setString("data", m_data);
- std::unique_ptr<protocol::DictionaryValue> message = DictionaryValue::create();
- message->setObject("error", std::move(error));
- if (m_hasCallId)
- message->setInteger("id", m_callId);
- return message->serialize();
+ return serialize()->serializeToJSON();
+ }
+
+ std::vector<uint8_t> serializeToBinary() override
+ {
+ return serialize()->serializeToBinary();
}
~ProtocolError() override {}
@@ -163,6 +161,19 @@ private:
{
}
+ std::unique_ptr<DictionaryValue> serialize() {
+ std::unique_ptr<protocol::DictionaryValue> error = DictionaryValue::create();
+ error->setInteger("code", m_code);
+ error->setString("message", m_errorMessage);
+ if (m_data.length())
+ error->setString("data", m_data);
+ std::unique_ptr<protocol::DictionaryValue> message = DictionaryValue::create();
+ message->setObject("error", std::move(error));
+ if (m_hasCallId)
+ message->setInteger("id", m_callId);
+ return message;
+ }
+
DispatchResponse::ErrorCode m_code;
String m_errorMessage;
String m_data;
@@ -273,7 +284,7 @@ bool UberDispatcher::canDispatch(const String& in_method)
return !!findDispatcher(method);
}
-void UberDispatcher::dispatch(int callId, const String& in_method, std::unique_ptr<Value> parsedMessage, const String& rawMessage)
+void UberDispatcher::dispatch(int callId, const String& in_method, std::unique_ptr<Value> parsedMessage, const ProtocolMessage& rawMessage)
{
String method = in_method;
auto redirectIt = m_redirects.find(method);
@@ -302,18 +313,32 @@ std::unique_ptr<InternalResponse> InternalResponse::createNotification(const Str
return std::unique_ptr<InternalResponse>(new InternalResponse(0, notification, std::move(params)));
}
-String InternalResponse::serialize()
+String InternalResponse::serializeToJSON()
+{
+ std::unique_ptr<DictionaryValue> result = DictionaryValue::create();
+ std::unique_ptr<Serializable> params(m_params ? std::move(m_params) : DictionaryValue::create());
+ if (m_notification.length()) {
+ result->setString("method", m_notification);
+ result->setValue("params", SerializedValue::fromJSON(params->serializeToJSON()));
+ } else {
+ result->setInteger("id", m_callId);
+ result->setValue("result", SerializedValue::fromJSON(params->serializeToJSON()));
+ }
+ return result->serializeToJSON();
+}
+
+std::vector<uint8_t> InternalResponse::serializeToBinary()
{
std::unique_ptr<DictionaryValue> result = DictionaryValue::create();
std::unique_ptr<Serializable> params(m_params ? std::move(m_params) : DictionaryValue::create());
if (m_notification.length()) {
result->setString("method", m_notification);
- result->setValue("params", SerializedValue::create(params->serialize()));
+ result->setValue("params", SerializedValue::fromBinary(params->serializeToBinary()));
} else {
result->setInteger("id", m_callId);
- result->setValue("result", SerializedValue::create(params->serialize()));
+ result->setValue("result", SerializedValue::fromBinary(params->serializeToBinary()));
}
- return result->serialize();
+ return result->serializeToBinary();
}
InternalResponse::InternalResponse(int callId, const String& notification, std::unique_ptr<Serializable> params)
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
index 4708e032d8..7d859c4f27 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
@@ -1,3 +1,5 @@
+// This file is generated by DispatcherBase_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -66,7 +68,7 @@ public:
class {{config.lib.export_macro}} Callback {
public:
- Callback(std::unique_ptr<WeakPtr> backendImpl, int callId, const String& method, const String& message);
+ Callback(std::unique_ptr<WeakPtr> backendImpl, int callId, const String& method, const ProtocolMessage& message);
virtual ~Callback();
void dispose();
@@ -78,14 +80,14 @@ public:
std::unique_ptr<WeakPtr> m_backendImpl;
int m_callId;
String m_method;
- String m_message;
+ ProtocolMessage m_message;
};
explicit DispatcherBase(FrontendChannel*);
virtual ~DispatcherBase();
virtual bool canDispatch(const String& method) = 0;
- virtual void dispatch(int callId, const String& method, const String& rawMessage, std::unique_ptr<protocol::DictionaryValue> messageObject) = 0;
+ virtual void dispatch(int callId, const String& method, const ProtocolMessage& rawMessage, std::unique_ptr<protocol::DictionaryValue> messageObject) = 0;
FrontendChannel* channel() { return m_frontendChannel; }
void sendResponse(int callId, const DispatchResponse&, std::unique_ptr<protocol::DictionaryValue> result);
@@ -109,7 +111,7 @@ public:
void setupRedirects(const std::unordered_map<String, String>&);
bool parseCommand(Value* message, int* callId, String* method);
bool canDispatch(const String& method);
- void dispatch(int callId, const String& method, std::unique_ptr<Value> message, const String& rawMessage);
+ void dispatch(int callId, const String& method, std::unique_ptr<Value> message, const ProtocolMessage& rawMessage);
FrontendChannel* channel() { return m_frontendChannel; }
virtual ~UberDispatcher();
@@ -126,7 +128,8 @@ public:
static std::unique_ptr<InternalResponse> createResponse(int callId, std::unique_ptr<Serializable> params);
static std::unique_ptr<InternalResponse> createNotification(const String& notification, std::unique_ptr<Serializable> params = nullptr);
- String serialize() override;
+ String serializeToJSON() override;
+ std::vector<uint8_t> serializeToBinary() override;
~InternalResponse() override {}
@@ -140,24 +143,36 @@ private:
class InternalRawNotification : public Serializable {
public:
- static std::unique_ptr<InternalRawNotification> create(const String& notification)
+ static std::unique_ptr<InternalRawNotification> fromJSON(String notification)
+ {
+ return std::unique_ptr<InternalRawNotification>(new InternalRawNotification(std::move(notification)));
+ }
+
+ static std::unique_ptr<InternalRawNotification> fromBinary(std::vector<uint8_t> notification)
{
- return std::unique_ptr<InternalRawNotification>(new InternalRawNotification(notification));
+ return std::unique_ptr<InternalRawNotification>(new InternalRawNotification(std::move(notification)));
}
+
~InternalRawNotification() override {}
- String serialize() override
+ String serializeToJSON() override
+ {
+ return std::move(m_jsonNotification);
+ }
+
+ std::vector<uint8_t> serializeToBinary() override
{
- return m_notification;
+ return std::move(m_binaryNotification);
}
private:
- explicit InternalRawNotification(const String& notification)
- : m_notification(notification)
- {
- }
+ explicit InternalRawNotification(String notification)
+ : m_jsonNotification(std::move(notification)) { }
+ explicit InternalRawNotification(std::vector<uint8_t> notification)
+ : m_binaryNotification(std::move(notification)) { }
- String m_notification;
+ String m_jsonNotification;
+ std::vector<uint8_t> m_binaryNotification;
};
{% for namespace in config.protocol.namespace %}
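Note that InternalRawNotification's serializeToJSON and serializeToBinary both move the stored payload out, so each instance is effectively single-use; that matches the generated Frontend code below (TypeBuilder_cpp.template), which constructs a fresh notification per sendProtocolNotification call.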
diff --git a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template
index 7b858b8dc4..a5c2a79bbd 100644
--- a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template
@@ -1,3 +1,5 @@
+// This file is generated by ErrorSupport_cpp.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template
index 11934c3abe..f317a3cfb4 100644
--- a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template
@@ -1,3 +1,5 @@
+// This file is generated by ErrorSupport_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
index ac792e0837..ff5e685863 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
@@ -1,3 +1,5 @@
+// This file is generated by Forward_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template b/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template
index 4fba5be314..df104debad 100644
--- a/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template
@@ -1,3 +1,5 @@
+// This file is generated by FrontendChannel_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -11,7 +13,14 @@ namespace {{namespace}} {
class {{config.lib.export_macro}} Serializable {
public:
- virtual String serialize() = 0;
+ ProtocolMessage serialize(bool binary) {
+ if (binary)
+ return StringUtil::binaryToMessage(serializeToBinary());
+ else
+ return StringUtil::jsonToMessage(serializeToJSON());
+ }
+ virtual String serializeToJSON() = 0;
+ virtual std::vector<uint8_t> serializeToBinary() = 0;
virtual ~Serializable() = default;
};
@@ -20,7 +29,7 @@ public:
virtual ~FrontendChannel() { }
virtual void sendProtocolResponse(int callId, std::unique_ptr<Serializable> message) = 0;
virtual void sendProtocolNotification(std::unique_ptr<Serializable> message) = 0;
- virtual void fallThrough(int callId, const String& method, const String& message) = 0;
+ virtual void fallThrough(int callId, const String& method, const ProtocolMessage& message) = 0;
virtual void flushProtocolNotifications() = 0;
};
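The reworked Serializable is the pivot of this roll: every protocol object can now emit either wire format, and serialize(binary) merely routes to the matching virtual. A minimal sketch of a conforming subclass, assuming the std::string adapter (where String and ProtocolMessage are both std::string); the class name is illustrative, and 0xf6 is the CBOR encoding of null:

    // Illustrative only -- not part of the generated library.
    class NullMessage : public Serializable {
     public:
      String serializeToJSON() override { return "null"; }
      std::vector<uint8_t> serializeToBinary() override {
        return {0xf6};  // CBOR simple value "null"
      }
    };
    // serialize(/*binary=*/false) yields the JSON text "null";
    // serialize(/*binary=*/true) yields the one-byte CBOR message,
    // each wrapped into a ProtocolMessage by the StringUtil helpers.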
diff --git a/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template b/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
index 15626ab350..22cfac6b24 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
@@ -1,3 +1,5 @@
+// This file is generated by Maybe_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -83,7 +85,7 @@ protected:
template<>
class Maybe<bool> : public MaybeBase<bool> {
public:
- Maybe() { }
+ Maybe() { m_value = false; }
Maybe(bool value) : MaybeBase(value) { }
Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
using MaybeBase::operator=;
@@ -92,7 +94,7 @@ public:
template<>
class Maybe<int> : public MaybeBase<int> {
public:
- Maybe() { }
+ Maybe() { m_value = 0; }
Maybe(int value) : MaybeBase(value) { }
Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
using MaybeBase::operator=;
@@ -101,7 +103,7 @@ public:
template<>
class Maybe<double> : public MaybeBase<double> {
public:
- Maybe() { }
+ Maybe() { m_value = 0; }
Maybe(double value) : MaybeBase(value) { }
Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
using MaybeBase::operator=;
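Behind these one-line Maybe changes is a real correctness fix: a default-constructed Maybe&lt;bool&gt;, Maybe&lt;int&gt;, or Maybe&lt;double&gt; previously left m_value indeterminate. A reduced sketch of the hazard, with m_isJust as an assumed member name (m_value is the name used in the patch itself):

    template <typename T>
    class MaybeBase {
     public:
      MaybeBase() : m_isJust(false) {}             // m_value never written
      explicit MaybeBase(T value) : m_isJust(true), m_value(value) {}
     protected:
      bool m_isJust;
      T m_value;
    };
    // Copying or otherwise reading through a default-constructed
    // Maybe<int> may touch uninitialized memory, which tools like MSan
    // flag; the patched constructors store a deterministic 0/false.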
diff --git a/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template
index 91723a71e2..1640a11127 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template
@@ -1,3 +1,5 @@
+// This file is generated by Object_cpp.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/deps/v8/third_party/inspector_protocol/lib/Object_h.template b/deps/v8/third_party/inspector_protocol/lib/Object_h.template
index 9efed8e033..ec953d0d48 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Object_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Object_h.template
@@ -1,3 +1,5 @@
+// This file is generated by Object_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template
index ea27652096..ea7ecc5a1a 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template
@@ -1,3 +1,5 @@
+// This file is generated by Parser_cpp.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/deps/v8/third_party/inspector_protocol/lib/Parser_h.template b/deps/v8/third_party/inspector_protocol/lib/Parser_h.template
index 8397d3f5d6..1832c2e972 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Parser_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Parser_h.template
@@ -1,3 +1,5 @@
+// This file is generated by Parser_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/deps/v8/third_party/inspector_protocol/lib/Protocol_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Protocol_cpp.template
index 1167ed400a..88303a27ab 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Protocol_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Protocol_cpp.template
@@ -1,4 +1,4 @@
-// This file is generated.
+// This file is generated by Protocol_cpp.template.
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
diff --git a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
index 6549e5ab39..2ee5b72454 100644
--- a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
@@ -1,3 +1,5 @@
+// This file is generated by ValueConversions_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -103,12 +105,17 @@ template<>
struct ValueConversions<Binary> {
static Binary fromValue(protocol::Value* value, ErrorSupport* errors)
{
- String result;
- bool success = value ? value->asString(&result) : false;
- if (!success) {
- errors->addError("string value expected");
+ if (!value ||
+ (value->type() != Value::TypeBinary && value->type() != Value::TypeString)) {
+ errors->addError("Either string base64 or binary value expected");
return Binary();
}
+ Binary binary;
+ if (value->asBinary(&binary))
+ return binary;
+ String result;
+ value->asString(&result);
+ bool success;
Binary out = Binary::fromBase64(result, &success);
if (!success)
errors->addError("base64 decoding error");
@@ -117,7 +124,7 @@ struct ValueConversions<Binary> {
static std::unique_ptr<protocol::Value> toValue(const Binary& value)
{
- return StringValue::create(value.toBase64());
+ return BinaryValue::create(value);
}
};
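With this change a binary field tolerates both wire representations. A usage sketch, assuming the std::string adapter added later in this roll (Binary::fromVector and the value factories behave as generated):

    ErrorSupport errors;
    // Native binary, as the CBOR parser produces it:
    Binary a = ValueConversions<Binary>::fromValue(
        BinaryValue::create(Binary::fromVector({1, 2, 3})).get(), &errors);
    // Base64 text, as a JSON client sends it ("AQID" encodes 01 02 03):
    Binary b = ValueConversions<Binary>::fromValue(
        StringValue::create("AQID").get(), &errors);
    // a and b hold the same three bytes. toValue() now emits a
    // BinaryValue, which falls back to base64 only when serializing JSON.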
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
index b9f061346b..bf31babf94 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
@@ -1,3 +1,5 @@
+// This file is generated by Values_cpp.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -58,8 +60,168 @@ void escapeStringForJSONInternal(const Char* str, unsigned len,
}
}
+// When parsing CBOR, we limit recursion depth for objects and arrays
+// to this constant.
+static constexpr int kStackLimitValues = 1000;
+
+// Below are three parsing routines for CBOR, which cover enough
+// to roundtrip JSON messages.
+std::unique_ptr<DictionaryValue> parseMap(int32_t stack_depth, CBORTokenizer* tokenizer);
+std::unique_ptr<ListValue> parseArray(int32_t stack_depth, CBORTokenizer* tokenizer);
+std::unique_ptr<Value> parseValue(int32_t stack_depth, CBORTokenizer* tokenizer);
+
+// The tokenizer must be positioned at an indefinite length array start,
+// so parseArray may only be called after an indefinite length array has
+// been detected.
+std::unique_ptr<ListValue> parseArray(int32_t stack_depth, CBORTokenizer* tokenizer) {
+ DCHECK(tokenizer->TokenTag() == CBORTokenTag::ARRAY_START);
+ tokenizer->Next();
+ auto list = ListValue::create();
+ while (tokenizer->TokenTag() != CBORTokenTag::STOP) {
+ // Error::CBOR_UNEXPECTED_EOF_IN_ARRAY
+ if (tokenizer->TokenTag() == CBORTokenTag::DONE) return nullptr;
+ if (tokenizer->TokenTag() == CBORTokenTag::ERROR_VALUE) return nullptr;
+ // Parse value.
+ auto value = parseValue(stack_depth, tokenizer);
+ if (!value) return nullptr;
+ list->pushValue(std::move(value));
+ }
+ tokenizer->Next();
+ return list;
+}
+
+std::unique_ptr<Value> parseValue(
+ int32_t stack_depth, CBORTokenizer* tokenizer) {
+ // Error::CBOR_STACK_LIMIT_EXCEEDED
+ if (stack_depth > kStackLimitValues) return nullptr;
+ // Skip past the envelope to get to what's inside.
+ if (tokenizer->TokenTag() == CBORTokenTag::ENVELOPE)
+ tokenizer->EnterEnvelope();
+ switch (tokenizer->TokenTag()) {
+ case CBORTokenTag::ERROR_VALUE:
+ return nullptr;
+ case CBORTokenTag::DONE:
+ // Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE
+ return nullptr;
+ case CBORTokenTag::TRUE_VALUE: {
+ std::unique_ptr<Value> value = FundamentalValue::create(true);
+ tokenizer->Next();
+ return value;
+ }
+ case CBORTokenTag::FALSE_VALUE: {
+ std::unique_ptr<Value> value = FundamentalValue::create(false);
+ tokenizer->Next();
+ return value;
+ }
+ case CBORTokenTag::NULL_VALUE: {
+ std::unique_ptr<Value> value = FundamentalValue::null();
+ tokenizer->Next();
+ return value;
+ }
+ case CBORTokenTag::INT32: {
+ std::unique_ptr<Value> value = FundamentalValue::create(tokenizer->GetInt32());
+ tokenizer->Next();
+ return value;
+ }
+ case CBORTokenTag::DOUBLE: {
+ std::unique_ptr<Value> value = FundamentalValue::create(tokenizer->GetDouble());
+ tokenizer->Next();
+ return value;
+ }
+ case CBORTokenTag::STRING8: {
+ span<uint8_t> str = tokenizer->GetString8();
+ std::unique_ptr<Value> value =
+ StringValue::create(StringUtil::fromUTF8(str.data(), str.size()));
+ tokenizer->Next();
+ return value;
+ }
+ case CBORTokenTag::STRING16: {
+ span<uint8_t> wire = tokenizer->GetString16WireRep();
+ DCHECK_EQ(wire.size() & 1, 0);
+ std::unique_ptr<Value> value = StringValue::create(StringUtil::fromUTF16(
+ reinterpret_cast<const uint16_t*>(wire.data()), wire.size() / 2));
+ tokenizer->Next();
+ return value;
+ }
+ case CBORTokenTag::BINARY: {
+ span<uint8_t> payload = tokenizer->GetBinary();
+ tokenizer->Next();
+ return BinaryValue::create(Binary::fromSpan(payload.data(), payload.size()));
+ }
+ case CBORTokenTag::MAP_START:
+ return parseMap(stack_depth + 1, tokenizer);
+ case CBORTokenTag::ARRAY_START:
+ return parseArray(stack_depth + 1, tokenizer);
+ default:
+ // Error::CBOR_UNSUPPORTED_VALUE
+ return nullptr;
+ }
+}
+
+// The tokenizer must be positioned at an indefinite length map start,
+// so parseMap may only be called after an indefinite length map has
+// been detected.
+std::unique_ptr<DictionaryValue> parseMap(
+ int32_t stack_depth, CBORTokenizer* tokenizer) {
+ auto dict = DictionaryValue::create();
+ tokenizer->Next();
+ while (tokenizer->TokenTag() != CBORTokenTag::STOP) {
+ if (tokenizer->TokenTag() == CBORTokenTag::DONE) {
+ // Error::CBOR_UNEXPECTED_EOF_IN_MAP
+ return nullptr;
+ }
+ if (tokenizer->TokenTag() == CBORTokenTag::ERROR_VALUE) return nullptr;
+ // Parse key.
+ String key;
+ if (tokenizer->TokenTag() == CBORTokenTag::STRING8) {
+ span<uint8_t> key_span = tokenizer->GetString8();
+ key = StringUtil::fromUTF8(key_span.data(), key_span.size());
+ tokenizer->Next();
+ } else if (tokenizer->TokenTag() == CBORTokenTag::STRING16) {
+ return nullptr; // STRING16 not supported yet.
+ } else {
+ // Error::CBOR_INVALID_MAP_KEY
+ return nullptr;
+ }
+ // Parse value.
+ auto value = parseValue(stack_depth, tokenizer);
+ if (!value) return nullptr;
+ dict->setValue(key, std::move(value));
+ }
+ tokenizer->Next();
+ return dict;
+}
+
} // anonymous namespace
+// static
+std::unique_ptr<Value> Value::parseBinary(const uint8_t* data, size_t size) {
+ span<uint8_t> bytes(data, size);
+
+ // Error::CBOR_NO_INPUT
+ if (bytes.empty()) return nullptr;
+
+ // Error::CBOR_INVALID_START_BYTE
+ // TODO(johannes): EncodeInitialByteForEnvelope() method.
+ if (bytes[0] != 0xd8) return nullptr;
+
+ CBORTokenizer tokenizer(bytes);
+ if (tokenizer.TokenTag() == CBORTokenTag::ERROR_VALUE) return nullptr;
+
+ // We checked for the envelope start byte above, so the tokenizer
+ // must agree here, since it's not an error.
+ DCHECK(tokenizer.TokenTag() == CBORTokenTag::ENVELOPE);
+ tokenizer.EnterEnvelope();
+ // Error::MAP_START_EXPECTED
+ if (tokenizer.TokenTag() != CBORTokenTag::MAP_START) return nullptr;
+ std::unique_ptr<Value> result = parseMap(/*stack_depth=*/1, &tokenizer);
+ if (!result) return nullptr;
+ if (tokenizer.TokenTag() == CBORTokenTag::DONE) return result;
+ if (tokenizer.TokenTag() == CBORTokenTag::ERROR_VALUE) return nullptr;
+ // Error::CBOR_TRAILING_JUNK
+ return nullptr;
+}
+
bool Value::asBoolean(bool*) const
{
return false;
@@ -80,7 +242,7 @@ bool Value::asString(String*) const
return false;
}
-bool Value::asSerialized(String*) const
+bool Value::asBinary(Binary*) const
{
return false;
}
@@ -91,12 +253,17 @@ void Value::writeJSON(StringBuilder* output) const
StringUtil::builderAppend(*output, nullValueString, 4);
}
+void Value::writeBinary(std::vector<uint8_t>* bytes) const {
+ DCHECK(m_type == TypeNull);
+ bytes->push_back(EncodeNull());
+}
+
std::unique_ptr<Value> Value::clone() const
{
return Value::null();
}
-String Value::serialize()
+String Value::toJSONString() const
{
StringBuilder result;
StringUtil::builderReserve(result, 512);
@@ -104,6 +271,16 @@ String Value::serialize()
return StringUtil::builderToString(result);
}
+String Value::serializeToJSON() {
+ return toJSONString();
+}
+
+std::vector<uint8_t> Value::serializeToBinary() {
+ std::vector<uint8_t> bytes;
+ writeBinary(&bytes);
+ return bytes;
+}
+
bool FundamentalValue::asBoolean(bool* output) const
{
if (type() != TypeBoolean)
@@ -152,6 +329,22 @@ void FundamentalValue::writeJSON(StringBuilder* output) const
}
}
+void FundamentalValue::writeBinary(std::vector<uint8_t>* bytes) const {
+ switch (type()) {
+ case TypeDouble:
+ EncodeDouble(m_doubleValue, bytes);
+ return;
+ case TypeInteger:
+ EncodeInt32(m_integerValue, bytes);
+ return;
+ case TypeBoolean:
+ bytes->push_back(m_boolValue ? EncodeTrue() : EncodeFalse());
+ return;
+ default:
+ DCHECK(false);
+ }
+}
+
std::unique_ptr<Value> FundamentalValue::clone() const
{
switch (type()) {
@@ -176,26 +369,80 @@ void StringValue::writeJSON(StringBuilder* output) const
StringUtil::builderAppendQuotedString(*output, m_stringValue);
}
+namespace {
+// This routine distinguishes between the current encoding for a given
+// string |s|, and calls encoding routines that will
+// - Ensure that all ASCII strings end up being encoded as UTF8 in
+// the wire format - e.g., EncodeFromUTF16 will detect ASCII and
+// do the (trivial) transcode to STRING8 on the wire, but if it's
+// not ASCII it'll do STRING16.
+// - Select a format that's cheap to convert to. E.g., we don't
+// have LATIN1 on the wire, so we call EncodeFromLatin1 which
+// transcodes to UTF8 if needed.
+void EncodeString(const String& s, std::vector<uint8_t>* out) {
+ if (StringUtil::CharactersLatin1(s)) {
+ EncodeFromLatin1(span<uint8_t>(StringUtil::CharactersLatin1(s),
+ StringUtil::CharacterCount(s)),
+ out);
+ } else if (StringUtil::CharactersUTF16(s)) {
+ EncodeFromUTF16(span<uint16_t>(StringUtil::CharactersUTF16(s),
+ StringUtil::CharacterCount(s)),
+ out);
+ } else if (StringUtil::CharactersUTF8(s)) {
+ EncodeString8(span<uint8_t>(StringUtil::CharactersUTF8(s),
+ StringUtil::CharacterCount(s)),
+ out);
+ } else {
+ EncodeString8(span<uint8_t>(nullptr, 0), out); // Empty string.
+ }
+}
+} // namespace
+
+void StringValue::writeBinary(std::vector<uint8_t>* bytes) const {
+ EncodeString(m_stringValue, bytes);
+}
+
std::unique_ptr<Value> StringValue::clone() const
{
return StringValue::create(m_stringValue);
}
-bool SerializedValue::asSerialized(String* output) const
+bool BinaryValue::asBinary(Binary* output) const
{
- *output = m_serializedValue;
+ *output = m_binaryValue;
return true;
}
+void BinaryValue::writeJSON(StringBuilder* output) const
+{
+ DCHECK(type() == TypeBinary);
+ StringUtil::builderAppendQuotedString(*output, m_binaryValue.toBase64());
+}
+
+void BinaryValue::writeBinary(std::vector<uint8_t>* bytes) const {
+ EncodeBinary(span<uint8_t>(m_binaryValue.data(), m_binaryValue.size()), bytes);
+}
+
+std::unique_ptr<Value> BinaryValue::clone() const
+{
+ return BinaryValue::create(m_binaryValue);
+}
+
void SerializedValue::writeJSON(StringBuilder* output) const
{
DCHECK(type() == TypeSerialized);
- StringUtil::builderAppend(*output, m_serializedValue);
+ StringUtil::builderAppend(*output, m_serializedJSON);
+}
+
+void SerializedValue::writeBinary(std::vector<uint8_t>* output) const
+{
+ DCHECK(type() == TypeSerialized);
+ output->insert(output->end(), m_serializedBinary.begin(), m_serializedBinary.end());
}
std::unique_ptr<Value> SerializedValue::clone() const
{
- return SerializedValue::create(m_serializedValue);
+ return std::unique_ptr<SerializedValue>(new SerializedValue(m_serializedJSON, m_serializedBinary));
}
DictionaryValue::~DictionaryValue()
@@ -335,6 +582,21 @@ void DictionaryValue::writeJSON(StringBuilder* output) const
StringUtil::builderAppend(*output, '}');
}
+void DictionaryValue::writeBinary(std::vector<uint8_t>* bytes) const {
+ EnvelopeEncoder encoder;
+ encoder.EncodeStart(bytes);
+ bytes->push_back(EncodeIndefiniteLengthMapStart());
+ for (size_t i = 0; i < m_order.size(); ++i) {
+ const String& key = m_order[i];
+ Dictionary::const_iterator value = m_data.find(key);
+ DCHECK(value != m_data.cend() && value->second);
+ EncodeString(key, bytes);
+ value->second->writeBinary(bytes);
+ }
+ bytes->push_back(EncodeStop());
+ encoder.EncodeStop(bytes);
+}
+
std::unique_ptr<Value> DictionaryValue::clone() const
{
std::unique_ptr<DictionaryValue> result = DictionaryValue::create();
@@ -369,6 +631,17 @@ void ListValue::writeJSON(StringBuilder* output) const
StringUtil::builderAppend(*output, ']');
}
+void ListValue::writeBinary(std::vector<uint8_t>* bytes) const {
+ EnvelopeEncoder encoder;
+ encoder.EncodeStart(bytes);
+ bytes->push_back(EncodeIndefiniteLengthArrayStart());
+ for (size_t i = 0; i < m_data.size(); ++i) {
+ m_data[i]->writeBinary(bytes);
+ }
+ bytes->push_back(EncodeStop());
+ encoder.EncodeStop(bytes);
+}
+
std::unique_ptr<Value> ListValue::clone() const
{
std::unique_ptr<ListValue> result = ListValue::create();
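Together, the writeBinary overrides and the new Value::parseBinary form a complete CBOR round trip for any message whose top level is a map; parseBinary insists on the 0xd8 envelope wrapping a map, which is exactly what DictionaryValue::writeBinary emits. A minimal round-trip sketch, assuming the std::string adapter:

    auto msg = DictionaryValue::create();
    msg->setValue("id", FundamentalValue::create(1));
    msg->setValue("method", StringValue::create("Page.enable"));

    std::vector<uint8_t> wire;
    msg->writeBinary(&wire);  // envelope, indefinite-length map, stop byte

    std::unique_ptr<Value> parsed =
        Value::parseBinary(wire.data(), wire.size());
    // parsed is non-null, and parsed->toJSONString() reproduces
    // msg->toJSONString().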
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_h.template b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
index e8eb91d85a..4a2e58f4cd 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Values_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
@@ -1,3 +1,5 @@
+// This file is generated by Values_h.template.
+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -26,15 +28,19 @@ public:
return std::unique_ptr<Value>(new Value());
}
+ static std::unique_ptr<Value> parseBinary(const uint8_t* data, size_t size);
+
enum ValueType {
TypeNull = 0,
TypeBoolean,
TypeInteger,
TypeDouble,
TypeString,
+ TypeBinary,
TypeObject,
TypeArray,
- TypeSerialized
+ TypeSerialized,
+ TypeImported
};
ValueType type() const { return m_type; }
@@ -45,11 +51,14 @@ public:
virtual bool asDouble(double* output) const;
virtual bool asInteger(int* output) const;
virtual bool asString(String* output) const;
- virtual bool asSerialized(String* output) const;
+ virtual bool asBinary(Binary* output) const;
virtual void writeJSON(StringBuilder* output) const;
+ virtual void writeBinary(std::vector<uint8_t>* bytes) const;
virtual std::unique_ptr<Value> clone() const;
- String serialize() override;
+ String toJSONString() const;
+ String serializeToJSON() override;
+ std::vector<uint8_t> serializeToBinary() override;
protected:
Value() : m_type(TypeNull) { }
@@ -83,6 +92,7 @@ public:
bool asDouble(double* output) const override;
bool asInteger(int* output) const override;
void writeJSON(StringBuilder* output) const override;
+ void writeBinary(std::vector<uint8_t>* bytes) const override;
std::unique_ptr<Value> clone() const override;
private:
@@ -111,6 +121,7 @@ public:
bool asString(String* output) const override;
void writeJSON(StringBuilder* output) const override;
+ void writeBinary(std::vector<uint8_t>* bytes) const override;
std::unique_ptr<Value> clone() const override;
private:
@@ -120,21 +131,47 @@ private:
String m_stringValue;
};
+class {{config.lib.export_macro}} BinaryValue : public Value {
+public:
+ static std::unique_ptr<BinaryValue> create(const Binary& value)
+ {
+ return std::unique_ptr<BinaryValue>(new BinaryValue(value));
+ }
+
+ bool asBinary(Binary* output) const override;
+ void writeJSON(StringBuilder* output) const override;
+ void writeBinary(std::vector<uint8_t>* bytes) const override;
+ std::unique_ptr<Value> clone() const override;
+
+private:
+ explicit BinaryValue(const Binary& value) : Value(TypeBinary), m_binaryValue(value) { }
+
+ Binary m_binaryValue;
+};
+
class {{config.lib.export_macro}} SerializedValue : public Value {
public:
- static std::unique_ptr<SerializedValue> create(const String& value)
+ static std::unique_ptr<SerializedValue> fromJSON(const String& value)
{
return std::unique_ptr<SerializedValue>(new SerializedValue(value));
}
- bool asSerialized(String* output) const override;
+ static std::unique_ptr<SerializedValue> fromBinary(std::vector<uint8_t> value)
+ {
+ return std::unique_ptr<SerializedValue>(new SerializedValue(std::move(value)));
+ }
+
void writeJSON(StringBuilder* output) const override;
+ void writeBinary(std::vector<uint8_t>* bytes) const override;
std::unique_ptr<Value> clone() const override;
private:
- explicit SerializedValue(const String& value) : Value(TypeSerialized), m_serializedValue(value) { }
-
- String m_serializedValue;
+ explicit SerializedValue(const String& json) : Value(TypeSerialized), m_serializedJSON(json) { }
+ explicit SerializedValue(std::vector<uint8_t> binary) : Value(TypeSerialized), m_serializedBinary(std::move(binary)) { }
+ SerializedValue(const String& json, const std::vector<uint8_t>& binary)
+ : Value(TypeSerialized), m_serializedJSON(json), m_serializedBinary(binary) { }
+ String m_serializedJSON;
+ std::vector<uint8_t> m_serializedBinary;
};
class {{config.lib.export_macro}} DictionaryValue : public Value {
@@ -158,6 +195,7 @@ public:
}
void writeJSON(StringBuilder* output) const override;
+ void writeBinary(std::vector<uint8_t>* bytes) const override;
std::unique_ptr<Value> clone() const override;
size_t size() const { return m_data.size(); }
@@ -226,6 +264,7 @@ public:
~ListValue() override;
void writeJSON(StringBuilder* output) const override;
+ void writeBinary(std::vector<uint8_t>* bytes) const override;
std::unique_ptr<Value> clone() const override;
void pushValue(std::unique_ptr<Value>);
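Of the two new ValueType members, only TypeBinary gets a Value subclass in this header; TypeImported has no producer here at all. Its sole use is the ImportedValue wrapper generated by Imported_h.template further down, which defers writeJSON/writeBinary to an embedder-exported object.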
diff --git a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
new file mode 100644
index 0000000000..24855d4fb5
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
@@ -0,0 +1,311 @@
+// This file is generated by base_string_adapter_cc.template.
+
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include {{format_include(config.protocol.package, "base_string_adapter")}}
+#include {{format_include(config.protocol.package, "Protocol")}}
+
+#include <utility>
+#include "base/base64.h"
+#include "base/json/json_reader.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string16.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/values.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+std::unique_ptr<protocol::Value> toProtocolValue(
+ const base::Value* value, int depth) {
+ if (!value || !depth)
+ return nullptr;
+ if (value->is_none())
+ return protocol::Value::null();
+ if (value->is_bool()) {
+ bool inner;
+ value->GetAsBoolean(&inner);
+ return protocol::FundamentalValue::create(inner);
+ }
+ if (value->is_int()) {
+ int inner;
+ value->GetAsInteger(&inner);
+ return protocol::FundamentalValue::create(inner);
+ }
+ if (value->is_double()) {
+ double inner;
+ value->GetAsDouble(&inner);
+ return protocol::FundamentalValue::create(inner);
+ }
+ if (value->is_string()) {
+ std::string inner;
+ value->GetAsString(&inner);
+ return protocol::StringValue::create(inner);
+ }
+ if (value->is_list()) {
+ const base::ListValue* list = nullptr;
+ value->GetAsList(&list);
+ std::unique_ptr<protocol::ListValue> result = protocol::ListValue::create();
+ for (size_t i = 0; i < list->GetSize(); i++) {
+ const base::Value* item = nullptr;
+ list->Get(i, &item);
+ std::unique_ptr<protocol::Value> converted =
+ toProtocolValue(item, depth - 1);
+ if (converted)
+ result->pushValue(std::move(converted));
+ }
+ return std::move(result);
+ }
+ if (value->is_dict()) {
+ const base::DictionaryValue* dictionary = nullptr;
+ value->GetAsDictionary(&dictionary);
+ std::unique_ptr<protocol::DictionaryValue> result =
+ protocol::DictionaryValue::create();
+ for (base::DictionaryValue::Iterator it(*dictionary);
+ !it.IsAtEnd(); it.Advance()) {
+ std::unique_ptr<protocol::Value> converted =
+ toProtocolValue(&it.value(), depth - 1);
+ if (converted)
+ result->setValue(it.key(), std::move(converted));
+ }
+ return std::move(result);
+ }
+ return nullptr;
+}
+
+std::unique_ptr<base::Value> toBaseValue(Value* value, int depth) {
+ if (!value || !depth)
+ return nullptr;
+ if (value->type() == Value::TypeNull)
+ return std::make_unique<base::Value>();
+ if (value->type() == Value::TypeBoolean) {
+ bool inner;
+ value->asBoolean(&inner);
+ return base::WrapUnique(new base::Value(inner));
+ }
+ if (value->type() == Value::TypeInteger) {
+ int inner;
+ value->asInteger(&inner);
+ return base::WrapUnique(new base::Value(inner));
+ }
+ if (value->type() == Value::TypeDouble) {
+ double inner;
+ value->asDouble(&inner);
+ return base::WrapUnique(new base::Value(inner));
+ }
+ if (value->type() == Value::TypeString) {
+ std::string inner;
+ value->asString(&inner);
+ return base::WrapUnique(new base::Value(inner));
+ }
+ if (value->type() == Value::TypeArray) {
+ ListValue* list = ListValue::cast(value);
+ std::unique_ptr<base::ListValue> result(new base::ListValue());
+ for (size_t i = 0; i < list->size(); i++) {
+ std::unique_ptr<base::Value> converted =
+ toBaseValue(list->at(i), depth - 1);
+ if (converted)
+ result->Append(std::move(converted));
+ }
+ return std::move(result);
+ }
+ if (value->type() == Value::TypeObject) {
+ DictionaryValue* dict = DictionaryValue::cast(value);
+ std::unique_ptr<base::DictionaryValue> result(new base::DictionaryValue());
+ for (size_t i = 0; i < dict->size(); i++) {
+ DictionaryValue::Entry entry = dict->at(i);
+ std::unique_ptr<base::Value> converted =
+ toBaseValue(entry.second, depth - 1);
+ if (converted)
+ result->SetWithoutPathExpansion(entry.first, std::move(converted));
+ }
+ return std::move(result);
+ }
+ return nullptr;
+}
+
+// static
+std::unique_ptr<Value> StringUtil::parseMessage(
+ const std::string& message, bool binary) {
+ if (binary) {
+ return Value::parseBinary(
+ reinterpret_cast<const uint8_t*>(message.data()),
+ message.length());
+ }
+ std::unique_ptr<base::Value> value = base::JSONReader::ReadDeprecated(message);
+ return toProtocolValue(value.get(), 1000);
+}
+
+// static
+ProtocolMessage StringUtil::jsonToMessage(String message) {
+ return message;
+}
+
+// static
+ProtocolMessage StringUtil::binaryToMessage(std::vector<uint8_t> message) {
+ // TODO(pfeldman): figure out what to do with this copy.
+ return std::string(reinterpret_cast<const char*>(message.data()), message.size());
+}
+
+StringBuilder::StringBuilder() {}
+
+StringBuilder::~StringBuilder() {}
+
+void StringBuilder::append(const std::string& s) {
+ string_ += s;
+}
+
+void StringBuilder::append(char c) {
+ string_ += c;
+}
+
+void StringBuilder::append(const char* characters, size_t length) {
+ string_.append(characters, length);
+}
+
+// static
+void StringUtil::builderAppendQuotedString(StringBuilder& builder,
+ const String& str) {
+ builder.append('"');
+ base::string16 str16 = base::UTF8ToUTF16(str);
+ escapeWideStringForJSON(reinterpret_cast<const uint16_t*>(&str16[0]),
+ str16.length(), &builder);
+ builder.append('"');
+}
+
+std::string StringBuilder::toString() {
+ return string_;
+}
+
+void StringBuilder::reserveCapacity(size_t capacity) {
+ string_.reserve(capacity);
+}
+
+// static
+String StringUtil::fromUTF16(const uint16_t* data, size_t length) {
+ std::string utf8;
+ base::UTF16ToUTF8(reinterpret_cast<const base::char16*>(data), length, &utf8);
+ return utf8;
+}
+
+Binary::Binary() : bytes_(new base::RefCountedBytes) {}
+Binary::Binary(const Binary& binary) : bytes_(binary.bytes_) {}
+Binary::Binary(scoped_refptr<base::RefCountedMemory> bytes) : bytes_(bytes) {}
+Binary::~Binary() {}
+
+String Binary::toBase64() const {
+ std::string encoded;
+ base::Base64Encode(
+ base::StringPiece(reinterpret_cast<const char*>(bytes_->front()),
+ bytes_->size()),
+ &encoded);
+ return encoded;
+}
+
+// static
+Binary Binary::fromBase64(const String& base64, bool* success) {
+ std::string decoded;
+ *success = base::Base64Decode(base::StringPiece(base64), &decoded);
+ if (*success) {
+ return Binary::fromString(std::move(decoded));
+ }
+ return Binary();
+}
+
+// static
+Binary Binary::fromRefCounted(scoped_refptr<base::RefCountedMemory> memory) {
+ return Binary(memory);
+}
+
+// static
+Binary Binary::fromVector(std::vector<uint8_t> data) {
+ return Binary(base::RefCountedBytes::TakeVector(&data));
+}
+
+// static
+Binary Binary::fromString(std::string data) {
+ return Binary(base::RefCountedString::TakeString(&data));
+}
+
+// static
+Binary Binary::fromSpan(const uint8_t* data, size_t size) {
+ return Binary(scoped_refptr<base::RefCountedBytes>(
+ new base::RefCountedBytes(data, size)));
+}
+
+namespace {
+int32_t ReadEnvelopeSize(const uint8_t* in) {
+ return (in[0] << 24) + (in[1] << 16) + (in[2] << 8) + in[3];
+}
+
+void WriteEnvelopeSize(uint32_t value, uint8_t* out) {
+ *(out++) = (value >> 24) & 0xFF;
+ *(out++) = (value >> 16) & 0xFF;
+ *(out++) = (value >> 8) & 0xFF;
+ *(out++) = (value) & 0xFF;
+}
+
+} // namespace
+
+bool AppendStringValueToMapBinary(base::StringPiece in,
+ base::StringPiece key, base::StringPiece value, std::string* out) {
+ if (in.size() < 1 + 1 + 4 + 1 + 1)
+ return false;
+ const uint8_t* envelope = reinterpret_cast<const uint8_t*>(in.data());
+ if (cbor::kInitialByteForEnvelope != envelope[0])
+ return false;
+ if (cbor::kInitialByteFor32BitLengthByteString != envelope[1])
+ return false;
+ if (cbor::kInitialByteIndefiniteLengthMap != envelope[6])
+ return false;
+
+ uint32_t envelope_size = ReadEnvelopeSize(envelope + 2);
+ if (envelope_size + 2 + 4 != in.size())
+ return false;
+ if (cbor::kStopByte != static_cast<uint8_t>(*in.rbegin()))
+ return false;
+
+ std::vector<uint8_t> encoded_entry;
+ encoded_entry.reserve(1 + 4 + key.size() + 1 + 4 + value.size());
+ span<uint8_t> key_span(
+ reinterpret_cast<const uint8_t*>(key.data()), key.size());
+ EncodeString8(key_span, &encoded_entry);
+ span<uint8_t> value_span(
+ reinterpret_cast<const uint8_t*>(value.data()), value.size());
+ EncodeString8(value_span, &encoded_entry);
+
+ out->clear();
+ out->reserve(in.size() + encoded_entry.size());
+ out->append(in.begin(), in.end() - 1);
+ out->append(reinterpret_cast<const char*>(encoded_entry.data()),
+ encoded_entry.size());
+ out->append(1, static_cast<char>(cbor::kStopByte));
+ std::size_t new_size = envelope_size + out->size() - in.size();
+ if (new_size > static_cast<std::size_t>(
+ std::numeric_limits<uint32_t>::max())) {
+ return false;
+ }
+ WriteEnvelopeSize(new_size, reinterpret_cast<uint8_t*>(&*out->begin() + 2));
+ return true;
+}
+
+bool AppendStringValueToMapJSON(base::StringPiece in,
+ base::StringPiece key, base::StringPiece value, std::string* out) {
+ if (!in.length() || *in.rbegin() != '}')
+ return false;
+ std::string suffix =
+ base::StringPrintf(", \"%s\": \"%s\"}", key.begin(), value.begin());
+ out->clear();
+ out->reserve(in.length() + suffix.length() - 1);
+ out->append(in.data(), in.length() - 1);
+ out->append(suffix);
+ return true;
+}
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
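The guards at the top of AppendStringValueToMapBinary double as documentation of the envelope layout this adapter expects: since the size is read at offset 2 and validated as envelope_size + 2 + 4 == in.size(), the size field covers everything from the map-start byte through the stop byte. The smallest well-formed message, an empty map, is therefore these eight bytes:

    d8 5a 00 00 00 02 bf ff
    byte 0    : 0xd8         envelope initial byte
    byte 1    : 0x5a         byte string with 32-bit length
    bytes 2-5 : 00 00 00 02  big-endian payload size (= 2)
    byte 6    : 0xbf         indefinite-length map start
    byte 7    : 0xff         stop byte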
diff --git a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
new file mode 100644
index 0000000000..082c7c037e
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
@@ -0,0 +1,149 @@
+// This file is generated by base_string_adapter_h.template.
+
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_BASE_STRING_ADAPTER_H
+#define {{"_".join(config.protocol.namespace)}}_BASE_STRING_ADAPTER_H
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/strings/string_number_conversions.h"
+{% if config.lib.export_header %}
+#include "{{config.lib.export_header}}"
+{% endif %}
+
+namespace base {
+class Value;
+}
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+class Value;
+
+using String = std::string;
+using ProtocolMessage = std::string;
+
+class {{config.lib.export_macro}} StringBuilder {
+ public:
+ StringBuilder();
+ ~StringBuilder();
+ void append(const String&);
+ void append(char);
+ void append(const char*, size_t);
+ String toString();
+ void reserveCapacity(size_t);
+
+ private:
+ std::string string_;
+};
+
+class {{config.lib.export_macro}} StringUtil {
+ public:
+ static String substring(const String& s, unsigned pos, unsigned len) {
+ return s.substr(pos, len);
+ }
+ static String fromInteger(int number) { return base::NumberToString(number); }
+ static String fromDouble(double number) {
+ String s = base::NumberToString(number);
+ if (!s.empty()) { // .123 -> 0.123; -.123 -> -0.123 for valid JSON.
+ if (s[0] == '.')
+ s.insert(/*index=*/ 0, /*count=*/ 1, /*ch=*/ '0');
+ else if (s[0] == '-' && s.size() >= 2 && s[1] == '.')
+ s.insert(/*index=*/ 1, /*count=*/ 1, /*ch=*/ '0');
+ }
+ return s;
+ }
+ static double toDouble(const char* s, size_t len, bool* ok) {
+ double v = 0.0;
+ *ok = base::StringToDouble(std::string(s, len), &v);
+ return *ok ? v : 0.0;
+ }
+ static size_t find(const String& s, const char* needle) {
+ return s.find(needle);
+ }
+ static size_t find(const String& s, const String& needle) {
+ return s.find(needle);
+ }
+ static const size_t kNotFound = static_cast<size_t>(-1);
+ static void builderAppend(StringBuilder& builder, const String& s) {
+ builder.append(s);
+ }
+ static void builderAppend(StringBuilder& builder, char c) {
+ builder.append(c);
+ }
+ static void builderAppend(StringBuilder& builder, const char* s, size_t len) {
+ builder.append(s, len);
+ }
+ static void builderAppendQuotedString(StringBuilder& builder,
+ const String& str);
+ static void builderReserve(StringBuilder& builder, unsigned capacity) {
+ builder.reserveCapacity(capacity);
+ }
+ static String builderToString(StringBuilder& builder) {
+ return builder.toString();
+ }
+
+ static std::unique_ptr<Value> parseMessage(const std::string& message, bool binary);
+ static ProtocolMessage jsonToMessage(String message);
+ static ProtocolMessage binaryToMessage(std::vector<uint8_t> message);
+
+ static String fromUTF8(const uint8_t* data, size_t length) {
+ return std::string(reinterpret_cast<const char*>(data), length);
+ }
+
+ static String fromUTF16(const uint16_t* data, size_t length);
+
+ static const uint8_t* CharactersLatin1(const String& s) { return nullptr; }
+ static const uint8_t* CharactersUTF8(const String& s) {
+ return reinterpret_cast<const uint8_t*>(s.data());
+ }
+ static const uint16_t* CharactersUTF16(const String& s) { return nullptr; }
+ static size_t CharacterCount(const String& s) { return s.size(); }
+};
+
+// A read-only sequence of uninterpreted bytes with reference-counted storage.
+class {{config.lib.export_macro}} Binary {
+ public:
+ Binary(const Binary&);
+ Binary();
+ ~Binary();
+
+ const uint8_t* data() const { return bytes_->front(); }
+ size_t size() const { return bytes_->size(); }
+ scoped_refptr<base::RefCountedMemory> bytes() const { return bytes_; }
+
+ String toBase64() const;
+
+ static Binary fromBase64(const String& base64, bool* success);
+ static Binary fromRefCounted(scoped_refptr<base::RefCountedMemory> memory);
+ static Binary fromVector(std::vector<uint8_t> data);
+ static Binary fromString(std::string data);
+ static Binary fromSpan(const uint8_t* data, size_t size);
+
+ private:
+ explicit Binary(scoped_refptr<base::RefCountedMemory> bytes);
+ scoped_refptr<base::RefCountedMemory> bytes_;
+};
+
+std::unique_ptr<Value> toProtocolValue(const base::Value* value, int depth);
+std::unique_ptr<base::Value> toBaseValue(Value* value, int depth);
+
+bool AppendStringValueToMapBinary(base::StringPiece in,
+ base::StringPiece key, base::StringPiece value, std::string* out);
+bool AppendStringValueToMapJSON(base::StringPiece in,
+ base::StringPiece key, base::StringPiece value, std::string* out);
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_BASE_STRING_ADAPTER_H)
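One subtlety in this adapter is StringUtil::fromDouble: base::NumberToString may emit a bare leading decimal point, which is not valid JSON, so a zero is spliced in. Per the comment in the code (and assuming base's shortest-form output), the behavior is:

    fromDouble(0.123)   // ".123"  from base -> "0.123"
    fromDouble(-0.123)  // "-.123" from base -> "-0.123"
    fromDouble(42.0)    // "42" is left untouched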
diff --git a/deps/v8/third_party/inspector_protocol/pdl.py b/deps/v8/third_party/inspector_protocol/pdl.py
index 652e99c6db..43111e944b 100644
--- a/deps/v8/third_party/inspector_protocol/pdl.py
+++ b/deps/v8/third_party/inspector_protocol/pdl.py
@@ -2,6 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+from __future__ import print_function
import collections
import json
import os.path
@@ -159,7 +160,7 @@ def parse(data, file_name, map_binary_to_string=False):
enumliterals.append(trimLine)
continue
- print 'Error in %s:%s, illegal token: \t%s' % (file_name, i, line)
+ print('Error in %s:%s, illegal token: \t%s' % (file_name, i, line))
sys.exit(1)
return protocol
diff --git a/deps/v8/third_party/inspector_protocol/roll.py b/deps/v8/third_party/inspector_protocol/roll.py
new file mode 100755
index 0000000000..083c0fc9bc
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/roll.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import sys
+import os
+import subprocess
+import glob
+import shutil
+
+
+FILES_TO_SYNC = [
+ 'README.md',
+ 'check_protocol_compatibility.py',
+ 'code_generator.py',
+ 'concatenate_protocols.py',
+ 'convert_protocol_to_json.py',
+ 'inspector_protocol.gni',
+ 'inspector_protocol.gypi',
+ 'lib/*',
+ 'pdl.py',
+ 'templates/*',
+]
+
+
+def RunCmd(cmd):
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ (stdoutdata, stderrdata) = p.communicate()
+ if p.returncode != 0:
+    raise Exception('%s: exit status %d' % (str(cmd), p.returncode))
+ return stdoutdata
+
+
+def CheckRepoIsClean(path, suffix):
+ os.chdir(path) # As a side effect this also checks for existence of the dir.
+  # If path isn't a git repo, this will throw an exception.
+ # And if it is a git repo and 'git status' has anything interesting to say,
+ # then it's not clean (uncommitted files etc.)
+ if len(RunCmd(['git', 'status', '--porcelain'])) != 0:
+ raise Exception('%s is not a clean git repo (run git status)' % path)
+ if not path.endswith(suffix):
+ raise Exception('%s does not end with /%s' % (path, suffix))
+
+
+def CheckRepoIsNotAtMasterBranch(path):
+ os.chdir(path)
+ stdout = RunCmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+ if stdout == 'master':
+ raise Exception('%s is at master branch - refusing to copy there.' % path)
+
+
+def CheckRepoIsV8Checkout(path):
+ os.chdir(path)
+ if (RunCmd(['git', 'config', '--get', 'remote.origin.url']).strip() !=
+ 'https://chromium.googlesource.com/v8/v8.git'):
+ raise Exception('%s is not a proper V8 checkout.' % path)
+
+
+def CheckRepoIsInspectorProtocolCheckout(path):
+ os.chdir(path)
+ if (RunCmd(['git', 'config', '--get', 'remote.origin.url']).strip() !=
+ 'https://chromium.googlesource.com/deps/inspector_protocol.git'):
+ raise Exception('%s is not a proper inspector_protocol checkout.' % path)
+
+
+def FindFilesToSyncIn(path):
+ files = []
+ for f in FILES_TO_SYNC:
+ files += glob.glob(os.path.join(path, f))
+ files = [os.path.relpath(f, path) for f in files]
+ return files
+
+
+def FilesAreEqual(path1, path2):
+ # We check for permissions (useful for executable scripts) and contents.
+ return (os.stat(path1).st_mode == os.stat(path2).st_mode and
+ open(path1).read() == open(path2).read())
+
+
+def GetHeadRevision(path):
+ os.chdir(path)
+ return RunCmd(['git', 'rev-parse', 'HEAD'])
+
+
+def main(argv):
+ parser = argparse.ArgumentParser(description=(
+ "Rolls the inspector_protocol project (upstream) into V8's "
+ "third_party (downstream)."))
+ parser.add_argument("--ip_src_upstream",
+ help="The inspector_protocol (upstream) tree.",
+ default="~/ip/src")
+ parser.add_argument("--v8_src_downstream",
+ help="The V8 src tree.",
+ default="~/v8/v8")
+ parser.add_argument('--reverse', dest='reverse', action='store_true',
+ help=("Whether to roll the opposite direction, from "
+ "V8 (downstream) to inspector_protocol "
+ "(upstream)."))
+ parser.set_defaults(reverse=False)
+ parser.add_argument('--force', dest='force', action='store_true',
+ help=("Whether to carry out the modifications "
+ "in the destination tree."))
+ parser.set_defaults(force=False)
+
+ args = parser.parse_args(argv)
+ upstream = os.path.normpath(os.path.expanduser(args.ip_src_upstream))
+ downstream = os.path.normpath(os.path.expanduser(
+ args.v8_src_downstream))
+ CheckRepoIsClean(upstream, '/src')
+ CheckRepoIsClean(downstream, '/v8')
+ CheckRepoIsInspectorProtocolCheckout(upstream)
+ CheckRepoIsV8Checkout(downstream)
+ # Check that the destination Git repo isn't at the master branch - it's
+ # generally a bad idea to check into the master branch, so we catch this
+ # common pilot error here early.
+ if args.reverse:
+ CheckRepoIsNotAtMasterBranch(upstream)
+ src_dir = os.path.join(downstream, 'third_party/inspector_protocol')
+ dest_dir = upstream
+ else:
+ CheckRepoIsNotAtMasterBranch(downstream)
+ src_dir = upstream
+ dest_dir = os.path.join(downstream, 'third_party/inspector_protocol')
+  print('Rolling %s into %s ...' % (src_dir, dest_dir))
+ src_files = set(FindFilesToSyncIn(src_dir))
+ dest_files = set(FindFilesToSyncIn(dest_dir))
+ to_add = [f for f in src_files if f not in dest_files]
+ to_delete = [f for f in dest_files if f not in src_files]
+ to_copy = [f for f in src_files
+ if (f in dest_files and not FilesAreEqual(
+ os.path.join(src_dir, f), os.path.join(dest_dir, f)))]
+  print('To add: %s' % to_add)
+  print('To delete: %s' % to_delete)
+  print('To copy: %s' % to_copy)
+  if not to_add and not to_delete and not to_copy:
+    print('Nothing to do. You\'re good.')
+    sys.exit(0)
+  if not args.force:
+    print('Rerun with --force if you wish the modifications to be done.')
+    sys.exit(1)
+  print('You said --force ... as you wish, modifying the destination.')
+ for f in to_add + to_copy:
+ shutil.copyfile(os.path.join(src_dir, f), os.path.join(dest_dir, f))
+ shutil.copymode(os.path.join(src_dir, f), os.path.join(dest_dir, f))
+ for f in to_delete:
+ os.unlink(os.path.join(dest_dir, f))
+ if not args.reverse:
+ head_revision = GetHeadRevision(upstream)
+ lines = open(os.path.join(dest_dir, 'README.v8')).readlines()
+ f = open(os.path.join(dest_dir, 'README.v8'), 'w')
+ for line in lines:
+ if line.startswith('Revision: '):
+ f.write('Revision: %s' % head_revision)
+ else:
+ f.write(line)
+ f.close()
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
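In practice the roll is a two-pass operation: run the script once without --force to see the add/delete/copy plan, then rerun with --force to apply it, e.g. python roll.py --ip_src_upstream ~/ip/src --v8_src_downstream ~/v8/v8 --force (both paths shown are the script's own defaults); add --reverse to roll from V8 back into inspector_protocol.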
diff --git a/deps/v8/third_party/inspector_protocol/templates/Exported_h.template b/deps/v8/third_party/inspector_protocol/templates/Exported_h.template
index 3d36ecffae..765f6c2135 100644
--- a/deps/v8/third_party/inspector_protocol/templates/Exported_h.template
+++ b/deps/v8/third_party/inspector_protocol/templates/Exported_h.template
@@ -1,4 +1,4 @@
-// This file is generated
+// This file is generated by Exported_h.template.
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
@@ -15,6 +15,17 @@
{% for namespace in config.protocol.namespace %}
namespace {{namespace}} {
{% endfor %}
+
+#ifndef {{"_".join(config.protocol.namespace)}}_exported_api_h
+#define {{"_".join(config.protocol.namespace)}}_exported_api_h
+class {{config.exported.export_macro}} Exported {
+public:
+ virtual {{config.exported.string_out}} toJSONString() const = 0;
+ virtual void writeBinary(std::vector<uint8_t>* out) const = 0;
+ virtual ~Exported() { }
+};
+#endif // !defined({{"_".join(config.protocol.namespace)}}_exported_api_h)
+
namespace {{domain.domain}} {
namespace API {
@@ -48,11 +59,10 @@ namespace {{param.name | to_title_case}}Enum {
{% for type in domain.types %}
{% if not (type.type == "object") or not ("properties" in type) or not protocol.is_exported(domain.domain, type.id) %}{% continue %}{% endif %}
-class {{config.exported.export_macro}} {{type.id}} {
+class {{config.exported.export_macro}} {{type.id}} : public Exported {
public:
- virtual {{config.exported.string_out}} toJSONString() const = 0;
- virtual ~{{type.id}}() { }
static std::unique_ptr<protocol::{{domain.domain}}::API::{{type.id}}> fromJSONString(const {{config.exported.string_in}}& json);
+ static std::unique_ptr<protocol::{{domain.domain}}::API::{{type.id}}> fromBinary(const uint8_t* data, size_t length);
};
{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/templates/Imported_h.template b/deps/v8/third_party/inspector_protocol/templates/Imported_h.template
index 4c9d24bd5f..f2e576a9c4 100644
--- a/deps/v8/third_party/inspector_protocol/templates/Imported_h.template
+++ b/deps/v8/third_party/inspector_protocol/templates/Imported_h.template
@@ -1,4 +1,4 @@
-// This file is generated
+// This file is generated by Imported_h.template.
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
@@ -17,6 +17,37 @@
{% for namespace in config.protocol.namespace %}
namespace {{namespace}} {
{% endfor %}
+
+using Exported = {{"::".join(config.imported.namespace)}}::Exported;
+
+#ifndef {{"_".join(config.protocol.namespace)}}_imported_imported_h
+#define {{"_".join(config.protocol.namespace)}}_imported_imported_h
+
+class {{config.lib.export_macro}} ImportedValue : public Value {
+public:
+ static std::unique_ptr<ImportedValue> fromExported(const Exported* value) {
+ return std::unique_ptr<ImportedValue>(new ImportedValue(value));
+ }
+
+ void writeJSON(StringBuilder* output) const override {
+ auto json = m_exported->toJSONString();
+ String local_json = ({{config.imported.from_imported_string % "std::move(json)"}});
+ StringUtil::builderAppend(*output, local_json);
+ }
+ void writeBinary(std::vector<uint8_t>* output) const override {
+ m_exported->writeBinary(output);
+ }
+ std::unique_ptr<Value> clone() const override {
+ return std::unique_ptr<Value>(new ImportedValue(m_exported));
+ }
+
+private:
+ explicit ImportedValue(const Exported* exported) : Value(TypeImported), m_exported(exported) { }
+ const Exported* m_exported;
+};
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_imported_imported_h)
+
{% for type in domain.types %}
{% if not (type.type == "object") or not ("properties" in type) or not protocol.is_imported(domain.domain, type.id) %}{% continue %}{% endif %}
@@ -28,17 +59,18 @@ struct ValueConversions<{{"::".join(config.imported.namespace)}}::{{domain.domai
errors->addError("value expected");
return nullptr;
}
- String json = value->serialize();
- auto result = {{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}::fromJSONString({{config.imported.to_imported_string % "json"}});
+
+ std::vector<uint8_t> binary;
+ value->writeBinary(&binary);
+ auto result = {{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}::fromBinary(binary.data(), binary.size());
if (!result)
errors->addError("cannot parse");
return result;
}
- static std::unique_ptr<protocol::Value> toValue(const {{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}* value)
+ static std::unique_ptr<protocol::Value> toValue(const {{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}* exported)
{
- auto json = value->toJSONString();
- return SerializedValue::create({{config.imported.from_imported_string % "std::move(json)"}});
+ return ImportedValue::fromExported(exported);
}
static std::unique_ptr<protocol::Value> toValue(const std::unique_ptr<{{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}>& value)
@@ -46,6 +78,7 @@ struct ValueConversions<{{"::".join(config.imported.namespace)}}::{{domain.domai
return toValue(value.get());
}
};
+
{% endfor %}
{% for namespace in config.protocol.namespace %}
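The net effect of ImportedValue is that imported types now cross the embedder boundary as CBOR rather than JSON text: toValue() wraps the exported object so serialization is deferred to its writeBinary (or toJSONString on the JSON path), and fromValue() serializes the protocol Value to CBOR and feeds the generated fromBinary factory, avoiding a JSON parse round trip.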
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
index f99ce5f0d9..4ef60a6ea2 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
@@ -1,4 +1,4 @@
-// This file is generated
+// This file is generated by TypeBuilder_cpp.template.
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
@@ -101,10 +101,15 @@ std::unique_ptr<{{type.id}}> {{type.id}}::clone() const
{{config.exported.string_out}} {{type.id}}::toJSONString() const
{
- String json = toValue()->serialize();
+ String json = toValue()->serializeToJSON();
return {{config.exported.to_string_out % "json"}};
}
+void {{type.id}}::writeBinary(std::vector<uint8_t>* out) const
+{
+ toValue()->writeBinary(out);
+}
+
// static
std::unique_ptr<API::{{type.id}}> API::{{type.id}}::fromJSONString(const {{config.exported.string_in}}& json)
{
@@ -114,6 +119,17 @@ std::unique_ptr<API::{{type.id}}> API::{{type.id}}::fromJSONString(const {{confi
return nullptr;
return protocol::{{domain.domain}}::{{type.id}}::fromValue(value.get(), &errors);
}
+
+// static
+std::unique_ptr<API::{{type.id}}> API::{{type.id}}::fromBinary(const uint8_t* data, size_t length)
+{
+ ErrorSupport errors;
+ std::unique_ptr<Value> value = Value::parseBinary(data, length);
+ if (!value)
+ return nullptr;
+ return protocol::{{domain.domain}}::{{type.id}}::fromValue(value.get(), &errors);
+}
+
{% endif %}
{% endfor %}
@@ -187,9 +203,14 @@ void Frontend::flush()
m_frontendChannel->flushProtocolNotifications();
}
-void Frontend::sendRawNotification(const String& notification)
+void Frontend::sendRawNotification(String notification)
+{
+ m_frontendChannel->sendProtocolNotification(InternalRawNotification::fromJSON(std::move(notification)));
+}
+
+void Frontend::sendRawNotification(std::vector<uint8_t> notification)
{
- m_frontendChannel->sendProtocolNotification(InternalRawNotification::create(notification));
+ m_frontendChannel->sendProtocolNotification(InternalRawNotification::fromBinary(std::move(notification)));
}
// --------------------- Dispatcher.
@@ -210,11 +231,11 @@ public:
}
~DispatcherImpl() override { }
bool canDispatch(const String& method) override;
- void dispatch(int callId, const String& method, const String& message, std::unique_ptr<protocol::DictionaryValue> messageObject) override;
+ void dispatch(int callId, const String& method, const ProtocolMessage& message, std::unique_ptr<protocol::DictionaryValue> messageObject) override;
std::unordered_map<String, String>& redirects() { return m_redirects; }
protected:
- using CallHandler = void (DispatcherImpl::*)(int callId, const String& method, const String& message, std::unique_ptr<DictionaryValue> messageObject, ErrorSupport* errors);
+ using CallHandler = void (DispatcherImpl::*)(int callId, const String& method, const ProtocolMessage& message, std::unique_ptr<DictionaryValue> messageObject, ErrorSupport* errors);
using DispatchMap = std::unordered_map<String, CallHandler>;
DispatchMap m_dispatchMap;
std::unordered_map<String, String> m_redirects;
@@ -222,7 +243,7 @@ protected:
{% for command in domain.commands %}
{% if "redirect" in command %}{% continue %}{% endif %}
{% if not protocol.generate_command(domain.domain, command.name) %}{% continue %}{% endif %}
- void {{command.name}}(int callId, const String& method, const String& message, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport*);
+ void {{command.name}}(int callId, const String& method, const ProtocolMessage& message, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport*);
{% endfor %}
Backend* m_backend;
@@ -232,7 +253,7 @@ bool DispatcherImpl::canDispatch(const String& method) {
return m_dispatchMap.find(method) != m_dispatchMap.end();
}
-void DispatcherImpl::dispatch(int callId, const String& method, const String& message, std::unique_ptr<protocol::DictionaryValue> messageObject)
+void DispatcherImpl::dispatch(int callId, const String& method, const ProtocolMessage& message, std::unique_ptr<protocol::DictionaryValue> messageObject)
{
std::unordered_map<String, CallHandler>::iterator it = m_dispatchMap.find(method);
DCHECK(it != m_dispatchMap.end());
@@ -248,7 +269,7 @@ void DispatcherImpl::dispatch(int callId, const String& method, const String& me
class {{command_name_title}}CallbackImpl : public Backend::{{command_name_title}}Callback, public DispatcherBase::Callback {
public:
- {{command_name_title}}CallbackImpl(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId, const String& method, const String& message)
+ {{command_name_title}}CallbackImpl(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId, const String& method, const ProtocolMessage& message)
: DispatcherBase::Callback(std::move(backendImpl), callId, method, message) { }
void sendSuccess(
@@ -286,7 +307,7 @@ public:
};
{% endif %}
-void DispatcherImpl::{{command.name}}(int callId, const String& method, const String& message, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport* errors)
+void DispatcherImpl::{{command.name}}(int callId, const String& method, const ProtocolMessage& message, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport* errors)
{
{% if "parameters" in command %}
// Prepare input parameters.
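
Across the dispatcher hunks above, const String& message becomes const ProtocolMessage&. ProtocolMessage is presumably defined elsewhere in this patch; a plausible reading is a small carrier holding the raw message in either transport encoding, so one dispatch() signature serves both the JSON and the binary transports. A sketch with assumed field names, for illustration only:

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical shape; the field names are assumptions, not the real
// definition from the patch.
struct ProtocolMessage {
  std::string json;             // set when the message arrived as text
  std::vector<uint8_t> binary;  // set when it arrived as bytes
};

bool isBinary(const ProtocolMessage& m) { return !m.binary.empty(); }

int main() {
  ProtocolMessage m;
  m.json = "{\"id\":1,\"method\":\"Runtime.enable\"}";
  return isBinary(m) ? 1 : 0;
}
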
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
index 291b2a7542..c670d65c46 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
@@ -1,4 +1,4 @@
-// This file is generated
+// This file is generated by TypeBuilder_h.template.
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
@@ -100,10 +100,13 @@ public:
{% endfor %}
std::unique_ptr<protocol::DictionaryValue> toValue() const;
- String serialize() override { return toValue()->serialize(); }
+ String serializeToJSON() override { return toValue()->serializeToJSON(); }
+ std::vector<uint8_t> serializeToBinary() override { return toValue()->serializeToBinary(); }
+ String toJSON() const { return toValue()->toJSONString(); }
std::unique_ptr<{{type.id}}> clone() const;
{% if protocol.is_exported(domain.domain, type.id) %}
{{config.exported.string_out}} toJSONString() const override;
+ void writeBinary(std::vector<uint8_t>* out) const override;
{% endif %}
template<int STATE>
@@ -266,7 +269,8 @@ public:
{% endfor %}
void flush();
- void sendRawNotification(const String&);
+ void sendRawNotification(String);
+ void sendRawNotification(std::vector<uint8_t>);
private:
FrontendChannel* m_frontendChannel;
};
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index 791f88e009..a751083575 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -20,79 +20,147 @@ namespace array {
// <- FastSmiOrObject
// <- FastDouble
// <- Dictionary
- //
- // The only exception is TempArrayElements, since it does not describe the
- // "elements" of the receiver, but instead is used as an "adaptor" so
- // GallopLeft/GallopRight can be reused with the temporary array.
const kGenericElementsAccessorId: Smi = 0;
const kFastElementsAccessorId: Smi = 1;
- // This is a special type, used to access the temporary array which is always
- // PACKED_ELEMENTS. As a result, we do not need a sanity check for it,
- // otherwise we might wrongly bail to the slow path.
- type TempArrayElements;
-
- // The following index constants describe the layout of the sortState.
- // The sortState is currently implemented as a FixedArray of
- // size kSortStateSize.
-
- // The receiver of the Array.p.sort call.
- const kReceiverIdx: constexpr int31 = 0;
-
- // The initial map and length of the receiver. After calling into JS, these
- // are reloaded and checked. If they changed we bail to the baseline
- // GenericElementsAccessor.
- const kInitialReceiverMapIdx: constexpr int31 = 1;
- const kInitialReceiverLengthIdx: constexpr int31 = 2;
-
- // If the user provided a comparison function, it is stored here.
- const kUserCmpFnIdx: constexpr int31 = 3;
-
- // Function pointer to the comparison function. This can either be a builtin
- // that calls the user-provided comparison function or "SortDefault", which
- // uses ToString and a lexicographical compare.
- const kSortComparePtrIdx: constexpr int31 = 4;
-
- // The following three function pointer represent a Accessor/Path.
- // These are used to Load/Store elements and to check whether to bail to the
- // baseline GenericElementsAccessor.
- const kLoadFnIdx: constexpr int31 = 5;
- const kStoreFnIdx: constexpr int31 = 6;
- const kCanUseSameAccessorFnIdx: constexpr int31 = 7;
-
- // If this field has the value kFailure, we need to bail to the baseline
- // GenericElementsAccessor.
- const kBailoutStatusIdx: constexpr int31 = 8;
-
- // This controls when we get *into* galloping mode. It's initialized to
- // kMinGallop. mergeLow and mergeHigh tend to nudge it higher for random data,
- // and lower for highly structured data.
- const kMinGallopIdx: constexpr int31 = 9;
-
- // A stack of sortState[kPendingRunsSizeIdx] pending runs yet to be merged.
- // Run #i starts at sortState[kPendingRunsIdx][2 * i] and extends for
- // sortState[kPendingRunsIdx][2 * i + 1] elements:
- //
- // [..., base (i-1), length (i-1), base i, length i]
- //
- // It's always true (so long as the indices are in bounds) that
- //
- // base of run #i + length of run #i == base of run #i + 1
- //
- const kPendingRunsSizeIdx: constexpr int31 = 10;
- const kPendingRunsIdx: constexpr int31 = 11;
+ class SortState {
+ Compare(implicit context: Context)(x: Object, y: Object): Number {
+ const sortCompare: CompareBuiltinFn = this.sortComparePtr;
+ return sortCompare(context, this.userCmpFn, x, y);
+ }
- // The current size of the temporary array.
- const kTempArraySizeIdx: constexpr int31 = 12;
+ CheckAccessor(implicit context: Context)() labels Bailout {
+ const canUseSameAccessorFn: CanUseSameAccessorFn =
+ this.canUseSameAccessorFn;
- // Pointer to the temporary array.
- const kTempArrayIdx: constexpr int31 = 13;
+ if (!canUseSameAccessorFn(
+ context, this.receiver, this.initialReceiverMap,
+ this.initialReceiverLength)) {
+ goto Bailout;
+ }
+ }
- // Contains a Smi constant describing which accessors to use. This is used
- // for reloading the right elements and for a sanity check.
- const kAccessorIdx: constexpr int31 = 14;
+ // The receiver of the Array.p.sort call.
+ receiver: JSReceiver;
+
+ // The initial map and length of the receiver. After calling into JS, these
+ // are reloaded and checked. If they changed we bail to the baseline
+ // GenericElementsAccessor.
+ initialReceiverMap: Map;
+ initialReceiverLength: Number;
+
+ // If the user provided a comparison function, it is stored here.
+ userCmpFn: Undefined | Callable;
+
+ // Function pointer to the comparison function. This can either be a builtin
+ // that calls the user-provided comparison function or "SortDefault", which
+ // uses ToString and a lexicographical compare.
+ sortComparePtr: CompareBuiltinFn;
+
+  // The following three function pointers represent an Accessor/Path.
+ // These are used to Load/Store elements and to check whether to bail to the
+ // baseline GenericElementsAccessor.
+ loadFn: LoadFn;
+ storeFn: StoreFn;
+ canUseSameAccessorFn: CanUseSameAccessorFn;
+
+ // If this field has the value kFailure, we need to bail to the baseline
+ // GenericElementsAccessor.
+ bailoutStatus: Smi;
+
+ // This controls when we get *into* galloping mode. It's initialized to
+ // kMinGallop. mergeLow and mergeHigh tend to nudge it higher for random
+ // data, and lower for highly structured data.
+ minGallop: Smi;
+
+ // A stack of sortState.pendingRunsSize pending runs yet to be merged.
+ // Run #i starts at sortState.pendingRuns[2 * i] and extends for
+ // sortState.pendingRuns[2 * i + 1] elements:
+ //
+ // [..., base (i-1), length (i-1), base i, length i]
+ //
+ // It's always true (so long as the indices are in bounds) that
+ //
+  //  base of run #i + length of run #i == base of run #(i+1)
+ //
+ pendingRunsSize: Smi;
+ pendingRuns: FixedArray;
+
+ // This is a copy of the original array/object that needs sorting.
+ // workArray is never exposed to user-code, and as such cannot change
+ // shape and won't be left-trimmed.
+ workArray: FixedArray;
+
+ // Pointer to the temporary array.
+ tempArray: FixedArray;
+
+ // A Smi constant describing which accessors to use. This is used
+ // for reloading the right elements and for a sanity check.
+ accessor: Smi;
+ }
+
+ macro NewSortState(implicit context: Context)(
+ receiver: JSReceiver, comparefn: Undefined | Callable,
+ initialReceiverLength: Number, sortLength: Smi,
+ forceGeneric: constexpr bool): SortState {
+ const sortComparePtr =
+ comparefn != Undefined ? SortCompareUserFn : SortCompareDefault;
+ const map = receiver.map;
+ let accessor = kGenericElementsAccessorId;
+ let loadFn = Load<GenericElementsAccessor>;
+ let storeFn = Store<GenericElementsAccessor>;
+ let canUseSameAccessorFn = CanUseSameAccessor<GenericElementsAccessor>;
- const kSortStateSize: intptr = 15;
+ try {
+ if constexpr (!forceGeneric) {
+ GotoIfForceSlowPath() otherwise Slow;
+ let a: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+
+ const elementsKind: ElementsKind = map.elements_kind;
+ accessor = kFastElementsAccessorId;
+ if (IsDoubleElementsKind(elementsKind)) {
+ loadFn = Load<FastDoubleElements>;
+ storeFn = Store<FastDoubleElements>;
+ canUseSameAccessorFn = CanUseSameAccessor<FastDoubleElements>;
+ } else if (elementsKind == PACKED_SMI_ELEMENTS) {
+ loadFn = Load<FastPackedSmiElements>;
+ storeFn = Store<FastPackedSmiElements>;
+ canUseSameAccessorFn = CanUseSameAccessor<FastPackedSmiElements>;
+ } else {
+ loadFn = Load<FastSmiOrObjectElements>;
+ storeFn = Store<FastSmiOrObjectElements>;
+ canUseSameAccessorFn = CanUseSameAccessor<FastSmiOrObjectElements>;
+ }
+ }
+ }
+ label Slow {
+ if (map.elements_kind == DICTIONARY_ELEMENTS && IsExtensibleMap(map) &&
+ !IsCustomElementsReceiverInstanceType(map.instance_type)) {
+ accessor = kFastElementsAccessorId;
+ loadFn = Load<DictionaryElements>;
+ storeFn = Store<DictionaryElements>;
+ canUseSameAccessorFn = CanUseSameAccessor<DictionaryElements>;
+ }
+ }
+
+ return new SortState{
+ receiver,
+ map,
+ initialReceiverLength,
+ comparefn,
+ sortComparePtr,
+ loadFn,
+ storeFn,
+ canUseSameAccessorFn,
+ kSuccess,
+ kMinGallopWins,
+ 0,
+ AllocateZeroedFixedArray(Convert<intptr>(kMaxMergePending)),
+ AllocateZeroedFixedArray(Convert<intptr>(sortLength)),
+ kEmptyFixedArray,
+ accessor
+ };
+ }
const kFailure: Smi = -1;
const kSuccess: Smi = 0;
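
The hunk above is the heart of the Torque change: fifteen numbered slots in a raw FixedArray (kReceiverIdx through kAccessorIdx, kSortStateSize = 15) become fields of a typed SortState class, so every access that used to be an indexed read plus an UnsafeCast is now a checked field load. A rough C++ analogue of the before/after (toy field subset; the full set is in the hunk):

#include <cassert>
#include <vector>

// Before: one untyped slot array addressed by index constants; every
// read needs a cast and the layout is enforced only by convention.
enum SlotIndex { kBailoutStatusIdx = 8, kMinGallopIdx = 9, kSlotCount = 15 };
using UntypedSortState = std::vector<void*>;

// After: a typed aggregate. The type system, not a convention, now
// enforces what each field holds.
struct SortState {
  int bailoutStatus = 0;
  int minGallop = 7;
  int pendingRunsSize = 0;
  std::vector<int> pendingRuns;
};

int main() {
  SortState s;
  s.bailoutStatus = -1;  // no magic index, no cast
  assert(s.bailoutStatus == -1);
  return 0;
}
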
@@ -112,8 +180,8 @@ namespace array {
// it is first requested, but it always has at least this size.
const kSortStateTempSize: Smi = 32;
- type LoadFn = builtin(Context, FixedArray, HeapObject, Smi) => Object;
- type StoreFn = builtin(Context, FixedArray, HeapObject, Smi, Object) => Smi;
+ type LoadFn = builtin(Context, SortState, Smi) => Object;
+ type StoreFn = builtin(Context, SortState, Smi, Object) => Smi;
type CanUseSameAccessorFn = builtin(Context, JSReceiver, Object, Number) =>
Boolean;
type CompareBuiltinFn = builtin(Context, Object, Object, Object) => Number;
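
LoadFn and StoreFn drop the separate elements/HeapObject parameter and take the SortState itself; the state carries the receiver, and the right builtin pair for the receiver's elements kind is installed once in NewSortState. A standalone sketch of that function-pointer dispatch (toy element type: int):

#include <cstdio>

struct SortState;

// Analogue of the Torque builtin pointer types above.
using LoadFn = int (*)(SortState&, int);
using StoreFn = void (*)(SortState&, int, int);

struct SortState {
  int elems[4] = {3, 1, 2, 0};
  LoadFn load = nullptr;   // selected once, by elements kind
  StoreFn store = nullptr;
};

int LoadFast(SortState& s, int i) { return s.elems[i]; }
void StoreFast(SortState& s, int i, int v) { s.elems[i] = v; }

int main() {
  SortState s;
  s.load = LoadFast;   // e.g. the fast packed-Smi pair
  s.store = StoreFast;
  s.store(s, 0, s.load(s, 1));  // indirect calls, like the builtins
  std::printf("%d\n", s.elems[0]);  // prints 1
  return 0;
}
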
@@ -125,23 +193,22 @@ namespace array {
// through a hole.
transitioning builtin Load<ElementsAccessor: type>(
- context: Context, sortState: FixedArray, elements: HeapObject,
- index: Smi): Object {
- return GetProperty(elements, index);
+ context: Context, sortState: SortState, index: Smi): Object {
+ return GetProperty(sortState.receiver, index);
}
Load<FastPackedSmiElements>(
- context: Context, sortState: FixedArray, elements: HeapObject,
- index: Smi): Object {
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- return elems[index];
+ context: Context, sortState: SortState, index: Smi): Object {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ return elements.objects[index];
}
Load<FastSmiOrObjectElements>(
- context: Context, sortState: FixedArray, elements: HeapObject,
- index: Smi): Object {
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- const result: Object = elems[index];
+ context: Context, sortState: SortState, index: Smi): Object {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ const result: Object = elements.objects[index];
if (IsTheHole(result)) {
// The pre-processing step removed all holes by compacting all elements
// at the start of the array. Finding a hole means the cmp function or
@@ -151,13 +218,12 @@ namespace array {
return result;
}
- Load<FastDoubleElements>(
- context: Context, sortState: FixedArray, elements: HeapObject,
- index: Smi): Object {
+ Load<FastDoubleElements>(context: Context, sortState: SortState, index: Smi):
+ Object {
try {
- const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
- const value: float64 =
- LoadDoubleWithHoleCheck(elems, index) otherwise Bailout;
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedDoubleArray>(object.elements);
+ const value = LoadDoubleWithHoleCheck(elements, index) otherwise Bailout;
return AllocateHeapNumberWithValue(value);
}
label Bailout {
@@ -168,70 +234,58 @@ namespace array {
}
}
- Load<DictionaryElements>(
- context: Context, sortState: FixedArray, elements: HeapObject,
- index: Smi): Object {
+ Load<DictionaryElements>(context: Context, sortState: SortState, index: Smi):
+ Object {
try {
- const dictionary: NumberDictionary =
- UnsafeCast<NumberDictionary>(elements);
- const intptrIndex: intptr = Convert<intptr>(index);
- const value: Object =
- BasicLoadNumberDictionaryElement(dictionary, intptrIndex)
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const dictionary = UnsafeCast<NumberDictionary>(object.elements);
+ const intptrIndex = Convert<intptr>(index);
+ return BasicLoadNumberDictionaryElement(dictionary, intptrIndex)
otherwise Bailout, Bailout;
- return value;
}
label Bailout {
return Failure(sortState);
}
}
- Load<TempArrayElements>(
- context: Context, sortState: FixedArray, elements: HeapObject,
- index: Smi): Object {
- assert(IsFixedArray(elements));
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- return elems[index];
- }
-
transitioning builtin Store<ElementsAccessor: type>(
- context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
- value: Object): Smi {
- SetProperty(elements, index, value);
+ context: Context, sortState: SortState, index: Smi, value: Object): Smi {
+ SetProperty(sortState.receiver, index, value);
return kSuccess;
}
Store<FastPackedSmiElements>(
- context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
- value: Object): Smi {
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- StoreFixedArrayElementSmi(elems, index, value, SKIP_WRITE_BARRIER);
+ context: Context, sortState: SortState, index: Smi, value: Object): Smi {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ StoreFixedArrayElementSmi(elements, index, value, SKIP_WRITE_BARRIER);
return kSuccess;
}
Store<FastSmiOrObjectElements>(
- context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
- value: Object): Smi {
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- elems[index] = value;
+ context: Context, sortState: SortState, index: Smi, value: Object): Smi {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ elements.objects[index] = value;
return kSuccess;
}
Store<FastDoubleElements>(
- context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
- value: Object): Smi {
- const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
- const heapVal: HeapNumber = UnsafeCast<HeapNumber>(value);
+ context: Context, sortState: SortState, index: Smi, value: Object): Smi {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedDoubleArray>(object.elements);
+ const heapVal = UnsafeCast<HeapNumber>(value);
// Make sure we do not store signalling NaNs into double arrays.
- const val: float64 = Float64SilenceNaN(Convert<float64>(heapVal));
- StoreFixedDoubleArrayElementWithSmiIndex(elems, index, val);
+ const val = Float64SilenceNaN(Convert<float64>(heapVal));
+ StoreFixedDoubleArrayElementSmi(elements, index, val);
return kSuccess;
}
Store<DictionaryElements>(
- context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
- value: Object): Smi {
- const dictionary: NumberDictionary = UnsafeCast<NumberDictionary>(elements);
- const intptrIndex: intptr = Convert<intptr>(index);
+ context: Context, sortState: SortState, index: Smi, value: Object): Smi {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const dictionary = UnsafeCast<NumberDictionary>(object.elements);
+ const intptrIndex = Convert<intptr>(index);
try {
BasicStoreNumberDictionaryElement(dictionary, intptrIndex, value)
otherwise Fail, Fail, ReadOnly;
@@ -240,36 +294,28 @@ namespace array {
label ReadOnly {
// We cannot write to read-only data properties. Throw the same TypeError
// as SetProperty would.
- const receiver: JSReceiver = GetReceiver(sortState);
+ const receiver = sortState.receiver;
ThrowTypeError(
- context, kStrictReadOnlyProperty, index, Typeof(receiver), receiver);
+ kStrictReadOnlyProperty, index, Typeof(receiver), receiver);
}
label Fail {
return Failure(sortState);
}
}
- Store<TempArrayElements>(
- context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
- value: Object): Smi {
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- elems[index] = value;
- return kSuccess;
- }
-
UnsafeCast<CompareBuiltinFn>(implicit context: Context)(o: Object):
CompareBuiltinFn {
- return %RawObjectCast<CompareBuiltinFn>(o);
+ return %RawDownCast<CompareBuiltinFn>(o);
}
UnsafeCast<LoadFn>(implicit context: Context)(o: Object): LoadFn {
- return %RawObjectCast<LoadFn>(o);
+ return %RawDownCast<LoadFn>(o);
}
UnsafeCast<StoreFn>(implicit context: Context)(o: Object): StoreFn {
- return %RawObjectCast<StoreFn>(o);
+ return %RawDownCast<StoreFn>(o);
}
UnsafeCast<CanUseSameAccessorFn>(implicit context: Context)(o: Object):
CanUseSameAccessorFn {
- return %RawObjectCast<CanUseSameAccessorFn>(o);
+ return %RawDownCast<CanUseSameAccessorFn>(o);
}
builtin SortCompareDefault(
@@ -281,10 +327,10 @@ namespace array {
}
// 5. Let xString be ? ToString(x).
- const xString: String = ToString_Inline(context, x);
+ const xString = ToString_Inline(context, x);
// 6. Let yString be ? ToString(y).
- const yString: String = ToString_Inline(context, y);
+ const yString = ToString_Inline(context, y);
// 7. Let xSmaller be the result of performing
// Abstract Relational Comparison xString < yString.
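
SortCompareDefault implements the spec's comparator-less path: both operands go through ToString and the string forms are compared, which is why numbers do not sort numerically by default. A self-contained illustration of that ordering:

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<int> a = {10, 9, 1};
  // Compare the string forms, as the default sort comparator does.
  std::sort(a.begin(), a.end(), [](int x, int y) {
    return std::to_string(x) < std::to_string(y);
  });
  for (int v : a) std::printf("%d ", v);  // prints: 1 10 9
  std::printf("\n");
  return 0;
}
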
@@ -303,11 +349,10 @@ namespace array {
transitioning builtin SortCompareUserFn(
context: Context, comparefn: Object, x: Object, y: Object): Number {
assert(comparefn != Undefined);
- const cmpfn: Callable = UnsafeCast<Callable>(comparefn);
+ const cmpfn = UnsafeCast<Callable>(comparefn);
// a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
- const v: Number =
- ToNumber_Inline(context, Call(context, cmpfn, Undefined, x, y));
+ const v = ToNumber_Inline(context, Call(context, cmpfn, Undefined, x, y));
// b. If v is NaN, return +0.
if (NumberIsNaN(v)) return 0;
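
Per step b above, a user comparator that returns NaN is treated as "equal": the NaN collapses to +0 instead of poisoning the order. A tiny sketch of that normalization:

#include <cmath>
#include <limits>

// If ToNumber(comparator result) is NaN, the pair compares as equal.
double normalizeComparatorResult(double v) {
  return std::isnan(v) ? +0.0 : v;
}

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  return normalizeComparatorResult(nan) == 0.0 ? 0 : 1;
}
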
@@ -343,147 +388,73 @@ namespace array {
return SelectBooleanConstant(obj.map == initialReceiverMap);
}
- macro CallCompareFn(
- context: Context, sortState: FixedArray, x: Object, y: Object): Number
- labels Bailout {
- const userCmpFn: Object = sortState[kUserCmpFnIdx];
- const sortCompare: CompareBuiltinFn =
- UnsafeCast<CompareBuiltinFn>(sortState[kSortComparePtrIdx]);
-
- const result: Number = sortCompare(context, userCmpFn, x, y);
-
- const receiver: JSReceiver = GetReceiver(sortState);
- const initialReceiverMap: Object = sortState[kInitialReceiverMapIdx];
- const initialReceiverLength: Number = GetInitialReceiverLength(sortState);
- const canUseSameAccessorFn: CanUseSameAccessorFn =
- GetCanUseSameAccessorFn(sortState);
-
- if (!canUseSameAccessorFn(
- context, receiver, initialReceiverMap, initialReceiverLength)) {
- goto Bailout;
- }
- return result;
- }
-
- // Reloading elements after returning from JS is needed since left-trimming
- // might have occurred. This means we cannot leave any pointer to the elements
- // backing store on the stack (since it would point to the filler object).
- // TODO(v8:7995): Remove reloading once left-trimming is removed.
- macro ReloadElements(implicit context: Context)(sortState: FixedArray):
- HeapObject {
- const receiver: JSReceiver = GetReceiver(sortState);
- if (sortState[kAccessorIdx] == kGenericElementsAccessorId) return receiver;
-
- const object: JSObject = UnsafeCast<JSObject>(receiver);
- return object.elements;
- }
-
- macro GetInitialReceiverLength(implicit context:
- Context)(sortState: FixedArray): Number {
- return UnsafeCast<Number>(sortState[kInitialReceiverLengthIdx]);
- }
-
- macro GetLoadFn(implicit context: Context)(sortState: FixedArray): LoadFn {
- return UnsafeCast<LoadFn>(sortState[kLoadFnIdx]);
- }
-
- macro GetStoreFn(implicit context: Context)(sortState: FixedArray): StoreFn {
- return UnsafeCast<StoreFn>(sortState[kStoreFnIdx]);
- }
-
- macro GetCanUseSameAccessorFn(implicit context: Context)(
- sortState: FixedArray): CanUseSameAccessorFn {
- return UnsafeCast<CanUseSameAccessorFn>(
- sortState[kCanUseSameAccessorFnIdx]);
- }
-
- macro GetReceiver(implicit context: Context)(sortState: FixedArray):
- JSReceiver {
- return UnsafeCast<JSReceiver>(sortState[kReceiverIdx]);
- }
-
- // Returns the temporary array without changing its size.
- macro GetTempArray(implicit context: Context)(sortState: FixedArray):
- FixedArray {
- return UnsafeCast<FixedArray>(sortState[kTempArrayIdx]);
- }
-
// Re-loading the stack-size is done in a few places. The small macro allows
// for easier invariant checks at all use sites.
- macro GetPendingRunsSize(implicit context: Context)(sortState: FixedArray):
+ macro GetPendingRunsSize(implicit context: Context)(sortState: SortState):
Smi {
- assert(TaggedIsSmi(sortState[kPendingRunsSizeIdx]));
- const stackSize: Smi = UnsafeCast<Smi>(sortState[kPendingRunsSizeIdx]);
-
+ const stackSize: Smi = sortState.pendingRunsSize;
assert(stackSize >= 0);
return stackSize;
}
- macro SetPendingRunsSize(sortState: FixedArray, value: Smi) {
- sortState[kPendingRunsSizeIdx] = value;
- }
-
macro GetPendingRunBase(implicit context:
Context)(pendingRuns: FixedArray, run: Smi): Smi {
- return UnsafeCast<Smi>(pendingRuns[run << 1]);
+ return UnsafeCast<Smi>(pendingRuns.objects[run << 1]);
}
macro SetPendingRunBase(pendingRuns: FixedArray, run: Smi, value: Smi) {
- pendingRuns[run << 1] = value;
+ pendingRuns.objects[run << 1] = value;
}
macro GetPendingRunLength(implicit context: Context)(
pendingRuns: FixedArray, run: Smi): Smi {
- return UnsafeCast<Smi>(pendingRuns[(run << 1) + 1]);
+ return UnsafeCast<Smi>(pendingRuns.objects[(run << 1) + 1]);
}
macro SetPendingRunLength(pendingRuns: FixedArray, run: Smi, value: Smi) {
- pendingRuns[(run << 1) + 1] = value;
+ pendingRuns.objects[(run << 1) + 1] = value;
}
macro PushRun(implicit context:
- Context)(sortState: FixedArray, base: Smi, length: Smi) {
+ Context)(sortState: SortState, base: Smi, length: Smi) {
assert(GetPendingRunsSize(sortState) < kMaxMergePending);
const stackSize: Smi = GetPendingRunsSize(sortState);
- const pendingRuns: FixedArray =
- UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]);
+ const pendingRuns: FixedArray = sortState.pendingRuns;
SetPendingRunBase(pendingRuns, stackSize, base);
SetPendingRunLength(pendingRuns, stackSize, length);
- SetPendingRunsSize(sortState, stackSize + 1);
+ sortState.pendingRunsSize = stackSize + 1;
}
// Returns the temporary array and makes sure that it is big enough.
// TODO(szuend): Implement a better re-size strategy.
macro GetTempArray(implicit context: Context)(
- sortState: FixedArray, requestedSize: Smi): FixedArray {
+ sortState: SortState, requestedSize: Smi): FixedArray {
const minSize: Smi = SmiMax(kSortStateTempSize, requestedSize);
- const currentSize: Smi = UnsafeCast<Smi>(sortState[kTempArraySizeIdx]);
+ const currentSize: Smi = UnsafeCast<Smi>(sortState.tempArray.length);
if (currentSize >= minSize) {
- return GetTempArray(sortState);
+ return sortState.tempArray;
}
const tempArray: FixedArray =
AllocateZeroedFixedArray(Convert<intptr>(minSize));
- sortState[kTempArraySizeIdx] = minSize;
- sortState[kTempArrayIdx] = tempArray;
+ sortState.tempArray = tempArray;
return tempArray;
}
// This macro jumps to the Bailout label iff kBailoutStatus is kFailure.
- macro EnsureSuccess(implicit context:
- Context)(sortState: FixedArray) labels Bailout {
- const status: Smi = UnsafeCast<Smi>(sortState[kBailoutStatusIdx]);
- if (status == kFailure) goto Bailout;
+ macro EnsureSuccess(implicit context: Context)(sortState:
+ SortState) labels Bailout {
+ if (sortState.bailoutStatus == kFailure) goto Bailout;
}
// Sets kBailoutStatus to kFailure and returns kFailure.
- macro Failure(sortState: FixedArray): Smi {
- sortState[kBailoutStatusIdx] = kFailure;
+ macro Failure(sortState: SortState): Smi {
+ sortState.bailoutStatus = kFailure;
return kFailure;
}
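
Failure() and EnsureSuccess() form the bailout protocol: a Torque label cannot be passed across a builtin call, so a fallible builtin records kFailure in the state and every call site re-checks the flag and jumps to its own Bailout label (the CallLoad/CallStore wrappers below bundle call plus check). The same pattern in plain C++:

#include <cstdio>

struct SortState { int bailoutStatus = 0; };  // 0 = kSuccess, -1 = kFailure

int Failure(SortState& s) {
  s.bailoutStatus = -1;  // record the failure for the caller to observe
  return -1;
}

bool EnsureSuccess(const SortState& s) { return s.bailoutStatus != -1; }

int main() {
  SortState s;
  Failure(s);  // some accessor bailed out
  if (!EnsureSuccess(s)) {
    std::puts("bail to the baseline GenericElementsAccessor");
  }
  return 0;
}
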
@@ -491,233 +462,115 @@ namespace array {
// readable since we can use labels and do not have to check kBailoutStatus
// or the return value.
- macro CallLoad(
- context: Context, sortState: FixedArray, load: LoadFn,
- elements: HeapObject, index: Smi): Object
+ macro CallLoad(implicit context: Context, sortState: SortState)(
+ load: LoadFn, index: Smi): Object
labels Bailout {
- const result: Object = load(context, sortState, elements, index);
+ const result: Object = load(context, sortState, index);
EnsureSuccess(sortState) otherwise Bailout;
return result;
}
- macro CallStore(
- context: Context, sortState: FixedArray, store: StoreFn,
- elements: HeapObject, index: Smi, value: Object) labels Bailout {
- store(context, sortState, elements, index, value);
- EnsureSuccess(sortState) otherwise Bailout;
- }
-
- transitioning macro CallCopyFromTempArray(
- context: Context, sortState: FixedArray, dstElements: HeapObject,
- dstPos: Smi, tempArray: FixedArray, srcPos: Smi, length: Smi)
- labels Bailout {
- CopyFromTempArray(
- context, sortState, dstElements, dstPos, tempArray, srcPos, length);
+ macro CallStore(implicit context: Context, sortState: SortState)(
+ store: StoreFn, index: Smi, value: Object) labels Bailout {
+ store(context, sortState, index, value);
EnsureSuccess(sortState) otherwise Bailout;
}
- transitioning macro CallCopyWithinSortArray(
- context: Context, sortState: FixedArray, elements: HeapObject,
- srcPos: Smi, dstPos: Smi, length: Smi)
- labels Bailout {
- CopyWithinSortArray(context, sortState, elements, srcPos, dstPos, length);
- EnsureSuccess(sortState) otherwise Bailout;
- }
-
- macro CallGallopRight(
- context: Context, sortState: FixedArray, load: LoadFn, key: Object,
- base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi
- labels Bailout {
- const result: Smi = GallopRight(
- context, sortState, load, key, base, length, hint, useTempArray);
- EnsureSuccess(sortState) otherwise Bailout;
- return result;
- }
-
- macro CallGallopLeft(
- context: Context, sortState: FixedArray, load: LoadFn, key: Object,
- base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi
- labels Bailout {
- const result: Smi = GallopLeft(
- context, sortState, load, key, base, length, hint, useTempArray);
- EnsureSuccess(sortState) otherwise Bailout;
- return result;
- }
-
- transitioning macro
- CallMergeAt(context: Context, sortState: FixedArray, i: Smi)
- labels Bailout {
- MergeAt(context, sortState, i);
- EnsureSuccess(sortState) otherwise Bailout;
- }
-
- transitioning macro CopyToTempArray(
- context: Context, sortState: FixedArray, load: LoadFn,
- srcElements: HeapObject, srcPos: Smi, tempArray: FixedArray, dstPos: Smi,
- length: Smi)
- labels Bailout {
- assert(srcPos >= 0);
- assert(dstPos >= 0);
- assert(srcPos <= GetInitialReceiverLength(sortState) - length);
- assert(dstPos <= tempArray.length - length);
-
- let srcIdx: Smi = srcPos;
- let dstIdx: Smi = dstPos;
- let to: Smi = srcPos + length;
-
- while (srcIdx < to) {
- let element: Object =
- CallLoad(context, sortState, load, srcElements, srcIdx++)
- otherwise Bailout;
- tempArray[dstIdx++] = element;
- }
- }
-
- transitioning builtin CopyFromTempArray(
- context: Context, sortState: FixedArray, dstElements: HeapObject,
- dstPos: Smi, tempArray: FixedArray, srcPos: Smi, length: Smi): Smi {
+ transitioning builtin
+ Copy(implicit context: Context)(
+ source: FixedArray, srcPos: Smi, target: FixedArray, dstPos: Smi,
+ length: Smi): Object {
assert(srcPos >= 0);
assert(dstPos >= 0);
- assert(srcPos <= tempArray.length - length);
- assert(dstPos <= GetInitialReceiverLength(sortState) - length);
-
- let store: StoreFn = GetStoreFn(sortState);
-
- let srcIdx: Smi = srcPos;
- let dstIdx: Smi = dstPos;
- let to: Smi = srcPos + length;
- try {
- while (srcIdx < to) {
- CallStore(
- context, sortState, store, dstElements, dstIdx++,
- tempArray[srcIdx++])
- otherwise Bailout;
+ assert(srcPos <= source.length - length);
+ assert(dstPos <= target.length - length);
+
+ // TODO(szuend): Investigate whether this builtin should be replaced
+  // by CopyElements/MoveElements for performance.
+
+ // source and target might be the same array. To avoid overwriting
+  // values in the case of overlapping ranges, elements are copied from
+ // the back when srcPos < dstPos.
+ if (srcPos < dstPos) {
+ let srcIdx: Smi = srcPos + length - 1;
+ let dstIdx: Smi = dstPos + length - 1;
+ while (srcIdx >= srcPos) {
+ target.objects[dstIdx--] = source.objects[srcIdx--];
}
- return kSuccess;
- }
- label Bailout {
- return Failure(sortState);
- }
- }
-
- transitioning builtin CopyWithinSortArray(
- context: Context, sortState: FixedArray, elements: HeapObject,
- srcPos: Smi, dstPos: Smi, length: Smi): Smi {
- assert(srcPos >= 0);
- assert(dstPos >= 0);
- assert(srcPos <= GetInitialReceiverLength(sortState) - length);
- assert(dstPos <= GetInitialReceiverLength(sortState) - length);
+ } else {
+ let srcIdx: Smi = srcPos;
+ let dstIdx: Smi = dstPos;
+ let to: Smi = srcPos + length;
- try {
- let load: LoadFn = GetLoadFn(sortState);
- let store: StoreFn = GetStoreFn(sortState);
-
- if (srcPos < dstPos) {
- let srcIdx: Smi = srcPos + length - 1;
- let dstIdx: Smi = dstPos + length - 1;
- while (srcIdx >= srcPos) {
- CopyElement(
- context, sortState, load, store, elements, srcIdx--, dstIdx--)
- otherwise Bailout;
- }
- } else {
- let srcIdx: Smi = srcPos;
- let dstIdx: Smi = dstPos;
- let to: Smi = srcPos + length;
- while (srcIdx < to) {
- CopyElement(
- context, sortState, load, store, elements, srcIdx++, dstIdx++)
- otherwise Bailout;
- }
+ while (srcIdx < to) {
+ target.objects[dstIdx++] = source.objects[srcIdx++];
}
- return kSuccess;
- }
- label Bailout {
- return Failure(sortState);
}
+ return kSuccess;
}
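
Copy above has memmove semantics: source and target may alias, so when the destination range starts to the right of the source range (srcPos < dstPos) it walks back-to-front to avoid reading an element it has already overwritten. The same idea standalone:

#include <cassert>
#include <vector>

// memmove-style copy within one buffer: copy backwards when the ranges
// overlap and the destination is to the right.
void copyWithin(std::vector<int>& a, int srcPos, int dstPos, int length) {
  if (srcPos < dstPos) {
    for (int i = length - 1; i >= 0; --i) a[dstPos + i] = a[srcPos + i];
  } else {
    for (int i = 0; i < length; ++i) a[dstPos + i] = a[srcPos + i];
  }
}

int main() {
  std::vector<int> a = {1, 2, 3, 4, 0, 0};
  copyWithin(a, 0, 2, 4);  // overlapping shift right by two
  assert((a == std::vector<int>{1, 2, 1, 2, 3, 4}));
  return 0;
}
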
- // BinaryInsertionSort is the best method for sorting small arrays: it does
- // few compares, but can do data movement quadratic in the number of elements.
- // This is an advantage since comparisons are more expensive due to
- // calling into JS.
+ // BinaryInsertionSort is the best method for sorting small arrays: it
+ // does few compares, but can do data movement quadratic in the number of
+ // elements. This is an advantage since comparisons are more expensive due
+ // to calling into JS.
//
// [low, high) is a contiguous range of a array, and is sorted via
// binary insertion. This sort is stable.
//
- // On entry, must have low <= start <= high, and that [low, start) is already
- // sorted. Pass start == low if you do not know!.
+ // On entry, must have low <= start <= high, and that [low, start) is
+  // already sorted. Pass start == low if you do not know!
builtin BinaryInsertionSort(
- context: Context, sortState: FixedArray, low: Smi, startArg: Smi,
+ context: Context, sortState: SortState, low: Smi, startArg: Smi,
high: Smi): Smi {
assert(low <= startArg && startArg <= high);
- try {
- let elements: HeapObject = ReloadElements(sortState);
-
- const load: LoadFn = GetLoadFn(sortState);
- const store: StoreFn = GetStoreFn(sortState);
-
- let start: Smi = low == startArg ? (startArg + 1) : startArg;
-
- for (; start < high; ++start) {
- // Set left to where a[start] belongs.
- let left: Smi = low;
- let right: Smi = start;
-
- const pivot: Object =
- CallLoad(context, sortState, load, elements, right)
- otherwise Bailout;
-
- // Invariants:
- // pivot >= all in [low, left).
- // pivot < all in [right, start).
- assert(left < right);
-
- // Find pivot insertion point.
- while (left < right) {
- const mid: Smi = left + ((right - left) >> 1);
- const midElement: Object =
- CallLoad(context, sortState, load, elements, mid)
- otherwise Bailout;
- const order: Number =
- CallCompareFn(context, sortState, pivot, midElement)
- otherwise Bailout;
- elements = ReloadElements(sortState);
+ const workArray = sortState.workArray;
- if (order < 0) {
- right = mid;
- } else {
- left = mid + 1;
- }
- }
- assert(left == right);
-
- // The invariants still hold, so:
- // pivot >= all in [low, left) and
- // pivot < all in [left, start),
- //
- // so pivot belongs at left. Note that if there are elements equal to
- // pivot, left points to the first slot after them -- that's why this
- // sort is stable.
- // Slide over to make room.
- for (let p: Smi = start; p > left; --p) {
- CopyElement(context, sortState, load, store, elements, p - 1, p)
- otherwise Bailout;
+ let start: Smi = low == startArg ? (startArg + 1) : startArg;
+
+ for (; start < high; ++start) {
+ // Set left to where a[start] belongs.
+ let left: Smi = low;
+ let right: Smi = start;
+
+ const pivot = workArray.objects[right];
+
+ // Invariants:
+ // pivot >= all in [low, left).
+ // pivot < all in [right, start).
+ assert(left < right);
+
+ // Find pivot insertion point.
+ while (left < right) {
+ const mid: Smi = left + ((right - left) >> 1);
+ const order = sortState.Compare(pivot, workArray.objects[mid]);
+
+ if (order < 0) {
+ right = mid;
+ } else {
+ left = mid + 1;
}
- CallStore(context, sortState, store, elements, left, pivot)
- otherwise Bailout;
}
- return kSuccess;
- }
- label Bailout {
- return Failure(sortState);
+ assert(left == right);
+
+ // The invariants still hold, so:
+ // pivot >= all in [low, left) and
+ // pivot < all in [left, start),
+ //
+ // so pivot belongs at left. Note that if there are elements equal
+ // to pivot, left points to the first slot after them -- that's why
+ // this sort is stable. Slide over to make room.
+ for (let p: Smi = start; p > left; --p) {
+ workArray.objects[p] = workArray.objects[p - 1];
+ }
+ workArray.objects[left] = pivot;
}
+ return kSuccess;
}
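
With the work array private to the sort, BinaryInsertionSort no longer needs CallLoad/CallStore or bailout plumbing: only the comparison step can re-enter JS. The algorithm itself, reduced to standalone C++ (int elements standing in for the compare callback):

#include <cassert>
#include <vector>

// Binary insertion sort of a[low, high): binary-search the insertion
// point (few comparisons -- the expensive part in the real code), then
// slide with cheap raw moves. Equal elements insert after their equals,
// which keeps the sort stable.
void binaryInsertionSort(std::vector<int>& a, int low, int high) {
  for (int start = low + 1; start < high; ++start) {
    int pivot = a[start];
    int left = low, right = start;
    while (left < right) {
      int mid = left + ((right - left) >> 1);
      if (pivot < a[mid]) right = mid; else left = mid + 1;
    }
    for (int p = start; p > left; --p) a[p] = a[p - 1];  // make room
    a[left] = pivot;
  }
}

int main() {
  std::vector<int> a = {5, 2, 2, 8, 1};
  binaryInsertionSort(a, 0, static_cast<int>(a.size()));
  assert((a == std::vector<int>{1, 2, 2, 5, 8}));
  return 0;
}
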
- // Return the length of the run beginning at low, in the range [low, high),
- // low < high is required on entry.
- // "A run" is the longest ascending sequence, with
+ // Return the length of the run beginning at low, in the range [low,
+ // high), low < high is required on entry. "A run" is the longest
+ // ascending sequence, with
//
// a[low] <= a[low + 1] <= a[low + 2] <= ...
//
@@ -725,35 +578,27 @@ namespace array {
//
// a[low] > a[low + 1] > a[low + 2] > ...
//
- // For its intended use in stable mergesort, the strictness of the definition
- // of "descending" is needed so that the range can safely be reversed
- // without violating stability (strict ">" ensures there are no equal
- // elements to get out of order).
+ // For its intended use in stable mergesort, the strictness of the
+ // definition of "descending" is needed so that the range can safely be
+ // reversed without violating stability (strict ">" ensures there are no
+ // equal elements to get out of order).
//
- // In addition, if the run is "descending", it is reversed, so the returned
- // length is always an ascending sequence.
- macro CountAndMakeRun(
- context: Context, sortState: FixedArray, lowArg: Smi, high: Smi): Smi
- labels Bailout {
+ // In addition, if the run is "descending", it is reversed, so the
+ // returned length is always an ascending sequence.
+ macro CountAndMakeRun(implicit context: Context, sortState: SortState)(
+ lowArg: Smi, high: Smi): Smi {
assert(lowArg < high);
- let elements: HeapObject = ReloadElements(sortState);
- const load: LoadFn = GetLoadFn(sortState);
- const store: StoreFn = GetStoreFn(sortState);
+ const workArray = sortState.workArray;
let low: Smi = lowArg + 1;
if (low == high) return 1;
let runLength: Smi = 2;
- const elementLow: Object =
- CallLoad(context, sortState, load, elements, low) otherwise Bailout;
- const elementLowPred: Object =
- CallLoad(context, sortState, load, elements, low - 1) otherwise Bailout;
- let order: Number =
- CallCompareFn(context, sortState, elementLow, elementLowPred)
- otherwise Bailout;
- elements = ReloadElements(sortState);
+ const elementLow = workArray.objects[low];
+ const elementLowPred = workArray.objects[low - 1];
+ let order = sortState.Compare(elementLow, elementLowPred);
// TODO(szuend): Replace with "order < 0" once Torque supports it.
// Currently the operator<(Number, Number) has return type
@@ -762,11 +607,8 @@ namespace array {
let previousElement: Object = elementLow;
for (let idx: Smi = low + 1; idx < high; ++idx) {
- const currentElement: Object =
- CallLoad(context, sortState, load, elements, idx) otherwise Bailout;
- order = CallCompareFn(context, sortState, currentElement, previousElement)
- otherwise Bailout;
- elements = ReloadElements(sortState);
+ const currentElement = workArray.objects[idx];
+ order = sortState.Compare(currentElement, previousElement);
if (isDescending) {
if (order >= 0) break;
@@ -779,37 +621,28 @@ namespace array {
}
if (isDescending) {
- ReverseRange(
- context, sortState, load, store, elements, lowArg, lowArg + runLength)
- otherwise Bailout;
+ ReverseRange(workArray, lowArg, lowArg + runLength);
}
return runLength;
}
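
CountAndMakeRun feeds the merge machinery: it measures the natural run starting at lowArg, and if that run is strictly descending it is reversed so every run handed to the merger is ascending. The strict ">" matters for stability, as the comment above notes. A standalone version of the same logic:

#include <algorithm>
#include <cassert>
#include <vector>

// Returns the length of the run starting at low; a strictly descending
// run is reversed in place. (Reversing a run containing equal neighbours
// would reorder equal elements and break stability.)
int countAndMakeRun(std::vector<int>& a, int low, int high) {
  int i = low + 1;
  if (i == high) return 1;
  if (a[i] < a[i - 1]) {  // strictly descending
    while (i + 1 < high && a[i + 1] < a[i]) ++i;
    std::reverse(a.begin() + low, a.begin() + i + 1);
  } else {                // ascending (non-strict)
    while (i + 1 < high && a[i + 1] >= a[i]) ++i;
  }
  return i - low + 1;
}

int main() {
  std::vector<int> a = {9, 7, 4, 5, 6};
  assert(countAndMakeRun(a, 0, static_cast<int>(a.size())) == 3);
  assert((a == std::vector<int>{4, 7, 9, 5, 6}));  // 9 > 7 > 4 reversed
  return 0;
}
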
- macro ReverseRange(
- context: Context, sortState: FixedArray, load: LoadFn, store: StoreFn,
- elements: HeapObject, from: Smi, to: Smi)
- labels Bailout {
+ macro ReverseRange(array: FixedArray, from: Smi, to: Smi) {
let low: Smi = from;
let high: Smi = to - 1;
while (low < high) {
- const elementLow: Object =
- CallLoad(context, sortState, load, elements, low) otherwise Bailout;
- const elementHigh: Object =
- CallLoad(context, sortState, load, elements, high) otherwise Bailout;
- CallStore(context, sortState, store, elements, low++, elementHigh)
- otherwise Bailout;
- CallStore(context, sortState, store, elements, high--, elementLow)
- otherwise Bailout;
+ const elementLow = array.objects[low];
+ const elementHigh = array.objects[high];
+ array.objects[low++] = elementHigh;
+ array.objects[high--] = elementLow;
}
}
// Merges the two runs at stack indices i and i + 1.
// Returns kFailure if we need to bailout, kSuccess otherwise.
transitioning builtin
- MergeAt(context: Context, sortState: FixedArray, i: Smi): Smi {
+ MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
const stackSize: Smi = GetPendingRunsSize(sortState);
// We are only allowed to either merge the two top-most runs, or leave
@@ -818,11 +651,9 @@ namespace array {
assert(i >= 0);
assert(i == stackSize - 2 || i == stackSize - 3);
- let elements: HeapObject = ReloadElements(sortState);
- const load: LoadFn = GetLoadFn(sortState);
+ const workArray = sortState.workArray;
- const pendingRuns: FixedArray =
- UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]);
+ const pendingRuns: FixedArray = sortState.pendingRuns;
let baseA: Smi = GetPendingRunBase(pendingRuns, i);
let lengthA: Smi = GetPendingRunLength(pendingRuns, i);
let baseB: Smi = GetPendingRunBase(pendingRuns, i + 1);
@@ -840,18 +671,13 @@ namespace array {
SetPendingRunBase(pendingRuns, i + 1, base);
SetPendingRunLength(pendingRuns, i + 1, length);
}
- SetPendingRunsSize(sortState, stackSize - 1);
+ sortState.pendingRunsSize = stackSize - 1;
try {
// Where does b start in a? Elements in a before that can be ignored,
// because they are already in place.
- const keyRight: Object =
- CallLoad(context, sortState, load, elements, baseB)
- otherwise Bailout;
- const k: Smi = CallGallopRight(
- context, sortState, load, keyRight, baseA, lengthA, 0, False)
- otherwise Bailout;
- elements = ReloadElements(sortState);
+ const keyRight = workArray.objects[baseB];
+ const k: Smi = GallopRight(workArray, keyRight, baseA, lengthA, 0);
assert(k >= 0);
baseA = baseA + k;
@@ -861,24 +687,17 @@ namespace array {
// Where does a end in b? Elements in b after that can be ignored,
// because they are already in place.
- let keyLeft: Object =
- CallLoad(context, sortState, load, elements, baseA + lengthA - 1)
- otherwise Bailout;
- lengthB = CallGallopLeft(
- context, sortState, load, keyLeft, baseB, lengthB, lengthB - 1, False)
- otherwise Bailout;
- elements = ReloadElements(sortState);
+ const keyLeft = workArray.objects[baseA + lengthA - 1];
+ lengthB = GallopLeft(workArray, keyLeft, baseB, lengthB, lengthB - 1);
assert(lengthB >= 0);
if (lengthB == 0) return kSuccess;
// Merge what remains of the runs, using a temp array with
// min(lengthA, lengthB) elements.
if (lengthA <= lengthB) {
- MergeLow(context, sortState, baseA, lengthA, baseB, lengthB)
- otherwise Bailout;
+ MergeLow(baseA, lengthA, baseB, lengthB) otherwise Bailout;
} else {
- MergeHigh(context, sortState, baseA, lengthA, baseB, lengthB)
- otherwise Bailout;
+ MergeHigh(baseA, lengthA, baseB, lengthB) otherwise Bailout;
}
return kSuccess;
}
@@ -887,22 +706,17 @@ namespace array {
}
}
- macro LoadElementsOrTempArray(implicit context: Context)(
- useTempArray: Boolean, sortState: FixedArray): HeapObject {
- return useTempArray == True ? GetTempArray(sortState) :
- ReloadElements(sortState);
- }
-
- // Locates the proper position of key in a sorted array; if the array contains
- // an element equal to key, return the position immediately to the left of
- // the leftmost equal element. (GallopRight does the same except returns the
- // position to the right of the rightmost equal element (if any)).
+ // Locates the proper position of key in a sorted array; if the array
+ // contains an element equal to key, return the position immediately to
+ // the left of the leftmost equal element. (GallopRight does the same
+ // except returns the position to the right of the rightmost equal element
+ // (if any)).
//
// The array is sorted with "length" elements, starting at "base".
// "length" must be > 0.
//
- // "hint" is an index at which to begin the search, 0 <= hint < n. The closer
- // hint is to the final result, the faster this runs.
+ // "hint" is an index at which to begin the search, 0 <= hint < n. The
+ // closer hint is to the final result, the faster this runs.
//
// The return value is the int offset in 0..length such that
//
@@ -910,250 +724,195 @@ namespace array {
//
// pretending that array[base - 1] is minus infinity and array[base + len]
// is plus infinity. In other words, key belongs at index base + k.
- builtin GallopLeft(
- context: Context, sortState: FixedArray, load: LoadFn, key: Object,
- base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi {
+ builtin GallopLeft(implicit context: Context, sortState: SortState)(
+ array: FixedArray, key: Object, base: Smi, length: Smi, hint: Smi): Smi {
assert(length > 0 && base >= 0);
assert(0 <= hint && hint < length);
let lastOfs: Smi = 0;
let offset: Smi = 1;
- try {
- const baseHintElement: Object = CallLoad(
- context, sortState, load,
- LoadElementsOrTempArray(useTempArray, sortState), base + hint)
- otherwise Bailout;
- let order: Number =
- CallCompareFn(context, sortState, baseHintElement, key)
- otherwise Bailout;
+ const baseHintElement = array.objects[base + hint];
+ let order = sortState.Compare(baseHintElement, key);
- if (order < 0) {
- // a[base + hint] < key: gallop right, until
- // a[base + hint + lastOfs] < key <= a[base + hint + offset].
-
- // a[base + length - 1] is highest.
- let maxOfs: Smi = length - hint;
- while (offset < maxOfs) {
- const offsetElement: Object = CallLoad(
- context, sortState, load,
- LoadElementsOrTempArray(useTempArray, sortState),
- base + hint + offset)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, offsetElement, key)
- otherwise Bailout;
-
- // a[base + hint + offset] >= key? Break.
- if (order >= 0) break;
-
- lastOfs = offset;
- offset = (offset << 1) + 1;
-
- // Integer overflow.
- if (offset <= 0) offset = maxOfs;
- }
+ if (order < 0) {
+ // a[base + hint] < key: gallop right, until
+ // a[base + hint + lastOfs] < key <= a[base + hint + offset].
- if (offset > maxOfs) offset = maxOfs;
+ // a[base + length - 1] is highest.
+ let maxOfs: Smi = length - hint;
+ while (offset < maxOfs) {
+ const offsetElement = array.objects[base + hint + offset];
+ order = sortState.Compare(offsetElement, key);
- // Translate back to positive offsets relative to base.
- lastOfs = lastOfs + hint;
- offset = offset + hint;
- } else {
- // key <= a[base + hint]: gallop left, until
- // a[base + hint - offset] < key <= a[base + hint - lastOfs].
- assert(order >= 0);
-
- // a[base + hint] is lowest.
- let maxOfs: Smi = hint + 1;
- while (offset < maxOfs) {
- const offsetElement: Object = CallLoad(
- context, sortState, load,
- LoadElementsOrTempArray(useTempArray, sortState),
- base + hint - offset)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, offsetElement, key)
- otherwise Bailout;
-
- if (order < 0) break;
-
- lastOfs = offset;
- offset = (offset << 1) + 1;
-
- // Integer overflow.
- if (offset <= 0) offset = maxOfs;
- }
+ // a[base + hint + offset] >= key? Break.
+ if (order >= 0) break;
- if (offset > maxOfs) offset = maxOfs;
+ lastOfs = offset;
+ offset = (offset << 1) + 1;
- // Translate back to positive offsets relative to base.
- const tmp: Smi = lastOfs;
- lastOfs = hint - offset;
- offset = hint - tmp;
+ // Integer overflow.
+ if (offset <= 0) offset = maxOfs;
}
- assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
+ if (offset > maxOfs) offset = maxOfs;
- // Now a[base+lastOfs] < key <= a[base+offset], so key belongs somewhere
- // to the right of lastOfs but no farther right than offset. Do a binary
- // search, with invariant:
- // a[base + lastOfs - 1] < key <= a[base + offset].
- lastOfs++;
- while (lastOfs < offset) {
- const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
+ // Translate back to positive offsets relative to base.
+ lastOfs = lastOfs + hint;
+ offset = offset + hint;
+ } else {
+ // key <= a[base + hint]: gallop left, until
+ // a[base + hint - offset] < key <= a[base + hint - lastOfs].
+ assert(order >= 0);
- const baseMElement: Object = CallLoad(
- context, sortState, load,
- LoadElementsOrTempArray(useTempArray, sortState), base + m)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, baseMElement, key)
- otherwise Bailout;
+ // a[base + hint] is lowest.
+ let maxOfs: Smi = hint + 1;
+ while (offset < maxOfs) {
+ const offsetElement = array.objects[base + hint - offset];
+ order = sortState.Compare(offsetElement, key);
- if (order < 0) {
- lastOfs = m + 1; // a[base + m] < key.
- } else {
- offset = m; // key <= a[base + m].
- }
+ if (order < 0) break;
+
+ lastOfs = offset;
+ offset = (offset << 1) + 1;
+
+ // Integer overflow.
+ if (offset <= 0) offset = maxOfs;
}
- // so a[base + offset - 1] < key <= a[base + offset].
- assert(lastOfs == offset);
- assert(0 <= offset && offset <= length);
- return offset;
+
+ if (offset > maxOfs) offset = maxOfs;
+
+ // Translate back to positive offsets relative to base.
+ const tmp: Smi = lastOfs;
+ lastOfs = hint - offset;
+ offset = hint - tmp;
}
- label Bailout {
- return Failure(sortState);
+
+ assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
+
+ // Now a[base+lastOfs] < key <= a[base+offset], so key belongs
+ // somewhere to the right of lastOfs but no farther right than offset.
+ // Do a binary search, with invariant:
+ // a[base + lastOfs - 1] < key <= a[base + offset].
+ lastOfs++;
+ while (lastOfs < offset) {
+ const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
+
+ order = sortState.Compare(array.objects[base + m], key);
+
+ if (order < 0) {
+ lastOfs = m + 1; // a[base + m] < key.
+ } else {
+ offset = m; // key <= a[base + m].
+ }
}
+ // so a[base + offset - 1] < key <= a[base + offset].
+ assert(lastOfs == offset);
+ assert(0 <= offset && offset <= length);
+ return offset;
}
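
GallopLeft is Timsort's exponential search: probe at offsets that roughly double away from the hint until the key is bracketed, then binary-search inside the bracket, so a key that lands near the hint costs only O(log distance) comparisons. A standalone sketch with the hint fixed at 0 (the real code above also gallops leftwards from an arbitrary hint):

#include <cassert>
#include <vector>

// Leftmost insertion point for key in sorted a[base, base + len).
// Phase 1 brackets the key with probes at offsets 0, 1, 3, 7, ...;
// phase 2 binary-searches the bracket. Invariant throughout:
// a[base + lo] < key <= a[base + hi], with lo = -1 as virtual -infinity.
int gallopLeft(const std::vector<int>& a, int key, int base, int len) {
  int lo = -1, hi = 0;
  while (hi < len && a[base + hi] < key) {
    lo = hi;
    hi = (hi << 1) + 1;  // 0 -> 1 -> 3 -> 7 -> ...
  }
  if (hi > len) hi = len;
  while (lo + 1 < hi) {
    int m = lo + ((hi - lo) >> 1);
    if (a[base + m] < key) lo = m; else hi = m;
  }
  return hi;  // a[base + hi - 1] < key <= a[base + hi]
}

int main() {
  std::vector<int> a = {1, 2, 4, 4, 4, 8, 16};
  assert(gallopLeft(a, 4, 0, 7) == 2);    // before the first 4
  assert(gallopLeft(a, 0, 0, 7) == 0);    // before everything
  assert(gallopLeft(a, 100, 0, 7) == 7);  // past the end
  return 0;
}

GallopRight, just below, is identical except that ties resolve to the right of the rightmost equal element: the strict comparison against the key flips from < to <=.
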
// Exactly like GallopLeft, except that if key already exists in
- // [base, base + length), finds the position immediately to the right of the
- // rightmost equal value.
+ // [base, base + length), finds the position immediately to the right of
+ // the rightmost equal value.
//
// The return value is the int offset in 0..length such that
//
// array[base + offset - 1] <= key < array[base + offset]
//
// or kFailure on error.
- builtin GallopRight(
- context: Context, sortState: FixedArray, load: LoadFn, key: Object,
- base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi {
+ builtin GallopRight(implicit context: Context, sortState: SortState)(
+ array: FixedArray, key: Object, base: Smi, length: Smi, hint: Smi): Smi {
assert(length > 0 && base >= 0);
assert(0 <= hint && hint < length);
let lastOfs: Smi = 0;
let offset: Smi = 1;
- try {
- const baseHintElement: Object = CallLoad(
- context, sortState, load,
- LoadElementsOrTempArray(useTempArray, sortState), base + hint)
- otherwise Bailout;
- let order: Number =
- CallCompareFn(context, sortState, key, baseHintElement)
- otherwise Bailout;
+ const baseHintElement = array.objects[base + hint];
+ let order = sortState.Compare(key, baseHintElement);
- if (order < 0) {
- // key < a[base + hint]: gallop left, until
- // a[base + hint - offset] <= key < a[base + hint - lastOfs].
-
- // a[base + hint] is lowest.
- let maxOfs: Smi = hint + 1;
- while (offset < maxOfs) {
- const offsetElement: Object = CallLoad(
- context, sortState, load,
- LoadElementsOrTempArray(useTempArray, sortState),
- base + hint - offset)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, key, offsetElement)
- otherwise Bailout;
-
- if (order >= 0) break;
-
- lastOfs = offset;
- offset = (offset << 1) + 1;
-
- // Integer overflow.
- if (offset <= 0) offset = maxOfs;
- }
+ if (order < 0) {
+ // key < a[base + hint]: gallop left, until
+ // a[base + hint - offset] <= key < a[base + hint - lastOfs].
- if (offset > maxOfs) offset = maxOfs;
+ // a[base + hint] is lowest.
+ let maxOfs: Smi = hint + 1;
+ while (offset < maxOfs) {
+ const offsetElement = array.objects[base + hint - offset];
+ order = sortState.Compare(key, offsetElement);
- // Translate back to positive offsets relative to base.
- const tmp: Smi = lastOfs;
- lastOfs = hint - offset;
- offset = hint - tmp;
- } else {
- // a[base + hint] <= key: gallop right, until
- // a[base + hint + lastOfs] <= key < a[base + hint + offset].
-
- // a[base + length - 1] is highest.
- let maxOfs: Smi = length - hint;
- while (offset < maxOfs) {
- const offsetElement: Object = CallLoad(
- context, sortState, load,
- LoadElementsOrTempArray(useTempArray, sortState),
- base + hint + offset)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, key, offsetElement)
- otherwise Bailout;
-
- // a[base + hint + ofs] <= key.
- if (order < 0) break;
-
- lastOfs = offset;
- offset = (offset << 1) + 1;
-
- // Integer overflow.
- if (offset <= 0) offset = maxOfs;
- }
+ if (order >= 0) break;
- if (offset > maxOfs) offset = maxOfs;
+ lastOfs = offset;
+ offset = (offset << 1) + 1;
- // Translate back to positive offests relative to base.
- lastOfs = lastOfs + hint;
- offset = offset + hint;
+ // Integer overflow.
+ if (offset <= 0) offset = maxOfs;
}
- assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
-
- // Now a[base + lastOfs] <= key < a[base + ofs], so key belongs
- // somewhere to the right of lastOfs but no farther right than ofs.
- // Do a binary search, with invariant
- // a[base + lastOfs - 1] < key <= a[base + ofs].
- lastOfs++;
- while (lastOfs < offset) {
- const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
-
- const baseMElement: Object = CallLoad(
- context, sortState, load,
- LoadElementsOrTempArray(useTempArray, sortState), base + m)
- otherwise Bailout;
- order = CallCompareFn(context, sortState, key, baseMElement)
- otherwise Bailout;
- if (order < 0) {
- offset = m; // key < a[base + m].
- } else {
- lastOfs = m + 1; // a[base + m] <= key.
- }
+ if (offset > maxOfs) offset = maxOfs;
+
+ // Translate back to positive offsets relative to base.
+ const tmp: Smi = lastOfs;
+ lastOfs = hint - offset;
+ offset = hint - tmp;
+ } else {
+ // a[base + hint] <= key: gallop right, until
+ // a[base + hint + lastOfs] <= key < a[base + hint + offset].
+
+ // a[base + length - 1] is highest.
+ let maxOfs: Smi = length - hint;
+ while (offset < maxOfs) {
+ const offsetElement = array.objects[base + hint + offset];
+ order = sortState.Compare(key, offsetElement);
+
+ // a[base + hint + ofs] <= key.
+ if (order < 0) break;
+
+ lastOfs = offset;
+ offset = (offset << 1) + 1;
+
+ // Integer overflow.
+ if (offset <= 0) offset = maxOfs;
}
- // so a[base + offset - 1] <= key < a[base + offset].
- assert(lastOfs == offset);
- assert(0 <= offset && offset <= length);
- return offset;
+
+ if (offset > maxOfs) offset = maxOfs;
+
+      // Translate back to positive offsets relative to base.
+ lastOfs = lastOfs + hint;
+ offset = offset + hint;
}
- label Bailout {
- return Failure(sortState);
+ assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
+
+ // Now a[base + lastOfs] <= key < a[base + ofs], so key belongs
+ // somewhere to the right of lastOfs but no farther right than ofs.
+ // Do a binary search, with invariant
+      // a[base + lastOfs - 1] <= key < a[base + ofs].
+ lastOfs++;
+ while (lastOfs < offset) {
+ const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
+
+ order = sortState.Compare(key, array.objects[base + m]);
+
+ if (order < 0) {
+ offset = m; // key < a[base + m].
+ } else {
+ lastOfs = m + 1; // a[base + m] <= key.
+ }
}
+ // so a[base + offset - 1] <= key < a[base + offset].
+ assert(lastOfs == offset);
+ assert(0 <= offset && offset <= length);
+ return offset;
}
// Copies a single element inside the array/object (NOT the tempArray).
- macro CopyElement(
- context: Context, sortState: FixedArray, load: LoadFn, store: StoreFn,
- elements: HeapObject, from: Smi, to: Smi)
+ macro CopyElement(implicit context: Context, sortState: SortState)(
+ load: LoadFn, store: StoreFn, from: Smi, to: Smi)
labels Bailout {
- const element: Object = CallLoad(context, sortState, load, elements, from)
- otherwise Bailout;
- CallStore(context, sortState, store, elements, to, element)
- otherwise Bailout;
+ const element = CallLoad(load, from) otherwise Bailout;
+ CallStore(store, to, element) otherwise Bailout;
}
// Merge the lengthA elements starting at baseA with the lengthB elements
@@ -1162,9 +921,8 @@ namespace array {
// array[baseB] < array[baseA],
// that array[baseA + lengthA - 1] belongs at the end of the merge,
// and should have lengthA <= lengthB.
- transitioning macro MergeLow(
- context: Context, sortState: FixedArray, baseA: Smi, lengthAArg: Smi,
- baseB: Smi, lengthBArg: Smi)
+ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
+ baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi)
labels Bailout {
assert(0 < lengthAArg && 0 < lengthBArg);
assert(0 <= baseA && 0 < baseB);
@@ -1173,27 +931,21 @@ namespace array {
let lengthA: Smi = lengthAArg;
let lengthB: Smi = lengthBArg;
- let elements: HeapObject = ReloadElements(sortState);
- const load: LoadFn = GetLoadFn(sortState);
- const store: StoreFn = GetStoreFn(sortState);
-
+ const workArray = sortState.workArray;
const tempArray: FixedArray = GetTempArray(sortState, lengthA);
- CopyToTempArray(
- context, sortState, load, elements, baseA, tempArray, 0, lengthA)
- otherwise Bailout;
+ Copy(workArray, baseA, tempArray, 0, lengthA);
let dest: Smi = baseA;
let cursorTemp: Smi = 0;
let cursorB: Smi = baseB;
- CopyElement(context, sortState, load, store, elements, cursorB++, dest++)
- otherwise Bailout;
+ workArray.objects[dest++] = workArray.objects[cursorB++];
try {
if (--lengthB == 0) goto Succeed;
if (lengthA == 1) goto CopyB;
- let minGallop: Smi = UnsafeCast<Smi>(sortState[kMinGallopIdx]);
+ let minGallop: Smi = sortState.minGallop;
// TODO(szuend): Replace with something that does not have a runtime
// overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
@@ -1207,21 +959,12 @@ namespace array {
while (Int32TrueConstant()) {
assert(lengthA > 1 && lengthB > 0);
- let elementB: Object =
- CallLoad(context, sortState, load, elements, cursorB)
- otherwise Bailout;
- let order: Number =
- CallCompareFn(context, sortState, elementB, tempArray[cursorTemp])
- otherwise Bailout;
- elements = ReloadElements(sortState);
+ let order = sortState.Compare(
+ workArray.objects[cursorB], tempArray.objects[cursorTemp]);
if (order < 0) {
- CopyElement(
- context, sortState, load, store, elements, cursorB, dest)
- otherwise Bailout;
+ workArray.objects[dest++] = workArray.objects[cursorB++];
- ++cursorB;
- ++dest;
++nofWinsB;
--lengthB;
nofWinsA = 0;
@@ -1229,13 +972,8 @@ namespace array {
if (lengthB == 0) goto Succeed;
if (nofWinsB >= minGallop) break;
} else {
- CallStore(
- context, sortState, store, elements, dest,
- tempArray[cursorTemp])
- otherwise Bailout;
+ workArray.objects[dest++] = tempArray.objects[cursorTemp++];
- ++cursorTemp;
- ++dest;
++nofWinsA;
--lengthA;
nofWinsB = 0;
@@ -1245,9 +983,9 @@ namespace array {
}
}
- // One run is winning so consistently that galloping may be a huge win.
- // So try that, and continue galloping until (if ever) neither run
- // appears to be winning consistently anymore.
+ // One run is winning so consistently that galloping may be a huge
+ // win. So try that, and continue galloping until (if ever) neither
+ // run appears to be winning consistently anymore.
++minGallop;
let firstIteration: bool = true;
while (nofWinsA >= kMinGallopWins || nofWinsB >= kMinGallopWins ||
@@ -1256,21 +994,14 @@ namespace array {
assert(lengthA > 1 && lengthB > 0);
minGallop = SmiMax(1, minGallop - 1);
- sortState[kMinGallopIdx] = minGallop;
-
- let keyRight: Object =
- CallLoad(context, sortState, load, elements, cursorB)
- otherwise Bailout;
- nofWinsA = CallGallopRight(
- context, sortState, Load<TempArrayElements>, keyRight, cursorTemp,
- lengthA, 0, True) otherwise Bailout;
- elements = ReloadElements(sortState);
+ sortState.minGallop = minGallop;
+
+ nofWinsA = GallopRight(
+ tempArray, workArray.objects[cursorB], cursorTemp, lengthA, 0);
assert(nofWinsA >= 0);
if (nofWinsA > 0) {
- CallCopyFromTempArray(
- context, sortState, elements, dest, tempArray, cursorTemp,
- nofWinsA) otherwise Bailout;
+ Copy(tempArray, cursorTemp, workArray, dest, nofWinsA);
dest = dest + nofWinsA;
cursorTemp = cursorTemp + nofWinsA;
lengthA = lengthA - nofWinsA;
@@ -1281,21 +1012,14 @@ namespace array {
// consistent, but we can't assume that it is.
if (lengthA == 0) goto Succeed;
}
- CopyElement(
- context, sortState, load, store, elements, cursorB++, dest++)
- otherwise Bailout;
+ workArray.objects[dest++] = workArray.objects[cursorB++];
if (--lengthB == 0) goto Succeed;
- nofWinsB = CallGallopLeft(
- context, sortState, load, tempArray[cursorTemp], cursorB, lengthB,
- 0, False)
- otherwise Bailout;
- elements = ReloadElements(sortState);
+ nofWinsB = GallopLeft(
+ workArray, tempArray.objects[cursorTemp], cursorB, lengthB, 0);
assert(nofWinsB >= 0);
if (nofWinsB > 0) {
- CallCopyWithinSortArray(
- context, sortState, elements, cursorB, dest, nofWinsB)
- otherwise Bailout;
+ Copy(workArray, cursorB, workArray, dest, nofWinsB);
dest = dest + nofWinsB;
cursorB = cursorB + nofWinsB;
@@ -1303,33 +1027,23 @@ namespace array {
if (lengthB == 0) goto Succeed;
}
- CallStore(
- context, sortState, store, elements, dest++,
- tempArray[cursorTemp++])
- otherwise Bailout;
+ workArray.objects[dest++] = tempArray.objects[cursorTemp++];
if (--lengthA == 1) goto CopyB;
}
++minGallop; // Penalize it for leaving galloping mode
- sortState[kMinGallopIdx] = minGallop;
+ sortState.minGallop = minGallop;
}
}
label Succeed {
if (lengthA > 0) {
- CallCopyFromTempArray(
- context, sortState, elements, dest, tempArray, cursorTemp, lengthA)
- otherwise Bailout;
+ Copy(tempArray, cursorTemp, workArray, dest, lengthA);
}
}
label CopyB {
assert(lengthA == 1 && lengthB > 0);
// The last element of run A belongs at the end of the merge.
- CallCopyWithinSortArray(
- context, sortState, elements, cursorB, dest, lengthB)
- otherwise Bailout;
- CallStore(
- context, sortState, store, elements, dest + lengthB,
- tempArray[cursorTemp])
- otherwise Bailout;
+ Copy(workArray, cursorB, workArray, dest, lengthB);
+ workArray.objects[dest + lengthB] = tempArray.objects[cursorTemp];
}
}
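
Stripped of the galloping fast path and the accessor plumbing, the forward merge that MergeLow performs reduces to this Python sketch (natural < ordering stands in for the user comparator):

    def merge_low(a, base_a, len_a, base_b, len_b):
        # Stable forward merge of the adjacent runs a[base_a:base_a+len_a]
        # and a[base_b:base_b+len_b], with len_a <= len_b. The shorter run A
        # is copied aside so the merge can write over its freed slots.
        assert base_a + len_a == base_b and 0 < len_a <= len_b
        tmp = a[base_a:base_a + len_a]
        i, j, dest = 0, base_b, base_a
        while i < len_a and j < base_b + len_b:
            if a[j] < tmp[i]:               # strict <, so ties go to run A
                a[dest] = a[j]; j += 1
            else:
                a[dest] = tmp[i]; i += 1
            dest += 1
        a[dest:dest + len_a - i] = tmp[i:]  # leftover of A; rest of B is in place

Copying only the shorter run bounds the temp array at n/2 elements, which is why this macro requires lengthA <= lengthB.
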
@@ -1338,8 +1052,9 @@ namespace array {
// be > 0. Must also have that array[baseA + lengthA - 1] belongs at the
// end of the merge and should have lengthA >= lengthB.
transitioning macro MergeHigh(
- context: Context, sortState: FixedArray, baseA: Smi, lengthAArg: Smi,
- baseB: Smi, lengthBArg: Smi)
+ implicit context: Context,
+ sortState:
+ SortState)(baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi)
labels Bailout {
assert(0 < lengthAArg && 0 < lengthBArg);
assert(0 <= baseA && 0 < baseB);
@@ -1348,28 +1063,22 @@ namespace array {
let lengthA: Smi = lengthAArg;
let lengthB: Smi = lengthBArg;
- let elements: HeapObject = ReloadElements(sortState);
- const load: LoadFn = GetLoadFn(sortState);
- const store: StoreFn = GetStoreFn(sortState);
-
+ const workArray = sortState.workArray;
const tempArray: FixedArray = GetTempArray(sortState, lengthB);
- CopyToTempArray(
- context, sortState, load, elements, baseB, tempArray, 0, lengthB)
- otherwise Bailout;
+ Copy(workArray, baseB, tempArray, 0, lengthB);
// MergeHigh merges the two runs backwards.
let dest: Smi = baseB + lengthB - 1;
let cursorTemp: Smi = lengthB - 1;
let cursorA: Smi = baseA + lengthA - 1;
- CopyElement(context, sortState, load, store, elements, cursorA--, dest--)
- otherwise Bailout;
+ workArray.objects[dest--] = workArray.objects[cursorA--];
try {
if (--lengthA == 0) goto Succeed;
if (lengthB == 1) goto CopyA;
- let minGallop: Smi = UnsafeCast<Smi>(sortState[kMinGallopIdx]);
+ let minGallop: Smi = sortState.minGallop;
// TODO(szuend): Replace with something that does not have a runtime
// overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
@@ -1383,21 +1092,12 @@ namespace array {
while (Int32TrueConstant()) {
assert(lengthA > 0 && lengthB > 1);
- let elementA: Object =
- CallLoad(context, sortState, load, elements, cursorA)
- otherwise Bailout;
- let order: Number =
- CallCompareFn(context, sortState, tempArray[cursorTemp], elementA)
- otherwise Bailout;
- elements = ReloadElements(sortState);
+ let order = sortState.Compare(
+ tempArray.objects[cursorTemp], workArray.objects[cursorA]);
if (order < 0) {
- CopyElement(
- context, sortState, load, store, elements, cursorA, dest)
- otherwise Bailout;
+ workArray.objects[dest--] = workArray.objects[cursorA--];
- --cursorA;
- --dest;
++nofWinsA;
--lengthA;
nofWinsB = 0;
@@ -1405,13 +1105,8 @@ namespace array {
if (lengthA == 0) goto Succeed;
if (nofWinsA >= minGallop) break;
} else {
- CallStore(
- context, sortState, store, elements, dest,
- tempArray[cursorTemp])
- otherwise Bailout;
+ workArray.objects[dest--] = tempArray.objects[cursorTemp--];
- --cursorTemp;
- --dest;
++nofWinsB;
--lengthB;
nofWinsA = 0;
@@ -1421,9 +1116,9 @@ namespace array {
}
}
- // One run is winning so consistently that galloping may be a huge win.
- // So try that, and continue galloping until (if ever) neither run
- // appears to be winning consistently anymore.
+ // One run is winning so consistently that galloping may be a huge
+ // win. So try that, and continue galloping until (if ever) neither
+ // run appears to be winning consistently anymore.
++minGallop;
let firstIteration: bool = true;
while (nofWinsA >= kMinGallopWins || nofWinsB >= kMinGallopWins ||
@@ -1433,48 +1128,34 @@ namespace array {
assert(lengthA > 0 && lengthB > 1);
minGallop = SmiMax(1, minGallop - 1);
- sortState[kMinGallopIdx] = minGallop;
+ sortState.minGallop = minGallop;
- let k: Smi = CallGallopRight(
- context, sortState, load, tempArray[cursorTemp], baseA, lengthA,
- lengthA - 1, False)
- otherwise Bailout;
- elements = ReloadElements(sortState);
+ let k: Smi = GallopRight(
+ workArray, tempArray.objects[cursorTemp], baseA, lengthA,
+ lengthA - 1);
assert(k >= 0);
nofWinsA = lengthA - k;
if (nofWinsA > 0) {
dest = dest - nofWinsA;
cursorA = cursorA - nofWinsA;
- CallCopyWithinSortArray(
- context, sortState, elements, cursorA + 1, dest + 1, nofWinsA)
- otherwise Bailout;
+ Copy(workArray, cursorA + 1, workArray, dest + 1, nofWinsA);
lengthA = lengthA - nofWinsA;
if (lengthA == 0) goto Succeed;
}
- CallStore(
- context, sortState, store, elements, dest--,
- tempArray[cursorTemp--])
- otherwise Bailout;
+ workArray.objects[dest--] = tempArray.objects[cursorTemp--];
if (--lengthB == 1) goto CopyA;
- let key: Object =
- CallLoad(context, sortState, load, elements, cursorA)
- otherwise Bailout;
- k = CallGallopLeft(
- context, sortState, Load<TempArrayElements>, key, 0, lengthB,
- lengthB - 1, True) otherwise Bailout;
- elements = ReloadElements(sortState);
+ k = GallopLeft(
+ tempArray, workArray.objects[cursorA], 0, lengthB, lengthB - 1);
assert(k >= 0);
nofWinsB = lengthB - k;
if (nofWinsB > 0) {
dest = dest - nofWinsB;
cursorTemp = cursorTemp - nofWinsB;
- CallCopyFromTempArray(
- context, sortState, elements, dest + 1, tempArray,
- cursorTemp + 1, nofWinsB) otherwise Bailout;
+ Copy(tempArray, cursorTemp + 1, workArray, dest + 1, nofWinsB);
lengthB = lengthB - nofWinsB;
if (lengthB == 1) goto CopyA;
@@ -1483,21 +1164,17 @@ namespace array {
// consistent, but we can't assume that it is.
if (lengthB == 0) goto Succeed;
}
- CopyElement(
- context, sortState, load, store, elements, cursorA--, dest--)
- otherwise Bailout;
+ workArray.objects[dest--] = workArray.objects[cursorA--];
if (--lengthA == 0) goto Succeed;
}
++minGallop;
- sortState[kMinGallopIdx] = minGallop;
+ sortState.minGallop = minGallop;
}
}
label Succeed {
if (lengthB > 0) {
assert(lengthA == 0);
- CallCopyFromTempArray(
- context, sortState, elements, dest - (lengthB - 1), tempArray, 0,
- lengthB) otherwise Bailout;
+ Copy(tempArray, 0, workArray, dest - (lengthB - 1), lengthB);
}
}
label CopyA {
@@ -1506,17 +1183,13 @@ namespace array {
// The first element of run B belongs at the front of the merge.
dest = dest - lengthA;
cursorA = cursorA - lengthA;
- CallCopyWithinSortArray(
- context, sortState, elements, cursorA + 1, dest + 1, lengthA)
- otherwise Bailout;
- CallStore(
- context, sortState, store, elements, dest, tempArray[cursorTemp])
- otherwise Bailout;
+ Copy(workArray, cursorA + 1, workArray, dest + 1, lengthA);
+ workArray.objects[dest] = tempArray.objects[cursorTemp];
}
}
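
MergeHigh is the mirror image for lengthA >= lengthB: run B is copied aside and the merge runs backwards from the right end, again writing only over freed slots. A matching sketch, under the same simplifications as merge_low above:

    def merge_high(a, base_a, len_a, base_b, len_b):
        assert base_a + len_a == base_b and len_a >= len_b > 0
        tmp = a[base_b:base_b + len_b]
        i, j, dest = base_a + len_a - 1, len_b - 1, base_b + len_b - 1
        while i >= base_a and j >= 0:
            if tmp[j] < a[i]:               # on ties take B, keeping stability
                a[dest] = a[i]; i -= 1
            else:
                a[dest] = tmp[j]; j -= 1
            dest -= 1
        a[dest - j:dest + 1] = tmp[:j + 1]  # leftover of B; rest of A is in place
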
- // Compute a good value for the minimum run length; natural runs shorter than
- // this are boosted artificially via binary insertion sort.
+ // Compute a good value for the minimum run length; natural runs shorter
+ // than this are boosted artificially via binary insertion sort.
//
// If n < 64, return n (it's too small to bother with fancy stuff).
// Else if n is an exact power of 2, return 32.
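
The comment continues past this hunk in the source; the computation it describes is the standard Timsort minrun calculation, which fits in a few lines:

    def compute_min_run_length(n):
        # n if n < 64; 32 if n is a power of 2; otherwise a k in [32, 64]
        # such that n / k is close to, but strictly less than, a power of 2.
        assert n >= 0
        r = 0                    # becomes 1 if any shifted-out bit was set
        while n >= 64:
            r |= n & 1
            n >>= 1
        return n + r
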
@@ -1560,10 +1233,9 @@ namespace array {
// TODO(szuend): Remove unnecessary loads. This macro was refactored to
// improve readability, introducing unnecessary loads in the
// process. Determine if all these extra loads are ok.
- transitioning macro MergeCollapse(context: Context, sortState: FixedArray)
+ transitioning macro MergeCollapse(context: Context, sortState: SortState)
labels Bailout {
- const pendingRuns: FixedArray =
- UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]);
+ const pendingRuns: FixedArray = sortState.pendingRuns;
// Reload the stack size because MergeAt might change it.
while (GetPendingRunsSize(sortState) > 1) {
@@ -1576,11 +1248,11 @@ namespace array {
--n;
}
- CallMergeAt(context, sortState, n) otherwise Bailout;
+ MergeAt(n);
} else if (
GetPendingRunLength(pendingRuns, n) <=
GetPendingRunLength(pendingRuns, n + 1)) {
- CallMergeAt(context, sortState, n) otherwise Bailout;
+ MergeAt(n);
} else {
break;
}
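
The run-length comparisons above implement the classic Timsort stack invariants: for the three topmost pending runs A, B, C, require |A| > |B| + |C| and |B| > |C|. In sketch form, with merge_at(i) assumed to merge runs i and i+1 and shrink the stack by one:

    def merge_collapse(runs, merge_at):
        # runs: stack of (base, length) pairs, bottom run leftmost in the array.
        def length(i):
            return runs[i][1]
        while len(runs) > 1:
            n = len(runs) - 2
            if n > 0 and length(n - 1) <= length(n) + length(n + 1):
                if length(n - 1) < length(n + 1):
                    n -= 1               # merge toward the smaller neighbour
                merge_at(n)
            elif length(n) <= length(n + 1):
                merge_at(n)
            else:
                break                    # invariants hold; stop merging
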
@@ -1590,10 +1262,9 @@ namespace array {
// Regardless of invariants, merge all runs on the stack until only one
// remains. This is used at the end of the mergesort.
transitioning macro
- MergeForceCollapse(context: Context, sortState: FixedArray)
+ MergeForceCollapse(context: Context, sortState: SortState)
labels Bailout {
- let pendingRuns: FixedArray =
- UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]);
+ let pendingRuns: FixedArray = sortState.pendingRuns;
// Reload the stack size because MergeAt might change it.
while (GetPendingRunsSize(sortState) > 1) {
@@ -1604,40 +1275,13 @@ namespace array {
GetPendingRunLength(pendingRuns, n + 1)) {
--n;
}
- CallMergeAt(context, sortState, n) otherwise Bailout;
+ MergeAt(n);
}
}
- macro InitializeSortState(sortState: FixedArray) {
- sortState[kMinGallopIdx] = SmiConstant(kMinGallopWins);
- sortState[kTempArraySizeIdx] = SmiConstant(0);
-
- SetPendingRunsSize(sortState, 0);
- let pendingRuns: FixedArray =
- AllocateZeroedFixedArray(Convert<intptr>(kMaxMergePending));
- sortState[kPendingRunsIdx] = pendingRuns;
- }
-
- macro InitializeSortStateAccessor<Accessor: type>(sortState: FixedArray) {
- sortState[kAccessorIdx] = kFastElementsAccessorId;
- sortState[kLoadFnIdx] = Load<Accessor>;
- sortState[kStoreFnIdx] = Store<Accessor>;
- sortState[kCanUseSameAccessorFnIdx] = CanUseSameAccessor<Accessor>;
- }
-
- InitializeSortStateAccessor<GenericElementsAccessor>(sortState: FixedArray) {
- sortState[kAccessorIdx] = kGenericElementsAccessorId;
- sortState[kLoadFnIdx] = Load<GenericElementsAccessor>;
- sortState[kStoreFnIdx] = Store<GenericElementsAccessor>;
- sortState[kCanUseSameAccessorFnIdx] =
- CanUseSameAccessor<GenericElementsAccessor>;
- }
-
transitioning macro
- ArrayTimSortImpl(context: Context, sortState: FixedArray, length: Smi)
+ ArrayTimSortImpl(context: Context, sortState: SortState, length: Smi)
labels Bailout {
- InitializeSortState(sortState);
-
if (length < 2) return;
let remaining: Smi = length;
@@ -1646,9 +1290,7 @@ namespace array {
let low: Smi = 0;
const minRunLength: Smi = ComputeMinRunLength(remaining);
while (remaining != 0) {
- let currentRunLength: Smi =
- CountAndMakeRun(context, sortState, low, low + remaining)
- otherwise Bailout;
+ let currentRunLength: Smi = CountAndMakeRun(low, low + remaining);
// If the run is short, extend it to min(minRunLength, remaining).
if (currentRunLength < minRunLength) {
@@ -1672,26 +1314,59 @@ namespace array {
MergeForceCollapse(context, sortState) otherwise Bailout;
assert(GetPendingRunsSize(sortState) == 1);
- assert(
- GetPendingRunLength(
- UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]), 0) == length);
+ assert(GetPendingRunLength(sortState.pendingRuns, 0) == length);
+ }
+
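
Putting the pieces together, the driver loop that ArrayTimSortImpl implements looks roughly like this in Python, reusing the merge_low/merge_high/merge_collapse sketches above; CountAndMakeRun and the binary-insertion extension, which this diff elides, are inlined in simplified form:

    import bisect

    def count_and_make_run(a, low, high):
        # Length of the maximal run at a[low:]; strictly descending runs
        # are reversed in place so every run ends up ascending.
        if low + 1 >= high:
            return 1
        descending = a[low + 1] < a[low]
        i = low + 1
        while i + 1 < high and (a[i + 1] < a[i]) == descending:
            i += 1
        if descending:
            a[low:i + 1] = a[low:i + 1][::-1]
        return i - low + 1

    def binary_insertion_sort(a, low, high, start):
        # a[low:start] is sorted; extend the sorted region to a[low:high].
        for i in range(start, high):
            x = a[i]
            pos = bisect.bisect_right(a, x, low, i)
            del a[i]
            a.insert(pos, x)

    def tim_sort(a):
        n = len(a)
        if n < 2:
            return
        min_run = compute_min_run_length(n)
        runs = []                        # stack of (base, length) pairs

        def merge_at(i):
            base_a, len_a = runs[i]
            base_b, len_b = runs[i + 1]
            if len_a <= len_b:
                merge_low(a, base_a, len_a, base_b, len_b)
            else:
                merge_high(a, base_a, len_a, base_b, len_b)
            runs[i] = (base_a, len_a + len_b)
            del runs[i + 1]

        low, remaining = 0, n
        while remaining:
            run_len = count_and_make_run(a, low, low + remaining)
            if run_len < min_run:        # boost short runs artificially
                forced = min(min_run, remaining)
                binary_insertion_sort(a, low, low + forced, low + run_len)
                run_len = forced
            runs.append((low, run_len))
            merge_collapse(runs, merge_at)
            low += run_len
            remaining -= run_len
        while len(runs) > 1:             # MergeForceCollapse
            k = len(runs) - 2
            if k > 0 and runs[k - 1][1] < runs[k + 1][1]:
                k -= 1
            merge_at(k)
        assert runs == [(0, n)]
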
+ transitioning macro
+ CopyReceiverElementsToWorkArray(
+ implicit context: Context, sortState: SortState)(length: Smi)
+ labels Bailout {
+ // TODO(szuend): Investigate if we can use COW arrays or a memcpy + range
+ // barrier to speed this step up.
+ const loadFn = sortState.loadFn;
+ const workArray = sortState.workArray;
+
+ for (let i: Smi = 0; i < length; ++i) {
+ workArray.objects[i] = CallLoad(loadFn, i) otherwise Bailout;
+ }
+ }
+
+ transitioning macro
+ CopyWorkArrayToReceiver(implicit context: Context, sortState: SortState)(
+ length: Smi)
+ labels Bailout {
+ // TODO(szuend): Build fast-path that simply installs the work array as the
+ // new backing store where applicable.
+ const storeFn = sortState.storeFn;
+ const workArray = sortState.workArray;
+
+ for (let i: Smi = 0; i < length; ++i) {
+ CallStore(storeFn, i, workArray.objects[i]) otherwise Bailout;
+ }
}
transitioning builtin
- ArrayTimSort(context: Context, sortState: FixedArray, length: Smi): Object {
+ ArrayTimSort(context: Context, sortState: SortState, length: Smi): Object {
try {
- ArrayTimSortImpl(context, sortState, length)
- otherwise Slow;
+ CopyReceiverElementsToWorkArray(length) otherwise Slow;
+ ArrayTimSortImpl(context, sortState, length) otherwise Slow;
+
+ // The comparison function or toString might have changed the
+ // receiver. If that is the case, we switch to the slow path.
+ // TODO(szuend): Introduce "special" slow path that only copies,
+ // but skips the whole re-sorting.
+ sortState.CheckAccessor() otherwise Slow;
+ CopyWorkArrayToReceiver(length) otherwise Slow;
}
label Slow {
- if (sortState[kAccessorIdx] == kGenericElementsAccessorId) {
+ if (sortState.accessor == kGenericElementsAccessorId) {
// We were already on the slow path. This must not happen.
unreachable;
}
- sortState[kBailoutStatusIdx] = kSuccess;
-
- InitializeSortStateAccessor<GenericElementsAccessor>(sortState);
- ArrayTimSort(context, sortState, length);
+ const newSortState: SortState = NewSortState(
+ sortState.receiver, sortState.userCmpFn,
+ sortState.initialReceiverLength, sortState.workArray.length, true);
+ ArrayTimSort(context, newSortState, length);
}
return kSuccess;
}
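
The slow-path handling changed shape here: instead of mutating accessor fields on the existing sort state, a bailout now builds a fresh SortState pinned to the generic accessor and reruns the whole sort. The control flow, as a hedged sketch with hypothetical names:

    class Bailout(Exception):
        pass

    def array_tim_sort(state, length, run_sort):
        # Retry at most once; the generic path must never bail out again
        # (the Torque code marks that case unreachable).
        try:
            run_sort(state, length)
        except Bailout:
            assert not state.is_generic              # hypothetical flag
            retry = state.clone(force_generic=True)  # hypothetical helper
            array_tim_sort(retry, length, run_sort)
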
@@ -1716,21 +1391,12 @@ namespace array {
// 1. If comparefn is not undefined and IsCallable(comparefn) is false,
// throw a TypeError exception.
const comparefnObj: Object = arguments[0];
- if (comparefnObj != Undefined && !TaggedIsCallable(comparefnObj)) {
- ThrowTypeError(context, kBadSortComparisonFunction, comparefnObj);
- }
+ const comparefn = Cast<(Undefined | Callable)>(comparefnObj) otherwise
+ ThrowTypeError(kBadSortComparisonFunction, comparefnObj);
// 2. Let obj be ? ToObject(this value).
const obj: JSReceiver = ToObject(context, receiver);
- const sortState: FixedArray = AllocateZeroedFixedArray(kSortStateSize);
-
- sortState[kReceiverIdx] = obj;
- sortState[kUserCmpFnIdx] = comparefnObj;
- sortState[kSortComparePtrIdx] =
- comparefnObj != Undefined ? SortCompareUserFn : SortCompareDefault;
- sortState[kBailoutStatusIdx] = kSuccess;
-
// 3. Let len be ? ToLength(? Get(obj, "length")).
const len: Number = GetLengthProperty(obj);
@@ -1741,32 +1407,11 @@ namespace array {
const nofNonUndefined: Smi = PrepareElementsForSort(context, obj, len);
assert(nofNonUndefined <= len);
- let map: Map = obj.map;
- sortState[kInitialReceiverMapIdx] = map;
- sortState[kInitialReceiverLengthIdx] = len;
-
- try {
- let a: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+ if (nofNonUndefined < 2) return receiver;
- const elementsKind: ElementsKind = map.elements_kind;
- if (IsDoubleElementsKind(elementsKind)) {
- InitializeSortStateAccessor<FastDoubleElements>(sortState);
- } else if (elementsKind == PACKED_SMI_ELEMENTS) {
- InitializeSortStateAccessor<FastPackedSmiElements>(sortState);
- } else {
- InitializeSortStateAccessor<FastSmiOrObjectElements>(sortState);
- }
- ArrayTimSort(context, sortState, nofNonUndefined);
- }
- label Slow {
- if (map.elements_kind == DICTIONARY_ELEMENTS && IsExtensibleMap(map) &&
- !IsCustomElementsReceiverInstanceType(map.instance_type)) {
- InitializeSortStateAccessor<DictionaryElements>(sortState);
- } else {
- InitializeSortStateAccessor<GenericElementsAccessor>(sortState);
- }
- ArrayTimSort(context, sortState, nofNonUndefined);
- }
+ const sortState: SortState =
+ NewSortState(obj, comparefn, len, nofNonUndefined, false);
+ ArrayTimSort(context, sortState, nofNonUndefined);
return receiver;
}
diff --git a/deps/v8/tools/OWNERS b/deps/v8/tools/OWNERS
index 0e56af3129..85f514c4ab 100644
--- a/deps/v8/tools/OWNERS
+++ b/deps/v8/tools/OWNERS
@@ -1 +1,3 @@
machenbach@chromium.org
+sergiyb@chromium.org
+tmrts@chromium.org
\ No newline at end of file
diff --git a/deps/v8/tools/PRESUBMIT.py b/deps/v8/tools/PRESUBMIT.py
index f719c75eed..0445f5a3a3 100644
--- a/deps/v8/tools/PRESUBMIT.py
+++ b/deps/v8/tools/PRESUBMIT.py
@@ -4,5 +4,5 @@
def CheckChangeOnCommit(input_api, output_api):
tests = input_api.canned_checks.GetUnitTestsInDirectory(
- input_api, output_api, 'unittests')
+ input_api, output_api, 'unittests', whitelist=[r'.+_test\.py$'])
return input_api.RunTests(tests)
diff --git a/deps/v8/tools/android-run.py b/deps/v8/tools/android-run.py
index 4765f86b4c..66d333a25b 100755
--- a/deps/v8/tools/android-run.py
+++ b/deps/v8/tools/android-run.py
@@ -35,6 +35,9 @@
# and output special error string in case of non-zero exit code.
# Then we parse the output of 'adb shell' and look for that error string.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
from os.path import join, dirname, abspath
import subprocess
@@ -58,8 +61,8 @@ def Execute(cmdline):
exit_code = process.wait()
os.close(fd_out)
os.close(fd_err)
- output = file(outname).read()
- errors = file(errname).read()
+ output = open(outname).read()
+ errors = open(errname).read()
os.unlink(outname)
os.unlink(errname)
sys.stdout.write(output)
diff --git a/deps/v8/tools/avg.py b/deps/v8/tools/avg.py
index b9ceb0d907..5741acd388 100755
--- a/deps/v8/tools/avg.py
+++ b/deps/v8/tools/avg.py
@@ -1,4 +1,5 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
+
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
@@ -22,50 +23,53 @@ will output
[10/10] [default] : avg 22,885.80 stddev 1,941.80 ( 17,584.00 - 24,266.00) Kps
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
-import subprocess
+import math
import re
-import numpy
-import time
-import sys
import signal
+import subprocess
+import sys
-parser = argparse.ArgumentParser(
+PARSER = argparse.ArgumentParser(
description="A script that averages numbers from another script's output",
epilog="Example:\n\tavg.py 10 bash -c \"echo A: 100; echo B 120; sleep .1\""
)
-parser.add_argument(
+PARSER.add_argument(
'repetitions',
type=int,
help="number of times the command should be repeated")
-parser.add_argument(
+PARSER.add_argument(
'command',
nargs=argparse.REMAINDER,
help="command to run (no quotes needed)")
-parser.add_argument(
+PARSER.add_argument(
'--echo',
'-e',
action='store_true',
default=False,
help="set this flag to echo the command's output")
-args = vars(parser.parse_args())
+ARGS = vars(PARSER.parse_args())
-if (len(args['command']) == 0):
+if not ARGS['command']:
print("No command provided.")
exit(1)
class FieldWidth:
- def __init__(self, key=0, average=0, stddev=0, min=0, max=0):
- self.w = dict(key=key, average=average, stddev=stddev, min=min, max=max)
+ def __init__(self, points=0, key=0, average=0, stddev=0, min_width=0, max_width=0):
+ self.widths = dict(points=points, key=key, average=average, stddev=stddev,
+ min=min_width, max=max_width)
- def max_with(self, w2):
- self.w = {k: max(v, w2.w[k]) for k, v in self.w.items()}
+ def max_widths(self, other):
+ self.widths = {k: max(v, other.widths[k]) for k, v in self.widths.items()}
def __getattr__(self, key):
- return self.w[key]
+ return self.widths[key]
def fmtS(string, width=0):
@@ -76,6 +80,27 @@ def fmtN(num, width=0):
return "{0:>{1},.2f}".format(num, width)
+def fmt(num):
+ return "{0:>,.2f}".format(num)
+
+
+def format_line(points, key, average, stddev, min_value, max_value,
+ unit_string, widths):
+ return "{:>{}}; {:<{}}; {:>{}}; {:>{}}; {:>{}}; {:>{}}; {}".format(
+ points, widths.points,
+ key, widths.key,
+ average, widths.average,
+ stddev, widths.stddev,
+ min_value, widths.min,
+ max_value, widths.max,
+ unit_string)
+
+
+def fmt_reps(msrmnt):
+ rep_string = str(ARGS['repetitions'])
+ return "[{0:>{1}}/{2}]".format(msrmnt.size(), len(rep_string), rep_string)
+
+
class Measurement:
def __init__(self, key, unit):
@@ -102,14 +127,21 @@ class Measurement:
except ValueError:
print("Ignoring non-numeric value", value)
- def status(self, w):
- return "{}: avg {} stddev {} ({} - {}) {}".format(
- fmtS(self.key, w.key), fmtN(self.average, w.average),
- fmtN(self.stddev(), w.stddev), fmtN(self.min, w.min),
- fmtN(self.max, w.max), fmtS(self.unit_string()))
+ def status(self, widths):
+ return "{} {}: avg {} stddev {} ({} - {}) {}".format(
+ fmt_reps(self),
+ fmtS(self.key, widths.key), fmtN(self.average, widths.average),
+ fmtN(self.stddev(), widths.stddev), fmtN(self.min, widths.min),
+ fmtN(self.max, widths.max), fmtS(self.unit_string()))
+
+ def result(self, widths):
+ return format_line(self.size(), self.key, fmt(self.average),
+ fmt(self.stddev()), fmt(self.min),
+ fmt(self.max), self.unit_string(),
+ widths)
def unit_string(self):
- if self.unit == None:
+ if not self.unit:
return ""
return self.unit
@@ -119,21 +151,24 @@ class Measurement:
return self.M2 / (self.count - 1)
def stddev(self):
- return numpy.sqrt(self.variance())
+ return math.sqrt(self.variance())
def size(self):
return len(self.values)
def widths(self):
return FieldWidth(
- key=len(fmtS(self.key)),
- average=len(fmtN(self.average)),
- stddev=len(fmtN(self.stddev())),
- min=len(fmtN(self.min)),
- max=len(fmtN(self.max)))
+ points=len("{}".format(self.size())) + 2,
+ key=len(self.key),
+ average=len(fmt(self.average)),
+ stddev=len(fmt(self.stddev())),
+ min_width=len(fmt(self.min)),
+ max_width=len(fmt(self.max)))
-rep_string = str(args['repetitions'])
+def result_header(widths):
+ return format_line("#/{}".format(ARGS['repetitions']),
+ "id", "avg", "stddev", "min", "max", "unit", widths)
class Measurements:
@@ -141,70 +176,73 @@ class Measurements:
def __init__(self):
self.all = {}
self.default_key = '[default]'
- self.max_widths = FieldWidth()
+ self.max_widths = FieldWidth(
+ points=len("{}".format(ARGS['repetitions'])) + 2,
+ key=len("id"),
+ average=len("avg"),
+ stddev=len("stddev"),
+ min_width=len("min"),
+ max_width=len("max"))
+ self.last_status_len = 0
def record(self, key, value, unit):
- if (key == None):
+ if not key:
key = self.default_key
if key not in self.all:
self.all[key] = Measurement(key, unit)
self.all[key].addValue(value)
- self.max_widths.max_with(self.all[key].widths())
+ self.max_widths.max_widths(self.all[key].widths())
def any(self):
- if len(self.all) >= 1:
+ if self.all:
return next(iter(self.all.values()))
- else:
- return None
+ return None
- def format_status(self):
- m = self.any()
- if m == None:
- return ""
- return m.status(self.max_widths)
-
- def format_num(self, m):
- return "[{0:>{1}}/{2}]".format(m.size(), len(rep_string), rep_string)
+ def print_results(self):
+ print("{:<{}}".format("", self.last_status_len), end="\r")
+ print(result_header(self.max_widths), sep=" ")
+ for key in sorted(self.all):
+ print(self.all[key].result(self.max_widths), sep=" ")
def print_status(self):
- if len(self.all) == 0:
- print("No results found. Check format?")
- return
- print(self.format_num(self.any()), self.format_status(), sep=" ", end="")
-
- def print_results(self):
- for key in self.all:
- m = self.all[key]
- print(self.format_num(m), m.status(self.max_widths), sep=" ")
+ status = "No results found. Check format?"
+ measurement = MEASUREMENTS.any()
+ if measurement:
+ status = measurement.status(MEASUREMENTS.max_widths)
+ print("{:<{}}".format(status, self.last_status_len), end="\r")
+ self.last_status_len = len(status)
-measurements = Measurements()
+MEASUREMENTS = Measurements()
-def signal_handler(signal, frame):
+def signal_handler(signum, frame):
print("", end="\r")
- measurements.print_status()
- print()
- measurements.print_results()
+ MEASUREMENTS.print_results()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
-for x in range(0, args['repetitions']):
- proc = subprocess.Popen(args['command'], stdout=subprocess.PIPE)
+SCORE_REGEX = (r'\A((console.timeEnd: )?'
+ r'(?P<key>[^\s:,]+)[,:]?)?'
+ r'(^\s*|\s+)'
+ r'(?P<value>[0-9]+(.[0-9]+)?)'
+ r'\ ?(?P<unit>[^\d\W]\w*)?[.\s]*\Z')
+
+for x in range(0, ARGS['repetitions']):
+ proc = subprocess.Popen(ARGS['command'], stdout=subprocess.PIPE)
for line in proc.stdout:
- if args['echo']:
+ if ARGS['echo']:
print(line.decode(), end="")
- for m in re.finditer(
- r'\A((?P<key>.*[^\s\d:]+)[:]?)?\s*(?P<value>[0-9]+(.[0-9]+)?)\ ?(?P<unit>[^\d\W]\w*)?\s*\Z',
- line.decode()):
- measurements.record(m.group('key'), m.group('value'), m.group('unit'))
+ for m in re.finditer(SCORE_REGEX, line.decode()):
+ MEASUREMENTS.record(m.group('key'), m.group('value'), m.group('unit'))
proc.wait()
if proc.returncode != 0:
print("Child exited with status %d" % proc.returncode)
break
- measurements.print_status()
- print("", end="\r")
-measurements.print_results()
+ MEASUREMENTS.print_status()
+
+# Print final results
+MEASUREMENTS.print_results()
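
The stddev change above (numpy.sqrt to math.sqrt over an M2 accumulator) drops the script's only numpy dependency. The count/average/M2 triple is the classic Welford online-variance state; addValue is outside this diff, so the update rule below is an assumption inferred from variance() = M2 / (count - 1):

    import math

    class RunningStats:
        def __init__(self):
            self.count, self.average, self.m2 = 0, 0.0, 0.0

        def add_value(self, x):
            # Welford's single-pass update: numerically stabler than
            # accumulating sum and sum-of-squares separately.
            self.count += 1
            delta = x - self.average
            self.average += delta / self.count
            self.m2 += delta * (x - self.average)

        def stddev(self):
            return math.sqrt(self.m2 / (self.count - 1))
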
diff --git a/deps/v8/tools/bigint-tester.py b/deps/v8/tools/bigint-tester.py
index 2deab883fa..0940369ac5 100755
--- a/deps/v8/tools/bigint-tester.py
+++ b/deps/v8/tools/bigint-tester.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import math
import multiprocessing
@@ -294,7 +297,7 @@ def WrapRunOne(args):
return RunOne(*args)
def RunAll(args):
for op in args.op:
- for r in xrange(args.runs):
+ for r in range(args.runs):
yield (op, args.num_inputs, args.binary)
def Main():
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
index 709aade30f..70db89b5da 100755
--- a/deps/v8/tools/callstats.py
+++ b/deps/v8/tools/callstats.py
@@ -17,6 +17,9 @@ Commands:
For each command, you can try ./runtime-call-stats.py help command.
'''
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import json
import os
@@ -46,7 +49,7 @@ def print_command(cmd_args):
elif ' ' in arg:
arg = "'{}'".format(arg)
return arg
- print " ".join(map(fix_for_printing, cmd_args))
+ print(" ".join(map(fix_for_printing, cmd_args)))
def start_replay_server(args, sites, discard_output=True):
@@ -66,15 +69,15 @@ def start_replay_server(args, sites, discard_output=True):
"--inject_scripts=deterministic.js,{}".format(injection),
args.replay_wpr,
]
- print "=" * 80
+ print("=" * 80)
print_command(cmd_args)
if discard_output:
with open(os.devnull, 'w') as null:
server = subprocess.Popen(cmd_args, stdout=null, stderr=null)
else:
server = subprocess.Popen(cmd_args)
- print "RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid)
- print "=" * 80
+ print("RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid))
+ print("=" * 80)
return {'process': server, 'injection': injection}
@@ -85,7 +88,7 @@ def stop_replay_server(server):
def generate_injection(f, sites, refreshes=0):
- print >> f, """\
+ print("""\
(function() {
var s = window.sessionStorage.getItem("refreshCounter");
var refreshTotal = """, refreshes, """;
@@ -127,7 +130,7 @@ def generate_injection(f, sites, refreshes=0):
var sites =
""", json.dumps(sites), """;
onLoad(window.location.href);
-})();"""
+})();""", file=f)
def get_chrome_flags(js_flags, user_data_dir, arg_delimiter=""):
return [
@@ -161,9 +164,9 @@ def get_chrome_replay_flags(args, arg_delimiter=""):
]
def run_site(site, domain, args, timeout=None):
- print "="*80
- print "RUNNING DOMAIN %s" % domain
- print "="*80
+ print("="*80)
+ print("RUNNING DOMAIN %s" % domain)
+ print("="*80)
result_template = "{domain}#{count}.txt" if args.repeat else "{domain}.txt"
count = 0
if timeout is None: timeout = args.timeout
@@ -196,9 +199,9 @@ def run_site(site, domain, args, timeout=None):
"timeout", str(timeout),
args.with_chrome
] + chrome_flags + [ site ]
- print "- " * 40
+ print("- " * 40)
print_command(cmd_args)
- print "- " * 40
+ print("- " * 40)
with open(result, "wt") as f:
with open(args.log_stderr or os.devnull, 'at') as err:
status = subprocess.call(cmd_args, stdout=f, stderr=err)
@@ -212,8 +215,8 @@ def run_site(site, domain, args, timeout=None):
if os.path.isfile(result) and os.path.getsize(result) > 0:
if args.print_url:
with open(result, "at") as f:
- print >> f
- print >> f, "URL: {}".format(site)
+ print(file=f)
+ print("URL: {}".format(site), file=f)
retries_since_good_run = 0
break
if retries_since_good_run > MAX_NOF_RETRIES:
@@ -294,7 +297,7 @@ def do_run(args):
# Run them.
for site, domain, count, timeout in L:
if count is not None: domain = "{}%{}".format(domain, count)
- print(site, domain, timeout)
+ print((site, domain, timeout))
run_site(site, domain, args, timeout)
finally:
if replay_server:
@@ -459,11 +462,11 @@ def print_stats(S, args):
def stats(s, units=""):
conf = "{:0.1f}({:0.2f}%)".format(s['ci']['abs'], s['ci']['perc'])
return "{:8.1f}{} +/- {:15s}".format(s['average'], units, conf)
- print "{:>50s} {} {}".format(
+ print("{:>50s} {} {}".format(
key,
stats(value['time_stat'], units="ms"),
stats(value['count_stat'])
- )
+ ))
# Print and calculate partial sums, if necessary.
for i in range(low, high):
print_entry(*L[i])
@@ -479,7 +482,7 @@ def print_stats(S, args):
partial['count_list'][j] += v
# Print totals, if necessary.
if args.totals:
- print '-' * 80
+ print('-' * 80)
if args.limit != 0 and not args.aggregate:
partial['time_stat'] = statistics(partial['time_list'])
partial['count_stat'] = statistics(partial['count_list'])
@@ -500,9 +503,9 @@ def do_stats(args):
create_total_page_stats(domains, args)
for i, domain in enumerate(sorted(domains)):
if len(domains) > 1:
- if i > 0: print
- print "{}:".format(domain)
- print '=' * 80
+ if i > 0: print()
+ print("{}:".format(domain))
+ print('=' * 80)
domain_stats = domains[domain]
for key in domain_stats:
domain_stats[key]['time_stat'] = \
@@ -575,7 +578,7 @@ def do_json(args):
entry.append(round(s['ci']['perc'], 2))
stats.append(entry)
domains[domain] = stats
- print json.dumps(versions, separators=(',', ':'))
+ print(json.dumps(versions, separators=(',', ':')))
# Help.
diff --git a/deps/v8/tools/clusterfuzz/OWNERS b/deps/v8/tools/clusterfuzz/OWNERS
new file mode 100644
index 0000000000..c8693c972c
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/OWNERS
@@ -0,0 +1,5 @@
+set noparent
+
+machenbach@chromium.org
+sergiyb@chromium.org
+tmrts@chromium.org
\ No newline at end of file
diff --git a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
new file mode 100644
index 0000000000..d37e3dbf1b
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
@@ -0,0 +1,46 @@
+#
+# V8 correctness failure
+# V8 correctness configs: x64,ignition:x64,ignition_turbo
+# V8 correctness sources: sanity check failed
+# V8 correctness suppression:
+#
+# CHECK
+#
+# Compared x64,ignition with x64,ignition_turbo
+#
+# Flags of x64,ignition:
+--abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-num-compilation-tasks=0 --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+# Flags of x64,ignition_turbo:
+--abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-num-compilation-tasks=0 --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
+#
+# Difference:
+- unknown
++ not unknown
+#
+### Start of configuration x64,ignition:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+ weird error
+ ^
+3
+unknown
+
+
+### End of configuration x64,ignition
+#
+### Start of configuration x64,ignition_turbo:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+ weird other error
+^
+3
+not unknown
+
+
+### End of configuration x64,ignition_turbo
diff --git a/deps/v8/tools/clusterfuzz/testdata/test_d8_1.py b/deps/v8/tools/clusterfuzz/testdata/test_d8_1.py
index 15a93fa535..4a3d008077 100644
--- a/deps/v8/tools/clusterfuzz/testdata/test_d8_1.py
+++ b/deps/v8/tools/clusterfuzz/testdata/test_d8_1.py
@@ -2,7 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-print """
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print("""
1
v8-foozzie source: name/to/a/file.js
2
@@ -11,4 +14,4 @@ v8-foozzie source: name/to/file.js
^
3
unknown
-"""
+""")
diff --git a/deps/v8/tools/clusterfuzz/testdata/test_d8_2.py b/deps/v8/tools/clusterfuzz/testdata/test_d8_2.py
index f2bdacfaa1..824b222485 100644
--- a/deps/v8/tools/clusterfuzz/testdata/test_d8_2.py
+++ b/deps/v8/tools/clusterfuzz/testdata/test_d8_2.py
@@ -2,7 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-print """
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print("""
1
v8-foozzie source: name/to/a/file.js
2
@@ -11,4 +14,4 @@ v8-foozzie source: name/to/file.js
^
3
unknown
-"""
+""")
diff --git a/deps/v8/tools/clusterfuzz/testdata/test_d8_3.py b/deps/v8/tools/clusterfuzz/testdata/test_d8_3.py
index a6c8682b2f..0b19a3fc90 100644
--- a/deps/v8/tools/clusterfuzz/testdata/test_d8_3.py
+++ b/deps/v8/tools/clusterfuzz/testdata/test_d8_3.py
@@ -2,7 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-print """
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print("""
1
v8-foozzie source: name/to/a/file.js
2
@@ -11,4 +14,4 @@ v8-foozzie source: name/to/file.js
^
3
not unknown
-"""
+""")
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index 6fcd49f209..563c4643ae 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
@@ -7,6 +7,9 @@
V8 correctness fuzzer launcher script.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import hashlib
import itertools
@@ -51,6 +54,9 @@ CONFIGS = dict(
'--no-lazy',
'--no-lazy-inner-functions',
],
+ jitless=[
+ '--jitless',
+ ],
slow_path=[
'--force-slow-path',
],
@@ -75,6 +81,11 @@ ADDITIONAL_FLAGS = [
(0.1, '--stress-compaction-random'),
(0.1, '--random-gc-interval=2000'),
(0.2, '--noanalyze-environment-liveness'),
+ (0.1, '--stress-delay-tasks'),
+ (0.01, '--thread-pool-size=1'),
+ (0.01, '--thread-pool-size=2'),
+ (0.01, '--thread-pool-size=4'),
+ (0.01, '--thread-pool-size=8'),
]
# Timeout in seconds for one d8 run.
@@ -90,6 +101,7 @@ PREAMBLE = [
os.path.join(BASE_PATH, 'v8_suppressions.js'),
]
ARCH_MOCKS = os.path.join(BASE_PATH, 'v8_mock_archs.js')
+SANITY_CHECKS = os.path.join(BASE_PATH, 'v8_sanity_checks.js')
FLAGS = ['--abort_on_stack_or_string_length_overflow', '--expose-gc',
'--allow-natives-syntax', '--invoke-weak-callbacks', '--omit-quit',
@@ -118,10 +130,7 @@ FAILURE_TEMPLATE = FAILURE_HEADER_TEMPLATE + """#
%(second_config_flags)s
#
# Difference:
-%(difference)s
-#
-# Source file:
-%(source)s
+%(difference)s%(source_file_text)s
#
### Start of configuration %(first_config_label)s:
%(first_config_output)s
@@ -132,6 +141,12 @@ FAILURE_TEMPLATE = FAILURE_HEADER_TEMPLATE + """#
### End of configuration %(second_config_label)s
"""
+SOURCE_FILE_TEMPLATE = """
+#
+# Source file:
+%s"""
+
+
FUZZ_TEST_RE = re.compile(r'.*fuzz(-\d+\.js)')
SOURCE_RE = re.compile(r'print\("v8-foozzie source: (.*)"\);')
@@ -168,6 +183,9 @@ def parse_args():
parser.add_argument(
'--second-d8',
help='optional path to second d8 executable, default: same as first')
+ parser.add_argument(
+ '--skip-sanity-checks', default=False, action='store_true',
+ help='skip sanity checks for testing purposes')
parser.add_argument('testcase', help='path to test case')
options = parser.parse_args()
@@ -219,8 +237,8 @@ def content_bailout(content, ignore_fun):
"""Print failure state and return if ignore_fun matches content."""
bug = (ignore_fun(content) or '').strip()
if bug:
- print FAILURE_HEADER_TEMPLATE % dict(
- configs='', source_key='', suppression=bug)
+ print(FAILURE_HEADER_TEMPLATE % dict(
+ configs='', source_key='', suppression=bug))
return True
return False
@@ -230,10 +248,10 @@ def pass_bailout(output, step_number):
if output.HasTimedOut():
# Dashed output, so that no other clusterfuzz tools can match the
# words timeout or crash.
- print '# V8 correctness - T-I-M-E-O-U-T %d' % step_number
+ print('# V8 correctness - T-I-M-E-O-U-T %d' % step_number)
return True
if output.HasCrashed():
- print '# V8 correctness - C-R-A-S-H %d' % step_number
+ print('# V8 correctness - C-R-A-S-H %d' % step_number)
return True
return False
@@ -242,12 +260,38 @@ def fail_bailout(output, ignore_by_output_fun):
"""Print failure state and return if ignore_by_output_fun matches output."""
bug = (ignore_by_output_fun(output.stdout) or '').strip()
if bug:
- print FAILURE_HEADER_TEMPLATE % dict(
- configs='', source_key='', suppression=bug)
+ print(FAILURE_HEADER_TEMPLATE % dict(
+ configs='', source_key='', suppression=bug))
return True
return False
+def print_difference(
+ options, source_key, first_config_flags, second_config_flags,
+ first_config_output, second_config_output, difference, source=None):
+ # The first three entries will be parsed by clusterfuzz. Format changes
+ # will require changes on the clusterfuzz side.
+ first_config_label = '%s,%s' % (options.first_arch, options.first_config)
+ second_config_label = '%s,%s' % (options.second_arch, options.second_config)
+ source_file_text = SOURCE_FILE_TEMPLATE % source if source else ''
+ print((FAILURE_TEMPLATE % dict(
+ configs='%s:%s' % (first_config_label, second_config_label),
+ source_file_text=source_file_text,
+ source_key=source_key,
+ suppression='', # We can't tie bugs to differences.
+ first_config_label=first_config_label,
+ second_config_label=second_config_label,
+ first_config_flags=' '.join(first_config_flags),
+ second_config_flags=' '.join(second_config_flags),
+ first_config_output=
+ first_config_output.stdout.decode('utf-8', 'replace'),
+ second_config_output=
+ second_config_output.stdout.decode('utf-8', 'replace'),
+ source=source,
+ difference=difference.decode('utf-8', 'replace'),
+ )).encode('utf-8', 'replace'))
+
+
def main():
options = parse_args()
rng = random.Random(options.random_seed)
@@ -276,28 +320,49 @@ def main():
if rng.random() < p:
second_config_flags.append(flag)
- def run_d8(d8, config_flags):
+ def run_d8(d8, config_flags, config_label=None, testcase=options.testcase):
preamble = PREAMBLE[:]
if options.first_arch != options.second_arch:
preamble.append(ARCH_MOCKS)
- args = [d8] + config_flags + preamble + [options.testcase]
- print " ".join(args)
+ args = [d8] + config_flags + preamble + [testcase]
+ if config_label:
+ print('# Command line for %s comparison:' % config_label)
+ print(' '.join(args))
if d8.endswith('.py'):
# Wrap with python in tests.
args = [sys.executable] + args
return v8_commands.Execute(
args,
- cwd=os.path.dirname(os.path.abspath(options.testcase)),
+ cwd=os.path.dirname(os.path.abspath(testcase)),
timeout=TIMEOUT,
)
- first_config_output = run_d8(options.first_d8, first_config_flags)
+ # Sanity checks. Run both configurations with the sanity-checks file only and
+ # bail out early if different.
+ if not options.skip_sanity_checks:
+ first_config_output = run_d8(
+ options.first_d8, first_config_flags, testcase=SANITY_CHECKS)
+ second_config_output = run_d8(
+ options.second_d8, second_config_flags, testcase=SANITY_CHECKS)
+ difference, _ = suppress.diff(
+ first_config_output.stdout, second_config_output.stdout)
+ if difference:
+ # Special source key for sanity checks so that clusterfuzz dedupes all
+ # cases on this in case it's hit.
+ source_key = 'sanity check failed'
+ print_difference(
+ options, source_key, first_config_flags, second_config_flags,
+ first_config_output, second_config_output, difference)
+ return RETURN_FAIL
+
+ first_config_output = run_d8(options.first_d8, first_config_flags, 'first')
# Early bailout based on first run's output.
if pass_bailout(first_config_output, 1):
return RETURN_PASS
- second_config_output = run_d8(options.second_d8, second_config_flags)
+ second_config_output = run_d8(
+ options.second_d8, second_config_flags, 'second')
# Bailout based on second run's output.
if pass_bailout(second_config_output, 2):
@@ -309,7 +374,6 @@ def main():
if source:
source_key = hashlib.sha1(source).hexdigest()[:ORIGINAL_SOURCE_HASH_LENGTH]
else:
- source = ORIGINAL_SOURCE_DEFAULT
source_key = ORIGINAL_SOURCE_DEFAULT
if difference:
@@ -321,32 +385,16 @@ def main():
if fail_bailout(second_config_output, suppress.ignore_by_output2):
return RETURN_FAIL
- # The first three entries will be parsed by clusterfuzz. Format changes
- # will require changes on the clusterfuzz side.
- first_config_label = '%s,%s' % (options.first_arch, options.first_config)
- second_config_label = '%s,%s' % (options.second_arch, options.second_config)
- print (FAILURE_TEMPLATE % dict(
- configs='%s:%s' % (first_config_label, second_config_label),
- source_key=source_key,
- suppression='', # We can't tie bugs to differences.
- first_config_label=first_config_label,
- second_config_label=second_config_label,
- first_config_flags=' '.join(first_config_flags),
- second_config_flags=' '.join(second_config_flags),
- first_config_output=
- first_config_output.stdout.decode('utf-8', 'replace'),
- second_config_output=
- second_config_output.stdout.decode('utf-8', 'replace'),
- source=source,
- difference=difference.decode('utf-8', 'replace'),
- )).encode('utf-8', 'replace')
+ print_difference(
+ options, source_key, first_config_flags, second_config_flags,
+ first_config_output, second_config_output, difference, source)
return RETURN_FAIL
# TODO(machenbach): Figure out if we could also return a bug in case there's
# no difference, but one of the line suppressions has matched - and without
# the match there would be a difference.
- print '# V8 correctness - pass'
+ print('# V8 correctness - pass')
return RETURN_PASS
@@ -356,17 +404,17 @@ if __name__ == "__main__":
except SystemExit:
# Make sure clusterfuzz reports internal errors and wrong usage.
# Use one label for all internal and usage errors.
- print FAILURE_HEADER_TEMPLATE % dict(
- configs='', source_key='', suppression='wrong_usage')
+ print(FAILURE_HEADER_TEMPLATE % dict(
+ configs='', source_key='', suppression='wrong_usage'))
result = RETURN_FAIL
except MemoryError:
# Running out of memory happens occasionally but is not actionable.
- print '# V8 correctness - pass'
+ print('# V8 correctness - pass')
result = RETURN_PASS
except Exception as e:
- print FAILURE_HEADER_TEMPLATE % dict(
- configs='', source_key='', suppression='internal_error')
- print '# Internal error: %s' % e
+ print(FAILURE_HEADER_TEMPLATE % dict(
+ configs='', source_key='', suppression='internal_error'))
+ print('# Internal error: %s' % e)
traceback.print_exc(file=sys.stdout)
result = RETURN_FAIL
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
index 3b95111271..e9559f6e0c 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -113,10 +113,11 @@ otherfile.js: TypeError: undefined is not a constructor
def cut_verbose_output(stdout):
- return '\n'.join(stdout.split('\n')[2:])
+ # This removes the first lines, which contain the d8 command lines.
+ return '\n'.join(stdout.split('\n')[4:])
-def run_foozzie(first_d8, second_d8):
+def run_foozzie(first_d8, second_d8, *extra_flags):
return subprocess.check_output([
sys.executable, FOOZZIE,
'--random-seed', '12345',
@@ -125,23 +126,31 @@ def run_foozzie(first_d8, second_d8):
'--first-config', 'ignition',
'--second-config', 'ignition_turbo',
os.path.join(TEST_DATA, 'fuzz-123.js'),
- ])
+ ] + list(extra_flags))
class SystemTest(unittest.TestCase):
def testSyntaxErrorDiffPass(self):
- stdout = run_foozzie('test_d8_1.py', 'test_d8_2.py')
+ stdout = run_foozzie('test_d8_1.py', 'test_d8_2.py', '--skip-sanity-checks')
self.assertEquals('# V8 correctness - pass\n', cut_verbose_output(stdout))
def testDifferentOutputFail(self):
with open(os.path.join(TEST_DATA, 'failure_output.txt')) as f:
expected_output = f.read()
with self.assertRaises(subprocess.CalledProcessError) as ctx:
- run_foozzie('test_d8_1.py', 'test_d8_3.py')
+ run_foozzie('test_d8_1.py', 'test_d8_3.py', '--skip-sanity-checks')
e = ctx.exception
self.assertEquals(v8_foozzie.RETURN_FAIL, e.returncode)
self.assertEquals(expected_output, cut_verbose_output(e.output))
+ def testSanityCheck(self):
+ with open(os.path.join(TEST_DATA, 'sanity_check_output.txt')) as f:
+ expected_output = f.read()
+ with self.assertRaises(subprocess.CalledProcessError) as ctx:
+ run_foozzie('test_d8_1.py', 'test_d8_3.py')
+ e = ctx.exception
+ self.assertEquals(v8_foozzie.RETURN_FAIL, e.returncode)
+ self.assertEquals(expected_output, e.output)
if __name__ == '__main__':
unittest.main()
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
index 8cc1939e38..39e983f74a 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -8,12 +8,10 @@ import random
# List of <probability>, <1st config name>, <2nd config name>, <2nd d8>.
# Probabilities must add up to 100.
FOOZZIE_EXPERIMENTS = [
- [5, 'ignition', 'ignition_asm', 'd8'],
- [5, 'ignition', 'trusted', 'd8'],
- [5, 'ignition', 'trusted_opt', 'd8'],
+ [10, 'ignition', 'jitless', 'd8'],
[10, 'ignition', 'slow_path', 'd8'],
[5, 'ignition', 'slow_path_opt', 'd8'],
- [25, 'ignition', 'ignition_turbo', 'd8'],
+ [30, 'ignition', 'ignition_turbo', 'd8'],
[20, 'ignition', 'ignition_turbo_opt', 'd8'],
[5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
[5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
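
The comment above this table says the probabilities must add up to 100; the table is truncated by the diff, but a one-line self-check against the full list (a hypothetical addition, not part of this patch) would be:

    assert sum(entry[0] for entry in FOOZZIE_EXPERIMENTS) == 100, \
        'FOOZZIE_EXPERIMENTS probabilities must add up to 100'
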
diff --git a/deps/v8/tools/clusterfuzz/v8_sanity_checks.js b/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
new file mode 100644
index 0000000000..2b7cb65a1b
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
@@ -0,0 +1,22 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is executed separately before the correctness test case. Add here
+// checking of global properties that should never differ in any configuration.
+// A difference found in the prints below will prevent any further correctness
+// comparison for the selected configurations to avoid flooding bugs.
+
+print("https://crbug.com/932656");
+print(Object.getOwnPropertyNames(this));
+
+print("https://crbug.com/935800");
+(function () {
+ function foo() {
+ "use asm";
+ function baz() {}
+ return {bar: baz};
+ }
+ // TODO(mstarzinger): Uncomment once https://crbug.com/935800 is resolved.
+ // print(Object.getOwnPropertyNames(foo().bar));
+})();
diff --git a/deps/v8/tools/concatenate-files.py b/deps/v8/tools/concatenate-files.py
index 8a9012cfa2..a5dbe45ccc 100644
--- a/deps/v8/tools/concatenate-files.py
+++ b/deps/v8/tools/concatenate-files.py
@@ -35,6 +35,9 @@
# on all supported build platforms, but Python is, and hence this provides
# us with an easy and uniform way of doing this on all platforms.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import optparse
@@ -49,7 +52,7 @@ def Concatenate(filenames):
True, if the operation was successful.
"""
if len(filenames) < 2:
- print "An error occurred generating %s:\nNothing to do." % filenames[-1]
+ print("An error occurred generating %s:\nNothing to do." % filenames[-1])
return False
try:
@@ -59,7 +62,7 @@ def Concatenate(filenames):
target.write(current.read())
return True
except IOError as e:
- print "An error occurred when writing %s:\n%s" % (filenames[-1], e)
+ print("An error occurred when writing %s:\n%s" % (filenames[-1], e))
return False
diff --git a/deps/v8/tools/deprecation_stats.py b/deps/v8/tools/deprecation_stats.py
index 780832e681..628eebc779 100755
--- a/deps/v8/tools/deprecation_stats.py
+++ b/deps/v8/tools/deprecation_stats.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
from datetime import datetime
import re
diff --git a/deps/v8/tools/dev/gen-tags.py b/deps/v8/tools/dev/gen-tags.py
index 256f65a401..a478ee3f56 100755
--- a/deps/v8/tools/dev/gen-tags.py
+++ b/deps/v8/tools/dev/gen-tags.py
@@ -15,6 +15,10 @@ The example usage is as follows:
If no <arch> is given, it generates tags file for all arches:
$ tools/dev/gen-tags.py
"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
import subprocess
import sys
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index e7a4a239e0..bc808c31ae 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -281,8 +281,8 @@ class Config(object):
match = csa_trap.search(output)
extra_opt = match.group(1) if match else ""
cmdline = re.compile("python ../../tools/run.py ./mksnapshot (.*)")
- match = cmdline.search(output)
- cmdline = PrepareMksnapshotCmdline(match.group(1), path) + extra_opt
+ orig_cmdline = cmdline.search(output).group(1).strip()
+ cmdline = PrepareMksnapshotCmdline(orig_cmdline, path) + extra_opt
_Notify("V8 build requires your attention",
"Detected mksnapshot failure, re-running in GDB...")
_Call(cmdline)
@@ -294,8 +294,9 @@ class Config(object):
tests = ""
else:
tests = " ".join(self.tests)
- return _Call("tools/run-tests.py --outdir=%s %s" %
- (GetPath(self.arch, self.mode), tests))
+ return _Call('"%s" ' % sys.executable +
+ os.path.join("tools", "run-tests.py") +
+ " --outdir=%s %s" % (GetPath(self.arch, self.mode), tests))
def GetTestBinary(argstring):
for suite in TESTSUITES_TARGETS:
diff --git a/deps/v8/tools/dev/v8gen.py b/deps/v8/tools/dev/v8gen.py
index b8a34e2af2..0b6e1d1cdc 100755
--- a/deps/v8/tools/dev/v8gen.py
+++ b/deps/v8/tools/dev/v8gen.py
@@ -38,6 +38,9 @@ v8gen.py list
-------------------------------------------------------------------------------
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import os
import re
@@ -144,8 +147,8 @@ class GenerateGnArgs(object):
# Check for builder/config in mb config.
if self._options.builder not in self._mbw.masters[self._options.master]:
- print '%s does not exist in %s for %s' % (
- self._options.builder, CONFIG, self._options.master)
+ print('%s does not exist in %s for %s' % (
+ self._options.builder, CONFIG, self._options.master))
return 1
# TODO(machenbach): Check if the requested configurations has switched to
@@ -186,19 +189,19 @@ class GenerateGnArgs(object):
return 0
def cmd_list(self):
- print '\n'.join(sorted(self._mbw.masters[self._options.master]))
+ print('\n'.join(sorted(self._mbw.masters[self._options.master])))
return 0
def verbose_print_1(self, text):
if self._options.verbosity >= 1:
- print '#' * 80
- print text
+ print('#' * 80)
+ print(text)
def verbose_print_2(self, text):
if self._options.verbosity >= 2:
indent = ' ' * 2
for l in text.splitlines():
- print indent + l
+ print(indent + l)
def _call_cmd(self, args):
self.verbose_print_1(' '.join(args))
@@ -290,9 +293,9 @@ class GenerateGnArgs(object):
self._mbw.ReadConfigFile()
if not self._options.master in self._mbw.masters:
- print '%s not found in %s\n' % (self._options.master, CONFIG)
- print 'Choose one of:\n%s\n' % (
- '\n'.join(sorted(self._mbw.masters.keys())))
+ print('%s not found in %s\n' % (self._options.master, CONFIG))
+ print('Choose one of:\n%s\n' % (
+ '\n'.join(sorted(self._mbw.masters.keys()))))
return 1
return self._options.func()
diff --git a/deps/v8/tools/dump-cpp.py b/deps/v8/tools/dump-cpp.py
index 1fc15d9fc1..31dc0536dc 100644
--- a/deps/v8/tools/dump-cpp.py
+++ b/deps/v8/tools/dump-cpp.py
@@ -6,6 +6,9 @@
# This script executes dumpcpp.js, collects all dumped C++ symbols,
# and merges them back into v8 log.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
import platform
import re
@@ -44,10 +47,10 @@ if __name__ == '__main__':
if d8_line:
d8_exec = d8_line.group(1)
if not is_file_executable(d8_exec):
- print 'd8 binary path found in {} is not executable.'.format(log_file)
+ print('d8 binary path found in {} is not executable.'.format(log_file))
sys.exit(-1)
else:
- print 'No d8 binary path found in {}.'.format(log_file)
+ print('No d8 binary path found in {}.'.format(log_file))
sys.exit(-1)
args = [d8_exec] + JS_FILES + ['--'] + args
@@ -57,9 +60,9 @@ if __name__ == '__main__':
stdin=f)
out, err = sp.communicate()
if debug:
- print err
+ print(err)
if sp.returncode != 0:
- print out
+ print(out)
exit(-1)
if on_windows and out:
diff --git a/deps/v8/tools/eval_gc_nvp.py b/deps/v8/tools/eval_gc_nvp.py
index 25afe8e4f0..222ebef7d3 100755
--- a/deps/v8/tools/eval_gc_nvp.py
+++ b/deps/v8/tools/eval_gc_nvp.py
@@ -7,10 +7,14 @@
"""This script is used to analyze GCTracer's NVP output."""
+# for py2/py3 compatibility
+from __future__ import print_function
+
+
from argparse import ArgumentParser
from copy import deepcopy
from gc_nvp_common import split_nvp
-from math import ceil,log
+from math import ceil, log
from sys import stdin
diff --git a/deps/v8/tools/find-commit-for-patch.py b/deps/v8/tools/find-commit-for-patch.py
index 657826c132..cca1f40d55 100755
--- a/deps/v8/tools/find-commit-for-patch.py
+++ b/deps/v8/tools/find-commit-for-patch.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import subprocess
import sys
diff --git a/deps/v8/tools/find_depot_tools.py b/deps/v8/tools/find_depot_tools.py
index 95ae9e8a2d..db3ffa224c 100644
--- a/deps/v8/tools/find_depot_tools.py
+++ b/deps/v8/tools/find_depot_tools.py
@@ -4,6 +4,9 @@
"""Small utility function to find depot_tools and add it to the python path.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
import sys
@@ -36,5 +39,5 @@ def add_depot_tools_to_path():
return i
previous_dir = root_dir
root_dir = os.path.dirname(root_dir)
- print >> sys.stderr, 'Failed to find depot_tools'
+ print('Failed to find depot_tools', file=sys.stderr)
return None
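
find_depot_tools.py also covers the stderr form: py2's chevron syntax "print >> sys.stderr, msg" is a syntax error on Python 3, while the keyword argument below is valid on both interpreters once the future import is in effect.

    from __future__ import print_function
    import sys

    # Portable replacement for: print >> sys.stderr, 'Failed to find depot_tools'
    print('Failed to find depot_tools', file=sys.stderr)
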
diff --git a/deps/v8/tools/gc-nvp-to-csv.py b/deps/v8/tools/gc-nvp-to-csv.py
index 26ed8e1c97..b3ad374185 100755
--- a/deps/v8/tools/gc-nvp-to-csv.py
+++ b/deps/v8/tools/gc-nvp-to-csv.py
@@ -11,20 +11,25 @@
# Usage: gc-nvp-to-csv.py <GC-trace-filename>
#
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
import sys
import gc_nvp_common
+
def process_trace(filename):
trace = gc_nvp_common.parse_gc_trace(filename)
if len(trace):
keys = trace[0].keys()
- print ', '.join(keys)
+ print(', '.join(keys))
for entry in trace:
- print ', '.join(map(lambda key: str(entry[key]), keys))
+ print(', '.join(map(lambda key: str(entry[key]), keys)))
if len(sys.argv) != 2:
- print "Usage: %s <GC-trace-filename>" % sys.argv[0]
+ print("Usage: %s <GC-trace-filename>" % sys.argv[0])
sys.exit(1)
process_trace(sys.argv[1])
diff --git a/deps/v8/tools/gc-nvp-trace-processor.py b/deps/v8/tools/gc-nvp-trace-processor.py
index 21526ae9d6..75d50b1cda 100755
--- a/deps/v8/tools/gc-nvp-trace-processor.py
+++ b/deps/v8/tools/gc-nvp-trace-processor.py
@@ -37,10 +37,21 @@
#
+# for py2/py3 compatibility
from __future__ import with_statement
+from __future__ import print_function
+from functools import reduce
+
import sys, types, subprocess, math
import gc_nvp_common
+
+try:
+ long # Python 2
+except NameError:
+ long = int # Python 3
+
+
def flatten(l):
flat = []
for i in l: flat.extend(i)
@@ -62,7 +73,7 @@ class Item(object):
self.title = title
self.axis = axis
self.props = keywords
- if type(field) is types.ListType:
+ if type(field) is list:
self.field = field
else:
self.field = [field]
@@ -135,7 +146,7 @@ def is_y2_used(plot):
def get_field(trace_line, field):
t = type(field)
- if t is types.StringType:
+ if t is bytes:
return trace_line[field]
elif t is types.FunctionType:
return field(trace_line)
@@ -177,7 +188,7 @@ def plot_all(plots, trace, prefix):
outfilename = "%s_%d.png" % (prefix, len(charts))
charts.append(outfilename)
script = generate_script_and_datafile(plot, trace, '~datafile', outfilename)
- print 'Plotting %s...' % outfilename
+ print('Plotting %s...' % outfilename)
gnuplot(script)
return charts
@@ -350,10 +361,10 @@ def process_trace(filename):
out.write('<img src="%s">' % chart)
out.write('</body></html>')
- print "%s generated." % (filename + '.html')
+ print("%s generated." % (filename + '.html'))
if len(sys.argv) != 2:
- print "Usage: %s <GC-trace-filename>" % sys.argv[0]
+ print("Usage: %s <GC-trace-filename>" % sys.argv[0])
sys.exit(1)
process_trace(sys.argv[1])
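
Two more py2/py3 seams appear in gc-nvp-trace-processor.py: the long builtin, which Python 3 folded into int, and the types.ListType/types.StringType aliases, which Python 3 removed. (On Python 2, types.StringType is str and str is bytes, so the replacement "t is bytes" preserves py2 behaviour; on Python 3 a text value has type str, so that branch would need another pass.) The try/except probe in the hunk is the standard shim; below is a sketch of the same idiom with an isinstance check that holds on both sides — the helper name is illustrative, not from the patch:

    try:
        long              # Python 2: the builtin exists
    except NameError:
        long = int        # Python 3: int is unbounded and covers long

    def is_integer(value):
        # isinstance with a tuple works on both interpreters once the shim
        # ran; "type(x) is ..." checks, as in the patch, miss subclasses.
        return isinstance(value, (int, long))
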
diff --git a/deps/v8/tools/gcmole/parallel.py b/deps/v8/tools/gcmole/parallel.py
index 0c045f423c..7ff95ccadc 100755
--- a/deps/v8/tools/gcmole/parallel.py
+++ b/deps/v8/tools/gcmole/parallel.py
@@ -20,6 +20,9 @@ ______________ file2
______________ finish <exit code of clang --opt file2> ______________
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import itertools
import multiprocessing
import subprocess
@@ -39,6 +42,6 @@ if __name__ == '__main__':
cmdlines = ["%s %s" % (sys.argv[1], filename) for filename in sys.argv[2:]]
for filename, result in itertools.izip(
sys.argv[2:], pool.imap(invoke, cmdlines)):
- print "______________ %s" % filename
- print result[0]
- print "______________ finish %d ______________" % result[1]
+ print("______________ %s" % filename)
+ print(result[0])
+ print("______________ finish %d ______________" % result[1])
diff --git a/deps/v8/tools/gcmole/run-gcmole.py b/deps/v8/tools/gcmole/run-gcmole.py
index cb4c74e31c..76a6b55d44 100755
--- a/deps/v8/tools/gcmole/run-gcmole.py
+++ b/deps/v8/tools/gcmole/run-gcmole.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
import os.path
import signal
@@ -19,8 +22,8 @@ BASE_PATH = os.path.dirname(os.path.dirname(GCMOLE_PATH))
assert len(sys.argv) == 2
if not os.path.isfile("out/Release/gen/torque-generated/builtin-definitions-from-dsl.h"):
- print "Expected generated headers in out/Release/gen."
- print "Either build v8 in out/Release or change gcmole.lua:115"
+ print("Expected generated headers in out/Release/gen.")
+ print("Either build v8 in out/Release or change gcmole.lua:115")
sys.exit(-1)
proc = subprocess.Popen(
diff --git a/deps/v8/tools/gdb-v8-support.py b/deps/v8/tools/gdb-v8-support.py
index a0262f0a57..f8442bf462 100644
--- a/deps/v8/tools/gdb-v8-support.py
+++ b/deps/v8/tools/gdb-v8-support.py
@@ -25,12 +25,15 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import re
import tempfile
import os
import subprocess
import time
-
+import gdb
kSmiTag = 0
kSmiTagSize = 1
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index 5e98d92d6f..c45bc6df33 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -4,7 +4,7 @@
# Print tagged object.
define job
-call _v8_internal_Print_Object((void*)($arg0))
+call (void) _v8_internal_Print_Object((void*)($arg0))
end
document job
Print a v8 JavaScript object
@@ -13,7 +13,7 @@ end
# Print content of v8::internal::Handle.
define jh
-call _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).location_))
+call (void) _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).location_))
end
document jh
Print content of a v8::internal::Handle
@@ -22,7 +22,7 @@ end
# Print content of v8::Local handle.
define jlh
-call _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).val_))
+call (void) _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).val_))
end
document jlh
Print content of a v8::Local handle
@@ -31,7 +31,7 @@ end
# Print Code objects containing given PC.
define jco
-call _v8_internal_Print_Code((void*)($arg0))
+call (void) _v8_internal_Print_Code((void*)($arg0))
end
document jco
Print a v8 Code object from an internal code address
@@ -40,7 +40,7 @@ end
# Print LayoutDescriptor.
define jld
-call _v8_internal_Print_LayoutDescriptor((void*)($arg0))
+call (void) _v8_internal_Print_LayoutDescriptor((void*)($arg0))
end
document jld
Print a v8 LayoutDescriptor object
@@ -49,7 +49,7 @@ end
# Print TransitionTree.
define jtt
-call _v8_internal_Print_TransitionTree((void*)($arg0))
+call (void) _v8_internal_Print_TransitionTree((void*)($arg0))
end
document jtt
Print the complete transition tree of the given v8 Map.
@@ -58,7 +58,7 @@ end
# Print JavaScript stack trace.
define jst
-call _v8_internal_Print_StackTrace()
+call (void) _v8_internal_Print_StackTrace()
end
document jst
Print the current JavaScript stack trace
@@ -82,7 +82,7 @@ define bta
python
import re
frame_re = re.compile("^#(\d+)\s*(?:0x[a-f\d]+ in )?(.+) \(.+ at (.+)")
-assert_re = re.compile("^\s*(\S+) = .+<v8::internal::Per\w+AssertType::(\w+)_ASSERT, (false|true)>")
+assert_re = re.compile("^\s*(\S+) = .+<v8::internal::Per\w+AssertScope<v8::internal::(\S*), (false|true)>")
btl = gdb.execute("backtrace full", to_string = True).splitlines()
for l in btl:
match = frame_re.match(l)
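
The (void) casts added throughout gdbinit track a GDB behaviour change: when a symbol carries no debug information, GDB cannot infer the function's return type and refuses the call unless the call expression is cast. The same calls can be issued from the Python API these tools already use (gdb.execute appears in the bta macro above); a sketch, with the address argument as a placeholder:

    import gdb

    def print_v8_object(tagged_ptr):
        # Mirrors the job macro: the (void) cast lets GDB call a function
        # whose return type is unknown to it, discarding the result.
        gdb.execute("call (void) _v8_internal_Print_Object((void*)(%s))"
                    % tagged_ptr)
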
diff --git a/deps/v8/tools/gen-inlining-tests.py b/deps/v8/tools/gen-inlining-tests.py
index a79023642e..400386c49c 100644
--- a/deps/v8/tools/gen-inlining-tests.py
+++ b/deps/v8/tools/gen-inlining-tests.py
@@ -1,9 +1,11 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
from collections import namedtuple
import textwrap
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index af6e2f3cb4..b95cd12164 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -46,6 +46,9 @@
# the generated libv8 binary.
#
+# for py2/py3 compatibility
+from __future__ import print_function
+
import re
import sys
@@ -82,7 +85,6 @@ consts_misc = [
{ 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
{ 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
{ 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
- { 'name': 'PointerSizeLog2', 'value': 'kPointerSizeLog2' },
{ 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
@@ -124,26 +126,14 @@ consts_misc = [
'value': 'PropertyDetails::RepresentationField::kMask' },
{ 'name': 'prop_representation_shift',
'value': 'PropertyDetails::RepresentationField::kShift' },
- { 'name': 'prop_representation_integer8',
- 'value': 'Representation::Kind::kInteger8' },
- { 'name': 'prop_representation_uinteger8',
- 'value': 'Representation::Kind::kUInteger8' },
- { 'name': 'prop_representation_integer16',
- 'value': 'Representation::Kind::kInteger16' },
- { 'name': 'prop_representation_uinteger16',
- 'value': 'Representation::Kind::kUInteger16' },
{ 'name': 'prop_representation_smi',
'value': 'Representation::Kind::kSmi' },
- { 'name': 'prop_representation_integer32',
- 'value': 'Representation::Kind::kInteger32' },
{ 'name': 'prop_representation_double',
'value': 'Representation::Kind::kDouble' },
{ 'name': 'prop_representation_heapobject',
'value': 'Representation::Kind::kHeapObject' },
{ 'name': 'prop_representation_tagged',
'value': 'Representation::Kind::kTagged' },
- { 'name': 'prop_representation_external',
- 'value': 'Representation::Kind::kExternal' },
{ 'name': 'prop_desc_key',
'value': 'DescriptorArray::kEntryKeyIndex' },
@@ -250,8 +240,8 @@ extras_accessors = [
'JSObject, elements, Object, kElementsOffset',
'JSObject, internal_fields, uintptr_t, kHeaderSize',
'FixedArray, data, uintptr_t, kHeaderSize',
- 'FixedTypedArrayBase, external_pointer, Object, kExternalPointerOffset',
- 'JSArrayBuffer, backing_store, Object, kBackingStoreOffset',
+ 'FixedTypedArrayBase, external_pointer, uintptr_t, kExternalPointerOffset',
+ 'JSArrayBuffer, backing_store, uintptr_t, kBackingStoreOffset',
'JSArrayBuffer, byte_length, size_t, kByteLengthOffset',
'JSArrayBufferView, byte_length, size_t, kByteLengthOffset',
'JSArrayBufferView, byte_offset, size_t, kByteOffsetOffset',
@@ -313,10 +303,12 @@ header = '''
#include "src/frames-inl.h" /* for architecture-specific frame constants */
#include "src/contexts.h"
#include "src/objects.h"
+#include "src/objects/data-handler.h"
#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-string-iterator.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
extern "C" {
@@ -332,6 +324,9 @@ STACK_FRAME_TYPE_LIST(FRAME_CONST)
footer = '''
}
+
+}
+}
'''
#
@@ -624,7 +619,7 @@ def emit_set(out, consts):
# Emit the whole output file.
#
def emit_config():
- out = file(sys.argv[1], 'w');
+ out = open(sys.argv[1], 'w');
out.write(header);
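
Besides moving the generated code from "using namespace v8::internal" into explicit nested namespaces, the compatibility fix here is file() -> open(): the file type was removed in Python 3, while open behaves the same on both interpreters for this use. A sketch of the portable spelling, with a context manager so the handle is closed even on error (the header string is a stand-in for the script's real one):

    import sys

    header = "/* generated by gen-postmortem-metadata.py */\n"  # placeholder

    # file(sys.argv[1], 'w') exists only on Python 2; open() works on both.
    with open(sys.argv[1], 'w') as out:
        out.write(header)
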
diff --git a/deps/v8/tools/generate-builtins-tests.py b/deps/v8/tools/generate-builtins-tests.py
index 4e6961deb1..4380f91c1a 100755
--- a/deps/v8/tools/generate-builtins-tests.py
+++ b/deps/v8/tools/generate-builtins-tests.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import json
import optparse
import os
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index 511d03c7ba..e5ee98794d 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -13,6 +13,9 @@ BUILD.gn. Just compile to check whether there are any violations to the rule
that each header must be includable in isolation.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import os
import os.path
@@ -58,7 +61,7 @@ def parse_args():
def printv(line):
if args.verbose:
- print line
+ print(line)
def find_all_headers():
diff --git a/deps/v8/tools/get_landmines.py b/deps/v8/tools/get_landmines.py
index ff4831dff5..33e0829ac8 100755
--- a/deps/v8/tools/get_landmines.py
+++ b/deps/v8/tools/get_landmines.py
@@ -8,8 +8,17 @@ This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
import sys
+sys.path.insert(0, os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '..', 'build')))
+
+import get_landmines as build_get_landmines
+
def print_landmines(): # pylint: disable=invalid-name
"""
@@ -21,23 +30,24 @@ def print_landmines(): # pylint: disable=invalid-name
# dependency problems, fix the dependency problems instead of adding a
# landmine.
# See the Chromium version in src/build/get_landmines.py for usage examples.
- print 'Need to clobber after ICU52 roll.'
- print 'Landmines test.'
- print 'Activating MSVS 2013.'
- print 'Revert activation of MSVS 2013.'
- print 'Activating MSVS 2013 again.'
- print 'Clobber after ICU roll.'
- print 'Moar clobbering...'
- print 'Remove build/android.gypi'
- print 'Cleanup after windows ninja switch attempt.'
- print 'Switching to pinned msvs toolchain.'
- print 'Clobbering to hopefully resolve problem with mksnapshot'
- print 'Clobber after ICU roll.'
- print 'Clobber after Android NDK update.'
- print 'Clober to fix windows build problems.'
- print 'Clober again to fix windows build problems.'
- print 'Clobber to possibly resolve failure on win-32 bot.'
- print 'Clobber for http://crbug.com/668958.'
+ print('Need to clobber after ICU52 roll.')
+ print('Landmines test.')
+ print('Activating MSVS 2013.')
+ print('Revert activation of MSVS 2013.')
+ print('Activating MSVS 2013 again.')
+ print('Clobber after ICU roll.')
+ print('Moar clobbering...')
+ print('Remove build/android.gypi')
+ print('Cleanup after windows ninja switch attempt.')
+ print('Switching to pinned msvs toolchain.')
+ print('Clobbering to hopefully resolve problem with mksnapshot')
+ print('Clobber after ICU roll.')
+ print('Clobber after Android NDK update.')
+ print('Clober to fix windows build problems.')
+ print('Clober again to fix windows build problems.')
+ print('Clobber to possibly resolve failure on win-32 bot.')
+ print('Clobber for http://crbug.com/668958.')
+ build_get_landmines.print_landmines()
return 0
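
The new import has a subtlety worth spelling out: this script is itself named get_landmines.py, and Python puts the script's own directory at sys.path[0], so a bare "import get_landmines" would resolve back to this file. Inserting ../build at position 0 makes Chromium's copy win, and the alias keeps the two apart:

    import os
    import sys

    # Put ../build ahead of this script's own directory on the module path,
    # then import the sibling module under an unambiguous alias.
    sys.path.insert(0, os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', 'build')))
    import get_landmines as build_get_landmines
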
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 34689316af..6d06d00418 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -27,6 +27,12 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# flake8: noqa # https://bugs.chromium.org/p/v8/issues/detail?id=8784
+
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
import BaseHTTPServer
import bisect
import cgi
@@ -69,7 +75,7 @@ DEBUG=False
def DebugPrint(s):
if not DEBUG: return
- print s
+ print(s)
class Descriptor(object):
@@ -120,7 +126,7 @@ class Descriptor(object):
def FullDump(reader, heap):
"""Dump all available memory regions."""
def dump_region(reader, start, size, location):
- print
+ print()
while start & 3 != 0:
start += 1
size -= 1
@@ -131,17 +137,17 @@ def FullDump(reader, heap):
if is_executable is not False:
lines = reader.GetDisasmLines(start, size)
for line in lines:
- print FormatDisasmLine(start, heap, line)
- print
+ print(FormatDisasmLine(start, heap, line))
+ print()
if is_ascii is not False:
# Output in the same format as the Unix hd command
addr = start
- for i in xrange(0, size, 16):
+ for i in range(0, size, 16):
slot = i + location
hex_line = ""
asc_line = ""
- for i in xrange(16):
+ for i in range(16):
if slot + i < location + size:
byte = ctypes.c_uint8.from_buffer(reader.minidump, slot + i).value
if byte >= 0x20 and byte < 0x7f:
@@ -153,24 +159,24 @@ def FullDump(reader, heap):
hex_line += " "
if i == 7:
hex_line += " "
- print "%s %s |%s|" % (reader.FormatIntPtr(addr),
+ print("%s %s |%s|" % (reader.FormatIntPtr(addr),
hex_line,
- asc_line)
+ asc_line))
addr += 16
if is_executable is not True and is_ascii is not True:
- print "%s - %s" % (reader.FormatIntPtr(start),
- reader.FormatIntPtr(start + size))
- print start + size + 1;
- for i in xrange(0, size, reader.PointerSize()):
+ print("%s - %s" % (reader.FormatIntPtr(start),
+ reader.FormatIntPtr(start + size)))
+ print(start + size + 1);
+ for i in range(0, size, reader.PointerSize()):
slot = start + i
maybe_address = reader.ReadUIntPtr(slot)
heap_object = heap.FindObject(maybe_address)
- print "%s: %s" % (reader.FormatIntPtr(slot),
- reader.FormatIntPtr(maybe_address))
+ print("%s: %s" % (reader.FormatIntPtr(slot),
+ reader.FormatIntPtr(maybe_address)))
if heap_object:
heap_object.Print(Printer())
- print
+ print()
reader.ForEachMemoryRegion(dump_region)
@@ -611,11 +617,11 @@ class MinidumpReader(object):
self.minidump = mmap.mmap(self.minidump_file.fileno(), 0, mmap.MAP_PRIVATE)
self.header = MINIDUMP_HEADER.Read(self.minidump, 0)
if self.header.signature != MinidumpReader._HEADER_MAGIC:
- print >>sys.stderr, "Warning: Unsupported minidump header magic!"
+ print("Warning: Unsupported minidump header magic!", file=sys.stderr)
DebugPrint(self.header)
directories = []
offset = self.header.stream_directories_rva
- for _ in xrange(self.header.stream_count):
+ for _ in range(self.header.stream_count):
directories.append(MINIDUMP_DIRECTORY.Read(self.minidump, offset))
offset += MINIDUMP_DIRECTORY.size
self.arch = None
@@ -677,7 +683,7 @@ class MinidumpReader(object):
assert ctypes.sizeof(self.module_list) == d.location.data_size
DebugPrint(self.module_list)
elif d.stream_type == MD_MEMORY_LIST_STREAM:
- print >>sys.stderr, "Warning: This is not a full minidump!"
+ print("Warning: This is not a full minidump!", file=sys.stderr)
assert self.memory_list is None
self.memory_list = MINIDUMP_MEMORY_LIST.Read(
self.minidump, d.location.rva)
@@ -699,8 +705,8 @@ class MinidumpReader(object):
else:
objdump_bin = self._FindThirdPartyObjdump()
if not objdump_bin or not os.path.exists(objdump_bin):
- print "# Cannot find '%s', falling back to default objdump '%s'" % (
- objdump_bin, DEFAULT_OBJDUMP_BIN)
+ print("# Cannot find '%s', falling back to default objdump '%s'" % (
+ objdump_bin, DEFAULT_OBJDUMP_BIN))
objdump_bin = DEFAULT_OBJDUMP_BIN
global OBJDUMP_BIN
OBJDUMP_BIN = objdump_bin
@@ -722,12 +728,12 @@ class MinidumpReader(object):
else:
# use default otherwise
return None
- print ("# Looking for platform specific (%s) objdump in "
- "third_party directory.") % platform_filter
+ print(("# Looking for platform specific (%s) objdump in "
+ "third_party directory.") % platform_filter)
objdumps = filter(lambda file: platform_filter in file >= 0, objdumps)
if len(objdumps) == 0:
- print "# Could not find platform specific objdump in third_party."
- print "# Make sure you installed the correct SDK."
+ print("# Could not find platform specific objdump in third_party.")
+ print("# Make sure you installed the correct SDK.")
return None
return objdumps[0]
@@ -822,7 +828,7 @@ class MinidumpReader(object):
def IsProbableASCIIRegion(self, location, length):
ascii_bytes = 0
non_ascii_bytes = 0
- for i in xrange(length):
+ for i in range(length):
loc = location + i
byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
if byte >= 0x7f:
@@ -844,7 +850,7 @@ class MinidumpReader(object):
def IsProbableExecutableRegion(self, location, length):
opcode_bytes = 0
sixty_four = self.Is64()
- for i in xrange(length):
+ for i in range(length):
loc = location + i
byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
if (byte == 0x8b or # mov
@@ -893,19 +899,19 @@ class MinidumpReader(object):
def FindWord(self, word, alignment=0):
def search_inside_region(reader, start, size, location):
location = (location + alignment) & ~alignment
- for i in xrange(size - self.PointerSize()):
+ for i in range(size - self.PointerSize()):
loc = location + i
if reader._ReadWord(loc) == word:
slot = start + (loc - location)
- print "%s: %s" % (reader.FormatIntPtr(slot),
- reader.FormatIntPtr(word))
+ print("%s: %s" % (reader.FormatIntPtr(slot),
+ reader.FormatIntPtr(word)))
self.ForEachMemoryRegion(search_inside_region)
def FindWordList(self, word):
aligned_res = []
unaligned_res = []
def search_inside_region(reader, start, size, location):
- for i in xrange(size - self.PointerSize()):
+ for i in range(size - self.PointerSize()):
loc = location + i
if reader._ReadWord(loc) == word:
slot = start + (loc - location)
@@ -1026,7 +1032,7 @@ class MinidumpReader(object):
# http://code.google.com/p/google-breakpad/wiki/SymbolFiles
#
def _LoadSymbolsFrom(self, symfile, baseaddr):
- print "Loading symbols from %s" % (symfile)
+ print("Loading symbols from %s" % (symfile))
funcs = []
with open(symfile) as f:
for line in f:
@@ -1038,7 +1044,7 @@ class MinidumpReader(object):
name = result.group(4).rstrip()
bisect.insort_left(self.symbols,
FuncSymbol(baseaddr + start, size, name))
- print " ... done"
+ print(" ... done")
def TryLoadSymbolsFor(self, modulename, module):
try:
@@ -1048,7 +1054,7 @@ class MinidumpReader(object):
self._LoadSymbolsFrom(symfile, module.base_of_image)
self.modules_with_symbols.append(module)
except Exception as e:
- print " ... failure (%s)" % (e)
+ print(" ... failure (%s)" % (e))
# Returns true if address is covered by some module that has loaded symbols.
def _IsInModuleWithSymbols(self, addr):
@@ -1090,11 +1096,11 @@ class Printer(object):
self.indent -= 2
def Print(self, string):
- print "%s%s" % (self._IndentString(), string)
+ print("%s%s" % (self._IndentString(), string))
def PrintLines(self, lines):
indent = self._IndentString()
- print "\n".join("%s%s" % (indent, line) for line in lines)
+ print("\n".join("%s%s" % (indent, line) for line in lines))
def _IndentString(self):
return self.indent * " "
@@ -1428,7 +1434,7 @@ class FixedArray(HeapObject):
p.Indent()
p.Print("length: %d" % self.length)
base_offset = self.ElementsOffset()
- for i in xrange(self.length):
+ for i in range(self.length):
offset = base_offset + 4 * i
try:
p.Print("[%08d] = %s" % (i, self.ObjectField(offset)))
@@ -1498,7 +1504,7 @@ class DescriptorArray(object):
p.Print("Descriptors(%08x, length=%d)" % (array.address, length))
p.Print("[et] %s" % (array.Get(1)))
- for di in xrange(length):
+ for di in range(length):
i = 2 + di * 3
p.Print("0x%x" % (array.address + array.MemberOffset(i)))
p.Print("[%i] name: %s" % (di, array.Get(i + 0)))
@@ -1543,7 +1549,7 @@ class TransitionArray(object):
if prototype is not None:
p.Print("[prototype ] %s" % (prototype))
- for di in xrange(length):
+ for di in range(length):
i = 3 + di * 2
p.Print("[%i] symbol: %s" % (di, array.Get(i + 0)))
p.Print("[%i] target: %s" % (di, array.Get(i + 1)))
@@ -1941,10 +1947,10 @@ class InspectionInfo(object):
exception_thread.stack.memory.data_size
frame_pointer = self.reader.ExceptionFP()
self.styles[frame_pointer] = "frame"
- for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
+ for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
# stack address
self.styles[slot] = "sa"
- for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
+ for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
maybe_address = self.reader.ReadUIntPtr(slot)
# stack value
self.styles[maybe_address] = "sv"
@@ -2087,15 +2093,15 @@ class InspectionPadawan(object):
raise NotImplementedError
def PrintKnowledge(self):
- print " known_first_map_page = %s\n"\
+ print(" known_first_map_page = %s\n"\
" known_first_old_page = %s" % (
self.reader.FormatIntPtr(self.known_first_map_page),
- self.reader.FormatIntPtr(self.known_first_old_page))
+ self.reader.FormatIntPtr(self.known_first_old_page)))
def FindFirstAsciiString(self, start, end=None, min_length=32):
""" Walk the memory until we find a large string """
if not end: end = start + 64
- for slot in xrange(start, end):
+ for slot in range(start, end):
if not self.reader.IsValidAddress(slot): break
message = self.reader.ReadAsciiString(slot)
if len(message) > min_length:
@@ -2113,7 +2119,7 @@ class InspectionPadawan(object):
if not self.reader.IsValidAddress(start): return start
end = start + ptr_size * 1024 * 4
magic1 = None
- for slot in xrange(start, end, ptr_size):
+ for slot in range(start, end, ptr_size):
if not self.reader.IsValidAddress(slot + ptr_size): break
magic1 = self.reader.ReadUIntPtr(slot)
magic2 = self.reader.ReadUIntPtr(slot + ptr_size)
@@ -2138,23 +2144,23 @@ class InspectionPadawan(object):
end_search = start + (32 * 1024) + (header_size * ptr_size);
end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
if not end_slot: return start
- print "Stack Message (start=%s):" % self.heap.FormatIntPtr(slot)
+ print("Stack Message (start=%s):" % self.heap.FormatIntPtr(slot))
slot += ptr_size
for name in ("isolate","ptr1", "ptr2", "ptr3", "ptr4", "codeObject1",
"codeObject2", "codeObject3", "codeObject4"):
value = self.reader.ReadUIntPtr(slot)
- print " %s: %s" % (name.rjust(14), self.heap.FormatIntPtr(value))
+ print(" %s: %s" % (name.rjust(14), self.heap.FormatIntPtr(value)))
slot += ptr_size
- print " message start: %s" % self.heap.FormatIntPtr(slot)
+ print(" message start: %s" % self.heap.FormatIntPtr(slot))
stack_start = end_slot + ptr_size
- print " stack_start: %s" % self.heap.FormatIntPtr(stack_start)
+ print(" stack_start: %s" % self.heap.FormatIntPtr(stack_start))
(message_start, message) = self.FindFirstAsciiString(slot)
self.FormatStackTrace(message, print_message)
return stack_start
def FindPtr(self, expected_value, start, end):
ptr_size = self.reader.PointerSize()
- for slot in xrange(start, end, ptr_size):
+ for slot in range(start, end, ptr_size):
if not self.reader.IsValidAddress(slot): return None
value = self.reader.ReadUIntPtr(slot)
if value == expected_value: return slot
@@ -2167,7 +2173,7 @@ class InspectionPadawan(object):
end_search = start + 1024 + (header_size * ptr_size);
end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
if not end_slot: return start
- print "Error Message (start=%s):" % self.heap.FormatIntPtr(slot)
+ print("Error Message (start=%s):" % self.heap.FormatIntPtr(slot))
slot += ptr_size
(message_start, message) = self.FindFirstAsciiString(slot)
self.FormatStackTrace(message, print_message)
@@ -2193,27 +2199,27 @@ class InspectionPadawan(object):
# Make sure the address is word aligned
stack_start = stack_start - (stack_start % ptr_size)
if magic1 is None:
- print "Stack Message:"
- print " message start: %s" % self.heap.FormatIntPtr(message_start)
- print " stack_start: %s" % self.heap.FormatIntPtr(stack_start )
+ print("Stack Message:")
+ print(" message start: %s" % self.heap.FormatIntPtr(message_start))
+ print(" stack_start: %s" % self.heap.FormatIntPtr(stack_start ))
else:
ptr1 = self.reader.ReadUIntPtr(slot + ptr_size * 2)
ptr2 = self.reader.ReadUIntPtr(slot + ptr_size * 3)
- print "Stack Message:"
- print " magic1: %s" % self.heap.FormatIntPtr(magic1)
- print " magic2: %s" % self.heap.FormatIntPtr(magic2)
- print " ptr1: %s" % self.heap.FormatIntPtr(ptr1)
- print " ptr2: %s" % self.heap.FormatIntPtr(ptr2)
- print " message start: %s" % self.heap.FormatIntPtr(message_start)
- print " stack_start: %s" % self.heap.FormatIntPtr(stack_start )
- print ""
+ print("Stack Message:")
+ print(" magic1: %s" % self.heap.FormatIntPtr(magic1))
+ print(" magic2: %s" % self.heap.FormatIntPtr(magic2))
+ print(" ptr1: %s" % self.heap.FormatIntPtr(ptr1))
+ print(" ptr2: %s" % self.heap.FormatIntPtr(ptr2))
+ print(" message start: %s" % self.heap.FormatIntPtr(message_start))
+ print(" stack_start: %s" % self.heap.FormatIntPtr(stack_start ))
+ print("")
self.FormatStackTrace(message, print_message)
return stack_start
def FormatStackTrace(self, message, print_message):
if not print_message:
- print " Use `dsa` to print the message with annotated addresses."
- print ""
+ print(" Use `dsa` to print the message with annotated addresses.")
+ print("")
return
ptr_size = self.reader.PointerSize()
# Annotate all addresses in the dumped message
@@ -2224,11 +2230,11 @@ class InspectionPadawan(object):
address = self.heap.FormatIntPtr(int(address_org, 16))
if address_org != address:
message = message.replace(address_org, address)
- print "Message:"
- print "="*80
- print message
- print "="*80
- print ""
+ print("Message:")
+ print("="*80)
+ print(message)
+ print("="*80)
+ print("")
def TryInferFramePointer(self, slot, address):
@@ -2279,9 +2285,9 @@ class InspectionPadawan(object):
free_space_end = 0
ptr_size = self.reader.PointerSize()
- for slot in xrange(start, end, ptr_size):
+ for slot in range(start, end, ptr_size):
if not self.reader.IsValidAddress(slot):
- print "%s: Address is not contained within the minidump!" % slot
+ print("%s: Address is not contained within the minidump!" % slot)
return
maybe_address = self.reader.ReadUIntPtr(slot)
address_info = []
@@ -2339,17 +2345,17 @@ class InspectionPadawan(object):
frame_pointer = maybe_address
address_type_marker = self.heap.AddressTypeMarker(maybe_address)
string_value = self.reader.ReadAsciiPtr(slot)
- print "%s: %s %s %s %s" % (self.reader.FormatIntPtr(slot),
+ print("%s: %s %s %s %s" % (self.reader.FormatIntPtr(slot),
self.reader.FormatIntPtr(maybe_address),
address_type_marker,
string_value,
- ' | '.join(address_info))
+ ' | '.join(address_info)))
if maybe_address_contents == 0xdecade01:
in_oom_dump_area = False
heap_object = self.heap.FindObject(maybe_address)
if heap_object:
heap_object.Print(Printer())
- print ""
+ print("")
WEB_HEADER = """
<!DOCTYPE html>
@@ -2701,7 +2707,7 @@ class InspectionWebFormatter(object):
stack_bottom = exception_thread.stack.start + \
exception_thread.stack.memory.data_size
stack_map = {self.reader.ExceptionIP(): -1}
- for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
+ for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
maybe_address = self.reader.ReadUIntPtr(slot)
if not maybe_address in stack_map:
stack_map[maybe_address] = slot
@@ -2719,7 +2725,7 @@ class InspectionWebFormatter(object):
address = int(straddress, 0)
self.comments.set_comment(address, comment)
except ValueError:
- print "Invalid address"
+ print("Invalid address")
def set_page_address(self, kind, straddress):
try:
@@ -2730,7 +2736,7 @@ class InspectionWebFormatter(object):
self.padawan.known_first_map_page = address
self.comments.save_page_address(kind, address)
except ValueError:
- print "Invalid address"
+ print("Invalid address")
def td_from_address(self, f, address):
f.write("<td %s>" % self.comments.get_style_class_string(address))
@@ -2853,7 +2859,7 @@ class InspectionWebFormatter(object):
if details == InspectionWebFormatter.CONTEXT_FULL:
if self.reader.exception.exception.parameter_count > 0:
f.write("&nbsp;&nbsp; Exception parameters: ")
- for i in xrange(0, self.reader.exception.exception.parameter_count):
+ for i in range(0, self.reader.exception.exception.parameter_count):
f.write("%08x" % self.reader.exception.exception.information[i])
f.write("<br><br>")
@@ -2929,19 +2935,19 @@ class InspectionWebFormatter(object):
f.write('<div class="code">')
f.write("<table class=codedump>")
- for j in xrange(0, end_address - start_address, size):
+ for j in range(0, end_address - start_address, size):
slot = start_address + j
heap_object = ""
maybe_address = None
end_region = region[0] + region[1]
if slot < region[0] or slot + size > end_region:
straddress = "0x"
- for i in xrange(end_region, slot + size):
+ for i in range(end_region, slot + size):
straddress += "??"
for i in reversed(
- xrange(max(slot, region[0]), min(slot + size, end_region))):
+ range(max(slot, region[0]), min(slot + size, end_region))):
straddress += "%02x" % self.reader.ReadU8(i)
- for i in xrange(slot, region[0]):
+ for i in range(slot, region[0]):
straddress += "??"
else:
maybe_address = self.reader.ReadUIntPtr(slot)
@@ -3003,7 +3009,7 @@ class InspectionWebFormatter(object):
start = self.align_down(start_address, line_width)
- for i in xrange(end_address - start):
+ for i in range(end_address - start):
address = start + i
if address % 64 == 0:
if address != start:
@@ -3073,7 +3079,7 @@ class InspectionWebFormatter(object):
(start_address, end_address, highlight_address, expand))
f.write('<div class="code">')
f.write("<table class=\"codedump\">");
- for i in xrange(len(lines)):
+ for i in range(len(lines)):
line = lines[i]
next_address = count
if i + 1 < len(lines):
@@ -3449,8 +3455,8 @@ class InspectionShell(cmd.Cmd):
def do_help(self, cmd=None):
if len(cmd) == 0:
- print "Available commands"
- print "=" * 79
+ print("Available commands")
+ print("=" * 79)
prefix = "do_"
methods = inspect.getmembers(InspectionShell, predicate=inspect.ismethod)
for name,method in methods:
@@ -3459,8 +3465,8 @@ class InspectionShell(cmd.Cmd):
if not doc: continue
name = prefix.join(name.split(prefix)[1:])
description = doc.splitlines()[0]
- print (name + ": ").ljust(16) + description
- print "=" * 79
+ print((name + ": ").ljust(16) + description)
+ print("=" * 79)
else:
return super(InspectionShell, self).do_help(cmd)
@@ -3488,9 +3494,9 @@ class InspectionShell(cmd.Cmd):
address = self.ParseAddressExpr(address)
string = self.reader.ReadAsciiString(address)
if string == "":
- print "Not an ASCII string at %s" % self.reader.FormatIntPtr(address)
+ print("Not an ASCII string at %s" % self.reader.FormatIntPtr(address))
else:
- print "%s\n" % string
+ print("%s\n" % string)
def do_dsa(self, address):
""" see display_stack_ascii"""
@@ -3501,7 +3507,7 @@ class InspectionShell(cmd.Cmd):
Print ASCII stack error message.
"""
if self.reader.exception is None:
- print "Minidump has no exception info"
+ print("Minidump has no exception info")
return
if len(address) == 0:
address = None
@@ -3526,7 +3532,7 @@ class InspectionShell(cmd.Cmd):
else:
self.dd_start += self.dd_num * self.reader.PointerSize()
if not self.reader.IsAlignedAddress(self.dd_start):
- print "Warning: Dumping un-aligned memory, is this what you had in mind?"
+ print("Warning: Dumping un-aligned memory, is this what you had in mind?")
end = self.dd_start + self.reader.PointerSize() * self.dd_num
self.padawan.InterpretMemory(self.dd_start, end)
@@ -3545,13 +3551,13 @@ class InspectionShell(cmd.Cmd):
if self.reader.IsAlignedAddress(address):
address = address + 1
elif not self.heap.IsTaggedObjectAddress(address):
- print "Address doesn't look like a valid pointer!"
+ print("Address doesn't look like a valid pointer!")
return
heap_object = self.padawan.SenseObject(address)
if heap_object:
heap_object.Print(Printer())
else:
- print "Address cannot be interpreted as object!"
+ print("Address cannot be interpreted as object!")
def do_dso(self, args):
""" see display_stack_objects """
@@ -3618,10 +3624,10 @@ class InspectionShell(cmd.Cmd):
address = self.ParseAddressExpr(address)
page_address = address & ~self.heap.PageAlignmentMask()
if self.reader.IsValidAddress(page_address):
- print "**** Not Implemented"
+ print("**** Not Implemented")
return
else:
- print "Page header is not available!"
+ print("Page header is not available!")
def do_k(self, arguments):
"""
@@ -3666,10 +3672,10 @@ class InspectionShell(cmd.Cmd):
List all available memory regions.
"""
def print_region(reader, start, size, location):
- print " %s - %s (%d bytes)" % (reader.FormatIntPtr(start),
+ print(" %s - %s (%d bytes)" % (reader.FormatIntPtr(start),
reader.FormatIntPtr(start + size),
- size)
- print "Available memory regions:"
+ size))
+ print("Available memory regions:")
self.reader.ForEachMemoryRegion(print_region)
def do_lm(self, arg):
@@ -3690,7 +3696,7 @@ class InspectionShell(cmd.Cmd):
PrintModuleDetails(self.reader, module)
else:
PrintModuleDetails(self.reader, module)
- print
+ print()
def do_s(self, word):
""" see search """
@@ -3707,9 +3713,10 @@ class InspectionShell(cmd.Cmd):
try:
word = self.ParseAddressExpr(word)
except ValueError:
- print "Malformed word, prefix with '0x' to use hexadecimal format."
+ print("Malformed word, prefix with '0x' to use hexadecimal format.")
return
- print "Searching for word %d/0x%s:" % (word, self.reader.FormatIntPtr(word))
+ print(
+ "Searching for word %d/0x%s:" % (word, self.reader.FormatIntPtr(word)))
self.reader.FindWord(word)
def do_sh(self, none):
@@ -3719,7 +3726,7 @@ class InspectionShell(cmd.Cmd):
You might get lucky and find this rare treasure full of invaluable
information.
"""
- print "**** Not Implemented"
+ print("**** Not Implemented")
def do_u(self, args):
""" see disassemble """
@@ -3742,24 +3749,24 @@ class InspectionShell(cmd.Cmd):
skip = True
if not self.reader.IsValidAddress(self.u_start):
- print "Address %s is not contained within the minidump!" % (
- self.reader.FormatIntPtr(self.u_start))
+ print("Address %s is not contained within the minidump!" % (
+ self.reader.FormatIntPtr(self.u_start)))
return
lines = self.reader.GetDisasmLines(self.u_start, self.u_size)
if len(lines) == 0:
- print "Address %s could not be disassembled!" % (
- self.reader.FormatIntPtr(self.u_start))
- print " Could not disassemble using %s." % OBJDUMP_BIN
- print " Pass path to architecture specific objdump via --objdump?"
+ print("Address %s could not be disassembled!" % (
+ self.reader.FormatIntPtr(self.u_start)))
+ print(" Could not disassemble using %s." % OBJDUMP_BIN)
+ print(" Pass path to architecture specific objdump via --objdump?")
return
for line in lines:
if skip:
skip = False
continue
- print FormatDisasmLine(self.u_start, self.heap, line)
+ print(FormatDisasmLine(self.u_start, self.heap, line))
# Set the next start address = last line
self.u_start += lines[-1][0]
- print
+ print()
def do_EOF(self, none):
raise KeyboardInterrupt
@@ -3796,18 +3803,18 @@ def GetModuleName(reader, module):
def PrintModuleDetails(reader, module):
- print "%s" % GetModuleName(reader, module)
+ print("%s" % GetModuleName(reader, module))
file_version = GetVersionString(module.version_info.dwFileVersionMS,
module.version_info.dwFileVersionLS);
product_version = GetVersionString(module.version_info.dwProductVersionMS,
module.version_info.dwProductVersionLS)
- print " base: %s" % reader.FormatIntPtr(module.base_of_image)
- print " end: %s" % reader.FormatIntPtr(module.base_of_image +
- module.size_of_image)
- print " file version: %s" % file_version
- print " product version: %s" % product_version
+ print(" base: %s" % reader.FormatIntPtr(module.base_of_image))
+ print(" end: %s" % reader.FormatIntPtr(module.base_of_image +
+ module.size_of_image))
+ print(" file version: %s" % file_version)
+ print(" product version: %s" % product_version)
time_date_stamp = datetime.datetime.fromtimestamp(module.time_date_stamp)
- print " timestamp: %s" % time_date_stamp
+ print(" timestamp: %s" % time_date_stamp)
def AnalyzeMinidump(options, minidump_name):
@@ -3817,7 +3824,7 @@ def AnalyzeMinidump(options, minidump_name):
stack_top = reader.ExceptionSP()
stack_bottom = reader.StackBottom()
stack_map = {reader.ExceptionIP(): -1}
- for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
+ for slot in range(stack_top, stack_bottom, reader.PointerSize()):
maybe_address = reader.ReadUIntPtr(slot)
if not maybe_address in stack_map:
stack_map[maybe_address] = slot
@@ -3827,51 +3834,51 @@ def AnalyzeMinidump(options, minidump_name):
DebugPrint("========================================")
if reader.exception is None:
- print "Minidump has no exception info"
+ print("Minidump has no exception info")
else:
- print "Address markers:"
- print " T = valid tagged pointer in the minidump"
- print " S = address on the exception stack"
- print " C = address in loaded C/C++ module"
- print " * = address in the minidump"
- print ""
- print "Exception info:"
+ print("Address markers:")
+ print(" T = valid tagged pointer in the minidump")
+ print(" S = address on the exception stack")
+ print(" C = address in loaded C/C++ module")
+ print(" * = address in the minidump")
+ print("")
+ print("Exception info:")
exception_thread = reader.ExceptionThread()
- print " thread id: %d" % exception_thread.id
- print " code: %08X" % reader.exception.exception.code
- print " context:"
+ print(" thread id: %d" % exception_thread.id)
+ print(" code: %08X" % reader.exception.exception.code)
+ print(" context:")
context = CONTEXT_FOR_ARCH[reader.arch]
maxWidth = max(map(lambda s: len(s), context))
for r in context:
register_value = reader.Register(r)
- print " %s: %s" % (r.rjust(maxWidth),
- heap.FormatIntPtr(register_value))
+ print(" %s: %s" % (r.rjust(maxWidth),
+ heap.FormatIntPtr(register_value)))
# TODO(vitalyr): decode eflags.
if reader.arch in [MD_CPU_ARCHITECTURE_ARM, MD_CPU_ARCHITECTURE_ARM64]:
- print " cpsr: %s" % bin(reader.exception_context.cpsr)[2:]
+ print(" cpsr: %s" % bin(reader.exception_context.cpsr)[2:])
else:
- print " eflags: %s" % bin(reader.exception_context.eflags)[2:]
+ print(" eflags: %s" % bin(reader.exception_context.eflags)[2:])
- print
- print " modules:"
+ print()
+ print(" modules:")
for module in reader.module_list.modules:
name = GetModuleName(reader, module)
if name in KNOWN_MODULES:
- print " %s at %08X" % (name, module.base_of_image)
+ print(" %s at %08X" % (name, module.base_of_image))
reader.TryLoadSymbolsFor(name, module)
- print
+ print()
- print " stack-top: %s" % heap.FormatIntPtr(reader.StackTop())
- print " stack-bottom: %s" % heap.FormatIntPtr(reader.StackBottom())
- print ""
+ print(" stack-top: %s" % heap.FormatIntPtr(reader.StackTop()))
+ print(" stack-bottom: %s" % heap.FormatIntPtr(reader.StackBottom()))
+ print("")
if options.shell:
padawan.PrintStackTraceMessage(print_message=False)
- print "Disassembly around exception.eip:"
+ print("Disassembly around exception.eip:")
eip_symbol = reader.FindSymbol(reader.ExceptionIP())
if eip_symbol is not None:
- print eip_symbol
+ print(eip_symbol)
disasm_start = reader.ExceptionIP() - EIP_PROXIMITY
disasm_bytes = 2 * EIP_PROXIMITY
if (options.full):
@@ -3883,12 +3890,12 @@ def AnalyzeMinidump(options, minidump_name):
lines = reader.GetDisasmLines(disasm_start, disasm_bytes)
if not lines:
- print "Could not disassemble using %s." % OBJDUMP_BIN
- print "Pass path to architecture specific objdump via --objdump?"
+ print("Could not disassemble using %s." % OBJDUMP_BIN)
+ print("Pass path to architecture specific objdump via --objdump?")
for line in lines:
- print FormatDisasmLine(disasm_start, heap, line)
- print
+ print(FormatDisasmLine(disasm_start, heap, line))
+ print()
if heap is None:
heap = V8Heap(reader, None)
@@ -3903,10 +3910,10 @@ def AnalyzeMinidump(options, minidump_name):
try:
InspectionShell(reader, heap).cmdloop("type help to get help")
except KeyboardInterrupt:
- print "Kthxbye."
+ print("Kthxbye.")
elif not options.command:
if reader.exception is not None:
- print "Annotated stack (from exception.esp to bottom):"
+ print("Annotated stack (from exception.esp to bottom):")
stack_start = padawan.PrintStackTraceMessage()
padawan.InterpretMemory(stack_start, stack_bottom)
reader.Dispose()
@@ -3934,11 +3941,11 @@ if __name__ == "__main__":
if options.web:
try:
server = InspectionWebServer(PORT_NUMBER, options, args[0])
- print 'Started httpserver on port ' , PORT_NUMBER
+ print('Started httpserver on port ' , PORT_NUMBER)
webbrowser.open('http://localhost:%i/summary.html' % PORT_NUMBER)
server.serve_forever()
except KeyboardInterrupt:
- print '^C received, shutting down the web server'
+ print('^C received, shutting down the web server')
server.socket.close()
else:
AnalyzeMinidump(options, args[0])
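
grokdump.py gets the largest mechanical conversion: every print statement becomes a call and every xrange becomes range. On Python 2 range builds a list, which is harmless at these loop counts, and the "# flake8: noqa" comment pointing at v8:8784 records that py2-only imports such as BaseHTTPServer still remain, so the script is not yet fully bilingual. A script that does need a lazy range on Python 2 can shim it like this (an assumption, not part of this patch):

    try:
        range = xrange   # Python 2: rebind to the lazy iterator
    except NameError:
        pass             # Python 3: range is already lazy
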
diff --git a/deps/v8/tools/ignition/bytecode_dispatches_report.py b/deps/v8/tools/ignition/bytecode_dispatches_report.py
index 97f8e8394d..aa5a9c9e59 100755
--- a/deps/v8/tools/ignition/bytecode_dispatches_report.py
+++ b/deps/v8/tools/ignition/bytecode_dispatches_report.py
@@ -5,6 +5,9 @@
# found in the LICENSE file.
#
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import heapq
import json
@@ -54,8 +57,8 @@ def warn_if_counter_may_have_saturated(dispatches_table):
for source, counters_from_source in iteritems(dispatches_table):
for destination, counter in iteritems(counters_from_source):
if counter == __COUNTER_MAX:
- print "WARNING: {} -> {} may have saturated.".format(source,
- destination)
+ print("WARNING: {} -> {} may have saturated.".format(source,
+ destination))
def find_top_bytecode_dispatch_pairs(dispatches_table, top_count):
@@ -71,9 +74,9 @@ def find_top_bytecode_dispatch_pairs(dispatches_table, top_count):
def print_top_bytecode_dispatch_pairs(dispatches_table, top_count):
top_bytecode_dispatch_pairs = (
find_top_bytecode_dispatch_pairs(dispatches_table, top_count))
- print "Top {} bytecode dispatch pairs:".format(top_count)
+ print("Top {} bytecode dispatch pairs:".format(top_count))
for source, destination, counter in top_bytecode_dispatch_pairs:
- print "{:>12d}\t{} -> {}".format(counter, source, destination)
+ print("{:>12d}\t{} -> {}".format(counter, source, destination))
def find_top_bytecodes(dispatches_table):
@@ -87,9 +90,9 @@ def find_top_bytecodes(dispatches_table):
def print_top_bytecodes(dispatches_table):
top_bytecodes = find_top_bytecodes(dispatches_table)
- print "Top bytecodes:"
+ print("Top bytecodes:")
for bytecode, counter in top_bytecodes:
- print "{:>12d}\t{}".format(counter, bytecode)
+ print("{:>12d}\t{}".format(counter, bytecode))
def find_top_dispatch_sources_and_destinations(
@@ -116,13 +119,13 @@ def print_top_dispatch_sources_and_destinations(dispatches_table, bytecode,
top_count, sort_relative):
top_sources, top_destinations = find_top_dispatch_sources_and_destinations(
dispatches_table, bytecode, top_count, sort_relative)
- print "Top sources of dispatches to {}:".format(bytecode)
+ print("Top sources of dispatches to {}:".format(bytecode))
for source_name, counter, ratio in top_sources:
- print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name)
+ print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name))
- print "\nTop destinations of dispatches from {}:".format(bytecode)
+ print("\nTop destinations of dispatches from {}:".format(bytecode))
for destination_name, counter, ratio in top_destinations:
- print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name)
+ print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name))
def build_counters_matrix(dispatches_table):
diff --git a/deps/v8/tools/ignition/linux_perf_bytecode_annotate.py b/deps/v8/tools/ignition/linux_perf_bytecode_annotate.py
index b2422323b0..96cb0ee0fd 100755
--- a/deps/v8/tools/ignition/linux_perf_bytecode_annotate.py
+++ b/deps/v8/tools/ignition/linux_perf_bytecode_annotate.py
@@ -1,10 +1,13 @@
-#! /usr/bin/python2
+#! /usr/bin/python
#
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import collections
import os
@@ -94,18 +97,18 @@ def print_disassembly_annotation(offset_counts, bytecode_disassembly):
return offsets.pop() if offsets else -1
current_offset = next_offset()
- print current_offset;
+ print(current_offset);
for line in bytecode_disassembly:
disassembly_offset = int(line.split()[1])
if disassembly_offset == current_offset:
count = offset_counts[current_offset]
percentage = 100.0 * count / total
- print "{:>8d} ({:>5.1f}%) ".format(count, percentage),
+ print("{:>8d} ({:>5.1f}%) ".format(count, percentage), end=' ')
current_offset = next_offset()
else:
- print " ",
- print line
+ print(" ", end=' ')
+ print(line)
if offsets:
print ("WARNING: Offsets not empty. Output is most likely invalid due to "
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index d03151805d..561e4547e1 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -31,6 +31,9 @@
# char arrays. It is used for embedded JavaScript code in the V8
# library.
+# for py2/py3 compatibility
+from functools import reduce
+
import os, re
import optparse
import textwrap
@@ -249,7 +252,7 @@ def BuildMetadata(sources, source_bytes, native_type):
get_script_name_cases = []
get_script_source_cases = []
offset = 0
- for i in xrange(len(sources.modules)):
+ for i in range(len(sources.modules)):
native_name = "native %s.js" % sources.names[i]
d = {
"i": i,
@@ -290,7 +293,7 @@ def PutInt(blob_file, value):
value_with_length = (value << 2) | (size - 1)
byte_sequence = bytearray()
- for i in xrange(size):
+ for i in range(size):
byte_sequence.append(value_with_length & 255)
value_with_length >>= 8;
blob_file.write(byte_sequence)
@@ -312,7 +315,7 @@ def WriteStartupBlob(sources, startup_blob):
output = open(startup_blob, "wb")
PutInt(output, len(sources.names))
- for i in xrange(len(sources.names)):
+ for i in range(len(sources.names)):
PutStr(output, sources.names[i]);
PutStr(output, sources.modules[i]);
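
js2c.py needs reduce, which Python 3 moved out of the builtins into functools; since the functools import also works on Python 2.6+, the one import line covers both interpreters:

    from functools import reduce

    # Identical call on both interpreters:
    total = reduce(lambda acc, n: acc + n, [1, 2, 3], 0)
    assert total == 6
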
diff --git a/deps/v8/tools/ll_prof.py b/deps/v8/tools/ll_prof.py
index ca2cb00e4b..44998743b4 100755
--- a/deps/v8/tools/ll_prof.py
+++ b/deps/v8/tools/ll_prof.py
@@ -27,6 +27,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
import bisect
import collections
import ctypes
@@ -157,7 +161,7 @@ class Code(object):
# Print annotated lines.
address = lines[0][0]
total_count = 0
- for i in xrange(len(lines)):
+ for i in range(len(lines)):
start_offset = lines[i][0] - address
if i == len(lines) - 1:
end_offset = self.end_address - self.start_address
@@ -183,10 +187,10 @@ class Code(object):
# 6 for the percentage number, incl. the '.'
# 1 for the '%' sign
# => 15
- print "%5d | %6.2f%% %x(%d): %s" % (count, percent, offset, offset, lines[i][1])
+ print("%5d | %6.2f%% %x(%d): %s" % (count, percent, offset, offset, lines[i][1]))
else:
- print "%s %x(%d): %s" % (" " * 15, offset, offset, lines[i][1])
- print
+ print("%s %x(%d): %s" % (" " * 15, offset, offset, lines[i][1]))
+ print()
assert total_count == self.self_ticks, \
"Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
@@ -267,9 +271,8 @@ class CodeMap(object):
pages = 0
while page_id < limit_id:
if max_pages >= 0 and pages > max_pages:
- print >>sys.stderr, \
- "Warning: page limit (%d) reached for %s [%s]" % (
- max_pages, code.name, code.origin)
+ print("Warning: page limit (%d) reached for %s [%s]" % (
+ max_pages, code.name, code.origin), file=sys.stderr)
break
if page_id in self.pages:
page = self.pages[page_id]
@@ -309,7 +312,7 @@ class CodeMap(object):
def Print(self):
for code in self.AllCode():
- print code
+ print(code)
def Find(self, pc):
if pc < self.min_address or pc >= self.max_address:
@@ -411,7 +414,7 @@ class LogReader(object):
continue
code = self.code_map.Find(old_start_address)
if not code:
- print >>sys.stderr, "Warning: Not found %x" % old_start_address
+ print("Warning: Not found %x" % old_start_address, file=sys.stderr)
continue
assert code.start_address == old_start_address, \
"Inexact move address %x for %s" % (old_start_address, code)
@@ -591,7 +594,7 @@ class TraceReader(object):
self.trace = mmap.mmap(self.trace_file.fileno(), 0, mmap.MAP_PRIVATE)
self.trace_header = TRACE_HEADER_DESC.Read(self.trace, 0)
if self.trace_header.magic != TraceReader._TRACE_HEADER_MAGIC:
- print >>sys.stderr, "Warning: unsupported trace header magic"
+ print("Warning: unsupported trace header magic", file=sys.stderr)
self.offset = self.trace_header.data_offset
self.limit = self.trace_header.data_offset + self.trace_header.data_size
assert self.limit <= self.trace.size(), \
@@ -642,7 +645,7 @@ class TraceReader(object):
return sample
sample.ips = []
offset += self.header_size + ctypes.sizeof(sample)
- for _ in xrange(sample.nr):
+ for _ in range(sample.nr):
sample.ips.append(
self.ip_struct.from_buffer(self.trace, offset).value)
offset += self.ip_size
@@ -786,7 +789,7 @@ class LibraryRepo(object):
def _LoadKernelSymbols(self, code_map):
if not os.path.exists(KERNEL_ALLSYMS_FILE):
- print >>sys.stderr, "Warning: %s not found" % KERNEL_ALLSYMS_FILE
+ print("Warning: %s not found" % KERNEL_ALLSYMS_FILE, file=sys.stderr)
return False
kallsyms = open(KERNEL_ALLSYMS_FILE, "r")
code = None
@@ -804,35 +807,35 @@ class LibraryRepo(object):
def PrintReport(code_map, library_repo, arch, ticks, options):
- print "Ticks per symbol:"
+ print("Ticks per symbol:")
used_code = [code for code in code_map.UsedCode()]
used_code.sort(key=lambda x: x.self_ticks, reverse=True)
for i, code in enumerate(used_code):
code_ticks = code.self_ticks
- print "%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
- code.FullName(), code.origin)
+ print("%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
+ code.FullName(), code.origin))
if options.disasm_all or i < options.disasm_top:
code.PrintAnnotated(arch, options)
- print
- print "Ticks per library:"
+ print()
+ print("Ticks per library:")
mmap_infos = [m for m in library_repo.infos if m.ticks > 0]
mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
for mmap_info in mmap_infos:
mmap_ticks = mmap_info.ticks
- print "%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
- mmap_info.unique_name)
+ print("%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
+ mmap_info.unique_name))
def PrintDot(code_map, options):
- print "digraph G {"
+ print("digraph G {")
for code in code_map.UsedCode():
if code.self_ticks < 10:
continue
- print "n%d [shape=box,label=\"%s\"];" % (code.id, code.name)
+ print("n%d [shape=box,label=\"%s\"];" % (code.id, code.name))
if code.callee_ticks:
for callee, ticks in code.callee_ticks.iteritems():
- print "n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks)
- print "}"
+ print("n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks))
+ print("}")
if __name__ == "__main__":
@@ -877,8 +880,8 @@ if __name__ == "__main__":
options, args = parser.parse_args()
if not options.quiet:
- print "V8 log: %s, %s.ll" % (options.log, options.log)
- print "Perf trace file: %s" % options.trace
+ print("V8 log: %s, %s.ll" % (options.log, options.log))
+ print("Perf trace file: %s" % options.trace)
V8_GC_FAKE_MMAP = options.gc_fake_mmap
HOST_ROOT = options.host_root
@@ -886,7 +889,7 @@ if __name__ == "__main__":
disasm.OBJDUMP_BIN = options.objdump
OBJDUMP_BIN = options.objdump
else:
- print "Cannot find %s, falling back to default objdump" % options.objdump
+ print("Cannot find %s, falling back to default objdump" % options.objdump)
# Stats.
events = 0
@@ -904,8 +907,8 @@ if __name__ == "__main__":
log_reader = LogReader(log_name=options.log + ".ll",
code_map=code_map)
if not options.quiet:
- print "Generated code architecture: %s" % log_reader.arch
- print
+ print("Generated code architecture: %s" % log_reader.arch)
+ print()
sys.stdout.flush()
# Process the code and trace logs.
@@ -968,11 +971,11 @@ if __name__ == "__main__":
def PrintTicks(number, total, description):
print("%10d %5.1f%% ticks in %s" %
(number, 100.0*number/total, description))
- print
- print "Stats:"
- print "%10d total trace events" % events
- print "%10d total ticks" % ticks
- print "%10d ticks not in symbols" % missed_ticks
+ print()
+ print("Stats:")
+ print("%10d total trace events" % events)
+ print("%10d total ticks" % ticks)
+ print("%10d ticks not in symbols" % missed_ticks)
unaccounted = "unaccounted ticks"
if really_missed_ticks > 0:
unaccounted += " (probably in the kernel, try --kernel)"
@@ -980,10 +983,10 @@ if __name__ == "__main__":
PrintTicks(optimized_ticks, ticks, "ticks in optimized code")
PrintTicks(generated_ticks, ticks, "ticks in other lazily compiled code")
PrintTicks(v8_internal_ticks, ticks, "ticks in v8::internal::*")
- print "%10d total symbols" % len([c for c in code_map.AllCode()])
- print "%10d used symbols" % len([c for c in code_map.UsedCode()])
- print "%9.2fs library processing time" % mmap_time
- print "%9.2fs tick processing time" % sample_time
+ print("%10d total symbols" % len([c for c in code_map.AllCode()]))
+ print("%10d used symbols" % len([c for c in code_map.UsedCode()]))
+ print("%9.2fs library processing time" % mmap_time)
+ print("%9.2fs tick processing time" % sample_time)
log_reader.Dispose()
trace_reader.Dispose()
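
The hunks above show the recurring shape of this patch: py2 print statements become print() calls, and every touched script gains `from __future__ import print_function` so the function form also works under Python 2. A minimal sketch of the idiom (standalone, not part of the patch):

    # works identically on Python 2.7 and Python 3
    from __future__ import print_function

    print("Ticks per symbol:")  # py2 statement form `print "..."` is a SyntaxError on py3
    print()                     # blank line; on py2 without the import this prints "()"

Note that `code.callee_ticks.iteritems()` in the PrintDot hunk is left unchanged; `dict.iteritems()` no longer exists on Python 3, where plain `.items()` (a list on py2, a view on py3) works on both versions.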
diff --git a/deps/v8/tools/lldb_commands.py b/deps/v8/tools/lldb_commands.py
index 46e5e8b0fb..2884cd60b0 100644
--- a/deps/v8/tools/lldb_commands.py
+++ b/deps/v8/tools/lldb_commands.py
@@ -5,6 +5,9 @@
# Load this file by adding this to your ~/.lldbinit:
# command script import <this_dir>/lldb_commands.py
+# for py2/py3 compatibility
+from __future__ import print_function
+
import lldb
import re
diff --git a/deps/v8/tools/locs.py b/deps/v8/tools/locs.py
index 6773d1a76a..05793d698d 100755
--- a/deps/v8/tools/locs.py
+++ b/deps/v8/tools/locs.py
@@ -1,4 +1,5 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
+
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,6 +8,9 @@
Consult --help for more information.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import json
import os
@@ -147,23 +151,33 @@ def GenerateCompileCommandsAndBuild(build_dir, compile_commands_file, out):
return compile_commands_file
+def fmt_bytes(bytes):
+ if bytes > 1024*1024*1024:
+ return int(bytes / (1024*1024)), "MB"
+ elif bytes > 1024*1024:
+ return int(bytes / (1024)), "kB"
+ return int(bytes), " B"
+
class CompilationData:
- def __init__(self, loc, expanded):
+ def __init__(self, loc, in_bytes, expanded, expanded_bytes):
self.loc = loc
+ self.in_bytes = in_bytes
self.expanded = expanded
+ self.expanded_bytes = expanded_bytes
def ratio(self):
return self.expanded / (self.loc+1)
def to_string(self):
- return "{:>9,} to {:>12,} ({:>5.0f}x)".format(
- self.loc, self.expanded, self.ratio())
-
+ exp_bytes, exp_unit = fmt_bytes(self.expanded_bytes)
+ in_bytes, in_unit = fmt_bytes(self.in_bytes)
+ return "{:>9,} LoC ({:>7,} {}) to {:>12,} LoC ({:>7,} {}) ({:>5.0f}x)".format(
+ self.loc, in_bytes, in_unit, self.expanded, exp_bytes, exp_unit, self.ratio())
class File(CompilationData):
- def __init__(self, file, loc, expanded):
- super().__init__(loc, expanded)
+ def __init__(self, file, loc, in_bytes, expanded, expanded_bytes):
+ super().__init__(loc, in_bytes, expanded, expanded_bytes)
self.file = file
def to_string(self):
@@ -172,7 +186,7 @@ class File(CompilationData):
class Group(CompilationData):
def __init__(self, name, regexp_string):
- super().__init__(0, 0)
+ super().__init__(0, 0, 0, 0)
self.name = name
self.count = 0
self.regexp = re.compile(regexp_string)
@@ -180,7 +194,9 @@ class Group(CompilationData):
def account(self, unit):
if (self.regexp.match(unit.file)):
self.loc += unit.loc
+ self.in_bytes += unit.in_bytes
self.expanded += unit.expanded
+ self.expanded_bytes += unit.expanded_bytes
self.count += 1
def to_string(self, name_width):
@@ -195,7 +211,8 @@ def SetupReportGroups():
"third_party": '\\.\\./\\.\\./third_party',
"gen": 'gen'}
- report_groups = {**default_report_groups, **dict(ARGS['group'])}
+ report_groups = default_report_groups.copy()
+ report_groups.update(dict(ARGS['group']))
if ARGS['only']:
for only_arg in ARGS['only']:
@@ -236,8 +253,8 @@ class Results:
is_tracked = True
return is_tracked
- def recordFile(self, filename, loc, expanded):
- unit = File(filename, loc, expanded)
+ def recordFile(self, filename, loc, in_bytes, expanded, expanded_bytes):
+ unit = File(filename, loc, in_bytes, expanded, expanded_bytes)
self.units[filename] = unit
for group in self.groups.values():
group.account(unit)
@@ -257,9 +274,11 @@ class Results:
class LocsEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, File):
- return {"file": o.file, "loc": o.loc, "expanded": o.expanded}
+ return {"file": o.file, "loc": o.loc, "in_bytes": o.in_bytes,
+ "expanded": o.expanded, "expanded_bytes": o.expanded_bytes}
if isinstance(o, Group):
- return {"name": o.name, "loc": o.loc, "expanded": o.expanded}
+ return {"name": o.name, "loc": o.loc, "in_bytes": o.in_bytes,
+ "expanded": o.expanded, "expanded_bytes": o.expanded_bytes}
if isinstance(o, Results):
return {"groups": o.groups, "units": o.units}
return json.JSONEncoder.default(self, o)
@@ -317,16 +336,15 @@ def Main():
for i, key in enumerate(data):
if not result.track(key['file']):
continue
- if not ARGS['json']:
- status.print(
- "[{}/{}] Counting LoCs of {}".format(i, len(data), key['file']))
+ status.print("[{}/{}] Counting LoCs of {}".format(i, len(data), key['file']),
+ file=out)
clangcmd, infilename, infile, outfile = cmd_splitter.process(key, temp)
outfile.parent.mkdir(parents=True, exist_ok=True)
if infile.is_file():
clangcmd = clangcmd + " -E -P " + \
- str(infile) + " -o /dev/stdout | sed '/^\\s*$/d' | wc -l"
+ str(infile) + " -o /dev/stdout | sed '/^\\s*$/d' | wc -lc"
loccmd = ("cat {} | sed '\\;^\\s*//;d' | sed '\\;^/\\*;d'"
- " | sed '/^\\*/d' | sed '/^\\s*$/d' | wc -l").format(
+ " | sed '/^\\*/d' | sed '/^\\s*$/d' | wc -lc").format(
infile)
runcmd = " {} ; {}".format(clangcmd, loccmd)
if ARGS['echocmd']:
@@ -339,8 +357,8 @@ def Main():
status.print("[{}/{}] Summing up {}".format(
i, len(processes), p['infile']), file=out)
output, err = p['process'].communicate()
- expanded, loc = list(map(int, output.split()))
- result.recordFile(p['infile'], loc, expanded)
+ expanded, expanded_bytes, loc, in_bytes = list(map(int, output.split()))
+ result.recordFile(p['infile'], loc, in_bytes, expanded, expanded_bytes)
end = time.time()
if ARGS['json']:
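
Two portability details in the locs.py hunks are worth spelling out. The dict-literal merge `{**a, **b}` is Python 3.5+ syntax, so it is rewritten with copy()/update(); and switching the shell pipelines from `wc -l` to `wc -lc` makes each command report line and byte counts, which is why `output.split()` now unpacks into four integers at the `recordFile` call. A sketch of the merge idiom (hypothetical values):

    defaults = {"api": 'include', "gen": 'gen'}
    overrides = {"gen": 'out/gen'}

    # py2-compatible equivalent of {**defaults, **overrides}
    merged = defaults.copy()
    merged.update(overrides)  # later keys win, as in the dict literal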
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
index cbb5b5dd6d..1466079e26 100755
--- a/deps/v8/tools/mb/mb.py
+++ b/deps/v8/tools/mb/mb.py
@@ -10,6 +10,7 @@ MB is a wrapper script for GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
+# for py2/py3 compatibility
from __future__ import print_function
import argparse
@@ -36,6 +37,12 @@ sys.path = [os.path.join(CHROMIUM_SRC_DIR, 'build')] + sys.path
import gn_helpers
+try:
+ cmp # Python 2
+except NameError: # Python 3
+ def cmp(x, y): # pylint: disable=redefined-builtin
+ return (x > y) - (x < y)
+
def main(args):
mbw = MetaBuildWrapper()
@@ -1155,7 +1162,7 @@ class MetaBuildWrapper(object):
def MaybeMakeDirectory(self, path):
try:
os.makedirs(path)
- except OSError, e:
+ except OSError as e:
if e.errno != errno.EEXIST:
raise
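
mb.py needs two fixes beyond print(): `except OSError, e` is py2-only syntax (the `as` form parses on Python 2.6+ and 3), and the builtin `cmp()` was removed in Python 3, hence the feature-detecting shim. The `(x > y) - (x < y)` expression reproduces cmp()'s -1/0/1 contract:

    try:
        cmp  # Python 2: the builtin exists
    except NameError:  # Python 3
        def cmp(x, y):
            return (x > y) - (x < y)

    assert cmp(1, 2) == -1 and cmp(2, 2) == 0 and cmp(3, 2) == 1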
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py
index dbd599645d..1889f18a3a 100755
--- a/deps/v8/tools/mb/mb_unittest.py
+++ b/deps/v8/tools/mb/mb_unittest.py
@@ -284,7 +284,7 @@ class UnitTest(unittest.TestCase):
self.assertEqual(['all', 'foo_unittests'], out['compile_targets'])
def test_analyze_handles_way_too_many_results(self):
- too_many_files = ', '.join(['"//foo:foo%d"' % i for i in xrange(4 * 1024)])
+ too_many_files = ', '.join(['"//foo:foo%d"' % i for i in range(4 * 1024)])
files = {'/tmp/in.json': '''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
diff --git a/deps/v8/tools/node/fetch_deps.py b/deps/v8/tools/node/fetch_deps.py
index 872263f627..cc740c3d97 100755
--- a/deps/v8/tools/node/fetch_deps.py
+++ b/deps/v8/tools/node/fetch_deps.py
@@ -9,6 +9,9 @@ Use this script to fetch all dependencies for V8 to run build_gn.py.
Usage: fetch_deps.py <v8-path>
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
import subprocess
import sys
@@ -52,9 +55,9 @@ def EnsureGit(v8_path):
expected_git_dir = os.path.join(v8_path, ".git")
actual_git_dir = git("rev-parse --absolute-git-dir")
if expected_git_dir == actual_git_dir:
- print "V8 is tracked stand-alone by git."
+ print("V8 is tracked stand-alone by git.")
return False
- print "Initializing temporary git repository in v8."
+ print("Initializing temporary git repository in v8.")
git("init")
git("config user.name \"Ada Lovelace\"")
git("config user.email ada@lovela.ce")
@@ -71,7 +74,7 @@ def FetchDeps(v8_path):
temporary_git = EnsureGit(v8_path)
try:
- print "Fetching dependencies."
+ print("Fetching dependencies.")
env = os.environ.copy()
# gclient needs to have depot_tools in the PATH.
env["PATH"] = depot_tools + os.pathsep + env["PATH"]
diff --git a/deps/v8/tools/node/node_common.py b/deps/v8/tools/node/node_common.py
index 172e9df812..2efb21860e 100755
--- a/deps/v8/tools/node/node_common.py
+++ b/deps/v8/tools/node/node_common.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
import pipes
import shutil
@@ -23,7 +26,7 @@ def EnsureDepotTools(v8_path, fetch_if_not_exist):
except:
pass
if fetch_if_not_exist:
- print "Checking out depot_tools."
+ print("Checking out depot_tools.")
# shell=True needed on Windows to resolve git.bat.
subprocess.check_call("git clone {} {}".format(
pipes.quote(DEPOT_TOOLS_URL),
@@ -36,14 +39,14 @@ def EnsureDepotTools(v8_path, fetch_if_not_exist):
return None
depot_tools = _Get(v8_path)
assert depot_tools is not None
- print "Using depot tools in %s" % depot_tools
+ print("Using depot tools in %s" % depot_tools)
return depot_tools
def UninitGit(v8_path):
- print "Uninitializing temporary git repository"
+ print("Uninitializing temporary git repository")
target = os.path.join(v8_path, ".git")
if os.path.isdir(target):
- print ">> Cleaning up %s" % target
+ print(">> Cleaning up %s" % target)
def OnRmError(func, path, exec_info):
# This might happen on Windows
os.chmod(path, stat.S_IWRITE)
diff --git a/deps/v8/tools/node/update_node.py b/deps/v8/tools/node/update_node.py
index c480a69a9b..2ebf799c5e 100755
--- a/deps/v8/tools/node/update_node.py
+++ b/deps/v8/tools/node/update_node.py
@@ -23,6 +23,9 @@ Optional flags:
--with-patch Also include currently staged files in the V8 checkout.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import os
import shutil
@@ -57,7 +60,7 @@ FILES_TO_KEEP = [ "gypfiles" ]
def RunGclient(path):
assert os.path.isdir(path)
- print ">> Running gclient sync"
+ print(">> Running gclient sync")
subprocess.check_call(["gclient", "sync", "--nohooks"], cwd=path)
def CommitPatch(options):
@@ -67,7 +70,7 @@ def CommitPatch(options):
the fake git clone fetch it into node.js. We can leave the commit, as
bot_update will ensure a clean state on each run.
"""
- print ">> Committing patch"
+ print(">> Committing patch")
subprocess.check_call(
["git", "-c", "user.name=fake", "-c", "user.email=fake@chromium.org",
"commit", "--allow-empty", "-m", "placeholder-commit"],
@@ -77,8 +80,8 @@ def CommitPatch(options):
def UpdateTarget(repository, options, files_to_keep):
source = os.path.join(options.v8_path, *repository)
target = os.path.join(options.node_path, TARGET_SUBDIR, *repository)
- print ">> Updating target directory %s" % target
- print ">> from active branch at %s" % source
+ print(">> Updating target directory %s" % target)
+ print(">> from active branch at %s" % source)
if not os.path.exists(target):
os.makedirs(target)
# Remove possible remnants of previous incomplete runs.
@@ -111,17 +114,17 @@ def UpdateTarget(repository, options, files_to_keep):
def UpdateGitIgnore(options):
file_name = os.path.join(options.node_path, TARGET_SUBDIR, ".gitignore")
assert os.path.isfile(file_name)
- print ">> Updating .gitignore with lines"
+ print(">> Updating .gitignore with lines")
with open(file_name) as gitignore:
content = gitignore.readlines()
content = [x.strip() for x in content]
for x in DELETE_FROM_GITIGNORE:
if x in content:
- print "- %s" % x
+ print("- %s" % x)
content.remove(x)
for x in ADD_TO_GITIGNORE:
if x not in content:
- print "+ %s" % x
+ print("+ %s" % x)
content.append(x)
content.sort(key=lambda x: x[1:] if x.startswith("!") else x)
with open(file_name, "w") as gitignore:
@@ -129,7 +132,7 @@ def UpdateGitIgnore(options):
gitignore.write("%s\n" % x)
def CreateCommit(options):
- print ">> Creating commit."
+ print(">> Creating commit.")
# Find git hash from source.
githash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"],
cwd=options.v8_path).strip()
diff --git a/deps/v8/tools/perf-compare.py b/deps/v8/tools/perf-compare.py
index 75f3c73c6a..744f6aae97 100755
--- a/deps/v8/tools/perf-compare.py
+++ b/deps/v8/tools/perf-compare.py
@@ -11,6 +11,9 @@ Examples:
%prog -t "x64 results" ../result.json master.json -o results.html
'''
+# for py2/py3 compatibility
+from __future__ import print_function
+
from collections import OrderedDict
import json
import math
@@ -418,7 +421,7 @@ def Render(args):
run_names[run_name] = 0
for error in data["errors"]:
- print "Error:", error
+ print("Error:", error)
for trace in data["traces"]:
suite_name = trace["graphs"][0]
diff --git a/deps/v8/tools/predictable_wrapper.py b/deps/v8/tools/predictable_wrapper.py
index 2299a5195e..31b62c89d6 100644
--- a/deps/v8/tools/predictable_wrapper.py
+++ b/deps/v8/tools/predictable_wrapper.py
@@ -14,16 +14,21 @@ The command is run up to three times and the printed allocation hash is
compared. Differences are reported as errors.
"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
import sys
from testrunner.local import command
from testrunner.local import utils
+
MAX_TRIES = 3
TIMEOUT = 120
# Predictable mode works only when run on the host os.
-command.setup(utils.GuessOS())
+command.setup(utils.GuessOS(), None)
def main(args):
def allocation_str(stdout):
@@ -36,19 +41,19 @@ def main(args):
previous_allocations = None
for run in range(1, MAX_TRIES + 1):
- print '### Predictable run #%d' % run
+ print('### Predictable run #%d' % run)
output = cmd.execute()
if output.stdout:
- print '### Stdout:'
- print output.stdout
+ print('### Stdout:')
+ print(output.stdout)
if output.stderr:
- print '### Stderr:'
- print output.stderr
- print '### Return code: %s' % output.exit_code
+ print('### Stderr:')
+ print(output.stderr)
+ print('### Return code: %s' % output.exit_code)
if output.HasTimedOut():
# If we get a timeout in any run, we are in an unpredictable state. Just
# report it as a failure and don't rerun.
- print '### Test timed out'
+ print('### Test timed out')
return 1
allocations = allocation_str(output.stdout)
if not allocations:
@@ -57,7 +62,7 @@ def main(args):
'--verify-predictable is passed at the cmd line.')
return 2
if previous_allocations and previous_allocations != allocations:
- print '### Allocations differ'
+ print('### Allocations differ')
return 3
if run >= MAX_TRIES:
# No difference on the last run -> report a success.
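
Besides the print conversion, this hunk reflects an interface change that recurs below: the testrunner's `command.setup()` now takes the target Android device id as a second argument (see the new `def setup(target_os, device)` at the end of this diff). Callers with no device to pass, like this wrapper, hand in None, while run_perf.py forwards its `options.device`:

    from testrunner.local import command, utils

    # non-Android caller: no device id to forward
    command.setup(utils.GuessOS(), None)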
diff --git a/deps/v8/tools/release/auto_push.py b/deps/v8/tools/release/auto_push.py
index ca9e5e8734..4cb968787f 100755
--- a/deps/v8/tools/release/auto_push.py
+++ b/deps/v8/tools/release/auto_push.py
@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import json
import os
@@ -63,7 +66,7 @@ class LastReleaseBailout(Step):
format="%H", git_hash="%s..%s" % (last_release, self["candidate"]))
if not commits:
- print "Already pushed current candidate %s" % self["candidate"]
+ print("Already pushed current candidate %s" % self["candidate"])
return True
@@ -71,7 +74,7 @@ class CreateRelease(Step):
MESSAGE = "Creating release if specified."
def RunStep(self):
- print "Creating release for %s." % self["candidate"]
+ print("Creating release for %s." % self["candidate"])
args = [
"--author", self._options.author,
@@ -96,7 +99,7 @@ class AutoPush(ScriptsBase):
def _ProcessOptions(self, options):
if not options.author or not options.reviewer: # pragma: no cover
- print "You need to specify author and reviewer."
+ print("You need to specify author and reviewer.")
return False
options.requires_editor = False
return True
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index dd60d5dff7..d6ede67082 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import os
import sys
@@ -160,9 +163,9 @@ class UploadCL(Step):
cq=self._options.use_commit_queue,
cq_dry_run=self._options.use_dry_run,
cwd=cwd)
- print "CL uploaded."
+ print("CL uploaded.")
else:
- print "Dry run - don't upload."
+ print("Dry run - don't upload.")
self.GitCheckout("master", cwd=cwd)
self.GitDeleteBranch("work-branch", cwd=cwd)
@@ -205,7 +208,7 @@ class AutoRoll(ScriptsBase):
def _ProcessOptions(self, options): # pragma: no cover
if not options.author or not options.reviewer:
- print "A reviewer (-r) and an author (-a) are required."
+ print("A reviewer (-r) and an author (-a) are required.")
return False
options.requires_editor = False
diff --git a/deps/v8/tools/release/auto_tag.py b/deps/v8/tools/release/auto_tag.py
index a52a028697..fddefed61f 100755
--- a/deps/v8/tools/release/auto_tag.py
+++ b/deps/v8/tools/release/auto_tag.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import sys
@@ -15,7 +18,7 @@ class Preparation(Step):
def RunStep(self):
# TODO(machenbach): Remove after the git switch.
if self.Config("PERSISTFILE_BASENAME") == "/tmp/v8-auto-tag-tempfile":
- print "This script is disabled until after the v8 git migration."
+ print("This script is disabled until after the v8 git migration.")
return True
self.CommonPrepare()
@@ -80,7 +83,7 @@ class GetOldestUntaggedVersion(Step):
self["candidate_version"] = version
if not self["candidate"] or not self["candidate_version"]:
- print "Nothing found to tag."
+ print("Nothing found to tag.")
self.CommonCleanup()
return True
@@ -120,18 +123,18 @@ class CalculateTagRevision(Step):
# Don't include the version change commit itself if there is no upper
# limit yet.
candidate_svn = str(int(candidate_svn) + 1)
- next_svn = sys.maxint
+ next_svn = sys.maxsize
lkgr_svn = self.LastLKGR(candidate_svn, next_svn)
if not lkgr_svn:
- print "There is no lkgr since the candidate version yet."
+ print("There is no lkgr since the candidate version yet.")
self.CommonCleanup()
return True
# Let's check if the lkgr is at least three hours old.
self["lkgr"] = self.vc.SvnGit(lkgr_svn)
if not self["lkgr"]:
- print "Couldn't find git hash for lkgr %s" % lkgr_svn
+ print("Couldn't find git hash for lkgr %s" % lkgr_svn)
self.CommonCleanup()
return True
@@ -139,11 +142,11 @@ class CalculateTagRevision(Step):
current_utc_time = self._side_effect_handler.GetUTCStamp()
if current_utc_time < lkgr_utc_time + 10800:
- print "Candidate lkgr %s is too recent for tagging." % lkgr_svn
+ print("Candidate lkgr %s is too recent for tagging." % lkgr_svn)
self.CommonCleanup()
return True
- print "Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"])
+ print("Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"]))
class MakeTag(Step):
@@ -172,7 +175,7 @@ class AutoTag(ScriptsBase):
def _ProcessOptions(self, options): # pragma: no cover
if not options.dry_run and not options.author:
- print "Specify your chromium.org email with -a"
+ print("Specify your chromium.org email with -a")
return False
options.wait_for_lgtm = False
options.force_readline_defaults = True
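
`sys.maxint` disappeared in Python 3 because ints are unbounded there; `sys.maxsize` (the largest container index) exists on both versions and serves the same "effectively infinite upper bound" role here:

    import sys

    next_svn = sys.maxsize  # 2**63 - 1 on 64-bit builds, on py2 and py3 alike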
diff --git a/deps/v8/tools/release/check_clusterfuzz.py b/deps/v8/tools/release/check_clusterfuzz.py
index 8af835136b..021cd55286 100755
--- a/deps/v8/tools/release/check_clusterfuzz.py
+++ b/deps/v8/tools/release/check_clusterfuzz.py
@@ -13,6 +13,8 @@ written to public logs. Public automated callers of this script should
suppress stdout and stderr and only process contents of the results_file.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
import argparse
import httplib
@@ -222,7 +224,7 @@ def Main():
with open(options.results_file, "w") as f:
f.write(json.dumps(results))
else:
- print results
+ print(results)
if __name__ == "__main__":
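
check_clusterfuzz.py gains the future import but still does `import httplib` (visible in the context above, and again in common_includes.py below), which only exists on Python 2; the module was renamed `http.client` in Python 3. A portable import would look like the sketch below -- an assumption, since this patch does not apply it:

    try:
        import httplib                  # Python 2
    except ImportError:
        import http.client as httplib  # Python 3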
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index bd28fe3aa7..af4709bfea 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import datetime
import httplib
@@ -199,8 +202,8 @@ def Command(cmd, args="", prefix="", pipe=True, cwd=None):
cwd = cwd or os.getcwd()
# TODO(machenbach): Use timeout.
cmd_line = "%s %s %s" % (prefix, cmd, args)
- print "Command: %s" % cmd_line
- print "in %s" % cwd
+ print("Command: %s" % cmd_line)
+ print("in %s" % cwd)
sys.stdout.flush()
try:
if pipe:
@@ -272,8 +275,8 @@ class SideEffectHandler(object): # pragma: no cover
try:
return json.loads(data)
except:
- print data
- print "ERROR: Could not read response. Is your key valid?"
+ print(data)
+ print("ERROR: Could not read response. Is your key valid?")
raise
def Sleep(self, seconds):
@@ -448,7 +451,7 @@ class Step(GitRecipesMixin):
if not self._state and os.path.exists(state_file):
self._state.update(json.loads(FileToText(state_file)))
- print ">>> Step %d: %s" % (self._number, self._text)
+ print(">>> Step %d: %s" % (self._number, self._text))
try:
return self.RunStep()
finally:
@@ -484,16 +487,16 @@ class Step(GitRecipesMixin):
raise Exception("Retried too often. Giving up. Reason: %s" %
str(got_exception))
wait_time = wait_plan.pop()
- print "Waiting for %f seconds." % wait_time
+ print("Waiting for %f seconds." % wait_time)
self._side_effect_handler.Sleep(wait_time)
- print "Retrying..."
+ print("Retrying...")
else:
return result
def ReadLine(self, default=None):
# Don't prompt in forced mode.
if self._options.force_readline_defaults and default is not None:
- print "%s (forced)" % default
+ print("%s (forced)" % default)
return default
else:
return self._side_effect_handler.ReadLine()
@@ -529,8 +532,8 @@ class Step(GitRecipesMixin):
def Die(self, msg=""):
if msg != "":
- print "Error: %s" % msg
- print "Exiting"
+ print("Error: %s" % msg)
+ print("Exiting")
raise Exception(msg)
def DieNoManualMode(self, msg=""):
@@ -539,7 +542,7 @@ class Step(GitRecipesMixin):
self.Die(msg)
def Confirm(self, msg):
- print "%s [Y/n] " % msg,
+ print("%s [Y/n] " % msg, end=' ')
answer = self.ReadLine(default="Y")
return answer == "" or answer == "Y" or answer == "y"
@@ -549,7 +552,7 @@ class Step(GitRecipesMixin):
msg = "Branch %s exists, do you want to delete it?" % name
if self.Confirm(msg):
self.GitDeleteBranch(name, cwd=cwd)
- print "Branch %s deleted." % name
+ print("Branch %s deleted." % name)
else:
msg = "Can't continue. Please delete branch %s and try again." % name
self.Die(msg)
@@ -612,10 +615,10 @@ class Step(GitRecipesMixin):
"change the headline of the uploaded CL.")
answer = ""
while answer != "LGTM":
- print "> ",
+ print("> ", end=' ')
answer = self.ReadLine(None if self._options.wait_for_lgtm else "LGTM")
if answer != "LGTM":
- print "That was not 'LGTM'."
+ print("That was not 'LGTM'.")
def WaitForResolvingConflicts(self, patch_file):
print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
@@ -627,8 +630,8 @@ class Step(GitRecipesMixin):
if answer == "ABORT":
self.Die("Applying the patch failed.")
if answer != "":
- print "That was not 'RESOLVED' or 'ABORT'."
- print "> ",
+ print("That was not 'RESOLVED' or 'ABORT'.")
+ print("> ", end=' ')
answer = self.ReadLine()
# Takes a file containing the patch to apply as first argument.
@@ -769,16 +772,18 @@ class UploadStep(Step):
def RunStep(self):
reviewer = None
if self._options.reviewer:
- print "Using account %s for review." % self._options.reviewer
+ print("Using account %s for review." % self._options.reviewer)
reviewer = self._options.reviewer
tbr_reviewer = None
if self._options.tbr_reviewer:
- print "Using account %s for TBR review." % self._options.tbr_reviewer
+ print("Using account %s for TBR review." % self._options.tbr_reviewer)
tbr_reviewer = self._options.tbr_reviewer
if not reviewer and not tbr_reviewer:
- print "Please enter the email address of a V8 reviewer for your patch: ",
+ print(
+ "Please enter the email address of a V8 reviewer for your patch: ",
+ end=' ')
self.DieNoManualMode("A reviewer must be specified in forced mode.")
reviewer = self.ReadLine()
@@ -854,7 +859,7 @@ class ScriptsBase(object):
# Process common options.
if options.step < 0: # pragma: no cover
- print "Bad step number %d" % options.step
+ print("Bad step number %d" % options.step)
parser.print_help()
return None
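
common_includes.py also exercises the prompt idiom: py2's trailing-comma form `print "> ",` suppressed the newline, and with the future import the separator becomes an explicit keyword argument:

    from __future__ import print_function

    print("> ", end=' ')  # standard 2to3 translation of py2's `print "> ",`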
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
index f030ac804e..cbf07f448a 100755
--- a/deps/v8/tools/release/create_release.py
+++ b/deps/v8/tools/release/create_release.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import os
import sys
@@ -27,7 +30,7 @@ class PrepareBranchRevision(Step):
self["push_hash"] = (self._options.revision or
self.GitLog(n=1, format="%H", branch="origin/master"))
assert self["push_hash"]
- print "Release revision %s" % self["push_hash"]
+ print("Release revision %s" % self["push_hash"])
class IncrementVersion(Step):
@@ -138,7 +141,7 @@ class PushBranchRef(Step):
def RunStep(self):
cmd = "push origin %s:refs/heads/%s" % (self["push_hash"], self["version"])
if self._options.dry_run:
- print "Dry run. Command:\ngit %s" % cmd
+ print("Dry run. Command:\ngit %s" % cmd)
else:
self.Git(cmd)
@@ -216,7 +219,7 @@ class LandBranch(Step):
def RunStep(self):
if self._options.dry_run:
- print "Dry run - upload CL."
+ print("Dry run - upload CL.")
else:
self.GitUpload(force=True,
bypass_hooks=True,
@@ -224,7 +227,7 @@ class LandBranch(Step):
message_file=self.Config("COMMITMSG_FILE"))
cmd = "cl land --bypass-hooks -f"
if self._options.dry_run:
- print "Dry run. Command:\ngit %s" % cmd
+ print("Dry run. Command:\ngit %s" % cmd)
else:
self.Git(cmd)
@@ -270,7 +273,7 @@ class CreateRelease(ScriptsBase):
def _ProcessOptions(self, options): # pragma: no cover
if not options.author or not options.reviewer:
- print "Reviewer (-r) and author (-a) are required."
+ print("Reviewer (-r) and author (-a) are required.")
return False
return True
diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index bf526bf5d8..c9594292b1 100755
--- a/deps/v8/tools/release/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
from collections import OrderedDict
import sys
@@ -186,10 +189,10 @@ class CleanUp(Step):
def RunStep(self):
self.CommonCleanup()
- print "*** SUMMARY ***"
- print "branch: %s" % self["merge_to_branch"]
+ print("*** SUMMARY ***")
+ print("branch: %s" % self["merge_to_branch"])
if self["revision_list"]:
- print "patches: %s" % self["revision_list"]
+ print("patches: %s" % self["revision_list"])
class MergeToBranch(ScriptsBase):
@@ -215,10 +218,10 @@ class MergeToBranch(ScriptsBase):
def _ProcessOptions(self, options):
if len(options.revisions) < 1:
if not options.patch:
- print "Either a patch file or revision numbers must be specified"
+ print("Either a patch file or revision numbers must be specified")
return False
if not options.message:
- print "You must specify a merge comment if no patches are specified"
+ print("You must specify a merge comment if no patches are specified")
return False
options.bypass_upload_hooks = True
# CC ulan to make sure that fixes are merged to Google3.
@@ -233,8 +236,8 @@ class MergeToBranch(ScriptsBase):
for revision in options.revisions:
if (IsSvnNumber(revision) or
(revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
- print "Please provide full git hashes of the patches to merge."
- print "Got: %s" % revision
+ print("Please provide full git hashes of the patches to merge.")
+ print("Got: %s" % revision)
return False
return True
diff --git a/deps/v8/tools/release/mergeinfo.py b/deps/v8/tools/release/mergeinfo.py
index 1e29ece909..bed7441f85 100755
--- a/deps/v8/tools/release/mergeinfo.py
+++ b/deps/v8/tools/release/mergeinfo.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import os
import sys
@@ -77,32 +80,32 @@ def get_first_v8_version(branches):
return version
def print_analysis(git_working_dir, hash_to_search):
- print '1.) Searching for "' + hash_to_search + '"'
- print '=====================ORIGINAL COMMIT START==================='
- print describe_commit(git_working_dir, hash_to_search)
- print '=====================ORIGINAL COMMIT END====================='
- print '2.) General information:'
+ print('1.) Searching for "' + hash_to_search + '"')
+ print('=====================ORIGINAL COMMIT START===================')
+ print(describe_commit(git_working_dir, hash_to_search))
+ print('=====================ORIGINAL COMMIT END=====================')
+ print('2.) General information:')
branches = get_branches_for_commit(git_working_dir, hash_to_search)
- print 'Is LKGR: ' + str(is_lkgr(branches))
- print 'Is on Canary: ' + str(get_first_canary(branches))
- print 'First V8 branch: ' + str(get_first_v8_version(branches)) + \
- ' (Might not be the rolled version)'
- print '3.) Found follow-up commits, reverts and ports:'
+ print('Is LKGR: ' + str(is_lkgr(branches)))
+ print('Is on Canary: ' + str(get_first_canary(branches)))
+ print('First V8 branch: ' + str(get_first_v8_version(branches)) + \
+ ' (Might not be the rolled version)')
+ print('3.) Found follow-up commits, reverts and ports:')
followups = get_followup_commits(git_working_dir, hash_to_search)
for followup in followups:
- print describe_commit(git_working_dir, followup, True)
+ print(describe_commit(git_working_dir, followup, True))
- print '4.) Found merges:'
+ print('4.) Found merges:')
merges = get_merge_commits(git_working_dir, hash_to_search)
for currentMerge in merges:
- print describe_commit(git_working_dir, currentMerge, True)
- print '---Merged to:'
+ print(describe_commit(git_working_dir, currentMerge, True))
+ print('---Merged to:')
mergeOutput = git_execute(git_working_dir, ['branch',
'--contains',
currentMerge,
'-r']).strip()
- print mergeOutput
- print 'Finished successfully'
+ print(mergeOutput)
+ print('Finished successfully')
if __name__ == '__main__': # pragma: no cover
parser = argparse.ArgumentParser('Tool to check where a git commit was'
diff --git a/deps/v8/tools/release/push_to_candidates.py b/deps/v8/tools/release/push_to_candidates.py
index 538b9887d6..c706896223 100755
--- a/deps/v8/tools/release/push_to_candidates.py
+++ b/deps/v8/tools/release/push_to_candidates.py
@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import os
import sys
@@ -46,7 +49,7 @@ class Preparation(Step):
if(self["current_branch"] == self.Config("CANDIDATESBRANCH")
or self["current_branch"] == self.Config("BRANCHNAME")):
- print "Warning: Script started on branch %s" % self["current_branch"]
+ print("Warning: Script started on branch %s" % self["current_branch"])
self.PrepareBranch()
self.DeleteBranch(self.Config("CANDIDATESBRANCH"))
@@ -347,10 +350,10 @@ class PushToCandidates(ScriptsBase):
def _ProcessOptions(self, options): # pragma: no cover
if not options.manual and not options.reviewer:
- print "A reviewer (-r) is required in (semi-)automatic mode."
+ print("A reviewer (-r) is required in (semi-)automatic mode.")
return False
if not options.manual and not options.author:
- print "Specify your chromium.org email with -a in (semi-)automatic mode."
+ print("Specify your chromium.org email with -a in (semi-)automatic mode.")
return False
options.tbr_commit = not options.manual
diff --git a/deps/v8/tools/release/roll_merge.py b/deps/v8/tools/release/roll_merge.py
index 44ed858f7e..2f03abb54c 100755
--- a/deps/v8/tools/release/roll_merge.py
+++ b/deps/v8/tools/release/roll_merge.py
@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
from collections import OrderedDict
import sys
@@ -202,7 +205,7 @@ class TagRevision(Step):
MESSAGE = "Create the tag."
def RunStep(self):
- print "Creating tag %s" % self["version"]
+ print("Creating tag %s" % self["version"])
self.vc.Tag(self["version"],
self.vc.RemoteBranch(self["merge_to_branch"]),
self["commit_title"])
@@ -213,11 +216,11 @@ class CleanUp(Step):
def RunStep(self):
self.CommonCleanup()
- print "*** SUMMARY ***"
- print "version: %s" % self["version"]
- print "branch: %s" % self["merge_to_branch"]
+ print("*** SUMMARY ***")
+ print("version: %s" % self["version"])
+ print("branch: %s" % self["merge_to_branch"])
if self["revision_list"]:
- print "patches: %s" % self["revision_list"]
+ print("patches: %s" % self["revision_list"])
class RollMerge(ScriptsBase):
@@ -241,10 +244,10 @@ class RollMerge(ScriptsBase):
def _ProcessOptions(self, options):
if len(options.revisions) < 1:
if not options.patch:
- print "Either a patch file or revision numbers must be specified"
+ print("Either a patch file or revision numbers must be specified")
return False
if not options.message:
- print "You must specify a merge comment if no patches are specified"
+ print("You must specify a merge comment if no patches are specified")
return False
options.bypass_upload_hooks = True
# CC ulan to make sure that fixes are merged to Google3.
@@ -254,8 +257,8 @@ class RollMerge(ScriptsBase):
for revision in options.revisions:
if (IsSvnNumber(revision) or
(revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
- print "Please provide full git hashes of the patches to merge."
- print "Got: %s" % revision
+ print("Please provide full git hashes of the patches to merge.")
+ print("Got: %s" % revision)
return False
return True
diff --git a/deps/v8/tools/release/script_test.py b/deps/v8/tools/release/script_test.py
index b9a17e97fa..0f345b7fa8 100755
--- a/deps/v8/tools/release/script_test.py
+++ b/deps/v8/tools/release/script_test.py
@@ -29,6 +29,9 @@
# Wraps test execution with a coverage analysis. To get the best speed, the
# native python coverage version >= 3.7.1 should be installed.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import coverage
import os
import unittest
@@ -46,7 +49,7 @@ def Main(argv):
])
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(alltests))
cov.stop()
- print cov.report()
+ print(cov.report())
if __name__ == '__main__':
diff --git a/deps/v8/tools/release/search_related_commits.py b/deps/v8/tools/release/search_related_commits.py
index d27aa56f86..e6e52d2196 100755
--- a/deps/v8/tools/release/search_related_commits.py
+++ b/deps/v8/tools/release/search_related_commits.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import operator
import os
@@ -17,7 +20,7 @@ def search_all_related_commits(
all_commits_raw = _find_commits_inbetween(
start_hash, until, git_working_dir, verbose)
if verbose:
- print "All commits between <of> and <until>: " + all_commits_raw
+ print("All commits between <of> and <until>: " + all_commits_raw)
# Adding start hash too
all_commits = [start_hash]
@@ -61,7 +64,7 @@ def _search_related_commits(
commit_position = matches.group(2)
if verbose:
- print "1.) Commit position to look for: " + commit_position
+ print("1.) Commit position to look for: " + commit_position)
search_range = start_hash + ".." + until
@@ -78,13 +81,13 @@ def _search_related_commits(
git_working_dir, git_args(start_hash), verbose).strip()
if verbose:
- print "2.) Found by hash: " + found_by_hash
+ print("2.) Found by hash: " + found_by_hash)
found_by_commit_pos = git_execute(
git_working_dir, git_args(commit_position), verbose).strip()
if verbose:
- print "3.) Found by commit position: " + found_by_commit_pos
+ print("3.) Found by commit position: " + found_by_commit_pos)
# Replace brackets or else they are wrongly interpreted by --grep
title = title.replace("[", "\\[")
@@ -94,7 +97,7 @@ def _search_related_commits(
git_working_dir, git_args(title), verbose).strip()
if verbose:
- print "4.) Found by title: " + found_by_title
+ print("4.) Found by title: " + found_by_title)
hits = (
_convert_to_array(found_by_hash) +
@@ -132,8 +135,8 @@ def _remove_duplicates(array):
def git_execute(working_dir, args, verbose=False):
command = ["git", "-C", working_dir] + args
if verbose:
- print "Git working dir: " + working_dir
- print "Executing git command:" + str(command)
+ print("Git working dir: " + working_dir)
+ print("Executing git command:" + str(command))
p = Popen(args=command, stdin=PIPE,
stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
@@ -141,7 +144,7 @@ def git_execute(working_dir, args, verbose=False):
if rc != 0:
raise Exception(err)
if verbose:
- print "Git return value: " + output
+ print("Git return value: " + output)
return output
def _pretty_print_entry(hash, git_dir, pre_text, verbose):
@@ -215,4 +218,4 @@ if __name__ == "__main__": # pragma: no cover
args = sys.argv[1:]
options = parser.parse_args(args)
for current_line in main(options):
- print current_line
+ print(current_line)
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index e454f542ae..c1a571ca24 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -26,6 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
import shutil
import tempfile
@@ -383,11 +386,11 @@ class ScriptTest(unittest.TestCase):
return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
def Call(self, fun, *args, **kwargs):
- print "Calling %s with %s and %s" % (str(fun), str(args), str(kwargs))
+ print("Calling %s with %s and %s" % (str(fun), str(args), str(kwargs)))
def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
- print "%s %s" % (cmd, args)
- print "in %s" % cwd
+ print("%s %s" % (cmd, args))
+ print("in %s" % cwd)
return self._mock.Call("command", cmd + " " + args, cwd=cwd)
def ReadLine(self):
diff --git a/deps/v8/tools/run-clang-tidy.py b/deps/v8/tools/run-clang-tidy.py
index bf08a65fcd..aee1b40bef 100755
--- a/deps/v8/tools/run-clang-tidy.py
+++ b/deps/v8/tools/run-clang-tidy.py
@@ -4,6 +4,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import json
import multiprocessing
import optparse
@@ -376,29 +379,29 @@ def main():
options.build_folder = DetectBuildFolder()
if not CheckClangTidy():
- print 'Could not find clang-tidy'
+ print('Could not find clang-tidy')
elif options.build_folder is None or not os.path.isdir(options.build_folder):
- print 'Please provide a build folder with -b'
+ print('Please provide a build folder with -b')
elif options.gen_compdb:
GenerateCompileCommands(options.build_folder)
elif not CheckCompDB(options.build_folder):
- print 'Could not find compilation database, ' \
- 'please generate it with --gen-compdb'
+ print('Could not find compilation database, ' \
+ 'please generate it with --gen-compdb')
else:
- print 'Using build folder:', options.build_folder
+ print('Using build folder:', options.build_folder)
if options.full:
- print 'Running clang-tidy - full'
+ print('Running clang-tidy - full')
ClangTidyRunFull(options.build_folder,
options.no_output_filter,
options.checks,
options.auto_fix)
elif options.aggregate:
- print 'Running clang-tidy - aggregating warnings'
+ print('Running clang-tidy - aggregating warnings')
if options.auto_fix:
- print 'Auto fix not working in aggregate mode, running without.'
+ print('Auto fix not working in aggregate mode, running without.')
ClangTidyRunAggregate(options.build_folder, options.show_loc)
elif options.single:
- print 'Running clang-tidy - single on ' + options.file_name
+ print('Running clang-tidy - single on ' + options.file_name)
if options.file_name is not None:
line_ranges = []
for match in re.findall(r'(\[.*?\])', options.line_ranges):
@@ -409,9 +412,9 @@ def main():
options.auto_fix,
line_ranges)
else:
- print 'Filename provided, please specify a filename with --file'
+ print('Filename provided, please specify a filename with --file')
else:
- print 'Running clang-tidy'
+ print('Running clang-tidy')
ClangTidyRunDiff(options.build_folder,
options.diff_branch,
options.auto_fix)
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 46afbdedce..88856d1530 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -99,6 +99,10 @@ Path pieces are concatenated. D8 is always run with the suite's path as cwd.
The test flags are passed to the js test file after '--'.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
+
from collections import OrderedDict
import json
import logging
@@ -114,6 +118,11 @@ from testrunner.local import android
from testrunner.local import command
from testrunner.local import utils
+try:
+ basestring # Python 2
+except NameError: # Python 3
+ basestring = str
+
ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["arm",
"ia32",
@@ -662,7 +671,7 @@ class DesktopPlatform(Platform):
self.command_prefix = []
# Setup command class to OS specific version.
- command.setup(utils.GuessOS())
+ command.setup(utils.GuessOS(), options.device)
if options.prioritize or options.affinitize != None:
self.command_prefix = ["schedtool"]
@@ -916,7 +925,7 @@ def Main(args):
help="Adapt to path structure used on buildbots and adds "
"timestamps/level to all logged status messages",
default=False, action="store_true")
- parser.add_option("--device",
+ parser.add_option("-d", "--device",
help="The device ID to run Android tests on. If not given "
"it will be autodetected.")
parser.add_option("--extra-flags",
@@ -1074,7 +1083,7 @@ def Main(args):
def Runner():
"""Output generator that reruns several times."""
total_runs = runnable.run_count * options.run_count_multiplier
- for i in xrange(0, max(1, total_runs)):
+ for i in range(0, max(1, total_runs)):
# TODO(machenbach): Allow timeout per arch like with run_count per
# arch.
try:
@@ -1092,12 +1101,12 @@ def Main(args):
if options.json_test_results:
results.WriteToFile(options.json_test_results)
else: # pragma: no cover
- print results
+ print(results)
if options.json_test_results_secondary:
results_secondary.WriteToFile(options.json_test_results_secondary)
else: # pragma: no cover
- print results_secondary
+ print(results_secondary)
if results.errors or have_failed_tests[0]:
return 1
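
run_perf.py bundles several py3 fixes beyond print(): `xrange()` (also converted in mb_unittest.py, sancov_merger.py, stats-viewer.py and base_runner.py) no longer exists, and py3's `range()` is already lazy; `basestring` is gone because `unicode` folded into `str`; and `reduce()` moved from the builtins into functools, where it is importable on Python 2.6+ as well. A combined sketch:

    from functools import reduce  # builtin on py2, functools-only on py3

    try:
        basestring  # Python 2
    except NameError:  # Python 3
        basestring = str

    assert isinstance(u"x", basestring)
    for i in range(3):  # py3: lazy sequence; py2: a small list, harmless here
        pass
    total = reduce(lambda a, b: a + b, [1, 2, 3], 0)  # 6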
diff --git a/deps/v8/tools/sanitizers/sancov_formatter.py b/deps/v8/tools/sanitizers/sancov_formatter.py
index 2e168fb0ec..b66bfed815 100755
--- a/deps/v8/tools/sanitizers/sancov_formatter.py
+++ b/deps/v8/tools/sanitizers/sancov_formatter.py
@@ -39,6 +39,10 @@ directory. It's not checked out by default and must be added as a custom deps:
'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
+
import argparse
import json
import logging
@@ -426,26 +430,26 @@ def main(args=None):
options.build_dir = os.path.abspath(options.build_dir)
if options.action.lower() == 'all':
if not options.json_output:
- print '--json-output is required'
+ print('--json-output is required')
return 1
write_instrumented(options)
elif options.action.lower() == 'merge':
if not options.coverage_dir:
- print '--coverage-dir is required'
+ print('--coverage-dir is required')
return 1
if not options.json_input:
- print '--json-input is required'
+ print('--json-input is required')
return 1
if not options.json_output:
- print '--json-output is required'
+ print('--json-output is required')
return 1
merge(options)
elif options.action.lower() == 'split':
if not options.json_input:
- print '--json-input is required'
+ print('--json-input is required')
return 1
if not options.output_dir:
- print '--output-dir is required'
+ print('--output-dir is required')
return 1
split(options)
return 0
diff --git a/deps/v8/tools/sanitizers/sancov_merger.py b/deps/v8/tools/sanitizers/sancov_merger.py
index 867f8b4258..6fd2eb2555 100755
--- a/deps/v8/tools/sanitizers/sancov_merger.py
+++ b/deps/v8/tools/sanitizers/sancov_merger.py
@@ -106,7 +106,7 @@ def generate_inputs(keep, coverage_dir, file_map, cpus):
n = max(2, int(math.ceil(len(files) / float(cpus))))
# Chop files into buckets.
- buckets = [files[i:i+n] for i in xrange(0, len(files), n)]
+ buckets = [files[i:i+n] for i in range(0, len(files), n)]
# Inputs for multiprocessing. List of tuples containing:
# Keep-files option, base path, executable name, index of bucket,
diff --git a/deps/v8/tools/sanitizers/sanitize_pcs.py b/deps/v8/tools/sanitizers/sanitize_pcs.py
index 47f2715096..a1e3a1df2e 100755
--- a/deps/v8/tools/sanitizers/sanitize_pcs.py
+++ b/deps/v8/tools/sanitizers/sanitize_pcs.py
@@ -5,7 +5,10 @@
"""Corrects objdump output. The logic is from sancov.py, see comments there."""
-import sys;
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import sys
for line in sys.stdin:
- print '0x%x' % (int(line.strip(), 16) + 4)
+ print('0x%x' % (int(line.strip(), 16) + 4))
diff --git a/deps/v8/tools/stats-viewer.py b/deps/v8/tools/stats-viewer.py
index e8fc69e3c2..dd9d2c20bf 100755
--- a/deps/v8/tools/stats-viewer.py
+++ b/deps/v8/tools/stats-viewer.py
@@ -34,6 +34,9 @@ The stats viewer reads counters from a binary file and displays them
in a window, re-reading and re-displaying with regular intervals.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import mmap
import optparse
import os
@@ -100,7 +103,7 @@ class StatsViewer(object):
if not os.path.exists(self.data_name):
maps_name = "/proc/%s/maps" % self.data_name
if not os.path.exists(maps_name):
- print "\"%s\" is neither a counter file nor a PID." % self.data_name
+ print("\"%s\" is neither a counter file nor a PID." % self.data_name)
sys.exit(1)
maps_file = open(maps_name, "r")
try:
@@ -110,7 +113,7 @@ class StatsViewer(object):
self.data_name = m.group(0)
break
if self.data_name is None:
- print "Can't find counter file in maps for PID %s." % self.data_name
+ print("Can't find counter file in maps for PID %s." % self.data_name)
sys.exit(1)
finally:
maps_file.close()
@@ -123,7 +126,7 @@ class StatsViewer(object):
return CounterCollection(data_access)
elif data_access.IntAt(0) == CHROME_COUNTERS_FILE_MAGIC_NUMBER:
return ChromeCounterCollection(data_access)
- print "File %s is not stats data." % self.data_name
+ print("File %s is not stats data." % self.data_name)
sys.exit(1)
def CleanUp(self):
@@ -143,7 +146,7 @@ class StatsViewer(object):
self.RefreshCounters()
changed = True
else:
- for i in xrange(self.data.CountersInUse()):
+ for i in range(self.data.CountersInUse()):
counter = self.data.Counter(i)
name = counter.Name()
if name in self.ui_counters:
@@ -188,7 +191,7 @@ class StatsViewer(object):
sorted by prefix.
"""
names = {}
- for i in xrange(self.data.CountersInUse()):
+ for i in range(self.data.CountersInUse()):
counter = self.data.Counter(i)
name = counter.Name()
names[name] = counter
@@ -233,7 +236,7 @@ class StatsViewer(object):
text=counter_name)
name.grid(row=index, column=0, padx=1, pady=1)
count = len(counter_objs)
- for i in xrange(count):
+ for i in range(count):
counter = counter_objs[i]
name = counter.Name()
var = Tkinter.StringVar()
@@ -435,7 +438,7 @@ class ChromeCounterCollection(object):
def CountersInUse(self):
"""Return the number of counters in active use."""
- for i in xrange(self.max_counters):
+ for i in range(self.max_counters):
name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
if self.data.ByteAt(name_offset) == 0:
return i
diff --git a/deps/v8/tools/testrunner/OWNERS b/deps/v8/tools/testrunner/OWNERS
new file mode 100644
index 0000000000..c8693c972c
--- /dev/null
+++ b/deps/v8/tools/testrunner/OWNERS
@@ -0,0 +1,5 @@
+set noparent
+
+machenbach@chromium.org
+sergiyb@chromium.org
+tmrts@chromium.org
\ No newline at end of file
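
The `_create_progress_indicators` rewrite in base_runner.py below feature-detects `set_test_count()` with EAFP instead of isinstance checks, so indicator classes that do not track totals need no changes:

    # sketch of the duck-typing pattern used in that hunk
    for proc in procs:
        try:
            proc.set_test_count(test_count)
        except AttributeError:
            pass  # this indicator has no total-count display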
diff --git a/deps/v8/tools/testrunner/PRESUBMIT.py b/deps/v8/tools/testrunner/PRESUBMIT.py
index 7f7596a85d..fc23947f37 100644
--- a/deps/v8/tools/testrunner/PRESUBMIT.py
+++ b/deps/v8/tools/testrunner/PRESUBMIT.py
@@ -2,7 +2,17 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+def _CommonChecks(input_api, output_api):
+ return input_api.RunTests(input_api.canned_checks.GetUnitTestsRecursively(
+ input_api,
+ output_api,
+ input_api.os_path.join(input_api.PresubmitLocalPath()),
+ whitelist=[r'.+_unittest\.py$'],
+ blacklist=[],
+ ))
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
+
def CheckChangeOnCommit(input_api, output_api):
- tests = input_api.canned_checks.GetUnitTestsInDirectory(
- input_api, output_api, '../unittests', whitelist=['run_tests_test.py$'])
- return input_api.RunTests(tests)
+ return _CommonChecks(input_api, output_api)
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 8c5f7ad205..d72aa22abc 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -2,6 +2,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
from collections import OrderedDict
import json
@@ -10,6 +13,8 @@ import optparse
import os
import shlex
import sys
+import traceback
+
# Add testrunner to the path.
@@ -178,6 +183,7 @@ class BuildConfig(object):
self.dcheck_always_on = build_config['dcheck_always_on']
self.gcov_coverage = build_config['is_gcov_coverage']
self.is_android = build_config['is_android']
+ self.is_clang = build_config['is_clang']
self.is_debug = build_config['is_debug']
self.msan = build_config['is_msan']
self.no_i18n = not build_config['v8_enable_i18n_support']
@@ -247,10 +253,10 @@ class BaseTestRunner(object):
if options.swarming:
# Swarming doesn't print how isolated commands are called. Lets make
# this less cryptic by printing it ourselves.
- print ' '.join(sys.argv)
+ print(' '.join(sys.argv))
self._load_build_config(options)
- command.setup(self.target_os)
+ command.setup(self.target_os, options.device)
try:
self._process_default_options(options)
@@ -264,11 +270,20 @@ class BaseTestRunner(object):
self._setup_env()
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
- return self._do_execute(tests, args, options)
+ exit_code = self._do_execute(tests, args, options)
+ if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
+ print("Force exit code 0 after failures. Json test results file "
+ "generated with failure information.")
+ exit_code = utils.EXIT_CODE_PASS
+ return exit_code
except TestRunnerError:
+ traceback.print_exc()
return utils.EXIT_CODE_INTERNAL_ERROR
except KeyboardInterrupt:
return utils.EXIT_CODE_INTERRUPTED
+ except Exception:
+ traceback.print_exc()
+ return utils.EXIT_CODE_INTERNAL_ERROR
finally:
command.tear_down()
@@ -304,6 +319,9 @@ class BaseTestRunner(object):
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type=int)
+ parser.add_option("-d", "--device",
+ help="The device ID to run Android tests on. If not "
+ "given it will be autodetected.")
# Shard
parser.add_option("--shard-count", default=1, type=int,
@@ -318,9 +336,6 @@ class BaseTestRunner(object):
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -368,7 +383,7 @@ class BaseTestRunner(object):
if any(map(lambda v: v and ',' in v,
[options.arch, options.mode])): # pragma: no cover
- print 'Multiple arch/mode are deprecated'
+ print('Multiple arch/mode are deprecated')
raise TestRunnerError()
return options, args
@@ -381,13 +396,13 @@ class BaseTestRunner(object):
pass
if not self.build_config: # pragma: no cover
- print 'Failed to load build config'
+ print('Failed to load build config')
raise TestRunnerError
- print 'Build found: %s' % self.outdir
+ print('Build found: %s' % self.outdir)
if str(self.build_config):
- print '>>> Autodetected:'
- print self.build_config
+ print('>>> Autodetected:')
+ print(self.build_config)
# Represents the OS where tests are run on. Same as host OS except for
# Android, which is determined by build output.
@@ -464,7 +479,7 @@ class BaseTestRunner(object):
build_config_mode = 'debug' if self.build_config.is_debug else 'release'
if options.mode:
if options.mode not in MODES: # pragma: no cover
- print '%s mode is invalid' % options.mode
+ print('%s mode is invalid' % options.mode)
raise TestRunnerError()
if MODES[options.mode].execution_mode != build_config_mode:
print ('execution mode (%s) for %s is inconsistent with build config '
@@ -605,25 +620,20 @@ class BaseTestRunner(object):
names = self._args_to_suite_names(args, options.test_root)
test_config = self._create_test_config(options)
variables = self._get_statusfile_variables(options)
- slow_chain, fast_chain = [], []
+
+ # Head generator with no elements
+ test_chain = testsuite.TestGenerator(0, [], [])
for name in names:
if options.verbose:
- print '>>> Loading test suite: %s' % name
+ print('>>> Loading test suite: %s' % name)
suite = testsuite.TestSuite.Load(
os.path.join(options.test_root, name), test_config)
if self._is_testsuite_supported(suite, options):
- slow_tests, fast_tests = suite.load_tests_from_disk(variables)
- slow_chain.append(slow_tests)
- fast_chain.append(fast_tests)
-
- for tests in slow_chain:
- for test in tests:
- yield test
+ tests = suite.load_tests_from_disk(variables)
+ test_chain.merge(tests)
- for tests in fast_chain:
- for test in tests:
- yield test
+ return test_chain
def _is_testsuite_supported(self, suite, options):
"""A predicate that can be overridden to filter out unsupported TestSuite
@@ -653,6 +663,7 @@ class BaseTestRunner(object):
"gc_stress": False,
"gcov_coverage": self.build_config.gcov_coverage,
"isolates": options.isolates,
+ "is_clang": self.build_config.is_clang,
"mips_arch_variant": mips_arch_variant,
"mode": self.mode_options.status_mode
if not self.build_config.dcheck_always_on
@@ -710,7 +721,7 @@ class BaseTestRunner(object):
def _prepare_procs(self, procs):
procs = filter(None, procs)
- for i in xrange(0, len(procs) - 1):
+ for i in range(0, len(procs) - 1):
procs[i].connect_to(procs[i + 1])
procs[0].setup()
@@ -751,22 +762,26 @@ class BaseTestRunner(object):
# TODO(machenbach): Turn this into an assert. If that's wrong on the
# bots, printing will be quite useless. Or refactor this code to make
# sure we get a return code != 0 after testing if we got here.
- print "shard-run not a valid number, should be in [1:shard-count]"
- print "defaulting back to running all tests"
+ print("shard-run not a valid number, should be in [1:shard-count]")
+ print("defaulting back to running all tests")
return 1, 1
return shard_run, shard_count
- def _create_progress_indicators(self, options):
+ def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode))
+
+ for proc in procs:
+ try:
+ proc.set_test_count(test_count)
+ except AttributeError:
+ pass
+
return procs
def _create_result_tracker(self, options):
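The processors assembled above form a simple linked chain: _prepare_procs drops the None placeholders left by disabled options, connects each processor to its successor, and initializes the head. A toy model of that wiring, with a hypothetical Proc class standing in for the runner's real processors (note the list() around filter, which Python 3's lazy filter needs before len() works):

    class Proc(object):
        def __init__(self, name):
            self.name = name
            self._next = None

        def connect_to(self, next_proc):
            self._next = next_proc

        def setup(self):
            print('pipeline head: %s' % self.name)

    def prepare_procs(procs):
        # list() keeps len() and indexing working on Python 3's lazy filter.
        procs = list(filter(None, procs))
        for i in range(len(procs) - 1):
            procs[i].connect_to(procs[i + 1])
        procs[0].setup()

    prepare_procs([Proc('loader'), None, Proc('variants'), Proc('execution')])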
diff --git a/deps/v8/tools/testrunner/local/android.py b/deps/v8/tools/testrunner/local/android.py
index 5724f9ee2a..ebf04afad6 100644
--- a/deps/v8/tools/testrunner/local/android.py
+++ b/deps/v8/tools/testrunner/local/android.py
@@ -9,7 +9,7 @@ Wrapper around the Android device abstraction from src/build/android.
import logging
import os
import sys
-
+import re
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index d176323d02..8c0264e335 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
import os
import re
@@ -62,7 +64,7 @@ class BaseCommand(object):
def execute(self):
if self.verbose:
- print '# %s' % self
+ print('# %s' % self)
process = self._start_process()
@@ -187,7 +189,7 @@ class WindowsCommand(BaseCommand):
def _kill_process(self, process):
if self.verbose:
- print 'Attempting to kill process %d' % process.pid
+ print('Attempting to kill process %d' % process.pid)
sys.stdout.flush()
tk = subprocess.Popen(
'taskkill /T /F /PID %d' % process.pid,
@@ -196,14 +198,17 @@ class WindowsCommand(BaseCommand):
)
stdout, stderr = tk.communicate()
if self.verbose:
- print 'Taskkill results for %d' % process.pid
- print stdout
- print stderr
- print 'Return code: %d' % tk.returncode
+ print('Taskkill results for %d' % process.pid)
+ print(stdout)
+ print(stderr)
+ print('Return code: %d' % tk.returncode)
sys.stdout.flush()
class AndroidCommand(BaseCommand):
+ # This must be initialized before creating any instances of this class.
+ driver = None
+
def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
verbose=False, resources_func=None):
"""Initialize the command and all files that need to be pushed to the
@@ -234,21 +239,21 @@ class AndroidCommand(BaseCommand):
This pushes all required files to the device and then runs the command.
"""
if self.verbose:
- print '# %s' % self
+ print('# %s' % self)
- android_driver().push_executable(self.shell_dir, 'bin', self.shell_name)
+ self.driver.push_executable(self.shell_dir, 'bin', self.shell_name)
for abs_file in self.files_to_push:
abs_dir = os.path.dirname(abs_file)
file_name = os.path.basename(abs_file)
rel_dir = os.path.relpath(abs_dir, BASE_DIR)
- android_driver().push_file(abs_dir, file_name, rel_dir)
+ self.driver.push_file(abs_dir, file_name, rel_dir)
start_time = time.time()
return_code = 0
timed_out = False
try:
- stdout = android_driver().run(
+ stdout = self.driver.run(
'bin', self.shell_name, self.args, '.', self.timeout, self.env)
except CommandFailedException as e:
return_code = e.status
@@ -271,10 +276,11 @@ class AndroidCommand(BaseCommand):
Command = None
-def setup(target_os):
+def setup(target_os, device):
"""Set the Command class to the OS-specific version."""
global Command
if target_os == 'android':
+ AndroidCommand.driver = android_driver(device)
Command = AndroidCommand
elif target_os == 'windows':
Command = WindowsCommand
@@ -284,4 +290,4 @@ def setup(target_os):
def tear_down():
"""Clean up after using commands."""
if Command == AndroidCommand:
- android_driver().tear_down()
+ AndroidCommand.driver.tear_down()
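setup() now injects the device driver once as a class attribute, so AndroidCommand instances share it instead of re-resolving android_driver() on every call. A minimal, self-contained sketch of the same pattern — FakeDriver and the names below are illustrative, not the runner's real API:

    class FakeDriver(object):
        def run(self, name):
            return 'ran %s' % name

        def tear_down(self):
            print('driver torn down')

    class Command(object):
        # Must be set (via setup) before any instance is created.
        driver = None

        def __init__(self, name):
            self.name = name

        def execute(self):
            # Every instance shares the single driver configured in setup().
            return self.driver.run(self.name)

    def setup(device=None):
        Command.driver = FakeDriver()

    setup()
    print(Command('bin/d8').execute())   # ran bin/d8
    Command.driver.tear_down()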
diff --git a/deps/v8/tools/testrunner/local/fake_testsuite/testcfg.py b/deps/v8/tools/testrunner/local/fake_testsuite/testcfg.py
index 61d75fb991..28de737155 100644
--- a/deps/v8/tools/testrunner/local/fake_testsuite/testcfg.py
+++ b/deps/v8/tools/testrunner/local/fake_testsuite/testcfg.py
@@ -8,16 +8,26 @@ import sys
from testrunner.local import testsuite, statusfile
-class TestSuite(testsuite.TestSuite):
- def _test_class(self):
- return testsuite.TestCase
+class TestLoader(testsuite.TestLoader):
+ def _list_test_filenames(self):
+ return ["fast", "slow"]
+
+ def list_tests(self):
+ self.test_count_estimation = 2
+ fast = self._create_test("fast", self.suite)
+ slow = self._create_test("slow", self.suite)
- def ListTests(self):
- fast = self._create_test("fast")
- slow = self._create_test("slow")
slow._statusfile_outcomes.append(statusfile.SLOW)
yield fast
yield slow
+
+class TestSuite(testsuite.TestSuite):
+ def _test_loader_class(self):
+ return TestLoader
+
+ def _test_class(self):
+ return testsuite.TestCase
+
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index 9735f8ea66..e0b0ec41c7 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -3,7 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-from Queue import Empty
+# for py2/py3 compatibility
+from __future__ import print_function
+
from contextlib import contextmanager
from multiprocessing import Process, Queue
import os
@@ -11,6 +13,11 @@ import signal
import time
import traceback
+try:
+ from queue import Empty # Python 3
+except ImportError:
+ from Queue import Empty # Python 2
+
from . import command
@@ -22,7 +29,11 @@ def setup_testing():
global Process
del Queue
del Process
- from Queue import Queue
+ try:
+ from queue import Queue # Python 3
+ except ImportError:
+ from Queue import Queue # Python 2
+
from threading import Thread as Process
# Monkeypatch threading Queue to look like multiprocessing Queue.
Queue.cancel_join_thread = lambda self: None
@@ -70,7 +81,7 @@ def Worker(fn, work_queue, done_queue,
except command.AbortException:
# SIGINT, SIGTERM or internal hard timeout.
break
- except Exception, e:
+ except Exception as e:
traceback.print_exc()
print(">>> EXCEPTION: %s" % e)
done_queue.put(ExceptionResult(e))
@@ -153,7 +164,7 @@ class Pool():
# Disable sigint and sigterm to prevent subprocesses from capturing the
# signals.
with without_sig():
- for w in xrange(self.num_workers):
+ for w in range(self.num_workers):
p = Process(target=Worker, args=(fn,
self.work_queue,
self.done_queue,
@@ -198,7 +209,7 @@ class Pool():
def _advance_more(self, gen):
while self.processing_count < self.num_workers * self.BUFFER_FACTOR:
try:
- self.work_queue.put(gen.next())
+ self.work_queue.put(next(gen))
self.processing_count += 1
except StopIteration:
self.advance = self._advance_empty
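The try/except import is the standard way to stay compatible with both Python lines: the stdlib module was renamed from Queue to queue in Python 3. The same pattern in isolation:

    try:
        from queue import Queue, Empty   # Python 3
    except ImportError:
        from Queue import Queue, Empty   # Python 2

    q = Queue()
    q.put(1)
    print(q.get())                       # 1
    try:
        q.get(timeout=0.01)
    except Empty:
        print('queue drained')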
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 5d05e23cc3..e4778326a9 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -25,6 +25,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
import re
@@ -133,7 +136,7 @@ class StatusFile(object):
variant_desc = 'variant independent'
else:
variant_desc = 'variant: %s' % variant
- print 'Unused rule: %s -> %s (%s)' % (rule, value, variant_desc)
+ print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
def _JoinsPassAndFail(outcomes1, outcomes2):
@@ -329,5 +332,5 @@ def PresubmitCheck(path):
"missing file for %s test %s" % (basename, rule))
return status["success"]
except Exception as e:
- print e
+ print(e)
return False
diff --git a/deps/v8/tools/testrunner/local/statusfile_unittest.py b/deps/v8/tools/testrunner/local/statusfile_unittest.py
index 299e332c1c..e8d5ff99cd 100755
--- a/deps/v8/tools/testrunner/local/statusfile_unittest.py
+++ b/deps/v8/tools/testrunner/local/statusfile_unittest.py
@@ -3,6 +3,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+
import unittest
import statusfile
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 9d00d7bc12..8656c1636d 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -28,6 +28,7 @@
import fnmatch
import imp
+import itertools
import os
from contextlib import contextmanager
@@ -78,6 +79,156 @@ class TestCombiner(object):
def _combined_test_class(self):
raise NotImplementedError()
+
+class TestLoader(object):
+ """Base class for loading TestSuite tests after applying test suite
+ transformations."""
+
+ def __init__(self, suite, test_class, test_config, test_root):
+ self.suite = suite
+ self.test_class = test_class
+ self.test_config = test_config
+ self.test_root = test_root
+ self.test_count_estimation = len(list(self._list_test_filenames()))
+
+ def _list_test_filenames(self):
+ """Implemented by the subclassed TestLoaders to list filenames.
+
+    Filenames are expected to be sorted and deterministic."""
+ raise NotImplementedError
+
+ def _should_filter_by_name(self, name):
+ return False
+
+ def _should_filter_by_test(self, test):
+ return False
+
+ def _filename_to_testname(self, filename):
+ """Hook for subclasses to write their own filename transformation
+    logic before test creation."""
+ return filename
+
+ # TODO: not needed for every TestLoader, extract it into a subclass.
+ def _path_to_name(self, path):
+ if utils.IsWindows():
+ return path.replace(os.path.sep, "/")
+
+ return path
+
+ def _create_test(self, path, suite, **kwargs):
+ """Converts paths into test objects using the given options"""
+ return self.test_class(
+ suite, path, self._path_to_name(path), self.test_config, **kwargs)
+
+ def list_tests(self):
+ """Loads and returns the test objects for a TestSuite"""
+ # TODO: detect duplicate tests.
+ for filename in self._list_test_filenames():
+ if self._should_filter_by_name(filename):
+ continue
+
+ testname = self._filename_to_testname(filename)
+ case = self._create_test(testname, self.suite)
+ if self._should_filter_by_test(case):
+ continue
+
+ yield case
+
+
+class GenericTestLoader(TestLoader):
+ """Generic TestLoader implementing the logic for listing filenames"""
+ @property
+ def excluded_files(self):
+ return set()
+
+ @property
+ def excluded_dirs(self):
+ return set()
+
+ @property
+ def excluded_suffixes(self):
+ return set()
+
+ @property
+ def test_dirs(self):
+ return [self.test_root]
+
+ @property
+ def extension(self):
+ return ""
+
+ def _should_filter_by_name(self, filename):
+ if not filename.endswith(self.extension):
+ return True
+
+ for suffix in self.excluded_suffixes:
+ if filename.endswith(suffix):
+ return True
+
+ if os.path.basename(filename) in self.excluded_files:
+ return True
+
+ return False
+
+ def _filename_to_testname(self, filename):
+ if not self.extension:
+ return filename
+
+ return filename[:-len(self.extension)]
+
+ def _to_relpath(self, abspath, test_root):
+ return os.path.relpath(abspath, test_root)
+
+ def _list_test_filenames(self):
+ for test_dir in sorted(self.test_dirs):
+ test_root = os.path.join(self.test_root, test_dir)
+ for dirname, dirs, files in os.walk(test_root, followlinks=True):
+ dirs.sort()
+        # Prune in place; removing entries while iterating over dirs
+        # would skip elements.
+        dirs[:] = [d for d in dirs
+                   if d not in self.excluded_dirs and not d.startswith('.')]
+
+ files.sort()
+ for filename in files:
+ abspath = os.path.join(dirname, filename)
+
+ yield self._to_relpath(abspath, test_root)
+
+
+class JSTestLoader(GenericTestLoader):
+ @property
+ def extension(self):
+ return ".js"
+
+
+class TestGenerator(object):
+ def __init__(self, test_count_estimate, slow_tests, fast_tests):
+ self.test_count_estimate = test_count_estimate
+ self.slow_tests = slow_tests
+ self.fast_tests = fast_tests
+ self._rebuild_iterator()
+
+ def _rebuild_iterator(self):
+ self._iterator = itertools.chain(self.slow_tests, self.fast_tests)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self.next()
+
+ def next(self):
+ return next(self._iterator)
+
+ def merge(self, test_generator):
+ self.test_count_estimate += test_generator.test_count_estimate
+ self.slow_tests = itertools.chain(
+ self.slow_tests, test_generator.slow_tests)
+ self.fast_tests = itertools.chain(
+ self.fast_tests, test_generator.fast_tests)
+ self._rebuild_iterator()
+
+
@contextmanager
def _load_testsuite_module(name, root):
f = None
@@ -102,20 +253,35 @@ class TestSuite(object):
self.tests = None # list of TestCase objects
self.statusfile = None
+ self._test_loader = self._test_loader_class()(
+ self, self._test_class(), self.test_config, self.root)
+
def status_file(self):
return "%s/%s.status" % (self.root, self.name)
- def ListTests(self):
+ @property
+ def _test_loader_class(self):
raise NotImplementedError
+ def ListTests(self):
+ return self._test_loader.list_tests()
+
+ def __initialize_test_count_estimation(self):
+ # Retrieves a single test to initialize the test generator.
+ next(iter(self.ListTests()), None)
+
+ def __calculate_test_count(self):
+ self.__initialize_test_count_estimation()
+ return self._test_loader.test_count_estimation
+
def load_tests_from_disk(self, statusfile_variables):
self.statusfile = statusfile.StatusFile(
self.status_file(), statusfile_variables)
+ test_count = self.__calculate_test_count()
slow_tests = (test for test in self.ListTests() if test.is_slow)
fast_tests = (test for test in self.ListTests() if not test.is_slow)
-
- return slow_tests, fast_tests
+ return TestGenerator(test_count, slow_tests, fast_tests)
def get_variants_gen(self, variants):
return self._variants_gen_class()(variants)
@@ -138,15 +304,5 @@ class TestSuite(object):
"""
return None
- def _create_test(self, path, **kwargs):
- test_class = self._test_class()
- return test_class(self, path, self._path_to_name(path), self.test_config,
- **kwargs)
-
def _test_class(self):
raise NotImplementedError
-
- def _path_to_name(self, path):
- if utils.IsWindows():
- return path.replace("\\", "/")
- return path
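TestGenerator keeps slow tests ahead of fast ones and preserves that grouping across merge() calls, so merged suites still run all slow tests first. A stripped-down model of that behaviour (it folds the class's explicit iterator rebuild into __iter__ for brevity):

    import itertools

    class TestGenerator(object):
        def __init__(self, count, slow, fast):
            self.test_count_estimate = count
            self.slow_tests = slow
            self.fast_tests = fast

        def merge(self, other):
            self.test_count_estimate += other.test_count_estimate
            self.slow_tests = itertools.chain(self.slow_tests, other.slow_tests)
            self.fast_tests = itertools.chain(self.fast_tests, other.fast_tests)

        def __iter__(self):
            return itertools.chain(self.slow_tests, self.fast_tests)

    a = TestGenerator(2, iter(['slow-a']), iter(['fast-a']))
    b = TestGenerator(2, iter(['slow-b']), iter(['fast-b']))
    a.merge(b)
    print(list(a))   # ['slow-a', 'slow-b', 'fast-a', 'fast-b']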
diff --git a/deps/v8/tools/testrunner/local/testsuite_unittest.py b/deps/v8/tools/testrunner/local/testsuite_unittest.py
index efc9fdacf0..1cca79c205 100755
--- a/deps/v8/tools/testrunner/local/testsuite_unittest.py
+++ b/deps/v8/tools/testrunner/local/testsuite_unittest.py
@@ -3,6 +3,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import itertools
import os
import sys
import tempfile
@@ -13,7 +14,7 @@ TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
-from testrunner.local.testsuite import TestSuite
+from testrunner.local.testsuite import TestSuite, TestGenerator
from testrunner.objects.testcase import TestCase
from testrunner.test_config import TestConfig
@@ -47,21 +48,38 @@ class TestSuiteTest(unittest.TestCase):
self.assertIsNone(self.suite.statusfile)
def testLoadingTestsFromDisk(self):
- slow_tests, fast_tests = self.suite.load_tests_from_disk(
+ tests = self.suite.load_tests_from_disk(
statusfile_variables={})
def is_generator(iterator):
return iterator == iter(iterator)
- self.assertTrue(is_generator(slow_tests))
- self.assertTrue(is_generator(fast_tests))
+ self.assertTrue(is_generator(tests))
+ self.assertEquals(tests.test_count_estimate, 2)
- slow_tests, fast_tests = list(slow_tests), list(fast_tests)
+ slow_tests, fast_tests = list(tests.slow_tests), list(tests.fast_tests)
# Verify that the components of the TestSuite are loaded.
self.assertTrue(len(slow_tests) == len(fast_tests) == 1)
self.assertTrue(all(test.is_slow for test in slow_tests))
self.assertFalse(any(test.is_slow for test in fast_tests))
self.assertIsNotNone(self.suite.statusfile)
+ def testMergingTestGenerators(self):
+ tests = self.suite.load_tests_from_disk(
+ statusfile_variables={})
+ more_tests = self.suite.load_tests_from_disk(
+ statusfile_variables={})
+
+ # Merge the test generators
+ tests.merge(more_tests)
+ self.assertEquals(tests.test_count_estimate, 4)
+
+ # Check the tests are sorted by speed
+ test_speeds = []
+ for test in tests:
+ test_speeds.append(test.is_slow)
+
+ self.assertEquals(test_speeds, [True, True, False, False])
+
if __name__ == '__main__':
unittest.main()
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index 9834386d01..9128c433a0 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -25,6 +25,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# for py2/py3 compatibility
+from __future__ import print_function
from os.path import exists
from os.path import isdir
@@ -147,7 +149,7 @@ def URLRetrieve(source, destination):
return
except:
# If there's no curl, fall back to urlopen.
- print "Curl is currently not installed. Falling back to python."
+ print("Curl is currently not installed. Falling back to python.")
pass
with open(destination, 'w') as f:
f.write(urllib2.urlopen(source).read())
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 80710ff055..b3e446fb3f 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -10,6 +10,8 @@ ALL_VARIANT_FLAGS = {
"gc_stats": [["--gc_stats=1"]],
# Alias of exhaustive variants, but triggering new test framework features.
"infra_staging": [[]],
+ "interpreted_regexp": [["--regexp-interpret-all"]],
+ "jitless": [["--jitless"]],
"no_liftoff": [["--no-wasm-tier-up"]],
"minor_mc": [["--minor-mc"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
diff --git a/deps/v8/tools/testrunner/local/verbose.py b/deps/v8/tools/testrunner/local/verbose.py
index 49e808588c..8569368a75 100644
--- a/deps/v8/tools/testrunner/local/verbose.py
+++ b/deps/v8/tools/testrunner/local/verbose.py
@@ -25,6 +25,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# for py2/py3 compatibility
+from __future__ import print_function
import sys
import time
@@ -63,7 +65,7 @@ def PrintReport(tests):
else:
assert False # Unreachable # TODO: check this in outcomes parsing phase.
- print REPORT_TEMPLATE % {
+ print(REPORT_TEMPLATE % {
"total": total,
"skipped": skipped,
"nocrash": nocrash,
@@ -71,17 +73,17 @@ def PrintReport(tests):
"fail_ok": fail_ok,
"fail": fail,
"crash": crash,
- }
+ })
def PrintTestSource(tests):
for test in tests:
- print "--- begin source: %s ---" % test
+ print("--- begin source: %s ---" % test)
if test.is_source_available():
- print test.get_source()
+ print(test.get_source())
else:
- print '(no source available)'
- print "--- end source: %s ---" % test
+ print('(no source available)')
+ print("--- end source: %s ---" % test)
def FormatTime(d):
@@ -92,11 +94,11 @@ def FormatTime(d):
def PrintTestDurations(suites, outputs, overall_time):
# Write the times to stderr to make it easy to separate from the
# test output.
- print
+ print()
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(overall_time))
timed_tests = [(t, outputs[t].duration) for s in suites for t in s.tests
if t in outputs]
- timed_tests.sort(key=lambda (_, duration): duration, reverse=True)
+ timed_tests.sort(key=lambda test_duration: test_duration[1], reverse=True)
index = 1
for test, duration in timed_tests[:20]:
t = FormatTime(duration)
diff --git a/deps/v8/tools/testrunner/num_fuzzer.py b/deps/v8/tools/testrunner/num_fuzzer.py
index d5e399626c..e51966b5cf 100755
--- a/deps/v8/tools/testrunner/num_fuzzer.py
+++ b/deps/v8/tools/testrunner/num_fuzzer.py
@@ -4,6 +4,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
import random
import sys
@@ -97,8 +99,8 @@ class NumFuzzer(base_runner.BaseTestRunner):
if options.combine_tests:
if options.combine_min > options.combine_max:
- print ('min_group_size (%d) cannot be larger than max_group_size (%d)' %
- options.min_group_size, options.max_group_size)
+      print('min_group_size (%d) cannot be larger than max_group_size (%d)' %
+            (options.combine_min, options.combine_max))
raise base_runner.TestRunnerError()
if options.variants != 'default':
@@ -127,14 +129,15 @@ class NumFuzzer(base_runner.BaseTestRunner):
return variables
def _do_execute(self, tests, args, options):
- loader = LoadProc()
+ loader = LoadProc(tests)
fuzzer_rng = random.Random(options.fuzzer_random_seed)
combiner = self._create_combiner(fuzzer_rng, options)
results = self._create_result_tracker(options)
execproc = ExecutionProc(options.j)
sigproc = self._create_signal_proc()
- indicators = self._create_progress_indicators(options)
+ indicators = self._create_progress_indicators(
+ tests.test_count_estimate, options)
procs = [
loader,
NameFilterProc(args) if args else None,
@@ -153,7 +156,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
execproc,
]
self._prepare_procs(procs)
- loader.load_tests(tests)
+ loader.load_initial_tests(initial_batch_size=float('inf'))
# TODO(majeski): maybe some notification from loader would be better?
if combiner:
@@ -166,7 +169,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
for indicator in indicators:
indicator.finished()
- print '>>> %d tests ran' % results.total
+ print('>>> %d tests ran' % results.total)
if results.failed:
return utils.EXIT_CODE_FAILURES
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index c8f7bdbfb0..80c7c29ed1 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -53,6 +53,14 @@ MODULE_RESOURCES_PATTERN_1 = re.compile(
MODULE_RESOURCES_PATTERN_2 = re.compile(
r"(?:import|export).*from (?:'|\")([^'\"]+)(?:'|\")")
+TIMEOUT_LONG = "long"
+
+try:
+ cmp # Python 2
+except NameError:
+ def cmp(x, y): # Python 3
+ return (x > y) - (x < y)
+
class TestCase(object):
def __init__(self, suite, path, name, test_config):
@@ -197,6 +205,9 @@ class TestCase(object):
def _get_files_params(self):
return []
+ def _get_timeout_param(self):
+ return None
+
def _get_random_seed_flags(self):
return ['--random-seed=%d' % self.random_seed]
@@ -235,6 +246,8 @@ class TestCase(object):
timeout *= 4
if "--noenable-vfp3" in params:
timeout *= 2
+ if self._get_timeout_param() == TIMEOUT_LONG:
+ timeout *= 10
# TODO(majeski): make it slow outcome dependent.
timeout *= 2
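Python 3 removed the cmp builtin, hence the shim added above. It leans on the fact that the comparison operators return booleans, which subtract as 0/1:

    def cmp(x, y):
        # (x > y) - (x < y) yields -1, 0 or 1.
        return (x > y) - (x < y)

    print(cmp(1, 2), cmp(3, 3), cmp(5, 4))   # -1 0 1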
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index c84260c0a6..300340ed98 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -4,6 +4,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
import os
import re
@@ -29,7 +32,7 @@ ARCH_GUESS = utils.DefaultArch()
VARIANTS = ["default"]
MORE_VARIANTS = [
- "nooptimization",
+ "jitless",
"stress",
"stress_background_compile",
"stress_incremental_marking",
@@ -43,7 +46,7 @@ VARIANT_ALIASES = {
# Shortcut for the two above ("more" first - it has the longer running tests).
"exhaustive": MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
- "extra": ["future", "no_liftoff", "no_wasm_traps", "trusted"],
+ "extra": ["nooptimization", "future", "no_liftoff", "no_wasm_traps"],
}
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
@@ -217,7 +220,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
def CheckTestMode(name, option): # pragma: no cover
if not option in ["run", "skip", "dontcare"]:
- print "Unknown %s mode %s" % (name, option)
+ print("Unknown %s mode %s" % (name, option))
raise base_runner.TestRunnerError()
CheckTestMode("slow test", options.slow_tests)
CheckTestMode("pass|fail test", options.pass_fail_tests)
@@ -240,7 +243,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
for v in user_variants:
if v not in ALL_VARIANTS:
- print 'Unknown variant: %s' % v
+ print('Unknown variant: %s' % v)
raise base_runner.TestRunnerError()
assert False, 'Unreachable'
@@ -280,10 +283,11 @@ class StandardTestRunner(base_runner.BaseTestRunner):
def _do_execute(self, tests, args, options):
jobs = options.j
- print '>>> Running with test processors'
- loader = LoadProc()
+ print('>>> Running with test processors')
+ loader = LoadProc(tests)
results = self._create_result_tracker(options)
- indicators = self._create_progress_indicators(options)
+ indicators = self._create_progress_indicators(
+ tests.test_count_estimate, options)
outproc_factory = None
if self.build_config.predictable:
@@ -295,10 +299,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
loader,
NameFilterProc(args) if args else None,
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
- self._create_shard_proc(options),
VariantProc(self._variants),
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
self._create_predictable_filter(),
+ self._create_shard_proc(options),
self._create_seed_proc(options),
sigproc,
] + indicators + [
@@ -310,7 +314,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
self._prepare_procs(procs)
- loader.load_tests(tests)
+ loader.load_initial_tests(initial_batch_size=options.j*2)
# This starts up worker processes and blocks until all tests are
# processed.
@@ -319,7 +323,17 @@ class StandardTestRunner(base_runner.BaseTestRunner):
for indicator in indicators:
indicator.finished()
- print '>>> %d tests ran' % (results.total - results.remaining)
+
+ if tests.test_count_estimate:
+ percentage = float(results.total) / tests.test_count_estimate * 100
+ else:
+ percentage = 0
+
+    print('>>> %d base tests produced %d (%d%%) non-filtered tests' % (
+        tests.test_count_estimate, results.total, percentage))
+
+ print('>>> %d tests ran' % (results.total - results.remaining))
exit_code = utils.EXIT_CODE_PASS
if results.failed:
@@ -328,13 +342,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
exit_code = utils.EXIT_CODE_NO_TESTS
# Indicate if a SIGINT or SIGTERM happened.
- exit_code = max(exit_code, sigproc.exit_code)
-
- if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
- print("Force exit code 0 after failures. Json test results file "
- "generated with failure information.")
- exit_code = utils.EXIT_CODE_PASS
- return exit_code
+ return max(exit_code, sigproc.exit_code)
def _create_predictable_filter(self):
if not self.build_config.predictable:
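The new summary line relates the base tests listed on disk to the tests that actually reached execution after variants multiplied them and filters pruned them. Roughly, with made-up numbers:

    test_count_estimate = 250    # base tests listed on disk
    total = 1000                 # tests produced after variants/filters
    if test_count_estimate:
        percentage = float(total) / test_count_estimate * 100
    else:
        percentage = 0
    print('>>> %d base tests produced %d (%d%%) non-filtered tests'
          % (test_count_estimate, total, percentage))
    # >>> 250 base tests produced 1000 (400%) non-filtered tests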
diff --git a/deps/v8/tools/testrunner/testproc/base.py b/deps/v8/tools/testrunner/testproc/base.py
index 5cb1182e89..c52c779752 100644
--- a/deps/v8/tools/testrunner/testproc/base.py
+++ b/deps/v8/tools/testrunner/testproc/base.py
@@ -79,6 +79,8 @@ class TestProc(object):
"""
    Method called by the previous processor whenever it produces a new test.
    This method shouldn't be called by anyone except the previous processor.
+ Returns a boolean value to signal whether the test was loaded into the
+ execution queue successfully or not.
"""
raise NotImplementedError()
@@ -109,7 +111,7 @@ class TestProc(object):
def _send_test(self, test):
"""Helper method for sending test to the next processor."""
- self._next_proc.next_test(test)
+ return self._next_proc.next_test(test)
def _send_result(self, test, result):
"""Helper method for sending result to the previous processor."""
@@ -126,7 +128,7 @@ class TestProcObserver(TestProc):
def next_test(self, test):
self._on_next_test(test)
- self._send_test(test)
+ return self._send_test(test)
def result_for(self, test, result):
self._on_result_for(test, result)
@@ -158,7 +160,7 @@ class TestProcProducer(TestProc):
self._name = name
def next_test(self, test):
- self._next_test(test)
+ return self._next_test(test)
def result_for(self, subtest, result):
self._result_for(subtest.origin, subtest, result)
@@ -190,9 +192,9 @@ class TestProcFilter(TestProc):
def next_test(self, test):
if self._filter(test):
- self._send_result(test, SKIPPED)
- else:
- self._send_test(test)
+ return False
+
+ return self._send_test(test)
def result_for(self, test, result):
self._send_result(test, result)
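With next_test now returning a boolean, filters report a drop by returning False instead of synthesizing a SKIPPED result, and producers can keep offering tests until one is accepted. A compact model of that handshake (illustrative classes, not the real processors):

    class Filter(object):
        def next_test(self, test):
            if test.startswith('skip'):
                return False          # dropped: tell the producer to retry
            print('loaded %s' % test)
            return True

    class Producer(object):
        def __init__(self, next_proc):
            self._next = next_proc

        def feed(self, candidates):
            for test in candidates:
                if self._next.next_test(test):
                    return True       # stop after the first successful load
            return False

    Producer(Filter()).feed(['skip-1', 'skip-2', 'run-1'])   # loads run-1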
diff --git a/deps/v8/tools/testrunner/testproc/combiner.py b/deps/v8/tools/testrunner/testproc/combiner.py
index 50944e1e5e..4d992f4c65 100644
--- a/deps/v8/tools/testrunner/testproc/combiner.py
+++ b/deps/v8/tools/testrunner/testproc/combiner.py
@@ -2,6 +2,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
from collections import defaultdict
import time
@@ -9,7 +12,6 @@ from . import base
from ..objects import testcase
from ..outproc import base as outproc
-
class CombinerProc(base.TestProc):
def __init__(self, rng, min_group_size, max_group_size, count):
"""
@@ -45,9 +47,10 @@ class CombinerProc(base.TestProc):
group_key = self._get_group_key(test)
if not group_key:
# Test not suitable for combining
- return
+ return False
self._groups[test.suite.name].add_test(group_key, test)
+ return True
def _get_group_key(self, test):
combiner = self._get_combiner(test.suite)
@@ -61,22 +64,22 @@ class CombinerProc(base.TestProc):
self._send_next_test()
def generate_initial_tests(self, num=1):
- for _ in xrange(0, num):
+ for _ in range(0, num):
self._send_next_test()
def _send_next_test(self):
if self.is_stopped:
- return
+ return False
if self._count and self._current_num >= self._count:
- return
+ return False
combined_test = self._create_new_test()
if not combined_test:
# Not enough tests
- return
+ return False
- self._send_test(combined_test)
+ return self._send_test(combined_test)
def _create_new_test(self):
suite, combiner = self._select_suite()
@@ -121,4 +124,4 @@ class TestGroups(object):
group_key = rng.choice(self._keys)
tests = self._groups[group_key]
- return [rng.choice(tests) for _ in xrange(0, max_size)]
+ return [rng.choice(tests) for _ in range(0, max_size)]
diff --git a/deps/v8/tools/testrunner/testproc/execution.py b/deps/v8/tools/testrunner/testproc/execution.py
index 2d1ea02cd0..68ecf45a37 100644
--- a/deps/v8/tools/testrunner/testproc/execution.py
+++ b/deps/v8/tools/testrunner/testproc/execution.py
@@ -64,7 +64,7 @@ class ExecutionProc(base.TestProc):
def next_test(self, test):
if self.is_stopped:
- return
+ return False
test_id = test.procid
cmd = test.get_command()
@@ -73,6 +73,8 @@ class ExecutionProc(base.TestProc):
outproc = self._outproc_factory(test)
self._pool.add([Job(test_id, cmd, outproc, test.keep_output)])
+ return True
+
def result_for(self, test, result):
assert False, 'ExecutionProc cannot receive results'
diff --git a/deps/v8/tools/testrunner/testproc/expectation.py b/deps/v8/tools/testrunner/testproc/expectation.py
index 607c010cf3..fdc9e3e1b0 100644
--- a/deps/v8/tools/testrunner/testproc/expectation.py
+++ b/deps/v8/tools/testrunner/testproc/expectation.py
@@ -21,7 +21,8 @@ class ForgiveTimeoutProc(base.TestProcProducer):
elif statusfile.TIMEOUT not in subtest.expected_outcomes:
subtest.expected_outcomes = (
subtest.expected_outcomes + [statusfile.TIMEOUT])
- self._send_test(subtest)
+
+ return self._send_test(subtest)
def _result_for(self, test, subtest, result):
self._send_result(test, result)
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
index 799b4bfb5e..187145b4c8 100644
--- a/deps/v8/tools/testrunner/testproc/fuzzer.py
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -69,14 +69,14 @@ class FuzzerProc(base.TestProcProducer):
def _next_test(self, test):
if self.is_stopped:
- return
+ return False
analysis_subtest = self._create_analysis_subtest(test)
if analysis_subtest:
- self._send_test(analysis_subtest)
- else:
- self._gens[test.procid] = self._create_gen(test)
- self._try_send_next_test(test)
+ return self._send_test(analysis_subtest)
+
+ self._gens[test.procid] = self._create_gen(test)
+ return self._try_send_next_test(test)
def _create_analysis_subtest(self, test):
if self._disable_analysis:
@@ -100,6 +100,7 @@ class FuzzerProc(base.TestProcProducer):
if result.has_unexpected_output:
self._send_result(test, None)
return
+
self._gens[test.procid] = self._create_gen(test, result)
self._try_send_next_test(test)
@@ -146,11 +147,11 @@ class FuzzerProc(base.TestProcProducer):
def _try_send_next_test(self, test):
if not self.is_stopped:
for subtest in self._gens[test.procid]:
- self._send_test(subtest)
- return
+ if self._send_test(subtest):
+ return True
del self._gens[test.procid]
- self._send_result(test, None)
+ return False
def _next_seed(self):
seed = None
diff --git a/deps/v8/tools/testrunner/testproc/loader.py b/deps/v8/tools/testrunner/testproc/loader.py
index 0a3d0df1b3..f4afeae0e0 100644
--- a/deps/v8/tools/testrunner/testproc/loader.py
+++ b/deps/v8/tools/testrunner/testproc/loader.py
@@ -9,19 +9,34 @@ class LoadProc(base.TestProc):
"""First processor in the chain that passes all tests to the next processor.
"""
- def load_tests(self, tests):
- loaded = set()
- for test in tests:
- if test.procid in loaded:
- print 'Warning: %s already obtained' % test.procid
- continue
+ def __init__(self, tests):
+ super(LoadProc, self).__init__()
- loaded.add(test.procid)
- self._send_test(test)
+ self.tests = tests
+
+ def load_initial_tests(self, initial_batch_size):
+ """
+ Args:
+ initial_batch_size: initial number of tests to load
+ """
+ loaded_tests = 0
+ while loaded_tests < initial_batch_size:
+ try:
+ t = next(self.tests)
+ except StopIteration:
+ return
+
+ if self._send_test(t):
+ loaded_tests += 1
def next_test(self, test):
assert False, 'Nothing can be connected to the LoadProc'
def result_for(self, test, result):
- # Ignore all results.
- pass
+ try:
+ while not self._send_test(next(self.tests)):
+ pass
+ except StopIteration:
+ # No more tests to load.
+ pass
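LoadProc now owns the test generator and pulls from it lazily: load_initial_tests primes the pipeline with a batch of accepted tests, and each later result pulls in the next test the downstream chain will take. A rough model of the batching loop (send stands in for _send_test):

    def load_initial(tests, send, initial_batch_size):
        loaded = 0
        while loaded < initial_batch_size:
            try:
                test = next(tests)
            except StopIteration:
                return
            if send(test):
                loaded += 1

    accepted = []

    def send(test):
        kept = test != 't1'          # pretend a downstream filter drops t1
        if kept:
            accepted.append(test)
        return kept

    load_initial(iter(['t0', 't1', 't2', 't3']), send, initial_batch_size=2)
    print(accepted)   # ['t0', 't2'] -- a dropped test doesn't count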
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 096228dc35..29ad5c797b 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -2,13 +2,15 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import json
import os
import sys
import time
from . import base
-from ..local import junit_output
def print_failure_header(test):
@@ -16,10 +18,10 @@ def print_failure_header(test):
negative_marker = '[negative] '
else:
negative_marker = ''
- print "=== %(label)s %(negative)s===" % {
+ print("=== %(label)s %(negative)s===" % {
'label': test,
'negative': negative_marker,
- }
+ })
class ResultsTracker(base.TestProcObserver):
@@ -42,7 +44,7 @@ class ResultsTracker(base.TestProcObserver):
if result.has_unexpected_output:
self.failed += 1
if self.max_failures and self.failed >= self.max_failures:
- print '>>> Too many failures, exiting...'
+ print('>>> Too many failures, exiting...')
self.stop()
@@ -65,33 +67,33 @@ class SimpleProgressIndicator(ProgressIndicator):
def finished(self):
crashed = 0
- print
+ print()
for test, result in self._failed:
print_failure_header(test)
if result.output.stderr:
- print "--- stderr ---"
- print result.output.stderr.strip()
+ print("--- stderr ---")
+ print(result.output.stderr.strip())
if result.output.stdout:
- print "--- stdout ---"
- print result.output.stdout.strip()
- print "Command: %s" % result.cmd.to_string()
+ print("--- stdout ---")
+ print(result.output.stdout.strip())
+ print("Command: %s" % result.cmd.to_string())
if result.output.HasCrashed():
- print "exit code: %d" % result.output.exit_code
- print "--- CRASHED ---"
+ print("exit code: %d" % result.output.exit_code)
+ print("--- CRASHED ---")
crashed += 1
if result.output.HasTimedOut():
- print "--- TIMEOUT ---"
+ print("--- TIMEOUT ---")
if len(self._failed) == 0:
- print "==="
- print "=== All tests succeeded"
- print "==="
+ print("===")
+ print("=== All tests succeeded")
+ print("===")
else:
- print
- print "==="
- print "=== %i tests failed" % len(self._failed)
+ print()
+ print("===")
+ print("=== %i tests failed" % len(self._failed))
if crashed > 0:
- print "=== %i tests CRASHED" % crashed
- print "==="
+ print("=== %i tests CRASHED" % crashed)
+ print("===")
class VerboseProgressIndicator(SimpleProgressIndicator):
@@ -100,7 +102,7 @@ class VerboseProgressIndicator(SimpleProgressIndicator):
self._last_printed_time = time.time()
def _print(self, text):
- print text
+ print(text)
sys.stdout.flush()
self._last_printed_time = time.time()
@@ -114,7 +116,9 @@ class VerboseProgressIndicator(SimpleProgressIndicator):
outcome = 'FAIL'
else:
outcome = 'pass'
- self._print('Done running %s: %s' % (test, outcome))
+
+ self._print('Done running %s %s: %s' % (
+ test, test.variant or 'default', outcome))
def _on_heartbeat(self):
if time.time() - self._last_printed_time > 30:
@@ -129,6 +133,7 @@ class DotsProgressIndicator(SimpleProgressIndicator):
self._count = 0
def _on_result_for(self, test, result):
+ super(DotsProgressIndicator, self)._on_result_for(test, result)
# TODO(majeski): Support for dummy/grouped results
self._count += 1
if self._count > 1 and self._count % 50 == 1:
@@ -160,6 +165,9 @@ class CompactProgressIndicator(ProgressIndicator):
self._passed = 0
self._failed = 0
+ def set_test_count(self, test_count):
+ self._total = test_count
+
def _on_result_for(self, test, result):
# TODO(majeski): Support for dummy/grouped results
if result.has_unexpected_output:
@@ -176,25 +184,30 @@ class CompactProgressIndicator(ProgressIndicator):
self._clear_line(self._last_status_length)
print_failure_header(test)
if len(stdout):
- print self._templates['stdout'] % stdout
+ print(self._templates['stdout'] % stdout)
if len(stderr):
- print self._templates['stderr'] % stderr
- print "Command: %s" % result.cmd
+ print(self._templates['stderr'] % stderr)
+ print("Command: %s" % result.cmd.to_string(relative=True))
if output.HasCrashed():
- print "exit code: %d" % output.exit_code
- print "--- CRASHED ---"
+ print("exit code: %d" % output.exit_code)
+ print("--- CRASHED ---")
if output.HasTimedOut():
- print "--- TIMEOUT ---"
+ print("--- TIMEOUT ---")
def finished(self):
self._print_progress('Done')
- print
+ print()
def _print_progress(self, name):
self._clear_line(self._last_status_length)
elapsed = time.time() - self._start_time
+ if self._total:
+ progress = (self._passed + self._failed) * 100 // self._total
+ else:
+ progress = 0
status = self._templates['status_line'] % {
'passed': self._passed,
+ 'progress': progress,
'failed': self._failed,
'test': name,
'mins': int(elapsed) / 60,
@@ -202,7 +215,7 @@ class CompactProgressIndicator(ProgressIndicator):
}
status = self._truncate(status, 78)
self._last_status_length = len(status)
- print status,
+ print(status, end='')
sys.stdout.flush()
def _truncate(self, string, length):
@@ -219,6 +232,7 @@ class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|"
+ "\033[34m%%%(progress) 4d\033[0m|"
"\033[32m+%(passed) 4d\033[0m|"
"\033[31m-%(failed) 4d\033[0m]: %(test)s"),
'stdout': "\033[1m%s\033[0m",
@@ -227,13 +241,13 @@ class ColorProgressIndicator(CompactProgressIndicator):
super(ColorProgressIndicator, self).__init__(templates)
def _clear_line(self, last_length):
- print "\033[1K\r",
+ print("\033[1K\r", end='')
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
- 'status_line': ("[%(mins)02i:%(secs)02i|"
+ 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
"+%(passed) 4d|-%(failed) 4d]: %(test)s"),
'stdout': '%s',
'stderr': '%s',
@@ -241,46 +255,7 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
super(MonochromeProgressIndicator, self).__init__(templates)
def _clear_line(self, last_length):
- print ("\r" + (" " * last_length) + "\r"),
-
-
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
+ print(("\r" + (" " * last_length) + "\r"), end='')
class JsonTestProgressIndicator(ProgressIndicator):
@@ -349,7 +324,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
float(len(self.tests)))
# Sort tests by duration.
- self.tests.sort(key=lambda (_, duration, cmd): duration, reverse=True)
+    self.tests.sort(key=lambda entry: entry[1], reverse=True)
slowest_tests = [
{
"name": str(test),
diff --git a/deps/v8/tools/testrunner/testproc/rerun.py b/deps/v8/tools/testrunner/testproc/rerun.py
index a72bb3ebc6..d085c55553 100644
--- a/deps/v8/tools/testrunner/testproc/rerun.py
+++ b/deps/v8/tools/testrunner/testproc/rerun.py
@@ -19,7 +19,7 @@ class RerunProc(base.TestProcProducer):
self._rerun_total_left = rerun_max_total
def _next_test(self, test):
- self._send_next_subtest(test)
+ return self._send_next_subtest(test)
def _result_for(self, test, subtest, result):
# First result
@@ -52,7 +52,7 @@ class RerunProc(base.TestProcProducer):
def _send_next_subtest(self, test, run=0):
subtest = self._create_subtest(test, str(run + 1), keep_output=(run != 0))
- self._send_test(subtest)
+ return self._send_test(subtest)
def _finalize_test(self, test):
del self._rerun[test.procid]
diff --git a/deps/v8/tools/testrunner/testproc/seed.py b/deps/v8/tools/testrunner/testproc/seed.py
index 3f40e79b34..160eac812b 100644
--- a/deps/v8/tools/testrunner/testproc/seed.py
+++ b/deps/v8/tools/testrunner/testproc/seed.py
@@ -34,12 +34,19 @@ class SeedProc(base.TestProcProducer):
assert requirement == base.DROP_RESULT
def _next_test(self, test):
- for _ in xrange(0, self._parallel_subtests):
- self._try_send_next_test(test)
+ is_loaded = False
+ for _ in range(0, self._parallel_subtests):
+ is_loaded |= self._try_send_next_test(test)
+
+ return is_loaded
def _result_for(self, test, subtest, result):
self._todo[test.procid] -= 1
- self._try_send_next_test(test)
+ if not self._try_send_next_test(test):
+ if not self._todo.get(test.procid):
+ del self._last_idx[test.procid]
+ del self._todo[test.procid]
+ self._send_result(test, None)
def _try_send_next_test(self, test):
def create_subtest(idx):
@@ -49,10 +56,8 @@ class SeedProc(base.TestProcProducer):
num = self._last_idx[test.procid]
if not self._count or num < self._count:
num += 1
- self._send_test(create_subtest(num))
self._todo[test.procid] += 1
self._last_idx[test.procid] = num
- elif not self._todo.get(test.procid):
- del self._last_idx[test.procid]
- del self._todo[test.procid]
- self._send_result(test, None)
+ return self._send_test(create_subtest(num))
+
+ return False
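SeedProc ORs the per-subtest send results together, so the test counts as loaded if any of its parallel seeded subtests made it into the queue:

    def next_test(send_results):
        is_loaded = False
        for ok in send_results:      # one attempt per parallel subtest
            is_loaded |= ok
        return is_loaded

    print(next_test([False, True, False]))   # True
    print(next_test([False, False]))         # False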
diff --git a/deps/v8/tools/testrunner/testproc/shard.py b/deps/v8/tools/testrunner/testproc/shard.py
index 1caac9fee6..9475ea15f3 100644
--- a/deps/v8/tools/testrunner/testproc/shard.py
+++ b/deps/v8/tools/testrunner/testproc/shard.py
@@ -5,10 +5,21 @@
from . import base
+# Alphabet size determines the hashing radix. Choosing a prime number prevents
+# clustering of the hashes.
+HASHING_ALPHABET_SIZE = 2 ** 7 - 1
+
+def radix_hash(capacity, key):
+ h = 0
+ for character in key:
+ h = (h * HASHING_ALPHABET_SIZE + ord(character)) % capacity
+
+ return h
+
+
class ShardProc(base.TestProcFilter):
"""Processor distributing tests between shards.
- It simply passes every n-th test. To be deterministic it has to be placed
- before all processors that generate tests dynamically.
+  It hashes the unique test identifiers and uses the hash to shard tests.
"""
def __init__(self, myid, shards_count):
"""
@@ -22,9 +33,6 @@ class ShardProc(base.TestProcFilter):
self._myid = myid
self._shards_count = shards_count
- self._last = 0
def _filter(self, test):
- res = self._last != self._myid
- self._last = (self._last + 1) % self._shards_count
- return res
+ return self._myid != radix_hash(self._shards_count, test.procid)
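radix_hash reads the procid as a base-127 number reduced modulo the shard count, so a test's shard follows from its name alone; that is what lets dynamically generated subtests shard deterministically without the old shared counter. Worked by hand for a short key:

    HASHING_ALPHABET_SIZE = 2 ** 7 - 1   # 127, a prime

    def radix_hash(capacity, key):
        h = 0
        for character in key:
            h = (h * HASHING_ALPHABET_SIZE + ord(character)) % capacity
        return h

    # 'ab', capacity 7: (0*127 + 97) % 7 = 6, then (6*127 + 98) % 7 = 6
    print(radix_hash(7, 'ab'))   # 6, matching test_hash_string below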
diff --git a/deps/v8/tools/testrunner/testproc/shard_unittest.py b/deps/v8/tools/testrunner/testproc/shard_unittest.py
new file mode 100755
index 0000000000..33a094e05a
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/shard_unittest.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.testproc.shard import radix_hash
+
+
+class TestRadixHashing(unittest.TestCase):
+ def test_hash_character_by_radix(self):
+ self.assertEqual(97, radix_hash(capacity=2**32, key="a"))
+
+ def test_hash_character_by_radix_with_capacity(self):
+ self.assertEqual(6, radix_hash(capacity=7, key="a"))
+
+ def test_hash_string(self):
+ self.assertEqual(6, radix_hash(capacity=7, key="ab"))
+
+ def test_hash_test_id(self):
+ self.assertEqual(
+ 5,
+ radix_hash(capacity=7,
+ key="test262/Map/class-private-method-Variant-0-1"))
+
+ def test_hash_boundaries(self):
+ total_variants = 5
+ cases = []
+ for case in [
+ "test262/Map/class-private-method",
+ "test262/Map/class-public-method",
+ "test262/Map/object-retrieval",
+ "test262/Map/object-deletion",
+ "test262/Map/object-creation",
+ "test262/Map/garbage-collection",
+ ]:
+ for variant_index in range(total_variants):
+ cases.append("%s-Variant-%d" % (case, variant_index))
+
+ for case in cases:
+ self.assertTrue(0 <= radix_hash(capacity=7, key=case) < 7)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/testproc/sigproc.py b/deps/v8/tools/testrunner/testproc/sigproc.py
index e97fe7ece3..f29fa22e60 100644
--- a/deps/v8/tools/testrunner/testproc/sigproc.py
+++ b/deps/v8/tools/testrunner/testproc/sigproc.py
@@ -2,6 +2,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import signal
from . import base
@@ -21,11 +24,11 @@ class SignalProc(base.TestProcObserver):
signal.signal(signal.SIGTERM, self._on_sigterm)
def _on_ctrlc(self, _signum, _stack_frame):
- print '>>> Ctrl-C detected, early abort...'
+ print('>>> Ctrl-C detected, early abort...')
self.exit_code = utils.EXIT_CODE_INTERRUPTED
self.stop()
def _on_sigterm(self, _signum, _stack_frame):
- print '>>> SIGTERM received, early abort...'
+ print('>>> SIGTERM received, early abort...')
self.exit_code = utils.EXIT_CODE_TERMINATED
self.stop()
diff --git a/deps/v8/tools/testrunner/testproc/variant.py b/deps/v8/tools/testrunner/testproc/variant.py
index dba1af91fc..0164ad8845 100644
--- a/deps/v8/tools/testrunner/testproc/variant.py
+++ b/deps/v8/tools/testrunner/testproc/variant.py
@@ -39,21 +39,22 @@ class VariantProc(base.TestProcProducer):
def _next_test(self, test):
gen = self._variants_gen(test)
self._next_variant[test.procid] = gen
- self._try_send_new_subtest(test, gen)
+ return self._try_send_new_subtest(test, gen)
def _result_for(self, test, subtest, result):
gen = self._next_variant[test.procid]
- self._try_send_new_subtest(test, gen)
+ if not self._try_send_new_subtest(test, gen):
+ self._send_result(test, None)
def _try_send_new_subtest(self, test, variants_gen):
for variant, flags, suffix in variants_gen:
subtest = self._create_subtest(test, '%s-%s' % (variant, suffix),
variant=variant, flags=flags)
- self._send_test(subtest)
- return
+ if self._send_test(subtest):
+ return True
del self._next_variant[test.procid]
- self._send_result(test, None)
+ return False
def _variants_gen(self, test):
"""Generator producing (variant, flags, procid suffix) tuples."""
diff --git a/deps/v8/tools/testrunner/testproc/variant_unittest.py b/deps/v8/tools/testrunner/testproc/variant_unittest.py
new file mode 100755
index 0000000000..56e28c8d5b
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/variant_unittest.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.testproc import base
+from testrunner.testproc.variant import VariantProc
+
+
+class FakeResultObserver(base.TestProcObserver):
+ def __init__(self):
+ super(FakeResultObserver, self).__init__()
+
+ self.results = set()
+
+ def result_for(self, test, result):
+ self.results.add((test, result))
+
+
+class FakeFilter(base.TestProcFilter):
+ def __init__(self, filter_predicate):
+ super(FakeFilter, self).__init__()
+
+ self._filter_predicate = filter_predicate
+
+ self.loaded = set()
+ self.call_counter = 0
+
+ def next_test(self, test):
+ self.call_counter += 1
+
+ if self._filter_predicate(test):
+ return False
+
+ self.loaded.add(test)
+ return True
+
+
+class FakeSuite(object):
+ def __init__(self, name):
+ self.name = name
+
+
+class FakeTest(object):
+ def __init__(self, procid):
+ self.suite = FakeSuite("fake_suite")
+ self.procid = procid
+
+ self.keep_output = False
+
+ def create_subtest(self, proc, subtest_id, **kwargs):
+ variant = kwargs['variant']
+
+ variant.origin = self
+ return variant
+
+
+class FakeVariantGen(object):
+ def __init__(self, variants):
+ self._variants = variants
+
+ def gen(self, test):
+ for variant in self._variants:
+ yield variant, [], "fake_suffix"
+
+
+class TestVariantProcLoading(unittest.TestCase):
+ def setUp(self):
+ self.test = FakeTest("test")
+
+ def _simulate_proc(self, variants):
+ """Expects the list of instantiated test variants to load into the
+ VariantProc."""
+ variants_mapping = {self.test: variants}
+
+ # Creates a Variant processor containing the possible types of test
+ # variants.
+ self.variant_proc = VariantProc(variants=["to_filter", "to_load"])
+ self.variant_proc._variant_gens = {
+ "fake_suite": FakeVariantGen(variants)}
+
+ # FakeFilter only lets tests passing the predicate to be loaded.
+ self.fake_filter = FakeFilter(
+ filter_predicate=(lambda t: t.procid == "to_filter"))
+
+ # FakeResultObserver to verify that VariantProc calls result_for correctly.
+ self.fake_result_observer = FakeResultObserver()
+
+ # Links up processors together to form a test processing pipeline.
+ self.variant_proc._prev_proc = self.fake_result_observer
+ self.fake_filter._prev_proc = self.variant_proc
+ self.variant_proc._next_proc = self.fake_filter
+
+ # Injects the test into the VariantProc
+ is_loaded = self.variant_proc.next_test(self.test)
+
+ # Verifies the behavioral consistency by using the instrumentation in
+ # FakeFilter
+ loaded_variants = list(self.fake_filter.loaded)
+ self.assertEqual(is_loaded, any(loaded_variants))
+ return self.fake_filter.loaded, self.fake_filter.call_counter
+
+ def test_filters_first_two_variants(self):
+ variants = [
+ FakeTest('to_filter'),
+ FakeTest('to_filter'),
+ FakeTest('to_load'),
+ FakeTest('to_load'),
+ ]
+ expected_load_results = {variants[2]}
+
+ load_results, call_count = self._simulate_proc(variants)
+
+ self.assertSetEqual(expected_load_results, load_results)
+ self.assertEqual(call_count, 3)
+
+ def test_stops_loading_after_first_successful_load(self):
+ variants = [
+ FakeTest('to_load'),
+ FakeTest('to_load'),
+ FakeTest('to_filter'),
+ ]
+ expected_load_results = {variants[0]}
+
+ loaded_tests, call_count = self._simulate_proc(variants)
+
+ self.assertSetEqual(expected_load_results, loaded_tests)
+ self.assertEqual(call_count, 1)
+
+ def test_return_result_when_out_of_variants(self):
+ variants = [
+ FakeTest('to_filter'),
+ FakeTest('to_load'),
+ ]
+
+ self._simulate_proc(variants)
+
+ self.variant_proc.result_for(variants[1], None)
+
+ expected_results = {(self.test, None)}
+
+ self.assertSetEqual(expected_results, self.fake_result_observer.results)
+
+ def test_return_result_after_running_variants(self):
+ variants = [
+ FakeTest('to_filter'),
+ FakeTest('to_load'),
+ FakeTest('to_load'),
+ ]
+
+ self._simulate_proc(variants)
+ self.variant_proc.result_for(variants[1], None)
+
+ self.assertSetEqual(set(variants[1:]), self.fake_filter.loaded)
+
+ self.variant_proc.result_for(variants[2], None)
+
+ expected_results = {(self.test, None)}
+ self.assertSetEqual(expected_results, self.fake_result_observer.results)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/utils/dump_build_config_gyp.py b/deps/v8/tools/testrunner/utils/dump_build_config_gyp.py
index 7f72627131..963b0e2abe 100644
--- a/deps/v8/tools/testrunner/utils/dump_build_config_gyp.py
+++ b/deps/v8/tools/testrunner/utils/dump_build_config_gyp.py
@@ -11,6 +11,9 @@ Raw gyp values are supported - they will be transformed into valid json.
"""
# TODO(machenbach): Remove this when gyp is deprecated.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import json
import os
import sys
@@ -47,7 +50,7 @@ def as_json(kv):
try:
return k, json.loads(v2)
except ValueError as e:
- print(k, v, v2)
+ print((k, v, v2))
raise e
with open(sys.argv[1], 'w') as f:
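The `from __future__ import print_function` import added here (and in many of
the files below) is the standard py2/py3 migration idiom: it turns `print`
into a function on Python 2, so the same call syntax runs unchanged on both
interpreters. A minimal sketch of what it enables:

```
# With the __future__ import, print is a function on Python 2 as well,
# so keyword arguments such as file= and end= work identically on 2 and 3.
from __future__ import print_function

import sys

print("to stderr", file=sys.stderr)
print("no trailing newline", end="")
```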
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index 148e9af4c5..761f727e6f 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -6,6 +6,9 @@
"""This program either generates the parser files for Torque, generating
the source and header files directly in V8's src directory."""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import subprocess
import sys
import re
@@ -46,11 +49,11 @@ def preprocess(input):
return input
def postprocess(output):
- output = re.sub(r'% RawObjectCast', r'%RawObjectCast', output)
- output = re.sub(r'% RawPointerCast', r'%RawPointerCast', output)
- output = re.sub(r'% RawConstexprCast', r'%RawConstexprCast', output)
- output = re.sub(r'% FromConstexpr', r'%FromConstexpr', output)
- output = re.sub(r'% Allocate', r'%Allocate', output)
+ output = re.sub(r'%\s*RawDownCast', r'%RawDownCast', output)
+ output = re.sub(r'%\s*RawConstexprCast', r'%RawConstexprCast', output)
+ output = re.sub(r'%\s*FromConstexpr', r'%FromConstexpr', output)
+ output = re.sub(r'%\s*Allocate', r'%Allocate', output)
+ output = re.sub(r'%\s*GetAllocationBaseSize', r'%GetAllocationBaseSize', output)
output = re.sub(r'\/\*COxp\*\/', r'constexpr', output)
output = re.sub(r'(\S+)\s*: type([,>])', r'\1: type\2', output)
output = re.sub(r'(\n\s*)labels( [A-Z])', r'\1 labels\2', output)
@@ -86,56 +89,66 @@ def postprocess(output):
return output
-def process(filename, only_lint, use_stdout):
+def process(filename, lint, should_format):
with open(filename, 'r') as content_file:
content = content_file.read()
original_input = content
- p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ if sys.platform.startswith('win'):
+ p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
+ else:
+ p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(preprocess(content))
output = postprocess(output)
rc = p.returncode
- if (rc <> 0):
- print "error code " + str(rc) + " running clang-format. Exiting..."
+ if (rc != 0):
+ print("error code " + str(rc) + " running clang-format. Exiting...")
sys.exit(rc);
- if only_lint:
+ if lint:
if (output != original_input):
- print >>sys.stderr, filename + ' requires formatting'
- elif use_stdout:
- print output
- else:
+ print(filename + ' requires formatting', file=sys.stderr)
+
+ if should_format:
output_file = open(filename, 'w')
output_file.write(output);
output_file.close()
def print_usage():
- print 'format-torque -i file1[, file2[, ...]]'
- print ' format and overwrite input files'
- print 'format-torque -l file1[, file2[, ...]]'
- print ' merely indicate which files need formatting'
+ print('format-torque -i file1[, file2[, ...]]')
+ print(' format and overwrite input files')
+ print('format-torque -l file1[, file2[, ...]]')
+ print(' merely indicate which files need formatting')
def Main():
if len(sys.argv) < 3:
- print "error: at least 2 arguments required"
+ print("error: at least 2 arguments required")
print_usage();
sys.exit(-1)
+ def is_option(arg):
+ return arg in ['-i', '-l', '-il']
+
+ should_format = lint = False
use_stdout = True
- lint = False
- if sys.argv[1] == '-i':
- use_stdout = False
- elif sys.argv[1] == '-l':
- lint = True
+ flag, files = sys.argv[1], sys.argv[2:]
+ if is_option(flag):
+ if '-i' == flag:
+ should_format = True
+ elif '-l' == flag:
+ lint = True
+ else:
+ lint = True
+ should_format = True
else:
- print "error: -i or -l must be specified as the first argument"
+ print("error: -i and/or -l flags must be specified")
print_usage();
sys.exit(-1);
- for filename in sys.argv[2:]:
- process(filename, lint, use_stdout)
+ for filename in files:
+ process(filename, lint, should_format)
return 0
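With the reworked flag handling, `-i` formats files in place, `-l` only
reports files that need formatting, and the new `-il` does both: lint first,
then overwrite. Illustrative invocations (the `.tq` path is an example only;
note that v8_presubmit.py below now passes `-il`):

```
tools/torque/format-torque.py -i  src/builtins/base.tq   # format and overwrite
tools/torque/format-torque.py -l  src/builtins/base.tq   # report only
tools/torque/format-torque.py -il src/builtins/base.tq   # lint, then format
```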
diff --git a/deps/v8/tools/torque/make-torque-parser.py b/deps/v8/tools/torque/make-torque-parser.py
index 8151d151e1..807b68bf36 100755
--- a/deps/v8/tools/torque/make-torque-parser.py
+++ b/deps/v8/tools/torque/make-torque-parser.py
@@ -23,7 +23,7 @@ result = subprocess.call(cargs)
os.chdir(cwd)
def fix_file(filename):
- is_header = re.search(r'\.h', filename) <> None;
+ is_header = re.search(r'\.h', filename) is not None;
header_macro = filename.upper();
header_macro = re.sub('\.', '_', header_macro);
header_macro = "V8_TORQUE_" + header_macro + '_';
diff --git a/deps/v8/tools/torque/vscode-torque/.npmrc b/deps/v8/tools/torque/vscode-torque/.npmrc
new file mode 100644
index 0000000000..43c97e719a
--- /dev/null
+++ b/deps/v8/tools/torque/vscode-torque/.npmrc
@@ -0,0 +1 @@
+package-lock=false
diff --git a/deps/v8/tools/torque/vscode-torque/README.md b/deps/v8/tools/torque/vscode-torque/README.md
index 1d01daa3b9..fc4efa69ff 100644
--- a/deps/v8/tools/torque/vscode-torque/README.md
+++ b/deps/v8/tools/torque/vscode-torque/README.md
@@ -1,14 +1,33 @@
-# Torque syntax support
+# Torque support
-This extensions adds rudimentary syntax highlighting support for the WIP
-Torque language used in V8.
+This extension adds language support for [the Torque language used in V8](https://v8.dev/docs/torque).
## Installation
-Since the extension is not published to the marketplace, the easiest way to
-install the extension is to symlink the extension to your local extension
-directory:
+Since the extension is currently not published to the marketplace, the easiest way to
+install the extension is to symlink it to your local extension directory:
```
ln -s $V8/tools/torque/vscode-torque $HOME/.vscode/extensions/vscode-torque
-```
\ No newline at end of file
+```
+
+Additionally, for advanced language server features, the extension needs to be built
+locally (syntax highlighting does not require this step). The following needs to be run
+every time the extension is updated:
+
+```
+cd $V8/tools/torque/vscode-torque
+npm install
+```
+
+### Language server
+
+The language server is not built by default. To build the language server manually:
+
+```
+autoninja -C <output dir> torque-language-server
+```
+
+The default directory where the extension looks for the executable is "out/x64.release",
+but the absolute path to the executable can be configured with the `torque.ls.executable`
+setting.
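For reference, a hypothetical VSCode `settings.json` fragment wiring up the
settings this extension contributes (the values are examples; the setting
names match the `contributes.configuration` entries in the package.json
further down in this diff):

```
{
  "torque.ls.executable": "/absolute/path/to/out/x64.release/torque-language-server",
  "torque.ls.logging": true,
  "torque.ls.logfile": "torque-log.txt",
  "torque.trace.server": "verbose"
}
```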
diff --git a/deps/v8/tools/torque/vscode-torque/out/extension.js b/deps/v8/tools/torque/vscode-torque/out/extension.js
new file mode 100644
index 0000000000..8cfae5dea4
--- /dev/null
+++ b/deps/v8/tools/torque/vscode-torque/out/extension.js
@@ -0,0 +1,99 @@
+"use strict";
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+ return new (P || (P = Promise))(function (resolve, reject) {
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+ function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
+ });
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+// The file out/extension.js gets automatically created from
+// src/extension.ts. out/extension.js should not be modified manually.
+const path = require("path");
+const vscode_1 = require("vscode");
+const vscode_languageclient_1 = require("vscode-languageclient");
+const vscode_languageclient_2 = require("vscode-languageclient");
+let client;
+let outputChannel;
+class TorqueErrorHandler {
+ constructor(config) {
+ this.config = config;
+ }
+ error(error, message, count) {
+ outputChannel.appendLine("TorqueErrorHandler: ");
+ outputChannel.append(error.toString());
+ outputChannel.append(message.toString());
+ return vscode_languageclient_1.ErrorAction.Continue;
+ }
+ closed() {
+ return vscode_languageclient_1.CloseAction.DoNotRestart;
+ }
+}
+function activate(context) {
+ return __awaiter(this, void 0, void 0, function* () {
+ // Create a status bar item that displays the current status of the language server.
+ const statusBarItem = vscode_1.window.createStatusBarItem(vscode_1.StatusBarAlignment.Left, 0);
+ statusBarItem.text = "torque-ls: <unknown>";
+ statusBarItem.show();
+ const torqueConfiguration = vscode_1.workspace.getConfiguration("torque.ls");
+ let serverExecutable = torqueConfiguration.get("executable");
+ if (serverExecutable == null) {
+ serverExecutable = path.join(vscode_1.workspace.rootPath, "out", "x64.release", "torque-language-server");
+ }
+ let serverArguments = [];
+ const loggingEnabled = torqueConfiguration.get("logging");
+ if (loggingEnabled) {
+ const logfile = torqueConfiguration.get("logfile");
+ serverArguments = ["-l", logfile];
+ }
+ const serverOptions = { command: serverExecutable, args: serverArguments };
+ outputChannel = vscode_1.window.createOutputChannel("Torque Language Server");
+ const clientOptions = {
+ diagnosticCollectionName: "torque",
+ documentSelector: [{ scheme: "file", language: "torque" }],
+ errorHandler: new TorqueErrorHandler(vscode_1.workspace.getConfiguration("torque")),
+ initializationFailedHandler: (e) => {
+ outputChannel.appendLine(e);
+ return false;
+ },
+ outputChannel,
+ revealOutputChannelOn: vscode_languageclient_1.RevealOutputChannelOn.Info,
+ };
+ // Create the language client and start the client.
+ client = new vscode_languageclient_2.LanguageClient("torque", "Torque Language Server", serverOptions, clientOptions);
+ client.trace = vscode_languageclient_1.Trace.Verbose;
+ // Update the status bar according to the client state.
+ client.onDidChangeState((event) => {
+ if (event.newState === vscode_languageclient_1.State.Running) {
+ statusBarItem.text = "torque-ls: Running";
+ }
+ else if (event.newState === vscode_languageclient_1.State.Starting) {
+ statusBarItem.text = "torque-ls: Starting";
+ }
+ else {
+ statusBarItem.text = "torque-ls: Stopped";
+ }
+ });
+ // This will start client and server.
+ client.start();
+ yield client.onReady();
+ // The server needs an initial list of all the Torque files
+ // in the workspace, send them over.
+ vscode_1.workspace.findFiles("**/*.tq").then((urls) => {
+ client.sendNotification("torque/fileList", { files: urls.map((url) => url.toString()) });
+ });
+ });
+}
+exports.activate = activate;
+function deactivate() {
+ if (!client) {
+ return undefined;
+ }
+ return client.stop();
+}
+exports.deactivate = deactivate;
+//# sourceMappingURL=extension.js.map
\ No newline at end of file
diff --git a/deps/v8/tools/torque/vscode-torque/package.json b/deps/v8/tools/torque/vscode-torque/package.json
index ac79161ae3..42174a6c9f 100644
--- a/deps/v8/tools/torque/vscode-torque/package.json
+++ b/deps/v8/tools/torque/vscode-torque/package.json
@@ -1,26 +1,90 @@
{
"name": "vscode-torque",
- "displayName": "Torque syntax support",
- "description": "Basic Torque syntax highlighting support",
+ "displayName": "Torque language support",
+ "description": "Syntax highlighting and language server for the V8 Torque programming language",
"version": "0.0.1",
"publisher": "szuend",
"engines": {
- "vscode": "^1.22.0"
+ "vscode": "^1.31.0"
},
"categories": [
- "Languages"
+ "Programming Languages"
],
+ "activationEvents": [
+ "onLanguage:torque",
+ "workspaceContains:**/*.tq"
+ ],
+ "main": "./out/extension",
"contributes": {
- "languages": [{
- "id": "torque",
- "aliases": ["Torque", "torque"],
- "extensions": [".tq"],
- "configuration": "./language-configuration.json"
- }],
- "grammars": [{
- "language": "torque",
- "scopeName": "source.torque",
- "path": "./syntaxes/torque.tmLanguage.json"
- }]
+ "configuration": {
+ "type": "object",
+ "title": "Torque",
+ "properties": {
+ "torque.ls.executable": {
+ "type": [
+ "string",
+ null
+ ],
+ "default": null,
+ "description": "Path to the torque language server executable (absolute)"
+ },
+ "torque.ls.logging": {
+ "type": "boolean",
+ "default": false,
+ "description": "Enable language server diagnostics output to log file"
+ },
+ "torque.ls.logfile": {
+ "type": "string",
+ "default": "torque-log.txt",
+ "description": "Target file for language server logging output"
+ },
+ "torque.trace.server": {
+ "type": "string",
+ "enum": [
+ "off",
+ "messages",
+ "verbose"
+ ],
+ "default": "off",
+ "description": "Trace the communication with the Torque language server from VSCode."
+ }
+ }
+ },
+ "languages": [
+ {
+ "id": "torque",
+ "aliases": [
+ "Torque",
+ "torque"
+ ],
+ "extensions": [
+ ".tq"
+ ],
+ "configuration": "./language-configuration.json"
+ }
+ ],
+ "grammars": [
+ {
+ "language": "torque",
+ "scopeName": "source.torque",
+ "path": "./syntaxes/torque.tmLanguage.json"
+ }
+ ]
+ },
+ "dependencies": {
+ "vscode-languageclient": "^5.2.1"
+ },
+ "devDependencies": {
+ "@types/node": "^8.0.0",
+ "vscode": "^1.1.21",
+ "tslint": "^5.11.0",
+ "typescript": "^3.1.3"
+ },
+ "scripts": {
+ "update-vscode": "vscode-install",
+ "postinstall": "vscode-install",
+ "vscode:prepublish": "npm run update-vscode && npm run compile",
+ "compile": "tsc -b",
+ "watch": "tsc -b -w"
}
-}
\ No newline at end of file
+}
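The new `scripts` block pairs with the README instructions earlier in this
diff: `npm install` pulls the dependencies and triggers the `postinstall`
hook (`vscode-install`), and `tsc -b` compiles src/extension.ts into
out/extension.js. A typical local build (illustrative):

```
cd $V8/tools/torque/vscode-torque
npm install        # also runs the postinstall vscode-install hook
npm run compile    # tsc -b: emits out/extension.js from src/extension.ts
```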
diff --git a/deps/v8/tools/torque/vscode-torque/src/extension.ts b/deps/v8/tools/torque/vscode-torque/src/extension.ts
new file mode 100644
index 0000000000..7caff1e6dd
--- /dev/null
+++ b/deps/v8/tools/torque/vscode-torque/src/extension.ts
@@ -0,0 +1,104 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The file out/extension.js gets automatically created from
+// src/extension.ts. out/extension.js should not be modified manually.
+
+import * as path from "path";
+import { ExtensionContext, OutputChannel, StatusBarAlignment,
+ window, workspace, WorkspaceConfiguration } from "vscode";
+import { CloseAction, ErrorAction, ErrorHandler, Message,
+ RevealOutputChannelOn, State, Trace } from "vscode-languageclient";
+
+import {
+ LanguageClient,
+ LanguageClientOptions,
+ ServerOptions,
+} from "vscode-languageclient";
+
+let client: LanguageClient;
+let outputChannel: OutputChannel;
+
+class TorqueErrorHandler implements ErrorHandler {
+ constructor(readonly config: WorkspaceConfiguration) {}
+
+ public error(error: Error, message: Message, count: number): ErrorAction {
+ outputChannel.appendLine("TorqueErrorHandler: ");
+ outputChannel.append(error.toString());
+ outputChannel.append(message.toString());
+ return ErrorAction.Continue;
+ }
+
+ public closed(): CloseAction {
+ return CloseAction.DoNotRestart;
+ }
+}
+
+export async function activate(context: ExtensionContext) {
+ // Create a status bar item that displays the current status of the language server.
+ const statusBarItem = window.createStatusBarItem(StatusBarAlignment.Left, 0);
+ statusBarItem.text = "torque-ls: <unknown>";
+ statusBarItem.show();
+
+ const torqueConfiguration = workspace.getConfiguration("torque.ls");
+ let serverExecutable: string | null = torqueConfiguration.get("executable");
+ if (serverExecutable == null) {
+ serverExecutable = path.join(workspace.rootPath, "out", "x64.release", "torque-language-server");
+ }
+
+ let serverArguments = [];
+ const loggingEnabled: boolean = torqueConfiguration.get("logging");
+ if (loggingEnabled) {
+ const logfile = torqueConfiguration.get("logfile");
+ serverArguments = ["-l", logfile];
+ }
+
+ const serverOptions: ServerOptions = { command: serverExecutable, args: serverArguments };
+
+ outputChannel = window.createOutputChannel("Torque Language Server");
+
+ const clientOptions: LanguageClientOptions = {
+ diagnosticCollectionName: "torque",
+ documentSelector: [{ scheme: "file", language: "torque" }],
+ errorHandler: new TorqueErrorHandler(workspace.getConfiguration("torque")),
+ initializationFailedHandler: (e) => {
+ outputChannel.appendLine(e);
+ return false;
+ },
+ outputChannel,
+ revealOutputChannelOn: RevealOutputChannelOn.Info,
+ };
+
+ // Create the language client and start the client.
+ client = new LanguageClient("torque", "Torque Language Server", serverOptions, clientOptions);
+ client.trace = Trace.Verbose;
+
+ // Update the status bar according to the client state.
+ client.onDidChangeState((event) => {
+ if (event.newState === State.Running) {
+ statusBarItem.text = "torque-ls: Running";
+ } else if (event.newState === State.Starting) {
+ statusBarItem.text = "torque-ls: Starting";
+ } else {
+ statusBarItem.text = "torque-ls: Stopped";
+ }
+ });
+
+ // This will start client and server.
+ client.start();
+
+ await client.onReady();
+
+ // The server needs an initial list of all the Torque files
+ // in the workspace, send them over.
+ workspace.findFiles("**/*.tq").then((urls) => {
+ client.sendNotification("torque/fileList",
+ { files: urls.map((url) => url.toString())});
+ });
+}
+
+export function deactivate(): Thenable<void> | undefined {
+ if (!client) { return undefined; }
+ return client.stop();
+}
diff --git a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
index 39b994ec25..cbbf381da8 100644
--- a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
+++ b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
@@ -3,12 +3,6 @@
"name": "Torque",
"patterns": [
{
- "include": "#keywords"
- },
- {
- "include": "#strings"
- },
- {
"name": "comment.line.double-slash.torque",
"begin": "//",
"end": "$"
@@ -19,67 +13,160 @@
"end": "\\*/"
},
{
- "name": "string.quoted.single.torque",
- "begin": "'",
- "end": "'",
+ "name": "support.function.torque",
+ "match": "\\b(assert|check|debug|unreachable|Cast|Convert|FromConstexpr|UnsafeCast)\\b"
+ },
+ {
+ "name": "constant.other.torque",
+ "match": "\\b(true|True|false|False|Undefined|Hole|Null|k[A-Z][A-Za-z0-9]+)\\b"
+ },
+ {
+ "begin": "\\b<(?=[A-Za-z][0-9A-Za-z_|, ]*>)",
+ "end": ">",
"patterns": [
{
- "name": "constant.character.escape.torque",
- "match": "\\\\."
- }]
+ "include": "#common"
+ },
+ {
+ "name": "support.type.torque",
+ "match": "([A-Za-z][0-9A-Za-z_]*)"
+ }
+ ]
},
{
- "name": "support.function.torque",
- "match": "\\b(assert|check|debug|unreachable|Cast|Convert|FromConstexpr|UnsafeCast)\\b"
+ "begin": "\\b(?=extern\\b)",
+ "end": ";",
+ "patterns": [
+ {
+ "begin": "\\)\\(|(?=(\\b[a-zA-Z0-9_]+)\\((?!\\s*implicit))",
+ "end": "\\)",
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "name": "support.type.torque",
+ "match": "([A-Za-z][0-9A-Za-z_]*)"
+ }
+ ]
+ },
+ {
+ "include": "#common"
+ }
+ ]
},
- {
- "name": "support.variable.torque",
- "match": "\\b(true|True|false|False|Undefined|Hole|Null)\\b"
- },
- {
- "begin": "<(?=[A-Za-z][0-9A-Za-z_]*>)",
- "end": ">",
- "patterns": [
- {
- "name": "support.type.torque",
- "match": "([A-Za-z][0-9A-Za-z_]*)"
- }
- ]
- },
- {
- "begin": ":(\\s*)?",
- "end": "(?=[^0-9A-Za-z_])",
- "patterns": [
- {
- "name": "support.type.torque",
- "match": "([A-Za-z][0-9A-Za-z_]*)"
- }
- ]
- }
- ],
- "repository": {
- "keywords": {
- "patterns": [{
- "name": "keyword.control.torque",
- "match": "\\b(if|else|while|for|return|continue|break|goto|otherwise|try|label|catch)\\b"
- },
- {
- "name": "keyword.other.torque",
- "match": "\\b(constexpr|module|macro|builtin|runtime|intrinsic|javascript|implicit|deferred|label|labels|tail|let|generates|type|struct|class|weak|extends|extern|const|typeswitch|case|transient|transitioning)\\b"
+ {
+ "begin": "\\b(type)\\b",
+ "end": ";",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ }
},
- {
- "name": "keyword.operator.torque",
- "match": "\\b(=|\\*=)\\b"
- }]
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "name": "support.type.torque",
+ "match": "\\b([A-Za-z][0-9A-Za-z_]*)\\b"
+ }
+ ]
+ },
+ {
+ "name": "keyword.control.torque",
+ "match": "#include"
},
- "strings": {
- "name": "string.quoted.double.torque",
- "begin": "\"",
- "end": "\"",
+ {
+ "include": "#common"
+ }
+ ],
+ "repository": {
+ "common": {
"patterns": [
{
- "name": "constant.character.escape.torque",
- "match": "\\\\."
+ "match": "\\b(extends)\\s+([A-Za-z0-9]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ },
+ "2": {
+ "name": "support.type.torque"
+ }
+ }
+ },
+ {
+ "name": "keyword.control.torque",
+ "match": "\\b(if|else|while|for|return|continue|break|goto|otherwise|try|label|catch)\\b"
+ },
+ {
+ "name": "keyword.other.torque",
+ "match": "\\b(constexpr|macro|builtin|runtime|intrinsic|javascript|implicit|deferred|label|labels|tail|let|generates|weak|extern|const|typeswitch|case|transient|transitioning|operator|namespace)\\b"
+ },
+ {
+ "name": "keyword.operator.torque",
+ "match": "\\b(=|\\*=)\\b"
+ },
+ {
+ "match": "\\b(class)\\s+([A-Za-z0-9]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ },
+ "2": {
+ "name": "support.type.torque"
+ }
+ }
+ },
+ {
+ "match": "\\b(struct)\\s+([A-Za-z0-9]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ },
+ "2": {
+ "name": "support.type.torque"
+ }
+ }
+ },
+ {
+ "name": "string.quoted.double.torque",
+ "begin": "\"",
+ "end": "\"",
+ "patterns": [
+ {
+ "name": "constant.character.escape.torque",
+ "match": "\\\\."
+ }
+ ]
+ },
+ {
+ "name": "string.quoted.single.torque",
+ "begin": "'",
+ "end": "'",
+ "patterns": [
+ {
+ "name": "constant.character.escape.torque",
+ "match": "\\\\."
+ }
+ ]
+ },
+ {
+ "begin": ":(\\s*)?",
+ "end": "(?=(generates|[^0-9A-Za-z_| ]))",
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "name": "support.type.torque",
+ "match": "([A-Za-z][0-9A-Za-z_]*)"
+ }
+ ]
+ },
+ {
+ "name": "support.function.torque",
+ "match": "\\b[A-Za-z0-9_]+\\b(?=(<[ ,:A-Za-z0-9_]+>)?\\()"
}
]
}
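The regrouped grammar moves strings, keywords, and type annotations into the
shared `#common` repository rule and adds declaration and call-site captures.
A hypothetical Torque-style fragment (not from the commit; the syntax is
sketched from the keywords the grammar lists) showing what the new captures
target:

```
// 'type'/'class' + 'extends' now scope the declared and parent names as
// support.type.torque; 'Unbox(' matches the new call-site pattern as
// support.function.torque.
type MyNumber extends Smi;
class MyBox extends HeapObject { value: MyNumber; }
macro Unbox(box: MyBox): MyNumber { return box.value; }
```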
diff --git a/deps/v8/tools/torque/vscode-torque/tsconfig.json b/deps/v8/tools/torque/vscode-torque/tsconfig.json
new file mode 100644
index 0000000000..e1b012eed7
--- /dev/null
+++ b/deps/v8/tools/torque/vscode-torque/tsconfig.json
@@ -0,0 +1,17 @@
+{
+ "compilerOptions": {
+ "module": "commonjs",
+ "target": "es6",
+ "outDir": "out",
+ "rootDir": "src",
+ "lib": [ "es6" ],
+ "sourceMap": true
+ },
+ "include": [
+ "src"
+ ],
+ "exclude": [
+ "node_modules",
+ ".vscode-test"
+ ]
+}
\ No newline at end of file
diff --git a/deps/v8/tools/torque/vscode-torque/tslint.json b/deps/v8/tools/torque/vscode-torque/tslint.json
new file mode 100644
index 0000000000..eaa124644b
--- /dev/null
+++ b/deps/v8/tools/torque/vscode-torque/tslint.json
@@ -0,0 +1,11 @@
+{
+ "defaultSeverity": "error",
+ "extends": [
+ "tslint:recommended"
+ ],
+ "jsRules": {},
+ "rules": {
+ "indent": [true, "spaces", 2]
+ },
+ "rulesDirectory": []
+}
\ No newline at end of file
diff --git a/deps/v8/tools/trace-maps-processor.py b/deps/v8/tools/trace-maps-processor.py
index bf8c8a8c92..4a29eab5db 100755
--- a/deps/v8/tools/trace-maps-processor.py
+++ b/deps/v8/tools/trace-maps-processor.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import sys
@@ -169,4 +172,4 @@ elif action == "count":
reasons_list.append("%8d %s" % (reasons[r], r))
reasons_list.sort(reverse=True)
for r in reasons_list[:20]:
- print r
+ print(r)
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index a0a98ee752..7c450f7bc1 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import argparse
import os
import subprocess
@@ -28,6 +31,8 @@ DEFAULT_BOTS = [
PUBLIC_BENCHMARKS = [
'arewefastyet',
+ 'ares6',
+ 'blazor',
'compile',
'embenchen',
'emscripten',
@@ -67,11 +72,11 @@ def main():
help='Add %s trybot.' % BOTS[option])
options = parser.parse_args()
if not options.bots:
- print 'No trybots specified. Using default %s.' % ','.join(DEFAULT_BOTS)
+ print('No trybots specified. Using default %s.' % ','.join(DEFAULT_BOTS))
options.bots = DEFAULT_BOTS
if not options.benchmarks:
- print 'Please specify the benchmarks to run as arguments.'
+ print('Please specify the benchmarks to run as arguments.')
return 1
for benchmark in options.benchmarks:
@@ -79,7 +84,7 @@ def main():
print ('%s not found in our benchmark list. The respective trybot might '
'fail, unless you run something this script isn\'t aware of. '
'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))
- print 'Proceed anyways? [Y/n] ',
+ print('Proceed anyways? [Y/n] ', end=' ')
answer = sys.stdin.readline().strip()
if answer != "" and answer != "Y" and answer != "y":
return 1
@@ -100,7 +105,7 @@ def main():
cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
if options.verbose:
cmd.append('-vv')
- print 'Running %s' % ' '.join(cmd)
+ print('Running %s' % ' '.join(cmd))
subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
if __name__ == '__main__': # pragma: no cover
diff --git a/deps/v8/tools/turbolizer-perf.py b/deps/v8/tools/turbolizer-perf.py
index c90a1174d4..d35f538c7c 100644
--- a/deps/v8/tools/turbolizer-perf.py
+++ b/deps/v8/tools/turbolizer-perf.py
@@ -2,6 +2,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
import sys
import json
@@ -25,7 +28,7 @@ def trace_begin():
known_addrs.add(result.group(0))
def trace_end():
- print json.dumps(json_obj)
+ print(json.dumps(json_obj))
def process_event(param_dict):
addr = "0x%x" % int(param_dict['sample']['ip'])
diff --git a/deps/v8/tools/turbolizer/deploy.sh b/deps/v8/tools/turbolizer/deploy.sh
index ae069762d9..011c2f47f1 100755
--- a/deps/v8/tools/turbolizer/deploy.sh
+++ b/deps/v8/tools/turbolizer/deploy.sh
@@ -13,12 +13,12 @@ function copy() {
}
echo -n "Deploying..."
-copy *.jpg $DEST/
copy *.png $DEST/
copy *.css $DEST/
copy index.html $DEST/
copy info-view.html $DEST/
copy -R build $DEST/
+copy -R img $DEST/
echo "done!"
echo "Deployed to $DEST/."
diff --git a/deps/v8/tools/turbolizer/package-lock.json b/deps/v8/tools/turbolizer/package-lock.json
index 34dea91004..69b7d3bd7f 100644
--- a/deps/v8/tools/turbolizer/package-lock.json
+++ b/deps/v8/tools/turbolizer/package-lock.json
@@ -1130,9 +1130,9 @@
"dev": true
},
"fs-extra": {
- "version": "7.0.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.0.tgz",
- "integrity": "sha512-EglNDLRpmaTWiD/qraZn6HREAEAHJcJOmxNEYwq6xeMKnVMAy3GUcFB+wXt2C6k4CNvB/mP1y/U3dzvKKj5OtQ==",
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz",
+ "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==",
"requires": {
"graceful-fs": "^4.1.2",
"jsonfile": "^4.0.0",
@@ -1943,9 +1943,9 @@
"dev": true
},
"math-random": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.1.tgz",
- "integrity": "sha1-izqsWIuKZuSXXjzepn97sylgH6w="
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.4.tgz",
+ "integrity": "sha512-rUxjysqif/BZQH2yhd5Aaq7vXMSx9NdEsQcyA07uEzIvxgI7zIr33gGsh+RU0/XjmQpCW7RsVof1vlkvQVCK5A=="
},
"media-typer": {
"version": "0.3.0",
@@ -2394,11 +2394,11 @@
}
},
"rollup-plugin-typescript2": {
- "version": "0.18.1",
- "resolved": "https://registry.npmjs.org/rollup-plugin-typescript2/-/rollup-plugin-typescript2-0.18.1.tgz",
- "integrity": "sha512-aR2m5NCCAUV/KpcKgCWX6Giy8rTko9z92b5t0NX9eZyjOftCvcdDFa1C9Ze/9yp590hnRymr5hG0O9SAXi1oUg==",
+ "version": "0.19.3",
+ "resolved": "https://registry.npmjs.org/rollup-plugin-typescript2/-/rollup-plugin-typescript2-0.19.3.tgz",
+ "integrity": "sha512-lsRqfBCZhMl/tq9AT5YnQvzQWzXtnx3EQYFcHD72gul7nyyoOrzx5yCEH20smpw58v6UkHHZz03FbdLEPoHWjA==",
"requires": {
- "fs-extra": "7.0.0",
+ "fs-extra": "7.0.1",
"resolve": "1.8.1",
"rollup-pluginutils": "2.3.3",
"tslib": "1.9.3"
diff --git a/deps/v8/tools/turbolizer/package.json b/deps/v8/tools/turbolizer/package.json
index ae354ba393..511d10cf71 100644
--- a/deps/v8/tools/turbolizer/package.json
+++ b/deps/v8/tools/turbolizer/package.json
@@ -17,7 +17,7 @@
"d3": "^5.7.0",
"rollup": "^0.68.2",
"rollup-plugin-node-resolve": "^4.0.0",
- "rollup-plugin-typescript2": "^0.18.1"
+ "rollup-plugin-typescript2": "^0.19.3"
},
"repository": {
"type": "git",
diff --git a/deps/v8/tools/turbolizer/src/schedule-view.ts b/deps/v8/tools/turbolizer/src/schedule-view.ts
index ed36d126fd..3da62ecf13 100644
--- a/deps/v8/tools/turbolizer/src/schedule-view.ts
+++ b/deps/v8/tools/turbolizer/src/schedule-view.ts
@@ -123,6 +123,7 @@ export class ScheduleView extends TextView {
}
const scheduleBlock = createElement("div", "schedule-block");
+ scheduleBlock.classList.toggle("deferred", block.isDeferred);
const [start, end] = view.sourceResolver.getInstructionRangeForBlock(block.id);
const instrMarker = createElement("div", "instr-marker com", "&#8857;");
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css
index b37dcc498b..216ca13d04 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.css
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.css
@@ -540,6 +540,10 @@ text {
content: "Block B";
}
+.schedule-block.deferred>.block-id::after {
+ content: " (deferred)";
+}
+
.schedule-block .block-list {
display: inline-block;
}
diff --git a/deps/v8/tools/ubsan/blacklist.txt b/deps/v8/tools/ubsan/blacklist.txt
index 12504639c5..0705adc0b4 100644
--- a/deps/v8/tools/ubsan/blacklist.txt
+++ b/deps/v8/tools/ubsan/blacklist.txt
@@ -1,6 +1,11 @@
#############################################################################
# UBSan blacklist.
-# UBSan bug, fixed in LLVM r350779. Drop this suppression when that
-# revision has rolled into Chromium's bundled Clang.
-fun:*v8*internal*NewArray*
+# Bug 8735: PropertyCallbackInfo<void> vs PropertyCallbackInfo<T>.
+fun:*v8*internal*PropertyCallbackArguments*CallAccessorSetter*
+fun:*v8*internal*PropertyCallbackArguments*BasicCallNamedGetterCallback*
+fun:*v8*internal*InvokeAccessorGetterCallback*
+
+# Bug 8735: WeakCallbackInfo<void> vs. WeakCallbackInfo<T>.
+fun:*v8*internal*GlobalHandles*PendingPhantomCallback*Invoke*
+fun:*v8*internal*GlobalHandles*Node*PostGarbageCollectionProcessing*
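These `fun:` globs use Clang's sanitizer special-case-list format: any
function whose mangled name matches a pattern is exempted from UBSan
instrumentation. The list is handed to the compiler roughly like this
(illustrative command line):

```
clang++ -fsanitize=undefined -fsanitize-blacklist=tools/ubsan/blacklist.txt foo.cc
```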
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index 0e22c77c11..f1028dee6a 100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -3,6 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
from collections import namedtuple
import coverage
import json
@@ -103,8 +106,8 @@ class PerfTest(unittest.TestCase):
@classmethod
def tearDownClass(cls):
cls._cov.stop()
- print ""
- print cls._cov.report()
+ print("")
+ print(cls._cov.report())
def setUp(self):
self.maxDiff = None
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index e136db6b53..ffe440447d 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -17,6 +17,9 @@ with different test suite extensions and build configurations.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import collections
import contextlib
import json
@@ -126,7 +129,7 @@ class SystemTest(unittest.TestCase):
import coverage
if int(coverage.__version__.split('.')[0]) < 4:
cls._cov = None
- print 'Python coverage version >= 4 required.'
+ print('Python coverage version >= 4 required.')
raise ImportError()
cls._cov = coverage.Coverage(
source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
@@ -142,10 +145,12 @@ class SystemTest(unittest.TestCase):
cls._cov.exclude('assert False')
cls._cov.start()
except ImportError:
- print 'Running without python coverage.'
+ print('Running without python coverage.')
sys.path.append(TOOLS_ROOT)
global standard_runner
from testrunner import standard_runner
+ global num_fuzzer
+ from testrunner import num_fuzzer
from testrunner.local import command
from testrunner.local import pool
command.setup_testing()
@@ -155,8 +160,8 @@ class SystemTest(unittest.TestCase):
def tearDownClass(cls):
if cls._cov:
cls._cov.stop()
- print ''
- print cls._cov.report(show_missing=True)
+ print('')
+ print(cls._cov.report(show_missing=True))
def testPass(self):
"""Test running only passing tests in two variants.
@@ -173,7 +178,7 @@ class SystemTest(unittest.TestCase):
'sweet/bananas',
'sweet/raspberries',
)
- self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
+ self.assertIn('Done running sweet/bananas default: pass', result.stdout, result)
# TODO(majeski): Implement for test processors
# self.assertIn('Total time:', result.stderr, result)
# self.assertIn('sweet/bananas', result.stderr, result)
@@ -189,17 +194,24 @@ class SystemTest(unittest.TestCase):
'--variants=default,stress',
'--shard-count=2',
'--shard-run=%d' % shard,
- 'sweet/bananas',
+ 'sweet/blackberries',
'sweet/raspberries',
infra_staging=False,
)
# One of the shards gets one variant of each test.
self.assertIn('2 tests ran', result.stdout, result)
if shard == 1:
- self.assertIn('Done running sweet/bananas', result.stdout, result)
+ self.assertIn(
+ 'Done running sweet/raspberries default', result.stdout, result)
+ self.assertIn(
+ 'Done running sweet/raspberries stress', result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
else:
- self.assertIn('Done running sweet/raspberries', result.stdout, result)
- self.assertEqual(0, result.returncode, result)
+ self.assertIn(
+ 'sweet/blackberries default: FAIL', result.stdout, result)
+ self.assertIn(
+ 'sweet/blackberries stress: FAIL', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
@unittest.skip("incompatible with test processors")
def testSharded(self):
@@ -233,7 +245,7 @@ class SystemTest(unittest.TestCase):
'sweet/strawberries',
infra_staging=False,
)
- self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
+ self.assertIn('Done running sweet/strawberries default: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def check_cleaned_json_output(
@@ -278,7 +290,7 @@ class SystemTest(unittest.TestCase):
'sweet/strawberries',
infra_staging=False,
)
- self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
+ self.assertIn('Done running sweet/strawberries default: FAIL', result.stdout, result)
# With test processors we don't count reruns as separated failures.
# TODO(majeski): fix it?
self.assertIn('1 tests failed', result.stdout, result)
@@ -308,7 +320,7 @@ class SystemTest(unittest.TestCase):
infra_staging=False,
)
self.assertIn(
- 'Done running sweet/bananaflakes: pass', result.stdout, result)
+ 'Done running sweet/bananaflakes default: pass', result.stdout, result)
self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
self.maxDiff = None
@@ -401,13 +413,6 @@ class SystemTest(unittest.TestCase):
self.assertIn('Failed to load build config', result.stdout, result)
self.assertEqual(5, result.returncode, result)
- def testGNOption(self):
- """Test using gn option, but no gn build folder is found."""
- with temp_base() as basedir:
- # TODO(machenbach): This should fail gracefully.
- with self.assertRaises(OSError):
- run_tests(basedir, '--gn')
-
def testInconsistentMode(self):
"""Test failing run when attempting to wrongly override the mode."""
with temp_base() as basedir:
@@ -505,7 +510,8 @@ class SystemTest(unittest.TestCase):
infra_staging=False,
)
self.assertIn('1 tests ran', result.stdout, result)
- self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
+ self.assertIn(
+ 'Done running sweet/bananas default: FAIL', result.stdout, result)
self.assertIn('Test had no allocation output', result.stdout, result)
self.assertIn('--predictable --verify_predictable', result.stdout, result)
self.assertEqual(1, result.returncode, result)
@@ -619,10 +625,11 @@ class SystemTest(unittest.TestCase):
infra_staging=False,
)
if name == 'color':
- expected = ('\033[32m+ 1\033[0m|'
+ expected = ('\033[34m% 28\033[0m|'
+ '\033[32m+ 1\033[0m|'
'\033[31m- 1\033[0m]: Done')
else:
- expected = '+ 1|- 1]: Done'
+ expected = '% 28|+ 1|- 1]: Done'
self.assertIn(expected, result.stdout)
self.assertIn('sweet/cherries', result.stdout)
self.assertIn('sweet/bananas', result.stdout)
@@ -641,14 +648,25 @@ class SystemTest(unittest.TestCase):
'sweet/blackberries', # FAIL
'sweet/raspberries', # should not run
)
- self.assertIn('sweet/mangoes: pass', result.stdout, result)
- self.assertIn('sweet/strawberries: FAIL', result.stdout, result)
+ self.assertIn('sweet/mangoes default: pass', result.stdout, result)
+ self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
self.assertIn('Too many failures, exiting...', result.stdout, result)
- self.assertIn('sweet/blackberries: FAIL', result.stdout, result)
+ self.assertIn('sweet/blackberries default: FAIL', result.stdout, result)
self.assertNotIn('Done running sweet/raspberries', result.stdout, result)
self.assertIn('2 tests failed', result.stdout, result)
self.assertIn('3 tests ran', result.stdout, result)
self.assertEqual(1, result.returncode, result)
+ def testNumFuzzer(self):
+ sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/Release']
+
+ with temp_base() as basedir:
+ with capture() as (stdout, stderr):
+ code = num_fuzzer.NumFuzzer(basedir=basedir).execute(sys_args)
+ result = Result(stdout.getvalue(), stderr.getvalue(), code)
+
+ self.assertEqual(0, result.returncode, result)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/deps/v8/tools/unittests/testdata/d8_mocked1.py b/deps/v8/tools/unittests/testdata/d8_mocked1.py
index 53405a6626..ff330af8c4 100644
--- a/deps/v8/tools/unittests/testdata/d8_mocked1.py
+++ b/deps/v8/tools/unittests/testdata/d8_mocked1.py
@@ -3,5 +3,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-print 'Richards: 1.2'
-print 'DeltaBlue: 2.1'
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print('Richards: 1.2')
+print('DeltaBlue: 2.1')
diff --git a/deps/v8/tools/unittests/testdata/d8_mocked2.py b/deps/v8/tools/unittests/testdata/d8_mocked2.py
index 71a3d047b5..36304628aa 100644
--- a/deps/v8/tools/unittests/testdata/d8_mocked2.py
+++ b/deps/v8/tools/unittests/testdata/d8_mocked2.py
@@ -3,8 +3,11 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-print 'Richards1: 1'
-print 'DeltaBlue1: 1'
-print 'Richards2: 0.2'
-print 'DeltaBlue2: 1.0'
-print 'DeltaBlue3: 0.1'
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print('Richards1: 1')
+print('DeltaBlue1: 1')
+print('Richards2: 0.2')
+print('DeltaBlue2: 1.0')
+print('DeltaBlue3: 0.1')
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
index 9ae985c3dc..7f6742e4cc 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results1.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -1,113 +1,112 @@
{
- "arch": "x64",
- "duration_mean": 1,
- "mode": "release",
+ "arch": "x64",
+ "duration_mean": 1,
+ "mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
"--test",
- "strawberries",
- "--random-seed=123",
+ "strawberries",
+ "--random-seed=123",
"--nohard-abort"
- ],
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 1,
- "stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
- "target_name": "d8_mocked.py",
+ ],
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "stderr": "",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
+ "target_name": "d8_mocked.py",
"variant": "default"
- },
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
"--test",
- "strawberries",
- "--random-seed=123",
+ "strawberries",
+ "--random-seed=123",
"--nohard-abort"
- ],
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 2,
- "stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
- "target_name": "d8_mocked.py",
+ ],
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 2,
+ "stderr": "",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
+ "target_name": "d8_mocked.py",
"variant": "default"
- },
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
"--test",
- "strawberries",
- "--random-seed=123",
+ "strawberries",
+ "--random-seed=123",
"--nohard-abort"
- ],
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 3,
- "stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
- "target_name": "d8_mocked.py",
+ ],
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 3,
+ "stderr": "",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
+ "target_name": "d8_mocked.py",
"variant": "default"
}
- ],
+ ],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
- "duration": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "duration": 1,
"flags": [
"--test",
- "strawberries",
- "--random-seed=123",
+ "strawberries",
+ "--random-seed=123",
"--nohard-abort"
- ],
- "marked_slow": true,
+ ],
+ "marked_slow": true,
"name": "sweet/strawberries"
- },
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
- "duration": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "duration": 1,
"flags": [
"--test",
- "strawberries",
- "--random-seed=123",
+ "strawberries",
+ "--random-seed=123",
"--nohard-abort"
- ],
- "marked_slow": true,
+ ],
+ "marked_slow": true,
"name": "sweet/strawberries"
- },
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
- "duration": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "duration": 1,
"flags": [
"--test",
- "strawberries",
- "--random-seed=123",
+ "strawberries",
+ "--random-seed=123",
"--nohard-abort"
- ],
- "marked_slow": true,
+ ],
+ "marked_slow": true,
"name": "sweet/strawberries"
}
- ],
+ ],
"test_total": 3
}
-
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json
index cdb4766e95..95224befdd 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results2.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json
@@ -1,74 +1,74 @@
{
- "arch": "x64",
- "duration_mean": 1,
- "mode": "release",
+ "arch": "x64",
+ "duration_mean": 1,
+ "mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "bananaflakes",
- "--random-seed=123",
+ "bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
- ],
- "name": "sweet/bananaflakes",
- "random_seed": 123,
- "result": "FAIL",
- "run": 1,
- "stderr": "",
- "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
- "target_name": "d8_mocked.py",
+ ],
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "stderr": "",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
+ "target_name": "d8_mocked.py",
"variant": "default"
- },
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
- "duration": 1,
- "exit_code": 0,
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
+ "duration": 1,
+ "exit_code": 0,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "bananaflakes",
- "--random-seed=123",
+ "bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
- ],
- "name": "sweet/bananaflakes",
- "random_seed": 123,
- "result": "PASS",
- "run": 2,
- "stderr": "",
- "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
- "target_name": "d8_mocked.py",
+ ],
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "PASS",
+ "run": 2,
+ "stderr": "",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
+ "target_name": "d8_mocked.py",
"variant": "default"
}
- ],
+ ],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
- "duration": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
+ "duration": 1,
"flags": [
- "bananaflakes",
- "--random-seed=123",
+ "bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
- ],
- "marked_slow": false,
+ ],
+ "marked_slow": false,
"name": "sweet/bananaflakes"
- },
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
- "duration": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
+ "duration": 1,
"flags": [
- "bananaflakes",
- "--random-seed=123",
+ "bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
- ],
- "marked_slow": false,
+ ],
+ "marked_slow": false,
"name": "sweet/bananaflakes"
}
- ],
+ ],
"test_total": 2
}
diff --git a/deps/v8/tools/unittests/testdata/predictable_mocked.py b/deps/v8/tools/unittests/testdata/predictable_mocked.py
index cc332c2c46..b9e73f6454 100644
--- a/deps/v8/tools/unittests/testdata/predictable_mocked.py
+++ b/deps/v8/tools/unittests/testdata/predictable_mocked.py
@@ -3,22 +3,25 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# for py2/py3 compatibility
+from __future__ import print_function
+
import sys
assert len(sys.argv) == 3
if sys.argv[1] == 'equal':
# 1. Scenario: print equal allocation hashes.
- print '### Allocations = 9497, hash = 0xc322c6b0'
+ print('### Allocations = 9497, hash = 0xc322c6b0')
elif sys.argv[1] == 'differ':
# 2. Scenario: print different allocation hashes. This prints a different
# hash on the second run, based on the content of a semaphore file. This
# file is expected to be empty in the beginning.
with open(sys.argv[2]) as f:
if f.read():
- print '### Allocations = 9497, hash = 0xc322c6b0'
+ print('### Allocations = 9497, hash = 0xc322c6b0')
else:
- print '### Allocations = 9497, hash = 0xc322c6b1'
+ print('### Allocations = 9497, hash = 0xc322c6b1')
with open(sys.argv[2], 'w') as f:
f.write('something')
else:
diff --git a/deps/v8/tools/unittests/testdata/results_processor.py b/deps/v8/tools/unittests/testdata/results_processor.py
index 69c23e306d..d8c5ad9448 100644
--- a/deps/v8/tools/unittests/testdata/results_processor.py
+++ b/deps/v8/tools/unittests/testdata/results_processor.py
@@ -7,6 +7,9 @@
Fake results processor for testing that just sums some things up.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import fileinput
import re
@@ -21,5 +24,5 @@ for line in fileinput.input():
if match:
deltablue += float(match.group(1))
-print 'Richards: %f' % richards
-print 'DeltaBlue: %f' % deltablue
+print('Richards: %f' % richards)
+print('DeltaBlue: %f' % deltablue)
diff --git a/deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py b/deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py
index c7ca55a571..d67e0304f6 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py
+++ b/deps/v8/tools/unittests/testdata/testroot1/d8_mocked.py
@@ -6,10 +6,13 @@
Dummy d8 replacement. Just passes all test, except if 'berries' is in args.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import sys
args = ' '.join(sys.argv[1:])
-print args
+print(args)
# Let all berries fail.
if 'berries' in args:
sys.exit(1)
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
index bf9c780621..a2dfc9d748 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
@@ -9,14 +9,17 @@ Dummy test suite extension with some fruity tests.
from testrunner.local import testsuite
from testrunner.objects import testcase
-class TestSuite(testsuite.TestSuite):
- def ListTests(self):
- return map(
- self._create_test, [
+class TestLoader(testsuite.TestLoader):
+ def _list_test_filenames(self):
+ return [
'bananas', 'apples', 'cherries', 'mangoes', 'strawberries',
'blackberries', 'raspberries',
- ],
- )
+ ]
+
+
+class TestSuite(testsuite.TestSuite):
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
index 79f9856a47..39b7cdf87c 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -4,6 +4,7 @@
"is_android": false,
"is_asan": false,
"is_cfi": false,
+ "is_clang": true,
"is_component_build": false,
"is_debug": false,
"is_gcov_coverage": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py b/deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py
index e66e299bc6..48d6bcea53 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py
+++ b/deps/v8/tools/unittests/testdata/testroot2/d8_mocked.py
@@ -6,12 +6,15 @@
Dummy d8 replacement for flaky tests.
"""
+# for py2/py3 compatibility
+from __future__ import print_function
+
import os
import sys
PATH = os.path.dirname(os.path.abspath(__file__))
-print ' '.join(sys.argv[1:])
+print(' '.join(sys.argv[1:]))
# Test files ending in 'flakes' should first fail then pass. We store state in
# a file side by side with the executable. No clean-up required as all tests
diff --git a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
index a986af5c2f..3606cd3eca 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
+++ b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
@@ -9,12 +9,15 @@ Dummy test suite extension with some flaky fruity tests.
from testrunner.local import testsuite
from testrunner.objects import testcase
+
+class TestLoader(testsuite.TestLoader):
+ def _list_test_filenames(self):
+ return ['bananaflakes']
+
+
class TestSuite(testsuite.TestSuite):
- def ListTests(self):
- return map(
- self._create_test,
- ['bananaflakes'],
- )
+ def _test_loader_class(self):
+ return TestLoader
def _test_class(self):
return TestCase
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
index e4946321c5..73b7a0b7c8 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -4,6 +4,7 @@
"is_android": false,
"is_asan": false,
"is_cfi": false,
+ "is_clang": true,
"is_component_build": false,
"is_debug": false,
"is_gcov_coverage": false,
diff --git a/deps/v8/tools/update-object-macros-undef.py b/deps/v8/tools/update-object-macros-undef.py
index ecec6239ad..866fdb6468 100755
--- a/deps/v8/tools/update-object-macros-undef.py
+++ b/deps/v8/tools/update-object-macros-undef.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python3
+
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index 5d775c8cb9..67105394c7 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -27,10 +27,14 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
try:
import hashlib
md5er = hashlib.md5
-except ImportError, e:
+except ImportError as e:
import md5
md5er = md5.new
@@ -84,7 +88,7 @@ def CppLintWorker(command):
out_line = process.stderr.readline()
if out_line == '' and process.poll() != None:
if error_count == -1:
- print "Failed to process %s" % command.pop()
+ print("Failed to process %s" % command.pop())
return 1
break
m = LINT_OUTPUT_PATTERN.match(out_line)
@@ -114,7 +118,8 @@ def TorqueLintWorker(command):
error_count += 1
sys.stdout.write(out_lines)
if error_count != 0:
- sys.stdout.write("tip: use 'tools/torque/format-torque.py -i <filename>'\n");
+ sys.stdout.write(
+ "warning: formatting and overwriting unformatted Torque files\n")
return error_count
except KeyboardInterrupt:
process.kill()
@@ -267,7 +272,7 @@ class CacheableSourceFileProcessor(SourceFileProcessor):
files = cache.FilterUnchangedFiles(files)
if len(files) == 0:
- print 'No changes in %s files detected. Skipping check' % self.file_type
+ print('No changes in %s files detected. Skipping check' % self.file_type)
return True
files_requiring_changes = self.DetectFilesToChange(files)
@@ -292,7 +297,7 @@ class CacheableSourceFileProcessor(SourceFileProcessor):
try:
results = pool.map_async(worker, commands).get(timeout=240)
except KeyboardInterrupt:
- print "\nCaught KeyboardInterrupt, terminating workers."
+ print("\nCaught KeyboardInterrupt, terminating workers.")
pool.terminate()
pool.join()
sys.exit(1)
@@ -371,7 +376,7 @@ class TorqueLintProcessor(CacheableSourceFileProcessor):
def GetProcessorScript(self):
torque_tools = os.path.join(TOOLS_PATH, "torque")
torque_path = os.path.join(torque_tools, "format-torque.py")
- arguments = ['-l']
+ arguments = ["-il"]
if os.path.isfile(torque_path):
return torque_path, arguments
@@ -486,12 +491,12 @@ class SourceProcessor(SourceFileProcessor):
base = basename(name)
if not base in SourceProcessor.IGNORE_TABS:
if '\t' in contents:
- print "%s contains tabs" % name
+ print("%s contains tabs" % name)
result = False
if not base in SourceProcessor.IGNORE_COPYRIGHTS and \
not SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORY in name:
if not COPYRIGHT_HEADER_PATTERN.search(contents):
- print "%s is missing a correct copyright header." % name
+ print("%s is missing a correct copyright header." % name)
result = False
if ' \n' in contents or contents.endswith(' '):
line = 0
@@ -504,34 +509,34 @@ class SourceProcessor(SourceFileProcessor):
lines.append(str(line))
linenumbers = ', '.join(lines)
if len(lines) > 1:
- print "%s has trailing whitespaces in lines %s." % (name, linenumbers)
+ print("%s has trailing whitespaces in lines %s." % (name, linenumbers))
else:
- print "%s has trailing whitespaces in line %s." % (name, linenumbers)
+ print("%s has trailing whitespaces in line %s." % (name, linenumbers))
result = False
if not contents.endswith('\n') or contents.endswith('\n\n'):
- print "%s does not end with a single new line." % name
+ print("%s does not end with a single new line." % name)
result = False
# Sanitize flags for fuzzer.
if ".js" in name and ("mjsunit" in name or "debugger" in name):
match = FLAGS_LINE.search(contents)
if match:
- print "%s Flags should use '-' (not '_')" % name
+ print("%s Flags should use '-' (not '_')" % name)
result = False
if not "mjsunit/mjsunit.js" in name:
if ASSERT_OPTIMIZED_PATTERN.search(contents) and \
not FLAGS_ENABLE_OPT.search(contents):
- print "%s Flag --opt should be set if " \
- "assertOptimized() is used" % name
+ print("%s Flag --opt should be set if " \
+ "assertOptimized() is used" % name)
result = False
if ASSERT_UNOPTIMIZED_PATTERN.search(contents) and \
not FLAGS_NO_ALWAYS_OPT.search(contents):
- print "%s Flag --no-always-opt should be set if " \
- "assertUnoptimized() is used" % name
+ print("%s Flag --no-always-opt should be set if " \
+ "assertUnoptimized() is used" % name)
result = False
match = self.runtime_function_call_pattern.search(contents)
if match:
- print "%s has unexpected spaces in a runtime call '%s'" % (name, match.group(1))
+ print("%s has unexpected spaces in a runtime call '%s'" % (name, match.group(1)))
result = False
return result
@@ -547,7 +552,7 @@ class SourceProcessor(SourceFileProcessor):
violations += 1
finally:
handle.close()
- print "Total violating files: %s" % violations
+ print("Total violating files: %s" % violations)
return success
def _CheckStatusFileForDuplicateKeys(filepath):
@@ -650,10 +655,12 @@ def CheckDeps(workspace):
def PyTests(workspace):
result = True
for script in [
+ join(workspace, 'tools', 'clusterfuzz', 'v8_foozzie_test.py'),
join(workspace, 'tools', 'release', 'test_scripts.py'),
join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
+ join(workspace, 'tools', 'testrunner', 'testproc', 'variant_unittest.py'),
]:
- print 'Running ' + script
+ print('Running ' + script)
result &= subprocess.call(
[sys.executable, script], stdout=subprocess.PIPE) == 0
@@ -675,22 +682,22 @@ def Main():
parser = GetOptions()
(options, args) = parser.parse_args()
success = True
- print "Running checkdeps..."
+ print("Running checkdeps...")
success &= CheckDeps(workspace)
use_linter_cache = not options.no_linter_cache
if not options.no_lint:
- print "Running C++ lint check..."
+ print("Running C++ lint check...")
success &= CppLintProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
- print "Running Torque formatting check..."
+ print("Running Torque formatting check...")
success &= TorqueLintProcessor(use_cache=use_linter_cache).RunOnPath(
workspace)
- print "Running copyright header, trailing whitespaces and " \
- "two empty lines between declarations check..."
+ print("Running copyright header, trailing whitespaces and " \
+ "two empty lines between declarations check...")
success &= SourceProcessor().RunOnPath(workspace)
- print "Running status-files check..."
+ print("Running status-files check...")
success &= StatusFilesProcessor().RunOnPath(workspace)
- print "Running python tests..."
+ print("Running python tests...")
success &= PyTests(workspace)
if success:
return 0
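
Taken together, the v8_presubmit.py hunks apply one py2/py3 compatibility recipe: `from __future__ import print_function` plus function-style print calls, and `except ... as e` in place of the py2-only comma form. A minimal standalone sketch of the same pattern (illustrative only; the digest input is arbitrary):

# Runs unchanged on Python 2 and Python 3.
from __future__ import print_function

try:
    import hashlib
    md5er = hashlib.md5
except ImportError as e:  # very old Python 2 without hashlib
    import md5
    md5er = md5.new

print("digest: %s" % md5er(b"v8").hexdigest())

On Python 3 the __future__ import is a no-op; on Python 2 it turns `print` into a function, so a single source tree passes under both interpreters.
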
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index fc053b60f7..9a0f323a0b 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -1,4 +1,4 @@
-# Copyright 2018 the V8 project authors. All rights reserved.
+# Copyright 2019 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
@@ -11,127 +11,126 @@ INSTANCE_TYPES = {
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
8: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
- 18: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
- 34: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE",
- 42: "UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
- 50: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
- 64: "STRING_TYPE",
- 65: "CONS_STRING_TYPE",
- 66: "EXTERNAL_STRING_TYPE",
- 67: "SLICED_STRING_TYPE",
- 69: "THIN_STRING_TYPE",
- 72: "ONE_BYTE_STRING_TYPE",
- 73: "CONS_ONE_BYTE_STRING_TYPE",
- 74: "EXTERNAL_ONE_BYTE_STRING_TYPE",
- 75: "SLICED_ONE_BYTE_STRING_TYPE",
- 77: "THIN_ONE_BYTE_STRING_TYPE",
- 82: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
- 98: "UNCACHED_EXTERNAL_STRING_TYPE",
- 106: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
- 114: "UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
- 128: "SYMBOL_TYPE",
- 129: "HEAP_NUMBER_TYPE",
- 130: "BIGINT_TYPE",
- 131: "ODDBALL_TYPE",
- 132: "MAP_TYPE",
- 133: "CODE_TYPE",
- 134: "MUTABLE_HEAP_NUMBER_TYPE",
- 135: "FOREIGN_TYPE",
- 136: "BYTE_ARRAY_TYPE",
- 137: "BYTECODE_ARRAY_TYPE",
- 138: "FREE_SPACE_TYPE",
- 139: "FIXED_INT8_ARRAY_TYPE",
- 140: "FIXED_UINT8_ARRAY_TYPE",
- 141: "FIXED_INT16_ARRAY_TYPE",
- 142: "FIXED_UINT16_ARRAY_TYPE",
- 143: "FIXED_INT32_ARRAY_TYPE",
- 144: "FIXED_UINT32_ARRAY_TYPE",
- 145: "FIXED_FLOAT32_ARRAY_TYPE",
- 146: "FIXED_FLOAT64_ARRAY_TYPE",
- 147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 148: "FIXED_BIGINT64_ARRAY_TYPE",
- 149: "FIXED_BIGUINT64_ARRAY_TYPE",
- 150: "FIXED_DOUBLE_ARRAY_TYPE",
- 151: "FEEDBACK_METADATA_TYPE",
- 152: "FILLER_TYPE",
- 153: "ACCESS_CHECK_INFO_TYPE",
- 154: "ACCESSOR_INFO_TYPE",
- 155: "ACCESSOR_PAIR_TYPE",
- 156: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 157: "ALLOCATION_MEMENTO_TYPE",
- 158: "ASM_WASM_DATA_TYPE",
- 159: "ASYNC_GENERATOR_REQUEST_TYPE",
- 160: "DEBUG_INFO_TYPE",
- 161: "FUNCTION_TEMPLATE_INFO_TYPE",
- 162: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
- 163: "INTERCEPTOR_INFO_TYPE",
- 164: "INTERPRETER_DATA_TYPE",
- 165: "MODULE_INFO_ENTRY_TYPE",
- 166: "MODULE_TYPE",
- 167: "OBJECT_TEMPLATE_INFO_TYPE",
- 168: "PROMISE_CAPABILITY_TYPE",
- 169: "PROMISE_REACTION_TYPE",
- 170: "PROTOTYPE_INFO_TYPE",
- 171: "SCRIPT_TYPE",
- 172: "STACK_FRAME_INFO_TYPE",
- 173: "TUPLE2_TYPE",
- 174: "TUPLE3_TYPE",
- 175: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
- 176: "WASM_DEBUG_INFO_TYPE",
- 177: "WASM_EXCEPTION_TAG_TYPE",
- 178: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
- 179: "CALLABLE_TASK_TYPE",
- 180: "CALLBACK_TASK_TYPE",
- 181: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
- 182: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
- 183: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
- 184: "WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE",
- 185: "ALLOCATION_SITE_TYPE",
- 186: "EMBEDDER_DATA_ARRAY_TYPE",
- 187: "FIXED_ARRAY_TYPE",
- 188: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 189: "HASH_TABLE_TYPE",
- 190: "ORDERED_HASH_MAP_TYPE",
- 191: "ORDERED_HASH_SET_TYPE",
- 192: "ORDERED_NAME_DICTIONARY_TYPE",
- 193: "NAME_DICTIONARY_TYPE",
- 194: "GLOBAL_DICTIONARY_TYPE",
- 195: "NUMBER_DICTIONARY_TYPE",
- 196: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 197: "STRING_TABLE_TYPE",
- 198: "EPHEMERON_HASH_TABLE_TYPE",
- 199: "SCOPE_INFO_TYPE",
- 200: "SCRIPT_CONTEXT_TABLE_TYPE",
- 201: "AWAIT_CONTEXT_TYPE",
- 202: "BLOCK_CONTEXT_TYPE",
- 203: "CATCH_CONTEXT_TYPE",
- 204: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 205: "EVAL_CONTEXT_TYPE",
- 206: "FUNCTION_CONTEXT_TYPE",
- 207: "MODULE_CONTEXT_TYPE",
- 208: "NATIVE_CONTEXT_TYPE",
- 209: "SCRIPT_CONTEXT_TYPE",
- 210: "WITH_CONTEXT_TYPE",
- 211: "WEAK_FIXED_ARRAY_TYPE",
- 212: "TRANSITION_ARRAY_TYPE",
- 213: "CALL_HANDLER_INFO_TYPE",
- 214: "CELL_TYPE",
- 215: "CODE_DATA_CONTAINER_TYPE",
- 216: "DESCRIPTOR_ARRAY_TYPE",
- 217: "FEEDBACK_CELL_TYPE",
- 218: "FEEDBACK_VECTOR_TYPE",
- 219: "LOAD_HANDLER_TYPE",
- 220: "PREPARSE_DATA_TYPE",
- 221: "PROPERTY_ARRAY_TYPE",
- 222: "PROPERTY_CELL_TYPE",
- 223: "SHARED_FUNCTION_INFO_TYPE",
- 224: "SMALL_ORDERED_HASH_MAP_TYPE",
- 225: "SMALL_ORDERED_HASH_SET_TYPE",
- 226: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 227: "STORE_HANDLER_TYPE",
- 228: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 229: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 230: "WEAK_ARRAY_LIST_TYPE",
+ 18: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE",
+ 26: "UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
+ 32: "STRING_TYPE",
+ 33: "CONS_STRING_TYPE",
+ 34: "EXTERNAL_STRING_TYPE",
+ 35: "SLICED_STRING_TYPE",
+ 37: "THIN_STRING_TYPE",
+ 40: "ONE_BYTE_STRING_TYPE",
+ 41: "CONS_ONE_BYTE_STRING_TYPE",
+ 42: "EXTERNAL_ONE_BYTE_STRING_TYPE",
+ 43: "SLICED_ONE_BYTE_STRING_TYPE",
+ 45: "THIN_ONE_BYTE_STRING_TYPE",
+ 50: "UNCACHED_EXTERNAL_STRING_TYPE",
+ 58: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
+ 64: "SYMBOL_TYPE",
+ 65: "HEAP_NUMBER_TYPE",
+ 66: "BIGINT_TYPE",
+ 67: "ODDBALL_TYPE",
+ 68: "MAP_TYPE",
+ 69: "CODE_TYPE",
+ 70: "MUTABLE_HEAP_NUMBER_TYPE",
+ 71: "FOREIGN_TYPE",
+ 72: "BYTE_ARRAY_TYPE",
+ 73: "BYTECODE_ARRAY_TYPE",
+ 74: "FREE_SPACE_TYPE",
+ 75: "FIXED_INT8_ARRAY_TYPE",
+ 76: "FIXED_UINT8_ARRAY_TYPE",
+ 77: "FIXED_INT16_ARRAY_TYPE",
+ 78: "FIXED_UINT16_ARRAY_TYPE",
+ 79: "FIXED_INT32_ARRAY_TYPE",
+ 80: "FIXED_UINT32_ARRAY_TYPE",
+ 81: "FIXED_FLOAT32_ARRAY_TYPE",
+ 82: "FIXED_FLOAT64_ARRAY_TYPE",
+ 83: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
+ 84: "FIXED_BIGINT64_ARRAY_TYPE",
+ 85: "FIXED_BIGUINT64_ARRAY_TYPE",
+ 86: "FIXED_DOUBLE_ARRAY_TYPE",
+ 87: "FEEDBACK_METADATA_TYPE",
+ 88: "FILLER_TYPE",
+ 89: "ACCESS_CHECK_INFO_TYPE",
+ 90: "ACCESSOR_INFO_TYPE",
+ 91: "ACCESSOR_PAIR_TYPE",
+ 92: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 93: "ALLOCATION_MEMENTO_TYPE",
+ 94: "ASM_WASM_DATA_TYPE",
+ 95: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 96: "CLASS_POSITIONS_TYPE",
+ 97: "DEBUG_INFO_TYPE",
+ 98: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 99: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 100: "INTERCEPTOR_INFO_TYPE",
+ 101: "INTERPRETER_DATA_TYPE",
+ 102: "MODULE_INFO_ENTRY_TYPE",
+ 103: "MODULE_TYPE",
+ 104: "OBJECT_TEMPLATE_INFO_TYPE",
+ 105: "PROMISE_CAPABILITY_TYPE",
+ 106: "PROMISE_REACTION_TYPE",
+ 107: "PROTOTYPE_INFO_TYPE",
+ 108: "SCRIPT_TYPE",
+ 109: "STACK_FRAME_INFO_TYPE",
+ 110: "STACK_TRACE_FRAME_TYPE",
+ 111: "TUPLE2_TYPE",
+ 112: "TUPLE3_TYPE",
+ 113: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
+ 114: "WASM_DEBUG_INFO_TYPE",
+ 115: "WASM_EXCEPTION_TAG_TYPE",
+ 116: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 117: "CALLABLE_TASK_TYPE",
+ 118: "CALLBACK_TASK_TYPE",
+ 119: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+ 120: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+ 121: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+ 122: "FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE",
+ 123: "ALLOCATION_SITE_TYPE",
+ 124: "EMBEDDER_DATA_ARRAY_TYPE",
+ 125: "FIXED_ARRAY_TYPE",
+ 126: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 127: "HASH_TABLE_TYPE",
+ 128: "ORDERED_HASH_MAP_TYPE",
+ 129: "ORDERED_HASH_SET_TYPE",
+ 130: "ORDERED_NAME_DICTIONARY_TYPE",
+ 131: "NAME_DICTIONARY_TYPE",
+ 132: "GLOBAL_DICTIONARY_TYPE",
+ 133: "NUMBER_DICTIONARY_TYPE",
+ 134: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 135: "STRING_TABLE_TYPE",
+ 136: "EPHEMERON_HASH_TABLE_TYPE",
+ 137: "SCOPE_INFO_TYPE",
+ 138: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 139: "AWAIT_CONTEXT_TYPE",
+ 140: "BLOCK_CONTEXT_TYPE",
+ 141: "CATCH_CONTEXT_TYPE",
+ 142: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 143: "EVAL_CONTEXT_TYPE",
+ 144: "FUNCTION_CONTEXT_TYPE",
+ 145: "MODULE_CONTEXT_TYPE",
+ 146: "NATIVE_CONTEXT_TYPE",
+ 147: "SCRIPT_CONTEXT_TYPE",
+ 148: "WITH_CONTEXT_TYPE",
+ 149: "WEAK_FIXED_ARRAY_TYPE",
+ 150: "TRANSITION_ARRAY_TYPE",
+ 151: "CALL_HANDLER_INFO_TYPE",
+ 152: "CELL_TYPE",
+ 153: "CODE_DATA_CONTAINER_TYPE",
+ 154: "DESCRIPTOR_ARRAY_TYPE",
+ 155: "FEEDBACK_CELL_TYPE",
+ 156: "FEEDBACK_VECTOR_TYPE",
+ 157: "LOAD_HANDLER_TYPE",
+ 158: "PREPARSE_DATA_TYPE",
+ 159: "PROPERTY_ARRAY_TYPE",
+ 160: "PROPERTY_CELL_TYPE",
+ 161: "SHARED_FUNCTION_INFO_TYPE",
+ 162: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 163: "SMALL_ORDERED_HASH_SET_TYPE",
+ 164: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 165: "STORE_HANDLER_TYPE",
+ 166: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 167: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 168: "WEAK_ARRAY_LIST_TYPE",
+ 169: "WEAK_CELL_TYPE",
1024: "JS_PROXY_TYPE",
1025: "JS_GLOBAL_OBJECT_TYPE",
1026: "JS_GLOBAL_PROXY_TYPE",
@@ -163,188 +162,186 @@ INSTANCE_TYPES = {
1078: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
1079: "JS_SET_VALUE_ITERATOR_TYPE",
1080: "JS_STRING_ITERATOR_TYPE",
- 1081: "JS_WEAK_CELL_TYPE",
- 1082: "JS_WEAK_REF_TYPE",
- 1083: "JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE",
- 1084: "JS_WEAK_FACTORY_TYPE",
- 1085: "JS_WEAK_MAP_TYPE",
- 1086: "JS_WEAK_SET_TYPE",
- 1087: "JS_TYPED_ARRAY_TYPE",
- 1088: "JS_DATA_VIEW_TYPE",
- 1089: "JS_INTL_V8_BREAK_ITERATOR_TYPE",
- 1090: "JS_INTL_COLLATOR_TYPE",
- 1091: "JS_INTL_DATE_TIME_FORMAT_TYPE",
- 1092: "JS_INTL_LIST_FORMAT_TYPE",
- 1093: "JS_INTL_LOCALE_TYPE",
- 1094: "JS_INTL_NUMBER_FORMAT_TYPE",
- 1095: "JS_INTL_PLURAL_RULES_TYPE",
- 1096: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
- 1097: "JS_INTL_SEGMENT_ITERATOR_TYPE",
- 1098: "JS_INTL_SEGMENTER_TYPE",
- 1099: "WASM_EXCEPTION_TYPE",
- 1100: "WASM_GLOBAL_TYPE",
- 1101: "WASM_INSTANCE_TYPE",
- 1102: "WASM_MEMORY_TYPE",
- 1103: "WASM_MODULE_TYPE",
- 1104: "WASM_TABLE_TYPE",
- 1105: "JS_BOUND_FUNCTION_TYPE",
- 1106: "JS_FUNCTION_TYPE",
+ 1081: "JS_WEAK_REF_TYPE",
+ 1082: "JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE",
+ 1083: "JS_FINALIZATION_GROUP_TYPE",
+ 1084: "JS_WEAK_MAP_TYPE",
+ 1085: "JS_WEAK_SET_TYPE",
+ 1086: "JS_TYPED_ARRAY_TYPE",
+ 1087: "JS_DATA_VIEW_TYPE",
+ 1088: "JS_INTL_V8_BREAK_ITERATOR_TYPE",
+ 1089: "JS_INTL_COLLATOR_TYPE",
+ 1090: "JS_INTL_DATE_TIME_FORMAT_TYPE",
+ 1091: "JS_INTL_LIST_FORMAT_TYPE",
+ 1092: "JS_INTL_LOCALE_TYPE",
+ 1093: "JS_INTL_NUMBER_FORMAT_TYPE",
+ 1094: "JS_INTL_PLURAL_RULES_TYPE",
+ 1095: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
+ 1096: "JS_INTL_SEGMENT_ITERATOR_TYPE",
+ 1097: "JS_INTL_SEGMENTER_TYPE",
+ 1098: "WASM_EXCEPTION_TYPE",
+ 1099: "WASM_GLOBAL_TYPE",
+ 1100: "WASM_INSTANCE_TYPE",
+ 1101: "WASM_MEMORY_TYPE",
+ 1102: "WASM_MODULE_TYPE",
+ 1103: "WASM_TABLE_TYPE",
+ 1104: "JS_BOUND_FUNCTION_TYPE",
+ 1105: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
- ("RO_SPACE", 0x00139): (138, "FreeSpaceMap"),
- ("RO_SPACE", 0x00189): (132, "MetaMap"),
- ("RO_SPACE", 0x00209): (131, "NullMap"),
- ("RO_SPACE", 0x00271): (216, "DescriptorArrayMap"),
- ("RO_SPACE", 0x002d1): (211, "WeakFixedArrayMap"),
- ("RO_SPACE", 0x00321): (152, "OnePointerFillerMap"),
- ("RO_SPACE", 0x00371): (152, "TwoPointerFillerMap"),
- ("RO_SPACE", 0x003f1): (131, "UninitializedMap"),
+ ("RO_SPACE", 0x00139): (74, "FreeSpaceMap"),
+ ("RO_SPACE", 0x00189): (68, "MetaMap"),
+ ("RO_SPACE", 0x00209): (67, "NullMap"),
+ ("RO_SPACE", 0x00271): (154, "DescriptorArrayMap"),
+ ("RO_SPACE", 0x002d1): (149, "WeakFixedArrayMap"),
+ ("RO_SPACE", 0x00321): (88, "OnePointerFillerMap"),
+ ("RO_SPACE", 0x00371): (88, "TwoPointerFillerMap"),
+ ("RO_SPACE", 0x003f1): (67, "UninitializedMap"),
("RO_SPACE", 0x00461): (8, "OneByteInternalizedStringMap"),
- ("RO_SPACE", 0x00501): (131, "UndefinedMap"),
- ("RO_SPACE", 0x00561): (129, "HeapNumberMap"),
- ("RO_SPACE", 0x005e1): (131, "TheHoleMap"),
- ("RO_SPACE", 0x00689): (131, "BooleanMap"),
- ("RO_SPACE", 0x00761): (136, "ByteArrayMap"),
- ("RO_SPACE", 0x007b1): (187, "FixedArrayMap"),
- ("RO_SPACE", 0x00801): (187, "FixedCOWArrayMap"),
- ("RO_SPACE", 0x00851): (189, "HashTableMap"),
- ("RO_SPACE", 0x008a1): (128, "SymbolMap"),
- ("RO_SPACE", 0x008f1): (72, "OneByteStringMap"),
- ("RO_SPACE", 0x00941): (199, "ScopeInfoMap"),
- ("RO_SPACE", 0x00991): (223, "SharedFunctionInfoMap"),
- ("RO_SPACE", 0x009e1): (133, "CodeMap"),
- ("RO_SPACE", 0x00a31): (206, "FunctionContextMap"),
- ("RO_SPACE", 0x00a81): (214, "CellMap"),
- ("RO_SPACE", 0x00ad1): (222, "GlobalPropertyCellMap"),
- ("RO_SPACE", 0x00b21): (135, "ForeignMap"),
- ("RO_SPACE", 0x00b71): (212, "TransitionArrayMap"),
- ("RO_SPACE", 0x00bc1): (218, "FeedbackVectorMap"),
- ("RO_SPACE", 0x00c61): (131, "ArgumentsMarkerMap"),
- ("RO_SPACE", 0x00d01): (131, "ExceptionMap"),
- ("RO_SPACE", 0x00da1): (131, "TerminationExceptionMap"),
- ("RO_SPACE", 0x00e49): (131, "OptimizedOutMap"),
- ("RO_SPACE", 0x00ee9): (131, "StaleRegisterMap"),
- ("RO_SPACE", 0x00f59): (208, "NativeContextMap"),
- ("RO_SPACE", 0x00fa9): (207, "ModuleContextMap"),
- ("RO_SPACE", 0x00ff9): (205, "EvalContextMap"),
- ("RO_SPACE", 0x01049): (209, "ScriptContextMap"),
- ("RO_SPACE", 0x01099): (201, "AwaitContextMap"),
- ("RO_SPACE", 0x010e9): (202, "BlockContextMap"),
- ("RO_SPACE", 0x01139): (203, "CatchContextMap"),
- ("RO_SPACE", 0x01189): (210, "WithContextMap"),
- ("RO_SPACE", 0x011d9): (204, "DebugEvaluateContextMap"),
- ("RO_SPACE", 0x01229): (200, "ScriptContextTableMap"),
- ("RO_SPACE", 0x01279): (151, "FeedbackMetadataArrayMap"),
- ("RO_SPACE", 0x012c9): (187, "ArrayListMap"),
- ("RO_SPACE", 0x01319): (130, "BigIntMap"),
- ("RO_SPACE", 0x01369): (188, "ObjectBoilerplateDescriptionMap"),
- ("RO_SPACE", 0x013b9): (137, "BytecodeArrayMap"),
- ("RO_SPACE", 0x01409): (215, "CodeDataContainerMap"),
- ("RO_SPACE", 0x01459): (150, "FixedDoubleArrayMap"),
- ("RO_SPACE", 0x014a9): (194, "GlobalDictionaryMap"),
- ("RO_SPACE", 0x014f9): (217, "ManyClosuresCellMap"),
- ("RO_SPACE", 0x01549): (187, "ModuleInfoMap"),
- ("RO_SPACE", 0x01599): (134, "MutableHeapNumberMap"),
- ("RO_SPACE", 0x015e9): (193, "NameDictionaryMap"),
- ("RO_SPACE", 0x01639): (217, "NoClosuresCellMap"),
- ("RO_SPACE", 0x01689): (217, "NoFeedbackCellMap"),
- ("RO_SPACE", 0x016d9): (195, "NumberDictionaryMap"),
- ("RO_SPACE", 0x01729): (217, "OneClosureCellMap"),
- ("RO_SPACE", 0x01779): (190, "OrderedHashMapMap"),
- ("RO_SPACE", 0x017c9): (191, "OrderedHashSetMap"),
- ("RO_SPACE", 0x01819): (192, "OrderedNameDictionaryMap"),
- ("RO_SPACE", 0x01869): (220, "PreparseDataMap"),
- ("RO_SPACE", 0x018b9): (221, "PropertyArrayMap"),
- ("RO_SPACE", 0x01909): (213, "SideEffectCallHandlerInfoMap"),
- ("RO_SPACE", 0x01959): (213, "SideEffectFreeCallHandlerInfoMap"),
- ("RO_SPACE", 0x019a9): (213, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("RO_SPACE", 0x019f9): (196, "SimpleNumberDictionaryMap"),
- ("RO_SPACE", 0x01a49): (187, "SloppyArgumentsElementsMap"),
- ("RO_SPACE", 0x01a99): (224, "SmallOrderedHashMapMap"),
- ("RO_SPACE", 0x01ae9): (225, "SmallOrderedHashSetMap"),
- ("RO_SPACE", 0x01b39): (226, "SmallOrderedNameDictionaryMap"),
- ("RO_SPACE", 0x01b89): (197, "StringTableMap"),
- ("RO_SPACE", 0x01bd9): (228, "UncompiledDataWithoutPreparseDataMap"),
- ("RO_SPACE", 0x01c29): (229, "UncompiledDataWithPreparseDataMap"),
- ("RO_SPACE", 0x01c79): (230, "WeakArrayListMap"),
- ("RO_SPACE", 0x01cc9): (198, "EphemeronHashTableMap"),
- ("RO_SPACE", 0x01d19): (186, "EmbedderDataArrayMap"),
- ("RO_SPACE", 0x01d69): (106, "NativeSourceStringMap"),
- ("RO_SPACE", 0x01db9): (64, "StringMap"),
- ("RO_SPACE", 0x01e09): (73, "ConsOneByteStringMap"),
- ("RO_SPACE", 0x01e59): (65, "ConsStringMap"),
- ("RO_SPACE", 0x01ea9): (77, "ThinOneByteStringMap"),
- ("RO_SPACE", 0x01ef9): (69, "ThinStringMap"),
- ("RO_SPACE", 0x01f49): (67, "SlicedStringMap"),
- ("RO_SPACE", 0x01f99): (75, "SlicedOneByteStringMap"),
- ("RO_SPACE", 0x01fe9): (66, "ExternalStringMap"),
- ("RO_SPACE", 0x02039): (82, "ExternalStringWithOneByteDataMap"),
- ("RO_SPACE", 0x02089): (74, "ExternalOneByteStringMap"),
- ("RO_SPACE", 0x020d9): (98, "UncachedExternalStringMap"),
- ("RO_SPACE", 0x02129): (114, "UncachedExternalStringWithOneByteDataMap"),
- ("RO_SPACE", 0x02179): (0, "InternalizedStringMap"),
- ("RO_SPACE", 0x021c9): (2, "ExternalInternalizedStringMap"),
- ("RO_SPACE", 0x02219): (18, "ExternalInternalizedStringWithOneByteDataMap"),
- ("RO_SPACE", 0x02269): (10, "ExternalOneByteInternalizedStringMap"),
- ("RO_SPACE", 0x022b9): (34, "UncachedExternalInternalizedStringMap"),
- ("RO_SPACE", 0x02309): (50, "UncachedExternalInternalizedStringWithOneByteDataMap"),
- ("RO_SPACE", 0x02359): (42, "UncachedExternalOneByteInternalizedStringMap"),
- ("RO_SPACE", 0x023a9): (106, "UncachedExternalOneByteStringMap"),
- ("RO_SPACE", 0x023f9): (140, "FixedUint8ArrayMap"),
- ("RO_SPACE", 0x02449): (139, "FixedInt8ArrayMap"),
- ("RO_SPACE", 0x02499): (142, "FixedUint16ArrayMap"),
- ("RO_SPACE", 0x024e9): (141, "FixedInt16ArrayMap"),
- ("RO_SPACE", 0x02539): (144, "FixedUint32ArrayMap"),
- ("RO_SPACE", 0x02589): (143, "FixedInt32ArrayMap"),
- ("RO_SPACE", 0x025d9): (145, "FixedFloat32ArrayMap"),
- ("RO_SPACE", 0x02629): (146, "FixedFloat64ArrayMap"),
- ("RO_SPACE", 0x02679): (147, "FixedUint8ClampedArrayMap"),
- ("RO_SPACE", 0x026c9): (149, "FixedBigUint64ArrayMap"),
- ("RO_SPACE", 0x02719): (148, "FixedBigInt64ArrayMap"),
- ("RO_SPACE", 0x02769): (131, "SelfReferenceMarkerMap"),
- ("RO_SPACE", 0x027d1): (173, "Tuple2Map"),
- ("RO_SPACE", 0x02871): (175, "ArrayBoilerplateDescriptionMap"),
- ("RO_SPACE", 0x02bb1): (163, "InterceptorInfoMap"),
- ("RO_SPACE", 0x05081): (153, "AccessCheckInfoMap"),
- ("RO_SPACE", 0x050d1): (154, "AccessorInfoMap"),
- ("RO_SPACE", 0x05121): (155, "AccessorPairMap"),
- ("RO_SPACE", 0x05171): (156, "AliasedArgumentsEntryMap"),
- ("RO_SPACE", 0x051c1): (157, "AllocationMementoMap"),
- ("RO_SPACE", 0x05211): (158, "AsmWasmDataMap"),
- ("RO_SPACE", 0x05261): (159, "AsyncGeneratorRequestMap"),
- ("RO_SPACE", 0x052b1): (160, "DebugInfoMap"),
- ("RO_SPACE", 0x05301): (161, "FunctionTemplateInfoMap"),
- ("RO_SPACE", 0x05351): (162, "FunctionTemplateRareDataMap"),
- ("RO_SPACE", 0x053a1): (164, "InterpreterDataMap"),
- ("RO_SPACE", 0x053f1): (165, "ModuleInfoEntryMap"),
- ("RO_SPACE", 0x05441): (166, "ModuleMap"),
- ("RO_SPACE", 0x05491): (167, "ObjectTemplateInfoMap"),
- ("RO_SPACE", 0x054e1): (168, "PromiseCapabilityMap"),
- ("RO_SPACE", 0x05531): (169, "PromiseReactionMap"),
- ("RO_SPACE", 0x05581): (170, "PrototypeInfoMap"),
- ("RO_SPACE", 0x055d1): (171, "ScriptMap"),
- ("RO_SPACE", 0x05621): (172, "StackFrameInfoMap"),
- ("RO_SPACE", 0x05671): (174, "Tuple3Map"),
- ("RO_SPACE", 0x056c1): (176, "WasmDebugInfoMap"),
- ("RO_SPACE", 0x05711): (177, "WasmExceptionTagMap"),
- ("RO_SPACE", 0x05761): (178, "WasmExportedFunctionDataMap"),
- ("RO_SPACE", 0x057b1): (179, "CallableTaskMap"),
- ("RO_SPACE", 0x05801): (180, "CallbackTaskMap"),
- ("RO_SPACE", 0x05851): (181, "PromiseFulfillReactionJobTaskMap"),
- ("RO_SPACE", 0x058a1): (182, "PromiseRejectReactionJobTaskMap"),
- ("RO_SPACE", 0x058f1): (183, "PromiseResolveThenableJobTaskMap"),
- ("RO_SPACE", 0x05941): (184, "WeakFactoryCleanupJobTaskMap"),
- ("RO_SPACE", 0x05991): (185, "AllocationSiteWithWeakNextMap"),
- ("RO_SPACE", 0x059e1): (185, "AllocationSiteWithoutWeakNextMap"),
- ("RO_SPACE", 0x05a31): (219, "LoadHandler1Map"),
- ("RO_SPACE", 0x05a81): (219, "LoadHandler2Map"),
- ("RO_SPACE", 0x05ad1): (219, "LoadHandler3Map"),
- ("RO_SPACE", 0x05b21): (227, "StoreHandler0Map"),
- ("RO_SPACE", 0x05b71): (227, "StoreHandler1Map"),
- ("RO_SPACE", 0x05bc1): (227, "StoreHandler2Map"),
- ("RO_SPACE", 0x05c11): (227, "StoreHandler3Map"),
+ ("RO_SPACE", 0x00501): (67, "UndefinedMap"),
+ ("RO_SPACE", 0x00561): (65, "HeapNumberMap"),
+ ("RO_SPACE", 0x005e1): (67, "TheHoleMap"),
+ ("RO_SPACE", 0x00689): (67, "BooleanMap"),
+ ("RO_SPACE", 0x00761): (72, "ByteArrayMap"),
+ ("RO_SPACE", 0x007b1): (125, "FixedArrayMap"),
+ ("RO_SPACE", 0x00801): (125, "FixedCOWArrayMap"),
+ ("RO_SPACE", 0x00851): (127, "HashTableMap"),
+ ("RO_SPACE", 0x008a1): (64, "SymbolMap"),
+ ("RO_SPACE", 0x008f1): (40, "OneByteStringMap"),
+ ("RO_SPACE", 0x00941): (137, "ScopeInfoMap"),
+ ("RO_SPACE", 0x00991): (161, "SharedFunctionInfoMap"),
+ ("RO_SPACE", 0x009e1): (69, "CodeMap"),
+ ("RO_SPACE", 0x00a31): (144, "FunctionContextMap"),
+ ("RO_SPACE", 0x00a81): (152, "CellMap"),
+ ("RO_SPACE", 0x00ad1): (160, "GlobalPropertyCellMap"),
+ ("RO_SPACE", 0x00b21): (71, "ForeignMap"),
+ ("RO_SPACE", 0x00b71): (150, "TransitionArrayMap"),
+ ("RO_SPACE", 0x00bc1): (156, "FeedbackVectorMap"),
+ ("RO_SPACE", 0x00c61): (67, "ArgumentsMarkerMap"),
+ ("RO_SPACE", 0x00d01): (67, "ExceptionMap"),
+ ("RO_SPACE", 0x00da1): (67, "TerminationExceptionMap"),
+ ("RO_SPACE", 0x00e49): (67, "OptimizedOutMap"),
+ ("RO_SPACE", 0x00ee9): (67, "StaleRegisterMap"),
+ ("RO_SPACE", 0x00f59): (146, "NativeContextMap"),
+ ("RO_SPACE", 0x00fa9): (145, "ModuleContextMap"),
+ ("RO_SPACE", 0x00ff9): (143, "EvalContextMap"),
+ ("RO_SPACE", 0x01049): (147, "ScriptContextMap"),
+ ("RO_SPACE", 0x01099): (139, "AwaitContextMap"),
+ ("RO_SPACE", 0x010e9): (140, "BlockContextMap"),
+ ("RO_SPACE", 0x01139): (141, "CatchContextMap"),
+ ("RO_SPACE", 0x01189): (148, "WithContextMap"),
+ ("RO_SPACE", 0x011d9): (142, "DebugEvaluateContextMap"),
+ ("RO_SPACE", 0x01229): (138, "ScriptContextTableMap"),
+ ("RO_SPACE", 0x01279): (87, "FeedbackMetadataArrayMap"),
+ ("RO_SPACE", 0x012c9): (125, "ArrayListMap"),
+ ("RO_SPACE", 0x01319): (66, "BigIntMap"),
+ ("RO_SPACE", 0x01369): (126, "ObjectBoilerplateDescriptionMap"),
+ ("RO_SPACE", 0x013b9): (73, "BytecodeArrayMap"),
+ ("RO_SPACE", 0x01409): (153, "CodeDataContainerMap"),
+ ("RO_SPACE", 0x01459): (86, "FixedDoubleArrayMap"),
+ ("RO_SPACE", 0x014a9): (132, "GlobalDictionaryMap"),
+ ("RO_SPACE", 0x014f9): (155, "ManyClosuresCellMap"),
+ ("RO_SPACE", 0x01549): (125, "ModuleInfoMap"),
+ ("RO_SPACE", 0x01599): (70, "MutableHeapNumberMap"),
+ ("RO_SPACE", 0x015e9): (131, "NameDictionaryMap"),
+ ("RO_SPACE", 0x01639): (155, "NoClosuresCellMap"),
+ ("RO_SPACE", 0x01689): (155, "NoFeedbackCellMap"),
+ ("RO_SPACE", 0x016d9): (133, "NumberDictionaryMap"),
+ ("RO_SPACE", 0x01729): (155, "OneClosureCellMap"),
+ ("RO_SPACE", 0x01779): (128, "OrderedHashMapMap"),
+ ("RO_SPACE", 0x017c9): (129, "OrderedHashSetMap"),
+ ("RO_SPACE", 0x01819): (130, "OrderedNameDictionaryMap"),
+ ("RO_SPACE", 0x01869): (158, "PreparseDataMap"),
+ ("RO_SPACE", 0x018b9): (159, "PropertyArrayMap"),
+ ("RO_SPACE", 0x01909): (151, "SideEffectCallHandlerInfoMap"),
+ ("RO_SPACE", 0x01959): (151, "SideEffectFreeCallHandlerInfoMap"),
+ ("RO_SPACE", 0x019a9): (151, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("RO_SPACE", 0x019f9): (134, "SimpleNumberDictionaryMap"),
+ ("RO_SPACE", 0x01a49): (125, "SloppyArgumentsElementsMap"),
+ ("RO_SPACE", 0x01a99): (162, "SmallOrderedHashMapMap"),
+ ("RO_SPACE", 0x01ae9): (163, "SmallOrderedHashSetMap"),
+ ("RO_SPACE", 0x01b39): (164, "SmallOrderedNameDictionaryMap"),
+ ("RO_SPACE", 0x01b89): (135, "StringTableMap"),
+ ("RO_SPACE", 0x01bd9): (166, "UncompiledDataWithoutPreparseDataMap"),
+ ("RO_SPACE", 0x01c29): (167, "UncompiledDataWithPreparseDataMap"),
+ ("RO_SPACE", 0x01c79): (168, "WeakArrayListMap"),
+ ("RO_SPACE", 0x01cc9): (136, "EphemeronHashTableMap"),
+ ("RO_SPACE", 0x01d19): (124, "EmbedderDataArrayMap"),
+ ("RO_SPACE", 0x01d69): (169, "WeakCellMap"),
+ ("RO_SPACE", 0x01db9): (58, "NativeSourceStringMap"),
+ ("RO_SPACE", 0x01e09): (32, "StringMap"),
+ ("RO_SPACE", 0x01e59): (41, "ConsOneByteStringMap"),
+ ("RO_SPACE", 0x01ea9): (33, "ConsStringMap"),
+ ("RO_SPACE", 0x01ef9): (45, "ThinOneByteStringMap"),
+ ("RO_SPACE", 0x01f49): (37, "ThinStringMap"),
+ ("RO_SPACE", 0x01f99): (35, "SlicedStringMap"),
+ ("RO_SPACE", 0x01fe9): (43, "SlicedOneByteStringMap"),
+ ("RO_SPACE", 0x02039): (34, "ExternalStringMap"),
+ ("RO_SPACE", 0x02089): (42, "ExternalOneByteStringMap"),
+ ("RO_SPACE", 0x020d9): (50, "UncachedExternalStringMap"),
+ ("RO_SPACE", 0x02129): (0, "InternalizedStringMap"),
+ ("RO_SPACE", 0x02179): (2, "ExternalInternalizedStringMap"),
+ ("RO_SPACE", 0x021c9): (10, "ExternalOneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x02219): (18, "UncachedExternalInternalizedStringMap"),
+ ("RO_SPACE", 0x02269): (26, "UncachedExternalOneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x022b9): (58, "UncachedExternalOneByteStringMap"),
+ ("RO_SPACE", 0x02309): (76, "FixedUint8ArrayMap"),
+ ("RO_SPACE", 0x02359): (75, "FixedInt8ArrayMap"),
+ ("RO_SPACE", 0x023a9): (78, "FixedUint16ArrayMap"),
+ ("RO_SPACE", 0x023f9): (77, "FixedInt16ArrayMap"),
+ ("RO_SPACE", 0x02449): (80, "FixedUint32ArrayMap"),
+ ("RO_SPACE", 0x02499): (79, "FixedInt32ArrayMap"),
+ ("RO_SPACE", 0x024e9): (81, "FixedFloat32ArrayMap"),
+ ("RO_SPACE", 0x02539): (82, "FixedFloat64ArrayMap"),
+ ("RO_SPACE", 0x02589): (83, "FixedUint8ClampedArrayMap"),
+ ("RO_SPACE", 0x025d9): (85, "FixedBigUint64ArrayMap"),
+ ("RO_SPACE", 0x02629): (84, "FixedBigInt64ArrayMap"),
+ ("RO_SPACE", 0x02679): (67, "SelfReferenceMarkerMap"),
+ ("RO_SPACE", 0x026e1): (111, "Tuple2Map"),
+ ("RO_SPACE", 0x02781): (113, "ArrayBoilerplateDescriptionMap"),
+ ("RO_SPACE", 0x02ac1): (100, "InterceptorInfoMap"),
+ ("RO_SPACE", 0x04fe1): (89, "AccessCheckInfoMap"),
+ ("RO_SPACE", 0x05031): (90, "AccessorInfoMap"),
+ ("RO_SPACE", 0x05081): (91, "AccessorPairMap"),
+ ("RO_SPACE", 0x050d1): (92, "AliasedArgumentsEntryMap"),
+ ("RO_SPACE", 0x05121): (93, "AllocationMementoMap"),
+ ("RO_SPACE", 0x05171): (94, "AsmWasmDataMap"),
+ ("RO_SPACE", 0x051c1): (95, "AsyncGeneratorRequestMap"),
+ ("RO_SPACE", 0x05211): (96, "ClassPositionsMap"),
+ ("RO_SPACE", 0x05261): (97, "DebugInfoMap"),
+ ("RO_SPACE", 0x052b1): (98, "FunctionTemplateInfoMap"),
+ ("RO_SPACE", 0x05301): (99, "FunctionTemplateRareDataMap"),
+ ("RO_SPACE", 0x05351): (101, "InterpreterDataMap"),
+ ("RO_SPACE", 0x053a1): (102, "ModuleInfoEntryMap"),
+ ("RO_SPACE", 0x053f1): (103, "ModuleMap"),
+ ("RO_SPACE", 0x05441): (104, "ObjectTemplateInfoMap"),
+ ("RO_SPACE", 0x05491): (105, "PromiseCapabilityMap"),
+ ("RO_SPACE", 0x054e1): (106, "PromiseReactionMap"),
+ ("RO_SPACE", 0x05531): (107, "PrototypeInfoMap"),
+ ("RO_SPACE", 0x05581): (108, "ScriptMap"),
+ ("RO_SPACE", 0x055d1): (109, "StackFrameInfoMap"),
+ ("RO_SPACE", 0x05621): (110, "StackTraceFrameMap"),
+ ("RO_SPACE", 0x05671): (112, "Tuple3Map"),
+ ("RO_SPACE", 0x056c1): (114, "WasmDebugInfoMap"),
+ ("RO_SPACE", 0x05711): (115, "WasmExceptionTagMap"),
+ ("RO_SPACE", 0x05761): (116, "WasmExportedFunctionDataMap"),
+ ("RO_SPACE", 0x057b1): (117, "CallableTaskMap"),
+ ("RO_SPACE", 0x05801): (118, "CallbackTaskMap"),
+ ("RO_SPACE", 0x05851): (119, "PromiseFulfillReactionJobTaskMap"),
+ ("RO_SPACE", 0x058a1): (120, "PromiseRejectReactionJobTaskMap"),
+ ("RO_SPACE", 0x058f1): (121, "PromiseResolveThenableJobTaskMap"),
+ ("RO_SPACE", 0x05941): (122, "FinalizationGroupCleanupJobTaskMap"),
+ ("RO_SPACE", 0x05991): (123, "AllocationSiteWithWeakNextMap"),
+ ("RO_SPACE", 0x059e1): (123, "AllocationSiteWithoutWeakNextMap"),
+ ("RO_SPACE", 0x05a31): (157, "LoadHandler1Map"),
+ ("RO_SPACE", 0x05a81): (157, "LoadHandler2Map"),
+ ("RO_SPACE", 0x05ad1): (157, "LoadHandler3Map"),
+ ("RO_SPACE", 0x05b21): (165, "StoreHandler0Map"),
+ ("RO_SPACE", 0x05b71): (165, "StoreHandler1Map"),
+ ("RO_SPACE", 0x05bc1): (165, "StoreHandler2Map"),
+ ("RO_SPACE", 0x05c11): (165, "StoreHandler3Map"),
("MAP_SPACE", 0x00139): (1057, "ExternalMap"),
("MAP_SPACE", 0x00189): (1073, "JSMessageObjectMap"),
}
@@ -369,37 +366,37 @@ KNOWN_OBJECTS = {
("RO_SPACE", 0x00d71): "TerminationException",
("RO_SPACE", 0x00e19): "OptimizedOut",
("RO_SPACE", 0x00eb9): "StaleRegister",
- ("RO_SPACE", 0x027b9): "EmptyEnumCache",
- ("RO_SPACE", 0x02821): "EmptyPropertyArray",
- ("RO_SPACE", 0x02831): "EmptyByteArray",
- ("RO_SPACE", 0x02841): "EmptyObjectBoilerplateDescription",
- ("RO_SPACE", 0x02859): "EmptyArrayBoilerplateDescription",
- ("RO_SPACE", 0x028c1): "EmptyFixedUint8Array",
- ("RO_SPACE", 0x028e1): "EmptyFixedInt8Array",
- ("RO_SPACE", 0x02901): "EmptyFixedUint16Array",
- ("RO_SPACE", 0x02921): "EmptyFixedInt16Array",
- ("RO_SPACE", 0x02941): "EmptyFixedUint32Array",
- ("RO_SPACE", 0x02961): "EmptyFixedInt32Array",
- ("RO_SPACE", 0x02981): "EmptyFixedFloat32Array",
- ("RO_SPACE", 0x029a1): "EmptyFixedFloat64Array",
- ("RO_SPACE", 0x029c1): "EmptyFixedUint8ClampedArray",
- ("RO_SPACE", 0x029e1): "EmptyFixedBigUint64Array",
- ("RO_SPACE", 0x02a01): "EmptyFixedBigInt64Array",
- ("RO_SPACE", 0x02a21): "EmptySloppyArgumentsElements",
- ("RO_SPACE", 0x02a41): "EmptySlowElementDictionary",
- ("RO_SPACE", 0x02a89): "EmptyOrderedHashMap",
- ("RO_SPACE", 0x02ab1): "EmptyOrderedHashSet",
- ("RO_SPACE", 0x02ad9): "EmptyFeedbackMetadata",
- ("RO_SPACE", 0x02ae9): "EmptyPropertyCell",
- ("RO_SPACE", 0x02b11): "EmptyPropertyDictionary",
- ("RO_SPACE", 0x02b61): "NoOpInterceptorInfo",
- ("RO_SPACE", 0x02c01): "EmptyWeakArrayList",
- ("RO_SPACE", 0x02c19): "InfinityValue",
- ("RO_SPACE", 0x02c29): "MinusZeroValue",
- ("RO_SPACE", 0x02c39): "MinusInfinityValue",
- ("RO_SPACE", 0x02c49): "SelfReferenceMarker",
- ("RO_SPACE", 0x02ca1): "OffHeapTrampolineRelocationInfo",
- ("RO_SPACE", 0x02cb9): "HashSeed",
+ ("RO_SPACE", 0x026c9): "EmptyEnumCache",
+ ("RO_SPACE", 0x02731): "EmptyPropertyArray",
+ ("RO_SPACE", 0x02741): "EmptyByteArray",
+ ("RO_SPACE", 0x02751): "EmptyObjectBoilerplateDescription",
+ ("RO_SPACE", 0x02769): "EmptyArrayBoilerplateDescription",
+ ("RO_SPACE", 0x027d1): "EmptyFixedUint8Array",
+ ("RO_SPACE", 0x027f1): "EmptyFixedInt8Array",
+ ("RO_SPACE", 0x02811): "EmptyFixedUint16Array",
+ ("RO_SPACE", 0x02831): "EmptyFixedInt16Array",
+ ("RO_SPACE", 0x02851): "EmptyFixedUint32Array",
+ ("RO_SPACE", 0x02871): "EmptyFixedInt32Array",
+ ("RO_SPACE", 0x02891): "EmptyFixedFloat32Array",
+ ("RO_SPACE", 0x028b1): "EmptyFixedFloat64Array",
+ ("RO_SPACE", 0x028d1): "EmptyFixedUint8ClampedArray",
+ ("RO_SPACE", 0x028f1): "EmptyFixedBigUint64Array",
+ ("RO_SPACE", 0x02911): "EmptyFixedBigInt64Array",
+ ("RO_SPACE", 0x02931): "EmptySloppyArgumentsElements",
+ ("RO_SPACE", 0x02951): "EmptySlowElementDictionary",
+ ("RO_SPACE", 0x02999): "EmptyOrderedHashMap",
+ ("RO_SPACE", 0x029c1): "EmptyOrderedHashSet",
+ ("RO_SPACE", 0x029e9): "EmptyFeedbackMetadata",
+ ("RO_SPACE", 0x029f9): "EmptyPropertyCell",
+ ("RO_SPACE", 0x02a21): "EmptyPropertyDictionary",
+ ("RO_SPACE", 0x02a71): "NoOpInterceptorInfo",
+ ("RO_SPACE", 0x02b11): "EmptyWeakArrayList",
+ ("RO_SPACE", 0x02b29): "InfinityValue",
+ ("RO_SPACE", 0x02b39): "MinusZeroValue",
+ ("RO_SPACE", 0x02b49): "MinusInfinityValue",
+ ("RO_SPACE", 0x02b59): "SelfReferenceMarker",
+ ("RO_SPACE", 0x02bb1): "OffHeapTrampolineRelocationInfo",
+ ("RO_SPACE", 0x02bc9): "HashSeed",
("OLD_SPACE", 0x00139): "ArgumentsIteratorAccessor",
("OLD_SPACE", 0x001a9): "ArrayLengthAccessor",
("OLD_SPACE", 0x00219): "BoundFunctionLengthAccessor",
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 1b70f46680..b6459552b1 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -1,10 +1,10 @@
You can modify this file to create no-op changelists.
-Try to write something funny. And please don't add trailing whitespace.
+Try to write something funny. And please don't add trailing whitespace..
A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
-The autoroller bought a round of Himbeerbrause. Suddenly.......
-The bartender starts to shake the bottles!
+The autoroller bought a round of Himbeerbrause. Suddenly.....
+The bartender starts to shake the bottles............................